From c6ce3f98d2edcbbcec652b485a670c2d7eb8638d Mon Sep 17 00:00:00 2001 From: fcdl94 Date: Fri, 22 Nov 2024 12:05:09 +0100 Subject: [PATCH 1/8] feat: adding demo code --- Makefile | 3 + demo/.monitor.ipynb | 126 +++++++++++ demo/dataset.py | 80 +++++++ demo/datasets.ipynb | 102 +++++++++ demo/demo.ipynb | 218 ++++++++++++++++++ demo/utils.py | 189 ++++++++++++++++ focoos/focoos.py | 4 +- focoos/local_model.py | 5 +- focoos/remote_model.py | 3 +- notebooks/_playground.ipynb | 434 ------------------------------------ pyproject.toml | 2 + 11 files changed, 728 insertions(+), 438 deletions(-) create mode 100644 demo/.monitor.ipynb create mode 100644 demo/dataset.py create mode 100644 demo/datasets.ipynb create mode 100644 demo/demo.ipynb create mode 100644 demo/utils.py delete mode 100644 notebooks/_playground.ipynb diff --git a/Makefile b/Makefile index 1ccb3f8..f1f4a6a 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,9 @@ install: install-dev: @pip install -e ".[dev]" --no-cache-dir +install-demo: + @pip install -e ".[demo]" --no-cache-dir + install-pre-commit: @pre-commit install diff --git a/demo/.monitor.ipynb b/demo/.monitor.ipynb new file mode 100644 index 0000000..331d85a --- /dev/null +++ b/demo/.monitor.ipynb @@ -0,0 +1,126 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from focoos import Focoos, FocoosEnvHostUrl\n", + "from pprint import pprint\n", + "import os\n", + "\n", + "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", + "\n", + "models = focoos.list_models()\n", + "\n", + "print(f\"Found {len(models)} models\")\n", + "print()\n", + "\n", + "deployed_models = []\n", + "print(\"** Deployed models **\")\n", + "for model in models:\n", + " if model.status == \"DEPLOYED\":\n", + " print(model.name, model.ref)\n", + " deployed_models.append(model.name)\n", + "print()\n", + "\n", + "print(\"** Currently training models **\")\n", + "for model in models:\n", + " if \"TRAINING_RUNNING\" in model.status:\n", + " print(model.name, model.ref)\n", + "print()\n", + "\n", + "print(\"** Ready to deploy models **\")\n", + "for model in models:\n", + " if \"TRAINING_COMPLETED\" in model.status:\n", + " print(model.name, model.ref)\n", + "print()\n", + "\n", + "print(\"** Others **\")\n", + "for model in models:\n", + " if model.status not in [\"DEPLOYED\", \"TRAINING_RUNNING\", \"TRAINING_COMPLETED\"]:\n", + " print(model.name, model.ref, model.status)\n", + "print()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# MONITOR TRAINING\n", + "model = focoos.get_model_by_name(\"cable\")\n", + "model.monitor_train()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# STOP TRAINING\n", + "model = focoos.get_remote_model(\"31b7cd71c1584951\")\n", + "model.stop_training()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# UNLOAD MODELS\n", + "model_names = deployed_models\n", + "\n", + "for name in model_names:\n", + " model = focoos.get_model_by_name(name)\n", + " model.unload()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# DELETE MODELS\n", + "model_names = [\"31b7cd71c1584951\"]\n", + "\n", + "for name in model_names:\n", + " model = focoos.get_remote_model(name)\n", + " model.delete_model()" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "focoos", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo/dataset.py b/demo/dataset.py new file mode 100644 index 0000000..b389ed9 --- /dev/null +++ b/demo/dataset.py @@ -0,0 +1,80 @@ +DATASETS = { + "bottles": { + "name": "bottles", + "path": "../data/bottles", + "task": "detection", + "workspace": "roboflow-100", + "project": "soda-bottles", + "version": 4, + }, + "blister": { + "name": "blister", + "path": "../data/blister", + "task": "instance_segmentation", + "workspace": "blisterdetection", + "project": "blister-pills-segmentation", + "version": 1, + }, + "boxes": { + "name": "boxes", + "path": "../data/boxes", + "task": "detection", + "workspace": "moyed-chowdhury", + "project": "mv_train_data", + "version": 2, + }, + "cable": { + "name": "cable", + "path": "../data/cable", + "task": "detection", + "workspace": "roboflow-100", + "project": "cable-damage", + "version": 2, + }, + "concrete": { + "name": "concrete", + "path": "../data/concrete", + "task": "instance_segmentation", + "workspace": "focoosai", + "project": "concrete-merge-d91ow", + "version": 1, + }, + "lettuce": { + "name": "lettuce", + "path": "../data/lettuce", + "task": "detection", + "workspace": "object-detection", + "project": "lettuce-pallets", + "version": 1, + }, + "peanuts": { + "name": "Peanuts", + "path": "../data/peanuts", + "task": "detection", + "workspace": "roboflow-100", + "project": "peanuts-sd4kf", + "version": 1, + }, + "safety": { + "name": "Safety", + "path": "../data/safety", + "task": "detection", + "workspace": "roboflow-100", + "project": "construction-safety-gsnvb", + "version": 1, + }, + "strawberry": { + "name": "Strawberries", + "path": "../data/strawberries", + "task": "instance_segmentation", + "workspace": "marstrawberry", + "project": "strawberry-disease-uudgf", + "version": 1, + }, +} + + +def get_dataset(name): + if name not in DATASETS: + raise ValueError(f"Dataset {name} not found") + return DATASETS[name] diff --git a/demo/datasets.ipynb b/demo/datasets.ipynb new file mode 100644 index 0000000..baff521 --- /dev/null +++ b/demo/datasets.ipynb @@ -0,0 +1,102 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Links:\n", + "## Detection\n", + "- [x] [Bottles in Fridge](https://universe.roboflow.com/roboflow-100/soda-bottles/dataset)\n", + "- [x] [Box on Conveyor](https://universe.roboflow.com/moyed-chowdhury/mv_train_data/dataset/2)\n", + "- [x] [Cable Defects](https://universe.roboflow.com/roboflow-100/cable-damage/dataset/2)\n", + "- [x] [Lattuce Growth](https://universe.roboflow.com/object-detection/lettuce-pallets/dataset)\n", + "- [x] [Peanuts Quality Control](https://universe.roboflow.com/roboflow-100/peanuts-sd4kf)\n", + "- [x] [Safety in Workplace](https://universe.roboflow.com/roboflow-100/construction-safety-gsnvb)\n", + "\n", + "## Segmentation\n", + "- [x] [Blister and Pills](https://universe.roboflow.com/blisterdetection/blister-pills-segmentation/dataset)\n", + "- [x] [Concrete Cracks and Defects](https://app.roboflow.com/focoosai/concrete-merge-d91ow/)\n", + "- [x] 
[Strawberry Harvest](https://universe.roboflow.com/marstrawberry/strawberry-disease-uudgf/dataset/13)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Download datasets\n", + "# !pip install roboflow\n", + "from dataset import get_dataset\n", + "from roboflow import Roboflow\n", + "import os\n", + "\n", + "dataset_cfg = get_dataset(\"bottles\")\n", + "\n", + "rf = Roboflow(api_key=os.getenv(\"ROBOFLOW_API_KEY\"))\n", + "project = rf.workspace(dataset_cfg[\"workspace\"]).project(dataset_cfg[\"project\"])\n", + "version = project.version(dataset_cfg[\"version\"])\n", + "dataset = version.download(\"coco\", location=dataset_cfg[\"path\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from utils import display_instseg, display_detection\n", + "from dataset import get_dataset\n", + "\n", + "dataset_cfg = get_dataset(\"bottles\")\n", + "PATH = dataset_cfg[\"path\"] + \"/valid\"\n", + "\n", + "if dataset_cfg[\"task\"] == \"instance_segmentation\":\n", + " display_instseg(PATH, num_images=4, annotate=True)\n", + "else:\n", + " display_detection(PATH, num_images=4, annotate=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from focoos import Focoos, FocoosEnvHostUrl\n", + "\n", + "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", + "focoos.list_shared_datasets()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "focoos", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo/demo.ipynb b/demo/demo.ipynb new file mode 100644 index 0000000..3eaafa5 --- /dev/null +++ b/demo/demo.ipynb @@ -0,0 +1,218 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### SETUP" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import random\n", + "from dotenv import load_dotenv\n", + "import os\n", + "from pprint import pprint\n", + "from focoos import DeploymentMode, Focoos, FocoosEnvHostUrl\n", + "from focoos.ports import Hyperparameters\n", + "from supervision import plot_image\n", + "from utils import display_instseg, display_detection\n", + "from dataset import get_dataset\n", + "\n", + "load_dotenv()\n", + "\n", + "dataset_cfg = get_dataset(\"cable\") # Get dataset configuration\n", + "\n", + "DATASET_NAME = dataset_cfg[\"name\"] # Dataset name\n", + "MODEL_NAME = dataset_cfg[\"name\"] # Model name\n", + "VIS_FUNC = (\n", + " display_detection if dataset_cfg[\"task\"] == \"detection\" else display_instseg\n", + ") # Visualization function\n", + "FOCOOS_MODEL = (\n", + " \"focoos_object365\"\n", + " if dataset_cfg[\"task\"] == \"detection\"\n", + " else \"focoos_ade_ins_medium\"\n", + ") # Start model\n", + "\n", + "PATH = dataset_cfg[\"path\"] + \"/valid\" # Path to the dataset (for visualization only)\n", + "PATHS = [os.path.join(PATH, p) for p in os.listdir(PATH)]\n", + "\n", + "API_KEY = 
os.getenv(\"FOCOOS_API_KEY\")\n", + "HOST = FocoosEnvHostUrl.DEV" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize the dataset\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if dataset_cfg[\"task\"] == \"instance_segmentation\":\n", + " display_instseg(PATH, num_images=1, annotate=True)\n", + "else:\n", + " display_detection(PATH, num_images=1, annotate=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Training and Inferencing with Focoos " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "focoos = Focoos(api_key=API_KEY, host_url=HOST)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train a model for the dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = focoos.new_model(\n", + " name=MODEL_NAME + \"_live\",\n", + " focoos_model=FOCOOS_MODEL,\n", + " description=f\"A model for the {DATASET_NAME} dataset\",\n", + ")\n", + "\n", + "dataset = focoos.get_dataset_by_name(DATASET_NAME)\n", + "\n", + "res = model.train(\n", + " dataset_ref=dataset.ref, # Dataset reference\n", + " hyperparameters=Hyperparameters( # Hyperparameters for the training\n", + " learning_rate=0.0005, batch_size=16, max_iters=1000\n", + " ),\n", + ")\n", + "\n", + "model.monitor_train()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deploy the model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the model on Focoos servers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = focoos.get_model_by_name(MODEL_NAME)\n", + "model.monitor_train()\n", + "output, preview = model.infer(\n", + " PATHS[random.randint(0, len(PATHS) - 1)], threshold=0.4, annotate=True\n", + ")\n", + "\n", + "plot_image(preview)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the model locally (first download the model then use it)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "local_model = focoos.get_local_model(model.metadata.ref)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output, preview = local_model.infer(\n", + " PATHS[random.randint(0, len(PATHS) - 1)], annotate=True, threshold=0.4\n", + ")\n", + "\n", + "pprint(output.latency)\n", + "\n", + "plot_image(preview)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Finally, we can also run a gradio DEMO" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "# Select multiple random images\n", + "PATHS = [os.path.join(PATH, p) for p in os.listdir(PATH)]\n", + "\n", + "from utils import start_gradio\n", + "start_gradio(model=model, paths=PATHS, allowed_paths=[\"/Users/fcdl94/Develop/focoos/data\"])\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + 
"nbformat": 4, + "nbformat_minor": 4 +} diff --git a/demo/utils.py b/demo/utils.py new file mode 100644 index 0000000..554a4c5 --- /dev/null +++ b/demo/utils.py @@ -0,0 +1,189 @@ +import json +import os +import random + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +from PIL import Image + +from focoos.utils.vision import image_preprocess + + +def load_coco_annotations(path): + with open(os.path.join(path, "_annotations.coco.json")) as f: + return json.load(f) + + +def get_random_image_indices(coco, num_images): + num_images = min(num_images, len(coco["images"])) + return np.random.choice(len(coco["images"]), num_images, replace=False) + + +def create_category_colors(alpha=1.0): + category_colors = {} + for i in range(1, 256): + hue = (i * 137.5) % 255 + hsv = np.array([[[hue, 255, 255]]], dtype=np.uint8) + rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)[0][0] + category_colors[i] = np.array([rgb[0] / 255, rgb[1] / 255, rgb[2] / 255, alpha]) + return category_colors + + +def setup_plot_grid(num_images, title=None): + grid_size = int(np.ceil(np.sqrt(num_images))) + fig, axes = plt.subplots(grid_size, grid_size, figsize=(8, 8)) + if title: + fig.suptitle(title, fontsize=16) + return axes.flat if num_images > 1 else [axes] + + +def create_legend(categories_seen, category_colors, category_map): + """Create a legend for instance segmentation visualization.""" + legend_elements = [] + for cat_id in categories_seen: + color = category_colors.get(cat_id, (0, 0, 0, 0.5)) + patch = plt.Rectangle((0, 0), 1, 1, facecolor=color) + legend_elements.append((patch, category_map.get(cat_id, f"Category {cat_id}"))) + + fig = plt.gcf() + if legend_elements: + fig.legend(*zip(*legend_elements), loc="center right") + + +def display_detection(path, num_images=9, annotate=True): + coco = load_coco_annotations(path) + img_indices = get_random_image_indices(coco, num_images) + category_colors = create_category_colors(alpha=1.0) + axes_flat = setup_plot_grid(num_images) + # Create a mapping of category IDs to names + category_map = {cat["id"]: cat["name"] for cat in coco["categories"]} + + # Keep track of categories seen for legend + categories_seen = set() + + for idx, ax in zip(img_indices, axes_flat): + img_info = coco["images"][idx] + img_path = os.path.join(path, img_info["file_name"]) + img = Image.open(img_path) + img_array = np.array(img) + + if annotate: + for ann in coco["annotations"]: + if ann["image_id"] == img_info["id"]: + x, y, w, h = (int(v) for v in ann["bbox"]) + category_id = ann["category_id"] + categories_seen.add(category_id) + color = category_colors.get(category_id, (255, 255, 255)) + cv2.rectangle(img_array, (x, y), (x + w, y + h), color * 255, 2) + + ax.imshow(img_array) + ax.axis("off") + + for ax in axes_flat[num_images:]: + ax.axis("off") + + # Add legend if annotations are enabled + if annotate: + create_legend(categories_seen, category_colors, category_map) + + plt.tight_layout() + plt.show() + + +def display_instseg(path, num_images=9, annotate=True, alpha=0.6): + coco = load_coco_annotations(path) + img_indices = get_random_image_indices(coco, num_images) + category_colors = create_category_colors(alpha=alpha) + axes_flat = setup_plot_grid(num_images) + + # Create a mapping of category IDs to names + category_map = {cat["id"]: cat["name"] for cat in coco["categories"]} + + # Keep track of categories seen for legend + categories_seen = set() + + for idx, ax in zip(img_indices, axes_flat): + img_info = coco["images"][idx] + img_path = os.path.join(path, 
img_info["file_name"]) + img = Image.open(img_path) + + ax.imshow(img) + + if annotate: + masks = {} + for ann in coco["annotations"]: + if ann["image_id"] == img_info["id"]: + category_id = ann["category_id"] + categories_seen.add(category_id) + if category_id not in masks: + masks[category_id] = np.zeros( + (img_info["height"], img_info["width"]) + ) + for seg in ann["segmentation"]: + poly = np.array(seg).reshape(-1, 2).astype(np.int32) + cv2.fillPoly(masks[category_id], [poly], 1) + + for category_id, mask in masks.items(): + color = category_colors.get(category_id, (0, 0, 0, 0.5)) + h, w = mask.shape[-2:] + mask = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + ax.imshow(mask) + + ax.axis("off") + + for ax in axes_flat[num_images:]: + ax.axis("off") + + # Add legend if annotations are enabled + if annotate: + create_legend(categories_seen, category_colors, category_map) + + plt.tight_layout() + plt.show() + + +def start_gradio(model, paths, allowed_paths=["/Users/fcdl94/Develop/focoos/data"]): + import gradio as gr + + def run_inference(image, conf=0.5): + # Load and resize the image + resized, _ = image_preprocess(image, resize=640) # Using standard 640 size + # Save to temporary file + tmp_path = ( + f"/Users/fcdl94/Develop/focoos/data/{os.path.basename(image)}_resized.jpg" + ) + # resized is in CHW format, need to convert to HWC and uint8 for saving + img_to_save = resized[0].transpose(1, 2, 0).astype(np.uint8) + cv2.imwrite(tmp_path, img_to_save) + image = tmp_path + + detections, annotated_image = model.infer(image, conf, annotate=True) + os.remove(tmp_path) + return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB), detections.model_dump() + + with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + image = gr.Image(type="filepath") + start_btn = gr.Button("Run Inference") + conf = gr.Slider( + maximum=0.9, minimum=0, value=0.5, label="Confidencte threshold" + ) + with gr.Column(): + output_image = gr.Image(type="pil") + output_detections = gr.JSON() + examples = gr.Examples( + fn=run_inference, + inputs=[image], + outputs=[output_image], + examples=[ + paths[i] for i in random.sample(range(len(paths)), min(5, len(paths))) + ], + ) + start_btn.click( + fn=run_inference, + inputs=[image, conf], + outputs=[output_image, output_detections], + ) + return demo.launch(allowed_paths=allowed_paths) diff --git a/focoos/focoos.py b/focoos/focoos.py index 09b92bf..cc27e4c 100644 --- a/focoos/focoos.py +++ b/focoos/focoos.py @@ -170,7 +170,7 @@ def get_dataset_by_name(self, name: str) -> Optional[DatasetMetadata]: found = False datasets = self.list_shared_datasets() for dataset in datasets: - if name == dataset.name: + if name.lower() == dataset.name.lower(): found = True break @@ -182,7 +182,7 @@ def get_model_by_name( found = False models = self.list_models() for model in models: - if name == model.name: + if name.lower() == model.name.lower(): found = True break if found: diff --git a/focoos/local_model.py b/focoos/local_model.py index bfd3b69..0fbb3b6 100644 --- a/focoos/local_model.py +++ b/focoos/local_model.py @@ -73,7 +73,10 @@ def _annotate(self, im: np.ndarray, detections: Detections) -> np.ndarray: annotated_im = self.label_annotator.annotate( scene=annotated_im, detections=detections, labels=labels ) - elif self.metadata.task == FocoosTask.SEMSEG: + elif self.metadata.task in [ + FocoosTask.SEMSEG, + FocoosTask.INSTANCE_SEGMENTATION, + ]: annotated_im = self.mask_annotator.annotate( scene=im.copy(), detections=detections ) diff --git a/focoos/remote_model.py 
b/focoos/remote_model.py index d7a8e9b..06bc312 100644 --- a/focoos/remote_model.py +++ b/focoos/remote_model.py @@ -204,6 +204,7 @@ def infer( if not os.path.exists(image_path): logger.error(f"Image file not found: {image_path}") raise FileNotFoundError(f"Image file not found: {image_path}") + files = {"file": open(image_path, "rb")} t0 = time.time() res = self.http_client.post( @@ -307,7 +308,7 @@ def monitor_train(self, update_period=30): logger.info(f"Model is not training, status: {status['main_status']}") return - def stop_traing(self): + def stop_training(self): res = self.http_client.delete(f"models/{self.model_ref}/train") if res.status_code != 200: logger.error(f"Failed to get stop training: {res.status_code} {res.text}") diff --git a/notebooks/_playground.ipynb b/notebooks/_playground.ipynb deleted file mode 100644 index 7065cf0..0000000 --- a/notebooks/_playground.ipynb +++ /dev/null @@ -1,434 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -e ..[dev]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 🤖 Focoos Foundational Models\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from pprint import pprint\n", - "import os\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", - "\n", - "pprint(focoos.list_focoos_models())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Cloud Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "import os\n", - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from supervision import plot_image\n", - "\n", - "\n", - "model_ref = \"focoos_object365\"\n", - "image_path = \"./assets/ade_val_034.jpg\"\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", - "\n", - "model = focoos.get_remote_model(model_ref)\n", - "## Only admin can deploy foundational models\n", - "model.deploy(wait=True)\n", - "\n", - "output, preview = model.infer(image_path, threshold=0.4, annotate=True)\n", - "plot_image(preview)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Unload Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Only admin can deploy foundational models\n", - "model.unload()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Local Inference\n", - "\n", - "This section demonstrates how to perform local inference using a model from the Focoos platform. 
\n", - "We will load a model, deploy it locally, and then run inference on a sample image.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from focoos import Focoos, FocoosEnvHostUrl\n", - "import os\n", - "from pprint import pprint\n", - "from supervision import plot_image\n", - "\n", - "\n", - "focoos = Focoos(\n", - " api_key=os.getenv(\"FOCOOS_API_KEY\"),\n", - " host_url=FocoosEnvHostUrl.LOCAL,\n", - ")\n", - "image_path = \"./assets/ade_val_034.jpg\"\n", - "model_ref = \"focoos_object365\"\n", - "\n", - "\n", - "model = focoos.get_local_model(model_ref)\n", - "\n", - "latency = model.benchmark(iterations=10, size=640)\n", - "pprint(latency)\n", - "# pprint(latency)\n", - "output, preview = model.infer(image_path, threshold=0.3, annotate=True)\n", - "pprint(output.detections)\n", - "pprint(output.latency)\n", - "\n", - "plot_image(preview)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# User Models" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### List User Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "import os\n", - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from dotenv import load_dotenv\n", - "\n", - "\n", - "load_dotenv()\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", - "\n", - "models = focoos.list_models()\n", - "pprint(models)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "import os\n", - "from focoos import DeploymentMode, Focoos, FocoosEnvHostUrl\n", - "from dotenv import load_dotenv\n", - "\n", - "\n", - "load_dotenv()\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.LOCAL)\n", - "\n", - "model = focoos.new_model(\n", - " name=\"test-model\", focoos_model=\"focoos_object365\", description=\"Test model\"\n", - ")\n", - "### Get Model Info" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Deploy user model on shared cloud endpoint" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "import os\n", - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from dotenv import load_dotenv\n", - "\n", - "\n", - "load_dotenv()\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", - "\n", - "model = focoos.get_remote_model(\"fa94df6806c84c11\")\n", - "model_info = model.get_info()\n", - "pprint(model_info.location)\n", - "model.deploy(wait=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Cloud Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "import os\n", - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from dotenv import load_dotenv\n", - "import cv2\n", - "import numpy as np\n", - "import supervision as sv\n", - "\n", - "load_dotenv()\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", - "\n", - "model = focoos.get_remote_model(\"fa94df6806c84c11\")\n", - "model_info = 
model.get_info()\n", - "image_path = \"./assets/aquarium.jpg\"\n", - "\n", - "output, preview = model.infer(image_path, threshold=0.5, annotate=True)\n", - "sv.plot_image(preview)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model unload " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.unload()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Local Inference\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from focoos import Focoos, FocoosEnvHostUrl, DeploymentMode\n", - "import os\n", - "from pprint import pprint\n", - "from supervision import plot_image\n", - "\n", - "\n", - "focoos = Focoos(\n", - " api_key=os.getenv(\"FOCOOS_API_KEY\"),\n", - " host_url=FocoosEnvHostUrl.DEV,\n", - ")\n", - "image_path = \"./assets/aquarium.jpg\"\n", - "model_ref = \"focoos_object365\"\n", - "\n", - "model_info = focoos.get_model_info(model_ref)\n", - "\n", - "model = focoos.get_local_model(model_ref)\n", - "\n", - "latency = model.benchmark(iterations=10, size=640)\n", - "# pprint(latency)\n", - "output, preview = model.infer(image_path, threshold=0.3, annotate=True)\n", - "\n", - "plot_image(preview)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Train a Model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### List Public Datasets\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "import os\n", - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from dotenv import load_dotenv\n", - "\n", - "\n", - "load_dotenv()\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.LOCAL)\n", - "\n", - "models = focoos.list_models()\n", - "pprint(models)\n", - "\n", - "model = focoos.new_model(\n", - " name=\"test-model-2\", focoos_model=\"focoos_object365\", description=\"Test model\"\n", - ")\n", - "\n", - "\n", - "model = focoos.get_remote_model(\"fa94df6806c84c11\")\n", - "\n", - "datasets = focoos.list_shared_datasets()\n", - "pprint(datasets)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from focoos.ports import Hyperparameters, TrainInstance\n", - "\n", - "\n", - "res = model.remote_train(\n", - " anyma_version=\"0.11.1\",\n", - " dataset_ref=\"11e80dd77806450f\",\n", - " instance_type=TrainInstance.ML_G4DN_XLARGE,\n", - " volume_size=50,\n", - " max_runtime_in_seconds=36000,\n", - " hyperparameters=Hyperparameters(\n", - " learning_rate=0.0001,\n", - " batch_size=16,\n", - " max_iters=1500,\n", - " eval_period=100,\n", - " resolution=640,\n", - " ), # type: ignore\n", - ")\n", - "pprint(res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "completed_status = [\"Completed\", \"Failed\"]\n", - "import time\n", - "from focoos.utils.logger import get_logger\n", - "\n", - "logger = get_logger(__name__)\n", - "\n", - "status = model.train_status()\n", - "while status[\"main_status\"] not in completed_status:\n", - " status = model.train_status()\n", - " logger.info(f\"Training status: {status['main_status']}\")\n", - " pprint(f\"Training progress: {status['status_transitions']}\")\n", - " time.sleep(30)" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "metadata": {}, - "outputs": [], - "source": [ - "logs = model.train_logs()\n", - "pprint(logs)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.10" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/pyproject.toml b/pyproject.toml index f03d302..690b638 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,3 +44,5 @@ dev = [ inference = ["onnxruntime==1.18.0"] inference-gpu = ["onnxruntime-gpu==1.18.0"] + +demo = ["gradio~=5.6.0", "opencv-python~=4.9.0", "supervision~=0.24.0", "matplotlib~=3.8.2"] From 466535fba06f649e6616428e17b1d0d49f95719a Mon Sep 17 00:00:00 2001 From: fcdl94 Date: Thu, 5 Dec 2024 19:50:08 +0100 Subject: [PATCH 2/8] fix: remote model now save metadata --- demo/.monitor.ipynb | 126 ----------------------------------------- demo/demo.ipynb | 105 +++++++++++++++++++++------------- focoos/remote_model.py | 65 +-------------------- 3 files changed, 67 insertions(+), 229 deletions(-) delete mode 100644 demo/.monitor.ipynb diff --git a/demo/.monitor.ipynb b/demo/.monitor.ipynb deleted file mode 100644 index 331d85a..0000000 --- a/demo/.monitor.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from focoos import Focoos, FocoosEnvHostUrl\n", - "from pprint import pprint\n", - "import os\n", - "\n", - "focoos = Focoos(api_key=os.getenv(\"FOCOOS_API_KEY\"), host_url=FocoosEnvHostUrl.DEV)\n", - "\n", - "models = focoos.list_models()\n", - "\n", - "print(f\"Found {len(models)} models\")\n", - "print()\n", - "\n", - "deployed_models = []\n", - "print(\"** Deployed models **\")\n", - "for model in models:\n", - " if model.status == \"DEPLOYED\":\n", - " print(model.name, model.ref)\n", - " deployed_models.append(model.name)\n", - "print()\n", - "\n", - "print(\"** Currently training models **\")\n", - "for model in models:\n", - " if \"TRAINING_RUNNING\" in model.status:\n", - " print(model.name, model.ref)\n", - "print()\n", - "\n", - "print(\"** Ready to deploy models **\")\n", - "for model in models:\n", - " if \"TRAINING_COMPLETED\" in model.status:\n", - " print(model.name, model.ref)\n", - "print()\n", - "\n", - "print(\"** Others **\")\n", - "for model in models:\n", - " if model.status not in [\"DEPLOYED\", \"TRAINING_RUNNING\", \"TRAINING_COMPLETED\"]:\n", - " print(model.name, model.ref, model.status)\n", - "print()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# MONITOR TRAINING\n", - "model = focoos.get_model_by_name(\"cable\")\n", - "model.monitor_train()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# STOP TRAINING\n", - "model = focoos.get_remote_model(\"31b7cd71c1584951\")\n", - "model.stop_training()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# UNLOAD MODELS\n", - "model_names = deployed_models\n", - "\n", - "for name in model_names:\n", - " model = focoos.get_model_by_name(name)\n", - " model.unload()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - 
"source": [ - "# DELETE MODELS\n", - "model_names = [\"31b7cd71c1584951\"]\n", - "\n", - "for name in model_names:\n", - " model = focoos.get_remote_model(name)\n", - " model.delete_model()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "focoos", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.10" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/demo/demo.ipynb b/demo/demo.ipynb index 3eaafa5..b70120e 100644 --- a/demo/demo.ipynb +++ b/demo/demo.ipynb @@ -1,5 +1,15 @@ { "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "MODEL_NAME = \"peanuts\" # Model name\n", + "DATASET_NAME = \"peanuts\" # Dataset name" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -10,7 +20,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "import random\n", @@ -23,12 +37,22 @@ "from utils import display_instseg, display_detection\n", "from dataset import get_dataset\n", "\n", - "load_dotenv()\n", - "\n", - "dataset_cfg = get_dataset(\"cable\") # Get dataset configuration\n", + "load_dotenv()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "dataset_cfg = get_dataset(DATASET_NAME) # Get dataset configuration\n", "\n", "DATASET_NAME = dataset_cfg[\"name\"] # Dataset name\n", - "MODEL_NAME = dataset_cfg[\"name\"] # Model name\n", "VIS_FUNC = (\n", " display_detection if dataset_cfg[\"task\"] == \"detection\" else display_instseg\n", ") # Visualization function\n", @@ -59,9 +83,9 @@ "outputs": [], "source": [ "if dataset_cfg[\"task\"] == \"instance_segmentation\":\n", - " display_instseg(PATH, num_images=1, annotate=True)\n", + " display_instseg(PATH, num_images=5, annotate=True)\n", "else:\n", - " display_detection(PATH, num_images=1, annotate=True)" + " display_detection(PATH, num_images=5, annotate=True)" ] }, { @@ -80,37 +104,6 @@ "focoos = Focoos(api_key=API_KEY, host_url=HOST)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Train a model for the dataset." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model = focoos.new_model(\n", - " name=MODEL_NAME + \"_live\",\n", - " focoos_model=FOCOOS_MODEL,\n", - " description=f\"A model for the {DATASET_NAME} dataset\",\n", - ")\n", - "\n", - "dataset = focoos.get_dataset_by_name(DATASET_NAME)\n", - "\n", - "res = model.train(\n", - " dataset_ref=dataset.ref, # Dataset reference\n", - " hyperparameters=Hyperparameters( # Hyperparameters for the training\n", - " learning_rate=0.0005, batch_size=16, max_iters=1000\n", - " ),\n", - ")\n", - "\n", - "model.monitor_train()" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -132,7 +125,7 @@ "outputs": [], "source": [ "model = focoos.get_model_by_name(MODEL_NAME)\n", - "model.monitor_train()\n", + "\n", "output, preview = model.infer(\n", " PATHS[random.randint(0, len(PATHS) - 1)], threshold=0.4, annotate=True\n", ")\n", @@ -168,7 +161,39 @@ "\n", "pprint(output.latency)\n", "\n", - "plot_image(preview)" + "pprint(output)\n", + "# plot_image(preview)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train a model for the dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = focoos.new_model(\n", + " name=MODEL_NAME + \"_live\",\n", + " focoos_model=FOCOOS_MODEL,\n", + " description=f\"A model for the {DATASET_NAME} dataset\",\n", + ")\n", + "\n", + "dataset = focoos.get_dataset_by_name(DATASET_NAME)\n", + "\n", + "res = model.train(\n", + " dataset_ref=dataset.ref, # Dataset reference\n", + " hyperparameters=Hyperparameters( # Hyperparameters for the training\n", + " learning_rate=0.0005, batch_size=16, max_iters=1000\n", + " ),\n", + ")\n", + "\n", + "model.monitor_train()" ] }, { diff --git a/focoos/remote_model.py b/focoos/remote_model.py index 06bc312..c5f47b5 100644 --- a/focoos/remote_model.py +++ b/focoos/remote_model.py @@ -54,8 +54,9 @@ def __init__(self, model_ref: str, http_client: HttpClient): def get_info(self) -> ModelMetadata: res = self.http_client.get(f"models/{self.model_ref}") + self.metadata = ModelMetadata(**res.json()) if res.status_code == 200: - return ModelMetadata(**res.json()) + return self.metadata else: logger.error(f"Failed to get model info: {res.status_code} {res.text}") raise ValueError(f"Failed to get model info: {res.status_code} {res.text}") @@ -96,58 +97,6 @@ def train_status(self): f"Failed to get train status: {res.status_code} {res.text}" ) - def deploy(self, wait: bool = True): - self.metadata = self.get_info() - if self.metadata.status not in [ - ModelStatus.DEPLOYED, - ModelStatus.TRAINING_COMPLETED, - ]: - raise ValueError( - f"Model {self.model_ref} is not in a valid state to be deployed. Current status: {self.metadata.status}, expected: {ModelStatus.TRAINING_COMPLETED}" - ) - if self.metadata.status == ModelStatus.DEPLOYED: - deployment_info = self._deployment_info() - logger.debug( - f"Model {self.model_ref} is already deployed, deployment info: {deployment_info}" - ) - return deployment_info - - logger.info( - f"🚀 Deploying model {self.model_ref} to inference endpoint... this might take a while." - ) - res = self.http_client.post(f"models/{self.model_ref}/deploy") - if res.status_code in [200, 201, 409]: - if res.status_code == 409: - logger.info(f"Status code 409, model is already deployed") - - if wait: - for i in range(self.max_deploy_wait): - logger.info( - f"⏱️ Waiting for model {self.model_ref} to be ready... 
{i+1} of {self.max_deploy_wait}" - ) - if self._deployment_info()["status"] == "READY": - logger.info(f"✅ Model {self.model_ref} deployed successfully") - return - time.sleep(1 + i) - logger.error( - f"Model {self.model_ref} deployment timed out after {self.max_deploy_wait} attempts." - ) - raise ValueError( - f"Model {self.model_ref} deployment timed out after {self.max_deploy_wait} attempts." - ) - return res.json() - else: - logger.error(f"Failed to deploy model: {res.status_code} {res.text}") - raise ValueError(f"Failed to deploy model: {res.status_code} {res.text}") - - def unload(self): - res = self.http_client.delete(f"models/{self.model_ref}/deploy") - if res.status_code in [200, 204, 409]: - return res.json() - else: - logger.error(f"Failed to unload model: {res.status_code} {res.text}") - raise ValueError(f"Failed to unload model: {res.status_code} {res.text}") - def train_logs(self) -> list[str]: res = self.http_client.get(f"models/{self.model_ref}/train/logs") if res.status_code == 200: @@ -156,16 +105,6 @@ def train_logs(self) -> list[str]: logger.warning(f"Failed to get train logs: {res.status_code} {res.text}") return [] - def _deployment_info(self): - res = self.http_client.get(f"models/{self.model_ref}/deploy") - if res.status_code == 200: - return res.json() - else: - logger.error(f"Failed to get deployment info: {res.status_code} {res.text}") - raise ValueError( - f"Failed to get deployment info: {res.status_code} {res.text}" - ) - def _annotate(self, im: np.ndarray, detections: Detections) -> np.ndarray: classes = self.metadata.classes if classes is not None: From 5ba9601c5713b1a561d008dc007c847914a5de2d Mon Sep 17 00:00:00 2001 From: fcdl94 Date: Thu, 5 Dec 2024 19:59:05 +0100 Subject: [PATCH 3/8] fix: remove makefile demo --- Makefile | 3 --- 1 file changed, 3 deletions(-) diff --git a/Makefile b/Makefile index 4bd5654..48c529e 100644 --- a/Makefile +++ b/Makefile @@ -5,9 +5,6 @@ install: install-dev: @pip install -e ".[dev]" --no-cache-dir -install-demo: - @pip install -e ".[demo]" --no-cache-dir - install-pre-commit: @pre-commit install lint: From 711c75c1a0b00fc1515645af4f78cfd54f5da969 Mon Sep 17 00:00:00 2001 From: Giuseppe Ambrosio Date: Mon, 9 Dec 2024 15:21:49 +0100 Subject: [PATCH 4/8] refactor(focoos): get_dataset_by_name --- focoos/focoos.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/focoos/focoos.py b/focoos/focoos.py index b69a386..cf1d1d4 100644 --- a/focoos/focoos.py +++ b/focoos/focoos.py @@ -157,14 +157,10 @@ def _download_model(self, model_ref: str) -> str: raise ValueError(f"Failed to download model: {res.status_code} {res.text}") def get_dataset_by_name(self, name: str) -> Optional[DatasetMetadata]: - found = False datasets = self.list_shared_datasets() for dataset in datasets: if name.lower() == dataset.name.lower(): - found = True - break - - return dataset if found else None + return dataset def get_model_by_name( self, name: str, remote=True From 1a97d02443d2e1f00db496dbe180eac493955080 Mon Sep 17 00:00:00 2001 From: Giuseppe Ambrosio Date: Mon, 9 Dec 2024 15:27:56 +0100 Subject: [PATCH 5/8] refactor(focoos): get_model_by_name found logic --- focoos/focoos.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/focoos/focoos.py b/focoos/focoos.py index cf1d1d4..7b8d68d 100644 --- a/focoos/focoos.py +++ b/focoos/focoos.py @@ -165,16 +165,10 @@ def get_dataset_by_name(self, name: str) -> Optional[DatasetMetadata]: def get_model_by_name( self, name: str, remote=True ) -> 
Optional[Union[RemoteModel, LocalModel]]: - found = False models = self.list_models() for model in models: if name.lower() == model.name.lower(): - found = True - break - if found: - if remote: - return self.get_remote_model(model.ref) - else: - return self.get_local_model(model.ref) - else: - return None + if remote: + return self.get_remote_model(model.ref) + else: + return self.get_local_model(model.ref) From b70d4a233bcfb4cebb6d9e77efde2b88d37ae676 Mon Sep 17 00:00:00 2001 From: Giuseppe Ambrosio Date: Mon, 9 Dec 2024 15:28:48 +0100 Subject: [PATCH 6/8] refactor(remote_model): unused import --- focoos/remote_model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/focoos/remote_model.py b/focoos/remote_model.py index f0557d2..a5b08f4 100644 --- a/focoos/remote_model.py +++ b/focoos/remote_model.py @@ -13,7 +13,6 @@ FocoosTask, Hyperparameters, ModelMetadata, - ModelStatus, TrainInstance, ) from focoos.utils.logger import get_logger From 4b57251d5e7a421b440e9d4faa2ea394759bd4ce Mon Sep 17 00:00:00 2001 From: fcdl94 Date: Tue, 10 Dec 2024 09:56:41 +0100 Subject: [PATCH 7/8] Fix misalignement between Local and Remote model infer input types. --- focoos/__init__.py | 1 + focoos/remote_model.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/focoos/__init__.py b/focoos/__init__.py index 69049cd..b8ab9d6 100644 --- a/focoos/__init__.py +++ b/focoos/__init__.py @@ -1,3 +1,4 @@ from .focoos import Focoos +from .local_model import LocalModel from .ports import * from .remote_model import RemoteModel diff --git a/focoos/remote_model.py b/focoos/remote_model.py index a5b08f4..7ccb337 100644 --- a/focoos/remote_model.py +++ b/focoos/remote_model.py @@ -4,6 +4,7 @@ from time import sleep from typing import Optional, Tuple, Union +import cv2 import numpy as np from supervision import BoxAnnotator, Detections, LabelAnnotator, MaskAnnotator @@ -120,16 +121,19 @@ def _annotate(self, im: np.ndarray, detections: Detections) -> np.ndarray: def infer( self, - image: Union[str, Path, bytes], + image: Union[str, Path, np.ndarray, bytes], threshold: float = 0.5, annotate: bool = False, ) -> Tuple[FocoosDetections, Optional[np.ndarray]]: image_bytes = None - if not isinstance(image, bytes): + if isinstance(image, str) or isinstance(image, Path): if not os.path.exists(image): logger.error(f"Image file not found: {image}") raise FileNotFoundError(f"Image file not found: {image}") image_bytes = open(image, "rb").read() + elif isinstance(image, np.ndarray): + _, buffer = cv2.imencode(".jpg", image) + image_bytes = buffer.tobytes() else: image_bytes = image files = {"file": image_bytes} From be4c73a42717a2f0382d60f6adbf81015b95e9ed Mon Sep 17 00:00:00 2001 From: Giuseppe Ambrosio Date: Tue, 10 Dec 2024 10:04:52 +0100 Subject: [PATCH 8/8] feat(remote-model): metadata update --- focoos/remote_model.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/focoos/remote_model.py b/focoos/remote_model.py index 7ccb337..ad5c9a7 100644 --- a/focoos/remote_model.py +++ b/focoos/remote_model.py @@ -39,12 +39,11 @@ def __init__(self, model_ref: str, http_client: HttpClient): def get_info(self) -> ModelMetadata: res = self.http_client.get(f"models/{self.model_ref}") - self.metadata = ModelMetadata(**res.json()) - if res.status_code == 200: - return self.metadata - else: + if res.status_code != 200: logger.error(f"Failed to get model info: {res.status_code} {res.text}") raise ValueError(f"Failed to get model info: {res.status_code} {res.text}") + self.metadata = 
ModelMetadata(**res.json()) + return self.metadata def train( self,
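
Taken together, these patches make model lookup case-insensitive, rename `stop_traing` to `stop_training`, and align `RemoteModel.infer` with `LocalModel.infer` so both accept a file path, raw bytes, or a numpy array. A minimal usage sketch of the resulting API (a sketch only, assuming a valid `FOCOOS_API_KEY`, the DEV host used throughout the notebooks, an already trained model named `peanuts` as in `demo.ipynb`, and an illustrative image path):

```python
import os

import cv2
from focoos import Focoos, FocoosEnvHostUrl

focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"), host_url=FocoosEnvHostUrl.DEV)

# Case-insensitive lookup (patch 5/8); remote=True returns a RemoteModel.
model = focoos.get_model_by_name("peanuts", remote=True)

# RemoteModel.infer now also accepts a numpy array (patch 7/8),
# in addition to a file path or raw bytes.
image = cv2.imread("./assets/aquarium.jpg")  # illustrative path
detections, preview = model.infer(image, threshold=0.5, annotate=True)
print(detections.model_dump())
```

Accepting arrays directly also means callers such as `start_gradio` in `demo/utils.py` no longer need to round-trip a resized frame through a temporary JPEG on disk before sending it to a remote model.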