diff --git a/notebooks/train-yolo11-instance-segmentation-on-custom-dataset.ipynb b/notebooks/train-yolo11-instance-segmentation-on-custom-dataset.ipynb
index afb9a18..93f7dce 100644
--- a/notebooks/train-yolo11-instance-segmentation-on-custom-dataset.ipynb
+++ b/notebooks/train-yolo11-instance-segmentation-on-custom-dataset.ipynb
@@ -917,6 +917,84 @@
         }
       ]
     },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "## Deploy model on Roboflow\n",
+        "\n",
+        "Once you have finished training your YOLOv11 model, you’ll have a set of trained weights ready for use. These weights will be saved at `/runs/segment/train/weights/best.pt` in your project. You can upload your model weights to Roboflow Deploy to use your trained weights on our infinitely scalable infrastructure.\n",
+        "\n",
+        "The `.deploy()` function in the [Roboflow pip package](https://docs.roboflow.com/python) now supports uploading YOLOv11 weights."
+      ],
+      "metadata": {
+        "id": "4Z2R1eJiz4ux"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "project.version(dataset.version).deploy(model_type=\"yolov11-seg\", model_path=f\"{HOME}/runs/segment/train/\")"
+      ],
+      "metadata": {
+        "id": "IjZQqXwU0UEo"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "!pip install inference"
+      ],
+      "metadata": {
+        "id": "pz765jno0Xl3"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "import os, random, cv2\n",
+        "import supervision as sv\n",
+        "import IPython\n",
+        "import inference\n",
+        "\n",
+        "model_id = project.id.split(\"/\")[1] + \"/\" + dataset.version\n",
+        "model = inference.get_model(model_id, userdata.get('ROBOFLOW_API_KEY'))\n",
+        "\n",
+        "# Location of test set images\n",
+        "test_set_loc = dataset.location + \"/test/images/\"\n",
+        "test_images = os.listdir(test_set_loc)\n",
+        "\n",
+        "# Run inference on 4 random test images, or fewer if fewer images are available\n",
+        "for img_name in random.sample(test_images, min(4, len(test_images))):\n",
+        "    print(\"Running inference on \" + img_name)\n",
+        "\n",
+        "    # Load image\n",
+        "    image = cv2.imread(os.path.join(test_set_loc, img_name))\n",
+        "\n",
+        "    # Perform inference\n",
+        "    results = model.infer(image)[0]\n",
+        "    detections = sv.Detections.from_inference(results)\n",
+        "\n",
+        "    # Annotate masks and labels\n",
+        "    mask_annotator = sv.MaskAnnotator()\n",
+        "    label_annotator = sv.LabelAnnotator()\n",
+        "    annotated_image = mask_annotator.annotate(scene=image, detections=detections)\n",
+        "    annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)\n",
+        "\n",
+        "    # Display annotated image\n",
+        "    _, ret = cv2.imencode('.jpg', annotated_image)\n",
+        "    i = IPython.display.Image(data=ret)\n",
+        "    IPython.display.display(i)\n"
+      ],
+      "metadata": {
+        "id": "seFGjEE20X05"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
     {
       "cell_type": "markdown",
       "source": [
diff --git a/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb b/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb
index b46a4e9..1f9b8ce 100644
--- a/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb
+++ b/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb
@@ -888,21 +888,23 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "id": "fUFNm4O0znB2"
+   },
    "source": [
     "## Deploy model on Roboflow\n",
     "\n",
     "Once you have finished training your YOLOv11 model, you’ll have a set of trained weights ready for use. These weights will be in the `/runs/detect/train/weights/best.pt` folder of your project. You can upload your model weights to Roboflow Deploy to use your trained weights on our infinitely scalable infrastructure.\n",
     "\n",
-    "The `.deploy()` function in the [Roboflow pip package](https://docs.roboflow.com/python) now supports uploading YOLOv11 weights.\n",
-    "\n",
-    "To upload model weights, add the following code to the “Inference with Custom Model” section in the aforementioned notebook:"
+    "The `.deploy()` function in the [Roboflow pip package](https://docs.roboflow.com/python) now supports uploading YOLOv11 weights."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "id": "3Y6_K1MRznB2"
+   },
    "outputs": [],
    "source": [
     "project.version(dataset.version).deploy(model_type=\"yolov11\", model_path=f\"{HOME}/runs/detect/train/\")"
@@ -911,7 +913,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "id": "V5K2IU_SznB2"
+   },
    "outputs": [],
    "source": [
     "!pip install inference"
@@ -920,7 +924,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "id": "BwYVU9S5znB2"
+   },
    "outputs": [],
    "source": [
     "import os, random, cv2\n",
@@ -938,20 +944,20 @@
     "# Run inference on 4 random test images, or fewer if fewer images are available\n",
     "for img_name in random.sample(test_images, min(4, len(test_images))):\n",
     "    print(\"Running inference on \" + img_name)\n",
-    "    \n",
+    "\n",
     "    # Load image\n",
     "    image = cv2.imread(os.path.join(test_set_loc, img_name))\n",
-    "    \n",
+    "\n",
     "    # Perform inference\n",
     "    results = model.infer(image, confidence=0.4, overlap=30)[0]\n",
     "    detections = sv.Detections.from_inference(results)\n",
-    "    \n",
+    "\n",
     "    # Annotate boxes and labels\n",
     "    box_annotator = sv.BoxAnnotator()\n",
     "    label_annotator = sv.LabelAnnotator()\n",
     "    annotated_image = box_annotator.annotate(scene=image, detections=detections)\n",
     "    annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)\n",
-    "    \n",
+    "\n",
     "    # Display annotated image\n",
     "    _, ret = cv2.imencode('.jpg', annotated_image)\n",
     "    i = IPython.display.Image(data=ret)\n",
@@ -1011,4 +1017,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file
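
Usage note on the deploy cells added above: the sketch below shows how the upload step could be guarded so `.deploy()` is only called once training has actually produced `weights/best.pt` inside the run directory. It is a minimal sketch, not part of either notebook, and it assumes `project`, `dataset`, and `HOME` are already defined earlier in the notebook, as in the cells shown in the diff.

# Minimal sketch (assumes `project`, `dataset`, and `HOME` are defined earlier in the notebook).
# `.deploy()` takes the run directory as `model_path`; the trained weights live in its
# `weights/best.pt` file, so check for that file before uploading.
import os

run_dir = f"{HOME}/runs/detect/train/"
weights_file = os.path.join(run_dir, "weights", "best.pt")

if os.path.isfile(weights_file):
    # Upload the trained weights to Roboflow Deploy
    # (use model_type="yolov11-seg" and the runs/segment path in the segmentation notebook).
    project.version(dataset.version).deploy(model_type="yolov11", model_path=run_dir)
else:
    print(f"No trained weights found at {weights_file}; run the training cell first.")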