{ "cells": [ { "metadata": {}, "cell_type": "markdown", "source": [ "This file provides an example of how to use some of the underlying python methods used in this app.\n", "\n", "It requires a python environment with pytorch and detectron2. This can be set up on google colab using the following cell." ], "id": "54812cfe385be4e3" }, { "metadata": {}, "cell_type": "code", "source": [ "## To run on google colab, run this cell to install requirements. You will then, however, need to copy some of our custom methods over (e.g. load_model etc..)\n", "## In future we may package these properly, but they are just utility functions\n", "try:\n", " import google.colab\n", " IN_COLAB = True\n", "except ImportError:\n", " IN_COLAB = False\n", "if IN_COLAB:\n", "\n", " import sys, os, distutils.core\n", " # Note: This is a faster way to install detectron2 in Colab, but it does not include all functionalities (e.g. compiled operators).\n", " # See https://detectron2.readthedocs.io/tutorials/install.html for full installation instructions\n", " # Issues raised:\n", " # - pyaml install https://github.com/facebookresearch/detectron2/issues/5122 (think this is fixed)\n", " !git clone 'https://github.com/facebookresearch/detectron2'\n", " dist = distutils.core.run_setup(\"./detectron2/setup.py\")\n", " !python -m pip install {' '.join([f\"'{x}'\" for x in dist.install_requires])}\n", " sys.path.insert(0, os.path.abspath('./detectron2'))\n", "\n", " # Properly install detectron2. 
(Please do not install twice in both ways)\n", " # !python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'\n", " import torch, detectron2\n", " !nvcc --version\n", " TORCH_VERSION = \".\".join(torch.__version__.split(\".\")[:2])\n", " CUDA_VERSION = torch.__version__.split(\"+\")[-1]\n", " print(\"torch: \", TORCH_VERSION, \"; cuda: \", CUDA_VERSION)\n", " print(\"detectron2:\", detectron2.__version__)\n", " print(f'GPU available: {torch.cuda.is_available()}')" ], "id": "ae06666362c0995d", "outputs": [], "execution_count": null }, { "cell_type": "code", "id": "initial_id", "metadata": { "collapsed": true }, "source": [ "## Import the model utilities that we will need, and load the model.\n", "from python_utils import load_model, apply_nms, OPTIMAL_NMS_THRESHOLD\n", "\n", "predictor = load_model()" ], "outputs": [], "execution_count": null }, { "metadata": {}, "cell_type": "code", "source": [ "## Import and display an image.\n", "import cv2\n", "\n", "if IN_COLAB:\n", " from google.colab import files\n", " uploaded = files.upload()\n", " from google.colab.patches import cv2_imshow\n", "\n", " # files.upload() returns a dict keyed by filename (and saves the files to the\n", " # working directory), so take the first filename rather than indexing by 0.\n", " im = cv2.imread(next(iter(uploaded))) # just look at the first image\n", " cv2_imshow(im)\n", "else:\n", "\n", " import os\n", " from IPython.display import Image\n", "\n", " img_file = os.path.join('assets', 'rbg_kew.jpg')\n", " display(Image(filename=img_file))\n", " im = cv2.imread(img_file)" ], "id": "12ab5c1116d6ff38", "outputs": [], "execution_count": null }, { "metadata": {}, "cell_type": "code", "source": [ "## Run the model on the image, then apply NMS to filter out overlapping masks.\n", "raw_output = predictor(im)\n", "# NOTE(review): OPTIMAL_NMS_THRESHOLD (a threshold value) is passed as cls_agnostic_nms,\n", "# which reads like a boolean flag - confirm this kwarg of apply_nms expects a threshold.\n", "prediction = apply_nms(raw_output, mask=True, cls_agnostic_nms=OPTIMAL_NMS_THRESHOLD)" ], "id": "d9305d58e42d6504", "outputs": [], "execution_count": null }, { "metadata": {}, "cell_type": "code", "source": [ "# Get the seed counts (0 = viable, 1 = non-viable, 2 = empty)\n", "classes = 
prediction[\"instances\"].pred_classes.tolist()\n", "counts = {\"viable\": classes.count(0),\n", " \"non-viable\": classes.count(1),\n", " \"empty\": classes.count(2),\n", " \"total\": len(classes)}\n", "print(counts)" ], "id": "676bd6fab69c10f2", "outputs": [], "execution_count": null }, { "metadata": {}, "cell_type": "code", "source": [ "# Visualise the segmentation masks\n", "\n", "from matplotlib import pyplot as plt\n", "from app import get_overlayed_image_from_single_result\n", "\n", "prediction['image'] = im\n", "visualiser = get_overlayed_image_from_single_result(prediction)\n", "fig, ax = plt.subplots(figsize=(8, 6.4))\n", "# The previous [:, :, ::-1] slice followed by cv2.COLOR_BGR2RGB reversed the\n", "# channels twice (a no-op), so the image is passed to imshow unchanged.\n", "ax.imshow(visualiser.get_image())\n", "ax.get_xaxis().set_visible(False)\n", "ax.get_yaxis().set_visible(False)\n", "ax.set_title(\"Annotated\")\n", "plt.tight_layout()" ], "id": "18934b98c3461a28", "outputs": [], "execution_count": null } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3" } }, "nbformat": 4, "nbformat_minor": 5 }