{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "5d0e0b69",
"metadata": {},
"outputs": [],
"source": [
"# Copyright (c) Meta Platforms, Inc. and affiliates."
]
},
{
"cell_type": "markdown",
"id": "11912666",
"metadata": {},
"source": [
"# SAM3 Interactive Image Segmentation Widget\n",
"\n",
"Segment images interactively with text and box prompts using SAM3 in a Jupyter widget."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "8517f5f6",
"metadata": {},
"outputs": [],
"source": [
"# Set to True when running this notebook in Google Colab so that the setup\n",
"# cell below installs the required dependencies into the Colab runtime.\n",
"using_colab = False"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2540e376",
"metadata": {},
"outputs": [],
"source": [
"# One-time environment setup when running on Google Colab.\n",
"if using_colab:\n",
"    import torch\n",
"    import torchvision\n",
"    print(\"PyTorch version:\", torch.__version__)\n",
"    print(\"Torchvision version:\", torchvision.__version__)\n",
"    print(\"CUDA is available:\", torch.cuda.is_available())\n",
"    # %pip (rather than !pip / !{sys.executable} -m pip) is the magic that is\n",
"    # guaranteed to install into the environment backing the running kernel.\n",
"    %pip install opencv-python matplotlib scikit-learn\n",
"    %pip install 'git+https://github.com/facebookresearch/sam3.git'"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "90073483-58f6-404e-90ac-c22efcd76216",
"metadata": {},
"outputs": [],
"source": [
"# Use the interactive ipympl backend so the segmentation widget defined below\n",
"# can receive mouse events (for drawing boxes) on the matplotlib canvas.\n",
"%matplotlib widget"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "13325376-658b-48d6-8528-2a006f223d44",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"# turn on tfloat32 for Ampere GPUs\n",
"# https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices\n",
"torch.backends.cuda.matmul.allow_tf32 = True\n",
"torch.backends.cudnn.allow_tf32 = True\n",
"\n",
"# use bfloat16 for the entire notebook. If your card doesn't support it, try float16 instead\n",
"# NOTE: calling __enter__() without a matching __exit__() deliberately keeps the\n",
"# autocast context active for the remaining lifetime of the kernel.\n",
"torch.autocast(\"cuda\", dtype=torch.bfloat16).__enter__()\n",
"\n",
"# inference mode for the whole notebook. Disable if you need gradients\n",
"torch.inference_mode().__enter__()"
]
},
{
"cell_type": "markdown",
"id": "fb863772-56a9-4ee2-be52-5d8933066519",
"metadata": {},
"source": [
"# Load the model"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "f84b4ccc-9db2-4d88-ac8f-4c272694d25a",
"metadata": {},
"outputs": [],
"source": [
"import sam3\n",
"from sam3 import build_sam3_image_model\n",
"import os\n",
"# Derive the repository root from the installed sam3 package location so the\n",
"# BPE vocabulary file is found without hardcoding an absolute local path.\n",
"sam3_root = os.path.join(os.path.dirname(sam3.__file__), \"..\")\n",
"bpe_path = f\"{sam3_root}/assets/bpe_simple_vocab_16e6.txt.gz\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "de01a36e-1221-4497-a5ab-e6c796689480",
"metadata": {},
"outputs": [],
"source": [
"# Instantiate the SAM3 image model, pointing it at the BPE vocabulary located above.\n",
"model = build_sam3_image_model(bpe_path=bpe_path)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "b01ec8a9-d9f6-4baf-96ac-1e5d21fd90b8",
"metadata": {},
"outputs": [],
"source": [
"from sam3.model.sam3_image_processor import Sam3Processor\n",
"# Wrap the model in a Sam3Processor; the interactive widget below is\n",
"# constructed from this processor instance.\n",
"processor = Sam3Processor(model)"
]
},
{
"cell_type": "markdown",
"id": "e6172a69-35ca-487c-bd67-6f1f1ecb20d5",
"metadata": {},
"source": [
"# Jupyter widget"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "2a4ac22f-5d5c-4272-a5a1-dfe0c04253a7",
"metadata": {},
"outputs": [],
"source": [
"import io\n",
"\n",
"import ipywidgets as widgets\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import PIL.Image\n",
"import requests\n",
"from IPython.display import clear_output, display, HTML\n",
"from matplotlib.patches import Rectangle\n",
"\n",
"\n",
"class Sam3SegmentationWidget:\n",
" \"\"\"Interactive Jupyter widget for SAM3 segmentation with text and box prompts.\"\"\"\n",
"\n",
" def __init__(self, processor):\n",
" \"\"\"\n",
" Initialize the segmentation widget.\n",
"\n",
" Args:\n",
" processor: Sam3Processor instance\n",
" \"\"\"\n",
" self.processor = processor\n",
" self.state = None\n",
" self.current_image = None\n",
" self.current_image_array = None\n",
" self.box_mode = \"positive\"\n",
" self.drawing_box = False\n",
" self.box_start = None\n",
" self.current_rect = None\n",
"\n",
" self._setup_ui()\n",
" self._setup_plot()\n",
"\n",
" def _setup_ui(self):\n",
" \"\"\"Set up the UI components.\"\"\"\n",
" self.upload_widget = widgets.FileUpload(\n",
" accept=\"image/*\", multiple=False, description=\"Upload Image\"\n",
" )\n",
" self.upload_widget.observe(self._on_image_upload, names=\"value\")\n",
"\n",
" self.url_input = widgets.Text(\n",
" placeholder=\"Or enter image URL\",\n",
" )\n",
" self.url_button = widgets.Button(description=\"Load URL\", button_style=\"info\")\n",
" self.url_button.on_click(self._on_load_url)\n",
" url_box = widgets.HBox(\n",
" [self.url_input, self.url_button],\n",
" layout=widgets.Layout(width=\"100%\", justify_content=\"space-between\"),\n",
" )\n",
"\n",
" self.text_input = widgets.Text(\n",
" placeholder='Enter segmentation prompt (e.g., \"person\", \"dog\")',\n",
" continuous_update=False,\n",
" )\n",
" self.text_input.observe(self._on_text_submit, names=\"value\")\n",
" self.text_button = widgets.Button(description=\"Segment\", button_style=\"success\")\n",
" self.text_button.on_click(self._on_text_prompt)\n",
" text_box = widgets.HBox(\n",
" [self.text_input, self.text_button],\n",
" layout=widgets.Layout(width=\"100%\", justify_content=\"space-between\"),\n",
" )\n",
"\n",
" self.box_mode_buttons = widgets.ToggleButtons(\n",
" options=[\"Positive Boxes\", \"Negative Boxes\"],\n",
" description=\"Box Mode:\",\n",
" button_style=\"\",\n",
" tooltips=[\n",
" \"Draw boxes around objects to include\",\n",
" \"Draw boxes around objects to exclude\",\n",
" ],\n",
" )\n",
" self.box_mode_buttons.observe(self._on_box_mode_change, names=\"value\")\n",
"\n",
" self.clear_button = widgets.Button(\n",
" description=\"Clear All Prompts\", button_style=\"warning\"\n",
" )\n",
" self.clear_button.on_click(self._on_clear_prompts)\n",
"\n",
" self.confidence_slider = widgets.FloatSlider(\n",
" value=0.5,\n",
" min=0.0,\n",
" max=1.0,\n",
" step=0.01,\n",
" description=\"Confidence:\",\n",
" continuous_update=False,\n",
" style={\"description_width\": \"initial\"},\n",
" )\n",
" self.confidence_slider.observe(self._on_confidence_change, names=\"value\")\n",
"\n",
" self.size_slider = widgets.IntSlider(\n",
" value=960,\n",
" min=300,\n",
" max=2000,\n",
" step=10,\n",
" description=\"Image Size:\",\n",
" continuous_update=False,\n",
" style={\"description_width\": \"initial\"},\n",
" )\n",
" self.size_slider.observe(self._on_size_change, names=\"value\")\n",
"\n",
" slider_box = widgets.HBox(\n",
" [self.confidence_slider, self.size_slider],\n",
" layout=widgets.Layout(justify_content=\"space-between\"),\n",
" )\n",
"\n",
" self.output = widgets.Output()\n",
" self.status_label = widgets.Label(value=\"Upload an image to begin\")\n",
"\n",
" # This box will hold our matplotlib output and we can target it with CSS.\n",
" self.plot_container = widgets.Box([self.output])\n",
" self.plot_container.add_class(\"no-drag\")\n",
"\n",
" # CSS to make the cursor a crosshair over the matplotlib canvas\n",
" css_style = widgets.HTML(\n",
" \"\"\"\n",
" \n",
" \"\"\"\n",
" )\n",
" # Create VBoxes for each accordion pane\n",
" source_pane = widgets.VBox([self.upload_widget, url_box])\n",
" prompt_pane = widgets.VBox(\n",
" [\n",
" widgets.Label(\"Text Prompt:\"),\n",
" text_box,\n",
" self.box_mode_buttons,\n",
" self.confidence_slider,\n",
" self.clear_button,\n",
" ]\n",
" )\n",
" display_pane = widgets.VBox([self.size_slider])\n",
"\n",
" # Create the Accordion to hold the control panes\n",
" self.accordion = widgets.Accordion(\n",
" children=[source_pane, prompt_pane, display_pane]\n",
" )\n",
" self.accordion.set_title(0, \"Image Source\")\n",
" self.accordion.set_title(1, \"Segmentation Prompts\")\n",
" self.accordion.set_title(2, \"Display Settings\")\n",
" self.accordion.selected_index = 0 # Start with the first pane open\n",
"\n",
" # Create the left sidebar for controls\n",
" sidebar = widgets.VBox(\n",
" [self.status_label, widgets.HTML(\"