Upload 3 files
- README.md +8 -11
- app.py +42 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,12 +1,9 @@
-pinned: false
----
+# SAM Gradio App for Hugging Face Space
+
+A simple Gradio demo for Meta's Segment Anything Model (SAM), deployable on Hugging Face Spaces.
+
+## Instructions
+1. Upload the `sam_vit_b.pth` model weights into the root of this repo on Hugging Face.
+2. Hugging Face will automatically install the dependencies and launch the app.
+3. Interact with the model via the Gradio UI.
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
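Step 1 of the new README assumes a manual upload of the checkpoint. If that is inconvenient, one alternative (not part of this commit) is to fetch the weights when the Space boots, e.g. at the top of `app.py` before the model is loaded. A minimal sketch, assuming the official ViT-B download URL from the segment-anything README and renaming the file to the `sam_vit_b.pth` path the app expects:

```python
# Sketch: download the ViT-B checkpoint on first boot instead of uploading it.
# The URL is the official one from the segment-anything README; the local
# filename matches what app.py loads.
import os
import urllib.request

CHECKPOINT_URL = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth"
CHECKPOINT_PATH = "sam_vit_b.pth"

if not os.path.exists(CHECKPOINT_PATH):
    urllib.request.urlretrieve(CHECKPOINT_URL, CHECKPOINT_PATH)
```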
app.py
ADDED
@@ -0,0 +1,42 @@
+import gradio as gr
+from segment_anything import sam_model_registry, SamPredictor
+from PIL import Image
+import numpy as np
+import torch
+
+# Load the SAM ViT-B model, on GPU when available
+sam_checkpoint = "sam_vit_b.pth"  # Upload this manually to the Hugging Face Space
+model_type = "vit_b"
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device)
+predictor = SamPredictor(sam)
+
+def segment_with_sam(image):
+    image_np = np.array(image.convert("RGB"))
+    predictor.set_image(image_np)
+
+    # Prompt with a single foreground point at the image center
+    h, w, _ = image_np.shape
+    point = np.array([[w // 2, h // 2]])
+    label = np.array([1])
+
+    masks, scores, _ = predictor.predict(
+        point_coords=point,
+        point_labels=label,
+        multimask_output=False
+    )
+
+    # Blend the predicted mask over the image as a red overlay
+    mask = masks[0]
+    mask_img = (mask[..., None] * np.array([255, 0, 0])).astype(np.uint8)
+    overlay = Image.fromarray((0.5 * image_np + 0.5 * mask_img).astype(np.uint8))
+
+    return overlay
+
+iface = gr.Interface(fn=segment_with_sam,
+                     inputs=gr.Image(type="pil"),
+                     outputs=gr.Image(type="pil"),
+                     title="Segment Anything with SAM",
+                     description="Simple SAM demo using Gradio.")
+
+iface.launch()
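A single center point is an ambiguous prompt, and `multimask_output=False` forces SAM to commit to one interpretation of it. A common variant (not in this commit) is to request all three candidate masks and keep the highest-scoring one; a sketch against the same `predictor`, using the `scores` array the app already receives:

```python
# Variant: ask SAM for its three candidate masks for the ambiguous point
# prompt and keep the one with the highest predicted IoU.
masks, scores, _ = predictor.predict(
    point_coords=point,
    point_labels=label,
    multimask_output=True,  # masks shape: (3, H, W) instead of (1, H, W)
)
best_mask = masks[np.argmax(scores)]
```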
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+gradio
+opencv-python
+numpy
+torch
+torchvision
+pillow
+git+https://github.com/facebookresearch/segment-anything.git
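Every entry is unpinned, so each Space rebuild resolves the latest versions, and the git requirement tracks segment-anything's default branch. If reproducible builds matter, pip's VCS syntax allows pinning to a specific ref; `<commit-sha>` below is a placeholder, not a value from this commit:

```
git+https://github.com/facebookresearch/segment-anything.git@<commit-sha>
```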