# Non-code page residue from the Hugging Face file viewer, preserved as a comment:
# KurtLin's picture — "Initial Commit" (c46f4bf) — raw / history / blame — 1.97 kB
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
from segment_anything import sam_model_registry, SamPredictor
from preprocess import show_mask, show_points, show_box
import gradio as gr
# --- SAM model setup: module-level state shared by `inference` below ---
sam_checkpoint = "weights/sam_vit_b_01ec64.pth"  # local path to the ViT-B SAM weights
model_type = "vit_b"  # must match the architecture of the checkpoint above
device = "cuda" if torch.cuda.is_available() else "cpu"  # prefer GPU when present
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
# One predictor instance is reused for every request; `set_image` is called per call.
predictor = SamPredictor(sam)
def get_coords(evt: gr.SelectData):
    """Format the pixel position of a Gradio image-click event as an "x, y" string."""
    x = evt.index[0]
    y = evt.index[1]
    return f"{x}, {y}"
def inference(image, input_label):
    """Segment the object at the clicked point and paint its mask red.

    Parameters
    ----------
    image : numpy.ndarray
        Image array from the Gradio image component — assumed (H, W, 3) RGB;
        TODO confirm against the Gradio component configuration.
    input_label : str
        "x, y" coordinate string produced by `get_coords`. NOTE(review): this
        arrives via a `gr.Label` component — verify Gradio passes its value
        through as the plain string.

    Returns
    -------
    numpy.ndarray
        A copy of `image` with the red channel set to 255 inside the mask.
    """
    predictor.set_image(image)
    # Parse the "x, y" string once (original split it twice); int() tolerates
    # the surrounding whitespace that `get_coords` inserts after the comma.
    parts = input_label.split(',')
    input_point = np.array([[int(parts[0]), int(parts[1])]])
    # Label 1 marks the clicked point as foreground. The original rebound the
    # `input_label` parameter here, reusing one name for two unrelated values.
    point_labels = np.array([1])
    masks, scores, logits = predictor.predict(
        point_coords=input_point,
        point_labels=point_labels,
        multimask_output=True,
    )
    # Keep the original behavior of taking the first proposed mask (SAM also
    # returns `scores` that could be used to pick the best one).
    mask = masks[0]
    image2 = image.copy()
    image2[mask, 0] = 255  # highlight the mask by saturating the red channel
    return image2
# --- Gradio UI: image picker, coordinate readout, and segmentation output ---
my_app = gr.Blocks()
with my_app:
    gr.Markdown("Segment Anything Testing")
    with gr.Tabs():
        with gr.TabItem("Select your image"):
            with gr.Row():
                with gr.Column():
                    # NOTE(review): `shape=` is a Gradio 3.x resize argument —
                    # confirm the installed Gradio version still accepts it.
                    img_source = gr.Image(label="Please select picture.", value='./images/truck.jpg', shape=(768, 768))
                    # Shows the clicked coordinate; also serves as the string
                    # input to `inference` when the button is clicked.
                    coords = gr.Label(label="Image Coordinate")
                    infer = gr.Button(label="Segment")
                with gr.Column():
                    img_output = gr.Image(label="Output Mask")
            # Clicking on the image writes its "x, y" position into `coords`.
            img_source.select(get_coords, [], coords)
            # Button runs SAM at the selected point and displays the overlay.
            infer.click(
                inference,
                [
                    img_source,
                    coords
                ],
                [
                    img_output
                ]
            )
# debug=True surfaces server-side tracebacks in the browser during development.
my_app.launch(debug=True)