initial commit
app.py ADDED
@@ -0,0 +1,52 @@
import torch
import gradio as gr
import matplotlib.pyplot as plt
from PIL import Image
from transformers import SamModel, SamProcessor

device = 'cuda' if torch.cuda.is_available() else 'cpu'
processor = SamProcessor.from_pretrained('facebook/sam-vit-base')
model = SamModel.from_pretrained('hmdliu/sidewalks-seg-base')
model.to(device)

def segment_sidewalk(image, threshold):
    # init data: prompt SAM with a box covering the entire image
    width, height = image.size
    prompt = [0, 0, width, height]
    inputs = processor(image, input_boxes=[[prompt]], return_tensors='pt')
    # make prediction
    outputs = model(pixel_values=inputs['pixel_values'].to(device),
                    input_boxes=inputs['input_boxes'].to(device),
                    multimask_output=False)
    prob_map = torch.sigmoid(outputs.pred_masks.squeeze()).cpu().detach()
    prediction = (prob_map > threshold).float()
    prob_map, prediction = prob_map.numpy(), prediction.numpy()
    # visualize results
    save_image(image, 'image.png')
    save_image(prob_map, 'prob.png', cmap='jet')
    save_image(prediction, 'mask.png', cmap='gray')
    return Image.open('image.png'), Image.open('mask.png'), Image.open('prob.png')

def save_image(image, path, **kwargs):
    plt.figure(figsize=(8, 8))
    plt.imshow(image, interpolation='nearest', **kwargs)
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(path, bbox_inches='tight', pad_inches=0)
    plt.close()

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type='pil', label='TIFF Image')
            threshold_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.5, label='Prediction Threshold')
            segment_button = gr.Button('Segment')
        with gr.Column():
            prediction = gr.Image(type='pil', label='Segmentation Result')
            prob_map = gr.Image(type='pil', label='Probability Map')
    # run segmentation when the button is clicked
    segment_button.click(
        segment_sidewalk,
        inputs=[image_input, threshold_slider],
        outputs=[image_input, prediction, prob_map]
    )

demo.launch(debug=True, show_error=True)
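
As a quick sanity check outside the Gradio UI, segment_sidewalk can also be called directly on a PIL image. This is only a minimal sketch: the file name and the RGB conversion are placeholder assumptions for illustration, not part of the original app.

    test_image = Image.open('example.tif').convert('RGB')  # placeholder input path (assumption)
    original, mask, prob = segment_sidewalk(test_image, threshold=0.5)
    mask.save('mask_preview.png')   # thresholded sidewalk mask
    prob.save('prob_preview.png')   # probability map rendered with the 'jet' colormap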