Update app.py
Browse files
app.py
CHANGED
|
@@ -1,15 +1,30 @@
|
|
| 1 |
'''
|
| 2 |
contains the infernce code for fastsam.py
|
| 3 |
'''
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
import numpy as np
|
| 5 |
import gradio as gr
|
| 6 |
from PIL import Image
|
|
|
|
| 7 |
from ultralytics import FastSAM
|
| 8 |
from ultralytics.models.fastsam import FastSAMPrompt
|
| 9 |
|
| 10 |
def inference(image):
|
| 11 |
|
| 12 |
-
model = FastSAM('FastSAM.pt')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
## inference results
|
| 15 |
# Run inference on an image
|
|
@@ -17,29 +32,33 @@ def inference(image):
|
|
| 17 |
|
| 18 |
# Prepare a Prompt Process object
|
| 19 |
prompt_process = FastSAMPrompt(source, results, device='cpu')
|
| 20 |
-
|
| 21 |
-
# Everything prompt
|
| 22 |
-
ann = prompt_process.everything_prompt()
|
| 23 |
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
# Bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
|
| 31 |
-
ann = prompt_process.point_prompt(points=[[200,200]],pointlabel=[1])
|
| 32 |
-
return prompt_process.plot(annotations=ann,output='./')
|
| 33 |
-
|
| 34 |
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
title = "Usage of FastSAM"
|
| 38 |
description = "Implementation of pre-trained fast-sam model for spaces."
|
| 39 |
|
| 40 |
-
demo = gr.Interface(inference,inputs=[gr.Image(shape=(32,32),labels="Input Image"),
|
|
|
|
|
|
|
| 41 |
outputs = [gr.Image(shape=(32,32),label='Output').style(width=128,height=128)],
|
| 42 |
-
title = title,
|
| 43 |
description = description)
|
| 44 |
|
| 45 |
demo.launch(debug = True)
|
|
|
|
| 1 |
'''
|
| 2 |
contains the inference code for fastsam.py
|
| 3 |
'''
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
import cv2
|
| 7 |
+
import torch
|
| 8 |
import numpy as np
|
| 9 |
import gradio as gr
|
| 10 |
from PIL import Image
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
from ultralytics import FastSAM
|
| 13 |
from ultralytics.models.fastsam import FastSAMPrompt
|
| 14 |
|
| 15 |
def inference(image, show_all=True, text_prompt=''):
    """Run FastSAM segmentation on *image* and return the annotated image.

    Parameters
    ----------
    image : np.ndarray
        Input image from the gradio ``Image`` component (H x W x 3 array).
    show_all : bool
        When True, annotate everything (overrides any text prompt).
    text_prompt : str
        Optional text prompt; falls back to ``params['text_prompt']`` when empty.

    Returns
    -------
    np.ndarray
        The plotted annotation result from ``FastSAMPrompt.plot_to_result``.
    """
    # Load the checkpoint by path. The original indexed `params` with the
    # path string (`params['./FastSAM.pt']`), which would raise KeyError.
    model = FastSAM('./FastSAM.pt')
    device = torch.device("cuda") if torch.cuda.is_available() else "cpu"

    # Fall back to the configured default prompt when the caller gives none.
    text_prompt = text_prompt or params['text_prompt']

    # The original referenced an undefined name `input_image` here; the
    # callback argument is `image`.
    input_image = Image.fromarray(image).convert("RGB")
    w, h = input_image.size

    # Scale so the longest side equals the configured input size.
    scale = params['input_size'] / max(w, h)
    new_w, new_h = int(w * scale), int(h * scale)
    input_image = input_image.resize((new_w, new_h))

    # Run inference on the resized image. NOTE(review): this call was hidden
    # in the diff; argument values reconstructed from standard FastSAM usage
    # — confirm against the original line 31.
    results = model(input_image, device=device, retina_masks=True,
                    imgsz=params['input_size'], conf=0.4, iou=0.9)

    # Prepare a Prompt Process object on the same device used for inference
    # (the original hard-coded 'cpu' despite computing `device`).
    prompt_process = FastSAMPrompt(input_image, results, device=device)

    ann = None
    if text_prompt:
        ann = prompt_process.text_prompt(text=text_prompt)
    if show_all:
        # Only warn about overriding when there actually is a text prompt.
        if text_prompt:
            gr.Warning("Annotations are enabled, Overiding the text prompt!")
        ann = prompt_process.everything_prompt()
    if ann is None:
        # Neither branch fired; default to everything so `ann` is defined
        # (the original raised UnboundLocalError in this case).
        ann = prompt_process.everything_prompt()

    result = prompt_process.plot_to_result(
        annotations=ann,
        bboxes=None,
        points=None,
        point_label=None,
        withContours=True,
        better_quality=False,
    )
    return result
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
params = {
|
| 48 |
+
'input_size':1024,
|
| 49 |
+
'point_prompt':[[0,0]],
|
| 50 |
+
'text_prompt':'jeep',
|
| 51 |
+
'output_dir':'./testimg.png'
|
| 52 |
+
}
|
| 53 |
|
| 54 |
title = "Usage of FastSAM"
|
| 55 |
description = "Implementation of pre-trained fast-sam model for spaces."
|
| 56 |
|
| 57 |
+
demo = gr.Interface(inference,inputs=[gr.Image(shape=(32,32),labels="Input Image"),
|
| 58 |
+
gr.Checkbox(True,label='Show All Annotations'),
|
| 59 |
+
gr.Textbox('',label='Provide the text prompt: '),],
|
| 60 |
outputs = [gr.Image(shape=(32,32),label='Output').style(width=128,height=128)],
|
| 61 |
+
title = title,
|
| 62 |
description = description)
|
| 63 |
|
| 64 |
demo.launch(debug = True)
|