# NOTE: removed non-Python scrape artifacts that preceded this file
# (a "File size" header plus git-blame commit-hash and line-number columns);
# they were page-rendering residue, not source code.
'''
Contains the inference code for fastsam.py.
'''
import os
import sys
import cv2
import torch
import numpy as np
import gradio as gr
from PIL import Image
import matplotlib.pyplot as plt
from ultralytics import FastSAM
from ultralytics.models.fastsam import FastSAMPrompt
def inference(input_image, show_all=True, text_prompt=''):
    """Run FastSAM segmentation on an image and return the annotated result.

    Args:
        input_image: Input image as a numpy array (as delivered by
            ``gr.Image``); converted to RGB internally.
        show_all: If True, annotate every detected segment. When a text
            prompt is also given, this takes precedence (a warning is shown).
        text_prompt: Optional text query; when non-empty and ``show_all`` is
            False, only segments matching the text are annotated.

    Returns:
        The annotated image produced by FastSAM's plotting helper.
    """
    # Fixed: the original indexed params with a non-existent key
    # ('./FastSAM.pt'), which raised KeyError; the path itself is the
    # intended argument.
    model = FastSAM('./FastSAM.pt')
    # Fixed: device was computed but 'cpu' was hard-coded below; now the
    # GPU is actually used when available.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Fixed: do NOT overwrite the caller's text_prompt with
    # params['text_prompt'] — that made the UI textbox a no-op.

    # Normalize to RGB and downscale so the longest side matches the
    # model's expected input size, preserving aspect ratio.
    input_image = Image.fromarray(input_image).convert('RGB')
    w, h = input_image.size
    scale = params['input_size'] / max(w, h)
    new_w, new_h = int(w * scale), int(h * scale)
    input_image = input_image.resize((new_w, new_h))

    # Fixed: the original passed an undefined name `source` here
    # (NameError at call time); the resized image is the intended source.
    results = model(input_image, device=device, retina_masks=True,
                    imgsz=params['input_size'], conf=0.4, iou=0.9)
    prompt_process = FastSAMPrompt(input_image, results, device=device)

    # Fixed: `ann` was unbound when text_prompt was empty and show_all was
    # False; default to annotating everything, narrowed by the text prompt
    # only when show_all does not override it.
    if text_prompt and not show_all:
        ann = prompt_process.text_prompt(text=text_prompt)
    else:
        if text_prompt and show_all:
            gr.Warning("Annotations are enabled, Overriding the text prompt!")
        ann = prompt_process.everything_prompt()

    result = prompt_process.plot_to_result(
        annotations=ann,
        bboxes=None,
        points=None,
        point_label=None,
        withContours=True,
        better_quality=False,
    )
    return result
# Shared runtime configuration for the demo.
params = {
    'input_size': 1024,             # longest image side fed to the model
    'point_prompt': [[0, 0]],       # default point prompt (not wired to the UI)
    'text_prompt': 'jeep',          # default text query
    'output_dir': './testimg.png',  # destination used for saved results
}
# Text shown in the Gradio page header.
title = "Usage of FastSAM"
description = "Implementation of pre-trained fast-sam model for spaces."

# Wire the inference function into a simple image-in / image-out UI:
# an image source, a toggle for annotating everything, and a text prompt.
ui_inputs = [
    gr.Image(sources=['clipboard', 'upload']),
    gr.Checkbox(True, label='Show All Annotations'),
    gr.Textbox('', label='Provide the text prompt: '),
]
ui_outputs = [gr.Image(label='Output')]

demo = gr.Interface(
    inference,
    inputs=ui_inputs,
    outputs=ui_outputs,
    title=title,
    description=description,
)
demo.launch(debug=True)
|