import gradio as gr
import numpy as np
from PIL import Image
from segment_anything import sam_model_registry, SamPredictor
from transformers import BlipProcessor, BlipForQuestionAnswering
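
# Load the Segment Anything model (ViT-B variant) and wrap it in a predictor.
# The checkpoint file must be downloaded separately from the segment-anything repository.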
sam_checkpoint = "sam_vit_b_01ec64.pth"
sam_model_type = "vit_b"
sam_model = sam_model_registry[sam_model_type](checkpoint=sam_checkpoint)
sam_predictor = SamPredictor(sam_model)
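
# Load BLIP visual question answering, used to classify the segmented object's material.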
blip_model_name = "Salesforce/blip-vqa-base"
blip_processor = BlipProcessor.from_pretrained(blip_model_name)
blip_model = BlipForQuestionAnswering.from_pretrained(blip_model_name)
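
# Reference photo of the empty scene, set via the "Set Base" tab.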
base_image = None


def set_base(image):
    global base_image
    base_image = image
    return "Base image saved successfully."


def detect_trash(trash_image):
    global base_image
    if base_image is None:
        return "Please upload a base image first."
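
    # Align the two photos: force both to RGB and resize the base image to the
    # new image's size so the arrays line up pixel-for-pixel.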
    base_np = np.array(base_image.convert("RGB").resize(trash_image.size))
    trash_np = np.array(trash_image.convert("RGB"))
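
    # Per-pixel absolute difference; a pixel whose summed RGB change exceeds
    # the threshold of 50 is treated as part of a new object.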
    diff = np.abs(trash_np.astype(np.int16) - base_np.astype(np.int16))
    mask = (diff.sum(axis=2) > 50).astype(np.uint8)
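
    # Bounding box around all changed pixels, in XYXY order as SAM expects.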
    coords = np.argwhere(mask)
    if coords.size == 0:
        return "No difference detected."
    y0, x0 = coords.min(axis=0)
    y1, x1 = coords.max(axis=0)
    box = np.array([x0, y0, x1, y1])
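
    # Refine the coarse difference box into a precise object mask with SAM.
    # SamPredictor.predict takes a single length-4 box via the `box` keyword.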
    sam_predictor.set_image(trash_np)
    masks, scores, logits = sam_predictor.predict(box=box, multimask_output=False)
    mask_refined = masks[0]
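
    # Crop the detected object from the photo (max indices are inclusive, hence the +1).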
    ys, xs = np.where(mask_refined)
    if ys.size == 0:
        return "SAM did not find any object."
    cropped = trash_np[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
    cropped_img = Image.fromarray(cropped)
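
    # Ask BLIP what material the cropped object is made of.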
    question = "What material is this? Choose from plastic, metal, paper, cardboard, glass, trash."
    inputs = blip_processor(cropped_img, question, return_tensors="pt")
    out = blip_model.generate(**inputs)
    answer = blip_processor.decode(out[0], skip_special_tokens=True)
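
    # Map the free-form answer onto a known class, falling back to "trash".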
    valid_classes = ["plastic", "metal", "paper", "cardboard", "glass", "trash"]
    result = next((c for c in valid_classes if c in answer.lower()), "trash")

    return result.capitalize()


set_base_ui = gr.Interface(
    fn=set_base,
    inputs=gr.Image(type="pil", label="Upload Base Image"),
    outputs=gr.Textbox(label="Result"),
    title="Set Base Image",
    api_name="/set_base",
)

detect_trash_ui = gr.Interface(
    fn=detect_trash,
    inputs=gr.Image(type="pil", label="Upload Trash Image"),
    outputs=gr.Textbox(label="Detected Material"),
    title="Detect Trash Material",
    api_name="/detect_trash",
)

demo = gr.TabbedInterface([set_base_ui, detect_trash_ui], ["Set Base", "Detect Trash"])
demo.launch()
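
# Example client calls against the two endpoints above (a minimal sketch, assuming
# the app is running locally on Gradio's default port, the `gradio_client` package
# is installed, and the image filenames are placeholders):
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860/")
#   client.predict(handle_file("empty_scene.jpg"), api_name="/set_base")
#   material = client.predict(handle_file("scene_with_trash.jpg"), api_name="/detect_trash")
#   print(material)  # e.g. "Plastic"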