# UPA_complete / app.py
# (Hugging Face Space page header removed — it was not Python and broke parsing.
#  Original commit message: "Create app.py", revision 14d3133.)
import io, os, json, time
import streamlit as st
import numpy as np
from PIL import Image
import torch
from huggingface_hub import hf_hub_download
# ────────── Streamlit settings
st.set_page_config(page_title="♻️ UPA – Upcycling Parts Analyzer",
page_icon="♻️", layout="wide")
torch.set_float32_matmul_precision("medium") # trade fp32 matmul precision for less VRAM
# Prefer the GPU when CUDA is available; everything below moves tensors to DEVICE.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
st.sidebar.info(f"Device attivo: **{DEVICE}**")
# ────────── UI controls
# NOTE: these module-level values are read inside load_sam() and the main loop below.
sam_backbone = st.sidebar.selectbox(
    "Backbone SAM", ("vit_h", "vit_l", "vit_b"), index=0,
    # Mojibake repaired: the original help text had UTF-8 bytes decoded as ISO-8859-7.
    help="Con la GPU T4 puoi tenere vit_h; vit_b è più leggero per CPU.",
)
# NOTE(review): a minimum of 0 points per side looks invalid for SAM's point grid —
# confirm whether the lower bound should be 1.
points_per_side = st.sidebar.slider("Punti per lato", 0, 128, 32)
iou_th = st.sidebar.slider("Soglia IoU", 0.0, 1.0, 0.8)
stab_th = st.sidebar.slider("Soglia Stabilità", 0.0, 1.0, 0.9)
min_area = st.sidebar.number_input("Area minima (px)", 0, 10_000, 100)
# ────────── helper model loader
@st.cache_resource(show_spinner="📥 Carico SAM …")
def load_sam(backbone: str, pts_per_side: int = 32,
             iou: float = 0.8, stab: float = 0.9):
    """Download a SAM checkpoint and build an automatic mask generator.

    Bug fix: the generator parameters are now explicit arguments (with the
    original slider defaults) so that ``st.cache_resource`` includes them in
    its cache key.  The original closed over the sidebar globals while caching
    only on ``backbone`` — moving a slider silently kept the stale generator.

    Args:
        backbone: one of "vit_h" / "vit_l" / "vit_b".
        pts_per_side: sampling grid density for automatic mask generation.
        iou: predicted-IoU threshold for keeping masks.
        stab: stability-score threshold for keeping masks.

    Returns:
        A configured ``SamAutomaticMaskGenerator``.
    """
    from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
    # NOTE(review): verify these Hub repo ids actually host the .pth files —
    # the official SAM weights are not always mirrored under facebook/sam-vit-*.
    repo, fname = {
        "vit_h": ("sam-vit-h", "sam_vit_h_4b8939.pth"),
        "vit_l": ("sam-vit-l", "sam_vit_l_0b3195.pth"),
        "vit_b": ("sam-vit-b", "sam_vit_b_01ec64.pth"),
    }[backbone]
    ckpt = hf_hub_download(repo_id=f"facebook/{repo}", filename=fname)
    sam = sam_model_registry[backbone](checkpoint=ckpt).to(DEVICE).eval()
    if DEVICE.type == "cuda":
        sam = torch.compile(sam)  # graph-compile only where it pays off (GPU)
    return SamAutomaticMaskGenerator(
        sam,
        points_per_side=pts_per_side,
        pred_iou_thresh=iou,
        stability_score_thresh=stab,
        crop_n_layers=1,
        crop_n_points_downscale_factor=2,
    )

mask_generator = load_sam(sam_backbone, points_per_side, iou_th, stab_th)
@st.cache_resource(show_spinner="🔤 Carico CLIP …")
def load_clip():
    """Load the LAION CLIP ViT-L/14 model and processor once per session.

    Returns:
        (model, processor) — the model is moved to DEVICE and set to eval mode.
    """
    from transformers import CLIPModel, CLIPProcessor
    name = "laion/CLIP-ViT-L-14-laion2B-s32B-b82K"
    proc = CLIPProcessor.from_pretrained(name)
    model = CLIPModel.from_pretrained(name).to(DEVICE).eval()
    return model, proc

clip_model, clip_proc = load_clip()
# ────────── utility helpers
# Longest image side allowed before downscaling (keeps SAM input bounded).
MAX_SIDE = 1024
# CLIP zero-shot prompts (Italian: sheet metal, circular hole, rectangular slot).
LABELS = ["lamiera", "foro circolare", "scanalatura rettangolare"]
def preprocess(img: Image.Image) -> Image.Image:
    """Cap the image's longest side at MAX_SIDE.

    Returns the image unchanged when it is already small enough; otherwise
    returns an aspect-preserving downscaled copy (the original is not mutated).
    """
    width, height = img.size
    if max(width, height) <= MAX_SIDE:
        return img
    resized = img.copy()
    resized.thumbnail((MAX_SIDE, MAX_SIDE))
    return resized
def classify_piece(np_img: np.ndarray, mask_u8: np.ndarray):
    """Zero-shot classify the masked region of an image against LABELS.

    Args:
        np_img: RGB image array — assumed (H, W, 3) uint8; TODO confirm at callers.
        mask_u8: uint8 mask of the same (H, W); non-zero marks the piece.

    Returns:
        (label, confidence): best-scoring entry of LABELS and its softmax
        probability as a float.
    """
    # Keep pixels where the mask is non-zero — pure-NumPy equivalent of the
    # original cv2.bitwise_and(np_img, np_img, mask=mask_u8); drops the
    # per-call cv2 import.
    keep = mask_u8[..., None].astype(bool)
    crop = Image.fromarray(np.where(keep, np_img, 0).astype(np.uint8))
    inputs = clip_proc(text=LABELS, images=crop, return_tensors="pt", padding=True)
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
    with torch.no_grad():
        logits = clip_model(**inputs).logits_per_image
    probs = logits.softmax(dim=1)
    conf, idx = torch.max(probs, 1)
    return LABELS[idx.item()], float(conf.item())
def draw_overlay(np_img: np.ndarray, results):
    """Draw each detected piece onto a copy of the image.

    Args:
        np_img: RGB image array.
        results: iterable of (label, confidence, uint8 mask) triples.

    Returns:
        A new array with a colored contour per mask and the label text
        written at each contour's centroid.
    """
    import cv2
    canvas = np_img.copy()
    rng = np.random.default_rng(seed=42)  # fixed seed: stable colors across reruns
    for label, _conf, mask in results:
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        color = tuple(int(c) for c in rng.integers(50, 255, 3))
        cv2.drawContours(canvas, contours, -1, color, 2)
        # Label at the centroid of every contour of this mask.
        for contour in contours:
            moments = cv2.moments(contour)
            if not moments["m00"]:  # degenerate contour: no area, no centroid
                continue
            cx = int(moments["m10"] / moments["m00"])
            cy = int(moments["m01"] / moments["m00"])
            cv2.putText(canvas, label, (cx, cy), cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, color, 2, cv2.LINE_AA)
    return canvas
# ────────── main interface
# (Mojibake repaired in the title and caption strings; indentation restored.)
st.title("🔍 Riconoscimento Parti Meccaniche")
files = st.file_uploader("Carica immagini JPG / PNG", type=["jpg", "jpeg", "png"],
                         accept_multiple_files=True)
if files:
    if st.button("Segmenta + Classifica"):
        for f in files:
            img_pil = preprocess(Image.open(f).convert("RGB"))
            np_img = np.array(img_pil)
            with st.spinner(f"Segmentazione {f.name}…"):
                t0 = time.time()
                # Drop masks smaller than the sidebar's minimum area.
                masks = [m for m in mask_generator.generate(np_img)
                         if m["area"] >= min_area]
                t1 = time.time()
            # Classify every surviving mask with CLIP.
            results = []
            for m in masks:
                mask_u8 = (m["segmentation"] * 255).astype(np.uint8)
                lbl, conf = classify_piece(np_img, mask_u8)
                results.append((lbl, conf, mask_u8))
            col1, col2 = st.columns([2, 1])
            with col1:
                overlay = draw_overlay(np_img, results)
                st.image(overlay,
                         caption=f"{f.name} – {len(results)} pezzi ∙ {t1 - t0:.1f}s",
                         use_container_width=True)
            with col2:
                st.subheader("Dettagli")
                for lbl, cf, _ in results:
                    st.write(f"{lbl}: {cf:.2%}")