valegro committed on
Commit
14d3133
Β·
verified Β·
1 Parent(s): d06be5a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +126 -0
app.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io, os, json, time
2
+ import streamlit as st
3
+ import numpy as np
4
+ from PIL import Image
5
+ import torch
6
+ from huggingface_hub import hf_hub_download
7
+
8
+ # ────────── impostazioni Streamlit
9
+ st.set_page_config(page_title="♻️ UPA – Upcycling Parts Analyzer",
10
+ page_icon="♻️", layout="wide")
11
+ torch.set_float32_matmul_precision("medium") # meno VRAM fp32
12
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
+ st.sidebar.info(f"Device attivo: **{DEVICE}**")
14
+
15
+ # ────────── controlli UI
16
+ sam_backbone = st.sidebar.selectbox(
17
+ "Backbone SAM", ("vit_h", "vit_l", "vit_b"), index=0,
18
+ help="Con la GPU T4 puoi tenere vit_h; vit_b Γ¨ piΓΉ leggero per CPU."
19
+ )
20
+ points_per_side = st.sidebar.slider("Punti per lato", 0, 128, 32)
21
+ iou_th = st.sidebar.slider("Soglia IoU", 0.0, 1.0, 0.8)
22
+ stab_th = st.sidebar.slider("Soglia StabilitΓ ", 0.0, 1.0, 0.9)
23
+ min_area = st.sidebar.number_input("Area minima (px)", 0, 10_000, 100)
24
+
25
+ # ────────── helper model loader
26
+ @st.cache_resource(show_spinner="πŸ“₯ Carico SAM …")
27
+ def load_sam(backbone: str):
28
+ from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
29
+ repo, fname = {
30
+ "vit_h": ("sam-vit-h", "sam_vit_h_4b8939.pth"),
31
+ "vit_l": ("sam-vit-l", "sam_vit_l_0b3195.pth"),
32
+ "vit_b": ("sam-vit-b", "sam_vit_b_01ec64.pth"),
33
+ }[backbone]
34
+ ckpt = hf_hub_download(repo_id=f"facebook/{repo}", filename=fname)
35
+ sam = sam_model_registry[backbone](checkpoint=ckpt).to(DEVICE).eval()
36
+ if DEVICE.type == "cuda":
37
+ sam = torch.compile(sam)
38
+ gen = SamAutomaticMaskGenerator(
39
+ sam,
40
+ points_per_side=points_per_side,
41
+ pred_iou_thresh=iou_th,
42
+ stability_score_thresh=stab_th,
43
+ crop_n_layers=1,
44
+ crop_n_points_downscale_factor=2
45
+ )
46
+ return gen
47
+ mask_generator = load_sam(sam_backbone)
48
+
49
+ @st.cache_resource(show_spinner="πŸ”€ Carico CLIP …")
50
+ def load_clip():
51
+ from transformers import CLIPModel, CLIPProcessor
52
+ name = "laion/CLIP-ViT-L-14-laion2B-s32B-b82K"
53
+ proc = CLIPProcessor.from_pretrained(name)
54
+ model = CLIPModel.from_pretrained(name).to(DEVICE).eval()
55
+ return model, proc
56
+ clip_model, clip_proc = load_clip()
57
+
58
+ # ────────── funzioni utility
59
+ MAX_SIDE = 1024
60
+ LABELS = ["lamiera", "foro circolare", "scanalatura rettangolare"]
61
+
62
+ def preprocess(img: Image.Image) -> Image.Image:
63
+ if max(img.size) > MAX_SIDE:
64
+ img = img.copy()
65
+ img.thumbnail((MAX_SIDE, MAX_SIDE))
66
+ return img
67
+
68
+ def classify_piece(np_img: np.ndarray, mask_u8: np.ndarray):
69
+ from cv2 import bitwise_and
70
+ crop = Image.fromarray(bitwise_and(np_img, np_img, mask=mask_u8))
71
+ inputs = clip_proc(text=LABELS, images=crop, return_tensors="pt", padding=True)
72
+ inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
73
+ with torch.no_grad():
74
+ logits = clip_model(**inputs).logits_per_image
75
+ probs = logits.softmax(dim=1)
76
+ conf, idx = torch.max(probs, 1)
77
+ return LABELS[idx.item()], float(conf.item())
78
+
79
+ def draw_overlay(np_img: np.ndarray, results):
80
+ import cv2
81
+ overlay = np_img.copy()
82
+ rng = np.random.default_rng(seed=42)
83
+ for lbl, _, mask in results:
84
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
85
+ color = tuple(int(c) for c in rng.integers(50, 255, 3))
86
+ cv2.drawContours(overlay, contours, -1, color, 2)
87
+ # scrive la label sul centroide
88
+ for cnt in contours:
89
+ M = cv2.moments(cnt)
90
+ if M["m00"]:
91
+ cx, cy = int(M["m10"]/M["m00"]), int(M["m01"]/M["m00"])
92
+ cv2.putText(overlay, lbl, (cx, cy), cv2.FONT_HERSHEY_SIMPLEX,
93
+ 0.6, color, 2, cv2.LINE_AA)
94
+ return overlay
95
+
96
+ # ────────── interfaccia principale
97
+ st.title("πŸ” Riconoscimento Parti Meccaniche")
98
+ files = st.file_uploader("Carica immagini JPG / PNG", type=["jpg","jpeg","png"],
99
+ accept_multiple_files=True)
100
+
101
+ if files:
102
+ if st.button("Segmenta + Classifica"):
103
+ for f in files:
104
+ img_pil = preprocess(Image.open(f).convert("RGB"))
105
+ np_img = np.array(img_pil)
106
+
107
+ with st.spinner(f"Segmentazione {f.name}…"):
108
+ t0 = time.time()
109
+ masks = [m for m in mask_generator.generate(np_img) if m["area"] >= min_area]
110
+ t1 = time.time()
111
+
112
+ results = []
113
+ for m in masks:
114
+ mask_u8 = (m["segmentation"] * 255).astype(np.uint8)
115
+ lbl, conf = classify_piece(np_img, mask_u8)
116
+ results.append((lbl, conf, mask_u8))
117
+
118
+ col1, col2 = st.columns([2,1])
119
+ with col1:
120
+ overlay = draw_overlay(np_img, results)
121
+ st.image(overlay, caption=f"{f.name} – {len(results)} pezzi βˆ™ {t1-t0:.1f}s",
122
+ use_container_width=True)
123
+ with col2:
124
+ st.subheader("Dettagli")
125
+ for lbl, cf, _ in results:
126
+ st.write(f"{lbl}: {cf:.2%}")