jacopo22295 committed on
Commit
e651242
·
verified ·
1 Parent(s): ab8480c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +239 -137
app.py CHANGED
@@ -1,12 +1,23 @@
1
  import os
 
2
  import time
 
 
 
3
  import gradio as gr
4
  from PIL import Image
 
5
  import torch
6
  import torchvision.transforms as T
7
  import torchvision.models as models
8
- from openai import OpenAI
9
- import spaces # per ZeroGPU
 
 
 
 
 
 
10
 
11
  # ======================
12
  # Config / Model / Classes
@@ -23,7 +34,7 @@ IDX2LABEL = {
23
  5: "pitting_corrosion",
24
  6: "stress_corrosion",
25
  7: "under_insulation_corrosion",
26
- 8: "uniform_corrosion"
27
  }
28
 
29
  ZONES = [
@@ -36,9 +47,13 @@ ZONES = [
36
  "Cargo holds / Dry bulk",
37
  "Engine room / Hot surfaces",
38
  "Pipes / Under insulation (UIC/CUI)",
39
- "Other / Not sure"
40
  ]
41
 
 
 
 
 
42
  def load_model_cpu():
43
  m = models.resnet50(weights=None)
44
  num_ftrs = m.fc.in_features
@@ -48,6 +63,7 @@ def load_model_cpu():
48
  m.eval()
49
  return m
50
 
 
51
  model_cpu = load_model_cpu()
52
 
53
  transform = T.Compose([
@@ -59,24 +75,32 @@ transform = T.Compose([
59
  ])
60
 
61
  # ======================
62
- # OpenAI Assistant
63
  # ======================
64
 
65
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
66
- if not OPENAI_API_KEY:
67
- print("WARNING: OPENAI_API_KEY not set. Add it in HF Space Secrets.")
68
-
69
- client = OpenAI(api_key=OPENAI_API_KEY)
70
-
71
  ASSISTANT_ID = os.environ.get("PPG_ASSISTANT_ID", "asst_20DNMEENkfBsYupFjPCwfijZ")
72
  VECTOR_STORE_ID = os.environ.get("PPG_VECTOR_STORE_ID", "")
73
  APP_FORCE_LANG = os.environ.get("APP_FORCE_LANG", "").strip()
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  # ======================
76
  # Inference utils (CPU/GPU)
77
  # ======================
78
 
79
- def predict_on_cpu(img_pil: Image.Image):
80
  x = transform(img_pil.convert("RGB")).unsqueeze(0)
81
  with torch.no_grad():
82
  logits = model_cpu(x)
@@ -85,7 +109,7 @@ def predict_on_cpu(img_pil: Image.Image):
85
  return IDX2LABEL.get(idx, f"class_{idx}"), float(probs[idx])
86
 
87
  @spaces.GPU(duration=60)
88
- def predict_on_gpu(img_pil: Image.Image):
89
  device = "cuda"
90
  m = models.resnet50(weights=None)
91
  num_ftrs = m.fc.in_features
@@ -101,134 +125,221 @@ def predict_on_gpu(img_pil: Image.Image):
101
  idx = int(probs.argmax())
102
  return IDX2LABEL.get(idx, f"class_{idx}"), float(probs[idx])
103
 
104
- def predict_image(image: Image.Image):
105
  try:
106
  if torch.cuda.is_available():
107
  return predict_on_gpu(image)
108
- except Exception:
109
- pass
110
  return predict_on_cpu(image)
111
 
112
  # ======================
113
- # Assistant calls
114
  # ======================
115
 
116
- def call_assistant(label, confidence, zone, note, user_question, thread_id=None):
117
- if not thread_id:
118
- if VECTOR_STORE_ID:
119
- thread = client.beta.threads.create(
120
- tool_resources={"file_search": {"vector_store_ids": [VECTOR_STORE_ID]}}
121
- )
122
- else:
123
- thread = client.beta.threads.create()
124
- thread_id = thread.id
125
-
126
- core_context = f"""
127
- Classification: {label} ({round(confidence*100,2)}%).
128
- Zone: {zone or "Not specified"}.
129
- User note: {note or "(none)"}.
130
- """
131
- user_payload = core_context + "\nUser question:\n" + (user_question or "Provide initial advisory.")
132
 
133
- client.beta.threads.messages.create(
134
- thread_id=thread_id,
135
- role="user",
136
- content=user_payload,
137
- )
138
-
139
- second_lang_clause = f"Then provide the same content in {APP_FORCE_LANG}." if APP_FORCE_LANG else \
140
- "Then repeat in the user's language if detectable from note; else in Italian."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141
 
142
- extra_instructions = (
143
- "Act as a PPG marine coatings technical specialist for ships (marine environments only). "
144
- "Answer ONLY using information found in the attached docs via File Search. "
145
- "If docs lack details, reply 'Not in docs'. "
146
- "ALWAYS ask for the zone if missing before prescribing. "
147
- "Structure: Diagnosis; Surface Preparation; System; Notes; Disclaimer. "
148
- "Provide first in English. " + second_lang_clause
149
- )
150
 
151
- run = client.beta.threads.runs.create(
152
- thread_id=thread_id,
153
- assistant_id=ASSISTANT_ID,
154
- instructions=extra_instructions,
155
- )
156
 
157
- while True:
158
- r = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
159
- if r.status in ["completed", "failed", "cancelled", "expired"]:
160
- break
161
- time.sleep(0.6)
162
-
163
- msgs = client.beta.threads.messages.list(thread_id=thread_id)
164
- reply = None
165
- for m in msgs.data:
166
- if m.role == "assistant":
167
- for part in m.content:
168
- if getattr(part, "type", "") == "text":
169
- reply = part.text.value
170
- break
171
- if reply:
172
- break
173
- return reply or "No reply from Assistant.", thread_id
174
 
175
  # ======================
176
- # Pipelines (generator) con barra di avanzamento
177
  # ======================
178
 
179
  def run_analysis(image, note, zone, chat_history, thread_state):
180
- # La progress bar mostra avanzamenti deterministici lato server.
 
 
 
181
  with gr.Progress() as prog:
182
- prog(0.02, desc="Ricezione immagine")
183
- if image is None:
184
- yield "No image received.", chat_history, thread_state
185
- return
186
-
187
- prog(0.06, desc="Validazione input")
188
- if not zone or zone == "Other / Not sure":
189
- yield "**Please select the area/zone first.**", chat_history, thread_state
190
- return
191
-
192
- # messaggio intermedio per feedback immediato
193
- yield "**Analyzing image...** Please wait.", chat_history, thread_state
194
-
195
- prog(0.15, desc="Preprocessing")
196
- # un micro ritardo solo per dare feedback visivo
197
- time.sleep(0.05)
198
-
199
- prog(0.45, desc="Classificazione (CNN)")
200
- label, conf = predict_image(image)
 
 
 
 
 
 
 
 
 
 
201
 
202
- prog(0.70, desc="Consulto PPG Assistant")
203
- reply, thread_id = call_assistant(label, conf, zone, note, "Provide initial advisory.")
 
 
 
204
 
205
- prog(0.98, desc="Composizione risposta")
206
 
207
- header = f"**Model result:** `{label}` — confidence **{round(conf*100,2)}%**\n\n"
208
- out_text = header + (reply or "")
209
- new_history = chat_history[:] if chat_history else []
210
- new_history.append(("", reply))
211
- prog(1.0, desc="Fatto")
 
212
 
213
- yield out_text, new_history, {"thread_id": thread_id, "label": label, "confidence": conf, "zone": zone or ""}
 
 
 
 
 
214
 
215
  def continue_chat(user_msg, chat_history, thread_state, note, zone):
216
- if not user_msg.strip():
217
  return chat_history, ""
218
  with gr.Progress() as prog:
219
- prog(0.1, desc="Invio messaggio")
220
- label = (thread_state or {}).get("label") or "unknown"
221
- conf = (thread_state or {}).get("confidence") or 0.0
222
- current_zone = zone or (thread_state or {}).get("zone") or "Not specified"
223
- thread_id = (thread_state or {}).get("thread_id")
224
-
225
- prog(0.6, desc="Consulto PPG Assistant")
226
- reply, thread_id = call_assistant(label, conf, current_zone, note, user_msg, thread_id)
227
-
228
- chat_history.append((user_msg, reply))
229
- thread_state["thread_id"] = thread_id
230
- prog(1.0, desc="Fatto")
231
- return chat_history, ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
 
233
  # ======================
234
  # UI
@@ -238,13 +349,10 @@ WELCOME = """
238
  # Corrosion Assistant — Beta
239
 
240
  **Welcome!**
241
- This model is trained for educational purpose only. At this time is locally trained with c.a. 9000 Images, some of them AI generated to give better instruction to the Model
242
- and our proprietary AI Analyzer. More data/Pictures are needed to increase accuracy. At this time Crevice corrosion and Galvanic corrosion are still not well
243
- recognized so use this Model at your own risk.
244
- **Disclaimer**: research & experimental only. Made with love by JQ.
245
  """
246
 
247
- # Overlay HTML/CSS per stato di caricamento/analisi
248
  LOADER_HTML = """
249
  <div id="overlay-mask" style="
250
  position: fixed; inset: 0; background: rgba(0,0,0,0.55);
@@ -253,22 +361,19 @@ LOADER_HTML = """
253
  ">
254
  <div style="background:#111; color:#fff; padding:24px 28px; border-radius:16px;
255
  font-family: ui-sans-serif, system-ui, -apple-system; text-align:center;
256
- box-shadow: 0 10px 30px rgba(0,0,0,0.5);">
257
  <div class="spinner" style="
258
  width:48px;height:48px;border:4px solid #444;border-top-color:#fff;border-radius:50%;
259
- margin:0 auto 14px; animation: spin 1s linear infinite;"></div>
260
- <div style="font-size:16px; font-weight:600;">Elaborazione in corso…</div>
261
- <div style="opacity:0.85; font-size:12px; margin-top:6px;">Non toccare nulla, grazie.</div>
262
  </div>
263
  </div>
264
- <style>
265
- @keyframes spin { to { transform: rotate(360deg); } }
266
- </style>
267
  """
268
 
269
- # piccoli helper per mostrare/nascondere overlay e bloccare/sbloccare bottone
270
  def _show_overlay_and_busy():
271
- return gr.update(visible=True), gr.update(interactive=False, value="🔄 Analisi in corso…")
272
 
273
  def _hide_overlay_and_idle():
274
  return gr.update(visible=False), gr.update(interactive=True, value="Analyze image")
@@ -276,15 +381,11 @@ def _hide_overlay_and_idle():
276
  with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
277
  gr.Markdown(WELCOME)
278
 
279
- # overlay nascosto di default
280
  overlay = gr.HTML(LOADER_HTML, visible=False)
281
- # NB: l’overlay sta in cima all’app grazie a position:fixed
282
 
283
  with gr.Row():
284
  with gr.Column(scale=2):
285
- # Lato upload”: non posso mostrare percentuale reale dell’upload,
286
- # ma il bottone mostrerà spinner e l’overlay partirà subito al click.
287
- img = gr.Image(type="pil", sources=["upload","webcam"], label="Upload or webcam")
288
  note = gr.Textbox(label="Notes / Context (optional)")
289
  zone = gr.Dropdown(choices=ZONES, label="Zone (indicative)", value="Other / Not sure")
290
  analyze_btn = gr.Button("Analyze image", variant="primary")
@@ -301,14 +402,14 @@ with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
301
  clear_btn = gr.Button("Clear chat")
302
  with gr.Column(scale=2):
303
  gr.Markdown(
304
- "> **Disclaimer:** Research & experimental use only. Validate with official PPG specs. "
305
- "No professional advice or responsibility assumed."
306
  )
307
 
308
  chat_state = gr.State([])
309
  thread_state = gr.State({"thread_id": None, "label": None, "confidence": 0.0, "zone": ""})
310
 
311
- # catena: mostra overlay + disabilita bottone -> run -> nascondi overlay + riabilita -> aggiorna chat
312
  analyze_btn.click(
313
  fn=_show_overlay_and_busy,
314
  inputs=[],
@@ -318,7 +419,7 @@ with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
318
  fn=run_analysis,
319
  inputs=[img, note, zone, chat_state, thread_state],
320
  outputs=[out_md, chat_state, thread_state],
321
- show_progress=True # barra deterministica server-side
322
  ).then(
323
  fn=_hide_overlay_and_idle,
324
  inputs=[],
@@ -348,5 +449,6 @@ with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
348
  demo.api_mode = "enabled"
349
 
350
  if __name__ == "__main__":
 
351
  demo.launch()
352
 
 
1
  import os
2
+ import io
3
  import time
4
+ import traceback
5
+ from typing import Optional, Tuple
6
+
7
  import gradio as gr
8
  from PIL import Image
9
+
10
  import torch
11
  import torchvision.transforms as T
12
  import torchvision.models as models
13
+
14
+ try:
15
+ from openai import OpenAI
16
+ except Exception:
17
+ OpenAI = None # gestiamo assenza pacchetto elegantemente
18
+
19
+ import spaces # ZeroGPU decorator
20
+
21
 
22
  # ======================
23
  # Config / Model / Classes
 
34
  5: "pitting_corrosion",
35
  6: "stress_corrosion",
36
  7: "under_insulation_corrosion",
37
+ 8: "uniform_corrosion",
38
  }
39
 
40
  ZONES = [
 
47
  "Cargo holds / Dry bulk",
48
  "Engine room / Hot surfaces",
49
  "Pipes / Under insulation (UIC/CUI)",
50
+ "Other / Not sure",
51
  ]
52
 
53
+ # ======================
54
+ # Model load (CPU default)
55
+ # ======================
56
+
57
  def load_model_cpu():
58
  m = models.resnet50(weights=None)
59
  num_ftrs = m.fc.in_features
 
63
  m.eval()
64
  return m
65
 
66
+ print("[BOOT] Loading model on CPU…")
67
  model_cpu = load_model_cpu()
68
 
69
  transform = T.Compose([
 
75
  ])
76
 
77
  # ======================
78
+ # OpenAI Assistant (optional)
79
  # ======================
80
 
81
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
 
 
 
 
 
82
  ASSISTANT_ID = os.environ.get("PPG_ASSISTANT_ID", "asst_20DNMEENkfBsYupFjPCwfijZ")
83
  VECTOR_STORE_ID = os.environ.get("PPG_VECTOR_STORE_ID", "")
84
  APP_FORCE_LANG = os.environ.get("APP_FORCE_LANG", "").strip()
85
 
86
+ client = None
87
+ assistant_enabled = False
88
+ if OPENAI_API_KEY and OpenAI is not None:
89
+ try:
90
+ client = OpenAI(api_key=OPENAI_API_KEY)
91
+ assistant_enabled = True
92
+ print("[BOOT] OpenAI client initialized.")
93
+ except Exception as e:
94
+ print("[BOOT][WARN] OpenAI init failed:", e)
95
+
96
+ def _assistant_safe() -> bool:
97
+ return bool(assistant_enabled and client is not None and ASSISTANT_ID)
98
+
99
  # ======================
100
  # Inference utils (CPU/GPU)
101
  # ======================
102
 
103
+ def predict_on_cpu(img_pil: Image.Image) -> Tuple[str, float]:
104
  x = transform(img_pil.convert("RGB")).unsqueeze(0)
105
  with torch.no_grad():
106
  logits = model_cpu(x)
 
109
  return IDX2LABEL.get(idx, f"class_{idx}"), float(probs[idx])
110
 
111
  @spaces.GPU(duration=60)
112
+ def predict_on_gpu(img_pil: Image.Image) -> Tuple[str, float]:
113
  device = "cuda"
114
  m = models.resnet50(weights=None)
115
  num_ftrs = m.fc.in_features
 
125
  idx = int(probs.argmax())
126
  return IDX2LABEL.get(idx, f"class_{idx}"), float(probs[idx])
127
 
128
+ def predict_image(image: Image.Image) -> Tuple[str, float]:
129
  try:
130
  if torch.cuda.is_available():
131
  return predict_on_gpu(image)
132
+ except Exception as e:
133
+ print("[GPU][WARN] Falling back to CPU:", e)
134
  return predict_on_cpu(image)
135
 
136
  # ======================
137
+ # Assistant calls (with optional image)
138
  # ======================
139
 
140
def call_assistant(
    label: str,
    confidence: float,
    zone: str,
    note: str,
    user_question: str,
    image: Optional[Image.Image],
    thread_id: Optional[str] = None,
    max_wait_s: int = 45,
) -> Tuple[str, str]:
    """Send one turn to the OpenAI Assistant and wait for its reply.

    Args:
        label: CNN classification label (e.g. "pitting_corrosion").
        confidence: softmax probability for *label*, in [0, 1].
        zone: ship zone selected by the user (may be empty).
        note: free-text user note (may be empty).
        user_question: question for this turn; defaults to an initial advisory.
        image: optional PIL image to attach for visual analysis.
        thread_id: existing thread to continue; a new one is created when falsy.
        max_wait_s: hard timeout (seconds) for run polling.

    Returns:
        (reply_text, thread_id). Never raises: every failure path returns a
        human-readable error string so the UI pipeline keeps working.
    """
    if not _assistant_safe():
        return ("[Assistant disabled] No OPENAI_API_KEY or client not available. "
                "Model classification shown above.", thread_id or "")

    try:
        # Create a thread on first call, binding the vector store (when
        # configured) so File Search can ground the answers.
        if not thread_id:
            if VECTOR_STORE_ID:
                thread = client.beta.threads.create(
                    tool_resources={"file_search": {"vector_store_ids": [VECTOR_STORE_ID]}}
                )
            else:
                thread = client.beta.threads.create()
            thread_id = thread.id

        # Textual context for this turn.
        core_context = (
            f"Classification: {label} ({round(confidence*100,2)}%).\n"
            f"Zone: {zone or 'Not specified'}.\n"
            f"User note: {note or '(none)'}.\n"
        )
        user_payload = core_context + "\nUser question:\n" + (user_question or "Provide initial advisory.")

        # Multi-part message content. BUG FIX: the Assistants (threads) API
        # accepts part types "text" and "image_file"; the previous
        # "input_text"/"input_image" types belong to the Responses API and
        # are rejected by threads.messages.create.
        content = [{"type": "text", "text": user_payload}]

        if image is not None:
            buf = io.BytesIO()
            image.convert("RGB").save(buf, format="PNG", optimize=True)
            buf.seek(0)
            # Name the in-memory buffer so the SDK can infer a filename/MIME
            # type for the upload.
            buf.name = "corrosion.png"
            # BUG FIX: image attachments used as visual input must be
            # uploaded with purpose="vision" (purpose="assistants" is for
            # File Search / code interpreter files).
            uploaded = client.files.create(file=buf, purpose="vision")
            content.append({"type": "image_file", "image_file": {"file_id": uploaded.id}})

        client.beta.threads.messages.create(
            thread_id=thread_id,
            role="user",
            content=content
        )

        second_lang_clause = (
            f"Then provide the same content in {APP_FORCE_LANG}."
            if APP_FORCE_LANG else
            "Then repeat in the user's language if detectable from note; else in Italian."
        )

        extra_instructions = (
            "Act as a PPG marine coatings technical specialist for ships (marine environments only). "
            "Answer ONLY using information found in the attached docs via File Search. "
            "If docs lack details, reply 'Not in docs'. "
            "ALWAYS ask for the zone if missing before prescribing. "
            "Structure: Diagnosis; Surface Preparation; System; Notes; Disclaimer. "
            "Provide first in English. " + second_lang_clause
        )

        run = client.beta.threads.runs.create(
            thread_id=thread_id,
            assistant_id=ASSISTANT_ID,
            instructions=extra_instructions,
        )

        # Poll with a hard timeout so a stuck run cannot hang the UI forever.
        t0 = time.time()
        while True:
            r = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
            if r.status in ["completed", "failed", "cancelled", "expired"]:
                break
            if time.time() - t0 > max_wait_s:
                print("[Assistant][WARN] Timeout waiting run.")
                break
            time.sleep(0.7)

        # messages.list returns newest first, so the first assistant text
        # part encountered is the latest reply.
        msgs = client.beta.threads.messages.list(thread_id=thread_id)
        reply = None
        for m in msgs.data:
            if m.role == "assistant":
                for part in m.content:
                    if getattr(part, "type", "") == "text":
                        reply = part.text.value
                        break
            if reply:
                break

        if not reply:
            reply = "[Assistant] No reply received."

        return reply, thread_id

    except Exception as e:
        print("[Assistant][ERROR]", e)
        traceback.print_exc()
        return ("[Assistant error] " + str(e) + "\nProceed using model result only.", thread_id or "")
 
 
 
 
 
 
 
 
 
 
 
 
 
244
 
245
  # ======================
246
+ # Pipelines (generator) SAFE
247
  # ======================
248
 
249
def run_analysis(image, note, zone, chat_history, thread_state):
    """Safe generator pipeline: validate input, classify, consult the Assistant.

    Every exception is caught and something is always yielded, so the UI
    overlay is never left hanging. Each yield is a 3-tuple
    (markdown_text, chat_history, thread_state) matching the click outputs.
    """
    # NOTE(review): gr.Progress is normally injected as a default-valued
    # function parameter, not used as a context manager — confirm this
    # `with` usage is supported by the installed Gradio version.
    with gr.Progress() as prog:
        try:
            prog(0.03, desc="Checking input")
            if image is None:
                yield "No image received.", chat_history, thread_state
                return

            # A concrete zone is required before prescribing anything.
            if not zone or zone == "Other / Not sure":
                yield "**Please select the area/zone first.**", chat_history, thread_state
                return

            # Immediate feedback before the slow steps start.
            yield "**Analyzing image...** Please wait.", chat_history, thread_state

            prog(0.18, desc="Preprocessing")
            time.sleep(0.05)

            prog(0.50, desc="Classifying (ResNet50)")
            label, conf = predict_image(image)

            prog(0.72, desc="Consulting PPG Assistant")
            # Reuse an existing thread when present so the conversation
            # (and any previously attached image) is continued.
            reply, thread_id = call_assistant(
                label=label,
                confidence=conf,
                zone=zone,
                note=note or "",
                user_question="Provide initial advisory.",
                image=image,
                thread_id=(thread_state or {}).get("thread_id")
            )

            header = f"**Model result:** `{label}` — confidence **{round(conf*100,2)}%**\n\n"
            out_text = header + (reply or "")
            # Copy the history so the incoming state object is not mutated.
            new_history = (chat_history[:] if chat_history else [])
            if reply:
                new_history.append(("", reply))

            prog(1.0, desc="Done")

            yield out_text, new_history, {
                "thread_id": thread_id,
                "label": label,
                "confidence": conf,
                "zone": zone or "",
            }

        except Exception as e:
            # Never leave the overlay up forever: surface the error instead.
            print("[Pipeline][ERROR]", e)
            traceback.print_exc()
            err = f"**Error during analysis**:\n```\n{e}\n```\nCheck logs/keys and try again."
            yield err, chat_history, thread_state or {}
306
 
307
def continue_chat(user_msg, chat_history, thread_state, note, zone):
    """Handle one follow-up chat turn; returns (chat_history, cleared_textbox).

    Reads the last analysis result (label/confidence/zone/thread_id) from
    thread_state so the Assistant keeps full context. Never raises: errors
    are appended to the chat as an "[Error] ..." message.
    """
    # Ignore empty / whitespace-only submissions.
    if not user_msg or not user_msg.strip():
        return chat_history, ""
    # NOTE(review): gr.Progress is normally injected as a default-valued
    # parameter rather than used as a context manager — confirm this works
    # on the installed Gradio version.
    with gr.Progress() as prog:
        try:
            prog(0.2, desc="Sending")
            label = (thread_state or {}).get("label") or "unknown"
            conf = (thread_state or {}).get("confidence") or 0.0
            # Prefer the live dropdown value, fall back to the stored one.
            current_zone = zone or (thread_state or {}).get("zone") or "Not specified"
            thread_id = (thread_state or {}).get("thread_id")

            prog(0.7, desc="Consulting PPG Assistant")
            reply, thread_id = call_assistant(
                label=label,
                confidence=conf,
                zone=current_zone,
                note=note or "",
                user_question=user_msg,
                image=None,  # the thread already holds the last analysis image
                thread_id=thread_id
            )

            chat_history = chat_history or []
            chat_history.append((user_msg, reply))
            # Persist the (possibly newly created) thread id in place.
            if isinstance(thread_state, dict):
                thread_state["thread_id"] = thread_id

            prog(1.0, desc="Done")
            return chat_history, ""

        except Exception as e:
            print("[Chat][ERROR]", e)
            traceback.print_exc()
            chat_history = chat_history or []
            chat_history.append((user_msg, f"[Error] {e}"))
            return chat_history, ""
343
 
344
  # ======================
345
  # UI
 
349
  # Corrosion Assistant — Beta
350
 
351
  **Welcome!**
352
+ This model is trained for educational purpose only. Some classes still weak (crevice, galvanic).
353
+ **Disclaimer**: research & experimental only. Validate with official PPG specs.
 
 
354
  """
355
 
 
356
  LOADER_HTML = """
357
  <div id="overlay-mask" style="
358
  position: fixed; inset: 0; background: rgba(0,0,0,0.55);
 
361
  ">
362
  <div style="background:#111; color:#fff; padding:24px 28px; border-radius:16px;
363
  font-family: ui-sans-serif, system-ui, -apple-system; text-align:center;
364
+ box-shadow: 0 10px 30px rgba(0,0,0,0.5); max-width: 360px;">
365
  <div class="spinner" style="
366
  width:48px;height:48px;border:4px solid #444;border-top-color:#fff;border-radius:50%;
367
+ margin:0 auto 14px; animation: spin 0.9s linear infinite;"></div>
368
+ <div style="font-size:16px; font-weight:700;">Elaborazione in corso…</div>
369
+ <div style="opacity:0.9; font-size:12px; margin-top:6px;">Potrebbe richiedere alcuni secondi.</div>
370
  </div>
371
  </div>
372
+ <style>@keyframes spin { to { transform: rotate(360deg); } }</style>
 
 
373
  """
374
 
 
375
def _show_overlay_and_busy():
    """Show the blocking overlay and put the Analyze button in a busy state."""
    overlay_update = gr.update(visible=True)
    button_update = gr.update(interactive=False, value="🔄 Analyzing…")
    return overlay_update, button_update
377
 
378
def _hide_overlay_and_idle():
    """Hide the overlay and restore the Analyze button to its idle state."""
    overlay_update = gr.update(visible=False)
    button_update = gr.update(interactive=True, value="Analyze image")
    return overlay_update, button_update
 
381
  with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
382
  gr.Markdown(WELCOME)
383
 
 
384
  overlay = gr.HTML(LOADER_HTML, visible=False)
 
385
 
386
  with gr.Row():
387
  with gr.Column(scale=2):
388
+ img = gr.Image(type="pil", sources=["upload", "webcam"], label="Upload or webcam")
 
 
389
  note = gr.Textbox(label="Notes / Context (optional)")
390
  zone = gr.Dropdown(choices=ZONES, label="Zone (indicative)", value="Other / Not sure")
391
  analyze_btn = gr.Button("Analyze image", variant="primary")
 
402
  clear_btn = gr.Button("Clear chat")
403
  with gr.Column(scale=2):
404
  gr.Markdown(
405
+ "> **Privacy note:** If enabled, the image is sent to OpenAI to allow visual analysis. "
406
+ "Disable API key to skip assistant."
407
  )
408
 
409
  chat_state = gr.State([])
410
  thread_state = gr.State({"thread_id": None, "label": None, "confidence": 0.0, "zone": ""})
411
 
412
+ # Catena robusta: overlay ON -> run -> overlay OFF -> sync chat
413
  analyze_btn.click(
414
  fn=_show_overlay_and_busy,
415
  inputs=[],
 
419
  fn=run_analysis,
420
  inputs=[img, note, zone, chat_state, thread_state],
421
  outputs=[out_md, chat_state, thread_state],
422
+ show_progress=True
423
  ).then(
424
  fn=_hide_overlay_and_idle,
425
  inputs=[],
 
449
  demo.api_mode = "enabled"
450
 
451
  if __name__ == "__main__":
452
+ # in Space, Gradio gestisce host/porta; local dev ok
453
  demo.launch()
454