jacopo22295 committed on
Commit
193efac
·
verified ·
1 Parent(s): e0197e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -195
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  import gradio as gr
3
  from PIL import Image
4
  import torch
@@ -59,19 +60,20 @@ transform = T.Compose([
59
  ])
60
 
61
  # ======================
62
- # OpenAI client
63
  # ======================
64
 
65
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
66
  if not OPENAI_API_KEY:
67
- print("WARNING: OPENAI_API_KEY not set. Set it in HF Space Secrets.")
68
 
69
  client = OpenAI(api_key=OPENAI_API_KEY)
70
- OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "gpt-4o-mini")
71
- APP_FORCE_LANG = os.environ.get("APP_FORCE_LANG", "").strip()
 
72
 
73
  # ======================
74
- # Helpers: prediction + PPG advice
75
  # ======================
76
 
77
  def predict_image(image: Image.Image):
@@ -85,193 +87,92 @@ def predict_image(image: Image.Image):
85
  confidence = float(probs[idx])
86
  return label, confidence
87
 
88
- def zone_recommendation(zone: str, label: str):
89
  """
90
- Indicative, non-binding. Placeholder PPG-style suggestions by zone + corrosion label.
91
- No warranties, research-only, always verify local specs and standards.
92
  """
93
- # Surface prep suggestions (very generic)
94
- prep = {
95
- "light": "Mechanical St 3 or power tool clean to sound substrate; remove salts/oil; profile ~25–50 μm.",
96
- "medium": "Abrasive blast to ISO 8501-1 Sa 2 or Sa 2½; profile 50–75 μm; soluble salts to low levels.",
97
- "heavy": "High standard abrasive blast Sa 2½; profile 75–100 μm; thorough decontamination and edge rounding."
98
- }
99
-
100
- # Very simplified decisioning
101
- zone = zone or "Other / Not sure"
102
- base = []
103
- prep_note = prep["medium"]
104
-
105
- if zone == "Below waterline (hull/AF area)":
106
- base.append("Primer: epoxy anticorrosive (e.g., Sigmaprime 200).")
107
- base.append("Tie-coat: epoxy tie-coat where required.")
108
- base.append("AF: Ecofleet 530 or silicone-based (e.g., SigmaGlide 2390) depending on performance needs.")
109
- prep_note = prep["medium"]
110
-
111
- elif zone == "Waterline / Splash zone":
112
- base.append("High-build epoxy barrier (e.g., Sigmashield 1200) + polyurethane finish (Sigmadur 550) above WL.")
113
- base.append("Consider elastomeric or glass-flake epoxy in severe splash.")
114
- prep_note = prep["heavy"]
115
-
116
- elif zone == "Topsides / Boot-top":
117
- base.append("Epoxy build (Sigmacover 456) + polyurethane finish (Sigmadur 550).")
118
- prep_note = prep["medium"]
119
-
120
- elif zone == "Deck / Weather deck":
121
- base.append("Abrasion-resistant epoxy (Sigmashield 1200) + non-skid where needed + PU topcoat.")
122
- prep_note = prep["heavy"]
123
-
124
- elif zone == "Superstructure / Accommodation":
125
- base.append("Epoxy primer (Sigmacover 280/456) + PU finish (Sigmadur 550 / One).")
126
- prep_note = prep["light"]
127
-
128
- elif zone == "Ballast tanks (immersed)":
129
- base.append("Certified tank lining epoxy system (consult tank-lining spec).")
130
- prep_note = prep["heavy"]
131
-
132
- elif zone == "Cargo holds / Dry bulk":
133
- base.append("Impact/abrasion resistant epoxy lining; verify cargo compatibility.")
134
- prep_note = prep["heavy"]
135
-
136
- elif zone == "Engine room / Hot surfaces":
137
- base.append("Heat-resistant coating per temperature class; check T-ratings.")
138
- prep_note = prep["medium"]
139
-
140
- elif zone == "Pipes / Under insulation (UIC/CUI)":
141
- base.append("CUI-rated epoxy/novolac system; consider high temp and cyclic wet/dry.")
142
- prep_note = prep["heavy"]
143
-
144
- else:
145
- base.append("Generic: epoxy primer + epoxy build + PU finish. Validate per environment.")
146
- prep_note = prep["medium"]
147
-
148
- # Tiny tweak by corrosion label for nuance
149
- extra = []
150
- if label in ["pitting_corrosion", "crevice_corrosion"]:
151
- extra.append("Pay attention to edges, welds, crevices; stripe coat before full coats.")
152
- if label in ["galvanic_corrosion"]:
153
- extra.append("Isolate dissimilar metals; ensure dielectric barrier.")
154
- if label in ["mic_corrosion"]:
155
- extra.append("Address microbiological contamination; thorough cleaning/sanitization.")
156
- if label in ["under_insulation_corrosion"]:
157
- extra.append("Remove/inspect insulation; ensure dry, sealed design before recoating.")
158
-
159
- rec = "- " + "\n- ".join(base + extra) if base else "Consult dedicated spec."
160
- return rec, prep_note
161
-
162
- # ======================
163
- # OpenAI prompts
164
- # ======================
165
-
166
- def build_dual_lang_prompt(label: str, confidence: float, user_note: str):
167
- # Second language selection logic
168
- if APP_FORCE_LANG:
169
- second_lang_clause = f"Then provide the same content in {APP_FORCE_LANG}."
170
- else:
171
- if user_note and user_note.strip():
172
- second_lang_clause = "Then repeat the same content in the same language used by the user note."
173
- else:
174
- second_lang_clause = "Then repeat the same content in Italian."
175
-
176
- return f"""
177
- You are a marine coatings technical assistant (PPG style). Image classification:
178
- - Corrosion type: {label}
179
- - Model confidence: {round(confidence*100,2)}%
180
-
181
- Provide a technical advisory in two parts:
182
-
183
- 1) English section (concise, bullets):
184
- - Diagnosis
185
- - Likely causes
186
- - Generic paint system suggestion (no unrealistic promises)
187
- - Warnings about substrate condition / surface preparation
188
 
189
- 2) {second_lang_clause}
 
 
 
 
190
 
191
- User note (optional): {user_note or "(none)"}
 
 
 
 
 
192
 
193
- Be pragmatic and brief.
194
- """
 
 
 
 
 
195
 
196
- def openai_answer(messages):
197
- """Call OpenAI with a list of messages [{'role': 'user'|'assistant'|'system', 'content': str}, ...]."""
198
- try:
199
- resp = client.chat.completions.create(
200
- model=OPENAI_MODEL,
201
- messages=messages,
202
- temperature=0.3
203
- )
204
- return resp.choices[0].message.content.strip()
205
- except Exception as e:
206
- return f"OpenAI error: {e}"
207
 
208
  # ======================
209
- # Pipelines: analysis + chat
210
  # ======================
211
 
212
- def run_analysis(image, note, zone, history):
213
  if image is None:
214
- return "No image received.", history, {"label": None, "confidence": 0.0, "zone": zone or ""}
215
 
216
  label, conf = predict_image(image)
 
 
217
 
218
- # Dual-language advisory
219
- initial_prompt = build_dual_lang_prompt(label, conf, note or "")
220
- advisory = openai_answer([{"role": "user", "content": initial_prompt}])
221
-
222
- # Zone-based recommendation block
223
- rec, prep_note = zone_recommendation(zone or "", label)
224
  header = f"**Model result:** `{label}` — confidence **{round(conf*100,2)}%**\n\n"
225
- zone_blk = f"### Zone-based indicative advice\n- **Zone:** {zone or 'Not specified'}\n- **Prep:** {prep_note}\n{rec}\n\n"
226
- disclaimer = "> **Disclaimer:** Research & experimental use only. No warranty. Validate all advice with certified specs and local regulations.\n"
227
-
228
- # UI markdown
229
- out_text = header + advisory + "\n\n" + zone_blk + disclaimer
230
-
231
- # Initialize/seed chat with the first assistant message (advisory)
232
- new_history = history or []
233
- new_history.append(("", f"{advisory}\n\n{zone_blk}\n{disclaimer}"))
234
-
235
- # Save last result in a dict state
236
- result_state = {"label": label, "confidence": conf, "zone": zone or ""}
237
-
238
- return out_text, new_history, result_state
239
-
240
- def continue_chat(user_msg, history, result_state, user_note, zone):
241
- if not user_msg or not user_msg.strip():
242
- return history, ""
243
 
244
- label = (result_state or {}).get("label") or "unknown"
245
- conf = (result_state or {}).get("confidence") or 0.0
246
- current_zone = zone or (result_state or {}).get("zone") or "Not specified"
247
 
248
- # Provide a focused system prompt to keep bot on-topic
249
- system_prompt = f"""
250
- You are a corrosion & marine coatings assistant. Stay on topic (corrosion, surface prep, PPG-style paint systems).
251
- Context:
252
- - Last classification: {label} (confidence {round(conf*100,2)}%)
253
- - Zone: {current_zone}
254
- - Note: {user_note or "(none)"}
255
 
256
- When recommending, be indicative and short, and include surface preparation standards where relevant (e.g., ISO 8501-1).
257
- Always include a short safety/legal disclaimer at the end: "Research use only; verify with official specs."
258
- """
259
-
260
- # Convert history (list[tuple(user, assistant)]) to messages
261
- msgs = [{"role": "system", "content": system_prompt}]
262
- for u, a in history:
263
- if u:
264
- msgs.append({"role": "user", "content": u})
265
- if a:
266
- msgs.append({"role": "assistant", "content": a})
267
 
268
- msgs.append({"role": "user", "content": user_msg})
 
 
 
269
 
270
- reply = openai_answer(msgs)
271
 
272
- # Update history
273
- history.append((user_msg, reply))
274
- return history, "" # clear textbox
275
 
276
  # ======================
277
  # UI
@@ -280,12 +181,12 @@ Always include a short safety/legal disclaimer at the end: "Research use only; v
280
  WELCOME = """
281
  # Corrosion Assistant — Beta
282
 
283
- **Welcome!** This demo runs a custom **ResNet50 corrosion classifier** and a chat assistant on top.
284
- - **Model**: ResNet50 classifier, **trained locally** on **~9,000 images**.
285
- - **Data collection**: a public **contribution link** for new images will open **soon**.
286
- - **Scope**: research & experimental only. No professional advice, no warranty.
287
 
288
- > This interface first returns an English advisory, then repeats in your input language (or Italian if none).
289
  """
290
 
291
  with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
@@ -294,46 +195,43 @@ with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
294
  with gr.Row():
295
  with gr.Column(scale=2):
296
  img = gr.Image(type="pil", sources=["upload","webcam"], label="Upload or webcam")
297
- note = gr.Textbox(label="Notes / Context (optional)", placeholder="Write in your language. EN comes first, then your language.")
298
  zone = gr.Dropdown(choices=ZONES, label="Zone (indicative)", value="Other / Not sure")
299
  analyze_btn = gr.Button("Analyze image", variant="primary")
300
  with gr.Column(scale=3):
301
  out_md = gr.Markdown(label="Analysis")
302
 
303
- gr.Markdown(
304
- "### Continue the conversation\nAsk follow-up questions. The assistant will use the last classification and the selected zone."
305
- )
306
 
307
  with gr.Row():
308
  with gr.Column(scale=3):
309
  chat = gr.Chatbot(height=320, label="Advisor chat")
310
- chat_in = gr.Textbox(label="Type your message", placeholder="Ask about prep, products, recoats, etc.")
311
  send_btn = gr.Button("Send")
312
  clear_btn = gr.Button("Clear chat")
313
  with gr.Column(scale=2):
314
  gr.Markdown(
315
- "> **Disclaimer:** Research & experimental use only. Validate with certified specifications, class rules and local regulations. "
316
  "No professional advice is provided and no responsibility is assumed."
317
  )
318
 
319
  # States
320
- chat_state = gr.State([]) # list of (user, assistant)
321
- result_state = gr.State({"label": None, "confidence": 0.0, "zone": ""})
322
 
323
- # Wire up
324
  analyze_btn.click(
325
  fn=run_analysis,
326
- inputs=[img, note, zone, chat_state],
327
- outputs=[out_md, chat_state, result_state]
328
  ).then(
329
- lambda h: h, # reflect chat state to UI
330
  inputs=[chat_state],
331
  outputs=[chat]
332
  )
333
 
334
  send_btn.click(
335
  fn=continue_chat,
336
- inputs=[chat_in, chat_state, result_state, note, zone],
337
  outputs=[chat, chat_in]
338
  )
339
 
@@ -343,13 +241,8 @@ with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
343
  outputs=[chat, chat_in]
344
  )
345
 
346
- gr.Markdown(
347
- "<small>Tip: keep photos under ~2MB. Secrets are stored server-side. "
348
- "The HF outer shell cannot be customized; this inner Gradio UI can.</small>"
349
- )
350
-
351
- # Programmatic API for /api/predict etc.
352
  demo.api_mode = "enabled"
353
 
354
  if __name__ == "__main__":
355
  demo.launch()
 
 
1
  import os
2
+ import time
3
  import gradio as gr
4
  from PIL import Image
5
  import torch
 
60
  ])
61
 
62
  # ======================
63
+ # OpenAI client & Assistant
64
  # ======================
65
 
66
# Read the OpenAI API key from the environment (HF Space Secrets).
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    print("WARNING: OPENAI_API_KEY not set. Add it in HF Space Secrets.")

client = OpenAI(api_key=OPENAI_API_KEY)

# Dedicated PPG Assistant ID. Overridable via env var so other deployments
# can point at their own assistant without editing source.
ASSISTANT_ID = os.environ.get("ASSISTANT_ID", "asst_20DNMEENkfBsYupFjPCwfijZ")
74
 
75
  # ======================
76
+ # Helpers
77
  # ======================
78
 
79
  def predict_image(image: Image.Image):
 
87
  confidence = float(probs[idx])
88
  return label, confidence
89
 
90
def call_assistant(label, confidence, zone, note, user_question, thread_id=None, timeout_s=60.0):
    """Send the classifier context plus a user question to the OpenAI Assistant.

    If thread_id is None a new conversation thread is created, so follow-up
    calls can pass the returned id to keep context.

    Args:
        label: corrosion class predicted by the local model.
        confidence: model confidence in [0, 1].
        zone: ship zone selected in the UI (may be empty).
        note: optional free-text user note.
        user_question: the question to forward to the assistant.
        thread_id: existing thread to continue, or None to start fresh.
        timeout_s: max seconds to wait for the run before giving up.

    Returns:
        (reply, thread_id): assistant text (or an error note) and the thread
        id to reuse on the next turn.
    """
    # Create the conversation thread lazily on first use.
    if not thread_id:
        thread = client.beta.threads.create()
        thread_id = thread.id

    # User message carrying the classification context and the question.
    context = f"""
    Classification result: {label} ({round(confidence*100,2)}%).
    Zone: {zone or "Not specified"}.
    Note: {note or "(none)"}.

    User question:
    {user_question}

    Always act as a PPG marine coatings technical specialist.
    Keep answers concise, technical, and always add:
    "Research use only; verify with official PPG specs."
    """
    client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=context
    )

    # Run the assistant on the thread.
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=ASSISTANT_ID
    )

    # Poll until the run reaches a terminal state; bound the wait so a stuck
    # run cannot hang the Gradio callback forever.
    deadline = time.monotonic() + timeout_s
    while True:
        r = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
        if r.status in ("completed", "failed", "cancelled", "expired"):
            break
        if time.monotonic() > deadline:
            return "Assistant timed out; please retry.", thread_id
        time.sleep(0.8)

    # Surface non-success terminal states instead of silently returning
    # whatever older message happens to be on the thread.
    if r.status != "completed":
        return f"Assistant run ended with status: {r.status}.", thread_id

    # Ask for newest-first explicitly and take the latest assistant reply.
    msgs = client.beta.threads.messages.list(thread_id=thread_id, order="desc")
    reply = None
    for m in msgs.data:
        if m.role == "assistant":
            reply = m.content[0].text.value
            break

    return reply or "No reply from Assistant.", thread_id
 
 
 
 
 
 
 
 
 
 
141
 
142
  # ======================
143
+ # Pipelines
144
  # ======================
145
 
146
def run_analysis(image, note, zone, chat_history, thread_state):
    """Classify the uploaded image and seed the advisor chat with a first reply.

    Returns (markdown_summary, chat_history, thread_state_dict) for the UI.
    """
    # Guard clause: nothing to analyze.
    if image is None:
        return "No image received.", chat_history, thread_state

    label, conf = predict_image(image)

    # Kick off the assistant thread with an initial advisory request.
    reply, thread_id = call_assistant(
        label, conf, zone, note,
        "Provide initial advisory based on classification and note.",
    )

    # Markdown shown in the analysis panel: model result line + advisory text.
    result_line = f"**Model result:** `{label}` — confidence **{round(conf*100,2)}%**\n\n"
    markdown_out = result_line + (reply or "")

    # Seed the chatbot with the advisory as the first assistant turn.
    history = chat_history if chat_history else []
    history.append(("", reply))

    state = {"thread_id": thread_id, "label": label, "confidence": conf, "zone": zone or ""}
    return markdown_out, history, state
 
 
 
 
 
 
161
 
162
def continue_chat(user_msg, chat_history, thread_state, note, zone):
    """Forward a follow-up question to the assistant thread and append the turn.

    Returns (updated_chat_history, "") — the empty string clears the input box.
    """
    # Ignore empty submissions; user_msg can be None before any input,
    # so guard before calling .strip() (calling it on None would crash).
    if not user_msg or not user_msg.strip():
        return chat_history, ""

    # Pull context saved by the last analysis; tolerate a missing state dict.
    state = thread_state or {}
    label = state.get("label") or "unknown"
    conf = state.get("confidence") or 0.0
    current_zone = zone or state.get("zone") or "Not specified"
    thread_id = state.get("thread_id")

    reply, thread_id = call_assistant(label, conf, current_zone, note, user_msg, thread_id)

    # Record the exchange and remember the thread for subsequent turns.
    chat_history = chat_history if chat_history is not None else []
    chat_history.append((user_msg, reply))
    if thread_state is not None:
        thread_state["thread_id"] = thread_id
    return chat_history, ""
176
 
177
  # ======================
178
  # UI
 
181
  WELCOME = """
182
  # Corrosion Assistant — Beta
183
 
184
+ **Welcome!** This demo runs a custom **ResNet50 corrosion classifier** and connects to a dedicated **PPG Assistant** on OpenAI.
185
+ - **Model**: ResNet50 classifier, **trained locally** on ~9,000 images
186
+ - **Data collection**: a public link for contributing images will open soon
187
+ - **Disclaimer**: research & experimental only. No professional advice, no warranty.
188
 
189
+ After image analysis you can continue chatting with the assistant.
190
  """
191
 
192
  with gr.Blocks(title="Corrosion Assistant", theme=gr.themes.Soft()) as demo:
 
195
  with gr.Row():
196
  with gr.Column(scale=2):
197
  img = gr.Image(type="pil", sources=["upload","webcam"], label="Upload or webcam")
198
+ note = gr.Textbox(label="Notes / Context (optional)", placeholder="Write in your language.")
199
  zone = gr.Dropdown(choices=ZONES, label="Zone (indicative)", value="Other / Not sure")
200
  analyze_btn = gr.Button("Analyze image", variant="primary")
201
  with gr.Column(scale=3):
202
  out_md = gr.Markdown(label="Analysis")
203
 
204
+ gr.Markdown("### Continue the conversation with the PPG Assistant")
 
 
205
 
206
  with gr.Row():
207
  with gr.Column(scale=3):
208
  chat = gr.Chatbot(height=320, label="Advisor chat")
209
+ chat_in = gr.Textbox(label="Your message", placeholder="Ask about prep, products, recoats, etc.")
210
  send_btn = gr.Button("Send")
211
  clear_btn = gr.Button("Clear chat")
212
  with gr.Column(scale=2):
213
  gr.Markdown(
214
+ "> **Disclaimer:** Research & experimental use only. Validate with certified PPG specs and local regulations. "
215
  "No professional advice is provided and no responsibility is assumed."
216
  )
217
 
218
  # States
219
+ chat_state = gr.State([])
220
+ thread_state = gr.State({"thread_id": None, "label": None, "confidence": 0.0, "zone": ""})
221
 
 
222
  analyze_btn.click(
223
  fn=run_analysis,
224
+ inputs=[img, note, zone, chat_state, thread_state],
225
+ outputs=[out_md, chat_state, thread_state]
226
  ).then(
227
+ lambda h: h,
228
  inputs=[chat_state],
229
  outputs=[chat]
230
  )
231
 
232
  send_btn.click(
233
  fn=continue_chat,
234
+ inputs=[chat_in, chat_state, thread_state, note, zone],
235
  outputs=[chat, chat_in]
236
  )
237
 
 
241
  outputs=[chat, chat_in]
242
  )
243
 
 
 
 
 
 
 
244
  demo.api_mode = "enabled"
245
 
246
  if __name__ == "__main__":
247
  demo.launch()
248
+