petter2025 commited on
Commit
8b3c90f
·
verified ·
1 Parent(s): a7e82b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +129 -112
app.py CHANGED
@@ -10,7 +10,7 @@ import pandas as pd
10
  from datetime import datetime
11
 
12
  # ----------------------------------------------------------------------
13
- # Logging setup (MUST be early so logger is defined before any imports that use it)
14
  # ----------------------------------------------------------------------
15
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
16
  logger = logging.getLogger(__name__)
@@ -110,13 +110,15 @@ from diffusers import StableDiffusionPipeline
110
  image_pipe = None
111
  try:
112
  image_pipe = StableDiffusionPipeline.from_pretrained(
113
- "hf-internal-testing/tiny-stable-diffusion-torch"
 
114
  )
115
  if not torch.cuda.is_available():
116
  image_pipe.to("cpu")
117
  logger.info("Image pipeline loaded.")
118
  except Exception as e:
119
  logger.warning(f"Image pipeline load failed (will be disabled): {e}")
 
120
 
121
  # ----------------------------------------------------------------------
122
  # Audio transcription (Whisper tiny)
@@ -173,7 +175,6 @@ else:
173
 
174
  # ----------------------------------------------------------------------
175
  # Helper: update risk with feedback (global state – shared across users)
176
- # For per‑session risk, use gr.State instead of globals.
177
  # ----------------------------------------------------------------------
178
  last_task_category = None
179
 
@@ -193,6 +194,7 @@ async def handle_text(task_type, prompt):
193
  global last_task_category
194
  last_task_category = task_type
195
  try:
 
196
  response, avg_log_prob = generate_with_logprobs(prompt)
197
  retrieval_score = retriever.get_similarity(prompt)
198
  event = AIEvent(
@@ -210,7 +212,7 @@ async def handle_text(task_type, prompt):
210
  prompt=prompt,
211
  response=response,
212
  response_length=len(response),
213
- confidence=float(np.exp(avg_log_prob)), # convert to [0,1] scale (approx)
214
  perplexity=None,
215
  retrieval_scores=[retrieval_score],
216
  user_feedback=None,
@@ -229,8 +231,8 @@ async def handle_text(task_type, prompt):
229
  "risk_metrics": risk_metrics
230
  }
231
  except Exception as e:
232
- logger.error(f"Text task error: {e}")
233
- return {"error": str(e)}
234
 
235
  async def handle_image(prompt, steps):
236
  """Handle image generation with configurable steps. Returns (image, json_data)."""
@@ -257,9 +259,9 @@ async def handle_image(prompt, steps):
257
  model_name="tiny-sd",
258
  model_version="latest",
259
  prompt=prompt,
260
- response="", # not text
261
  response_length=0,
262
- confidence=1.0 / (gen_time + 1), # heuristic
263
  perplexity=None,
264
  retrieval_scores=[retrieval_score, gen_time],
265
  user_feedback=None,
@@ -273,8 +275,8 @@ async def handle_image(prompt, steps):
273
  }
274
  return image, json_data
275
  except Exception as e:
276
- logger.error(f"Image task error: {e}")
277
- return None, {"error": str(e)}
278
 
279
  async def handle_audio(audio_file):
280
  """Handle audio transcription and quality analysis."""
@@ -282,17 +284,27 @@ async def handle_audio(audio_file):
282
  last_task_category = "audio"
283
  if audio_pipe is None:
284
  return {"error": "Audio model not loaded"}
 
 
285
  try:
286
  import librosa
287
  import soundfile as sf
288
  import tempfile
 
 
289
  audio, sr = librosa.load(audio_file, sr=16000)
290
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
291
- sf.write(tmp.name, audio, sr)
292
- result = audio_pipe(tmp.name, return_timestamps=False)
 
 
 
293
  text = result["text"]
294
- # Whisper does not output log probs easily; use placeholder
295
- avg_log_prob = -2.0
 
 
 
296
  event = AIEvent(
297
  timestamp=datetime.utcnow(),
298
  component="audio",
@@ -305,7 +317,7 @@ async def handle_audio(audio_file):
305
  action_category="audio",
306
  model_name="whisper-tiny.en",
307
  model_version="latest",
308
- prompt="", # audio file path
309
  response=text,
310
  response_length=len(text),
311
  confidence=float(np.exp(avg_log_prob)),
@@ -322,98 +334,107 @@ async def handle_audio(audio_file):
322
  "quality_detection": quality_result
323
  }
324
  except Exception as e:
325
- logger.error(f"Audio task error: {e}")
326
- return {"error": str(e)}
327
 
328
  async def read_iot_sensors(fault_type, history_state):
329
  """Read simulated IoT sensors, run diagnostics, predict failure, and return updated plot data."""
330
  global last_task_category
331
  last_task_category = "iot"
332
- iot_sim.set_fault(fault_type if fault_type != "none" else None)
333
- data = iot_sim.read()
334
- history_state.append(data)
335
- if len(history_state) > 100:
336
- history_state.pop(0)
337
-
338
- # Create IoTEvent with valid component name
339
- event = IoTEvent(
340
- timestamp=datetime.utcnow(),
341
- component="robotic-arm",
342
- service_mesh="factory",
343
- latency_p99=0,
344
- error_rate=0.0,
345
- throughput=1,
346
- cpu_util=None,
347
- memory_util=None,
348
- temperature=data['temperature'],
349
- vibration=data['vibration'],
350
- motor_current=data['motor_current'],
351
- position_error=data['position_error']
352
- )
353
- diag_result = await robotics_diagnostician.analyze(event)
354
-
355
- # Simple failure prediction
356
- prediction = None
357
- if len(history_state) >= 5:
358
- temps = [h['temperature'] for h in history_state[-5:]]
359
- x = np.arange(len(temps))
360
- slope, intercept = np.polyfit(x, temps, 1)
361
- next_temp = slope * len(temps) + intercept
362
- if slope > 0.1:
363
- time_to_threshold = (40.0 - next_temp) / slope if slope > 0 else None
364
- prediction = {
365
- "predicted_temperature": next_temp,
366
- "time_to_overheat_min": time_to_threshold
367
- }
368
-
369
- # Prepare temperature history for plotting as DataFrame
370
- temp_history = [h['temperature'] for h in history_state[-20:]]
371
- df = pd.DataFrame({
372
- "index": list(range(len(temp_history))),
373
- "temperature": temp_history
374
- })
375
-
376
- return data, diag_result, prediction, df, history_state
377
-
378
- # ========== NEW: Infrastructure Reliability Handler ==========
 
 
 
 
379
  async def handle_infra(fault_type, session_state):
380
  """Run infrastructure reliability analysis."""
381
  if not INFRA_DEPS_AVAILABLE:
382
  return {"error": "Infrastructure modules not installed (see logs)"}, session_state
383
 
384
- # Create a new simulator per session (or reuse from state)
385
- if "sim" not in session_state or session_state["sim"] is None:
386
- session_state["sim"] = InfraSimulator()
387
- sim = session_state["sim"]
388
-
389
- # Inject fault
390
- sim.set_fault(fault_type if fault_type != "none" else None)
391
- components = sim.read_state()
392
-
393
- # Update graph
394
- infra_graph.update_from_state(components)
395
-
396
- # Run Bayesian inference (mock for now; in reality would use Pyro)
397
- bayesian_risk = {"switch_failure": 0.1, "server_failure": 0.05}
398
-
399
- # Run GNN prediction (mock if PyG not available)
400
- predictions = {"at_risk": ["server-1"] if fault_type != "none" else []}
401
-
402
- # Run ProbLog (via python-problog)
403
- logic_explanations = "ProbLog output: ..." # Replace with actual ProbLog call
404
-
405
- # Ontology reasoning
406
- ontology_result = ontology.classify("server") if ontology else {"inferred": [], "consistent": True}
407
-
408
- # Combine results
409
- output = {
410
- "topology": components,
411
- "bayesian_risk": bayesian_risk,
412
- "gnn_predictions": predictions,
413
- "logic_explanations": logic_explanations,
414
- "ontology": ontology_result
415
- }
416
- return output, session_state
 
 
 
 
 
417
 
418
  # ----------------------------------------------------------------------
419
  # Gradio UI
@@ -432,14 +453,14 @@ with gr.Blocks(title="ARF v4 – AI Reliability Lab", theme="soft") as demo:
432
  # Tab 2: Image Generation
433
  with gr.TabItem("Image Generation"):
434
  img_prompt = gr.Textbox(label="Prompt", value="A cat wearing a hat")
435
- img_steps = gr.Slider(1, 10, value=2, step=1, label="Inference Steps (higher = better quality, slower)")
436
  img_btn = gr.Button("Generate")
437
  img_output = gr.Image(label="Generated Image")
438
  img_json = gr.JSON(label="Analysis")
439
 
440
  # Tab 3: Audio Transcription
441
  with gr.TabItem("Audio Transcription"):
442
- gr.Markdown("Click the microphone to record, or upload a file. Try the sample: [Sample Audio](https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac)")
443
  audio_input = gr.Audio(type="filepath", label="Upload audio file")
444
  audio_btn = gr.Button("Transcribe")
445
  audio_output = gr.JSON(label="Analysis")
@@ -465,17 +486,16 @@ with gr.Blocks(title="ARF v4 – AI Reliability Lab", theme="soft") as demo:
465
  with gr.Column():
466
  pred_display = gr.JSON(label="Failure Prediction")
467
  with gr.Row():
468
- with gr.Column(scale=1, min_width=600):
469
- temp_plot = gr.LinePlot(
470
- label="Temperature History (last 20 readings)",
471
- x="index",
472
- y="temperature"
473
- )
474
 
475
- # ========== NEW: Infrastructure Reliability Tab ==========
476
  with gr.TabItem("Infrastructure Reliability"):
477
- gr.Markdown("### Neuro‑Symbolic Infrastructure Monitoring (Bayesian + Graph + Logic)")
478
- infra_state = gr.State(value={}) # per‑session state
479
 
480
  with gr.Row():
481
  with gr.Column():
@@ -488,7 +508,7 @@ with gr.Blocks(title="ARF v4 – AI Reliability Lab", theme="soft") as demo:
488
  with gr.Column():
489
  infra_output = gr.JSON(label="Analysis Results")
490
 
491
- # Tab 5: Enterprise
492
  with gr.TabItem("Enterprise"):
493
  gr.Markdown("""
494
  ## 🚀 ARF Enterprise – Governed Execution for Autonomous Infrastructure
@@ -506,9 +526,6 @@ with gr.Blocks(title="ARF v4 – AI Reliability Lab", theme="soft") as demo:
506
  ### Get Started
507
  - 📅 [Book a Demo](https://calendly.com/petter2025us/30min)
508
  - 📧 [Contact Sales](mailto:petter2025us@outlook.com)
509
- - 📄 [Download Datasheet](#) (coming soon)
510
-
511
- *Already using ARF OSS? Upgrade seamlessly – same core, governed execution.*
512
  """)
513
 
514
  # Feedback row
 
10
  from datetime import datetime
11
 
12
  # ----------------------------------------------------------------------
13
+ # Logging setup
14
  # ----------------------------------------------------------------------
15
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
16
  logger = logging.getLogger(__name__)
 
110
  image_pipe = None
111
  try:
112
  image_pipe = StableDiffusionPipeline.from_pretrained(
113
+ "hf-internal-testing/tiny-stable-diffusion-torch",
114
+ safety_checker=None
115
  )
116
  if not torch.cuda.is_available():
117
  image_pipe.to("cpu")
118
  logger.info("Image pipeline loaded.")
119
  except Exception as e:
120
  logger.warning(f"Image pipeline load failed (will be disabled): {e}")
121
+ image_pipe = None
122
 
123
  # ----------------------------------------------------------------------
124
  # Audio transcription (Whisper tiny)
 
175
 
176
  # ----------------------------------------------------------------------
177
  # Helper: update risk with feedback (global state – shared across users)
 
178
  # ----------------------------------------------------------------------
179
  last_task_category = None
180
 
 
194
  global last_task_category
195
  last_task_category = task_type
196
  try:
197
+ logger.info(f"Handling text task: {task_type}, prompt: {prompt[:50]}...")
198
  response, avg_log_prob = generate_with_logprobs(prompt)
199
  retrieval_score = retriever.get_similarity(prompt)
200
  event = AIEvent(
 
212
  prompt=prompt,
213
  response=response,
214
  response_length=len(response),
215
+ confidence=float(np.exp(avg_log_prob)),
216
  perplexity=None,
217
  retrieval_scores=[retrieval_score],
218
  user_feedback=None,
 
231
  "risk_metrics": risk_metrics
232
  }
233
  except Exception as e:
234
+ logger.error(f"Text task error: {e}", exc_info=True)
235
+ return {"error": str(e), "traceback": traceback.format_exc()}
236
 
237
  async def handle_image(prompt, steps):
238
  """Handle image generation with configurable steps. Returns (image, json_data)."""
 
259
  model_name="tiny-sd",
260
  model_version="latest",
261
  prompt=prompt,
262
+ response="",
263
  response_length=0,
264
+ confidence=1.0 / (gen_time + 1),
265
  perplexity=None,
266
  retrieval_scores=[retrieval_score, gen_time],
267
  user_feedback=None,
 
275
  }
276
  return image, json_data
277
  except Exception as e:
278
+ logger.error(f"Image task error: {e}", exc_info=True)
279
+ return None, {"error": str(e), "traceback": traceback.format_exc()}
280
 
281
  async def handle_audio(audio_file):
282
  """Handle audio transcription and quality analysis."""
 
284
  last_task_category = "audio"
285
  if audio_pipe is None:
286
  return {"error": "Audio model not loaded"}
287
+ if audio_file is None:
288
+ return {"error": "No audio file provided"}
289
  try:
290
  import librosa
291
  import soundfile as sf
292
  import tempfile
293
+
294
+ # Load and process audio
295
  audio, sr = librosa.load(audio_file, sr=16000)
296
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
297
+ tmp_path = tmp.name
298
+ sf.write(tmp_path, audio, sr)
299
+
300
+ # Transcribe
301
+ result = audio_pipe(tmp_path, return_timestamps=False)
302
  text = result["text"]
303
+
304
+ # Clean up temp file
305
+ os.unlink(tmp_path)
306
+
307
+ avg_log_prob = -2.0 # Placeholder
308
  event = AIEvent(
309
  timestamp=datetime.utcnow(),
310
  component="audio",
 
317
  action_category="audio",
318
  model_name="whisper-tiny.en",
319
  model_version="latest",
320
+ prompt="",
321
  response=text,
322
  response_length=len(text),
323
  confidence=float(np.exp(avg_log_prob)),
 
334
  "quality_detection": quality_result
335
  }
336
  except Exception as e:
337
+ logger.error(f"Audio task error: {e}", exc_info=True)
338
+ return {"error": str(e), "traceback": traceback.format_exc()}
339
 
340
  async def read_iot_sensors(fault_type, history_state):
341
  """Read simulated IoT sensors, run diagnostics, predict failure, and return updated plot data."""
342
  global last_task_category
343
  last_task_category = "iot"
344
+ try:
345
+ iot_sim.set_fault(fault_type if fault_type != "none" else None)
346
+ data = iot_sim.read()
347
+ history_state.append(data)
348
+ if len(history_state) > 100:
349
+ history_state.pop(0)
350
+
351
+ # Create IoTEvent
352
+ event = IoTEvent(
353
+ timestamp=datetime.utcnow(),
354
+ component="robotic-arm",
355
+ service_mesh="factory",
356
+ latency_p99=0,
357
+ error_rate=0.0,
358
+ throughput=1,
359
+ cpu_util=None,
360
+ memory_util=None,
361
+ temperature=data['temperature'],
362
+ vibration=data['vibration'],
363
+ motor_current=data['motor_current'],
364
+ position_error=data['position_error']
365
+ )
366
+ diag_result = await robotics_diagnostician.analyze(event)
367
+
368
+ # Simple failure prediction
369
+ prediction = None
370
+ if len(history_state) >= 5:
371
+ temps = [h['temperature'] for h in history_state[-5:]]
372
+ x = np.arange(len(temps))
373
+ slope, intercept = np.polyfit(x, temps, 1)
374
+ next_temp = slope * len(temps) + intercept
375
+ if slope > 0.1:
376
+ time_to_threshold = (40.0 - next_temp) / slope if slope > 0 else None
377
+ prediction = {
378
+ "predicted_temperature": float(next_temp),
379
+ "time_to_overheat_min": float(time_to_threshold) if time_to_threshold else None
380
+ }
381
+
382
+ # Prepare temperature history for plotting
383
+ temp_history = [h['temperature'] for h in history_state[-20:]]
384
+ df = pd.DataFrame({
385
+ "index": list(range(len(temp_history))),
386
+ "temperature": temp_history
387
+ })
388
+
389
+ return data, diag_result, prediction, df, history_state
390
+ except Exception as e:
391
+ logger.error(f"IoT task error: {e}", exc_info=True)
392
+ return {"error": str(e)}, {"error": str(e)}, {"error": str(e)}, pd.DataFrame({"index": [], "temperature": []}), history_state
393
+
394
+ # ========== Infrastructure Reliability Handler ==========
395
async def handle_infra(fault_type, session_state):
    """Run infrastructure reliability analysis.

    Parameters:
        fault_type: fault label to inject; "none" disables fault injection.
        session_state: per-session dict (gr.State); caches the "sim" InfraSimulator.

    Returns:
        (output, session_state) — output is the combined analysis dict, or an
        {"error": ...} payload on failure; session_state is always returned so
        Gradio persists it across calls.
    """
    if not INFRA_DEPS_AVAILABLE:
        return {"error": "Infrastructure modules not installed (see logs)"}, session_state

    try:
        # Create a new simulator per session (or reuse from state)
        if "sim" not in session_state or session_state["sim"] is None:
            session_state["sim"] = InfraSimulator()
        sim = session_state["sim"]

        # Inject fault
        sim.set_fault(fault_type if fault_type != "none" else None)
        components = sim.read_state()

        # Update graph — guarded because infra_graph may be None/falsy when
        # the graph dependencies failed to load.
        if infra_graph:
            infra_graph.update_from_state(components)

        # Run Bayesian inference (mock for now — hard-coded risk values)
        bayesian_risk = {"switch_failure": 0.1, "server_failure": 0.05}

        # Run GNN prediction (mock — flags server-1 whenever any fault is injected)
        predictions = {"at_risk": ["server-1"] if fault_type != "none" else []}

        # Run ProbLog (mock — placeholder string, no actual ProbLog call)
        logic_explanations = "ProbLog output: ..."

        # Ontology reasoning (falls back to an empty, consistent result when
        # the ontology is unavailable)
        ontology_result = ontology.classify("server") if ontology else {"inferred": [], "consistent": True}

        # Combine results
        output = {
            "topology": components,
            "bayesian_risk": bayesian_risk,
            "gnn_predictions": predictions,
            "logic_explanations": logic_explanations,
            "ontology": ontology_result
        }
        return output, session_state
    except Exception as e:
        logger.error(f"Infra task error: {e}", exc_info=True)
        # NOTE(review): assumes `traceback` is imported at module level — confirm,
        # it is not visible in this chunk.
        return {"error": str(e), "traceback": traceback.format_exc()}, session_state
438
 
439
  # ----------------------------------------------------------------------
440
  # Gradio UI
 
453
  # Tab 2: Image Generation
454
  with gr.TabItem("Image Generation"):
455
  img_prompt = gr.Textbox(label="Prompt", value="A cat wearing a hat")
456
+ img_steps = gr.Slider(1, 10, value=2, step=1, label="Inference Steps")
457
  img_btn = gr.Button("Generate")
458
  img_output = gr.Image(label="Generated Image")
459
  img_json = gr.JSON(label="Analysis")
460
 
461
  # Tab 3: Audio Transcription
462
  with gr.TabItem("Audio Transcription"):
463
+ gr.Markdown("Upload an audio file to transcribe")
464
  audio_input = gr.Audio(type="filepath", label="Upload audio file")
465
  audio_btn = gr.Button("Transcribe")
466
  audio_output = gr.JSON(label="Analysis")
 
486
  with gr.Column():
487
  pred_display = gr.JSON(label="Failure Prediction")
488
  with gr.Row():
489
+ temp_plot = gr.LinePlot(
490
+ label="Temperature History",
491
+ x="index",
492
+ y="temperature"
493
+ )
 
494
 
495
+ # Tab 5: Infrastructure Reliability
496
  with gr.TabItem("Infrastructure Reliability"):
497
+ gr.Markdown("### Neuro‑Symbolic Infrastructure Monitoring")
498
+ infra_state = gr.State(value={})
499
 
500
  with gr.Row():
501
  with gr.Column():
 
508
  with gr.Column():
509
  infra_output = gr.JSON(label="Analysis Results")
510
 
511
+ # Tab 6: Enterprise
512
  with gr.TabItem("Enterprise"):
513
  gr.Markdown("""
514
  ## 🚀 ARF Enterprise – Governed Execution for Autonomous Infrastructure
 
526
  ### Get Started
527
  - 📅 [Book a Demo](https://calendly.com/petter2025us/30min)
528
  - 📧 [Contact Sales](mailto:petter2025us@outlook.com)
 
 
 
529
  """)
530
 
531
  # Feedback row