OliverPerrin committed on
Commit
6dd5c4b
·
1 Parent(s): 4a825c2

Fix: Remove Gallery to avoid Gradio schema bug

Browse files
Files changed (1) hide show
  1. scripts/demo_gradio.py +18 -27
scripts/demo_gradio.py CHANGED
@@ -394,14 +394,14 @@ def prepare_download(
394
  return handle.name
395
 
396
 
397
- def load_visualization_gallery() -> tuple[list[tuple[str, str]], str]:
398
  """Collect visualization images produced by model tests."""
399
- items: list[tuple[str, str]] = []
400
  missing: list[str] = []
401
- for filename, label in VISUALIZATION_ASSETS:
402
  path = VISUALIZATION_DIR / filename
403
  if path.exists():
404
- items.append((str(path), label))
405
  else:
406
  missing.append(filename)
407
 
@@ -444,7 +444,9 @@ def generate_fallback_summary(text: str, max_chars: int = 320) -> str:
444
  def load_metrics_report_as_markdown() -> tuple[str, str, str | None, str]:
445
  """Load metrics and return as Markdown strings to avoid Gradio schema issues."""
446
  if not EVAL_REPORT_PATH.exists():
447
- error_msg = f"Evaluation report not found at {EVAL_REPORT_PATH}. Run scripts/evaluate.py first."
 
 
448
  return error_msg, "", None, error_msg
449
 
450
  try:
@@ -457,11 +459,11 @@ def load_metrics_report_as_markdown() -> tuple[str, str, str | None, str]:
457
 
458
  # Build overall metrics markdown table
459
  summary_md = """| Task | Metric | Value |
460
- |------|--------|-------|
461
- | Summarization | ROUGE-Like | {:.4f} |
462
- | Summarization | BLEU | {:.4f} |
463
- | Emotion | F1 (Macro) | {:.4f} |
464
- | Topic | Accuracy | {:.4f} |""".format(
465
  report["summarization"]["rouge_like"],
466
  report["summarization"]["bleu"],
467
  report["emotion"]["f1_macro"],
@@ -470,7 +472,10 @@ def load_metrics_report_as_markdown() -> tuple[str, str, str | None, str]:
470
 
471
  # Build topic classification report markdown table
472
  topic_report = report["topic"]["classification_report"]
473
- topic_lines = ["| Label | Precision | Recall | F1-Score | Support |", "|-------|-----------|--------|----------|---------|"]
 
 
 
474
  for label, metrics in topic_report.items():
475
  if isinstance(metrics, dict) and "precision" in metrics:
476
  topic_lines.append(
@@ -509,7 +514,7 @@ def create_interface() -> gr.Blocks:
509
  """
510
  )
511
 
512
- initial_visuals, initial_visual_status = load_visualization_gallery()
513
  summary_md, topic_md, cm_image, metrics_meta = load_metrics_report_as_markdown()
514
 
515
  with gr.Row():
@@ -551,19 +556,10 @@ def create_interface() -> gr.Blocks:
551
  refresh_metrics = gr.Button("Refresh Metrics")
552
 
553
  with gr.TabItem("Model Visuals"):
554
- visuals = gr.Gallery(
555
- label="Test Visualizations",
556
- value=initial_visuals,
557
- columns=2,
558
- height=400,
559
- interactive=False,
560
- type="filepath",
561
- )
562
  gr.Markdown(
563
  "These PNGs come from the visualization-focused tests in `tests/test_models` and are consumed as-is."
564
  )
565
- visuals_notice = gr.Markdown(initial_visual_status)
566
- refresh_visuals = gr.Button("Refresh Visuals")
567
 
568
  gr.Markdown("### Download Results")
569
  download_btn = gr.DownloadButton("Download JSON", visible=False)
@@ -574,11 +570,6 @@ def create_interface() -> gr.Blocks:
574
  inputs=[input_text],
575
  outputs=[summary_output, emotion_output, topic_output, attention_output, download_btn],
576
  )
577
- refresh_visuals.click(
578
- fn=load_visualization_gallery,
579
- inputs=None,
580
- outputs=[visuals, visuals_notice],
581
- )
582
  refresh_metrics.click(
583
  fn=load_metrics_report_as_markdown,
584
  inputs=None,
 
394
  return handle.name
395
 
396
 
397
+ def load_visualization_gallery() -> tuple[list[str], str]:
398
  """Collect visualization images produced by model tests."""
399
+ items: list[str] = []
400
  missing: list[str] = []
401
+ for filename, _label in VISUALIZATION_ASSETS:
402
  path = VISUALIZATION_DIR / filename
403
  if path.exists():
404
+ items.append(str(path))
405
  else:
406
  missing.append(filename)
407
 
 
444
  def load_metrics_report_as_markdown() -> tuple[str, str, str | None, str]:
445
  """Load metrics and return as Markdown strings to avoid Gradio schema issues."""
446
  if not EVAL_REPORT_PATH.exists():
447
+ error_msg = (
448
+ f"Evaluation report not found at {EVAL_REPORT_PATH}. Run scripts/evaluate.py first."
449
+ )
450
  return error_msg, "", None, error_msg
451
 
452
  try:
 
459
 
460
  # Build overall metrics markdown table
461
  summary_md = """| Task | Metric | Value |
462
+ |------|--------|-------|
463
+ | Summarization | ROUGE-Like | {:.4f} |
464
+ | Summarization | BLEU | {:.4f} |
465
+ | Emotion | F1 (Macro) | {:.4f} |
466
+ | Topic | Accuracy | {:.4f} |""".format(
467
  report["summarization"]["rouge_like"],
468
  report["summarization"]["bleu"],
469
  report["emotion"]["f1_macro"],
 
472
 
473
  # Build topic classification report markdown table
474
  topic_report = report["topic"]["classification_report"]
475
+ topic_lines = [
476
+ "| Label | Precision | Recall | F1-Score | Support |",
477
+ "|-------|-----------|--------|----------|---------|",
478
+ ]
479
  for label, metrics in topic_report.items():
480
  if isinstance(metrics, dict) and "precision" in metrics:
481
  topic_lines.append(
 
514
  """
515
  )
516
 
517
+ _, initial_visual_status = load_visualization_gallery()
518
  summary_md, topic_md, cm_image, metrics_meta = load_metrics_report_as_markdown()
519
 
520
  with gr.Row():
 
556
  refresh_metrics = gr.Button("Refresh Metrics")
557
 
558
  with gr.TabItem("Model Visuals"):
 
 
 
 
 
 
 
 
559
  gr.Markdown(
560
  "These PNGs come from the visualization-focused tests in `tests/test_models` and are consumed as-is."
561
  )
562
+ gr.Markdown(initial_visual_status)
 
563
 
564
  gr.Markdown("### Download Results")
565
  download_btn = gr.DownloadButton("Download JSON", visible=False)
 
570
  inputs=[input_text],
571
  outputs=[summary_output, emotion_output, topic_output, attention_output, download_btn],
572
  )
 
 
 
 
 
573
  refresh_metrics.click(
574
  fn=load_metrics_report_as_markdown,
575
  inputs=None,