# Source: atlas/ui.py (Hugging Face Space), commit 80d7cb1 by ANISA09 ("Update ui.py")
# ui.py
import json
import logging
import re
import traceback

import gradio as gr
from PIL import Image

from orchestrator import ORCH
import utils
# Module-level logger; child of the app-wide "newsorchestra" logger hierarchy.
logger = logging.getLogger("newsorchestra.ui")
# Page title: used as the Blocks window title and as the top-level heading.
title = "NewsOrchestra β€” Multi-agent AI Orchestrator for Explainable Fact-checking"
# Intro blurb rendered directly under the title inside the Gradio app.
description = """Paste article text or a URL and optionally upload an image.
This demo orchestrates multiple models (zero-shot classifier, SerpApi evidence fetch, optional Gemini planner/synthesizer) and returns a verdict and human-friendly explanation."""
def build_md_from_merged(merged: dict) -> str:
    """Render the orchestrator's merged verification report as Markdown.

    Args:
        merged: Report dict produced by ``ORCH.run``. Recognized keys (all
            optional): ``claim``, ``final_verdict``, ``confidence`` (float in
            [0, 1]), ``evidence_agg`` (dict with an ``evidence`` list),
            ``image_check``, ``explanation``, ``planner``, ``synthesizer``.

    Returns:
        A Markdown string: claim, verdict/confidence, top evidence, attached
        media summary, explanation, and raw planner/synthesizer JSON dumps.
    """
    md_lines: list[str] = []
    md_lines.append(f"### Claim\n\n> {merged.get('claim')}\n")
    md_lines.append(f"**Final verdict:** **{merged.get('final_verdict', 'β€”')}** ")
    conf = merged.get('confidence')
    md_lines.append(f"**Confidence:** {(conf*100):.1f}% " if conf is not None else "**Confidence:** β€” ")
    md_lines.append("\n---\n")
    # BUGFIX: `merged.get("evidence_agg", {})` returns None when the key is
    # present with a None value, which then crashes on `.get("evidence")`.
    ea = (merged.get("evidence_agg") or {}).get("evidence", [])
    if ea:
        md_lines.append("### Top evidence (click to open)\n")
        for e in ea[:6]:
            # Renamed from `title` to avoid shadowing the module-level page title.
            item_title = e.get('title') or e.get('snippet') or "Result"
            link_display = "πŸ”—" if e.get('link') else ""
            link_url = e.get('link') or "#"
            md_lines.append(f"- **[{e.get('domain')}]** [{link_display}] [{item_title}]({link_url}) β€” trust **{e.get('trust')}** \n > {e.get('snippet')}\n")
    else:
        md_lines.append("- No top search results found.\n")
    md_lines.append("\n---\n")
    img_chk = merged.get("image_check")
    if img_chk:
        md_lines.append("### Attached media summary\n")
        if img_chk.get("caption"):
            md_lines.append(f"- Caption (auto): {img_chk.get('caption')} \n")
        if img_chk.get("ocr_text"):
            md_lines.append(f"- OCR text found: `{img_chk.get('ocr_text')}` \n")
        if img_chk.get("exif"):
            # Show at most six EXIF keys to keep the summary compact.
            md_lines.append(f"- EXIF metadata keys: {', '.join(list(img_chk.get('exif').keys())[:6])} \n")
        df = img_chk.get("deepfake_check")
        if df and isinstance(df, list) and len(df) > 0:
            top = df[0]
            lab = top.get("label")
            sc = top.get("score")
            # BUGFIX: formatting a missing/non-numeric score with :.2f raised
            # TypeError; fall back to "n/a" instead of crashing the report.
            score_txt = f"{sc:.2f}" if isinstance(sc, (int, float)) else "n/a"
            md_lines.append(f"- **Deepfake detector:** top label **{lab}** (score {score_txt}) \n")
        md_lines.append("\n---\n")
    if merged.get("explanation"):
        md_lines.append("### Why we reached this verdict\n")
        md_lines.append(merged.get("explanation") + "\n")
    if merged.get("planner"):
        md_lines.append("\n---\n**Orchestrator planner rationale:**\n")
        md_lines.append("```\n" + json.dumps(merged.get("planner"), indent=2) + "\n```\n")
    if merged.get("synthesizer"):
        md_lines.append("\n---\n**Synthesizer output (Gemini):**\n")
        md_lines.append("```\n" + json.dumps(merged.get("synthesizer"), indent=2) + "\n```\n")
    return "\n".join(md_lines)
def create_ui():
    """Build and return the Gradio Blocks app for the fact-checking demo.

    Lays out the inputs (article text/URL, optional image, feature toggles),
    wires the Analyze button to the orchestration handler, and returns the
    Blocks object; the caller is responsible for launching it.

    Returns:
        gr.Blocks: the assembled demo app.
    """
    with gr.Blocks(title=title) as demo:
        gr.Markdown(f"# {title}\n\n{description}")
        with gr.Row():
            inp = gr.Textbox(lines=8, label="Article text or URL", placeholder="Paste full article text or a URL (http...)")
        with gr.Row():
            img_upload = gr.Image(type="pil", label="Upload image (optional)")
            img_url = gr.Textbox(lines=1, label="Or image URL (optional)", placeholder="https://...")
        with gr.Row():
            run_gemini_cb = gr.Checkbox(label="Run Gemini planner + synthesizer (if configured)", value=True)
            run_serp_cb = gr.Checkbox(label="Run SerpApi evidence fetch (recommended)", value=False)
            run_deepfake_cb = gr.Checkbox(label="Run deepfake detection (Hugging Face model)", value=True)
        with gr.Row():
            analyze_btn = gr.Button("Analyze (run orchestration)")
        with gr.Row():
            merged_out = gr.JSON(label="Merged verification report (JSON)")
        with gr.Row():
            human_md = gr.Markdown(label="Friendly explanation & evidence")

        def on_click_analyze(text_or_url: str = "", image_upload=None, image_url: str = "", run_gemini: bool = True, run_serpapi: bool = True, run_deepfake: bool = True):
            """Button handler: derive a claim, run ORCH, return (report dict, Markdown).

            Never raises: any failure is caught and surfaced as an error payload
            in both outputs so the UI stays responsive.
            """
            try:
                raw_input = (text_or_url or "").strip()

                def is_url_only(s: str) -> bool:
                    # True when the input is exactly one http(s) URL and nothing else.
                    # (Replaces the original __import__("re") hack with the module import.)
                    return bool(s) and bool(re.match(r"^https?://\S+$", s.strip()))

                article_text = None
                url = None
                claim = None
                if is_url_only(raw_input):
                    url = raw_input
                    # Delegate page fetching to the orchestrator when it supports it.
                    article_text = ORCH.fetch_article_text_from_url(url) if hasattr(ORCH, "fetch_article_text_from_url") else None
                    # Claim = first sanitized line of the fetched article, else empty.
                    claim = (article_text and utils.sanitize_text(article_text).split("\n", 1)[0]) or ""
                else:
                    article_text = raw_input or None
                    if article_text:
                        claim = utils.sanitize_text(article_text)
                    else:
                        # No text given: try to derive a claim from the image instead.
                        # (Original had a redundant if/else assigning image_upload on
                        # both branches; collapsed to a single assignment.)
                        img_obj = image_upload
                        if img_obj is None and image_url:
                            try:
                                img_obj, _b, _err = ORCH.fetch_image_bytes(image_url) if hasattr(ORCH, "fetch_image_bytes") else (None, None, None)
                            except Exception:
                                img_obj = None
                        claim = "No claim text provided"
                        if img_obj is not None:
                            # BUGFIX: the original referenced `models.hf_image_caption`
                            # without importing `models`, raising NameError on this
                            # path. Import lazily and degrade gracefully if the
                            # captioning helper is unavailable.
                            cap = None
                            try:
                                import models  # project-local model helpers
                                cap = models.hf_image_caption(img_obj)
                            except Exception:
                                logger.warning("Image captioning unavailable; using placeholder claim.")
                            claim = cap or "No claim text provided"
                # Only forward the upload when it is already a PIL image.
                image_to_pass = image_upload if isinstance(image_upload, Image.Image) else None
                merged = ORCH.run(
                    claim_text=claim,
                    article_text=article_text,
                    url=url,
                    image_upload=image_to_pass,
                    image_url=image_url,
                    run_gemini=run_gemini,
                    run_serpapi=run_serpapi,
                    run_deepfake=run_deepfake
                )
                md = build_md_from_merged(merged)
                return merged, md
            except Exception as e:
                tb = traceback.format_exc()
                logger.exception("Error in analyze handler: %s", e)
                # Surface the failure in both outputs instead of crashing the UI.
                return {"error": str(e), "trace": tb}, f"Error running analysis: {e}"

        analyze_btn.click(on_click_analyze,
                          inputs=[inp, img_upload, img_url, run_gemini_cb, run_serp_cb, run_deepfake_cb],
                          outputs=[merged_out, human_md])
        gr.Markdown(
            "- Notes: set SERPAPI_KEY and GEMINI_API_KEY in environment for best results.\n"
            "- Set HF_DEEPFAKE_MODEL to choose a different deepfake detection model.\n"
            "- The demo is for research/educational purposes. Always human-review outputs before acting on them."
        )
    return demo