# app.py
"""Gradio front-end for the 3-agent + manager healthcare-AI case-study pipeline.

Wires user input (pasted text or an uploaded file) into ``run_pipeline``,
then displays the three agent drafts, the manager's score table, and the
winning draft with its justification.
"""
import os
import pathlib
import traceback

import pandas as pd
import gradio as gr

from agents import run_pipeline
from files_process import prepare_input_arg, load_input_text  # load_input_text not used here but handy

# Fixed column order for the scores table shown in the UI.
_SCORE_COLUMNS = [
    "agent",
    "clinical_completeness",
    "ai_rigor",
    "trial_framing",
    "privacy_regulatory",
    "clarity_structure",
]


def _scores_to_df(result_json: dict) -> pd.DataFrame:
    """Convert the manager's ``scores`` list into a fixed-shape DataFrame.

    The free-text ``justification`` field is dropped here (it is surfaced
    separately by :func:`_winner_md_block`); any score column missing from
    the payload is added with ``None`` so the table always has the same
    columns in the same order.
    """
    rows = result_json.get("scores", []) or []
    df = pd.DataFrame(rows)
    if "justification" in df.columns:
        df = df.drop(columns=["justification"])
    for c in _SCORE_COLUMNS:
        if c not in df.columns:
            df[c] = None
    return df[_SCORE_COLUMNS]


def _winner_md_block(result_json: dict) -> str:
    """Render the winner (and its justification, when present) as Markdown."""
    winner = result_json.get("winner", "") or "N/A"
    # Pull the winning agent's justification out of the scores list, if any.
    just = next(
        (
            r.get("justification", "") or ""
            for r in result_json.get("scores", [])
            if r.get("agent") == winner
        ),
        "",
    )
    if just:
        return f"### 🏆 Winner: **{winner}**\n\n> *{just}*"
    return f"### 🏆 Winner: **{winner}**"


def _read_draft(path: pathlib.Path) -> str:
    """Return a draft file's text, or a placeholder note if it was not written."""
    if path.exists():
        return path.read_text(encoding="utf-8")
    return f"*{path.name} not found*"


def run_ui(text_in, file_in, oai_model, gem_model, ds_model):
    """Gradio click handler: run the pipeline and fan results out to the UI.

    Returns an 8-tuple matching ``run_btn.click``'s ``outputs``: the three
    agent drafts (Markdown), the scores DataFrame, the winner Markdown, and
    three download paths (``None`` when a draft file is missing). On error
    the same shape is returned so every component still receives a value.
    """
    try:
        input_arg = prepare_input_arg(text_in, file_in)
        result_json = run_pipeline(
            input_arg,
            oai_model=oai_model,
            gem_model=gem_model,
            ds_model=ds_model,
        )
        # run_pipeline persists each agent's draft as agent{i}.md in the CWD.
        paths = [pathlib.Path(f"agent{i}.md") for i in range(1, 4)]
        drafts = [_read_draft(p) for p in paths]
        downloads = [str(p) if p.exists() else None for p in paths]
        return (
            *drafts,
            _scores_to_df(result_json),
            _winner_md_block(result_json),
            *downloads,
        )
    except Exception as e:
        # Top-level UI boundary: log the full traceback for the operator,
        # surface a short message to the user, keep output shapes consistent.
        traceback.print_exc()
        return f"**Error:** {e}", "", "", pd.DataFrame(), "", None, None, None


with gr.Blocks(title="Healthcare–AI Case Studies (3 Agents + Manager)") as demo:
    gr.Markdown(
        "# Healthcare–AI Case Studies\n"
        "Provide text or upload a .txt/.docx/.pdf, then click **Run**."
    )

    with gr.Accordion("Models (optional)", open=False):
        m1 = gr.Textbox(value="gpt-4o-mini", label="Agent 1 (OpenAI)")
        m2 = gr.Textbox(value="gpt-4.1-nano", label="Agent 2 (style-2)")
        m3 = gr.Textbox(value="gpt-4.1-mini", label="Agent 3 (style-3)")

    gr.Markdown("### Manager Scores")
    scores_df = gr.Dataframe(label="Scores (justification hidden)")
    winner_md = gr.Markdown(label="Winner & rationale")

    gr.Markdown("### Download agent drafts")
    with gr.Row():
        dl1 = gr.DownloadButton(label="Download agent1.md")
        dl2 = gr.DownloadButton(label="Download agent2.md")
        dl3 = gr.DownloadButton(label="Download agent3.md")

    gr.Markdown("### Input")
    with gr.Row():
        txt = gr.Textbox(lines=10, label="Paste source text (optional)")
        fil = gr.File(
            label="Upload .txt / .docx / .pdf (optional)",
            file_count="single",
            file_types=["text", ".docx", ".pdf"],
        )
    run_btn = gr.Button("Run")

    gr.Markdown("### Agent Drafts (expand to view)")
    with gr.Accordion("Agent outputs", open=False):
        with gr.Row():
            a1_md = gr.Markdown(label="Agent 1 draft")
            a2_md = gr.Markdown(label="Agent 2 draft")
            a3_md = gr.Markdown(label="Agent 3 draft")

    run_btn.click(
        fn=run_ui,
        inputs=[txt, fil, m1, m2, m3],
        outputs=[a1_md, a2_md, a3_md, scores_df, winner_md, dl1, dl2, dl3],
    )

# On Spaces, it's enough to expose `demo`; running locally calls launch().
if __name__ == "__main__":
    demo.launch()