HIB-Automedica commited on
Commit
52b89b0
·
verified ·
1 Parent(s): 568d998

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +205 -0
app.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import requests
import gradio as gr
import json, ast

#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-------------------------------BACKEND COMMUNICATION SETUP-------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
# URL of your private backend Space API
# NOTE(review): placeholder URL — must be replaced with the real backend
# Space endpoint before deployment.
BACKEND_URL = "https://your-username-your-backend-space.hf.space/respond"

# Hugging Face token (add this as a Secret in your frontend Space)
# If the secret is missing this is None and the Authorization header sent by
# respond() becomes "Bearer None" — the backend will reject it.
HF_TOKEN = os.getenv("HF_TOKEN")
18
+
19
+
20
+ #-----------------------------------------------------------------------------------------------
21
+ #-----------------------------------------------------------------------------------------------
22
+ #-----------------------------------------------------------------------------------------------
23
+ #----------------------------FORMAT DEV-MODE REPORTER BOXES-------------------------------------
24
+ #-----------------------------------------------------------------------------------------------
25
+ #-----------------------------------------------------------------------------------------------
26
+ #-----------------------------------------------------------------------------------------------
27
+
28
def coerce_json_obj(maybe_json):
    """
    Best-effort conversion of *maybe_json* into a Python object.

    Dicts and lists pass through untouched. Strings are parsed first as
    strict JSON, then as a Python literal; anything unparseable — and any
    other input type — is returned unchanged.
    """
    if isinstance(maybe_json, (dict, list)):
        return maybe_json
    if not isinstance(maybe_json, str):
        return maybe_json  # fallback: leave unknown types alone

    text = maybe_json.strip()
    # Try strict JSON first, then a Python literal (single quotes, tuples…).
    for parse in (json.loads, ast.literal_eval):
        try:
            return parse(text)
        except Exception:
            continue
    return maybe_json  # fallback: unparseable string
46
+
47
+
48
def format_specialities_need_md(data) -> str:
    """
    Render the routing agent's speciality selection as a Markdown bullet list.

    Expected shape:
    {
        "selected_specialities": ["Dermatology", "Clinical Pharmacology"],
        "reasoning": {
            "Dermatology": "...",
            "Clinical Pharmacology": "..."
        }
    }

    Any payload that does not match (non-dict, or no selected specialities)
    is shown verbatim as a fenced JSON block so nothing is silently hidden.
    """
    data = coerce_json_obj(data)

    if not isinstance(data, dict):
        return "```json\n" + json.dumps(data, indent=2, ensure_ascii=False) + "\n```"

    selected = data.get("selected_specialities") or []
    reasoning = data.get("reasoning") or {}
    # BUG FIX: a malformed "reasoning" (list/str) previously crashed .get();
    # a None / non-string reasoning value previously crashed .strip().
    if not isinstance(reasoning, dict):
        reasoning = {}

    if not selected:
        return "```json\n" + json.dumps(data, indent=2, ensure_ascii=False) + "\n```"

    lines = []
    for spec in selected:
        why = reasoning.get(spec)
        why = str(why).strip() if why is not None else ""
        if not why:
            why = "No specific reasoning provided."
        lines.append(f"- **{spec}**: {why}")
    return "\n".join(lines)
77
+
78
+
79
def format_critical_md(eval_history, aura_report) -> str:
    """
    Pretty-print the AURA evaluation history and final report as Markdown.

    Parameters
    ----------
    eval_history : list[dict] (or JSON string thereof)
        One entry per attempt, each with "attempt" and "critical_eval"
        (a dict, or a one-element list wrapping a dict).
    aura_report : dict (or JSON string thereof)
        Expected keys: "final_status", "attempts".

    Unexpected shapes fall back to a fenced JSON dump of both payloads.
    """
    eval_history = coerce_json_obj(eval_history)
    aura_report = coerce_json_obj(aura_report)

    if not isinstance(eval_history, list) or not isinstance(aura_report, dict):
        payload = {"aura_report": aura_report, "eval_history": eval_history}
        return "```json\n" + json.dumps(payload, indent=2, ensure_ascii=False) + "\n```"

    lines = []
    final_status = (aura_report.get("final_status") or "").upper()
    attempts = aura_report.get("attempts")
    summary_line = f"**Final status:** {final_status or 'N/A'}"
    if attempts is not None:
        summary_line += f" • **Attempts:** {attempts}"
    lines.append(summary_line)
    lines.append("")

    if not eval_history:
        lines.append("_No critical evaluations available._")
    else:
        for i, item in enumerate(eval_history, start=1):
            # BUG FIX: non-dict history entries previously crashed item.get().
            if not isinstance(item, dict):
                item = {}
            att_no = item.get("attempt") or i
            ce = item.get("critical_eval")
            # Normalize critical_eval to a dict: it may arrive as a
            # one-element list, a bare dict, or something malformed.
            ce0 = ce[0] if isinstance(ce, list) and ce else ce
            if not isinstance(ce0, dict):
                # BUG FIX: the original only guarded the "verdict" lookup;
                # the rationale/notes/suggestions lookups below crashed on
                # non-dict values.
                ce0 = {}
            verdict = (ce0.get("verdict") or "").upper()
            lines.append(f"### Attempt {att_no} — {verdict or 'UNKNOWN'}")

            rationale = ce0.get("rationale") or ce0.get("reason") or ""
            notes = ce0.get("notes") or ""
            suggestions = ce0.get("improvement_suggestions") or []

            if rationale:
                lines.append(f"- **Rationale:** {rationale}")
            if notes:
                lines.append(f"- **Notes:** {notes}")
            if isinstance(suggestions, list) and suggestions:
                lines.append("- **Improvement suggestions:**")
                for s in suggestions:
                    lines.append(f"  - {s}")
            lines.append("")
    return "\n".join(lines).strip()
123
+
124
+
125
+ #-----------------------------------------------------------------------------------------------
126
+ #-----------------------------------------------------------------------------------------------
127
+ #-----------------------------------------------------------------------------------------------
128
+ #--------------------------------------GRADIO CHAT INTERFACE------------------------------------
129
+ #-----------------------------------------------------------------------------------------------
130
+ #-----------------------------------------------------------------------------------------------
131
+ #-----------------------------------------------------------------------------------------------
132
+
133
def respond(message, history):
    """
    Chat event handler: forward *message* to the private backend Space and
    return the updated chat history plus the two formatted dev-mode panels.

    On any failure (network, HTTP error, bad JSON) the error is surfaced in
    the chat answer and in both side panels instead of raising.
    """
    try:
        resp = requests.post(
            BACKEND_URL,
            headers={"Authorization": f"Bearer {HF_TOKEN}"},
            json={"message": message},
            timeout=60,
        )
        resp.raise_for_status()
        payload = resp.json()

        final_answer = payload.get("final_answer", "⚠️ No answer returned")
        specialities_need = payload.get("specialities_need", {})
        eval_history = payload.get("eval_history", [])
        aura_report = payload.get("aura_report", {})
    except Exception as exc:
        # Boundary handler: show the failure everywhere rather than crash the UI.
        err = str(exc)
        final_answer = f"⚠️ Error contacting backend: {err}"
        specialities_need = {"error": err}
        eval_history = []
        aura_report = {"final_status": "ERROR", "details": err}

    # Chatbot history (still using tuple format, you could upgrade to messages type)
    updated_history = (history or []) + [(message, final_answer)]

    # Format side panels
    return (
        updated_history,
        format_specialities_need_md(specialities_need),
        format_critical_md(eval_history, aura_report),
    )
162
+
163
def clear_all():
    """
    Reset the chat and both dev-mode panels.

    Returns values matching the wired output components: an empty list for
    the Chatbot history and empty strings for the two gr.Markdown panels.
    (BUG FIX: the original returned ``{}`` for the Markdown components,
    which are initialized with ``value=""`` and expect strings.)
    """
    return [], "", ""
165
+
166
# Dev-mode UI: chat column on the left, two diagnostic panels (routing
# decisions and AURA critique) on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Project Nightingale - Due Diligence Explorer (Dev mode)")
    gr.Markdown("""
Ask a question — we will:
1. Select sources intelligently with our internal routing agent
2. Search the indexed guideline data using our **G-PRISM** retrieval pipeline
3. Internally verify responses with the **AURA** adversarial critical evaluator mechanism
""")
    with gr.Row():
        # Left column: the chat itself.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label="Clinical Knowledge Chat (G-PRISM Retrieval)", height=500, render_markdown=True, sanitize_html=False)
            with gr.Row():
                user_in = gr.Textbox(placeholder="Type your question…", scale=4)
                send_btn = gr.Button("Send", scale=1, variant="primary")
            clear_btn = gr.Button("Clear Chat")

        # Right column: dev-mode diagnostic panels fed by respond().
        with gr.Column(scale=2):
            with gr.Group():
                gr.Markdown("## Intelligent source selection (Internal Routing)")
                spec_box = gr.Markdown(value="")  # Markdown instead of JSON

            with gr.Group():
                gr.Markdown("## Critical Appraisal (AURA)")
                critical_box = gr.Markdown(value="")  # Markdown instead of JSON


    # wire events
    # NOTE(review): assumes Gradio runs handlers in registration order, so
    # respond() reads the textbox before the clearing lambdas below wipe it —
    # confirm against the Gradio events documentation.
    user_in.submit(respond, inputs=[user_in, chatbot], outputs=[chatbot, spec_box, critical_box])
    send_btn.click(respond, inputs=[user_in, chatbot], outputs=[chatbot, spec_box, critical_box])

    # clear input after sending
    user_in.submit(lambda: "", None, user_in)
    send_btn.click(lambda: "", None, user_in)

    # clear everything
    clear_btn.click(clear_all, outputs=[chatbot, spec_box, critical_box])

if __name__ == "__main__":
    demo.launch()
205
+