ManojGowda commited on
Commit
ec517ac
·
verified ·
1 Parent(s): 07f12b2

Upload 8 files

Browse files
.env ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# .env file — secret API keys for this project.
# SECURITY: the real keys previously committed on these lines were publicly
# exposed and MUST be revoked/rotated. Never commit real secrets; keep .env
# in .gitignore. GROQ_API_KEY was also defined twice — deduplicated here.
GROQ_API_KEY="<your-groq-api-key>"
GOOGLE_API_KEY="<your-google-api-key>"
GOOGLE_DOCTOR_API_KEY="<your-google-doctor-api-key>"
TOGETHER_API_KEY="<your-together-api-key>"
app.py.py ADDED
@@ -0,0 +1,434 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ import asyncio
4
+ import gspread
5
+ from google.oauth2.service_account import Credentials
6
+ from gradio.themes.base import Base
7
+ import os
8
+ import google.generativeai as genai
9
+ from PIL import Image
10
+ import traceback
11
+ import time
12
+ import re
13
+
14
+ # --- PDF Generation Imports ---
15
+ from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image as RLImage, PageBreak
16
+ from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
17
+ from reportlab.lib.units import inch
18
+ from reportlab.lib.colors import navy, black, dimgrey
19
+ from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY
20
+
21
# ==============================================================================
# 1. AUTHENTICATION & CONFIGURATION
# ==============================================================================

# Maximum number of images accepted per analysis session.
MAX_IMAGES = 5
# Logical result field -> Google Sheet column letter, used by the batch writer.
SHEET_COLUMN_MAP = {
    "image1_summary": "J", "image2_summary": "K", "image3_summary": "L",
    "image4_summary": "M", "image5_summary": "N", "executive_summary": "O"
}

# --- Google Sheets ---
# Path to the service-account JSON; override via environment in deployment.
GOOGLE_SHEETS_CREDS_PATH = os.getenv("GOOGLE_SHEETS_CREDS_PATH", "dbott-464906-c46c8756b829.json")
is_sheets_authenticated = False
try:
    SCOPES = ["https://www.googleapis.com/auth/spreadsheets", "https://www.googleapis.com/auth/drive"]
    creds = Credentials.from_service_account_file(GOOGLE_SHEETS_CREDS_PATH, scopes=SCOPES)
    gc = gspread.authorize(creds)
    sh = gc.open("PatientData")
    ws = sh.get_worksheet(0)
    print("✅ Google Sheets authenticated successfully.")
    is_sheets_authenticated = True
except Exception as e:
    # Offline fallback: `ws` becomes a DataFrame so fetch_patient_data can
    # still serve a demo record without Sheets access.
    print(f"⚠️ Could not authenticate with Google Sheets: {e}. Using offline fallback data.")
    ws = pd.DataFrame({
        "abha_id": ["12345678901233"], "full_name": ["Pashwiwi Sharma"], "Age": [22], "weight_kg": ["64"],
        "reason_for_visit": ["Allergy on right hand..."], "allergies": ["Pollen"],
        "Medication": ["None"], "symptoms_description": ["Unsure of cause..."],
        "Summary": ["Patient presents with an acute allergic reaction..."]
    })

# --- Gemini API ---
is_gemini_configured = False
try:
    # SECURITY FIX: the API key was previously hard-coded here (and committed
    # publicly — it must be revoked/rotated). Read it from the environment
    # instead; a missing variable raises KeyError, which is caught below and
    # cleanly disables the AI features rather than crashing the app.
    GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
    genai.configure(api_key=GOOGLE_API_KEY)
    gemini_model = genai.GenerativeModel('gemini-1.5-flash-latest')
    print("✅ Gemini API configured successfully.")
    is_gemini_configured = True
except Exception as e:
    print(f"⚠️ Could not configure Gemini API: {e}. AI features will be disabled.")
    gemini_model = None
62
+
63
# ==============================================================================
# 2. SYSTEM PROMPTS
# ==============================================================================
# Prompt for the per-image vision analysis. NOTE: the numbered section titles
# are load-bearing — create_report_pdf() regex-matches "2. **Key Findings**"
# and "3. **Diagnostic Assessment**" to extract PDF image captions, so keep
# the numbering and bold markers in sync with those patterns.
SYSTEM_PROMPT_IMAGE_ANALYSIS = """
You are a highly skilled medical imaging expert AI. Analyze the provided medical image, prescription, or report and structure your response according to the following points using clear markdown formatting.
0. **Report Information** (if applicable): Doctor/Clinic Name, Date, Hospital/Facility, Patient Details (Age, Sex, etc.) if visible.
1. **Image Type & Region**: Modality (X-ray, MRI, CT, Ultrasound, Photo, etc.), anatomical region, and positioning.
2. **Key Findings**: Systematically list primary observations and potential abnormalities with detailed descriptions.
3. **Diagnostic Assessment**: Provide a primary assessment or impression. List differential diagnoses if applicable. Highlight any critical/urgent findings.
4. **Patient-Friendly Explanation**: Simplify the findings in clear, non-technical language.
---
***Disclaimer:** This AI-generated analysis is for informational purposes only and is NOT a substitute for professional medical advice, diagnosis, or treatment. A qualified healthcare professional must perform the final interpretation.*
"""

# Prompt for the final synthesized report. Its "### " headings are translated
# into PDF headings by create_report_pdf().
SYSTEM_PROMPT_DETAILED_REPORT = """You are an expert medical scribe AI. Your task is to create a single, comprehensive, and data-rich patient report by synthesizing all the provided information.
**Your Goal:** Weave the patient's demographics, their past medical summary, the reason for their current visit, and the findings from new medical images into a cohesive and professional narrative.
**Required Structure:** Generate the report in Markdown format using the exact following headings:
### Patient Information
(Summarize the patient's key demographic details: ABHA ID, Name, Age, Weight.)
### Medical History & Previous Summary
(Detail the patient's known allergies, current medications, and the summary from their previous visits. This provides historical context.)
### Current Visit Details
(Describe the primary reason for the current visit and the specific symptoms the patient is experiencing now.)
### Comprehensive Image Analysis
(Integrate the findings from all the provided image analyses. For each image, present its key findings and diagnostic assessment in a clear, organized manner. If there are multiple images, address each one.)
### Overall Synthesis & Impression
(This is the most important section. Provide a concise, professional synthesis that connects the dots. Correlate the patient's history and current symptoms with the new findings from the image analysis. Formulate a concluding impression based on the totality of the information.)
"""
91
+
92
# ==============================================================================
# 3. PDF GENERATION ENGINE
# ==============================================================================
def create_report_pdf(markdown_text, image_paths, image_analyses):
    """Render the synthesized markdown report (plus an image appendix) to PDF.

    Args:
        markdown_text: Report text in the limited markdown dialect produced by
            SYSTEM_PROMPT_DETAILED_REPORT ("### " headings, "* " bullets,
            "**bold**" spans).
        image_paths: Filesystem paths of the uploaded medical images.
        image_analyses: Per-image AI analysis texts, parallel to image_paths.

    Returns:
        The path of the generated PDF file, or None if generation failed
        (the error is printed to the console).
    """
    try:
        # Timestamped name avoids clobbering earlier reports in the same run.
        pdf_path = f"temp_report_{int(time.time())}.pdf"
        doc = SimpleDocTemplate(pdf_path, pagesize=(8.5 * inch, 11 * inch), topMargin=0.75*inch, bottomMargin=0.75*inch, leftMargin=0.75*inch, rightMargin=0.75*inch)
        styles = getSampleStyleSheet()
        styles.add(ParagraphStyle(name='TitleStyle', fontName='Helvetica-Bold', fontSize=18, alignment=TA_CENTER, textColor=navy, spaceAfter=24))
        styles.add(ParagraphStyle(name='HeadingStyle', fontName='Helvetica-Bold', fontSize=14, textColor=navy, spaceBefore=12, spaceAfter=6))
        styles.add(ParagraphStyle(name='Justify', parent=styles['Normal'], alignment=TA_JUSTIFY))
        styles.add(ParagraphStyle(name='BulletStyle', parent=styles['Justify'], leftIndent=20, spaceAfter=4))
        styles.add(ParagraphStyle(name='ImageTitle', parent=styles['Normal'], alignment=TA_CENTER, spaceBefore=18, spaceAfter=4, fontName='Helvetica-Bold'))
        styles.add(ParagraphStyle(name='ImageCaption', parent=styles['Normal'], alignment=TA_CENTER, spaceAfter=12, fontName='Helvetica-Oblique', textColor=dimgrey, fontSize=9))

        story = [Paragraph("Comprehensive Medical Report", styles['TitleStyle'])]

        # Translate the markdown line-by-line into reportlab flowables.
        for line in markdown_text.split('\n'):
            line = line.strip()
            if not line: continue
            # **bold** -> <b>bold</b> (reportlab's inline XML markup).
            line = re.sub(r'\*\*(.*?)\*\*', r'<b>\1</b>', line)
            if line.startswith('### '):
                story.append(Paragraph(line.replace('### ', ''), styles['HeadingStyle']))
            elif line.startswith('* '):
                story.append(Paragraph(f"• {line.replace('* ', '', 1)}", styles['BulletStyle']))
            else:
                story.append(Paragraph(line, styles['Justify']))

        if image_paths and image_analyses:
            story.append(PageBreak())
            story.append(Paragraph("Appendix: Medical Images & Findings", styles['HeadingStyle']))

            for i, img_path in enumerate(image_paths):
                if i < len(image_analyses):
                    analysis_text = image_analyses[i]
                    # Caption preference: "Diagnostic Assessment" section, else
                    # "Key Findings", else a generic placeholder. The patterns
                    # mirror the numbered headings in SYSTEM_PROMPT_IMAGE_ANALYSIS.
                    caption_text = "No specific assessment found."
                    assessment_match = re.search(r"3\.\s*\*\*Diagnostic Assessment\*\*\n(.*?)(?=\n\n|\n4\.|\Z)", analysis_text, re.DOTALL | re.IGNORECASE)
                    if assessment_match:
                        caption_text = assessment_match.group(1).strip()
                    else:
                        findings_match = re.search(r"2\.\s*\*\*Key Findings\*\*\n(.*?)(?=\n\n|\n3\.|\Z)", analysis_text, re.DOTALL | re.IGNORECASE)
                        if findings_match:
                            caption_text = findings_match.group(1).strip()

                    story.append(Paragraph(f"Image {i+1}", styles['ImageTitle']))
                    story.append(Paragraph(f"<i>Summary: {caption_text}</i>", styles['ImageCaption']))
                    try:
                        img = RLImage(img_path, width=5.5*inch, height=5.5*inch, kind='proportional')
                        img.hAlign = 'CENTER'
                        story.append(img)
                    except Exception:  # FIX: exception var was bound but unused
                        traceback.print_exc()
                        story.append(Paragraph(f"<i>Error: Could not display image {i+1}.</i>", styles['Normal']))

        doc.build(story)
        return pdf_path
    except Exception:  # FIX: exception var was bound but unused
        traceback.print_exc()
        return None
152
+
153
# ==============================================================================
# 4. CORE LOGIC
# ==============================================================================

async def update_google_sheet(abha_id, report_text, *image_analyses):
    """Persist the executive summary and per-image analyses to the sheet.

    Locates the patient's row by ABHA ID (column A) and performs one batch
    update. Returns a human-readable status string for the UI.
    """
    if not is_sheets_authenticated:
        gr.Warning("Google Sheets not authenticated. Skipping database update.")
        return "Could not update Sheet: Authentication failed."
    try:
        print(f"Attempting to update Google Sheet for ABHA ID: {abha_id}")
        # Search only in the first column for efficiency.
        cell = ws.find(abha_id, in_column=1)
        if not cell:
            gr.Warning(f"ABHA ID {abha_id} not found in Sheet. Skipping database update.")
            return f"Could not update Sheet: ABHA ID {abha_id} not found."

        target_row = cell.row
        pending_updates = []

        # Queue a write for every completed image analysis (skip pending/failed).
        for idx, analysis_text in enumerate(image_analyses[:MAX_IMAGES]):
            if analysis_text and "Pending" not in analysis_text and "Failed" not in analysis_text:
                column = SHEET_COLUMN_MAP[f"image{idx+1}_summary"]
                pending_updates.append({'range': f'{column}{target_row}', 'values': [[analysis_text]]})

        # The executive summary is always written.
        summary_column = SHEET_COLUMN_MAP["executive_summary"]
        pending_updates.append({'range': f'{summary_column}{target_row}', 'values': [[report_text]]})

        if pending_updates:
            ws.batch_update(pending_updates)
            print(f"✅ Successfully updated row {target_row} for ABHA ID: {abha_id}")
            return "✅ Database update complete."
        return "No new data to update in the database."
    except Exception as e:
        print(f"❌ FAILED to update Google Sheet: {e}")
        traceback.print_exc()
        gr.Error(f"Failed to update Google Sheet: {e}")
        return "❌ Database update failed. See console for details."
194
+
195
+
196
# NOTE: deliberately uses get_all_values() instead of get_all_records() so
# duplicate/empty header cells in the sheet cannot raise a GSpreadException.
async def fetch_patient_data(abha_id: str):
    """Look up a patient by ABHA ID and return (demographics_md, history_md).

    Reads the live worksheet when Sheets auth succeeded, otherwise the offline
    fallback DataFrame bound to `ws`. Both return values are markdown strings
    suitable for gr.Markdown; on lookup failure the first element carries the
    error/status message and the second is empty.
    """
    placeholder_demographics = "*Patient details will appear here.*"
    placeholder_summary = "*Patient history will appear here.*"
    if not abha_id:
        return placeholder_demographics, placeholder_summary

    try:
        if is_sheets_authenticated:
            # Fetch raw cells and build the DataFrame manually (robust path).
            all_values = ws.get_all_values()
            if len(all_values) < 2:
                return "Spreadsheet has no data records.", ""

            # Row 0 is the header; remaining rows are patient records.
            headers = all_values[0]
            data = all_values[1:]
            df = pd.DataFrame(data, columns=headers)
        else:
            # Offline mode: `ws` is already a DataFrame.
            df = ws

        # Normalize IDs to trimmed strings so numeric sheet cells still match.
        df["abha_id"] = df["abha_id"].astype(str).str.strip()
        row = df[df["abha_id"] == abha_id.strip()]

        if row.empty:
            return f"**Status:** No record found for ABHA ID: `{abha_id}`", ""

        record = row.iloc[0]
        patient_info_md = f"""
**ABHA ID:** {record.get('abha_id', 'N/A')}
**Name:** {record.get('full_name', 'N/A')}
**Age:** {record.get('Age', 'N/A')}
**Weight:** {record.get('weight_kg', 'N/A')} kg

---
**Reason for Visit:**
{record.get('reason_for_visit', 'N/A')}

**Symptoms:**
{record.get('symptoms_description', 'N/A')}
"""
        summary_text = f"""
**Known Allergies:**
{record.get('allergies', 'N/A')}

**Current Medications:**
{record.get('Medication', 'N/A')}

---
**Previous Visit Summary:**
{record.get('Summary', 'No previous summary available.')}
"""
        return patient_info_md.strip(), summary_text.strip()
    except Exception as e:
        traceback.print_exc()
        # Surface a friendlier message for the common bad-header failure mode.
        if "GSpreadException" in str(e):
            return ("**Error:** Could not read the spreadsheet. Please ensure the first row has unique, non-empty headers for all columns.", "")
        return f"**Error:** An error occurred while fetching data: {e}", ""
255
+
256
+
257
async def analyze_images_on_upload(files):
    """Gradio generator: preview uploads and stream per-image AI analyses.

    Yields tuples of component updates in the fixed order wired up by the
    `image_uploader.change` listener: (gallery, *rows, *images, *markdowns).
    Each yield pushes an intermediate UI state (previews -> pending -> result).
    """
    gallery_update = gr.update(value=None, visible=False)
    row_updates = [gr.update(visible=False)] * MAX_IMAGES
    image_updates = [gr.update(value=None)] * MAX_IMAGES
    markdown_updates = [gr.update(value="")] * MAX_IMAGES

    if not files:
        # Upload was cleared: hide all analysis panels.
        yield (gallery_update, *row_updates, *image_updates, *markdown_updates)
        return

    if len(files) > MAX_IMAGES:
        gr.Warning(f"Max {MAX_IMAGES} images allowed. Analyzing the first {MAX_IMAGES}.")
        files = files[:MAX_IMAGES]

    filepaths = [f.name for f in files]
    gallery_update = gr.update(value=filepaths, visible=True)

    # First pass: show previews with "pending" placeholders before any
    # model calls, so the user sees immediate feedback.
    for i in range(MAX_IMAGES):
        if i < len(files):
            row_updates[i] = gr.update(visible=True)
            image_updates[i] = gr.update(value=filepaths[i])
            markdown_updates[i] = gr.update(value="⌛ Pending analysis...")
        else:
            row_updates[i] = gr.update(visible=False)
            image_updates[i] = gr.update(value=None)
            markdown_updates[i] = gr.update(value="")

    yield (gallery_update, *row_updates, *image_updates, *markdown_updates)

    if not is_gemini_configured:
        for i in range(len(files)):
            markdown_updates[i] = gr.update(value="### Analysis Disabled\nGemini API not configured.")
        yield (gallery_update, *row_updates, *image_updates, *markdown_updates)
        return

    # Second pass: analyze images sequentially, yielding after each so the
    # UI updates one panel at a time.
    for i in range(len(files)):
        markdown_updates[i] = gr.update(value=f"⏳ Analyzing Image {i+1}...")
        yield (gallery_update, *row_updates, *image_updates, *markdown_updates)

        try:
            img = Image.open(filepaths[i])
            response = await gemini_model.generate_content_async(
                [SYSTEM_PROMPT_IMAGE_ANALYSIS, img],
                # Low temperature for factual, reproducible findings.
                generation_config=genai.GenerationConfig(temperature=0.1)
            )
            markdown_updates[i] = gr.update(value=response.text)
        except Exception as e:
            traceback.print_exc()
            markdown_updates[i] = gr.update(value=f"### Analysis Failed\nAn error occurred: {e}")

        yield (gallery_update, *row_updates, *image_updates, *markdown_updates)
309
+
310
+
311
async def generate_detailed_report(abha_id, uploaded_files, *image_analyses):
    """Gradio generator: synthesize the report, build the PDF, update the sheet.

    Yields (report_markdown, pdf_file_update, status_update) tuples so the UI
    shows progress while the long-running steps (model call, PDF render,
    Sheets write) complete.
    """
    yield "⏳ Generating report...", gr.update(visible=False), gr.update(visible=False, value="")

    if not is_gemini_configured:
        yield "### Report Generation Disabled\nGemini API not configured.", gr.update(visible=False), gr.update(visible=False, value="")
        return

    # Re-fetch so the report always reflects the latest patient record.
    patient_info, visit_summary = await fetch_patient_data(abha_id)
    if "No record found" in patient_info or "Error:" in patient_info:
        yield "### Report Generation Failed\nPlease fetch a valid patient record first.", gr.update(visible=False), gr.update(visible=False, value="")
        return

    # Assemble the full context block handed to the model.
    prompt_context = "Here is all the available information for a patient...\n"
    prompt_context += f"## PATIENT DETAILS & CURRENT VISIT INFO:\n{patient_info}\n\n"
    prompt_context += f"## PAST MEDICAL SUMMARY:\n{visit_summary}\n\n"
    # Only analyses that actually completed are included and persisted.
    analysis_texts = [text for text in image_analyses if text and "Pending" not in text and "Failed" not in text]
    if analysis_texts:
        prompt_context += "## NEW IMAGE ANALYSIS FINDINGS:\n"
        for i, text in enumerate(analysis_texts):
            prompt_context += f"### Analysis of Image {i+1}\n{text}\n\n"
    else:
        prompt_context += "## NEW IMAGE ANALYSIS FINDINGS:\nNo successful image analyses were performed.\n\n"

    final_prompt = [SYSTEM_PROMPT_DETAILED_REPORT, prompt_context]

    try:
        response = await gemini_model.generate_content_async(final_prompt, generation_config=genai.GenerationConfig(temperature=0.4))
        markdown_report = response.text

        valid_image_paths = [f.name for f in uploaded_files[:MAX_IMAGES]] if uploaded_files else []
        pdf_path = create_report_pdf(markdown_report, valid_image_paths, analysis_texts)
        pdf_update = gr.update(value=pdf_path, visible=True) if pdf_path else gr.update(visible=False)

        yield markdown_report, pdf_update, gr.update(visible=True, value="🔄 Updating database...")

        status_message = await update_google_sheet(abha_id, markdown_report, *analysis_texts)

        yield markdown_report, pdf_update, gr.update(visible=True, value=status_message)
        # Leave the final status visible briefly, then hide it.
        await asyncio.sleep(3)
        yield markdown_report, pdf_update, gr.update(visible=False)

    except Exception as e:
        traceback.print_exc()
        yield f"### Report Generation Failed\nAn error occurred: {e}", gr.update(visible=False), gr.update(visible=False)
356
+
357
+
358
# ==============================================================================
# 5. GRADIO UI LAYOUT
# ==============================================================================
with gr.Blocks(theme=Base(), title="Advanced Medical Report Generator") as app:
    gr.Markdown("# Advanced Medical Report Generator")

    # --- Patient lookup ---
    with gr.Row():
        abha_id_input = gr.Textbox(label="Enter Patient ABHA ID", scale=3)
        fetch_button = gr.Button("Fetch Patient Details", variant="primary", scale=1)

    with gr.Row(variant="panel"):
        with gr.Column(scale=1):
            with gr.Accordion("Patient Demographics & Current Visit", open=True):
                patient_info_output = gr.Markdown("*Patient details will appear here.*")
        with gr.Column(scale=1):
            with gr.Accordion("Medical History & Visit Summary", open=True):
                summary_output = gr.Markdown("*Patient history will appear here.*")

    gr.Markdown("---")

    # --- Image upload & per-image analysis panels ---
    gr.Markdown("### 1. Upload Scans & View AI Analysis")
    with gr.Column(variant="panel"):
        image_uploader = gr.File(label=f"Upload up to {MAX_IMAGES} images", file_count="multiple", file_types=["image"])
        image_gallery = gr.Gallery(label="Image Preview", visible=False, columns=5, height="auto")

    # Pre-build MAX_IMAGES hidden rows; analyze_images_on_upload toggles their
    # visibility and fills them as results stream in.
    analysis_rows, analysis_images, analysis_markdowns = [], [], []
    for i in range(MAX_IMAGES):
        with gr.Row(visible=False, variant='panel') as row:
            with gr.Column(scale=1, min_width=200):
                img = gr.Image(interactive=False, show_label=False)
            with gr.Column(scale=2):
                md = gr.Markdown()
        analysis_rows.append(row)
        analysis_images.append(img)
        analysis_markdowns.append(md)

    gr.Markdown("---")
    # --- Final report generation ---
    gr.Markdown("### 2. Generate Final Synthesized Report")
    with gr.Column(variant='panel'):
        generate_report_button = gr.Button("Generate Detailed Report & Update Database", variant="primary")
        status_output = gr.Markdown(visible=False)
        gr.Markdown("#### Report Preview")
        report_preview_output = gr.Markdown("*Click the button above to generate a comprehensive, synthesized report.*")
        download_report_button = gr.File(label="Download Report (PDF)", visible=False)

    # ==============================================================================
    # 6. EVENT LISTENERS
    # ==============================================================================
    fetch_button.click(
        fn=fetch_patient_data,
        inputs=[abha_id_input],
        outputs=[patient_info_output, summary_output]
    )

    # Output order must match the tuples yielded by analyze_images_on_upload.
    image_uploader.change(
        fn=analyze_images_on_upload,
        inputs=[image_uploader],
        outputs=[image_gallery, *analysis_rows, *analysis_images, *analysis_markdowns]
    )

    generate_report_button.click(
        fn=generate_detailed_report,
        inputs=[
            abha_id_input,
            image_uploader,
            *analysis_markdowns
        ],
        outputs=[report_preview_output, download_report_button, status_output]
    )

# ==============================================================================
# 7. LAUNCH APP
# ==============================================================================
if __name__ == "__main__":
    # share=True exposes a public Gradio link; debug=True keeps verbose logs.
    app.launch(share=True, debug=True)
categorizer_prompt.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a medical information categorization engine. Your sole purpose is to analyze a new piece of conversation and determine which section of a standard medical note it belongs to.
2
+
3
+ **CRITICAL INSTRUCTIONS:**
4
+ 1. Analyze the "NEW TEXT SNIPPET". Use the "FULL TRANSCRIPT HISTORY" for context if needed.
5
+ 2. Identify which single medical note section the new information belongs to. The available sections are: `Patient Information`, `History of Present Illness (HPI)`, `Past Medical History (PMH)`, `Medication History`, `Allergies`.
6
+ 3. Summarize the new piece of information into a single, concise bullet point.
7
+ 4. You MUST respond in a strict JSON format. **Your response MUST be a JSON object with two keys: "section" and "content".** Do not respond with a plain string.
8
+ 5. If the new text is conversational filler, a greeting, or does not belong in any medical section, you MUST return the JSON object: `{"section": "N/A", "content": ""}`.
9
+ 6. Do not invent information. If the snippet is unclear, use the "N/A" response.
10
+
11
+ **Example 1:**
12
+ - NEW TEXT SNIPPET: "and the pain has been going on for about three days now"
13
+ - YOUR RESPONSE: `{"section": "History of Present Illness (HPI)", "content": "- Onset was three days ago."}`
14
+
15
+ **Example 2:**
16
+ - NEW TEXT SNIPPET: "I'm allergic to penicillin, it gives me hives"
17
+ - YOUR RESPONSE: `{"section": "Allergies", "content": "- Penicillin (causes hives)."}`
18
+
19
+ **Example 3:**
20
+ - NEW TEXT SNIPPET: "okay thank you so much doc"
21
+ - YOUR RESPONSE: `{"section": "N/A", "content": ""}`
dbott-464906-c46c8756b829.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{
  "type": "service_account",
  "project_id": "dbott-464906",
  "private_key_id": "REDACTED",
  "private_key": "-----BEGIN PRIVATE KEY-----\nREDACTED: the private key previously committed here was publicly exposed and must be revoked and re-issued in the Google Cloud console; keep the replacement file out of version control\n-----END PRIVATE KEY-----\n",
  "client_email": "spreedsheet1@dbott-464906.iam.gserviceaccount.com",
  "client_id": "113104737724933717188",
  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
  "token_uri": "https://oauth2.googleapis.com/token",
  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/spreedsheet1%40dbott-464906.iam.gserviceaccount.com",
  "universe_domain": "googleapis.com"
}
doctor_prompt.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a clinical decision support assistant for a doctor. Your role is to listen to the live transcript of a patient encounter and provide real-time, actionable insights.
2
+
3
+ **CRITICAL INSTRUCTIONS:**
4
+ 1. Analyze the **FULL CONVERSATION TRANSCRIPT**.
5
+ 2. Your output MUST be in two distinct sections: "Suggested Next Steps" and "Key Questions Asked by Doctor".
6
+ 3. Under "Suggested Next Steps," recommend potential actions, tests, or follow-ups based on the patient's symptoms and history (e.g., "Consider ordering a CBC," "Recommend a follow-up in 2 weeks").
7
+ 4. Under "Key Questions Asked by Doctor," list the important diagnostic questions the doctor has asked the patient so far. This helps the doctor track what has been covered.
8
+ 5. Be concise and to the point.
9
+ 6. Format the output using clean Markdown (`###` for headers, `-` for bullets).
10
+ 7. If there isn't enough information for a section, you can state "Awaiting more information..." under the relevant heading.
11
+ 8. Your output MUST ONLY be the two-section Markdown note. Do not add any other text or explanations.
jarvis_command_prompt.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ You are a highly efficient command parser. Your sole function is to analyze a given text segment for the activation keyword "jarvis".
2
+
3
+ **CRITICAL INSTRUCTIONS:**
4
+ 1. Read the "TEXT SEGMENT TO ANALYZE".
5
+ 2. If you find the keyword "jarvis" (case-insensitive), extract the specific and complete command or question that immediately follows it.
6
+ 3. Your output MUST BE ONLY the extracted command text.
7
+ 4. If the keyword "jarvis" is NOT present in the text, you MUST respond with the exact string: `[NO_COMMAND]`
8
+ 5. Do not add any greetings, explanations, or introductory phrases like "The command is:".
jarvis_prompt.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ You are a highly efficient command parser. Your sole function is to analyze a given text segment for the activation keyword "jarvis".
2
+
3
+ **CRITICAL INSTRUCTIONS:**
4
+ 1. Read the "TEXT SEGMENT TO ANALYZE".
5
+ 2. If you find the keyword "jarvis" (case-insensitive), extract the specific and complete command or question that immediately follows it.
6
+ 3. Your output MUST BE ONLY the extracted command text.
7
+ 4. If the keyword "jarvis" is NOT present in the text, you MUST respond with the exact string: `[NO_COMMAND]`
8
+ 5. Do not add any greetings, explanations, or introductory phrases like "The command is:".
system_prompt.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert medical scribe. Your task is to generate a structured clinical note from a real-time conversation transcript.
2
+
3
+ **CRITICAL INSTRUCTIONS:**
4
+ 1. Analyze the **FULL CONVERSATION TRANSCRIPT**.
5
+ 2. Use the **PREVIOUS SUMMARY** as a reference to build upon. Do not repeat information.
6
+ 3. **ONLY create a heading and its corresponding section if you find relevant information for it in the transcript.** DO NOT output empty sections or headers like "Past Medical History:" if there is no such information.
7
+ 4. Format the output using clean and professional Markdown. Use `###` for main headings (e.g., `### History of Present Illness (HPI)`) and `-` for bullet points.
8
+ 5. Keep the language concise and clinical.
9
+ 6. Your output MUST ONLY be the Markdown-formatted note. Do not include any other text, greetings, or explanations.
10
+
11
+ **Available Section Headings (Use only when applicable):**
12
+ - Patient Information
13
+ - History of Present Illness (HPI)
14
+ - Past Medical History (PMH)
15
+ - Medication History
16
+ - Allergies