Amarthya7 commited on
Commit
fa40b2d
·
verified ·
1 Parent(s): e88e7d0

Update mediSync/app.py

Browse files
Files changed (1) hide show
  1. mediSync/app.py +593 -560
mediSync/app.py CHANGED
@@ -1,560 +1,593 @@
1
- import logging
2
- import os
3
- import sys
4
- import tempfile
5
- from pathlib import Path
6
-
7
- import gradio as gr
8
- import matplotlib.pyplot as plt
9
- from PIL import Image
10
-
11
- # Add parent directory to path
12
- parent_dir = os.path.dirname(os.path.abspath(__file__))
13
- sys.path.append(parent_dir)
14
-
15
- # Import our modules
16
- from models.multimodal_fusion import MultimodalFusion
17
- from utils.preprocessing import enhance_xray_image, normalize_report_text
18
- from utils.visualization import (
19
- plot_image_prediction,
20
- plot_multimodal_results,
21
- plot_report_entities,
22
- )
23
-
24
- # Set up logging
25
- logging.basicConfig(
26
- level=logging.INFO,
27
- format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
28
- handlers=[logging.StreamHandler(), logging.FileHandler("mediSync.log")],
29
- )
30
- logger = logging.getLogger(__name__)
31
-
32
- # Create temporary directory for sample data if it doesn't exist
33
- os.makedirs(os.path.join(parent_dir, "data", "sample"), exist_ok=True)
34
-
35
-
36
class MediSyncApp:
    """Main application class for the MediSync multi-modal medical analysis system."""

    def __init__(self):
        """Set up logging and defer model construction until first use."""
        self.logger = logging.getLogger(__name__)
        self.logger.info("Initializing MediSync application")

        # Models are loaded lazily by load_models(); None marks "not yet loaded".
        self.text_model = None
        self.image_model = None
        self.fusion_model = None
51
- def load_models(self):
52
- """
53
- Load models if not already loaded.
54
-
55
- Returns:
56
- bool: True if models loaded successfully, False otherwise
57
- """
58
- try:
59
- if self.fusion_model is None:
60
- self.logger.info("Loading models...")
61
- self.fusion_model = MultimodalFusion()
62
- self.image_model = self.fusion_model.image_analyzer
63
- self.text_model = self.fusion_model.text_analyzer
64
- self.logger.info("Models loaded successfully")
65
- return True
66
-
67
- except Exception as e:
68
- self.logger.error(f"Error loading models: {e}")
69
- return False
70
-
71
    def analyze_image(self, image):
        """
        Analyze a medical image and render the findings as HTML.

        Args:
            image: Image uploaded through Gradio -- either a filesystem path
                (str) or a PIL image object (anything with a .save() method).

        Returns:
            tuple: (image, image_results_html, plot_as_html); on failure the
            second element is an error message and the third is None.
        """
        try:
            # Ensure models are loaded (lazy initialization)
            if not self.load_models() or self.image_model is None:
                return image, "Error: Models not loaded properly.", None

            # Save uploaded image to a temporary file so the analyzer can
            # read it from disk.
            # NOTE(review): the mkdtemp() directory is never removed, so each
            # call leaks a temp dir -- consider cleanup after analysis.
            temp_dir = tempfile.mkdtemp()
            temp_path = os.path.join(temp_dir, "upload.png")

            if isinstance(image, str):
                # Copy the file if it's a path
                from shutil import copyfile

                copyfile(image, temp_path)
            else:
                # Save if it's a Gradio UploadButton image
                image.save(temp_path)

            # Run image analysis
            self.logger.info(f"Analyzing image: {temp_path}")
            results = self.image_model.analyze(temp_path)

            # Create visualization of predictions over the original image
            fig = plot_image_prediction(
                image,
                results.get("predictions", []),
                f"Primary Finding: {results.get('primary_finding', 'Unknown')}",
            )

            # Convert to HTML for display (fig is closed by fig_to_html)
            plot_html = self.fig_to_html(fig)

            # Format results as HTML
            html_result = f"""
            <h2>X-ray Analysis Results</h2>
            <p><strong>Primary Finding:</strong> {results.get("primary_finding", "Unknown")}</p>
            <p><strong>Confidence:</strong> {results.get("confidence", 0):.1%}</p>
            <p><strong>Abnormality Detected:</strong> {"Yes" if results.get("has_abnormality", False) else "No"}</p>

            <h3>Top Predictions:</h3>
            <ul>
            """

            # Add top 5 predictions (list of (label, probability) pairs)
            for label, prob in results.get("predictions", [])[:5]:
                html_result += f"<li>{label}: {prob:.1%}</li>"

            html_result += "</ul>"

            # Add model-generated natural-language explanation
            explanation = self.image_model.get_explanation(results)
            html_result += f"<h3>Analysis Explanation:</h3><p>{explanation}</p>"

            return image, html_result, plot_html

        except Exception as e:
            # Broad catch: surface the error in the UI instead of crashing Gradio
            self.logger.error(f"Error in image analysis: {e}")
            return image, f"Error analyzing image: {str(e)}", None
139
-
140
    def analyze_text(self, text):
        """
        Analyze a medical report text and render the findings as HTML.

        Args:
            text: Report text input through Gradio

        Returns:
            tuple: (text, text_results_html, entities_plot_html); on failure
            the second element is an error message and the third is None.
        """
        try:
            # Ensure models are loaded (lazy initialization)
            if not self.load_models() or self.text_model is None:
                return text, "Error: Models not loaded properly.", None

            # Reject empty/near-empty input before running the model
            if not text or len(text.strip()) < 10:
                return (
                    text,
                    "Error: Please enter a valid medical report text (at least 10 characters).",
                    None,
                )

            # Normalize text (whitespace/formatting cleanup helper)
            normalized_text = normalize_report_text(text)

            # Run text analysis
            self.logger.info("Analyzing medical report text")
            results = self.text_model.analyze(normalized_text)

            # Get entities and create visualization
            entities = results.get("entities", {})
            fig = plot_report_entities(normalized_text, entities)

            # Convert to HTML for display (fig is closed by fig_to_html)
            entities_plot_html = self.fig_to_html(fig)

            # Format results as HTML
            html_result = f"""
            <h2>Medical Report Analysis Results</h2>
            <p><strong>Severity Level:</strong> {results.get("severity", {}).get("level", "Unknown")}</p>
            <p><strong>Severity Score:</strong> {results.get("severity", {}).get("score", 0)}/4</p>
            <p><strong>Confidence:</strong> {results.get("severity", {}).get("confidence", 0):.1%}</p>

            <h3>Key Findings:</h3>
            <ul>
            """

            # Add findings
            findings = results.get("findings", [])
            if findings:
                for finding in findings:
                    html_result += f"<li>{finding}</li>"
            else:
                html_result += "<li>No specific findings detailed.</li>"

            html_result += "</ul>"

            # Add extracted entities, one paragraph per non-empty category
            html_result += "<h3>Extracted Medical Entities:</h3>"

            for category, items in entities.items():
                if items:
                    html_result += f"<p><strong>{category.capitalize()}:</strong> {', '.join(items)}</p>"

            # Add follow-up recommendations
            html_result += "<h3>Follow-up Recommendations:</h3><ul>"
            followups = results.get("followup_recommendations", [])

            if followups:
                for rec in followups:
                    html_result += f"<li>{rec}</li>"
            else:
                html_result += "<li>No specific follow-up recommendations.</li>"

            html_result += "</ul>"

            return text, html_result, entities_plot_html

        except Exception as e:
            # Broad catch: surface the error in the UI instead of crashing Gradio
            self.logger.error(f"Error in text analysis: {e}")
            return text, f"Error analyzing text: {str(e)}", None
222
-
223
    def analyze_multimodal(self, image, text):
        """
        Perform combined (image + text) analysis and render results as HTML.

        Args:
            image: Image uploaded through Gradio (path str or PIL image)
            text: Report text input through Gradio

        Returns:
            tuple: (results_html, multimodal_plot_html); on failure the first
            element is an error message and the second is None.
        """
        try:
            # Ensure models are loaded (lazy initialization)
            if not self.load_models() or self.fusion_model is None:
                return "Error: Models not loaded properly.", None

            # Validate inputs before doing any work
            if image is None:
                return "Error: Please upload an X-ray image for analysis.", None

            if not text or len(text.strip()) < 10:
                return (
                    "Error: Please enter a valid medical report text (at least 10 characters).",
                    None,
                )

            # Save uploaded image to a temporary file for the fusion model.
            # NOTE(review): the mkdtemp() directory is never removed -- leaks
            # one temp dir per call; consider cleanup after analysis.
            temp_dir = tempfile.mkdtemp()
            temp_path = os.path.join(temp_dir, "upload.png")

            if isinstance(image, str):
                # Copy the file if it's a path
                from shutil import copyfile

                copyfile(image, temp_path)
            else:
                # Save if it's a Gradio UploadButton image
                image.save(temp_path)

            # Normalize text
            normalized_text = normalize_report_text(text)

            # Run multimodal analysis
            self.logger.info("Performing multimodal analysis")
            results = self.fusion_model.analyze(temp_path, normalized_text)

            # Create visualization
            fig = plot_multimodal_results(results, image, text)

            # Convert to HTML for display (fig is closed by fig_to_html)
            plot_html = self.fig_to_html(fig)

            # Generate explanation
            # NOTE(review): 'explanation' is computed but never added to the
            # output HTML (analyze_image appends it) -- likely an oversight.
            explanation = self.fusion_model.get_explanation(results)

            # Format results as HTML
            html_result = f"""
            <h2>Multimodal Medical Analysis Results</h2>

            <h3>Overview</h3>
            <p><strong>Primary Finding:</strong> {results.get("primary_finding", "Unknown")}</p>
            <p><strong>Severity Level:</strong> {results.get("severity", {}).get("level", "Unknown")}</p>
            <p><strong>Severity Score:</strong> {results.get("severity", {}).get("score", 0)}/4</p>
            <p><strong>Agreement Score:</strong> {results.get("agreement_score", 0):.0%}</p>

            <h3>Detailed Findings</h3>
            <ul>
            """

            # Add findings
            findings = results.get("findings", [])
            if findings:
                for finding in findings:
                    html_result += f"<li>{finding}</li>"
            else:
                html_result += "<li>No specific findings detailed.</li>"

            html_result += "</ul>"

            # Add follow-up recommendations
            html_result += "<h3>Recommended Follow-up</h3><ul>"
            followups = results.get("followup_recommendations", [])

            if followups:
                for rec in followups:
                    html_result += f"<li>{rec}</li>"
            else:
                html_result += (
                    "<li>No specific follow-up recommendations provided.</li>"
                )

            html_result += "</ul>"

            # Add confidence note / medical disclaimer
            confidence = results.get("severity", {}).get("confidence", 0)
            html_result += f"""
            <p><em>Note: This analysis has a confidence level of {confidence:.0%}.
            Please consult with healthcare professionals for official diagnosis.</em></p>
            """

            return html_result, plot_html

        except Exception as e:
            # Broad catch: surface the error in the UI instead of crashing Gradio
            self.logger.error(f"Error in multimodal analysis: {e}")
            return f"Error in multimodal analysis: {str(e)}", None
328
-
329
- def enhance_image(self, image):
330
- """
331
- Enhance X-ray image contrast.
332
-
333
- Args:
334
- image: Image file uploaded through Gradio
335
-
336
- Returns:
337
- PIL.Image: Enhanced image
338
- """
339
- try:
340
- if image is None:
341
- return None
342
-
343
- # Save uploaded image to a temporary file
344
- temp_dir = tempfile.mkdtemp()
345
- temp_path = os.path.join(temp_dir, "upload.png")
346
-
347
- if isinstance(image, str):
348
- # Copy the file if it's a path
349
- from shutil import copyfile
350
-
351
- copyfile(image, temp_path)
352
- else:
353
- # Save if it's a Gradio UploadButton image
354
- image.save(temp_path)
355
-
356
- # Enhance image
357
- self.logger.info(f"Enhancing image: {temp_path}")
358
- output_path = os.path.join(temp_dir, "enhanced.png")
359
- enhance_xray_image(temp_path, output_path)
360
-
361
- # Load enhanced image
362
- enhanced = Image.open(output_path)
363
- return enhanced
364
-
365
- except Exception as e:
366
- self.logger.error(f"Error enhancing image: {e}")
367
- return image # Return original image on error
368
-
369
- def fig_to_html(self, fig):
370
- """Convert matplotlib figure to HTML for display in Gradio."""
371
- try:
372
- import base64
373
- import io
374
-
375
- buf = io.BytesIO()
376
- fig.savefig(buf, format="png", bbox_inches="tight")
377
- buf.seek(0)
378
- img_str = base64.b64encode(buf.read()).decode("utf-8")
379
- plt.close(fig)
380
-
381
- return f'<img src="data:image/png;base64,{img_str}" alt="Analysis Plot">'
382
-
383
- except Exception as e:
384
- self.logger.error(f"Error converting figure to HTML: {e}")
385
- return "<p>Error displaying visualization.</p>"
386
-
387
-
388
def create_interface():
    """Create and launch the Gradio interface.

    Builds a four-tab Blocks UI (multimodal, image-only, text-only, about),
    wires the buttons to MediSyncApp methods, and blocks in interface.launch().
    """

    app = MediSyncApp()

    # Example medical report for demo
    example_report = """
    CHEST X-RAY EXAMINATION

    CLINICAL HISTORY: 55-year-old male with cough and fever.

    FINDINGS: The heart size is at the upper limits of normal. The lungs are clear without focal consolidation,
    effusion, or pneumothorax. There is mild prominence of the pulmonary vasculature. No pleural effusion is seen.
    There is a small nodular opacity noted in the right lower lobe measuring approximately 8mm, which is suspicious
    and warrants further investigation. The mediastinum is unremarkable. The visualized bony structures show no acute abnormalities.

    IMPRESSION:
    1. Mild cardiomegaly.
    2. 8mm nodular opacity in the right lower lobe, recommend follow-up CT for further evaluation.
    3. No acute pulmonary parenchymal abnormality.

    RECOMMENDATIONS: Follow-up chest CT to further characterize the nodular opacity in the right lower lobe.
    """

    # Get sample image path if available (first .png/.jpg under data/sample)
    sample_images_dir = Path(parent_dir) / "data" / "sample"
    sample_images = list(sample_images_dir.glob("*.png")) + list(
        sample_images_dir.glob("*.jpg")
    )

    sample_image_path = None
    if sample_images:
        sample_image_path = str(sample_images[0])

    # Define interface
    with gr.Blocks(
        title="MediSync: Multi-Modal Medical Analysis System", theme=gr.themes.Soft()
    ) as interface:
        gr.Markdown("""
        # MediSync: Multi-Modal Medical Analysis System

        This AI-powered healthcare solution combines X-ray image analysis with patient report text processing
        to provide comprehensive medical insights.

        ## How to Use
        1. Upload a chest X-ray image
        2. Enter the corresponding medical report text
        3. Choose the analysis type: image-only, text-only, or multimodal (combined)
        """)

        with gr.Tab("Multimodal Analysis"):
            with gr.Row():
                with gr.Column():
                    multi_img_input = gr.Image(label="Upload X-ray Image", type="pil")
                    multi_img_enhance = gr.Button("Enhance Image")

                    # Pre-fill the example report only when no sample image
                    # exists (otherwise the Examples widget supplies both).
                    multi_text_input = gr.Textbox(
                        label="Enter Medical Report Text",
                        placeholder="Enter the radiologist's report text here...",
                        lines=10,
                        value=example_report if sample_image_path is None else None,
                    )

                    multi_analyze_btn = gr.Button(
                        "Analyze Image & Text", variant="primary"
                    )

                with gr.Column():
                    multi_results = gr.HTML(label="Analysis Results")
                    multi_plot = gr.HTML(label="Visualization")

            # Set up examples if sample image exists
            if sample_image_path:
                gr.Examples(
                    examples=[[sample_image_path, example_report]],
                    inputs=[multi_img_input, multi_text_input],
                    label="Example X-ray and Report",
                )

        with gr.Tab("Image Analysis"):
            with gr.Row():
                with gr.Column():
                    img_input = gr.Image(label="Upload X-ray Image", type="pil")
                    img_enhance = gr.Button("Enhance Image")
                    img_analyze_btn = gr.Button("Analyze Image", variant="primary")

                with gr.Column():
                    img_output = gr.Image(label="Processed Image")
                    img_results = gr.HTML(label="Analysis Results")
                    img_plot = gr.HTML(label="Visualization")

            # Set up example if sample image exists
            if sample_image_path:
                gr.Examples(
                    examples=[[sample_image_path]],
                    inputs=[img_input],
                    label="Example X-ray Image",
                )

        with gr.Tab("Text Analysis"):
            with gr.Row():
                with gr.Column():
                    text_input = gr.Textbox(
                        label="Enter Medical Report Text",
                        placeholder="Enter the radiologist's report text here...",
                        lines=10,
                        value=example_report,
                    )
                    text_analyze_btn = gr.Button("Analyze Text", variant="primary")

                with gr.Column():
                    text_output = gr.Textbox(label="Processed Text")
                    text_results = gr.HTML(label="Analysis Results")
                    text_plot = gr.HTML(label="Entity Visualization")

            # Set up example (text example needs no sample image)
            gr.Examples(
                examples=[[example_report]],
                inputs=[text_input],
                label="Example Medical Report",
            )

        with gr.Tab("About"):
            gr.Markdown("""
            ## About MediSync

            MediSync is an AI-powered healthcare solution that uses multi-modal analysis to provide comprehensive insights from medical images and reports.

            ### Key Features

            - **X-ray Image Analysis**: Detects abnormalities in chest X-rays using pre-trained vision models
            - **Medical Report Processing**: Extracts key information from patient reports using NLP models
            - **Multi-modal Integration**: Combines insights from both image and text data for more accurate analysis

            ### Models Used

            - **X-ray Analysis**: facebook/deit-base-patch16-224-medical-cxr
            - **Medical Text Analysis**: medicalai/ClinicalBERT

            ### Important Disclaimer

            This tool is for educational and research purposes only. It is not intended to provide medical advice or replace professional healthcare. Always consult with qualified healthcare providers for medical decisions.
            """)

        # Set up event handlers
        # Enhance replaces the multimodal input image in place
        multi_img_enhance.click(
            app.enhance_image, inputs=multi_img_input, outputs=multi_img_input
        )
        multi_analyze_btn.click(
            app.analyze_multimodal,
            inputs=[multi_img_input, multi_text_input],
            outputs=[multi_results, multi_plot],
        )

        # On the image tab, the enhanced image goes to the output slot instead
        img_enhance.click(app.enhance_image, inputs=img_input, outputs=img_output)
        img_analyze_btn.click(
            app.analyze_image,
            inputs=img_input,
            outputs=[img_output, img_results, img_plot],
        )

        text_analyze_btn.click(
            app.analyze_text,
            inputs=text_input,
            outputs=[text_output, text_results, text_plot],
        )

    # Run the interface (blocks until the server is stopped)
    interface.launch()
557
-
558
-
559
# Script entry point: build the Gradio UI and start the blocking server loop.
if __name__ == "__main__":
    create_interface()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import sys
4
+ import tempfile
5
+
6
+ import matplotlib.pyplot as plt
7
+ from PIL import Image
8
+
9
+ # Add parent directory to path
10
+ parent_dir = os.path.dirname(os.path.abspath(__file__))
11
+ sys.path.append(parent_dir)
12
+
13
+ # Import our modules
14
+ from models.multimodal_fusion import MultimodalFusion
15
+ from utils.preprocessing import enhance_xray_image, normalize_report_text
16
+ from utils.visualization import (
17
+ plot_image_prediction,
18
+ plot_multimodal_results,
19
+ plot_report_entities,
20
+ )
21
+
22
+ # Set up logging
23
+ logging.basicConfig(
24
+ level=logging.INFO,
25
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
26
+ handlers=[logging.StreamHandler(), logging.FileHandler("mediSync.log")],
27
+ )
28
+ logger = logging.getLogger(__name__)
29
+
30
+ # Create temporary directory for sample data if it doesn't exist
31
+ os.makedirs(os.path.join(parent_dir, "data", "sample"), exist_ok=True)
32
+
33
+
34
class MediSyncApp:
    """Main application class for the MediSync multi-modal medical analysis system."""

    def __init__(self):
        """Set up logging and defer model construction until first use."""
        self.logger = logging.getLogger(__name__)
        self.logger.info("Initializing MediSync application")

        # Models are loaded lazily by load_models(); None marks "not yet loaded".
        self.text_model = None
        self.image_model = None
        self.fusion_model = None
48
+
49
+ def load_models(self):
50
+ """
51
+ Load models if not already loaded.
52
+
53
+ Returns:
54
+ bool: True if models loaded successfully, False otherwise
55
+ """
56
+ try:
57
+ if self.fusion_model is None:
58
+ self.logger.info("Loading models...")
59
+ self.fusion_model = MultimodalFusion()
60
+ self.image_model = self.fusion_model.image_analyzer
61
+ self.text_model = self.fusion_model.text_analyzer
62
+ self.logger.info("Models loaded successfully")
63
+ return True
64
+
65
+ except Exception as e:
66
+ self.logger.error(f"Error loading models: {e}")
67
+ return False
68
+
69
    def analyze_image(self, image):
        """
        Analyze a medical image and render the findings as HTML.

        Args:
            image: Image uploaded through Gradio -- either a filesystem path
                (str) or a PIL image object (anything with a .save() method).

        Returns:
            tuple: (image, image_results_html, plot_as_html); on failure the
            second element is an error message and the third is None.
        """
        try:
            # Ensure models are loaded (lazy initialization)
            if not self.load_models() or self.image_model is None:
                return image, "Error: Models not loaded properly.", None

            # Save uploaded image to a temporary file so the analyzer can
            # read it from disk.
            # NOTE(review): the mkdtemp() directory is never removed, so each
            # call leaks a temp dir -- consider cleanup after analysis.
            temp_dir = tempfile.mkdtemp()
            temp_path = os.path.join(temp_dir, "upload.png")

            if isinstance(image, str):
                # Copy the file if it's a path
                from shutil import copyfile

                copyfile(image, temp_path)
            else:
                # Save if it's a Gradio UploadButton image
                image.save(temp_path)

            # Run image analysis
            self.logger.info(f"Analyzing image: {temp_path}")
            results = self.image_model.analyze(temp_path)

            # Create visualization of predictions over the original image
            fig = plot_image_prediction(
                image,
                results.get("predictions", []),
                f"Primary Finding: {results.get('primary_finding', 'Unknown')}",
            )

            # Convert to HTML for display (fig is closed by fig_to_html)
            plot_html = self.fig_to_html(fig)

            # Format results as HTML
            html_result = f"""
            <h2>X-ray Analysis Results</h2>
            <p><strong>Primary Finding:</strong> {results.get("primary_finding", "Unknown")}</p>
            <p><strong>Confidence:</strong> {results.get("confidence", 0):.1%}</p>
            <p><strong>Abnormality Detected:</strong> {"Yes" if results.get("has_abnormality", False) else "No"}</p>

            <h3>Top Predictions:</h3>
            <ul>
            """

            # Add top 5 predictions (list of (label, probability) pairs)
            for label, prob in results.get("predictions", [])[:5]:
                html_result += f"<li>{label}: {prob:.1%}</li>"

            html_result += "</ul>"

            # Add model-generated natural-language explanation
            explanation = self.image_model.get_explanation(results)
            html_result += f"<h3>Analysis Explanation:</h3><p>{explanation}</p>"

            return image, html_result, plot_html

        except Exception as e:
            # Broad catch: surface the error in the UI instead of crashing Gradio
            self.logger.error(f"Error in image analysis: {e}")
            return image, f"Error analyzing image: {str(e)}", None
137
+
138
    def analyze_text(self, text):
        """
        Analyze a medical report text and render the findings as HTML.

        Args:
            text: Report text input through Gradio

        Returns:
            tuple: (text, text_results_html, entities_plot_html); on failure
            the second element is an error message and the third is None.
        """
        try:
            # Ensure models are loaded (lazy initialization)
            if not self.load_models() or self.text_model is None:
                return text, "Error: Models not loaded properly.", None

            # Reject empty/near-empty input before running the model
            if not text or len(text.strip()) < 10:
                return (
                    text,
                    "Error: Please enter a valid medical report text (at least 10 characters).",
                    None,
                )

            # Normalize text (whitespace/formatting cleanup helper)
            normalized_text = normalize_report_text(text)

            # Run text analysis
            self.logger.info("Analyzing medical report text")
            results = self.text_model.analyze(normalized_text)

            # Get entities and create visualization
            entities = results.get("entities", {})
            fig = plot_report_entities(normalized_text, entities)

            # Convert to HTML for display (fig is closed by fig_to_html)
            entities_plot_html = self.fig_to_html(fig)

            # Format results as HTML
            html_result = f"""
            <h2>Medical Report Analysis Results</h2>
            <p><strong>Severity Level:</strong> {results.get("severity", {}).get("level", "Unknown")}</p>
            <p><strong>Severity Score:</strong> {results.get("severity", {}).get("score", 0)}/4</p>
            <p><strong>Confidence:</strong> {results.get("severity", {}).get("confidence", 0):.1%}</p>

            <h3>Key Findings:</h3>
            <ul>
            """

            # Add findings
            findings = results.get("findings", [])
            if findings:
                for finding in findings:
                    html_result += f"<li>{finding}</li>"
            else:
                html_result += "<li>No specific findings detailed.</li>"

            html_result += "</ul>"

            # Add extracted entities, one paragraph per non-empty category
            html_result += "<h3>Extracted Medical Entities:</h3>"

            for category, items in entities.items():
                if items:
                    html_result += f"<p><strong>{category.capitalize()}:</strong> {', '.join(items)}</p>"

            # Add follow-up recommendations
            html_result += "<h3>Follow-up Recommendations:</h3><ul>"
            followups = results.get("followup_recommendations", [])

            if followups:
                for rec in followups:
                    html_result += f"<li>{rec}</li>"
            else:
                html_result += "<li>No specific follow-up recommendations.</li>"

            html_result += "</ul>"

            return text, html_result, entities_plot_html

        except Exception as e:
            # Broad catch: surface the error in the UI instead of crashing Gradio
            self.logger.error(f"Error in text analysis: {e}")
            return text, f"Error analyzing text: {str(e)}", None
220
+
221
    def analyze_multimodal(self, image, text):
        """
        Perform combined (image + text) analysis and render results as HTML.

        Args:
            image: Image uploaded through Gradio (path str or PIL image)
            text: Report text input through Gradio

        Returns:
            tuple: (results_html, multimodal_plot_html); on failure the first
            element is an error message and the second is None.
        """
        try:
            # Ensure models are loaded (lazy initialization)
            if not self.load_models() or self.fusion_model is None:
                return "Error: Models not loaded properly.", None

            # Validate inputs before doing any work
            if image is None:
                return "Error: Please upload an X-ray image for analysis.", None

            if not text or len(text.strip()) < 10:
                return (
                    "Error: Please enter a valid medical report text (at least 10 characters).",
                    None,
                )

            # Save uploaded image to a temporary file for the fusion model.
            # NOTE(review): the mkdtemp() directory is never removed -- leaks
            # one temp dir per call; consider cleanup after analysis.
            temp_dir = tempfile.mkdtemp()
            temp_path = os.path.join(temp_dir, "upload.png")

            if isinstance(image, str):
                # Copy the file if it's a path
                from shutil import copyfile

                copyfile(image, temp_path)
            else:
                # Save if it's a Gradio UploadButton image
                image.save(temp_path)

            # Normalize text
            normalized_text = normalize_report_text(text)

            # Run multimodal analysis
            self.logger.info("Performing multimodal analysis")
            results = self.fusion_model.analyze(temp_path, normalized_text)

            # Create visualization
            fig = plot_multimodal_results(results, image, text)

            # Convert to HTML for display (fig is closed by fig_to_html)
            plot_html = self.fig_to_html(fig)

            # Generate explanation
            # NOTE(review): 'explanation' is computed but never added to the
            # output HTML (analyze_image appends it) -- likely an oversight.
            explanation = self.fusion_model.get_explanation(results)

            # Format results as HTML
            html_result = f"""
            <h2>Multimodal Medical Analysis Results</h2>

            <h3>Overview</h3>
            <p><strong>Primary Finding:</strong> {results.get("primary_finding", "Unknown")}</p>
            <p><strong>Severity Level:</strong> {results.get("severity", {}).get("level", "Unknown")}</p>
            <p><strong>Severity Score:</strong> {results.get("severity", {}).get("score", 0)}/4</p>
            <p><strong>Agreement Score:</strong> {results.get("agreement_score", 0):.0%}</p>

            <h3>Detailed Findings</h3>
            <ul>
            """

            # Add findings
            findings = results.get("findings", [])
            if findings:
                for finding in findings:
                    html_result += f"<li>{finding}</li>"
            else:
                html_result += "<li>No specific findings detailed.</li>"

            html_result += "</ul>"

            # Add follow-up recommendations
            html_result += "<h3>Recommended Follow-up</h3><ul>"
            followups = results.get("followup_recommendations", [])

            if followups:
                for rec in followups:
                    html_result += f"<li>{rec}</li>"
            else:
                html_result += (
                    "<li>No specific follow-up recommendations provided.</li>"
                )

            html_result += "</ul>"

            # Add confidence note / medical disclaimer
            confidence = results.get("severity", {}).get("confidence", 0)
            html_result += f"""
            <p><em>Note: This analysis has a confidence level of {confidence:.0%}.
            Please consult with healthcare professionals for official diagnosis.</em></p>
            """

            return html_result, plot_html

        except Exception as e:
            # Broad catch: surface the error in the UI instead of crashing Gradio
            self.logger.error(f"Error in multimodal analysis: {e}")
            return f"Error in multimodal analysis: {str(e)}", None
326
+
327
+ def enhance_image(self, image):
328
+ """
329
+ Enhance X-ray image contrast.
330
+
331
+ Args:
332
+ image: Image file uploaded through Gradio
333
+
334
+ Returns:
335
+ PIL.Image: Enhanced image
336
+ """
337
+ try:
338
+ if image is None:
339
+ return None
340
+
341
+ # Save uploaded image to a temporary file
342
+ temp_dir = tempfile.mkdtemp()
343
+ temp_path = os.path.join(temp_dir, "upload.png")
344
+
345
+ if isinstance(image, str):
346
+ # Copy the file if it's a path
347
+ from shutil import copyfile
348
+
349
+ copyfile(image, temp_path)
350
+ else:
351
+ # Save if it's a Gradio UploadButton image
352
+ image.save(temp_path)
353
+
354
+ # Enhance image
355
+ self.logger.info(f"Enhancing image: {temp_path}")
356
+ output_path = os.path.join(temp_dir, "enhanced.png")
357
+ enhance_xray_image(temp_path, output_path)
358
+
359
+ # Load enhanced image
360
+ enhanced = Image.open(output_path)
361
+ return enhanced
362
+
363
+ except Exception as e:
364
+ self.logger.error(f"Error enhancing image: {e}")
365
+ return image # Return original image on error
366
+
367
+ def fig_to_html(self, fig):
368
+ """Convert matplotlib figure to HTML for display in Gradio."""
369
+ try:
370
+ import base64
371
+ import io
372
+
373
+ buf = io.BytesIO()
374
+ fig.savefig(buf, format="png", bbox_inches="tight")
375
+ buf.seek(0)
376
+ img_str = base64.b64encode(buf.read()).decode("utf-8")
377
+ plt.close(fig)
378
+
379
+ return f'<img src="data:image/png;base64,{img_str}" alt="Analysis Plot">'
380
+
381
+ except Exception as e:
382
+ self.logger.error(f"Error converting figure to HTML: {e}")
383
+ return "<p>Error displaying visualization.</p>"
384
+
385
+
386
def create_interface():
    """
    Create and return the Gradio interface without launching it.
    For Hugging Face Spaces compatibility.

    Builds the full multi-tab MediSync UI (multimodal, image-only, text-only
    analysis plus an About tab) wired to a `MediSyncApp` instance. On any
    setup failure, returns a minimal fallback interface that reports the error.

    Returns:
        A `gr.Blocks` interface on success, or a simple `gr.Interface`
        fallback describing the initialization error.
    """
    try:
        logging.info("Creating MediSync interface for Hugging Face Spaces")
        app = MediSyncApp()

        # Example medical report for demo
        example_report = """
        CHEST X-RAY EXAMINATION
        
        CLINICAL HISTORY: 55-year-old male with cough and fever.
        
        FINDINGS: The heart size is at the upper limits of normal. The lungs are clear without focal consolidation, 
        effusion, or pneumothorax. There is mild prominence of the pulmonary vasculature. No pleural effusion is seen. 
        There is a small nodular opacity noted in the right lower lobe measuring approximately 8mm, which is suspicious 
        and warrants further investigation. The mediastinum is unremarkable. The visualized bony structures show no acute abnormalities.
        
        IMPRESSION:
        1. Mild cardiomegaly.
        2. 8mm nodular opacity in the right lower lobe, recommend follow-up CT for further evaluation.
        3. No acute pulmonary parenchymal abnormality.
        
        RECOMMENDATIONS: Follow-up chest CT to further characterize the nodular opacity in the right lower lobe.
        """

        # Get sample image path if available
        import os
        from pathlib import Path

        current_dir = os.path.dirname(os.path.abspath(__file__))
        parent_dir = os.path.dirname(current_dir)

        sample_images_dir = Path(parent_dir) / "mediSync" / "data" / "sample"
        sample_images = list(sample_images_dir.glob("*.png")) + list(
            sample_images_dir.glob("*.jpg")
        )

        # First discovered sample image (if any) seeds the Examples widgets.
        sample_image_path = None
        if sample_images:
            sample_image_path = str(sample_images[0])

        # Import gradio here to avoid early initialization
        import gradio as gr

        # Define interface
        with gr.Blocks(
            title="MediSync: Multi-Modal Medical Analysis System",
            theme=gr.themes.Soft(),
        ) as interface:
            gr.Markdown("""
            # MediSync: Multi-Modal Medical Analysis System
            
            This AI-powered healthcare solution combines X-ray image analysis with patient report text processing 
            to provide comprehensive medical insights.
            
            ## How to Use
            1. Upload a chest X-ray image
            2. Enter the corresponding medical report text
            3. Choose the analysis type: image-only, text-only, or multimodal (combined)
            """)

            with gr.Tab("Multimodal Analysis"):
                with gr.Row():
                    with gr.Column():
                        multi_img_input = gr.Image(
                            label="Upload X-ray Image", type="pil"
                        )
                        multi_img_enhance = gr.Button("Enhance Image")

                        multi_text_input = gr.Textbox(
                            label="Enter Medical Report Text",
                            placeholder="Enter the radiologist's report text here...",
                            lines=10,
                            value=example_report if sample_image_path is None else None,
                        )

                        multi_analyze_btn = gr.Button(
                            "Analyze Image & Text", variant="primary"
                        )

                    with gr.Column():
                        multi_results = gr.HTML(label="Analysis Results")
                        multi_plot = gr.HTML(label="Visualization")

                # Set up examples if sample image exists
                if sample_image_path:
                    gr.Examples(
                        examples=[[sample_image_path, example_report]],
                        inputs=[multi_img_input, multi_text_input],
                        label="Example X-ray and Report",
                    )

            with gr.Tab("Image Analysis"):
                with gr.Row():
                    with gr.Column():
                        img_input = gr.Image(label="Upload X-ray Image", type="pil")
                        img_enhance = gr.Button("Enhance Image")
                        img_analyze_btn = gr.Button("Analyze Image", variant="primary")

                    with gr.Column():
                        img_output = gr.Image(label="Processed Image")
                        img_results = gr.HTML(label="Analysis Results")
                        img_plot = gr.HTML(label="Visualization")

                # Set up example if sample image exists
                if sample_image_path:
                    gr.Examples(
                        examples=[[sample_image_path]],
                        inputs=[img_input],
                        label="Example X-ray Image",
                    )

            with gr.Tab("Text Analysis"):
                with gr.Row():
                    with gr.Column():
                        text_input = gr.Textbox(
                            label="Enter Medical Report Text",
                            placeholder="Enter the radiologist's report text here...",
                            lines=10,
                            value=example_report,
                        )
                        text_analyze_btn = gr.Button("Analyze Text", variant="primary")

                    with gr.Column():
                        text_output = gr.Textbox(label="Processed Text")
                        text_results = gr.HTML(label="Analysis Results")
                        text_plot = gr.HTML(label="Entity Visualization")

                # Set up example
                gr.Examples(
                    examples=[[example_report]],
                    inputs=[text_input],
                    label="Example Medical Report",
                )

            with gr.Tab("About"):
                gr.Markdown("""
                ## About MediSync
                
                MediSync is an AI-powered healthcare solution that uses multi-modal analysis to provide comprehensive insights from medical images and reports.
                
                ### Key Features
                
                - **X-ray Image Analysis**: Detects abnormalities in chest X-rays using pre-trained vision models
                - **Medical Report Processing**: Extracts key information from patient reports using NLP models
                - **Multi-modal Integration**: Combines insights from both image and text data for more accurate analysis
                
                ### Models Used
                
                - **X-ray Analysis**: facebook/deit-base-patch16-224-medical-cxr
                - **Medical Text Analysis**: medicalai/ClinicalBERT
                
                ### Important Disclaimer
                
                This tool is for educational and research purposes only. It is not intended to provide medical advice or replace professional healthcare. Always consult with qualified healthcare providers for medical decisions.
                """)

            # Set up event handlers
            multi_img_enhance.click(
                app.enhance_image, inputs=multi_img_input, outputs=multi_img_input
            )
            multi_analyze_btn.click(
                app.analyze_multimodal,
                inputs=[multi_img_input, multi_text_input],
                outputs=[multi_results, multi_plot],
            )

            img_enhance.click(app.enhance_image, inputs=img_input, outputs=img_output)
            img_analyze_btn.click(
                app.analyze_image,
                inputs=img_input,
                outputs=[img_output, img_results, img_plot],
            )

            text_analyze_btn.click(
                app.analyze_text,
                inputs=text_input,
                outputs=[text_output, text_results, text_plot],
            )

        # Return the interface WITHOUT launching it
        return interface

    except Exception as e:
        logging.error(f"Error creating interface: {e}")
        import traceback

        logging.error(traceback.format_exc())

        # BUGFIX: bind the message to a local string NOW. Python 3 deletes the
        # exception variable `e` when the except block exits (PEP 3110), so a
        # lambda closing over `e` would raise NameError when Gradio later
        # invokes it instead of displaying the original error.
        error_message = f"Error initializing MediSync: {str(e)}"

        # Create a simple fallback interface
        import gradio as gr

        return gr.Interface(
            fn=lambda: error_message,
            inputs=None,
            outputs="text",
            title="MediSync Error",
            description="There was an error initializing the application. Please check the logs for details.",
        )
588
+
589
+
590
if __name__ == "__main__":
    # Local execution entry point: build the Gradio UI and serve it.
    # (On Hugging Face Spaces the host calls create_interface() instead.)
    demo = create_interface()
    demo.launch()