Anupam007 commited on
Commit
7c0d26b
·
verified ·
1 Parent(s): f9002d1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +175 -150
app.py CHANGED
@@ -1,7 +1,4 @@
1
- # Install required libraries
2
- #!pip install opencv-python-headless pytesseract gradio
3
- #!apt-get update && apt-get install -y tesseract-ocr
4
-
5
  import cv2
6
  import numpy as np
7
  import pytesseract
@@ -11,167 +8,195 @@ import base64
11
  from datetime import datetime
12
  import pytz
13
  import urllib.parse
 
 
 
 
14
 
15
  # Set timezone to IST
16
  ist = pytz.timezone('Asia/Kolkata')
17
  current_time = datetime.now(ist).strftime("%Y-%m-%d %I:%M %p IST")
18
 
 
 
 
 
 
19
  # Function to preprocess poor-quality images
20
  def preprocess_image(image):
21
- # Convert to grayscale
22
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
23
-
24
- # Denoise the image using Gaussian blur
25
- denoised = cv2.GaussianBlur(gray, (5, 5), 0)
26
-
27
- # Sharpen the image using a kernel
28
- sharpening_kernel = np.array([[-1, -1, -1],
29
- [-1, 9, -1],
30
- [-1, -1, -1]])
31
- sharpened = cv2.filter2D(denoised, -1, sharpening_kernel)
32
-
33
- # Adjust contrast using CLAHE (Contrast Limited Adaptive Histogram Equalization)
34
- clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
35
- contrast_adjusted = clahe.apply(sharpened)
36
-
37
- return contrast_adjusted
 
 
 
38
 
39
  # Function to analyze the eye image
40
  def analyze_eye_image(image, patient_name, doctor_email, doctor_phone):
41
- # Convert Gradio image (numpy array) to OpenCV format
42
- image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
43
-
44
- # Keep a copy of the original image for annotations
45
- annotated_image = image.copy()
46
-
47
- # Preprocess the image to handle poor quality
48
- processed_image = preprocess_image(image)
49
-
50
- # Apply adaptive thresholding to improve text detection for poor-quality images
51
- thresh = cv2.adaptiveThreshold(processed_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
52
- cv2.THRESH_BINARY_INV, 11, 2)
53
-
54
- # Detect text using pytesseract
55
- text_data = pytesseract.image_to_data(thresh, output_type=pytesseract.Output.DICT)
56
-
57
- # Store detected labels and their positions
58
- labels = []
59
- for i in range(len(text_data['text'])):
60
- confidence = int(text_data['conf'][i])
61
- if confidence > 50: # Lowered confidence threshold for poor-quality images
62
- label = text_data['text'][i].strip()
63
- if label: # Ignore empty strings
64
- x, y = text_data['left'][i], text_data['top'][i]
65
- w, h = text_data['width'][i], text_data['height'][i]
66
- labels.append({'label': label, 'position': (x, y), 'size': (w, h)})
67
-
68
- # Annotate the image with a bounding box and label
69
- cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
70
- cv2.putText(annotated_image, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
71
-
72
- # Merge "Optic" and "nerve" if they are close to each other
73
- merged_labels = []
74
- i = 0
75
- while i < len(labels):
76
- if i + 1 < len(labels) and labels[i]['label'].lower() == 'optic' and labels[i + 1]['label'].lower() == 'nerve':
77
- merged_label = "Optic Nerve"
78
- x, y = labels[i]['position']
79
- merged_labels.append({'label': merged_label, 'position': (x, y)})
80
- i += 2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  else:
82
- merged_labels.append(labels[i])
83
- i += 1
84
-
85
- # Generate a detailed analysis report
86
- report = f"Eye Image Analysis Report for {patient_name} (Generated on {current_time}):\n\n"
87
- report += "Detected Text and Their Positions:\n"
88
- if merged_labels:
89
- for item in merged_labels:
90
- report += f"- {item['label']} at position (x: {item['position'][0]}, y: {item['position'][1]})\n"
91
- else:
92
- report += "No text detected in the image.\n"
93
-
94
- # Image Quality Assessment
95
- report += "\nImage Quality Assessment:\n"
96
- # Estimate image quality based on sharpness (using Laplacian variance)
97
- sharpness = cv2.Laplacian(processed_image, cv2.CV_64F).var()
98
- if sharpness < 50:
99
- report += "- The image quality appears to be poor (blurry or low resolution). This may affect the accuracy of the analysis.\n"
100
- report += "- Recommendation: Use a higher-quality image or improve lighting and focus during image capture.\n"
101
- else:
102
- report += "- The image quality appears to be sufficient for analysis.\n"
103
-
104
- # Detailed Analysis
105
- report += "\nDetailed Analysis:\n"
106
- detected_labels = [item['label'] for item in merged_labels]
107
- if "iStock" in detected_labels and "Credit" in detected_labels and "Gannet77" in detected_labels:
108
- report += "- The detected text ('iStock', 'Credit', 'Gannet77') suggests that this image is likely a stock image from iStock, credited to the contributor 'Gannet77'.\n"
109
- report += "- This image does not appear to be an actual patient eye scan (e.g., fundus image). It might be a labeled diagram or an illustrative image.\n"
110
- report += "- No anatomical features (e.g., optic disc, macula, blood vessels) were detected in this analysis, likely because the image is not a real eye scan.\n"
111
- else:
112
- report += "- No specific anatomical features were detected. This could be due to the image quality, type, or limitations of the text detection system.\n"
113
- report += "- For poor-quality images, consider using a fundus camera or smartphone attachment with better focus and lighting.\n"
114
-
115
- # Preliminary Abnormality Check
116
- report += "\nPreliminary Abnormality Check:\n"
117
- report += "- This is a placeholder. Integrate a deep learning model to detect abnormalities like diabetic retinopathy or glaucoma.\n"
118
- report += "- Since this appears to be a stock image, no actual abnormality detection is applicable.\n"
119
- report += "- For poor-quality images, abnormality detection accuracy may be reduced. A higher-quality image is recommended.\n"
120
-
121
- # Recommendations for the Doctor
122
- report += "\nRecommendations for the Doctor:\n"
123
- report += "- Request the patient to provide a real eye scan (e.g., a fundus image) using a fundus camera or smartphone attachment for accurate analysis.\n"
124
- report += "- If this is intended for educational purposes, confirm the anatomical labels manually and use the image for reference.\n"
125
- report += "- For clinical diagnosis, a high-quality fundus image or slit-lamp image is recommended to assess conditions like diabetic retinopathy, glaucoma, or cataracts.\n"
126
- if sharpness < 50:
127
- report += "- The image quality is poor. Advise the patient to recapture the image with better lighting, focus, and resolution.\n"
128
-
129
- # Convert annotated image back to RGB for Gradio display
130
- annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
131
-
132
- # Convert the report to a downloadable text file
133
- report_file = io.StringIO(report)
134
- report_bytes = report.encode('utf-8')
135
- report_b64 = base64.b64encode(report_bytes).decode('utf-8')
136
- report_download_link = f'<a href="data:text/plain;base64,{report_b64}" download="eye_analysis_report_{patient_name}.txt">Download Report</a>'
137
-
138
- # Generate WhatsApp link
139
- whatsapp_message = f"Eye Image Analysis Report for {patient_name}:\n\n{report}"
140
- whatsapp_link = f"https://wa.me/{doctor_phone}?text={urllib.parse.quote(whatsapp_message)}"
141
- whatsapp_html = f'<a href="{whatsapp_link}" target="_blank">Send Report via WhatsApp</a>'
142
-
143
- # Simulate email content (to be sent to the doctor)
144
- email_content = f"Subject: Eye Image Analysis Report for {patient_name}\n\n"
145
- email_content += "Dear Doctor,\n\n"
146
- email_content += f"Please find the analysis report for patient {patient_name} below:\n\n"
147
- email_content += report
148
- email_content += "\nThe annotated image and report are attached for your review.\n"
149
- email_content += "For further details, please contact the patient or the system administrator.\n\n"
150
- email_content += "Best regards,\nEye Analysis System\n\n"
151
- email_content += f"Note: To send this email to {doctor_email}, deploy this app on a server with email functionality (e.g., using smtplib in Python)."
152
-
153
- return annotated_image, report, report_download_link, email_content, whatsapp_html
154
-
155
- # Gradio interface for patient use
156
  interface = gr.Interface(
157
- fn=analyze_eye_image, # Function to call
158
  inputs=[
159
- gr.Image(label="Upload Eye Image"), # Input: eye image
160
- gr.Textbox(label="Patient Name", placeholder="Enter patient's name"), # Input: patient name
161
- gr.Textbox(label="Doctor's Email", placeholder="Enter doctor's email address"), # Input: doctor's email
162
- gr.Textbox(label="Doctor's Phone Number (WhatsApp)", placeholder="Enter doctor's phone number (e.g., +919876543210)") # Input: doctor's phone number
163
  ],
164
  outputs=[
165
- gr.Image(label="Annotated Image"), # Output: annotated image
166
- gr.Textbox(label="Detailed Analysis Report"), # Output: text report
167
- gr.HTML(label="Download Report"), # Output: downloadable link
168
- gr.Textbox(label="Email Content (To Be Sent to Doctor)"), # Output: email content
169
- gr.HTML(label="WhatsApp Link") # Output: WhatsApp link
170
  ],
171
- title="Remote Eye Image Analysis for Patients in India (Supports Poor-Quality Images)",
172
- description="Upload an eye image (e.g., fundus image or diagram) to analyze anatomical features. The system supports poor-quality images with enhanced preprocessing. The report will be prepared for the doctor to review, with options to send via email or WhatsApp.",
173
- live=True # Enable live updates for real-time analysis
 
 
 
 
 
 
174
  )
175
 
176
- # Launch the Gradio interface
177
- interface.launch()
 
 
1
+ import os
 
 
 
2
  import cv2
3
  import numpy as np
4
  import pytesseract
 
8
  from datetime import datetime
9
  import pytz
10
  import urllib.parse
11
+ import re
12
+
13
# Install the Tesseract OCR binary inside the Hugging Face container.
# NOTE(review): this runs on every startup and needs root — on Spaces a
# `packages.txt` entry is the preferred mechanism; confirm for this deployment.
# FIX: the original ignored the exit status, so a failed install was silent
# and pytesseract would later fail with a confusing error.
if os.system("apt-get update && apt-get install -y tesseract-ocr") != 0:
    print("Warning: Tesseract OCR installation failed; text detection may not work.")
15
 
16
  # Set timezone to IST
17
  ist = pytz.timezone('Asia/Kolkata')
18
  current_time = datetime.now(ist).strftime("%Y-%m-%d %I:%M %p IST")
19
 
20
# Function to validate phone number (e.g., +91 followed by 10 digits)
def validate_phone(phone):
    """Return True if *phone* is '+', a 1-3 digit country code, then exactly
    10 subscriber digits (e.g. ``+919876543210``); no spaces or dashes.

    FIX: uses ``re.fullmatch`` instead of ``re.match`` with ``$`` — the
    ``$`` anchor accepts a trailing newline, so ``"+919876543210\n"`` was
    wrongly considered valid by the original.
    """
    return bool(re.fullmatch(r"\+\d{1,3}\d{10}", phone))
24
+
25
# Function to preprocess poor-quality images
def preprocess_image(image):
    """Enhance a BGR eye image for OCR.

    Pipeline: grayscale -> Gaussian denoise -> kernel sharpening -> CLAHE
    (Contrast Limited Adaptive Histogram Equalization) contrast boost.

    Returns:
        ``(processed, None)`` on success, or ``(None, error_message)`` if any
        OpenCV step raises.
    """
    try:
        grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Suppress sensor noise before sharpening so the kernel does not
        # amplify it.
        blurred = cv2.GaussianBlur(grayscale, (5, 5), 0)

        # 3x3 sharpening kernel: heavy centre weight, negative neighbours.
        kernel = np.array([[-1, -1, -1],
                           [-1, 9, -1],
                           [-1, -1, -1]])
        crisp = cv2.filter2D(blurred, -1, kernel)

        # Locally adaptive contrast equalization on 8x8 tiles.
        equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        return equalizer.apply(crisp), None
    except Exception as e:
        return None, f"Error preprocessing image: {str(e)}"
 
48
# Function to analyze the eye image
def analyze_eye_image(image, patient_name, doctor_email, doctor_phone):
    """OCR-analyze an uploaded eye image and build a shareable report.

    Args:
        image: RGB numpy array supplied by Gradio (or None).
        patient_name: patient's display name (used in the report and filename).
        doctor_email: doctor's email address (report metadata only).
        doctor_phone: doctor's WhatsApp number, format ``+<cc><10 digits>``.

    Returns:
        ``(annotated_image, report, download_link_html, email_content,
        whatsapp_link_html)``; on any validation or processing error the
        first element is ``None`` and the second is an error message.
    """
    try:
        # --- Input validation -------------------------------------------
        if image is None or not isinstance(image, np.ndarray) or image.size == 0:
            return None, "Error: No valid image uploaded. Please upload a PNG or JPEG image.", "", "", ""
        if not patient_name.strip():
            return None, "Error: Patient name is required.", "", "", ""
        if not doctor_email.strip():
            return None, "Error: Doctor's email is required.", "", "", ""
        if not doctor_phone.strip() or not validate_phone(doctor_phone):
            return None, "Error: Invalid phone number. Use format: +91XXXXXXXXXX", "", "", ""

        # Gradio delivers RGB; OpenCV expects BGR.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        annotated_image = image.copy()

        # Preprocess the image (grayscale/denoise/sharpen/CLAHE).
        processed_image, error = preprocess_image(image)
        if error:
            return None, error, "", "", ""

        # Adaptive thresholding helps OCR on unevenly lit images.
        thresh = cv2.adaptiveThreshold(processed_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 11, 2)

        # Detect text using pytesseract.
        text_data = pytesseract.image_to_data(thresh, output_type=pytesseract.Output.DICT)

        # Collect confident OCR hits and annotate them on the image.
        labels = []
        for i in range(len(text_data['text'])):
            # FIX: newer pytesseract versions report confidence as a float
            # string (e.g. '96.5'); int() alone raises ValueError on those.
            confidence = int(float(text_data['conf'][i]))
            if confidence > 50:  # lowered threshold for poor-quality images
                label = text_data['text'][i].strip()
                if label:
                    x, y = text_data['left'][i], text_data['top'][i]
                    w, h = text_data['width'][i], text_data['height'][i]
                    labels.append({'label': label, 'position': (x, y), 'size': (w, h)})
                    # Annotate the image
                    cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(annotated_image, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Merge adjacent "Optic" + "nerve" tokens into a single label.
        merged_labels = []
        i = 0
        while i < len(labels):
            if i + 1 < len(labels) and labels[i]['label'].lower() == 'optic' and labels[i + 1]['label'].lower() == 'nerve':
                x, y = labels[i]['position']
                merged_labels.append({'label': "Optic Nerve", 'position': (x, y)})
                i += 2
            else:
                merged_labels.append(labels[i])
                i += 1

        # --- Generate report --------------------------------------------
        report = f"Eye Image Analysis Report for {patient_name} (Generated on {current_time}):\n\n"
        report += "Detected Text and Their Positions:\n"
        if merged_labels:
            for item in merged_labels:
                report += f"- {item['label']} at position (x: {item['position'][0]}, y: {item['position'][1]})\n"
        else:
            report += "No text detected in the image.\n"

        # Image Quality Assessment: Laplacian variance as a sharpness proxy.
        report += "\nImage Quality Assessment:\n"
        sharpness = cv2.Laplacian(processed_image, cv2.CV_64F).var()
        if sharpness < 50:
            report += "- The image quality is poor (blurry or low resolution). This may affect analysis accuracy.\n"
            report += "- Recommendation: Use a higher-quality image or improve lighting and focus.\n"
        else:
            report += "- The image quality is sufficient for analysis.\n"

        # Detailed Analysis
        report += "\nDetailed Analysis:\n"
        detected_labels = [item['label'] for item in merged_labels]
        if "iStock" in detected_labels and "Credit" in detected_labels and "Gannet77" in detected_labels:
            report += "- Detected text ('iStock', 'Credit', 'Gannet77') suggests this is a stock image from iStock, credited to 'Gannet77'.\n"
            report += "- This is likely not a real patient eye scan but a labeled diagram.\n"
            report += "- No anatomical features (e.g., optic disc, macula) detected, as this is not a real scan.\n"
        else:
            report += "- No specific anatomical features detected, possibly due to image quality or type.\n"
            report += "- For poor-quality images, use a fundus camera or smartphone attachment.\n"

        # Preliminary Abnormality Check
        report += "\nPreliminary Abnormality Check:\n"
        report += "- Placeholder: Integrate a deep learning model for abnormality detection (e.g., diabetic retinopathy).\n"
        report += "- For stock images, abnormality detection is not applicable.\n"
        if sharpness < 50:
            report += "- Poor image quality may reduce abnormality detection accuracy.\n"

        # Recommendations
        report += "\nRecommendations for the Doctor:\n"
        report += "- Request a real eye scan (e.g., fundus image) for accurate analysis.\n"
        report += "- For educational use, confirm anatomical labels manually.\n"
        report += "- For clinical diagnosis, use high-quality fundus or slit-lamp images.\n"
        if sharpness < 50:
            report += "- Advise patient to recapture image with better lighting and resolution.\n"

        # Convert annotated image back to RGB for Gradio display.
        annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

        # Create downloadable report as a base64 data URI.
        # FIX: sanitize patient_name for the filename — quotes/slashes in the
        # raw name would break the HTML attribute or produce invalid filenames.
        safe_name = re.sub(r'[^A-Za-z0-9_-]+', '_', patient_name.strip())
        report_bytes = report.encode('utf-8')
        report_b64 = base64.b64encode(report_bytes).decode('utf-8')
        report_download_link = f'<a href="data:text/plain;base64,{report_b64}" download="eye_analysis_report_{safe_name}.txt">Download Report</a>'

        # Generate WhatsApp link.
        # FIX: wa.me expects the number in international format WITHOUT the
        # leading '+' (per WhatsApp click-to-chat documentation).
        whatsapp_message = f"Eye Image Analysis Report for {patient_name}:\n\n{report}"
        whatsapp_link = f"https://wa.me/{doctor_phone.lstrip('+')}?text={urllib.parse.quote(whatsapp_message)}"
        whatsapp_html = f'<a href="{whatsapp_link}" target="_blank">Send Report via WhatsApp</a>'

        # Simulate email content (actual sending requires server-side SMTP).
        email_content = f"Subject: Eye Image Analysis Report for {patient_name}\n\n"
        email_content += "Dear Doctor,\n\n"
        email_content += f"Analysis report for patient {patient_name}:\n\n{report}"
        email_content += "\nThe annotated image and report are attached for review.\n"
        email_content += "Note: To send this email, deploy with email functionality (e.g., smtplib).\n"

        return annotated_image, report, report_download_link, email_content, whatsapp_html

    except Exception as e:
        # Broad catch is deliberate: this is the Gradio boundary, and errors
        # are surfaced to the user as text rather than a traceback.
        return None, f"Error processing image: {str(e)}", "", "", ""
+
173
# ---------------------------------------------------------------------------
# Gradio UI: collects the eye image plus patient/doctor details and surfaces
# the annotated image, the text report, and the share/download links.
# ---------------------------------------------------------------------------
interface = gr.Interface(
    fn=analyze_eye_image,
    inputs=[
        gr.Image(label="Upload Eye Image (PNG/JPEG)", type="numpy"),
        gr.Textbox(label="Patient Name", placeholder="Enter patient's name"),
        gr.Textbox(label="Doctor's Email", placeholder="Enter doctor's email (e.g., doctor@example.com)"),
        gr.Textbox(label="Doctor's Phone Number (WhatsApp)", placeholder="Enter phone number (e.g., +919876543210)"),
    ],
    outputs=[
        gr.Image(label="Annotated Image"),
        gr.Textbox(label="Detailed Analysis Report"),
        gr.HTML(label="Download Report"),
        gr.Textbox(label="Email Content (To Be Sent to Doctor)"),
        gr.HTML(label="WhatsApp Link"),
    ],
    title="EyeScanIndia: Remote Eye Image Analysis",
    description="""
    Upload an eye image (e.g., fundus image or diagram) to analyze anatomical features.
    Supports poor-quality images with enhanced preprocessing.
    Enter patient name, doctor's email, and WhatsApp number to generate a report.
    **Note**: Ensure compliance with India’s DPDP Act for medical data.
    For best results, use high-quality fundus images.
    """,
    allow_flagging="never"
)

# Launch the app for Hugging Face (bind all interfaces on the Spaces port).
if __name__ == "__main__":
    interface.launch(server_name="0.0.0.0", server_port=7860)