import os
import cv2
import numpy as np
import pytesseract
import gradio as gr
import io
import base64
from datetime import datetime
import pytz
import urllib.parse
import re
# Install Tesseract OCR in the Hugging Face container
# NOTE(review): runs at import time on every container start; assumes the
# process has root (true on Hugging Face Spaces) — confirm for other hosts.
os.system("apt-get update && apt-get install -y tesseract-ocr")
# Set timezone to IST
ist = pytz.timezone('Asia/Kolkata')
# NOTE(review): evaluated once at import time — any report that reads this
# module-level value shows the server start time, not the generation time.
current_time = datetime.now(ist).strftime("%Y-%m-%d %I:%M %p IST")
# Function to validate phone number (e.g., +91 followed by 10 digits)
def validate_phone(phone):
    """Check that *phone* is in international format.

    Accepts a leading '+', a 1-3 digit country code, then exactly ten
    digits (e.g. '+919876543210'). Returns True or False.
    """
    international_format = r"^\+\d{1,3}\d{10}$"
    return re.match(international_format, phone) is not None
# Function to preprocess poor-quality images
def preprocess_image(image):
    """Enhance a (possibly poor-quality) BGR eye image for OCR/analysis.

    Pipeline: grayscale -> Gaussian denoise -> kernel sharpen -> CLAHE
    local-contrast boost -> Otsu global binarisation.

    Returns a 3-tuple ``(otsu_binary, contrast_adjusted, error)`` where
    ``error`` is None on success, or ``(None, None, message)`` on failure.
    """
    try:
        grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
        # Classic 3x3 sharpening kernel: centre 9, all neighbours -1.
        kernel = np.array([[-1, -1, -1],
                           [-1, 9, -1],
                           [-1, -1, -1]])
        crisp = cv2.filter2D(smoothed, -1, kernel)
        # CLAHE equalises contrast locally per 8x8 tile.
        equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        boosted = equalizer.apply(crisp)
        # Otsu picks the binarisation threshold automatically.
        _, binary = cv2.threshold(boosted, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return binary, boosted, None
    except Exception as exc:
        return None, None, f"Error preprocessing image: {str(exc)}"
# Simulated abnormality detection (placeholder for deep learning model)
def detect_abnormalities(image, sharpness, contrast_adjusted):
    """Heuristic abnormality screen from basic image statistics.

    Flags extreme mean brightness, low Laplacian-variance ``sharpness``
    and low standard-deviation contrast. This is a placeholder for a real
    deep-learning model; it only inspects pixel statistics.

    Returns a newline-joined string of warning/recommendation bullets, or
    a single "no obvious abnormalities" bullet when nothing is flagged.
    """
    try:
        brightness = np.mean(image)
        contrast_level = np.std(contrast_adjusted)
        findings = []
        if brightness > 200:
            findings.append(f"- Warning: Unusually high brightness detected (mean: {brightness:.1f}). This may indicate overexposure or potential abnormalities like cataracts.")
            findings.append("- Recommendation: Verify with a properly exposed fundus image.")
        elif brightness < 50:
            findings.append(f"- Warning: Unusually low brightness detected (mean: {brightness:.1f}). This may indicate underexposure or potential issues like retinal detachment.")
            findings.append("- Recommendation: Recapture with better lighting.")
        if sharpness < 50:
            findings.append(f"- Warning: Poor image sharpness (variance: {sharpness:.1f}). This may obscure abnormalities.")
            findings.append("- Recommendation: Use a higher-quality image with better focus.")
        if contrast_level < 20:
            findings.append(f"- Warning: Low contrast detected (std: {contrast_level:.1f}). This may reduce visibility of anatomical features.")
            findings.append("- Recommendation: Adjust lighting or use a fundus camera.")
        if not findings:
            findings.append("- No obvious abnormalities detected based on basic image characteristics.")
        return "\n".join(findings)
    except Exception as exc:
        return f"- Error in abnormality detection: {str(exc)}"
# Function to analyze the eye image
def analyze_eye_image(image, patient_name, doctor_email, doctor_phone):
    """Analyze an uploaded eye image via OCR and build a patient report.

    Parameters:
        image: RGB numpy array from Gradio (or None when nothing uploaded).
        patient_name: free text; blank falls back to "Unknown Patient".
        doctor_email: required, must be non-blank (format not validated here).
        doctor_phone: WhatsApp number matching '+<country code><10 digits>'.

    Returns a 5-tuple:
        (annotated RGB image or None,
         report text (or an error message),
         HTML download link for the report,
         simulated email body,
         WhatsApp share-link HTML).
    Validation failures return (None, "Error: ...", "", "", "").
    """
    try:
        # ---- Input validation ------------------------------------------
        if image is None or not isinstance(image, np.ndarray) or image.size == 0:
            return None, "Error: No valid image uploaded. Please upload a PNG or JPEG image.", "", "", ""
        patient_name = patient_name.strip() if patient_name.strip() else "Unknown Patient"
        if not doctor_email.strip():
            return None, "Error: Doctor's email is required.", "", "", ""
        if not doctor_phone.strip() or not validate_phone(doctor_phone):
            return None, "Error: Invalid phone number. Use format: +91XXXXXXXXXX", "", "", ""

        # FIX: compute the timestamp per request. The module-level
        # `current_time` is frozen at import and goes stale on a
        # long-running server.
        report_time = datetime.now(ist).strftime("%Y-%m-%d %I:%M %p IST")

        # Gradio supplies RGB; OpenCV expects BGR.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        annotated_image = image.copy()

        # ---- Preprocessing + OCR ---------------------------------------
        otsu_thresh, contrast_adjusted, error = preprocess_image(image)
        if error:
            return None, error, "", "", ""
        # Adaptive threshold on top of the Otsu output for OCR robustness.
        thresh = cv2.adaptiveThreshold(otsu_thresh, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 11, 2)
        # psm 6 = assume a uniform block of text; oem 3 = default engine.
        text_data = pytesseract.image_to_data(thresh, output_type=pytesseract.Output.DICT,
                                              config='--psm 6 --oem 3')

        # ---- Collect confident words with bounding boxes ---------------
        labels = []
        for i in range(len(text_data['text'])):
            # FIX: newer pytesseract versions return confidences as float
            # strings ("87.0"); int(float(...)) accepts both forms.
            confidence = int(float(text_data['conf'][i]))
            if confidence > 20:  # low threshold to keep faint watermarks (iStock, Credit, Gannet77)
                label = text_data['text'][i].strip()
                if label:
                    x, y = text_data['left'][i], text_data['top'][i]
                    w, h = text_data['width'][i], text_data['height'][i]
                    labels.append({'label': label, 'position': (x, y), 'size': (w, h)})
                    # Annotate the image with a box + caption per word.
                    cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(annotated_image, label, (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Merge adjacent "Optic" + "nerve" tokens into a single label.
        merged_labels = []
        i = 0
        while i < len(labels):
            if i + 1 < len(labels) and labels[i]['label'].lower() == 'optic' and labels[i + 1]['label'].lower() == 'nerve':
                x, y = labels[i]['position']
                merged_labels.append({'label': "Optic Nerve", 'position': (x, y)})
                i += 2
            else:
                merged_labels.append(labels[i])
                i += 1

        # ---- Build the report ------------------------------------------
        report = f"Eye Image Analysis Report for {patient_name} (Generated on {report_time}):\n\n"
        report += "Detected Anatomical Features and Their Positions:\n"
        if merged_labels:
            for item in merged_labels:
                report += f"- {item['label']} at position (x: {item['position'][0]}, y: {item['position'][1]})\n"
        else:
            report += "No text detected in the image.\n"

        report += "\nImage Quality Assessment:\n"
        # Variance of the Laplacian is a standard blur metric.
        sharpness = cv2.Laplacian(contrast_adjusted, cv2.CV_64F).var()
        if sharpness < 50:
            report += "- The image quality is poor (blurry or low resolution). This may affect analysis accuracy.\n"
            report += "- Recommendation: Use a higher-quality image or improve lighting and focus.\n"
        else:
            report += "- The image quality is sufficient for analysis.\n"

        report += "\nDetailed Analysis:\n"
        detected_labels = [item['label'] for item in merged_labels]
        if "iStock" in detected_labels and "Credit" in detected_labels and "Gannet77" in detected_labels:
            report += "- Detected text ('iStock', 'Credit', 'Gannet77') suggests this is a stock image from iStock, credited to 'Gannet77'.\n"
            report += "- This is likely not a real patient eye scan but a labeled diagram.\n"
            report += "- No anatomical features (e.g., optic disc, macula) detected, as this is not a real scan.\n"
        else:
            report += "- No specific anatomical features detected, possibly due to image quality, type, or lack of text labels.\n"
            report += "- For poor-quality images or real fundus images, use a fundus camera or smartphone attachment.\n"

        report += "\nPreliminary Abnormality Check:\n"
        # FIX: detect_abnormalities() returns no trailing newline, so the
        # original fused the Note onto the last warning line.
        report += detect_abnormalities(image, sharpness, contrast_adjusted) + "\n"
        report += "- Note: This is a basic check. For accurate diagnosis, integrate a deep learning model (e.g., for diabetic retinopathy or glaucoma) and use a high-quality fundus image.\n"

        report += "\nRecommendations for the Doctor:\n"
        report += "- Request a real eye scan (e.g., fundus image) for accurate analysis.\n"
        report += "- For educational use, confirm anatomical labels manually.\n"
        report += "- For clinical diagnosis, use high-quality fundus or slit-lamp images.\n"
        if sharpness < 50:
            report += "- Advise patient to recapture image with better lighting and resolution.\n"

        # ---- Outputs ---------------------------------------------------
        annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
        report_b64 = base64.b64encode(report.encode('utf-8')).decode('utf-8')
        # FIX: sanitize the patient name before embedding it in the download
        # filename — spaces/quotes would break the HTML attribute.
        safe_name = re.sub(r'[^A-Za-z0-9._-]+', '_', patient_name)
        report_download_link = f'<a href="data:text/plain;base64,{report_b64}" download="eye_analysis_report_{safe_name}.txt">Download Report</a>'

        whatsapp_message = f"Eye Image Analysis Report for {patient_name}:\n\n{report}"
        # FIX: wa.me click-to-chat links take the number in digits only —
        # the leading '+' must be stripped.
        whatsapp_link = f"https://wa.me/{doctor_phone.lstrip('+')}?text={urllib.parse.quote(whatsapp_message)}"
        whatsapp_html = f'<a href="{whatsapp_link}" target="_blank">Send Report via WhatsApp</a>'

        # Simulated email content (actual sending would need e.g. smtplib).
        email_content = f"Subject: Eye Image Analysis Report for {patient_name}\n\n"
        email_content += "Dear Doctor,\n\n"
        email_content += f"Analysis report for patient {patient_name}:\n\n{report}"
        email_content += "\nThe annotated image and report are attached for review.\n"
        email_content += "Note: To send this email, deploy with email functionality (e.g., smtplib).\n"

        return annotated_image, report, report_download_link, email_content, whatsapp_html
    except Exception as e:
        return None, f"Error processing image: {str(e)}", "", "", ""
# Gradio interface
# Wire the analysis function into a Gradio form: one image + three text
# inputs, five outputs (annotated image, report, download link, email
# preview, WhatsApp link).
interface = gr.Interface(
    fn=analyze_eye_image,
    inputs=[
        gr.Image(label="Upload Eye Image (PNG/JPEG)", type="numpy"),
        gr.Textbox(label="Patient Name (Required)", placeholder="Enter patient's name", lines=1, max_lines=1),
        gr.Textbox(label="Doctor's Email (Required)", placeholder="Enter doctor's email (e.g., doctor@example.com)"),
        gr.Textbox(label="Doctor's Phone Number (WhatsApp, Required)", placeholder="Enter phone number (e.g., +919876543210)"),
    ],
    outputs=[
        gr.Image(label="Annotated Image"),
        gr.Textbox(label="Detailed Analysis Report"),
        gr.HTML(label="Download Report"),
        gr.Textbox(label="Email Content (To Be Sent to Doctor)"),
        gr.HTML(label="WhatsApp Link"),
    ],
    title="EyeScanIndia: Remote Eye Image Analysis",
    description="""
Upload an eye image (e.g., fundus image or diagram) to analyze anatomical features.
Supports poor-quality images with enhanced preprocessing.
Provide patient name, doctor's email, and WhatsApp number to generate a report.
**Note**: Ensure compliance with India’s DPDP Act for medical data.
For best results, use high-quality fundus images from a fundus camera or smartphone attachment.
""",
    allow_flagging="never",
)

# Bind to all interfaces on port 7860 (the Hugging Face Spaces default).
if __name__ == "__main__":
    interface.launch(server_name="0.0.0.0", server_port=7860)