Spaces:
Sleeping
Sleeping
Upload 7 files
Browse files- .env.example +9 -0
- Dockerfile +23 -0
- app.py +8 -0
- main.py +546 -0
- model.onnx +3 -0
- prompts.py +91 -0
- requirements.txt +9 -0
.env.example
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ClarirAI Environment Variables
|
| 2 |
+
|
| 3 |
+
# OpenRouter API Configuration
|
| 4 |
+
OPENAI_API_KEY=your_openrouter_api_key_here
|
| 5 |
+
OPENROUTER_API_BASE=https://openrouter.ai/api/v1
|
| 6 |
+
OPENROUTER_REFERER=https://clarirai.example.com
|
| 7 |
+
OPENROUTER_MODEL=mistralai/mistral-7b-instruct
|
| 8 |
+
|
| 9 |
+
# You can set these in the Hugging Face Spaces secrets section
|
Dockerfile
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FROM python:3.12-slim

# Set working directory
WORKDIR /app

# Install dependencies first so the pip layer is cached across code changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files (model weights plus the FastAPI app)
COPY model.onnx .
COPY main.py .
COPY prompts.py .
COPY app.py .

# NOTE: the original `RUN mkdir -p /app/.env` was removed — it created a
# *directory* named .env, which is never read and would shadow a dotenv file.
# Configuration comes from environment variables (HF Spaces secrets).

# Expose port 7860 (required for Hugging Face Spaces)
EXPOSE 7860

# Run the FastAPI app
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
app.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Hugging Face Spaces entry point.

Spaces looks for `app.py`; the actual FastAPI application is defined in
`main.py`, so this module simply re-exports it.
"""

from main import app  # noqa: F401  (re-exported for the Spaces runner)

if __name__ == "__main__":
    import uvicorn

    uvicorn.run("main:app", host="0.0.0.0", port=7860)
|
main.py
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks, Query, Body
|
| 6 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 7 |
+
from fastapi.responses import JSONResponse, FileResponse
|
| 8 |
+
import onnxruntime
|
| 9 |
+
import numpy as np
|
| 10 |
+
from PIL import Image
|
| 11 |
+
import uvicorn
|
| 12 |
+
import uuid
|
| 13 |
+
import json
|
| 14 |
+
import os
|
| 15 |
+
import tempfile
|
| 16 |
+
from typing import Optional, List, Dict
|
| 17 |
+
|
| 18 |
+
# Import for .env file support
|
| 19 |
+
try:
|
| 20 |
+
from dotenv import load_dotenv
|
| 21 |
+
load_dotenv() # Load environment variables from .env file
|
| 22 |
+
ENV_LOADED = True
|
| 23 |
+
except ImportError:
|
| 24 |
+
ENV_LOADED = False
|
| 25 |
+
logging.warning("python-dotenv package not installed. Using environment variables directly.")
|
| 26 |
+
|
| 27 |
+
# Import for OpenRouter integration
|
| 28 |
+
try:
|
| 29 |
+
import openai
|
| 30 |
+
OPENAI_AVAILABLE = True
|
| 31 |
+
except ImportError:
|
| 32 |
+
OPENAI_AVAILABLE = False
|
| 33 |
+
logging.warning("OpenAI package not installed. AI consultation features will be disabled.")
|
| 34 |
+
|
| 35 |
+
# Import for report generation
|
| 36 |
+
try:
|
| 37 |
+
from docx import Document
|
| 38 |
+
DOCX_AVAILABLE = True
|
| 39 |
+
except ImportError:
|
| 40 |
+
DOCX_AVAILABLE = False
|
| 41 |
+
logging.warning("python-docx package not installed. Report generation will be disabled.")
|
| 42 |
+
|
| 43 |
+
# Import prompts and clinical information
|
| 44 |
+
from prompts import DR_CLINICAL_INFO, CONSULTATION_SYSTEM_PROMPT, DEFAULT_QUESTIONS, REPORT_SYSTEM_PROMPT
|
| 45 |
+
|
| 46 |
+
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger("clarirai-api")

# In-memory store of completed analyses keyed by analysis_id; read back by
# /analysis, /consult and /generate-report.
# NOTE(review): unbounded and process-local — entries live for the lifetime
# of the worker and are lost on restart.
analysis_cache = {}

app = FastAPI(
    title="ClarirAI - Advanced Diabetic Retinopathy Analysis",
    description="Next-generation API for detecting and analyzing diabetic retinopathy from retinal images with enhanced metrics",
    version="2.0.0"
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "https://diabetes-detection-zeta.vercel.app",
        "https://diabetes-detection-harishvijayasarangank-gmailcoms-projects.vercel.app",
        "*"  # Allow all origins for development - remove in production
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Five-point DR grading scale, indexed by the model's output class id.
labels = {
    0: {"name": "No DR", "description": "No signs of diabetic retinopathy detected"},
    1: {"name": "Mild", "description": "Mild nonproliferative diabetic retinopathy"},
    2: {"name": "Moderate", "description": "Moderate nonproliferative diabetic retinopathy"},
    3: {"name": "Severe", "description": "Severe nonproliferative diabetic retinopathy"},
    4: {"name": "Proliferative DR", "description": "Proliferative diabetic retinopathy"},
}

# Clinical reference data lives in prompts.py; alias it under the local name
# the endpoints use.
dr_clinical_info = DR_CLINICAL_INFO

# Static metadata describing the bundled ONNX classifier
# (load_time_seconds is appended below once the model is loaded).
MODEL_INFO = {
    "name": "ClarirAI Model",
    "version": "1.2.0",
    "architecture": "Densenet121",
    "accuracy": "89.8%",
    "last_updated": "2025-04-01",
    "input_size": [224, 224],
    "color_channels": 3,
}

# OpenRouter (OpenAI-compatible) configuration, read from the environment.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
OPENROUTER_REFERER = os.environ.get("OPENROUTER_REFERER", "https://clarirai.example.com")
OPENROUTER_MODEL = os.environ.get("OPENROUTER_MODEL", "mistralai/mistral-7b-instruct")  # Default to a free model

if OPENAI_AVAILABLE and OPENAI_API_KEY:
    # Legacy module-level settings; the endpoints also build explicit
    # openai.OpenAI clients with the same base_url/headers per request.
    openai.api_key = OPENAI_API_KEY
    openai.api_base = OPENROUTER_API_BASE
    logger.info(f"OpenRouter configured with model: {OPENROUTER_MODEL}")
else:
    logger.warning("OpenRouter not configured. AI consultation will not be available.")

# Load the ONNX model once at import time. On failure `session` stays None
# and the endpoints respond with 503 instead of crashing the worker.
try:
    logger.info("Loading ONNX model...")
    start_time = time.time()
    session = onnxruntime.InferenceSession('model.onnx')
    load_time = time.time() - start_time
    logger.info(f"Model loaded successfully in {load_time:.2f} seconds")
    MODEL_INFO["load_time_seconds"] = round(load_time, 2)
except Exception as e:
    logger.error(f"Error loading model: {e}")
    session = None
|
| 119 |
+
|
| 120 |
+
@app.get("/")
|
| 121 |
+
async def root():
|
| 122 |
+
"""
|
| 123 |
+
Root endpoint that provides API information
|
| 124 |
+
"""
|
| 125 |
+
return {
|
| 126 |
+
"name": "ClarirAI: Advanced Diabetic Retinopathy Analysis API",
|
| 127 |
+
"version": "2.0.0",
|
| 128 |
+
"description": "AI-powered diabetic retinopathy detection and analysis with enhanced metrics and consultation",
|
| 129 |
+
"endpoints": [
|
| 130 |
+
"/predict", "/health", "/model-info", "/consult", "/generate-report/{analysis_id}"
|
| 131 |
+
],
|
| 132 |
+
"documentation": "/docs"
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
@app.get("/health")
|
| 136 |
+
async def health_check():
|
| 137 |
+
"""Check the health status of the API and model"""
|
| 138 |
+
if session is None:
|
| 139 |
+
return JSONResponse(
|
| 140 |
+
status_code=503,
|
| 141 |
+
content={"status": "unhealthy", "message": "Model failed to load", "timestamp": datetime.now().isoformat()}
|
| 142 |
+
)
|
| 143 |
+
return {
|
| 144 |
+
"status": "healthy",
|
| 145 |
+
"service": "ClarirAI API",
|
| 146 |
+
"timestamp": datetime.now().isoformat(),
|
| 147 |
+
"model_loaded": session is not None
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
@app.get("/model-info")
|
| 151 |
+
async def get_model_info():
|
| 152 |
+
"""Get information about the model being used"""
|
| 153 |
+
if session is None:
|
| 154 |
+
raise HTTPException(status_code=503, detail="Model not available")
|
| 155 |
+
|
| 156 |
+
return {
|
| 157 |
+
"name": "ClarirAI Diabetic Retinopathy Classifier",
|
| 158 |
+
"version": "2.0.0",
|
| 159 |
+
"framework": "ONNX Runtime",
|
| 160 |
+
"input_shape": [1, 3, 224, 224],
|
| 161 |
+
"classes": [labels[i]["name"] for i in labels],
|
| 162 |
+
"preprocessing": "Resize to 224x224, normalize with ImageNet mean and std"
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
def transform_image(image):
    """Convert a PIL image into a normalized NCHW float32 tensor.

    Pipeline: resize to 224x224, scale pixels to [0, 1], standardize each
    channel with dataset-specific mean/std, move channels first (HWC -> CHW),
    and prepend a batch axis so the result is shaped (1, 3, 224, 224).
    """
    resized = image.resize((224, 224))
    pixels = np.array(resized, dtype=np.float32) / 255.0
    # Per-channel statistics (RGB) — retina-dataset specific, not ImageNet.
    channel_mean = np.array([0.5353, 0.3628, 0.2486], dtype=np.float32)
    channel_std = np.array([0.2126, 0.1586, 0.1401], dtype=np.float32)
    standardized = (pixels - channel_mean) / channel_std
    chw = standardized.transpose(2, 0, 1)
    return chw[np.newaxis, ...].astype(np.float32)
|
| 174 |
+
|
| 175 |
+
def calculate_severity_index(probabilities):
    """Collapse 5-class DR probabilities into a single 0-100 severity score.

    Each grade contributes its probability times a weight that grows with
    severity (No DR = 0 ... Proliferative DR = 100); the result is the
    expected severity, rounded to one decimal place.
    """
    grade_weights = [0, 25, 50, 75, 100]
    weighted_total = 0.0
    for grade in range(5):
        # Cast defensively so numpy scalars become plain Python floats.
        weighted_total += grade_weights[grade] * float(probabilities[grade])
    return round(weighted_total, 1)
|
| 183 |
+
|
| 184 |
+
def get_confidence_level(probability):
    """Translate a class probability into a human-readable confidence label.

    Bands are inclusive of their lower bound: >=0.90 "Very High",
    >=0.75 "High", >=0.50 "Moderate", >=0.25 "Low", otherwise "Very Low".
    """
    bands = (
        (0.90, "Very High"),
        (0.75, "High"),
        (0.50, "Moderate"),
        (0.25, "Low"),
    )
    for lower_bound, label in bands:
        if probability >= lower_bound:
            return label
    return "Very Low"
|
| 196 |
+
|
| 197 |
+
@app.post("/predict")
|
| 198 |
+
async def predict(file: UploadFile = File(...)):
|
| 199 |
+
"""
|
| 200 |
+
Predict diabetic retinopathy from retinal image with enhanced metrics
|
| 201 |
+
|
| 202 |
+
- **file**: Upload a retinal image file
|
| 203 |
+
|
| 204 |
+
Returns detailed classification with confidence levels, severity index, and recommendations
|
| 205 |
+
"""
|
| 206 |
+
analysis_id = str(uuid.uuid4())[:8] # Generate a unique ID for this analysis
|
| 207 |
+
logger.info(f"Analysis #{analysis_id}: Received image: {file.filename}, content-type: {file.content_type}")
|
| 208 |
+
|
| 209 |
+
if session is None:
|
| 210 |
+
raise HTTPException(status_code=503, detail="Model not available")
|
| 211 |
+
if not file.content_type.startswith("image/"):
|
| 212 |
+
raise HTTPException(status_code=400, detail="File provided is not an image")
|
| 213 |
+
|
| 214 |
+
try:
|
| 215 |
+
# Start timing for performance metrics
|
| 216 |
+
process_start = time.time()
|
| 217 |
+
|
| 218 |
+
image_data = await file.read()
|
| 219 |
+
input_img = Image.open(io.BytesIO(image_data)).convert("RGB")
|
| 220 |
+
input_tensor = transform_image(input_img)
|
| 221 |
+
input_name = session.get_inputs()[0].name
|
| 222 |
+
output_name = session.get_outputs()[0].name
|
| 223 |
+
|
| 224 |
+
logger.info(f"Analysis #{analysis_id}: Running inference")
|
| 225 |
+
inference_start = time.time()
|
| 226 |
+
prediction = session.run([output_name], {input_name: input_tensor})[0][0]
|
| 227 |
+
inference_time = time.time() - inference_start
|
| 228 |
+
|
| 229 |
+
exp_preds = np.exp(prediction - np.max(prediction))
|
| 230 |
+
probabilities = exp_preds / exp_preds.sum()
|
| 231 |
+
|
| 232 |
+
# Convert numpy array to Python list of floats
|
| 233 |
+
prob_list = [float(p) for p in probabilities]
|
| 234 |
+
|
| 235 |
+
# Calculate severity index
|
| 236 |
+
severity_index = calculate_severity_index(probabilities)
|
| 237 |
+
|
| 238 |
+
# Format detailed results with confidence levels
|
| 239 |
+
detailed_results = []
|
| 240 |
+
for i in labels:
|
| 241 |
+
prob = float(probabilities[i]) # Convert numpy.float32 to Python float
|
| 242 |
+
detailed_results.append({
|
| 243 |
+
"class": labels[i]["name"],
|
| 244 |
+
"description": labels[i]["description"],
|
| 245 |
+
"probability": round(prob, 4),
|
| 246 |
+
"percentage": round(prob * 100, 1),
|
| 247 |
+
"confidence_level": get_confidence_level(prob)
|
| 248 |
+
})
|
| 249 |
+
|
| 250 |
+
# Sort by probability (highest first)
|
| 251 |
+
detailed_results.sort(key=lambda x: x["probability"], reverse=True)
|
| 252 |
+
highest_class = detailed_results[0]["class"]
|
| 253 |
+
highest_prob = detailed_results[0]["probability"]
|
| 254 |
+
|
| 255 |
+
# Generate recommendation based on severity
|
| 256 |
+
recommendation = "Regular screening recommended."
|
| 257 |
+
if severity_index > 75:
|
| 258 |
+
recommendation = "Urgent ophthalmologist consultation required."
|
| 259 |
+
elif severity_index > 50:
|
| 260 |
+
recommendation = "Prompt ophthalmologist evaluation recommended."
|
| 261 |
+
elif severity_index > 25:
|
| 262 |
+
recommendation = "Follow-up with ophthalmologist advised."
|
| 263 |
+
|
| 264 |
+
# Get clinical information for the highest probability class
|
| 265 |
+
clinical_info = dr_clinical_info.get(highest_class, {})
|
| 266 |
+
|
| 267 |
+
# Calculate binary classification (DR vs No DR)
|
| 268 |
+
dr_probability = sum(float(probabilities[i]) for i in range(1, 5)) # Sum of all DR classes, convert to Python float
|
| 269 |
+
binary_classification = {
|
| 270 |
+
"no_dr": float(probabilities[0]), # Convert to Python float
|
| 271 |
+
"dr_detected": float(dr_probability),
|
| 272 |
+
"primary_assessment": "DR Detected" if dr_probability > float(probabilities[0]) else "No DR"
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
# Get suggested questions and generate answers if OpenAI is available
|
| 276 |
+
suggested_questions = clinical_info.get("suggested_questions", [])
|
| 277 |
+
question_answers = []
|
| 278 |
+
|
| 279 |
+
if OPENAI_AVAILABLE and OPENAI_API_KEY and suggested_questions:
|
| 280 |
+
try:
|
| 281 |
+
# Create an OpenAI client
|
| 282 |
+
client = openai.OpenAI(
|
| 283 |
+
api_key=OPENAI_API_KEY,
|
| 284 |
+
base_url=OPENROUTER_API_BASE,
|
| 285 |
+
default_headers={"HTTP-Referer": OPENROUTER_REFERER}
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
# Prepare context for the AI
|
| 289 |
+
context = f"""Patient Analysis Summary:
|
| 290 |
+
- Diagnosis: {highest_class} (Confidence: {get_confidence_level(highest_prob)})
|
| 291 |
+
- Severity Index: {severity_index}/100
|
| 292 |
+
- Clinical Findings: {clinical_info.get('findings', 'Not available')}
|
| 293 |
+
- Associated Risks: {clinical_info.get('risks', 'Not available')}
|
| 294 |
+
- Standard Recommendations: {clinical_info.get('recommendations', 'Not available')}
|
| 295 |
+
- Standard Follow-up: {clinical_info.get('follow_up', 'Not available')}
|
| 296 |
+
"""
|
| 297 |
+
|
| 298 |
+
# Generate answers for each suggested question
|
| 299 |
+
for question in suggested_questions:
|
| 300 |
+
try:
|
| 301 |
+
response = client.chat.completions.create(
|
| 302 |
+
model=OPENROUTER_MODEL,
|
| 303 |
+
messages=[
|
| 304 |
+
{"role": "system", "content": CONSULTATION_SYSTEM_PROMPT},
|
| 305 |
+
{"role": "user", "content": f"{context}\n\nPatient Question: {question}"}
|
| 306 |
+
]
|
| 307 |
+
)
|
| 308 |
+
|
| 309 |
+
answer = response.choices[0].message.content
|
| 310 |
+
question_answers.append({
|
| 311 |
+
"question": question,
|
| 312 |
+
"answer": answer
|
| 313 |
+
})
|
| 314 |
+
except Exception as e:
|
| 315 |
+
logger.warning(f"Error generating answer for question '{question}': {e}")
|
| 316 |
+
question_answers.append({
|
| 317 |
+
"question": question,
|
| 318 |
+
"answer": "Unable to generate answer at this time. Please try asking this question directly."
|
| 319 |
+
})
|
| 320 |
+
except Exception as e:
|
| 321 |
+
logger.warning(f"Error generating answers for suggested questions: {e}")
|
| 322 |
+
# Still include the questions even if answers couldn't be generated
|
| 323 |
+
question_answers = [{"question": q, "answer": None} for q in suggested_questions]
|
| 324 |
+
else:
|
| 325 |
+
# If OpenAI is not available, just include the questions without answers
|
| 326 |
+
question_answers = [{"question": q, "answer": None} for q in suggested_questions]
|
| 327 |
+
|
| 328 |
+
total_time = time.time() - process_start
|
| 329 |
+
|
| 330 |
+
logger.info(f"Analysis #{analysis_id}: Prediction complete: highest probability class = {highest_class} ({highest_prob:.2f})")
|
| 331 |
+
|
| 332 |
+
# Prepare response
|
| 333 |
+
response = {
|
| 334 |
+
"analysis_id": analysis_id,
|
| 335 |
+
"timestamp": datetime.now().isoformat(),
|
| 336 |
+
"detailed_classification": detailed_results,
|
| 337 |
+
"binary_classification": binary_classification,
|
| 338 |
+
"highest_probability_class": highest_class,
|
| 339 |
+
"severity_index": severity_index,
|
| 340 |
+
"recommendation": recommendation,
|
| 341 |
+
"clinical_information": clinical_info,
|
| 342 |
+
"ai_explanation": clinical_info.get("explanation", "No explanation available for this classification."),
|
| 343 |
+
"suggested_questions_with_answers": question_answers,
|
| 344 |
+
"performance": {
|
| 345 |
+
"inference_time_ms": round(inference_time * 1000, 2),
|
| 346 |
+
"total_processing_time_ms": round(total_time * 1000, 2)
|
| 347 |
+
}
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
# Cache the analysis result for later consultation
|
| 351 |
+
analysis_cache[analysis_id] = {
|
| 352 |
+
"result": response,
|
| 353 |
+
"image_filename": file.filename,
|
| 354 |
+
"analysis_time": datetime.now().isoformat()
|
| 355 |
+
}
|
| 356 |
+
|
| 357 |
+
return response
|
| 358 |
+
|
| 359 |
+
except Exception as e:
|
| 360 |
+
logger.error(f"Analysis #{analysis_id}: Error processing image: {e}", exc_info=True)
|
| 361 |
+
raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}")
|
| 362 |
+
|
| 363 |
+
@app.get("/analysis/{analysis_id}")
|
| 364 |
+
async def get_analysis(analysis_id: str):
|
| 365 |
+
"""Retrieve a previous analysis by its ID"""
|
| 366 |
+
if analysis_id not in analysis_cache:
|
| 367 |
+
raise HTTPException(status_code=404, detail=f"Analysis with ID {analysis_id} not found")
|
| 368 |
+
|
| 369 |
+
return analysis_cache[analysis_id]["result"]
|
| 370 |
+
|
| 371 |
+
@app.post("/consult")
|
| 372 |
+
async def get_ai_consultation(
|
| 373 |
+
analysis_id: str = Body(...),
|
| 374 |
+
question: Optional[str] = Body(None),
|
| 375 |
+
background_tasks: BackgroundTasks = None
|
| 376 |
+
):
|
| 377 |
+
"""
|
| 378 |
+
Get AI consultation based on analysis results
|
| 379 |
+
|
| 380 |
+
- analysis_id: ID of the previous analysis
|
| 381 |
+
- question: Optional specific question about the diagnosis (if not provided, general consultation is given)
|
| 382 |
+
"""
|
| 383 |
+
if not OPENAI_AVAILABLE or not OPENAI_API_KEY:
|
| 384 |
+
raise HTTPException(status_code=501, detail="AI consultation feature is not available. OpenAI package or API key not configured.")
|
| 385 |
+
|
| 386 |
+
if analysis_id not in analysis_cache:
|
| 387 |
+
raise HTTPException(status_code=404, detail=f"Analysis with ID {analysis_id} not found")
|
| 388 |
+
|
| 389 |
+
analysis_data = analysis_cache[analysis_id]["result"]
|
| 390 |
+
dr_class = analysis_data["highest_probability_class"]
|
| 391 |
+
severity_index = analysis_data["severity_index"]
|
| 392 |
+
clinical_info = dr_clinical_info.get(dr_class, {})
|
| 393 |
+
|
| 394 |
+
# Prepare context for the AI
|
| 395 |
+
context = f"""Patient Analysis Summary:
|
| 396 |
+
- Diagnosis: {dr_class} (Confidence: {get_confidence_level(analysis_data['detailed_classification'][0]['probability'])})
|
| 397 |
+
- Severity Index: {severity_index}/100
|
| 398 |
+
- Clinical Findings: {clinical_info.get('findings', 'Not available')}
|
| 399 |
+
- Associated Risks: {clinical_info.get('risks', 'Not available')}
|
| 400 |
+
- Standard Recommendations: {clinical_info.get('recommendations', 'Not available')}
|
| 401 |
+
- Standard Follow-up: {clinical_info.get('follow_up', 'Not available')}
|
| 402 |
+
"""
|
| 403 |
+
|
| 404 |
+
# Default question if none provided
|
| 405 |
+
if not question:
|
| 406 |
+
question = DEFAULT_QUESTIONS["general"].replace("my diagnosis", f"my diagnosis of {dr_class} diabetic retinopathy with a severity index of {severity_index}")
|
| 407 |
+
|
| 408 |
+
try:
|
| 409 |
+
# Create an OpenAI client with the API key and base URL
|
| 410 |
+
client = openai.OpenAI(
|
| 411 |
+
api_key=OPENAI_API_KEY,
|
| 412 |
+
base_url=OPENROUTER_API_BASE,
|
| 413 |
+
default_headers={"HTTP-Referer": OPENROUTER_REFERER}
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
# Call the LLM through OpenRouter using the new API format
|
| 417 |
+
response = client.chat.completions.create(
|
| 418 |
+
model=OPENROUTER_MODEL,
|
| 419 |
+
messages=[
|
| 420 |
+
{"role": "system", "content": CONSULTATION_SYSTEM_PROMPT},
|
| 421 |
+
{"role": "user", "content": f"{context}\n\nPatient Question: {question}"}
|
| 422 |
+
]
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
# Extract the content from the response using the new API format
|
| 426 |
+
consultation = response.choices[0].message.content
|
| 427 |
+
|
| 428 |
+
return {
|
| 429 |
+
"analysis_id": analysis_id,
|
| 430 |
+
"dr_class": dr_class,
|
| 431 |
+
"severity_index": severity_index,
|
| 432 |
+
"question": question,
|
| 433 |
+
"consultation": consultation,
|
| 434 |
+
"timestamp": datetime.now().isoformat()
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
except Exception as e:
|
| 438 |
+
logger.error(f"Error getting AI consultation: {e}")
|
| 439 |
+
raise HTTPException(status_code=500, detail=f"Error generating consultation: {str(e)}")
|
| 440 |
+
|
| 441 |
+
@app.get("/generate-report/{analysis_id}")
|
| 442 |
+
async def generate_report(analysis_id: str, include_consultation: bool = True):
|
| 443 |
+
"""
|
| 444 |
+
Generate a downloadable medical report based on analysis results
|
| 445 |
+
|
| 446 |
+
- analysis_id: ID of the previous analysis
|
| 447 |
+
- include_consultation: Whether to include AI consultation in the report
|
| 448 |
+
"""
|
| 449 |
+
if not DOCX_AVAILABLE:
|
| 450 |
+
raise HTTPException(status_code=501, detail="Report generation is not available. python-docx package not installed.")
|
| 451 |
+
|
| 452 |
+
if analysis_id not in analysis_cache:
|
| 453 |
+
raise HTTPException(status_code=404, detail=f"Analysis with ID {analysis_id} not found")
|
| 454 |
+
|
| 455 |
+
analysis_data = analysis_cache[analysis_id]["result"]
|
| 456 |
+
|
| 457 |
+
# Get consultation if requested and available
|
| 458 |
+
consultation_text = None
|
| 459 |
+
if include_consultation and OPENAI_AVAILABLE and OPENAI_API_KEY:
|
| 460 |
+
try:
|
| 461 |
+
consultation_response = await get_ai_consultation(analysis_id=analysis_id)
|
| 462 |
+
consultation_text = consultation_response.get("consultation")
|
| 463 |
+
except Exception as e:
|
| 464 |
+
logger.warning(f"Could not get consultation for report: {e}")
|
| 465 |
+
|
| 466 |
+
# Generate the report
|
| 467 |
+
try:
|
| 468 |
+
doc = Document()
|
| 469 |
+
|
| 470 |
+
# Add header
|
| 471 |
+
doc.add_heading('ClarirAI Medical Report', 0)
|
| 472 |
+
doc.add_paragraph(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
| 473 |
+
doc.add_paragraph(f"Analysis ID: {analysis_id}")
|
| 474 |
+
doc.add_paragraph(f"Original Analysis Date: {analysis_data['timestamp']}")
|
| 475 |
+
|
| 476 |
+
# Add diagnosis section
|
| 477 |
+
doc.add_heading('Diabetic Retinopathy Assessment', level=1)
|
| 478 |
+
doc.add_paragraph(f"Primary Diagnosis: {analysis_data['highest_probability_class']}")
|
| 479 |
+
doc.add_paragraph(f"Severity Index: {analysis_data['severity_index']}/100")
|
| 480 |
+
|
| 481 |
+
# Add detailed classification table
|
| 482 |
+
doc.add_heading('Detailed Classification', level=2)
|
| 483 |
+
table = doc.add_table(rows=1, cols=4)
|
| 484 |
+
table.style = 'Table Grid'
|
| 485 |
+
hdr_cells = table.rows[0].cells
|
| 486 |
+
hdr_cells[0].text = 'Classification'
|
| 487 |
+
hdr_cells[1].text = 'Description'
|
| 488 |
+
hdr_cells[2].text = 'Probability'
|
| 489 |
+
hdr_cells[3].text = 'Confidence Level'
|
| 490 |
+
|
| 491 |
+
for item in analysis_data['detailed_classification']:
|
| 492 |
+
row_cells = table.add_row().cells
|
| 493 |
+
row_cells[0].text = item['class']
|
| 494 |
+
row_cells[1].text = item['description']
|
| 495 |
+
row_cells[2].text = f"{item['percentage']}%"
|
| 496 |
+
row_cells[3].text = item['confidence_level']
|
| 497 |
+
|
| 498 |
+
# Add clinical information
|
| 499 |
+
dr_class = analysis_data['highest_probability_class']
|
| 500 |
+
clinical_info = dr_clinical_info.get(dr_class, {})
|
| 501 |
+
|
| 502 |
+
if clinical_info:
|
| 503 |
+
doc.add_heading('Clinical Information', level=1)
|
| 504 |
+
doc.add_heading('Findings', level=2)
|
| 505 |
+
doc.add_paragraph(clinical_info.get('findings', 'Not available'))
|
| 506 |
+
|
| 507 |
+
doc.add_heading('Associated Risks', level=2)
|
| 508 |
+
doc.add_paragraph(clinical_info.get('risks', 'Not available'))
|
| 509 |
+
|
| 510 |
+
doc.add_heading('Recommendations', level=2)
|
| 511 |
+
doc.add_paragraph(clinical_info.get('recommendations', 'Not available'))
|
| 512 |
+
|
| 513 |
+
doc.add_heading('Follow-up', level=2)
|
| 514 |
+
doc.add_paragraph(clinical_info.get('follow_up', 'Not available'))
|
| 515 |
+
|
| 516 |
+
# Add AI explanation
|
| 517 |
+
doc.add_heading('AI Analysis Explanation', level=1)
|
| 518 |
+
doc.add_paragraph(clinical_info.get('explanation', 'No explanation available for this classification.'))
|
| 519 |
+
|
| 520 |
+
# Add AI consultation if available
|
| 521 |
+
if consultation_text:
|
| 522 |
+
doc.add_heading('AI-Generated Medical Consultation', level=1)
|
| 523 |
+
doc.add_paragraph(consultation_text)
|
| 524 |
+
|
| 525 |
+
# Add disclaimer
|
| 526 |
+
doc.add_heading('Disclaimer', level=1)
|
| 527 |
+
doc.add_paragraph('This report is generated using artificial intelligence and should not replace professional medical advice. The analysis and recommendations provided are based on automated image processing and AI consultation. Please consult with a qualified healthcare provider for proper diagnosis, treatment, and follow-up care.')
|
| 528 |
+
|
| 529 |
+
# Save to temp file
|
| 530 |
+
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".docx")
|
| 531 |
+
doc.save(temp_file.name)
|
| 532 |
+
temp_file.close()
|
| 533 |
+
|
| 534 |
+
return FileResponse(
|
| 535 |
+
path=temp_file.name,
|
| 536 |
+
filename=f"ClarirAI_Report_{analysis_id}.docx",
|
| 537 |
+
media_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
|
| 538 |
+
)
|
| 539 |
+
|
| 540 |
+
except Exception as e:
|
| 541 |
+
logger.error(f"Error generating report: {e}")
|
| 542 |
+
raise HTTPException(status_code=500, detail=f"Error generating report: {str(e)}")
|
| 543 |
+
|
| 544 |
+
# Development entry point — the Docker image starts uvicorn itself, so this
# path (with auto-reload) is only used when running `python main.py` locally.
if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=True)
|
model.onnx
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b2df12f77e5a9240ad729d61a4e63c0304cb232bc99f47c4a166c2330efa0780
|
| 3 |
+
size 28227960
|
prompts.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ClarirAI - System Prompts and Questions

# System prompt for AI consultation
# System-role instructions for the patient-consultation chat completion
# (presumably passed to the OpenRouter model configured in main.py — verify
# against the caller). The leading/trailing newlines are part of the literal.
CONSULTATION_SYSTEM_PROMPT: str = """
You are ClarirAI's medical consultation assistant specializing in diabetic retinopathy.
Provide clear, accurate, and personalized information about DR diagnosis, treatment options, and management strategies.
Base your response on the specific diagnosis and clinical information provided.
Include both medical facts and supportive guidance.
Always remind the patient that this is AI-generated advice and they should consult with their healthcare provider.
"""
+
|
| 12 |
+
# Clinical information and suggested questions for each DR stage
|
| 13 |
+
DR_CLINICAL_INFO = {
|
| 14 |
+
"No DR": {
|
| 15 |
+
"findings": "No visible abnormalities in the retina.",
|
| 16 |
+
"risks": "Low risk for vision loss, but continued monitoring is important for diabetic patients.",
|
| 17 |
+
"recommendations": "Annual eye examination. Maintain good blood sugar control, blood pressure, and cholesterol levels.",
|
| 18 |
+
"follow_up": "Routine annual diabetic eye screening.",
|
| 19 |
+
"explanation": "The model has classified this image as showing no signs of diabetic retinopathy. This means the retinal image appears normal without microaneurysms, hemorrhages, exudates, or other abnormalities typically associated with diabetic eye disease. The blood vessels appear normal in caliber and pattern.",
|
| 20 |
+
"suggested_questions": [
|
| 21 |
+
"What preventive measures should I take to maintain my eye health with diabetes?",
|
| 22 |
+
"How often should I get my eyes checked given my current status?",
|
| 23 |
+
"What early symptoms should I watch for that might indicate developing diabetic retinopathy?"
|
| 24 |
+
]
|
| 25 |
+
},
|
| 26 |
+
"Mild": {
|
| 27 |
+
"findings": "Microaneurysms (small areas of balloon-like swelling in the retina's tiny blood vessels).",
|
| 28 |
+
"risks": "Low risk for vision loss in the short term, but indicates progression of diabetes affecting the eyes.",
|
| 29 |
+
"recommendations": "Improve glycemic control. Monitor blood pressure and cholesterol levels. Consider lifestyle modifications.",
|
| 30 |
+
"follow_up": "Eye examination every 6-12 months depending on other risk factors.",
|
| 31 |
+
"explanation": "The model has detected mild nonproliferative diabetic retinopathy in this image. This early stage is characterized by the presence of at least one microaneurysm (small swelling in the retina's blood vessels). At this stage, the condition typically doesn't affect vision, but it indicates that diabetes is beginning to damage the eye's blood vessels.",
|
| 32 |
+
"suggested_questions": [
|
| 33 |
+
"What lifestyle changes would be most effective to prevent my condition from progressing?",
|
| 34 |
+
"How quickly can mild diabetic retinopathy progress to more severe stages?",
|
| 35 |
+
"Are there specific dietary recommendations that could help my condition?"
|
| 36 |
+
]
|
| 37 |
+
},
|
| 38 |
+
"Moderate": {
|
| 39 |
+
"findings": "More microaneurysms, dot and blot hemorrhages, venous beading, and/or cotton wool spots.",
|
| 40 |
+
"risks": "Moderate risk for vision loss. Indicates progressive damage to the retina.",
|
| 41 |
+
"recommendations": "Strict glycemic control. Blood pressure management. Possible referral to endocrinologist for diabetes management optimization.",
|
| 42 |
+
"follow_up": "Eye examination every 4-6 months. Possible additional testing such as OCT or fluorescein angiography.",
|
| 43 |
+
"explanation": "The model has identified moderate nonproliferative diabetic retinopathy in this image. This stage shows increased retinal damage with multiple microaneurysms, dot and blot hemorrhages (small spots of blood leaking into the retina), and potentially cotton wool spots (areas of retinal nerve fiber layer infarction). Venous beading (irregularity in vein diameter) may also be present. These changes indicate more significant vascular damage from diabetes.",
|
| 44 |
+
"suggested_questions": [
|
| 45 |
+
"What treatment options should I consider at this stage of diabetic retinopathy?",
|
| 46 |
+
"How might this condition affect my vision in the near future?",
|
| 47 |
+
"What specific blood sugar targets should I aim for to slow the progression?"
|
| 48 |
+
]
|
| 49 |
+
},
|
| 50 |
+
"Severe": {
|
| 51 |
+
"findings": "Substantial intraretinal hemorrhages, definite venous beading, and prominent intraretinal microvascular abnormalities (IRMA).",
|
| 52 |
+
"risks": "High risk for vision loss. Significant retinal damage present.",
|
| 53 |
+
"recommendations": "Urgent ophthalmologist consultation. Aggressive management of diabetes and other risk factors.",
|
| 54 |
+
"follow_up": "Frequent monitoring every 2-4 months by an ophthalmologist. Possible preparation for intervention.",
|
| 55 |
+
"explanation": "The model has detected severe nonproliferative diabetic retinopathy in this image. This advanced stage shows extensive vascular damage with numerous hemorrhages in all four quadrants of the retina, prominent venous beading, and intraretinal microvascular abnormalities (IRMA). At this stage, the retina is severely compromised, and there is a high risk of progression to proliferative diabetic retinopathy within a year if not treated.",
|
| 56 |
+
"suggested_questions": [
|
| 57 |
+
"What urgent interventions should I consider to prevent further vision loss?",
|
| 58 |
+
"Am I at immediate risk for blindness, and what can be done to prevent it?",
|
| 59 |
+
"How will this condition affect my daily activities and independence?"
|
| 60 |
+
]
|
| 61 |
+
},
|
| 62 |
+
"Proliferative DR": {
|
| 63 |
+
"findings": "Neovascularization (growth of new abnormal blood vessels), potential vitreous hemorrhage or retinal detachment.",
|
| 64 |
+
"risks": "Very high risk for severe vision loss or blindness without treatment.",
|
| 65 |
+
"recommendations": "Immediate referral for ophthalmological intervention. Possible laser photocoagulation, anti-VEGF injections, or vitrectomy.",
|
| 66 |
+
"follow_up": "Frequent specialized ophthalmological care. Post-treatment monitoring schedule as determined by specialist.",
|
| 67 |
+
"explanation": "The model has identified proliferative diabetic retinopathy in this image, the most advanced stage of the disease. This is characterized by neovascularization - the growth of abnormal new blood vessels on the surface of the retina or optic disc. These fragile vessels can easily rupture and bleed into the vitreous (gel-like substance in the eye), causing sudden vision loss. There may also be signs of previous treatments like laser scars if intervention has already occurred.",
|
| 68 |
+
"suggested_questions": [
|
| 69 |
+
"What immediate surgical or laser treatments do I need to consider?",
|
| 70 |
+
"What is the likelihood of regaining vision that has been lost?",
|
| 71 |
+
"How will ongoing treatment affect my quality of life and what support systems should I put in place?"
|
| 72 |
+
]
|
| 73 |
+
}
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
# Default questions for each severity level
|
| 77 |
+
DEFAULT_QUESTIONS = {
|
| 78 |
+
"general": "Based on my diagnosis, what should I know about my condition and what steps should I take?",
|
| 79 |
+
"lifestyle": "What lifestyle changes would be most beneficial for my specific condition?",
|
| 80 |
+
"prognosis": "What is the likely progression of my condition if I follow all medical recommendations?",
|
| 81 |
+
"treatment": "What are the current best treatment options for my condition?"
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
# Report generation system prompt
|
| 85 |
+
REPORT_SYSTEM_PROMPT = """
|
| 86 |
+
You are ClarirAI's medical report assistant specializing in diabetic retinopathy.
|
| 87 |
+
Your role is to generate professional medical report content based on diabetic retinopathy analysis.
|
| 88 |
+
Include all relevant clinical information, findings, and recommendations.
|
| 89 |
+
Use professional medical terminology but ensure explanations are clear enough for patients to understand.
|
| 90 |
+
Clearly mark any sections that require follow-up or urgent attention.
|
| 91 |
+
"""
|
requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn
|
| 3 |
+
onnxruntime
|
| 4 |
+
numpy
|
| 5 |
+
pillow
|
| 6 |
+
python-multipart
|
| 7 |
+
openai
|
| 8 |
+
python-docx
|
| 9 |
+
python-dotenv
|