"""
=================================================================
DERMASCAN-AI β FastAPI Application
=================================================================
"""
import io
import time
import numpy as np
from PIL import Image
from pathlib import Path
from fastapi import FastAPI, File, UploadFile, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from src.inference.predictor import SkinPredictor
from src.response.response_engine import ResponseEngine
from src.response.hospital_finder import HospitalFinder
from api.schemas import HealthResponse
# -- Global service singletons --
# All three are populated by `lifespan` at startup and stay None until then;
# request handlers read them as module globals.
predictor = None        # SkinPredictor: model inference wrapper
response_engine = None  # ResponseEngine: builds the clinical response payload
hospital_finder = None  # HospitalFinder: maps links + emergency numbers
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the model and helper services at startup.

    Populates the module-level singletons (`predictor`, `response_engine`,
    `hospital_finder`) before the server accepts requests, then yields for
    the application's lifetime and logs on shutdown.
    """
    global predictor, response_engine, hospital_finder
    print("π Starting DermaScan-AI...")
    predictor = SkinPredictor(
        model_path="checkpoints/best_model.pth",
        class_config_path="configs/class_config.json",
        device="cpu",  # CPU-only inference; no GPU assumed at deploy time
    )
    response_engine = ResponseEngine(
        class_config_path="configs/class_config.json",
        response_templates_path="configs/response_templates.json",
    )
    hospital_finder = HospitalFinder()
    # BUG FIX: this literal was split across two source lines (a mangled
    # emoji), which is a SyntaxError — rejoined onto a single line.
    print("β DermaScan-AI ready!")
    yield
    print("π Shutting down...")
# FastAPI application; `lifespan` loads the model before serving requests.
app = FastAPI(
    title="π¬ DermaScan-AI",
    description="AI-powered skin disease detection with clinical guidance",
    version="1.0.0",
    lifespan=lifespan,
)
# NOTE(review): allow_origins=["*"] lets any website call this API —
# acceptable for a public demo; restrict to known origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
def convert_numpy(obj):
    """Recursively convert numpy types to native Python for JSON serialization.

    FastAPI's JSON encoder cannot handle numpy scalars (np.int64,
    np.float32, np.bool_) or ndarrays, which the predictor returns.

    Args:
        obj: Any JSON-ish structure — dict, list, tuple, numpy scalar,
            ndarray, or a plain Python value.

    Returns:
        The same structure with every numpy value replaced by its native
        equivalent (bool/int/float/list). Tuples are normalized to lists:
        JSON has no tuple type, and the original implementation left numpy
        values nested inside tuples unconverted, crashing serialization.
    """
    if isinstance(obj, dict):
        return {k: convert_numpy(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):  # FIX: tuples were previously skipped
        return [convert_numpy(v) for v in obj]
    if isinstance(obj, np.bool_):
        return bool(obj)
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    return obj
@app.get("/health", response_model=HealthResponse)
async def health():
    """Report service liveness and whether the model has been loaded."""
    payload = {
        "status": "healthy",
        "model_loaded": predictor is not None,
        "model_name": "EfficientNet-B3",
        "version": "1.0.0",
    }
    return HealthResponse(**payload)
@app.post("/predict")
async def predict(
    file: UploadFile = File(...),
    city: str = Query("Delhi", description="City in India"),
    state: str = Query("Delhi", description="State in India"),
):
    """Classify an uploaded skin image and attach clinical guidance.

    Pipeline: validate the upload -> run model inference -> build the
    clinical response -> enrich it with hospital-search links, emergency
    numbers, and the measured inference time.

    Raises:
        HTTPException 400: unsupported content type, oversized file, or
            an undecodable image.
        HTTPException 503: service not yet initialized (startup pending
            or failed).
    """
    # FIX: guard against requests arriving before `lifespan` has populated
    # the globals — the original crashed with AttributeError (HTTP 500).
    if predictor is None or response_engine is None or hospital_finder is None:
        raise HTTPException(503, "Service is starting up, try again shortly")

    if file.content_type not in {"image/jpeg", "image/png", "image/jpg"}:
        raise HTTPException(400, "Only JPG/PNG images supported")

    contents = await file.read()
    if len(contents) > 10 * 1024 * 1024:  # 10 MB upload cap
        raise HTTPException(400, "File too large (max 10MB)")

    try:
        image = Image.open(io.BytesIO(contents)).convert('RGB')
    except Exception:
        # PIL raises several exception types on corrupt data; map them all
        # to a client error instead of letting them surface as a 500.
        raise HTTPException(400, "Invalid image file")

    # Wall-clock inference time is reported to the client (seconds, 3 dp).
    start = time.time()
    prediction = predictor.predict(image)
    inference_time = time.time() - start

    response = response_engine.generate_response(
        predicted_class=prediction['predicted_class'],
        confidence=prediction['confidence'],
        all_probabilities=prediction['all_probabilities'],
    )

    # Enrich with a hospital search scoped to the caller-supplied location.
    hospital_result = hospital_finder.search(
        query=response['hospital_search_query'],
        city=city,
        state=state,
    )
    response['maps_url'] = hospital_result['maps_url']
    response['maps_embed_url'] = hospital_result['embed_url']
    response['hospital_location'] = hospital_result['location']
    response['inference_time'] = round(inference_time, 3)
    response['emergency_numbers'] = hospital_finder.get_emergency_numbers()

    # Strip numpy types so FastAPI's JSON encoder can serialize the result.
    return convert_numpy(response)
if __name__ == "__main__":
    # Dev entry point; `reload=True` is for local development only.
    # FIX: removed a stray trailing "|" token after the call (SyntaxError).
    import uvicorn

    uvicorn.run("api.app:app", host="0.0.0.0", port=8000, reload=True)