Upload 3 files
- Dockerfile +25 -0
- app.py +79 -0
- requirements.txt +8 -0
Dockerfile
ADDED
@@ -0,0 +1,25 @@
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    curl \
+    software-properties-common \
+    git \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements and install
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the application code
+COPY . .
+
+# Expose port (HF Spaces uses 7860)
+EXPOSE 7860
+
+# Run the app
+CMD ["python", "app.py"]
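Note: not part of this commit, but the image can be exercised locally with something like docker build -t medgemma-api . followed by docker run -p 7860:7860 medgemma-api (the medgemma-api tag is an illustrative placeholder); the container then serves the API on port 7860.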
app.py
ADDED
@@ -0,0 +1,79 @@
+from fastapi import FastAPI, UploadFile, File, Form
+from transformers import pipeline
+from PIL import Image
+import torch
+import io
+import uvicorn
+import os
+
+app = FastAPI(title="MedGemma 4B Internal API")
+
+# Check if we are running on Hugging Face Spaces.
+# Spaces usually provide GPUs; if not, this falls back to CPU (which will be slow).
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
+
+print(f"Loading full 9GB MedGemma model on {device}...")
+
+try:
+    # Use the pipeline API for the easiest implementation of the 9GB model
+    pipe = pipeline(
+        "image-text-to-text",
+        model="google/medgemma-4b-it",
+        torch_dtype=dtype,
+        device_map="auto",
+    )
+    print("Model loaded successfully!")
+except Exception as e:
+    print(f"Error loading model: {e}")
+    pipe = None
+
+@app.get("/")
+def read_root():
+    return {
+        "status": "MedGemma 4B API is active",
+        "device": device,
+        "model_size": "Full 9GB"
+    }
+
+@app.post("/analyze")
+async def analyze_image(
+    prompt: str = Form("Describe this medical image and give a preliminary analysis."),
+    file: UploadFile = File(...)
+):
+    if pipe is None:
+        return {"error": "Model not loaded properly. Check logs."}
+
+    # Read the uploaded image
+    contents = await file.read()
+    image = Image.open(io.BytesIO(contents)).convert("RGB")
+
+    # Format messages for MedGemma
+    messages = [
+        {
+            "role": "system",
+            "content": [{"type": "text", "text": "You are an expert radiologist and medical consultant."}]
+        },
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": prompt},
+                {"type": "image", "image": image}
+            ]
+        }
+    ]
+
+    # Inference
+    output = pipe(text=messages, max_new_tokens=250)
+
+    # Extract the response text
+    result = output[0]["generated_text"][-1]["content"]
+
+    return {
+        "analysis": result,
+        "success": True
+    }
+
+if __name__ == "__main__":
+    # Port 7860 is the default for Hugging Face Spaces
+    uvicorn.run(app, host="0.0.0.0", port=7860)
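For reference, a minimal client sketch for the /analyze endpoint, using the requests package already listed in requirements.txt. The URL, file name, and content type below are illustrative assumptions, not part of this commit:

import requests

# Hypothetical endpoint and sample image; adjust to your deployment
url = "http://localhost:7860/analyze"
with open("sample_xray.png", "rb") as f:
    resp = requests.post(
        url,
        data={"prompt": "Describe this medical image and give a preliminary analysis."},  # Form field
        files={"file": ("sample_xray.png", f, "image/png")},  # UploadFile field
    )
print(resp.json())  # expected shape: {"analysis": "...", "success": true}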
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+transformers>=4.50.0
+accelerate
+torch
+pillow
+fastapi
+uvicorn
+python-multipart
+requests