Spaces:
Runtime error
Runtime error
Initial FastAPI Docker Space
Browse files- .dockerignore +12 -0
- .gitignore +8 -0
- Dockerfile +24 -0
- README.md +34 -0
- app/__init__.py +1 -0
- app/detector.py +67 -0
- app/main.py +60 -0
- requirements.txt +8 -0
.dockerignore
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
*.pyo
|
| 4 |
+
*.pyd
|
| 5 |
+
.Python
|
| 6 |
+
.env
|
| 7 |
+
.venv/
|
| 8 |
+
venv/
|
| 9 |
+
.git/
|
| 10 |
+
.gitignore
|
| 11 |
+
.DS_Store
|
| 12 |
+
node_modules/
|
.gitignore
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.env
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.pyc
|
| 4 |
+
*.pyo
|
| 5 |
+
*.pyd
|
| 6 |
+
.Python
|
| 7 |
+
.venv/
|
| 8 |
+
venv/
|
Dockerfile
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11-slim

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

WORKDIR /app

# System deps: pillow image codecs (JPEG/zlib) plus a compiler for any
# source-built wheels.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        libjpeg62-turbo-dev \
        zlib1g-dev \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt

COPY app /app/app

# Hugging Face Spaces run the container as a non-root user (UID 1000).
# Without a writable home/cache, the first model download from the hub
# fails with PermissionError and the Space shows "Runtime error".
RUN useradd -m -u 1000 appuser \
    && chown -R appuser:appuser /app
USER appuser
ENV HOME=/home/appuser \
    HF_HOME=/home/appuser/.cache/huggingface

ENV PORT=7860
EXPOSE 7860

CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -8,4 +8,38 @@ pinned: false
|
|
| 8 |
short_description: Bench detector
|
| 9 |
---
|
| 10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 8 |
short_description: Bench detector
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# bench-detector (FastAPI)
|
| 12 |
+
|
| 13 |
+
## Local Docker
|
| 14 |
+
|
| 15 |
+
Build:
|
| 16 |
+
|
| 17 |
+
```bash
|
| 18 |
+
docker build -t bench-detector:local .
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
Run:
|
| 22 |
+
|
| 23 |
+
```bash
|
| 24 |
+
docker run --rm -p 7860:7860 bench-detector:local
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
Health check:
|
| 28 |
+
|
| 29 |
+
```bash
|
| 30 |
+
curl -s http://localhost:7860/health
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
Score an image:
|
| 34 |
+
|
| 35 |
+
```bash
|
| 36 |
+
curl -s -X POST http://localhost:7860/score \
|
| 37 |
+
-F "image=@/path/to/image.jpg" | jq
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
## Environment variables
|
| 41 |
+
|
| 42 |
+
- `MODEL_ID` (default: `openai/clip-vit-base-patch32`)
|
| 43 |
+
- `DEVICE` (default: auto; set to `cpu` or `cuda` if desired)
|
| 44 |
+
|
| 45 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
app/detector.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Iterable
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from PIL import Image
|
| 8 |
+
from transformers import CLIPModel, CLIPProcessor
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass(frozen=True)
|
| 12 |
+
class BenchDetectionResult:
|
| 13 |
+
probability: float
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Positive prompts: CLIP scores the image against every prompt, and the
# bench probability is the softmax mass these receive.
_BENCH_PROMPTS: tuple[str, ...] = (
    "a park bench",
    "a wooden bench outdoors",
    "a metal bench in a park",
    "a sitting bench",
)

# Contrast prompts: plausible non-bench objects/scenes that absorb
# probability mass when the image contains no bench.
_NON_BENCH_PROMPTS: tuple[str, ...] = (
    "a chair",
    "a sofa",
    "a table",
    "a street",
    "a tree",
    "no bench",
)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class BenchDetector:
    """Zero-shot bench detector built on a CLIP image/text model.

    Scores an image against bench vs. non-bench text prompts and reports
    the summed softmax mass assigned to the bench prompts.
    """

    def __init__(
        self,
        model_id: str = "openai/clip-vit-base-patch32",
        device: str | None = None,
    ) -> None:
        """Load the CLIP checkpoint and its processor.

        Args:
            model_id: Hugging Face hub id of a CLIP checkpoint.
            device: Torch device string; defaults to "cuda" when available,
                otherwise "cpu".
        """
        self.model_id = model_id
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")

        self._processor = CLIPProcessor.from_pretrained(model_id)
        self._model = CLIPModel.from_pretrained(model_id)
        self._model.to(self.device)
        self._model.eval()

    def predict_probability(
        self,
        image: Image.Image,
        bench_prompts: Iterable[str] = _BENCH_PROMPTS,
        non_bench_prompts: Iterable[str] = _NON_BENCH_PROMPTS,
    ) -> BenchDetectionResult:
        """Return the probability that ``image`` contains a bench.

        The probability is the softmax mass assigned to ``bench_prompts``
        relative to all prompts, clamped to [0, 1].

        Raises:
            ValueError: if ``bench_prompts`` is empty — the score would be
                meaningless, and an empty text batch fails inside the
                processor with an opaque error.
        """
        bench_prompts = tuple(bench_prompts)
        non_bench_prompts = tuple(non_bench_prompts)
        if not bench_prompts:
            raise ValueError("bench_prompts must contain at least one prompt")
        texts = bench_prompts + non_bench_prompts

        inputs = self._processor(text=list(texts), images=image, return_tensors="pt", padding=True)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        # inference_mode: stricter and slightly faster than no_grad for
        # pure inference (no autograd bookkeeping at all).
        with torch.inference_mode():
            outputs = self._model(**inputs)
            logits_per_image = outputs.logits_per_image
            probs = logits_per_image.softmax(dim=-1)[0]

        # Bench prompts come first in `texts`, so their mass is the prefix
        # sum; clamp defensively against floating-point drift past 1.0.
        bench_prob = float(probs[: len(bench_prompts)].sum().item())
        bench_prob = max(0.0, min(1.0, bench_prob))
        return BenchDetectionResult(probability=bench_prob)
|
app/main.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import io
|
| 4 |
+
import os
|
| 5 |
+
import time
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
from fastapi import FastAPI, File, HTTPException, UploadFile
|
| 9 |
+
from fastapi.responses import JSONResponse
|
| 10 |
+
from PIL import Image
|
| 11 |
+
|
| 12 |
+
from app.detector import BenchDetector
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
app = FastAPI(title="bench-detector", version="0.1.0")

# Lazily-initialized module-level singleton: created on the first /score
# request (see _get_detector) so the Space boots quickly and model-download
# failures surface per-request rather than at import time.
_detector: BenchDetector | None = None
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _get_detector() -> BenchDetector:
    """Return the process-wide detector, building it on first use.

    Model id and device come from the MODEL_ID / DEVICE environment
    variables; construction is deferred so startup stays fast.
    """
    global _detector
    if _detector is not None:
        return _detector
    _detector = BenchDetector(
        model_id=os.environ.get("MODEL_ID", "openai/clip-vit-base-patch32"),
        device=os.environ.get("DEVICE"),
    )
    return _detector
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@app.get("/health")
def health() -> dict[str, Any]:
    """Liveness probe for the Space and local `curl /health` checks."""
    status: dict[str, Any] = {"ok": True}
    return status
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@app.post("/score")
async def score(image: UploadFile = File(...)) -> JSONResponse:
    """Score an uploaded image for bench presence.

    Accepts a multipart upload under the field name ``image`` and returns
    the bench probability plus model/device/timing metadata.

    Raises:
        HTTPException: 400 for an empty upload or an undecodable image.
    """
    raw = await image.read()
    if not raw:
        raise HTTPException(status_code=400, detail="Empty file")

    try:
        pil = Image.open(io.BytesIO(raw))
        # Image.open is lazy: force the full decode here so corrupt or
        # truncated files fail with a 400 instead of a 500 inside the
        # model call (convert() only forces a load for non-RGB modes).
        pil.load()
        if pil.mode != "RGB":
            pil = pil.convert("RGB")
    except Exception as e:
        # Chain the decode error so server-side tracebacks keep the cause.
        raise HTTPException(status_code=400, detail=f"Invalid image: {e}") from e

    detector = _get_detector()

    # Time only the model inference, not upload/decoding.
    t0 = time.perf_counter()
    result = detector.predict_probability(pil)
    elapsed_ms = (time.perf_counter() - t0) * 1000.0

    return JSONResponse(
        {
            "probability": result.probability,
            "model_id": detector.model_id,
            "device": detector.device,
            "elapsed_ms": elapsed_ms,
        }
    )
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.115.6
|
| 2 |
+
python-multipart==0.0.20
|
| 3 |
+
uvicorn[standard]==0.34.0
|
| 4 |
+
pillow==11.0.0
|
| 5 |
+
transformers==4.47.1
|
| 6 |
+
# CPU-only PyTorch
|
| 7 |
+
--extra-index-url https://download.pytorch.org/whl/cpu
|
| 8 |
+
torch==2.5.1
|