setup for hf spaces
- Dockerfile +15 -0
- README.md +28 -0
- frontend/src/api.ts +1 -1
- server/detect.py +3 -1
Dockerfile
ADDED
```diff
@@ -0,0 +1,15 @@
+FROM python:3.11-slim
+
+WORKDIR /app
+
+COPY server/requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY server/ ./server/
+COPY training/exported_models/inference_model.sim.onnx ./model/inference_model.sim.onnx
+
+ENV MODEL_PATH=/app/model/inference_model.sim.onnx
+
+EXPOSE 7860
+
+CMD ["uvicorn", "server.main:app", "--host", "0.0.0.0", "--port", "7860"]
```
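To sanity-check the image before pushing it to the Space, it can be built and run locally. A minimal sketch, assuming Docker is available, that the build runs from the repo root (so the `server/` and `training/exported_models/` paths resolve), and that the health check is served at `/health` — only the response shape is documented in the README, not the path:

```bash
# Build the image from the repo root (the tag name is arbitrary)
docker build -t parking-backend .

# Run it on the same port the Space uses
docker run --rm -p 7860:7860 parking-backend

# In another shell, hit the health route
# (the /health path is an assumption; the expected body comes from the README)
curl http://localhost:7860/health
# {"status": "ok", "model_loaded": true}
```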
README.md
CHANGED
````diff
@@ -73,6 +73,34 @@ Response:
 
 Returns `{"status": "ok", "model_loaded": true}`.
 
+## Deployment
+
+The backend and frontend are deployed separately so the page loads instantly (static CDN) while the model runs server-side.
+
+### Backend — Hugging Face Spaces
+
+1. Create a new Space at [huggingface.co/new-space](https://huggingface.co/new-space) with **Docker** SDK
+2. Clone the Space repo and copy in the required files:
+```bash
+git clone https://huggingface.co/spaces/YOUR_USER/YOUR_SPACE
+cd YOUR_SPACE
+cp /path/to/parking/Dockerfile .
+cp -r /path/to/parking/server ./server
+mkdir -p training/exported_models
+cp /path/to/parking/training/exported_models/inference_model.sim.onnx ./training/exported_models/
+git add . && git commit -m "Initial deploy" && git push
+```
+3. The Space will build the Docker image and start serving at `https://YOUR_USER-YOUR_SPACE.hf.space`
+
+### Frontend — Vercel
+
+1. Push the `frontend/` directory to a GitHub repo (or use the full project repo)
+2. Import the project at [vercel.com/new](https://vercel.com/new)
+3. Set **Root Directory** to `frontend`
+4. Add the environment variable:
+   - `VITE_API_URL` = `https://YOUR_USER-YOUR_SPACE.hf.space`
+5. Deploy — Vercel auto-detects Vite and builds it
+
 ## Model
 
 - Architecture: RF-DETR (medium)
````
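For local frontend development against the deployed backend, the same `VITE_API_URL` value can go into a Vite env file instead of Vercel's dashboard. A minimal sketch, assuming the frontend uses npm and Vite's standard `dev` script (neither is shown in this commit):

```bash
cd frontend

# Vite loads .env.local automatically; it is meant to stay out of git
echo "VITE_API_URL=https://YOUR_USER-YOUR_SPACE.hf.space" > .env.local

# npm and the "dev" script are assumptions; substitute the project's actual tooling
npm install
npm run dev
```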
frontend/src/api.ts
CHANGED
```diff
@@ -1,4 +1,4 @@
-const API_URL = 'http://localhost:8000';
+const API_URL = (import.meta.env.VITE_API_URL as string) || 'http://localhost:8000';
 
 export interface Detection {
   bbox: [number, number, number, number];
```
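Vite inlines `import.meta.env.VITE_API_URL` into the bundle at build time, so the variable has to be set when the build runs (Vercel injects it from the project's environment settings). A quick local check of that substitution, assuming an npm `build` script and Vite's default `dist/` output directory:

```bash
cd frontend

# Build with the variable set so Vite can inline it
VITE_API_URL=https://YOUR_USER-YOUR_SPACE.hf.space npm run build

# The literal URL should now appear in the emitted JS
grep -rl "YOUR_USER-YOUR_SPACE.hf.space" dist/
```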
server/detect.py
CHANGED
```diff
@@ -1,6 +1,7 @@
 """ONNX inference for car detection in aerial images."""
 
 import base64
+import os
 from pathlib import Path
 
 import cv2
@@ -8,7 +9,8 @@ import numpy as np
 import onnxruntime as ort
 
 _PROJECT_ROOT = Path(__file__).resolve().parent.parent
-MODEL_PATH = _PROJECT_ROOT / "training" / "exported_models" / "inference_model.sim.onnx"
+_DEFAULT_MODEL_PATH = _PROJECT_ROOT / "training" / "exported_models" / "inference_model.sim.onnx"
+MODEL_PATH = Path(os.environ.get("MODEL_PATH", str(_DEFAULT_MODEL_PATH)))
 
 
 def load_model(model_path: Path = MODEL_PATH) -> ort.InferenceSession:
```
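This is the hook the Dockerfile uses: it sets `MODEL_PATH=/app/model/inference_model.sim.onnx` so the copy baked into the image is loaded instead of the repo-relative default. The same override works for a local run; a sketch, with the alternate model path purely illustrative:

```bash
# Without MODEL_PATH, detect.py falls back to training/exported_models/inference_model.sim.onnx
uvicorn server.main:app --host 0.0.0.0 --port 8000

# With MODEL_PATH set, the server loads whatever ONNX file the variable points at
# (the path below is illustrative)
MODEL_PATH=/tmp/alternate_model.onnx uvicorn server.main:app --host 0.0.0.0 --port 8000
```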