Hiconcep committed (verified)
Commit e6cacc5 · 1 Parent(s): f1f3ea7

Upload Dockerfile with huggingface_hub

Files changed (1)
  1. Dockerfile +99 -0
Dockerfile ADDED
@@ -0,0 +1,99 @@
+ # HuggingFace Spaces — single container (backend + frontend)
+ # Port 7860 (Spaces default)
+
+ # --- Stage 1: Build frontend ---
+ FROM node:22-alpine AS frontend-build
+ WORKDIR /app
+ RUN corepack enable
+ COPY frontend/package.json frontend/pnpm-lock.yaml ./
+ RUN pnpm install --frozen-lockfile
+ COPY frontend/ .
+ RUN pnpm build
+
+ # --- Stage 2: Runtime ---
+ FROM python:3.11-slim
+
+ WORKDIR /app
+
+ # Install nginx + supervisor
+ RUN apt-get update && apt-get install -y --no-install-recommends nginx supervisor \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Install uv
+ COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
+
+ # Install Python dependencies
+ COPY backend/pyproject.toml backend/uv.lock ./
+ RUN uv sync --frozen --no-dev
+
+ # Copy backend code
+ COPY backend/neural_mri/ neural_mri/
+
+ # Copy frontend build
+ COPY --from=frontend-build /app/dist /usr/share/nginx/html
+
+ # Nginx config: serve frontend + proxy API/WS to backend
+ RUN cat > /etc/nginx/conf.d/default.conf <<'NGINX'
+ server {
+     listen 7860;
+     root /usr/share/nginx/html;
+     index index.html;
+     client_max_body_size 10M;
+
+     location / {
+         try_files $uri $uri/ /index.html;
+     }
+     location /api/ {
+         proxy_pass http://127.0.0.1:8000;
+         proxy_read_timeout 300;
+     }
+     location /ws/ {
+         proxy_pass http://127.0.0.1:8000;
+         proxy_http_version 1.1;
+         proxy_set_header Upgrade $http_upgrade;
+         proxy_set_header Connection "upgrade";
+         proxy_read_timeout 300;
+     }
+ }
+ NGINX
+ # Remove default site
+ RUN rm -f /etc/nginx/sites-enabled/default
+
+ # Supervisor config: run nginx + uvicorn together
+ RUN cat > /etc/supervisor/conf.d/app.conf <<'SUPER'
+ [supervisord]
+ nodaemon=true
+ logfile=/dev/stdout
+ logfile_maxbytes=0
+
+ [program:backend]
+ command=uv run uvicorn neural_mri.main:app --host 127.0.0.1 --port 8000
+ directory=/app
+ autostart=true
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+
+ [program:nginx]
+ command=nginx -g "daemon off;"
+ autostart=true
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ SUPER
+
+ # Pre-download GPT-2 to avoid first-load delay
+ RUN uv run python -c "from transformers import AutoTokenizer, AutoModelForCausalLM; AutoTokenizer.from_pretrained('gpt2'); AutoModelForCausalLM.from_pretrained('gpt2')"
+
+ ENV NMRI_DEVICE=cpu
+ ENV NMRI_DEFAULT_MODEL=gpt2
+ ENV NMRI_ENVIRONMENT=huggingface
+ ENV NMRI_CORS_ORIGINS='["*"]'
+
+ EXPOSE 7860
+
+ CMD ["supervisord", "-c", "/etc/supervisor/supervisord.conf"]
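
A note on what the supervisor entry assumes: `uvicorn neural_mri.main:app` expects the backend package to expose an ASGI app whose routes line up with the nginx proxy blocks above. The actual backend code is not part of this commit, so the following is only a minimal stand-in sketch; the route paths, handler names, and the FastAPI choice are assumptions inferred from the `/api/` and `/ws/` proxy rules and the uvicorn command.

# Hypothetical sketch of neural_mri/main.py, not the repository's code.
# Inferred from nginx's /api/ and /ws/ proxy blocks and the supervisor
# command `uvicorn neural_mri.main:app`.
from fastapi import FastAPI, WebSocket

app = FastAPI()

@app.get("/api/health")
async def health() -> dict:
    # Plain HTTP endpoint, reached through nginx's /api/ location.
    return {"status": "ok"}

@app.websocket("/ws/echo")
async def echo(ws: WebSocket) -> None:
    # WebSocket endpoint; nginx's /ws/ block forwards the Upgrade
    # handshake, so connecting via port 7860 behaves like port 8000.
    await ws.accept()
    msg = await ws.receive_text()
    await ws.send_text(msg)
    await ws.close()

With a stand-in like this in place, the container can be smoke-tested end to end: nginx serves the built frontend at /, forwards /api/health to uvicorn, and upgrades /ws/echo connections.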
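The pre-download RUN line bakes the GPT-2 tokenizer and weights into the image's Hugging Face cache at build time, so the first request after the Space boots does not block on a network download. A sketch of how the backend might pick the cached model up at startup, assuming it reads the NMRI_* variables set above (the variable names and model id come from the Dockerfile; the loading code itself is an assumption):

# Sketch: loading the model pre-downloaded at build time. Because the
# weights are already in the HF cache, from_pretrained() resolves
# locally instead of downloading.
import os

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = os.environ.get("NMRI_DEFAULT_MODEL", "gpt2")
device = os.environ.get("NMRI_DEVICE", "cpu")  # "cpu" in this image

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
model.eval()  # inference only on Spaces CPU hardware

Pinning the model at build time trades image size (GPT-2 weights are roughly 500 MB) for a faster cold start, which usually pays off on Spaces, where containers restart often.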