nexusbert committed on
Commit
53d01b0
·
verified ·
1 Parent(s): 6d5c145

Create Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +53 -0
Dockerfile ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# syntax=docker/dockerfile:1

# Base Image
FROM python:3.10-slim

ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

WORKDIR /code

# System Dependencies
# DEBIAN_FRONTEND is set inline (not via ENV) so it affects only this
# apt layer and does not leak into the runtime environment.
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
        libomp-dev \
        libopenblas-dev \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first so the dependency layer stays cached until
# requirements.txt itself changes (source edits won't reinstall deps).
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Hugging Face + model tools
# NOTE(review): these are unpinned — pin exact versions for reproducible builds.
RUN pip install --no-cache-dir accelerate huggingface-hub sentencepiece

# Hugging Face cache environment
# TRANSFORMERS_CACHE is deprecated in recent transformers releases but is
# kept here for backward compatibility alongside HF_HOME / HF_HUB_CACHE.
ENV HF_HOME=/models/huggingface \
    TRANSFORMERS_CACHE=/models/huggingface \
    HUGGINGFACE_HUB_CACHE=/models/huggingface \
    HF_HUB_CACHE=/models/huggingface

# Create cache dir (permissions are granted to the app user below,
# replacing the previous world-writable chmod -R 777)
RUN mkdir -p /models/huggingface

# Pre-download models at build time (sports predictor specific models);
# snapshot_download writes into HF_HOME, which is set above.
RUN python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='valhalla/distilbart-mnli-12-1')" \
    && python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='google/flan-t5-base')" \
    && python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='sentence-transformers/all-MiniLM-L6-v2')" \
    && find /models/huggingface -name '*.lock' -delete

# Preload tokenizers (avoid runtime delays)
# assumes `transformers` is provided by requirements.txt — TODO confirm
RUN python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('valhalla/distilbart-mnli-12-1', use_fast=True)" \
    && python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('google/flan-t5-base', use_fast=True)" \
    && python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2', use_fast=True)"

# Create a non-root runtime user and hand it the model cache and app dir;
# done after the root-owned downloads so everything ends up owned by appuser.
RUN useradd --system --create-home --uid 10001 appuser \
    && chown -R appuser:appuser /models/huggingface /code

# Copy project files
# NOTE(review): add a .dockerignore so .git, caches, and local .env files
# never enter the build context.
COPY --chown=appuser:appuser . .

# Drop root privileges for the runtime process.
USER appuser

# Expose FastAPI port (documentation only; publish with -p at run time)
EXPOSE 7860

# NOTE(review): consider adding a HEALTHCHECK hitting the app's health
# endpoint (curl is installed) once its route is confirmed.

# Run FastAPI app with uvicorn (single worker)
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]