rabiyulfahim committed on
Commit
2e910dc
·
verified ·
1 Parent(s): fc18d76

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +16 -10
Dockerfile CHANGED
@@ -1,25 +1,31 @@
1
- # Use lightweight Python
2
  FROM python:3.10-slim
3
 
4
  # Set working directory
5
  WORKDIR /app
6
 
7
- # Install system dependencies (for torch)
8
- RUN apt-get update && apt-get install -y git
9
 
10
  # Copy requirements and install
11
  COPY requirements.txt .
12
  RUN pip install --no-cache-dir -r requirements.txt
13
 
14
- # Copy app code
15
  COPY . .
16
 
 
 
 
 
 
 
 
 
 
 
17
  # Expose FastAPI port
18
  EXPOSE 8000
19
 
20
- # Before CMD, add this:
21
- ENV TRANSFORMERS_CACHE=/app/cache
22
- RUN mkdir -p /app/cache
23
-
24
- # Run API with uvicorn
25
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
 
1
+ # Use lightweight Python base image
2
  FROM python:3.10-slim
3
 
4
  # Set working directory
5
  WORKDIR /app
6
 
7
+ # Install system dependencies (for PyTorch and git, sometimes needed)
8
+ RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
9
 
10
  # Copy requirements and install
11
  COPY requirements.txt .
12
  RUN pip install --no-cache-dir -r requirements.txt
13
 
14
+ # Copy application code
15
  COPY . .
16
 
17
+ # ✅ Hugging Face cache directory (writable in Spaces)
18
+ ENV HF_HOME=/app/cache
19
+ RUN mkdir -p /app/cache
20
+
21
+ # Pre-download model so container starts faster
22
+ RUN python -c "from transformers import AutoTokenizer, AutoModelForCausalLM; \
23
+ model_id='rabiyulfahim/qa_python_gpt2'; \
24
+ AutoTokenizer.from_pretrained(model_id, cache_dir='/app/cache'); \
25
+ AutoModelForCausalLM.from_pretrained(model_id, cache_dir='/app/cache')"
26
+
27
  # Expose FastAPI port
28
  EXPOSE 8000
29
 
30
+ # Run FastAPI with Uvicorn
31
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]