Grinding committed on
Commit
487c8cb
·
verified ·
1 Parent(s): 6b7471c

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +11 -20
Dockerfile CHANGED
@@ -2,35 +2,26 @@ FROM python:3.9-slim
2
 
3
  WORKDIR /code
4
 
5
- # System deps for audio (minimize with --no-install-recommends)
6
- RUN apt-get update && apt-get install -y --no-install-recommends ffmpeg libsndfile1 && rm -rf /var/lib/apt/lists/*
7
-
8
- # Install CPU-only Torch first (to satisfy optimum dep without CUDA bloat)
9
- RUN pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu
10
-
11
- # Copy requirements
12
  COPY ./requirements.txt /code/requirements.txt
13
 
14
- # Then install the rest (Torch dep is now satisfied)
15
- RUN pip install --no-cache-dir -r /code/requirements.txt
16
 
17
- # Hugging Face cache inside container (clean + writable)
 
 
 
18
  ENV HF_HOME=/code/hf_cache
19
  RUN mkdir -p /code/hf_cache && chmod -R 777 /code/hf_cache
20
 
21
- # Pre-download ONNX model + processor at build time
22
  RUN python - <<'PY'
23
- from transformers import WhisperProcessor
24
- from optimum.onnxruntime import ORTModelForSeq2SeqLM
25
-
26
- print("➡️ Pre-downloading ONNX models...")
27
- WhisperProcessor.from_pretrained("distil-whisper/distil-large-v3")
28
- ORTModelForSeq2SeqLM.from_pretrained("distil-whisper/distil-large-v3.5-ONNX")
29
- print("✅ Pre-download complete.")
30
  PY
31
 
32
- # App
33
  COPY ./app.py /code/app.py
34
 
35
  EXPOSE 7860
36
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
2
 
3
  WORKDIR /code
4
 
5
+ # Copy requirements (make sure it includes soundfile)
 
 
 
 
 
 
6
  COPY ./requirements.txt /code/requirements.txt
7
 
8
+ # Install system deps for ffmpeg + audio libs
9
+ RUN apt-get update && apt-get install -y ffmpeg libsndfile1 && rm -rf /var/lib/apt/lists/*
10
 
11
+ # Install Python deps
12
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
13
+
14
+ # Use HF_HOME to control transformers cache location
15
  ENV HF_HOME=/code/hf_cache
16
  RUN mkdir -p /code/hf_cache && chmod -R 777 /code/hf_cache
17
 
18
+ # Pre-download ASR model at build time (so runtime doesn't download/cache)
19
  RUN python - <<'PY'
20
+ from transformers import pipeline
21
+ pipeline('automatic-speech-recognition', model='distil-whisper/distil-large-v3')
 
 
 
 
 
22
  PY
23
 
 
24
  COPY ./app.py /code/app.py
25
 
26
  EXPOSE 7860
27
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]