Osama-Ahmed-27 committed on
Commit
faaee74
·
verified ·
1 Parent(s): a97143b

Create Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +40 -0
Dockerfile ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# syntax=docker/dockerfile:1

# Lightweight Python base; pinned minor version for reproducibility.
FROM python:3.10-slim

# System deps: ffmpeg (required by Whisper for audio decoding),
# git + build-essential (some wheels may need to compile from source).
# update + install in one layer, clean apt lists in the same layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ffmpeg \
        git \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install Python deps before copying app code so this layer stays
# cached until requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY app.py README.md ./

# Run as non-root. Hugging Face Docker Spaces run the container as a
# user with UID 1000, so create one and make /app writable by it.
RUN useradd --create-home --uid 1000 user \
    && chown -R user:user /app
USER user
ENV HOME=/home/user

# HF model cache inside the user's home (must be writable as non-root).
# HF_HOME is the current knob; TRANSFORMERS_CACHE is deprecated but kept
# for older transformers versions that still read it.
ENV HF_HOME=/home/user/.cache/huggingface \
    TRANSFORMERS_CACHE=/home/user/.cache/huggingface/transformers \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Optional: choose Whisper size via env (tiny/base/small/medium).
# You can override this in Space settings too.
ENV WHISPER_MODEL=base

# Hugging Face Spaces expects the app to listen on port 7860.
# EXPOSE is documentation only, but states the contract for tooling.
ENV PORT=7860
EXPOSE 7860

# (Optional) Pre-download models on build to reduce cold-start.
# Comment out if build time becomes too long.
# RUN python -c "import nltk; \
#                import whisper; \
#                nltk.download('vader_lexicon'); \
#                whisper.load_model('${WHISPER_MODEL}')"

# Start FastAPI with uvicorn (exec form: uvicorn runs as PID 1 and
# receives SIGTERM directly from `docker stop`).
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]