# syntax=docker/dockerfile:1
FROM python:3.11-slim

WORKDIR /app

# Unprivileged runtime user; fixed UID 1000 so orchestrators (e.g. k8s
# runAsNonRoot) can verify it numerically.
RUN useradd -m -u 1000 user

# System deps: build toolchain needed to compile native wheels from
# requirements.txt. apt lists are removed in the same layer so the cache
# never ships in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        g++ \
        gcc \
    && rm -rf /var/lib/apt/lists/*

# Dependency layer: copy only the manifest first so source-code edits do not
# invalidate the (slow, large) pip layers below.
# PyTorch CPU-only wheel first — much smaller than the default CUDA build.
COPY requirements.txt .
RUN pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu
RUN pip install --no-cache-dir -r requirements.txt

# Application source, owned by the runtime user at copy time. This replaces a
# follow-up `RUN chown -R user:user /app`, which would duplicate every copied
# byte into a second layer.
COPY --chown=user:user . .

# Writable stats dir for the app; created after COPY so it exists even if the
# build context has no stats/ directory. Only this new dir needs a chown.
RUN mkdir -p stats && chown user:user stats

USER user

# Documentation only — the port must still be published at run time.
EXPOSE 7860

# Cache the local model at build time (optional — comment out to skip)
# RUN python -c "from transformers import AutoTokenizer, AutoModelForCausalLM; AutoTokenizer.from_pretrained('Qwen/Qwen2.5-0.5B-Instruct'); AutoModelForCausalLM.from_pretrained('Qwen/Qwen2.5-0.5B-Instruct')"

# Exec (JSON-array) form: uvicorn runs as PID 1 and receives SIGTERM directly
# from `docker stop`.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]