# Provenance (from Hugging Face file viewer — not Dockerfile instructions):
# Author: James040 — "Update Dockerfile"
# Commit: f8e4b78 (verified), 827 Bytes
FROM ghcr.io/ggml-org/llama.cpp:full

WORKDIR /app

# Install Python and essential tools.
# update + install combined in one layer (avoids stale apt cache),
# --no-install-recommends keeps the image lean, and the apt lists are
# removed in the same RUN so they never persist in a layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        python3 \
        python3-pip \
        python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Set up a virtual environment and force-install modern Gradio.
# The venv isolates app dependencies from the base image's system Python;
# putting it first on PATH makes `pip`/`python` resolve to the venv.
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Upgrade pip and install app dependencies in one logical step (one layer);
# --no-cache-dir keeps the pip download cache out of the image.
RUN pip install --no-cache-dir -U pip \
    && pip install --no-cache-dir "gradio>=5.0" requests huggingface_hub

# Download the model at build time (SmolLM2-1.7B quantized to Q4_K_M is
# small enough for fast CPU generation). Baking it in makes container
# startup instant but produces a ~1 GB layer.
RUN python3 -c 'from huggingface_hub import hf_hub_download; hf_hub_download(repo_id="bartowski/SmolLM2-1.7B-Instruct-GGUF", filename="SmolLM2-1.7B-Instruct-Q4_K_M.gguf", local_dir="/app")'

# Copy application files AFTER the model download so that editing app.py or
# start.sh reuses the cached model layer instead of re-downloading ~1 GB.
COPY app.py /app/app.py
COPY start.sh /app/start.sh
RUN chmod +x /app/start.sh

# Gradio's default listen port — documentation only; publish with -p at run
# time. NOTE(review): assumes start.sh uses the Gradio default 7860; confirm.
EXPOSE 7860

# NOTE(review): container runs as root — consider adding a non-root USER
# once start.sh's filesystem/port needs are confirmed.

# Run the startup script (exec form: start.sh runs as PID 1 and receives
# SIGTERM directly from `docker stop`).
ENTRYPOINT ["/app/start.sh"]