# syntax=docker/dockerfile:1
# botty / Dockerfile (source: Hugging Face Space "botty", commit 83b1783)
FROM mambaorg/micromamba:1.5.3
# 1. Root setup for permissions (Standard HF Space requirement).
# WORKDIR creates /app owned by root, so ownership must be handed to
# the unprivileged mambauser before we drop privileges below.
USER root
WORKDIR /app
RUN chown -R mambauser:mambauser /app
# 2. Switch to the non-root user.
# Every later RUN/COPY step and the runtime process execute as mambauser.
USER mambauser
# 3. Initialize the shell.
# Shell-form RUN: `~` expands at build time to mambauser's home directory.
RUN micromamba shell init --shell=bash --root-prefix=~/micromamba
# 4. INSTALL PACKAGES VIA MICROMAMBA (FAST)
# - llama-cpp-python: pre-compiled conda-forge binary (avoids a long
#   source build of the llama.cpp bindings)
# - gcc/gxx/make/cmake: toolchain kept in the image so the AI can
#   "install anything" (compile packages) later at runtime
# - python=3.10: Stable version
# `micromamba clean --all --yes` runs in the SAME layer as the install,
# so package caches never persist into the image.
# NOTE(review): only python is version-pinned; llama-cpp-python and the
# toolchain float with conda-forge, so rebuilds are not reproducible —
# consider pinning exact versions (DL3008-style guidance).
RUN micromamba install -y -n base \
python=3.10 \
pip \
llama-cpp-python \
gcc_linux-64 \
gxx_linux-64 \
make \
cmake \
-c conda-forge && \
micromamba clean --all --yes
# 5. Copy requirements first, as its own layer, so the pip install below
# stays cached until requirements.txt itself changes (source edits later
# in the file do not re-trigger dependency installation).
COPY --chown=mambauser:mambauser requirements.txt /app/requirements.txt
# 6. Install Pip packages (Gradio 5+).
# MAMBA_DOCKERFILE_ACTIVATE=1 tells the micromamba base image to
# activate the "base" env for subsequent RUN steps, so `pip` and
# `python` below resolve to the env's interpreter, not the system one.
ARG MAMBA_DOCKERFILE_ACTIVATE=1
RUN pip install --no-cache-dir -r requirements.txt
# 7. Download the Model (Qwen 2.5 1.5B - Fast CPU).
# Baked into the image at build time so container startup needs no
# network access; `local_dir='./models'` is WORKDIR-relative and lands
# in /app/models, owned by mambauser (the active USER).
# NOTE(review): assumes huggingface_hub is provided by requirements.txt
# (installed in step 6) — verify, otherwise this step fails at build time.
RUN python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='Qwen/Qwen2.5-1.5B-Instruct-GGUF', filename='qwen2.5-1.5b-instruct-q4_k_m.gguf', local_dir='./models')"
# 8. Copy the application code last so code edits never invalidate the
# cached dependency and model layers above.
COPY --chown=mambauser:mambauser . /app
# 9. Expose the single allowed port (documentation only — it does not
# publish the port; HF Spaces routes external traffic to 7860).
EXPOSE 7860
# Probe the app's HTTP endpoint so orchestrators can detect a wedged
# container. Uses python/urllib (guaranteed present in the env) instead
# of curl; the long --start-period covers GGUF model load time.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:7860/')" || exit 1
# 10. Start the app (exec form: python runs as PID 1 and receives
# SIGTERM directly on `docker stop`).
CMD ["python", "main.py"]