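# CoPaw with llama.cpp support, built on the prebuilt llama.cpp server image.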
FROM ghcr.io/ggml-org/llama.cpp:server

LABEL maintainer="your-name <your-email@example.com>"
LABEL description="CoPaw with llama.cpp support (launch directly)"
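
# Runtime defaults. MODEL_PATH names a GGUF model expected under /models;
# this Dockerfile does not download it, so copy or mount the file yourself.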
ENV PYTHONUNBUFFERED=1 \
    PORT=7860 \
    HOST=0.0.0.0 \
    COPAW_WORKING_DIR=/app/working \
    COPAW_SECRETS_DIR=/app/working.secret \
    COPAW_ACCEPT_SECURITY_NOTICE=yes \
    MODEL_PATH=/models/Qwen3.5-4B-Q4_K_M.gguf
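
# System packages: Python toolchain, compilers for any native wheels, plus curl and jq for scripting.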
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    python3-pip \
    python3-venv \
    build-essential \
    cmake \
    curl \
    jq \
    && rm -rf /var/lib/apt/lists/*
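
# Provide a bare `python` alias; -f tolerates an already-existing link.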
RUN ln -sf /usr/bin/python3 /usr/bin/python
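
# Install CoPaw with its llama.cpp extra, plus uvicorn and fastapi.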
RUN pip3 install --no-cache-dir --upgrade pip && \
    pip3 install --no-cache-dir 'copaw[llamacpp]' uvicorn fastapi
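
# Pre-create the working, secrets, and model directories.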
RUN mkdir -p ${COPAW_WORKING_DIR} ${COPAW_SECRETS_DIR} /models
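
# Non-interactive first-run setup; the piped "yes" confirms any prompt from `copaw init`.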
RUN echo "yes" | copaw init --defaults
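
# Register the local llama.cpp model in config.json and make it the default.
# jq --arg injects MODEL_PATH safely instead of splicing it between quote fragments.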
RUN CONFIG_PATH=${COPAW_WORKING_DIR}/config.json && \
    jq --arg mp "${MODEL_PATH}" \
       '.models.local_llama = {"provider":"llamacpp","model_path":$mp,"model_name":"qwen3.5-4b"} | .model.default = "local_llama"' \
       "${CONFIG_PATH}" > "${CONFIG_PATH}.tmp" && \
    mv "${CONFIG_PATH}.tmp" "${CONFIG_PATH}"
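
# Port the app serves on (matches PORT above).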
EXPOSE 7860
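
# Run from the CoPaw working directory.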
WORKDIR ${COPAW_WORKING_DIR}
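
# Exec-form CMD does not expand env vars, so host and port are written out literally here.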
CMD ["copaw", "app", "--host", "0.0.0.0", "--port", "7860"]
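
# Build/run sketch (tag and paths are illustrative, not from upstream docs):
#   docker build -t copaw-llamacpp .
#   docker run -p 7860:7860 -v /path/to/models:/models copaw-llamacpp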