# Base image: the official llama.cpp server image (ships prebuilt llama binaries)
FROM ghcr.io/ggml-org/llama.cpp:server
LABEL maintainer="your-name <your-email@example.com>"
LABEL description="CoPaw with llama.cpp support (launch directly)"
# Environment variables (carry over your existing configuration here as needed)
ENV PYTHONUNBUFFERED=1 \
    PORT=7860 \
    HOST=0.0.0.0 \
    COPAW_WORKING_DIR=/app/working \
    COPAW_SECRETS_DIR=/app/working.secret \
    COPAW_ACCEPT_SECURITY_NOTICE=yes

# Model path (used by CoPaw when loading the model)
ENV MODEL_PATH=/models/Qwen3.5-4B-Q4_K_M.gguf
# Install Python and build tools (needed to compile llama-cpp-python)
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    python3-pip \
    python3-venv \
    build-essential \
    cmake \
    curl \
    jq \
    && rm -rf /var/lib/apt/lists/*
# Create a python symlink so the plain `python` command works
RUN ln -sf /usr/bin/python3 /usr/bin/python
# Upgrade pip and install CoPaw (with llama.cpp backend support)
RUN pip3 install --no-cache-dir --upgrade pip && \
    pip3 install --no-cache-dir 'copaw[llamacpp]' uvicorn fastapi
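# Optional sketch (assumption: the copaw[llamacpp] extra pulls in llama-cpp-python and
# builds it from source): hardware acceleration can be enabled by passing CMAKE_ARGS to
# the pip install step, e.g. a CUDA build on a GPU-enabled base image:
#   CMAKE_ARGS="-DGGML_CUDA=on" pip3 install --no-cache-dir 'copaw[llamacpp]'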
# Create the required directories
RUN mkdir -p ${COPAW_WORKING_DIR} ${COPAW_SECRETS_DIR} /models
# Initialize CoPaw non-interactively to generate a base configuration
RUN echo "yes" | copaw init --defaults
# Use jq to edit the config and register the local llama model
# (note: the provider value depends on the installed copaw version; common values are
# "llamacpp" or "llama" -- "llamacpp" is used here)
RUN CONFIG_PATH=${COPAW_WORKING_DIR}/config.json && \
    jq '.models.local_llama = {"provider":"llamacpp","model_path":"'${MODEL_PATH}'","model_name":"qwen3.5-4b"}' ${CONFIG_PATH} > ${CONFIG_PATH}.tmp && \
    mv ${CONFIG_PATH}.tmp ${CONFIG_PATH} && \
    jq '.model.default = "local_llama"' ${CONFIG_PATH} > ${CONFIG_PATH}.tmp && \
    mv ${CONFIG_PATH}.tmp ${CONFIG_PATH}
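# After the two jq edits above, config.json should contain a fragment roughly like the
# following (the exact schema depends on the installed copaw version):
#   "models": { "local_llama": { "provider": "llamacpp",
#                                "model_path": "/models/Qwen3.5-4B-Q4_K_M.gguf",
#                                "model_name": "qwen3.5-4b" } },
#   "model":  { "default": "local_llama" }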
# Expose the CoPaw port
EXPOSE 7860
# Set the working directory
WORKDIR ${COPAW_WORKING_DIR}
# Launch CoPaw directly with the original start command (no extra script). Reset the base
# image's llama-server ENTRYPOINT so CMD runs copaw itself instead of being passed to llama-server.
ENTRYPOINT []
CMD ["copaw", "app", "--host", "0.0.0.0", "--port", "7860"]