# syntax=docker/dockerfile:1

# GPU-enabled llama-cpp-python OpenAI-compatible server.
# Base: CUDA 12.1.1 + cuDNN 8 devel image (compilers/headers available in case
# llama-cpp-python ever has to build its CUDA kernels from source).
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04

# Default bind address (key=value form; legacy `ENV key value` is deprecated).
ENV HOST=0.0.0.0

# OS toolchain plus OpenCL/BLAS headers used by llama.cpp backends.
# update+install combined in one layer and apt lists removed in the same layer
# so no stale package index persists in the image.
# NOTE(review): blanket `apt-get upgrade` removed (hadolint DL3005) — pick up
# security fixes by bumping the base-image tag instead.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        clinfo \
        gcc \
        git \
        libclblast-dev \
        libopenblas-dev \
        ocl-icd-opencl-dev \
        opencl-headers \
        python3 \
        python3-pip \
        wget \
    && mkdir -p /etc/OpenCL/vendors \
    && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd \
    && rm -rf /var/lib/apt/lists/*

# Drop root: run everything from here on as an unprivileged user.
# pip installs land in ~/.local, hence the PATH extension.
RUN useradd -m -u 1000 gee
USER gee
ENV HOME=/home/gee \
    PATH=/home/gee/.local/bin:$PATH

# Keep the Hugging Face download cache under the app directory.
ENV HF_HOME=$HOME/app/.cache/huggingface
WORKDIR $HOME/app

# Build flags consumed by llama-cpp-python if it compiles from source
# (harmless when the prebuilt wheel below is used).
ENV CUDA_DOCKER_ARCH=all
ENV GGML_CUDA=1

# Install Python dependencies BEFORE copying application source so these slow
# layers stay cached when only app code changes.
# NOTE(review): versions are unpinned — pin them for reproducible builds.
RUN python3 -m pip install --no-cache-dir --upgrade \
        pip \
        cmake \
        fastapi \
        hf_xet \
        huggingface_hub \
        pydantic-settings \
        pytest \
        scikit-build \
        setuptools \
        sse-starlette \
        starlette-context \
        uvicorn

# Prebuilt CUDA 12.1 wheel index avoids a lengthy from-source build.
RUN pip install --no-cache-dir llama-cpp-python \
        --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121

# Application source last — the most frequently changing layer.
COPY --chown=gee . $HOME/app

# Documentation of the service port (publishing still requires `-p` at run time).
EXPOSE 8000

# Exec-form CMD (proper PID 1 / signal handling): start the llama.cpp server,
# fetching the Q4_0 GGUF model from the Hugging Face Hub on first start.
CMD ["python3", "-W", "ignore", "-m", "llama_cpp.server", "--hf_model_repo_id", "unsloth/Qwen3-30B-A3B-GGUF", "--model", "*Q4_0.gguf", "--host", "0.0.0.0", "--port", "8000"]