FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04

ENV VLLM_LOGGING_LEVEL=DEBUG
ENV HF_HOME=/tmp/.cache/huggingface
ENV OMP_NUM_THREADS=1

# System packages have to be installed while we are still root;
# apt-get fails once USER switches to the unprivileged account.
RUN apt-get update && apt-get install -y python3 python3-pip git

RUN useradd -m appuser
USER appuser

RUN mkdir -p /tmp/.cache/huggingface

# From here on pip runs as appuser, so packages land in the user site.
RUN pip3 install --upgrade pip
# Install vLLM. Its PyPI wheels pull in a matching PyTorch build that bundles
# its own CUDA runtime, so no extra PyTorch index is needed here.
RUN pip3 install vllm==0.10.0
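
# Optional: verify the install at build time (a minimal sketch, not a
# required step; vllm exposes __version__):
#   RUN python3 -c "import vllm; print(vllm.__version__)"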

# Download the model at build time so that a restart only waits for the
# docker pull, not for the download from HF.
# In Docker Spaces, secrets management works differently for security reasons.
# Once you create a secret in the Settings tab, you can expose it at build
# time by mounting it to a file, then reading it with
# $(cat /run/secrets/SECRET_EXAMPLE), where SECRET_EXAMPLE is the name of the
# secret you created in the Settings tab.
# https://huggingface.co/docs/hub/en/spaces-sdks-docker#buildtime
#
# AFTER TRIAL AND ERROR WE GOT 16GB (16431849854 bytes) OF LAYERS :(
# Baking the weights into the image made it that large, so the step stays
# commented out and the model is downloaded at startup instead.
#
# RUN --mount=type=secret,id=HF_TOKEN,mode=0444,required=true HF_TOKEN=$(cat /run/secrets/HF_TOKEN) python /app/download_model.py
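#
# For reference, a minimal sketch of what /app/download_model.py could look
# like (the script itself is not shown here; the huggingface_hub call and
# reading the token from the HF_TOKEN env var are assumptions):
#
#   import os
#   from huggingface_hub import snapshot_download
#
#   # Pre-fetch the exact revision the server below pins, authenticating
#   # with the build-time secret.
#   snapshot_download(
#       repo_id="meta-llama/Llama-3.2-3B-Instruct",
#       revision="0cb88a4f764b7a12671c53f0838cd831a0843b95",
#       token=os.environ["HF_TOKEN"],
#   )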

EXPOSE 7860

# Serve the pinned model revision through vLLM's OpenAI-compatible API server
# on the port Spaces expects (7860).
CMD python3 -m vllm.entrypoints.openai.api_server \
    --model "meta-llama/Llama-3.2-3B-Instruct" \
    --task generate \
    --revision "0cb88a4f764b7a12671c53f0838cd831a0843b95" \
    --code-revision "0cb88a4f764b7a12671c53f0838cd831a0843b95" \
    --tokenizer-revision "0cb88a4f764b7a12671c53f0838cd831a0843b95" \
    --seed 42 \
    --host 0.0.0.0 \
    --port 7860 \
    --max-num-batched-tokens 32768 \
    --max-model-len 32768 \
    --dtype float16 \
    --enforce-eager \
    --gpu-memory-utilization 0.9 \
    --enable-prefix-caching \
    --disable-log-requests \
    --trust-remote-code
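
# Example client call once the server is up (a sketch; localhost stands in
# for the Space URL, and the payload follows the OpenAI-compatible
# /v1/completions schema that vLLM serves):
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/v1/completions",
#       json={
#           "model": "meta-llama/Llama-3.2-3B-Instruct",
#           "prompt": "Hello, my name is",
#           "max_tokens": 32,
#       },
#   )
#   print(resp.json()["choices"][0]["text"])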