DarkMindForever committed on
Commit
976bc26
·
verified ·
1 Parent(s): c62f68c

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +18 -34
Dockerfile CHANGED
@@ -1,36 +1,20 @@
1
- FROM python:3.10-slim
2
-
3
  USER root
4
- WORKDIR /app
5
-
6
- # 1. Install system basics
7
- RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
8
-
9
- # 2. Install AI Stack
10
- # We use --extra-index-url so pip can find both Torch (CPU) and standard packages (FastAPI)
11
- RUN pip3 install --no-cache-dir --upgrade pip && \
12
- pip3 install --no-cache-dir \
13
- uvicorn \
14
- fastapi \
15
- transformers \
16
- accelerate \
17
- safetensors \
18
- torch --extra-index-url https://download.pytorch.org/whl/cpu
19
-
20
- # 3. Create model directory and download GPT-2 XL Safetensors
21
- RUN mkdir -p /models/gpt2-xl && \
22
- curl -L https://huggingface.co/openai-community/gpt2-xl/resolve/main/model.safetensors -o /models/gpt2-xl/model.safetensors && \
23
- curl -L https://huggingface.co/openai-community/gpt2-xl/resolve/main/config.json -o /models/gpt2-xl/config.json && \
24
- curl -L https://huggingface.co/openai-community/gpt2-xl/resolve/main/vocab.json -o /models/gpt2-xl/vocab.json && \
25
- curl -L https://huggingface.co/openai-community/gpt2-xl/resolve/main/merges.txt -o /models/gpt2-xl/merges.txt && \
26
- curl -L https://huggingface.co/openai-community/gpt2-xl/resolve/main/tokenizer.json -o /models/gpt2-xl/tokenizer.json && \
27
- curl -L https://huggingface.co/openai-community/gpt2-xl/resolve/main/generation_config.json -o /models/gpt2-xl/generation_config.json
28
-
29
- # 4. Copy your server code
30
- COPY app.py .
31
- RUN chown -R 1000:1000 /app /models
32
  USER 1000
33
-
34
- EXPOSE 8080
35
-
36
- CMD ["python3", "app.py"]
 
 
 
 
 
 
 
 
 
 
1
# syntax=docker/dockerfile:1

# llama.cpp HTTP server image with a Gemma 3n E2B (Q8_0 GGUF) model baked in.
# The base image ships /app/llama-server, which reads its configuration from
# LLAMA_ARG_* environment variables.
FROM ghcr.io/ggml-org/llama.cpp:server

# Root is needed only for package install and the model download below;
# privileges are dropped before the runtime entrypoint.
USER root

# curl is used both to fetch the model at build time and by HEALTHCHECK at
# runtime, so it must stay in the final image. Clean the apt lists in the
# same layer so the package cache is not baked into the image (DL3009/DL3015).
RUN apt-get update && apt-get install -y --no-install-recommends \
      ca-certificates \
      curl \
    && rm -rf /var/lib/apt/lists/*

# Download the quantized weights. -f makes curl fail on HTTP errors instead of
# silently saving an HTML error page as model.gguf; -L follows the Hub's
# redirect to the CDN. chown lets the non-root runtime user read (and mlock)
# the file.
RUN mkdir -p /models && \
    curl -fL https://huggingface.co/unsloth/gemma-3n-E2B-it-GGUF/resolve/main/gemma-3n-E2B-it-Q8_0.gguf \
      -o /models/model.gguf && \
    chown -R 1000:1000 /models

# Run the server as an unprivileged user.
USER 1000

# llama-server runtime configuration (one grouped ENV — fewer layers, easier
# to scan). CTX_SIZE was 8196 upstream; 8192 is the intended power-of-two
# context length. NOTE(review): MLOCK=true as a non-root user depends on the
# container's RLIMIT_MEMLOCK / IPC_LOCK — llama-server warns and continues if
# mlock fails; confirm the deploy target grants the limit.
ENV LLAMA_ARG_MODEL=/models/model.gguf \
    LLAMA_ARG_HOST=0.0.0.0 \
    LLAMA_ARG_PORT=7860 \
    LLAMA_ARG_THREADS=8 \
    LLAMA_ARG_BATCH_SIZE=2048 \
    LLAMA_ARG_UBATCH_SIZE=512 \
    LLAMA_ARG_CTX_SIZE=8192 \
    LLAMA_ARG_FLASH_ATTN=true \
    LLAMA_ARG_NO_MMAP=false \
    LLAMA_ARG_MLOCK=true

# Documentation-only, but lets operators and tooling see the serving port.
EXPOSE 7860

# Probe llama-server's built-in /health endpoint; -f makes curl exit non-zero
# on HTTP errors. NOTE(review): loading a multi-GB Q8_0 model can exceed the
# 10s start period — retries absorb this, but consider a longer --start-period.
HEALTHCHECK --interval=30s --timeout=15s --start-period=10s --retries=3 \
    CMD curl -fsS http://localhost:7860/health || exit 1

# Exec form: llama-server is PID 1 and receives SIGTERM from `docker stop`.
ENTRYPOINT ["/app/llama-server"]