Spaces:
Sleeping
Sleeping
Add Dockerfile and README config for Hugging Face Space
Browse files- Dockerfile +4 -8
- README.md +8 -7
Dockerfile
CHANGED
|
@@ -1,12 +1,8 @@
|
|
| 1 |
-
FROM
|
| 2 |
|
| 3 |
-
|
| 4 |
-
RUN apt-get update && apt-get install -y --no-install-recommends git build-essential cmake python3 python3-pip curl ca-certificates pkg-config libcurl4-openssl-dev && rm -rf /var/lib/apt/lists/*
|
| 5 |
|
| 6 |
-
|
| 7 |
-
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp.git && cd llama.cpp && mkdir build && cd build && cmake -DGGML_NATIVE=ON -DLLAMA_BUILD_SERVER=ON .. && cmake --build . -j
|
| 8 |
-
|
| 9 |
-
RUN mkdir -p /models && curl -L -o /models/model.gguf https://huggingface.co/bartowski/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/Qwen2.5-0.5B-Instruct-Q4_K_M.gguf
|
| 10 |
|
| 11 |
EXPOSE 7860
|
| 12 |
-
CMD ["
|
|
|
|
# Prebuilt llama.cpp server image — ships the llama-server binary (and curl),
# so no toolchain or source build is needed.
# NOTE(review): "server" is a moving tag; pin by digest for reproducible builds.
FROM ghcr.io/ggerganov/llama.cpp:server

# GGUF model baked into the image at build time.
ENV MODEL_URL="https://huggingface.co/bartowski/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/Qwen2.5-0.5B-Instruct-Q4_K_M.gguf"

# Download the model in a single layer.
# FIX: original used Windows batch syntax %MODEL_URL%, which Docker's shell
# does not expand — curl would fetch the literal string. Use ${MODEL_URL}.
# -f makes curl fail on HTTP errors so a 404/403 page is never saved as the model;
# -L follows the Hugging Face redirect to the CDN.
RUN mkdir -p /models && curl -fL -o /models/model.gguf "${MODEL_URL}"

# Hugging Face Spaces routes external traffic to this port (see app_port in README).
EXPOSE 7860

# Exec-form CMD: llama-server is PID 1 and receives SIGTERM on container stop.
# 2048-token context, CPU-only (-ngl 0), 4 threads, bound to all interfaces.
CMD ["llama-server", "-m", "/models/model.gguf", "-c", "2048", "-ngl", "0", "-t", "4", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -1,7 +1,8 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Llama.cpp Tiny
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Llama.cpp Tiny
|
| 3 |
+
colorFrom: gray
|
| 4 |
+
colorTo: blue
|
| 5 |
+
sdk: docker
|
| 6 |
+
app_port: 7860
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|