Spaces:
Running
Running
Runtime fallback: ensure /models/model.gguf exists, then exec llama-server
Browse files — Dockerfile: +2 −0
Dockerfile
CHANGED
|
@@ -4,3 +4,5 @@ ADD https://huggingface.co/bartowski/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/Qwe
|
|
| 4 |
|
| 5 |
EXPOSE 7860
|
| 6 |
CMD ["-m","/models/model.gguf","-c","2048","-ngl","0","-t","4","--host","0.0.0.0","--port","7860"]
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
EXPOSE 7860
|
| 6 |
CMD ["-m","/models/model.gguf","-c","2048","-ngl","0","-t","4","--host","0.0.0.0","--port","7860"]
|
| 7 |
+
ENTRYPOINT ["/bin/sh","-lc"]
|
| 8 |
+
CMD ["test -f /models/model.gguf || (mkdir -p /models && curl -fL --retry 5 --retry-delay 2 -o /models/model.gguf https://huggingface.co/bartowski/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/Qwen2.5-0.5B-Instruct-Q4_K_M.gguf); exec llama-server -m /models/model.gguf -c 2048 -ngl 0 -t 4 --host 0.0.0.0 --port 7860"]
|