Commit 6a80b9a · committed by GitHub Actions
Parent(s): 5f27e25
Auto-deploy from GitHub Actions: ee976ea535d5ab7fe544c4e25ac0c9710675fb06

Files changed:
- Dockerfile +11 -1
- env.example +1 -1
Dockerfile
CHANGED

@@ -1,15 +1,25 @@
+# ---------- stage 1 ----------
+FROM ghcr.io/ggml-org/llama.cpp:server AS llama
+
+# ---------- stage 2 ----------
 FROM ghcr.io/sergey21000/chatbot-rag:main-cpu
 
 RUN useradd -m -u 1000 user \
     && chown -R user:user /app
-
 USER user
 ENV HOME=/home/user \
     PATH=/home/user/.local/bin:$PATH \
     RUN_IN_DOCHER=0
 
 WORKDIR /app
+
+COPY --from=llama /app /opt/llama.cpp
 COPY --chown=user config.py .
 COPY --chown=user env.example .env
 
+ENV LD_LIBRARY_PATH=/opt/llama.cpp:$LD_LIBRARY_PATH
+ENV LLAMACPP_DIR=/opt/llama.cpp
+ENV PATH=/opt/llama.cpp:$PATH
+ENV RUN_IN_DOCKER=0
+
 CMD ["python3", "app.py"]
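The change turns the Dockerfile into a two-stage build: the prebuilt llama.cpp server files are copied out of ghcr.io/ggml-org/llama.cpp:server into /opt/llama.cpp, and PATH, LD_LIBRARY_PATH, and LLAMACPP_DIR point the application at them, so the chatbot image ships llama.cpp without compiling it. Below is a minimal sketch of how the application side could locate and launch the bundled server; the binary name llama-server, the port, and the model path are illustrative assumptions that this commit does not confirm.

import os
import shutil
import subprocess

# LLAMACPP_DIR and PATH are set by the Dockerfile above; the binary name
# "llama-server", the port, and the model path are illustrative assumptions.
llamacpp_dir = os.environ.get("LLAMACPP_DIR", "/opt/llama.cpp")
server_bin = shutil.which("llama-server") or os.path.join(llamacpp_dir, "llama-server")

# Start the bundled server as a child process, keeping the shared libraries
# in /opt/llama.cpp visible via LD_LIBRARY_PATH.
proc = subprocess.Popen(
    [server_bin, "--host", "0.0.0.0", "--port", "8080", "-m", "model.gguf"],
    env={**os.environ, "LD_LIBRARY_PATH": llamacpp_dir},
)

Copying from the official server image this way keeps the chatbot image on its existing base while still bundling a CPU build of the server and its shared libraries in a single directory.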
env.example
CHANGED

@@ -100,7 +100,7 @@ LLAMA_ARG_HOST=0.0.0.0
 EMBED_MODEL_REPO=Alibaba-NLP/gte-multilingual-base
 
 # increase the wait time for llama.cpp server startup
-LLAMACPP_SERVER_TIMEOUT_WAIT=
+LLAMACPP_SERVER_TIMEOUT_WAIT=1500
 
 # direct link to a llama.cpp release
 # https://github.com/ggml-org/llama.cpp/releases
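Setting LLAMACPP_SERVER_TIMEOUT_WAIT to 1500 gives the llama.cpp server more time to come up before the application stops waiting for it. A minimal sketch of how such a timeout could be consumed, assuming the value is interpreted as seconds and that readiness is checked against the server's /health endpoint (both are assumptions; this commit only sets the value):

import os
import time
import urllib.request

# LLAMACPP_SERVER_TIMEOUT_WAIT comes from env.example; treating it as seconds
# and polling /health are illustrative assumptions, not the app's actual logic.
TIMEOUT = int(os.environ.get("LLAMACPP_SERVER_TIMEOUT_WAIT", "1500"))

def wait_for_server(url: str = "http://127.0.0.1:8080/health") -> bool:
    """Poll the server until it answers 200 or the timeout expires."""
    deadline = time.monotonic() + TIMEOUT
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return True
        except OSError:
            pass
        time.sleep(2)
    return False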