Update space
Browse files- README.md +6 -1
- dockerfile +29 -0
- requirements.txt +1 -1
- start.sh +11 -0
README.md
CHANGED
|
@@ -1,4 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
|
|
|
| 2 |
title: RAG Test
|
| 3 |
emoji: 💬
|
| 4 |
colorFrom: yellow
|
|
@@ -7,6 +11,7 @@ sdk: gradio
|
|
| 7 |
sdk_version: 5.0.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
-
An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
|
|
|
|
| 1 |
+
sdk: docker
|
| 2 |
+
app_port: 7860
|
| 3 |
+
|
| 4 |
---
|
| 5 |
+
|
| 6 |
title: RAG Test
|
| 7 |
emoji: 💬
|
| 8 |
colorFrom: yellow
|
|
|
|
| 11 |
sdk_version: 5.0.1
|
| 12 |
app_file: app.py
|
| 13 |
pinned: false
|
| 14 |
+
|
| 15 |
---
|
| 16 |
|
| 17 |
+
An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
|
dockerfile
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# 1) Minimal pinned Python base image
FROM python:3.10-slim

# The Ollama install below pipes curl into sh; without pipefail a failed
# download would be masked by the pipe's exit status (hadolint DL4006).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# 2) Install curl (to fetch the Ollama installer) and
# 3) install Ollama via its official script, cleaning apt caches in the
#    same layer so they never persist in the image (DL3009/DL3015).
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
    && curl -fsSL https://ollama.com/install.sh | sh \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# 4) Recommended non-root user (uid 1000 is the Hugging Face Spaces convention)
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH="/home/user/.local/bin:$PATH"

WORKDIR $HOME/app

# 5) Copy ONLY the dependency manifest first so the pip layer below stays
#    cached when app.py changes.
COPY --chown=user:user requirements.txt ./

# 6) Install Python libraries (no pip cache kept in the image, DL3042)
RUN pip install --no-cache-dir -r requirements.txt

# Application code and startup script last — most frequently changing layer.
# 7) --chmod makes start.sh executable in the same layer instead of a
#    follow-up RUN chmod (which would duplicate the file in a new layer).
COPY --chown=user:user app.py ./
COPY --chown=user:user --chmod=755 start.sh ./

# Documentation of the serving port declared as app_port: 7860 in README.md
EXPOSE 7860

# 8) Default command: start Ollama, then the app (exec form)
CMD ["./start.sh"]
|
requirements.txt
CHANGED
|
@@ -2,4 +2,4 @@ huggingface_hub==0.25.2
|
|
| 2 |
ollama
|
| 3 |
faiss-cpu
|
| 4 |
gradio
|
| 5 |
-
numpy
|
|
|
|
| 2 |
ollama
|
| 3 |
faiss-cpu
|
| 4 |
gradio
|
| 5 |
+
numpy
|
start.sh
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash
# Boot script: bring up the Ollama server, ensure the embedding model is
# available, then hand control to the Gradio app.
# Exit on any error, on unset variables, and on failures inside pipes —
# otherwise a failed `ollama pull` would still launch the app.
set -euo pipefail

# Start the Ollama server in the background (listens on port 11434 by default)
ollama serve &

# `ollama list` immediately after `serve &` races server startup; poll until
# the API answers (up to ~30s) before depending on it.
for _ in $(seq 1 30); do
    if ollama list >/dev/null 2>&1; then
        break
    fi
    sleep 1
done

# Make sure the nomic-embed-text model is present
if ! ollama list | grep -q "nomic-embed-text"; then
    ollama pull nomic-embed-text
fi

# Finally, start the Gradio/Streamlit app. `exec` replaces the shell so the
# app becomes PID 1 and receives SIGTERM from `docker stop` directly.
exec python app.py