- Dockerfile +7 -13
- app.py +5 -7
- entrypoint.sh +0 -13
Dockerfile
CHANGED
@@ -1,24 +1,18 @@
+# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+
 FROM python:3.9
 
-# Create a non-root user
 RUN useradd -m -u 1000 user
 USER user
 ENV PATH="/home/user/.local/bin:$PATH"
 
 WORKDIR /app
 
-
-COPY ./requirements.txt requirements.txt
+COPY --chown=user ./requirements.txt requirements.txt
 RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
-
-COPY . /app
-
-# Copy the entrypoint.sh file to the right directory
-COPY entrypoint.sh /app/entrypoint.sh
-
-# Make sure entrypoint.sh has execute permissions
-RUN chmod +x /app/entrypoint.sh
+COPY --chown=user . /app
 
-#
-
+# Use the PORT environment variable to run the server
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py
CHANGED
@@ -6,7 +6,6 @@ import uvicorn
 import threading
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-import requests
 
 # ======== Load the DialoGPT model =========
 MODEL_NAME = "microsoft/DialoGPT-small"
@@ -30,17 +29,16 @@ def chat(msg: Message):
 
 # ======== Function to run FastAPI in the background =========
 def run_api():
-    port = int(os.environ["PORT"])
+    port = 7860
     uvicorn.run(app, host="0.0.0.0", port=port)
 
+threading.Thread(target=run_api, daemon=True).start()
+
 # ======== Streamlit interface =========
 st.title("Mi Amigo Virtual 🤖")
 st.write("Escríbeme algo y te responderé!")
 
 user_input = st.text_input("Tú:")
 if user_input:
-    response = requests.post(...)
-    if response.status_code == 200:
-        st.write("🤖:", response.json()["response"])
-    else:
-        st.write("⚠️ Error al obtener respuesta del chatbot.")
+    response = chat(Message(text=user_input))
+    st.write("🤖:", response["response"])
entrypoint.sh
DELETED
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-# Assign a random port between 10000 and 65000
-PORT=$(shuf -i 10000-65000 -n 1)
-
-# Export the PORT environment variable so Streamlit and FastAPI use it
-export PORT
-
-# Run FastAPI in the background on the random port
-uvicorn app:app --host 0.0.0.0 --port $PORT &
-
-# Run Streamlit, setting the port via the environment variable
-streamlit run app.py --server.port $PORT
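Deleting this script is consistent with the Dockerfile change, and it also retires two latent problems: uvicorn and Streamlit were both pointed at the same $PORT, so whichever bound second would have failed to start, and a port randomized per container defeats Spaces routing, which expects the app on one fixed, documented port.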