as
- app.py +9 -6
- requirements.txt +2 -9
app.py
CHANGED
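Both hunks below start at line 6, so the imports at the top of app.py are outside the diff. Judging by the names used in the code, the first lines presumably read something like the following sketch; this is an inference from usage, not part of the commit:

import streamlit as st          # st.title / st.text_input / st.write
from fastapi import FastAPI     # app = FastAPI()
from pydantic import BaseModel  # class Message(BaseModel)
import uvicorn                  # uvicorn.run(...)
import threading                # context line in the hunk header below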
@@ -6,12 +6,12 @@ import threading
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-# Load model
-MODEL_NAME = "microsoft/DialoGPT-small"
+# ======== Load the DialoGPT model =========
+MODEL_NAME = "microsoft/DialoGPT-small"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
-#
+# ======== Define the API with FastAPI =========
 app = FastAPI()
 
 class Message(BaseModel):
@@ -19,21 +19,24 @@ class Message(BaseModel):
 
 @app.post("/chat")
 def chat(msg: Message):
+    """Generate a response based on the user's input."""
     input_text = msg.text
     inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
     response_ids = model.generate(inputs, max_length=100, pad_token_id=tokenizer.eos_token_id)
     response_text = tokenizer.decode(response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
     return {"response": response_text}
 
-#
+# ======== Function to run FastAPI in the background =========
 def run_api():
     uvicorn.run(app, host="0.0.0.0", port=7860)
 
 threading.Thread(target=run_api, daemon=True).start()
 
-# Streamlit
+# ======== Streamlit interface =========
 st.title("Mi Amigo Virtual 🤖")
-
+st.write("Escríbeme algo y te responderé!")
+
+user_input = st.text_input("Tú:")
 if user_input:
     response = chat(Message(text=user_input))
     st.write("🤖:", response["response"])
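A note on the generation call: max_length=100 caps the total sequence (prompt plus reply), and the slice response_ids[:, inputs.shape[-1]:] strips the prompt tokens so only the model's reply is decoded. The Streamlit UI calls chat() directly in-process, so no HTTP round-trip is involved there; the background uvicorn thread additionally serves the same endpoint on port 7860. An external client could reach it roughly as in the sketch below, which assumes the app is running and the port is reachable locally, and uses only the standard library since requests was dropped from requirements.txt:

import json
import urllib.request

# Hypothetical external client for the /chat endpoint defined in app.py.
req = urllib.request.Request(
    "http://localhost:7860/chat",  # assumes a locally running instance
    data=json.dumps({"text": "Hola"}).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["response"])  # prints the generated reply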
requirements.txt
CHANGED
@@ -1,12 +1,5 @@
-#transformers
-#torch
-#streamlit
-#ffmpeg
-#fastapi
-#uvicorn
 fastapi
 uvicorn
-streamlit
-requests
 transformers
-torch
+torch
+streamlit
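The cleaned-up list keeps just the packages the app actually uses (pydantic, needed for BaseModel, comes in transitively as a FastAPI dependency). Dropping requests is consistent with the Streamlit UI calling chat() in-process rather than over HTTP, and the commented-out placeholder block, including the ffmpeg entry, goes away entirely. The file installs as usual with pip install -r requirements.txt.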