- app.py +21 -21
- requirements.txt +3 -1
app.py CHANGED

```diff
@@ -1,27 +1,17 @@
 import streamlit as st
-import requests
 from fastapi import FastAPI
 from pydantic import BaseModel
 import uvicorn
 import threading
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
-#
-
-
-
-user_input = st.text_input("Tu mensaje:")
-if st.button("Enviar"):
-    if user_input:
-        response = requests.post(
-            "http://localhost:7860/chat",
-            json={"text": user_input}
-        )
-        if response.status_code == 200:
-            st.write("Respuesta:", response.json()["response"])
-        else:
-            st.write("Error en la respuesta del servidor.")
+# Load the Hugging Face model and tokenizer
+MODEL_NAME = "microsoft/DialoGPT-small"  # you can also try 'medium' or 'large'
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
-#
+# FastAPI app for the API
 app = FastAPI()
 
 class Message(BaseModel):
@@ -29,11 +19,21 @@ class Message(BaseModel):
 
 @app.post("/chat")
 def chat(msg: Message):
-
+    input_text = msg.text
+    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
+    response_ids = model.generate(inputs, max_length=100, pad_token_id=tokenizer.eos_token_id)
+    response_text = tokenizer.decode(response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
+    return {"response": response_text}
 
-#
+# Run FastAPI in the background
 def run_api():
     uvicorn.run(app, host="0.0.0.0", port=7860)
 
-
-
+threading.Thread(target=run_api, daemon=True).start()
+
+# Streamlit UI
+st.title("Mi Amigo Virtual 🤖")
+user_input = st.text_input("Escribe un mensaje:")
+if user_input:
+    response = chat(Message(text=user_input))
+    st.write("🤖:", response["response"])
```
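One design note on the diff: `threading.Thread(target=run_api, daemon=True).start()` sits at module level, and Streamlit re-executes app.py on every widget interaction, so each rerun spawns another thread that tries to re-bind port 7860 (the failure happens inside the daemon thread and only surfaces in the logs; the UI still works because it calls `chat()` in-process). A sketch of one common guard, using Streamlit's `st.cache_resource` so the server starts once per process; `start_api` is an illustrative name:

```python
import threading

import streamlit as st
import uvicorn
from fastapi import FastAPI

app = FastAPI()  # stand-in for the app defined in the diff

@st.cache_resource  # cached per Streamlit process, so reruns reuse the same thread
def start_api() -> threading.Thread:
    thread = threading.Thread(
        target=lambda: uvicorn.run(app, host="0.0.0.0", port=7860),
        daemon=True,
    )
    thread.start()
    return thread

start_api()
```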
requirements.txt CHANGED

```diff
@@ -7,4 +7,6 @@
 fastapi
 uvicorn
 streamlit
-requests
+requests
+transformers
+torch
```
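Note that `requests` stays in requirements.txt even though the new app.py no longer imports it; the UI now calls `chat()` directly in-process. The dependency is only needed if something queries the background FastAPI server over HTTP, along these lines (a sketch, assuming the server is reachable on localhost at the port bound in app.py):

```python
import requests

# POST the same payload shape the Message model expects.
resp = requests.post("http://localhost:7860/chat", json={"text": "Hola"})
resp.raise_for_status()
print(resp.json()["response"])
```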