import os
import threading

import streamlit as st
import torch
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the DialoGPT tokenizer and model once at startup so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

app = FastAPI()


class Message(BaseModel):
    text: str


@app.post("/chat")
def chat(msg: Message):
    """Generate a reply based on the user's input."""
    input_text = msg.text
    print(f"Message received: {input_text}")

    # Encode the prompt, appending the end-of-sequence token DialoGPT expects.
    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")

    # Generate a reply; do_sample=True is needed for top_p/top_k to take effect.
    response_ids = model.generate(
        inputs,
        max_length=100,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_p=0.95,
        top_k=60,
    )

    # Decode only the newly generated tokens (everything after the prompt).
    response_text = tokenizer.decode(
        response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True
    )
    print(f"Generated response: {response_text}")

    return {"response": response_text}
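
# The /chat endpoint can also be called over HTTP once the background server below
# is running; a minimal sketch, assuming the default port 7860 on localhost:
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Hello"}'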


def run_api():
    # Read the serving port from the environment, defaulting to 7860.
    port = int(os.getenv("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)


# Run the FastAPI server in a background daemon thread so the Streamlit code
# below keeps control of the main thread.
threading.Thread(target=run_api, daemon=True).start()

st.title("My Virtual Friend 🤖")
st.write("Write me something and I'll reply!")

user_input = st.text_input("You:")
if user_input:
    # Call the chat handler directly rather than going through HTTP.
    response = chat(Message(text=user_input))
    st.write("🤖:", response["response"])
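
# To launch the app (a sketch, assuming this file is saved as app.py):
#   streamlit run app.py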