SrGuanche committed on
Commit
d92f799
·
verified ·
1 Parent(s): 09bd12c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -18
app.py CHANGED
@@ -1,31 +1,53 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
 
4
 
5
- MODEL_NAME = "distilgpt2" # Modelo ligero para CPU Basic
 
 
 
6
 
7
- def load_model(model_name):
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
10
- gen = pipeline(
11
- "text-generation",
12
- model=model,
13
- tokenizer=tokenizer,
14
- device=-1 # CPU
15
- )
16
- return gen
17
 
18
- generator = load_model(MODEL_NAME)
 
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  def answer(history, message):
21
  if not message.strip():
22
  return history, ""
23
 
 
 
 
 
24
  context = ""
25
  for user, bot in history[-6:]:
26
  context += f"Usuario: {user}\nIA: {bot}\n"
27
- context += f"Usuario: {message}\nIA:"
28
 
 
29
  output = generator(
30
  context,
31
  max_new_tokens=150,
@@ -36,17 +58,22 @@ def answer(history, message):
36
  )[0]["generated_text"]
37
 
38
  if "IA:" in output:
39
- response = output.split("IA:")[-1].strip()
40
  else:
41
- response = output
 
 
 
42
 
43
- history.append((message, response))
 
44
  return history, ""
45
 
 
46
  with gr.Blocks() as demo:
47
- gr.Markdown("# 🤖 Chatbot gratuito hecho por ti")
48
  chat = gr.Chatbot()
49
- msg = gr.Textbox(placeholder="Escribe un mensaje…")
50
  clear_btn = gr.Button("Limpiar")
51
  state = gr.State([])
52
 
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
4
+ from langdetect import detect
5
 
6
+ # --- CONFIG ---
7
+ MODEL_NAME = "distilgpt2" # Modelo ligero para CPU
8
+ TRANSLATE_TO_ES_MODEL = "Helsinki-NLP/opus-mt-mul-es"
9
+ TRANSLATE_FROM_ES_MODEL = "Helsinki-NLP/opus-mt-es-mul"
10
 
11
+ # --- Cargar modelos ---
12
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
13
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
14
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
 
 
 
 
 
 
15
 
16
+ translator_to_es = pipeline("translation", model=TRANSLATE_TO_ES_MODEL, device=-1)
17
+ translator_from_es = pipeline("translation", model=TRANSLATE_FROM_ES_MODEL, device=-1)
18
 
19
+ # --- Funciones de traducción ---
20
def translate_to_es(text):
    """Detect the language of *text* and translate it to Spanish if needed.

    Returns:
        tuple[str, str]: ``(text_es, lang)`` — the (possibly translated) text
        and the detected language code. When detection fails the text is
        passed through unchanged with ``lang == "es"``.
    """
    try:
        lang = detect(text)
    # Fix: the original used a bare `except:`, which also swallows
    # SystemExit/KeyboardInterrupt. `Exception` still covers langdetect's
    # LangDetectException on empty or undetectable input.
    except Exception:
        lang = "es"  # fall back to pass-through (treat as Spanish)
    if lang != "es":
        translated = translator_to_es(text)[0]["translation_text"]
        return translated, lang
    return text, lang
29
+
30
def translate_from_es(text, lang):
    """Translate Spanish *text* back into *lang*.

    Spanish input (``lang == "es"``) is returned untouched; anything else is
    routed through the es->multilingual translation pipeline.
    """
    if lang == "es":
        return text
    return translator_from_es(text)[0]["translation_text"]
35
+
36
+ # --- Función principal del chatbot ---
37
  def answer(history, message):
38
  if not message.strip():
39
  return history, ""
40
 
41
+ # Detectar idioma y traducir a español si es necesario
42
+ msg_es, lang = translate_to_es(message)
43
+
44
+ # Construir contexto
45
  context = ""
46
  for user, bot in history[-6:]:
47
  context += f"Usuario: {user}\nIA: {bot}\n"
48
+ context += f"Usuario: {msg_es}\nIA:"
49
 
50
+ # Generar respuesta en español
51
  output = generator(
52
  context,
53
  max_new_tokens=150,
 
58
  )[0]["generated_text"]
59
 
60
  if "IA:" in output:
61
+ response_es = output.split("IA:")[-1].strip()
62
  else:
63
+ response_es = output
64
+
65
+ # Traducir de vuelta al idioma original
66
+ response_final = translate_from_es(response_es, lang)
67
 
68
+ # Actualizar historial
69
+ history.append((message, response_final))
70
  return history, ""
71
 
72
+ # --- Interfaz Gradio ---
73
  with gr.Blocks() as demo:
74
+ gr.Markdown("# 🤖 Chatbot Multilenguaje (Traducción automática)")
75
  chat = gr.Chatbot()
76
+ msg = gr.Textbox(placeholder="Escribe tu mensaje…")
77
  clear_btn = gr.Button("Limpiar")
78
  state = gr.State([])
79