hexanovapixel committed on
Commit
3689f94
·
verified ·
1 Parent(s): 1e6c8ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -26
app.py CHANGED
@@ -1,37 +1,16 @@
1
  import gradio as gr
2
- import requests
3
- import os
4
- import time
5
 
6
- API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom-560m"
7
- headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}
8
-
9
def query(payload):
    """POST *payload* to the HF Inference API, retrying while the model loads.

    Makes up to 3 attempts; between attempts it sleeps 5 seconds, which
    covers the common "model is currently loading" error response.

    Parameters:
        payload: JSON-serializable dict sent as the request body.

    Returns:
        The decoded JSON response (usually a list of generation dicts),
        or ``{"error": "No response"}`` when every attempt failed.
    """
    for _ in range(3):  # try up to 3 times
        try:
            # timeout prevents hanging forever on a stalled connection
            response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
            result = response.json()
        except (requests.RequestException, ValueError):
            # Network failure or a non-JSON error body: wait and retry
            # instead of crashing the Gradio handler.
            time.sleep(5)
            continue

        # If the model is still loading (API returns an error dict),
        # wait and retry.
        if isinstance(result, dict) and "error" in result:
            time.sleep(5)
            continue

        return result

    return {"error": "No response"}
22
 
23
def respond(message, history):
    """Gradio chat handler: answer *message* in simple Spanish via the HF API.

    Parameters:
        message: the user's latest message (str).
        history: chat history supplied by gr.ChatInterface — unused here.

    Returns:
        The generated text, or a friendly fallback string when the API
        is busy or returned an unexpected shape.
    """
    if not message:
        return "Pregúntame algo 😊"

    output = query({
        "inputs": "Responde en español y explica de forma sencilla: " + message
    })

    # Guard against an EMPTY list before indexing output[0] — the original
    # check raised IndexError when the API returned [].
    if isinstance(output, list) and output and "generated_text" in output[0]:
        return output[0]["generated_text"]
    else:
        return "La IA está ocupada 😭 intenta otra vez en unos segundos"
35
 
36
  demo = gr.ChatInterface(fn=respond)
37
 
 
1
  import gradio as gr
2
+ from transformers import pipeline
 
 
3
 
4
+ chatbot = pipeline("text-generation", model="distilgpt2")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
def respond(message, history):
    """Gradio chat handler: generate a reply with the local pipeline.

    Parameters:
        message: the user's latest message (str).
        history: chat history supplied by gr.ChatInterface — unused here.

    Returns:
        Only the newly generated text (the prompt is not echoed back).
    """
    if not message:
        return "Pregúntame algo 😊"

    # NOTE(review): distilgpt2 is an English model; Spanish output quality
    # will likely be poor — consider a Spanish GPT-2 variant.
    prompt = "Responde en español de forma clara: " + message

    # return_full_text=False strips the prompt from the output — without it
    # the user sees the instruction prefix repeated in every reply.
    response = chatbot(prompt, max_length=100, do_sample=True,
                       return_full_text=False)

    return response[0]["generated_text"]
 
 
 
14
 
15
  demo = gr.ChatInterface(fn=respond)
16