Daniel00611 committed on
Commit
2921510
·
verified ·
1 Parent(s): fbb2dde

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -18
app.py CHANGED
@@ -91,29 +91,24 @@ def respond(message, history: list[tuple[str, str]], system_message, max_tokens,
91
 
92
 
93
  # Llamar a la API con streaming usando `responses`
94
- stream = client.responses.stream(
95
  model="gpt-5-mini",
96
  input=messages,
97
  tools=[{"type": "web_search", "search_context_size": "high"}],
98
- )
 
 
99
 
100
- response = ""
 
 
 
 
 
101
 
102
- # Recibir los fragmentos del stream
103
- for event in stream:
104
- if event.type == "response.output_text.delta":
105
- response += event.delta
106
- yield response
107
- elif event.type == "response.completed":
108
- break # 🚨 Importante: terminar cuando el modelo indique fin del stream
109
-
110
- # Cerrar el stream correctamente
111
- stream.close()
112
-
113
- # Procesar el texto final
114
- citations = extract_unique_citations(response)
115
- response = response + "\nFuentes: " + citations
116
- yield response
117
 
118
  # Configuración de la interfaz Gradio
119
  demo = gr.ChatInterface(
 
91
 
92
 
93
  # Llamar a la API con streaming usando `responses`
94
+ with client.responses.stream(
95
  model="gpt-5-mini",
96
  input=messages,
97
  tools=[{"type": "web_search", "search_context_size": "high"}],
98
+ ) as stream: # ✅ el bloque with maneja correctamente el flujo
99
+
100
+ response = ""
101
 
102
+ for event in stream: # ✅ ahora sí es iterable dentro del context manager
103
+ if event.type == "response.output_text.delta":
104
+ response += event.delta
105
+ yield response
106
+ elif event.type == "response.completed":
107
+ break
108
 
109
+ citations = extract_unique_citations(response)
110
+ response = response + "\nFuentes: " + citations
111
+ yield response
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
  # Configuración de la interfaz Gradio
114
  demo = gr.ChatInterface(