Spaces:
Sleeping
Sleeping
fredcaixeta
committed on
Commit
·
c6c62c3
1
Parent(s):
937875a
go
Browse files
app.py
CHANGED
|
@@ -64,7 +64,7 @@ async def respond(message, history):
|
|
| 64 |
|
| 65 |
# Simular streaming da resposta
|
| 66 |
async for text_chunk in simulate_streaming_adaptive(full_response):
|
| 67 |
-
yield text_chunk
|
| 68 |
|
| 69 |
except Exception as e:
|
| 70 |
print(f"Erro durante o processamento: {e}")
|
|
@@ -72,9 +72,9 @@ async def respond(message, history):
|
|
| 72 |
traceback.print_exc()
|
| 73 |
yield f"❌ Erro: {str(e)}", get_current_chart()
|
| 74 |
|
| 75 |
-
def refresh_chart():
|
| 76 |
-
|
| 77 |
-
|
| 78 |
|
| 79 |
if __name__ == "__main__":
|
| 80 |
with gr.Blocks(
|
|
@@ -177,7 +177,7 @@ if __name__ == "__main__":
|
|
| 177 |
async def respond_and_update(message, history):
|
| 178 |
"""Responde e retorna tanto o chat quanto o gráfico atualizado"""
|
| 179 |
if not message.strip():
|
| 180 |
-
yield history
|
| 181 |
return
|
| 182 |
|
| 183 |
# Adicionar mensagem do usuário
|
|
@@ -198,7 +198,7 @@ if __name__ == "__main__":
|
|
| 198 |
# Streaming adaptativo
|
| 199 |
async for text_chunk in simulate_streaming_adaptive(full_response):
|
| 200 |
history[-1] = gr.ChatMessage(role="assistant", content=text_chunk)
|
| 201 |
-
yield history
|
| 202 |
|
| 203 |
except asyncio.TimeoutError:
|
| 204 |
history.append([message, "⏱️ Timeout: consulta demorou muito"])
|
|
@@ -209,13 +209,13 @@ if __name__ == "__main__":
|
|
| 209 |
import traceback
|
| 210 |
traceback.print_exc()
|
| 211 |
history.append(gr.ChatMessage(role="assistant", content=f"⚠️ Error: {str(e)}"))
|
| 212 |
-
yield history
|
| 213 |
|
| 214 |
# Eventos
|
| 215 |
submit_btn.click(
|
| 216 |
fn=respond_and_update,
|
| 217 |
inputs=[msg, chatbot],
|
| 218 |
-
outputs=[chatbot
|
| 219 |
).then(
|
| 220 |
lambda: "",
|
| 221 |
None,
|
|
@@ -225,17 +225,17 @@ if __name__ == "__main__":
|
|
| 225 |
msg.submit(
|
| 226 |
fn=respond_and_update,
|
| 227 |
inputs=[msg, chatbot],
|
| 228 |
-
outputs=[chatbot
|
| 229 |
).then(
|
| 230 |
lambda: "",
|
| 231 |
None,
|
| 232 |
[msg]
|
| 233 |
)
|
| 234 |
|
| 235 |
-
refresh_btn.click(
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
)
|
| 240 |
|
| 241 |
demo.launch(ssr_mode=False, share=False)
|
|
|
|
| 64 |
|
| 65 |
# Simular streaming da resposta
|
| 66 |
async for text_chunk in simulate_streaming_adaptive(full_response):
|
| 67 |
+
yield text_chunk
|
| 68 |
|
| 69 |
except Exception as e:
|
| 70 |
print(f"Erro durante o processamento: {e}")
|
|
|
|
| 72 |
traceback.print_exc()
|
| 73 |
yield f"❌ Erro: {str(e)}", get_current_chart()
|
| 74 |
|
| 75 |
+
# def refresh_chart():
|
| 76 |
+
# """Atualiza a visualização do gráfico"""
|
| 77 |
+
# return get_current_chart()
|
| 78 |
|
| 79 |
if __name__ == "__main__":
|
| 80 |
with gr.Blocks(
|
|
|
|
| 177 |
async def respond_and_update(message, history):
|
| 178 |
"""Responde e retorna tanto o chat quanto o gráfico atualizado"""
|
| 179 |
if not message.strip():
|
| 180 |
+
yield history
|
| 181 |
return
|
| 182 |
|
| 183 |
# Adicionar mensagem do usuário
|
|
|
|
| 198 |
# Streaming adaptativo
|
| 199 |
async for text_chunk in simulate_streaming_adaptive(full_response):
|
| 200 |
history[-1] = gr.ChatMessage(role="assistant", content=text_chunk)
|
| 201 |
+
yield history
|
| 202 |
|
| 203 |
except asyncio.TimeoutError:
|
| 204 |
history.append([message, "⏱️ Timeout: consulta demorou muito"])
|
|
|
|
| 209 |
import traceback
|
| 210 |
traceback.print_exc()
|
| 211 |
history.append(gr.ChatMessage(role="assistant", content=f"⚠️ Error: {str(e)}"))
|
| 212 |
+
yield history
|
| 213 |
|
| 214 |
# Eventos
|
| 215 |
submit_btn.click(
|
| 216 |
fn=respond_and_update,
|
| 217 |
inputs=[msg, chatbot],
|
| 218 |
+
outputs=[chatbot]
|
| 219 |
).then(
|
| 220 |
lambda: "",
|
| 221 |
None,
|
|
|
|
| 225 |
msg.submit(
|
| 226 |
fn=respond_and_update,
|
| 227 |
inputs=[msg, chatbot],
|
| 228 |
+
outputs=[chatbot]
|
| 229 |
).then(
|
| 230 |
lambda: "",
|
| 231 |
None,
|
| 232 |
[msg]
|
| 233 |
)
|
| 234 |
|
| 235 |
+
# refresh_btn.click(
|
| 236 |
+
# fn=refresh_chart,
|
| 237 |
+
# inputs=[],
|
| 238 |
+
# outputs=[chart_display]
|
| 239 |
+
# )
|
| 240 |
|
| 241 |
demo.launch(ssr_mode=False, share=False)
|