# fredcaixeta — desc — d955a39  (stray upload metadata; commented out so the module imports cleanly)
import gradio as gr
import uuid
import subprocess
import time
import asyncio
import re
# One unique session id per process start (not persisted across restarts).
USER_ID = str(uuid.uuid4())
# Start the MCP servers in the background (currently disabled).
# subprocess.Popen(["python", "mcp_players_table_sql.py"])
# subprocess.Popen(["python", "mcp_team_table_sql.py"])
# subprocess.Popen(["python", "mcp_one_player_supabase.py"])
# time.sleep(3)
# subprocess.Popen(["python", "mcp_graph_server.py"])
# time.sleep(3)
import pandas as pd

# Path to the CSV already present in the Space repository
# (place your file at the repo root, or inside a folder).
CSV_PATH = "barca.csv"  # adjust to your real path

# Load the dataset once at startup. On failure, keep None placeholders so
# the UI can still launch and surface the error message in the data tab.
try:
    _df_full = pd.read_csv(CSV_PATH)
    _df_preview = _df_full.head(100)  # static preview shown in the "CSV Source Data" tab
    _df_meta = f"Source: Author • Rows: {len(_df_full)} • Columns: {len(_df_full.columns)}"
except Exception as e:
    _df_full = None
    _df_preview = None
    _df_meta = f"Erro ao carregar CSV em startup: {e}"
#from place_holder_image import PLACEHOLDER_IMAGE, get_current_chart
from main_agent import stream_agent_response_safe, agent_conventional_response
from utils import CUSTOM_CSS
async def simulate_streaming_adaptive(full_text: str):
    """Yield progressively longer prefixes of *full_text*, one word at a time.

    The pause before each yield depends on the word's trailing punctuation:
    sentence enders get the longest pause, clause separators a medium one,
    and plain words the shortest — mimicking a human-like typing rhythm.
    """
    # Each token is a word plus the whitespace that follows it, so joining
    # the tokens reproduces the original text exactly.
    tokens = re.findall(r'\S+\s*', full_text)
    shown = ""
    for token in tokens:
        shown += token
        tail = token.strip()
        if tail.endswith(('.', '!', '?')):
            pause = 0.15  # end of sentence: longest pause
        elif tail.endswith((',', ';', ':')):
            pause = 0.08  # clause break: medium pause
        else:
            pause = 0.04  # ordinary word: shortest pause
        await asyncio.sleep(pause)
        yield shown
async def respond(message, history):
    """Stream the agent's answer to *message* chunk by chunk.

    Args:
        message: the user's query (coerced to str defensively).
        history: chat history supplied by Gradio (not used here).

    Yields:
        Progressively longer prefixes of the agent's full response, or a
        single error string if the agent call fails.
    """
    message = str(message)
    print(f"Message received: {message}")
    try:
        print("Obtendo resposta completa do agente...")
        full_response = await stream_agent_response_safe(message)
        print(f"Resposta obtida: {len(full_response)} caracteres")
        # Simulate word-by-word streaming of the already-complete response.
        async for text_chunk in simulate_streaming_adaptive(full_response):
            yield text_chunk
    except Exception as e:
        print(f"Erro durante o processamento: {e}")
        import traceback
        traceback.print_exc()
        # BUG FIX: previously yielded a tuple including get_current_chart(),
        # but that import is commented out (see top of file), so the error
        # handler itself raised NameError. Yield only the error text, which
        # also matches the single-value chunks yielded on the happy path.
        yield f"❌ Erro: {e}"
# def refresh_chart():
# """Atualiza a visualização do gráfico"""
# return get_current_chart()
if __name__ == "__main__":
    # Build the Gradio UI: a chat tab plus a static CSV-preview tab.
    with gr.Blocks(
        title="Barcelona Analytics Platform",
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="slate",
            font=[gr.themes.GoogleFont("Inter"), "sans-serif"],
            font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "monospace"]
        ),
        css=CUSTOM_CSS
    ) as demo:
        gr.Markdown(
            """
## Data Analyst Agent with FC Barcelona Statistics (2020/2021)
            """
        )
        with gr.Tabs() as tabs:
            # TAB 1: ANALYSIS & CHAT
            with gr.Tab("Chat", id=0, scale=0):
                #gr.Markdown("### Intelligent Assistant")
                # "messages" format: history is a list of gr.ChatMessage objects.
                chatbot = gr.Chatbot(
                    type="messages",
                    label="",
                    height=400,
                    show_copy_button=True,
                    scale=0
                )
                with gr.Row():
                    msg = gr.Textbox(
                        placeholder="Ask about players, matches, or request visualizations...",
                        label="Your Query",
                        lines=2,
                        scale=4,
                        show_label=False
                    )
                    submit_btn = gr.Button("Send", variant="primary", scale=1, size="lg")
                with gr.Accordion("📋 Query Examples", open=False):
                    gr.Examples(
                        examples=[
                            "Return the top 10 players by total xG and assists combined",
                            "Create a bar chart showing the top 5 players with most passes",
                            "Find players with at least 900 minutes who rank in the top 5% for progressive passes and top 10% for xG assisted in 2020/2021, returning player, minutes, prog_passes, xA, and z-scores"
                        ],
                        inputs=msg,
                        label=""
                    )
            # TAB 2: DATA VISUALIZATION (disabled — chart feature commented out)
            # with gr.Tab("Generated Chart", id=1):
            #     gr.Markdown(
            #         """
            #         ### Interactive Visualizations
            #         Charts and graphs generated by the AI assistant will appear here.
            #         Request visualizations in the Analysis tab to see them rendered.
            #         """
            #     )
            #     chart_display = gr.Image(
            #         value=PLACEHOLDER_IMAGE,
            #         label="",
            #         type="pil",
            #         height=360,
            #         show_label=False,
            #         show_download_button=True,
            #         show_share_button=False
            #     )
            #     with gr.Row():
            #         refresh_btn = gr.Button("🔄 Refresh Visualization", variant="secondary", size="lg")
            #         gr.Markdown("_Last updated: Live_", elem_classes="status-badge")
            with gr.Tab("CSV Source Data", id=2):
                gr.Markdown("### Data consulted by the Agent\nStatic view (100 lines) from preloaded dataset.", elem_classes=["section-title"])
                # _df_meta / _df_preview are populated at startup (see CSV load above).
                meta = gr.Markdown(value=_df_meta)
                df_view = gr.Dataframe(
                    value=_df_preview,
                    label="Preview - 100 lines",
                    wrap=False,
                    interactive=False
                )
        # Informational footer
        gr.Markdown(
            """
---
**Data Source:** SQL Database • **AI Model:** Groq-powered Analysis • **Coverage:** 1 Seasons (2020-2021)
            """
        )
async def respond_and_update(message, history):
"""Responde e retorna tanto o chat quanto o gráfico atualizado"""
if not message.strip():
yield history
return
# Adicionar mensagem do usuário
history.append(gr.ChatMessage(role="user", content=message))
try:
# Obter resposta
# full_response = await asyncio.wait_for(
# stream_agent_response_safe(message),
# timeout=120.0
# )
full_response = await agent_conventional_response(message)
# Adicionar mensagem inicial do assistente
history.append(gr.ChatMessage(role="assistant", content=""))
# Streaming adaptativo
async for text_chunk in simulate_streaming_adaptive(full_response):
history[-1] = gr.ChatMessage(role="assistant", content=text_chunk)
yield history
except asyncio.TimeoutError:
history.append([message, "⏱️ Timeout: consulta demorou muito"])
yield history, get_current_chart()
except Exception as e:
print(f"Erro: {e}")
import traceback
traceback.print_exc()
history.append(gr.ChatMessage(role="assistant", content=f"⚠️ Error: {str(e)}"))
yield history
# Eventos
submit_btn.click(
fn=respond_and_update,
inputs=[msg, chatbot],
outputs=[chatbot]
).then(
lambda: "",
None,
[msg]
)
msg.submit(
fn=respond_and_update,
inputs=[msg, chatbot],
outputs=[chatbot]
).then(
lambda: "",
None,
[msg]
)
# refresh_btn.click(
# fn=refresh_chart,
# inputs=[],
# outputs=[chart_display]
# )
demo.launch(ssr_mode=False, share=False)