Spaces:
Sleeping
Sleeping
File size: 8,611 Bytes
6837750 2bc88b5 6837750 2bc88b5 6837750 2bc88b5 0111f03 2bc88b5 6837750 a975481 1d7ab06 a975481 2bc88b5 955b224 2bc88b5 6837750 2bc88b5 6837750 2bc88b5 c6c62c3 2bc88b5 6837750 c6c62c3 6837750 2bc88b5 1c249c3 2bc88b5 d04f823 2bc88b5 96350f4 807c350 2bc88b5 90fd151 2bc88b5 937875a 2bc88b5 937875a 2bc88b5 937875a 2bc88b5 937875a a975481 1d7ab06 dd4e4ff a975481 1d7ab06 a975481 eca178d a975481 2bc88b5 c6c62c3 2bc88b5 c6c62c3 2bc88b5 c6c62c3 2bc88b5 c6c62c3 2bc88b5 c6c62c3 2bc88b5 c6c62c3 2bc88b5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 |
import gradio as gr
import uuid
import subprocess
import time
import asyncio
import re
USER_ID = str(uuid.uuid4())
# Iniciar o servidor MCP em background
# subprocess.Popen(["python", "mcp_players_table_sql.py"])
# subprocess.Popen(["python", "mcp_team_table_sql.py"])
# subprocess.Popen(["python", "mcp_one_player_supabase.py"])
# time.sleep(3)
# subprocess.Popen(["python", "mcp_graph_server.py"])
# time.sleep(3)
import pandas as pd
# Caminho do CSV já presente no repositório do Space (coloque seu arquivo em /, ou numa pasta).
CSV_PATH = "barca.csv" # ajuste para o seu caminho real
# Carregamento único no startup
try:
_df_full = pd.read_csv(CSV_PATH)
_df_preview = _df_full.head(100)
_df_meta = f"Source: Author • Rows: {len(_df_full)} • Columns: {len(_df_full.columns)}"
except Exception as e:
_df_full = None
_df_preview = None
_df_meta = f"Erro ao carregar CSV em startup: {e}"
#from place_holder_image import PLACEHOLDER_IMAGE, get_current_chart
from main_agent import stream_agent_response_safe, agent_conventional_response
from utils import CUSTOM_CSS
async def simulate_streaming_adaptive(full_text: str):
    """Yield growing prefixes of *full_text*, one whitespace-delimited word
    at a time.

    Pauses 0.15 s after sentence-ending punctuation, 0.08 s after clause
    punctuation, and 0.04 s otherwise, to mimic a natural typing rhythm.
    """
    emitted = ""
    for token in re.findall(r'\S+\s*', full_text):
        emitted += token
        # Choose the pause from the token's trailing punctuation.
        core = token.strip()
        if core.endswith(('.', '!', '?')):
            pause = 0.15
        elif core.endswith((',', ';', ':')):
            pause = 0.08
        else:
            pause = 0.04
        await asyncio.sleep(pause)
        yield emitted
async def respond(message, history):
    """Stream the agent's answer for *message* as progressively longer text.

    Fetches the complete answer once from the agent, then re-emits it chunk
    by chunk via simulate_streaming_adaptive to simulate live typing.
    On failure, yields a single error message instead.
    """
    message = str(message)
    print(f"Message received: {message}")
    try:
        print("Obtendo resposta completa do agente...")
        full_response = await stream_agent_response_safe(message)
        print(f"Resposta obtida: {len(full_response)} caracteres")
        # Re-emit the full answer with adaptive pacing.
        async for text_chunk in simulate_streaming_adaptive(full_response):
            yield text_chunk
    except Exception as e:
        print(f"Erro durante o processamento: {e}")
        import traceback
        traceback.print_exc()
        # BUG FIX: the original yielded a (text, chart) tuple and called
        # get_current_chart(), whose import is commented out — that raised a
        # NameError whenever the agent failed. Yield only the error text,
        # matching the shape of the success path.
        yield f"❌ Erro: {str(e)}"
# def refresh_chart():
# """Atualiza a visualização do gráfico"""
# return get_current_chart()
if __name__ == "__main__":
    # Build the Gradio UI: a tabbed Blocks app (chat tab + static CSV
    # preview tab) using a Soft theme and the project's custom CSS.
    with gr.Blocks(
        title="Barcelona Analytics Platform",
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="slate",
            font=[gr.themes.GoogleFont("Inter"), "sans-serif"],
            font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "monospace"]
        ),
        css=CUSTOM_CSS
    ) as demo:
        gr.Markdown(
            """
            ## Data Analyst Agent with FC Barcelona Statistics (2020/2021)
            """
        )
        with gr.Tabs() as tabs:
            # TAB 1: ANALYSIS & CHAT
            with gr.Tab("Chat", id=0, scale=0):
                #gr.Markdown("### Intelligent Assistant")
                chatbot = gr.Chatbot(
                    type="messages",  # history entries are ChatMessage/dicts
                    label="",
                    height=400,
                    show_copy_button=True,
                    scale=0
                )
                with gr.Row():
                    msg = gr.Textbox(
                        placeholder="Ask about players, matches, or request visualizations...",
                        label="Your Query",
                        lines=2,
                        scale=4,
                        show_label=False
                    )
                    submit_btn = gr.Button("Send", variant="primary", scale=1, size="lg")
                with gr.Accordion("📋 Query Examples", open=False):
                    gr.Examples(
                        examples=[
                            "Return the top 10 players by total xG and assists combined",
                            "Create a bar chart showing the top 5 players with most passes",
                            "Find players with at least 900 minutes who rank in the top 5% for progressive passes and top 10% for xG assisted in 2020/2021, returning player, minutes, prog_passes, xA, and z-scores"
                        ],
                        inputs=msg,
                        label=""
                    )
            # TAB 2: DATA VISUALIZATION (disabled in this revision)
            # with gr.Tab("Generated Chart", id=1):
            #     gr.Markdown(
            #         """
            #         ### Interactive Visualizations
            #         Charts and graphs generated by the AI assistant will appear here.
            #         Request visualizations in the Analysis tab to see them rendered.
            #         """
            #     )
            #     chart_display = gr.Image(
            #         value=PLACEHOLDER_IMAGE,
            #         label="",
            #         type="pil",
            #         height=360,
            #         show_label=False,
            #         show_download_button=True,
            #         show_share_button=False
            #     )
            #     with gr.Row():
            #         refresh_btn = gr.Button("🔄 Refresh Visualization", variant="secondary", size="lg")
            #         gr.Markdown("_Last updated: Live_", elem_classes="status-badge")
            with gr.Tab("CSV Source Data", id=2):
                gr.Markdown("### Data consulted by the Agent\nStatic view (100 lines) from preloaded dataset.", elem_classes=["section-title"])
                # _df_meta was built at startup: row/column counts, or the
                # CSV load-error message.
                meta = gr.Markdown(value=_df_meta)
                df_view = gr.Dataframe(
                    value=_df_preview,  # first 100 rows, or None if the load failed
                    label="Preview - 100 lines",
                    wrap=False,
                    interactive=False
                )
        # Informative footer
        gr.Markdown(
            """
            ---
            **Data Source:** SQL Database • **AI Model:** Groq-powered Analysis • **Coverage:** 1 Seasons (2020-2021)
            """
        )
async def respond_and_update(message, history):
"""Responde e retorna tanto o chat quanto o gráfico atualizado"""
if not message.strip():
yield history
return
# Adicionar mensagem do usuário
history.append(gr.ChatMessage(role="user", content=message))
try:
# Obter resposta
# full_response = await asyncio.wait_for(
# stream_agent_response_safe(message),
# timeout=120.0
# )
full_response = await agent_conventional_response(message)
# Adicionar mensagem inicial do assistente
history.append(gr.ChatMessage(role="assistant", content=""))
# Streaming adaptativo
async for text_chunk in simulate_streaming_adaptive(full_response):
history[-1] = gr.ChatMessage(role="assistant", content=text_chunk)
yield history
except asyncio.TimeoutError:
history.append([message, "⏱️ Timeout: consulta demorou muito"])
yield history, get_current_chart()
except Exception as e:
print(f"Erro: {e}")
import traceback
traceback.print_exc()
history.append(gr.ChatMessage(role="assistant", content=f"⚠️ Error: {str(e)}"))
yield history
# Eventos
submit_btn.click(
fn=respond_and_update,
inputs=[msg, chatbot],
outputs=[chatbot]
).then(
lambda: "",
None,
[msg]
)
msg.submit(
fn=respond_and_update,
inputs=[msg, chatbot],
outputs=[chatbot]
).then(
lambda: "",
None,
[msg]
)
# refresh_btn.click(
# fn=refresh_chart,
# inputs=[],
# outputs=[chart_display]
# )
demo.launch(ssr_mode=False, share=False)
|