"""Gradio interface for browsing, charting, searching and exporting logs
stored by the project's database logger.

NOTE(review): this file was recovered from a copy whose HTML string literals
had their markup stripped. All HTML fragments below are minimal
reconstructions of the lost markup (the visible text content was preserved
byte-for-byte); confirm against the original templates if available.
"""

import json
import re
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple

import gradio as gr
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go

from ..core.database_logger import get_logger, LogLevel, LogCategory

# Single source of truth for per-level colors (previously duplicated in
# create_log_table and create_log_timeline_chart).
LEVEL_COLORS: Dict[str, str] = {
    'DEBUG': '#6c757d',
    'INFO': '#17a2b8',
    'WARNING': '#ffc107',
    'ERROR': '#dc3545',
    'CRITICAL': '#6f42c1',
}


class LogViewer:
    """Interface para visualização de logs (log viewing interface).

    Wraps the project logger and renders its data as HTML tables,
    statistics panels and Plotly figures for the Gradio UI.
    """

    def __init__(self):
        # Project-wide logger singleton; provides get_logs(),
        # get_statistics() and get_performance_metrics().
        self.logger = get_logger()

    def get_log_data(self, level_filter: str = "ALL", category_filter: str = "ALL",
                     hours_back: int = 24, limit: int = 1000) -> pd.DataFrame:
        """Recupera dados de log filtrados.

        Args:
            level_filter: log level name, or "ALL" for no level filter.
            category_filter: category name, or "ALL" for no category filter.
            hours_back: size of the lookback window, in hours.
            limit: maximum number of rows fetched from the logger.

        Returns:
            DataFrame with a parsed ``timestamp`` column, or an empty
            DataFrame when nothing matches.
        """
        # Window start as an ISO string — the format the logger backend expects.
        start_time = (datetime.now() - timedelta(hours=hours_back)).isoformat()

        # "ALL" in the UI maps to "no filter" (None) for the backend.
        level = None if level_filter == "ALL" else level_filter
        category = None if category_filter == "ALL" else category_filter

        logs = self.logger.get_logs(
            level=level,
            category=category,
            start_time=start_time,
            limit=limit,
        )

        if not logs:
            return pd.DataFrame()

        df = pd.DataFrame(logs)
        df['timestamp'] = pd.to_datetime(df['timestamp'])
        return df

    def create_log_table(self, level_filter: str, category_filter: str,
                         hours_back: int, limit: int) -> str:
        """Cria tabela HTML com os logs.

        Returns an HTML string: either a placeholder message or a scrollable
        striped table with colored level badges.
        """
        df = self.get_log_data(level_filter, category_filter, hours_back, limit)

        if df.empty:
            return (
                "<div style='text-align: center; padding: 20px; color: #6c757d;'>"
                "Nenhum log encontrado com os filtros aplicados."
                "</div>"
            )

        # Only show the columns that matter in the table view.
        display_df = df[['timestamp', 'level', 'category', 'message',
                         'module', 'function']].copy()
        display_df['timestamp'] = display_df['timestamp'].dt.strftime('%Y-%m-%d %H:%M:%S')

        def style_level(level: str) -> str:
            # Wrap the level name in a colored, bold span.
            color = LEVEL_COLORS.get(level, '#000000')
            return f"<span style='color: {color}; font-weight: bold;'>{level}</span>"

        display_df['level'] = display_df['level'].apply(style_level)

        # escape=False so the styled <span> badges render as HTML.
        html_table = display_df.to_html(escape=False, index=False,
                                        classes='table table-striped')
        return f"""<div style='max-height: 600px; overflow-y: auto;'>
{html_table}
</div>"""

    def create_log_statistics(self, hours_back: int) -> str:
        """Cria estatísticas dos logs.

        Renders overall logger statistics plus per-level and per-category
        distributions computed over the last ``hours_back`` hours.
        """
        stats = self.logger.get_statistics()

        # Logs from the last N hours.
        start_time = (datetime.now() - timedelta(hours=hours_back)).isoformat()
        recent_logs = self.logger.get_logs(start_time=start_time, limit=10000)
        recent_df = pd.DataFrame(recent_logs) if recent_logs else pd.DataFrame()

        # NOTE(review): the schema of `stats` is not visible here — rendered
        # generically as key/value pairs; confirm against database_logger.
        if isinstance(stats, dict):
            general_html = "".join(
                f"<li><b>{key}</b>: {value}</li>" for key, value in stats.items()
            )
            general_html = f"<ul>{general_html}</ul>" if general_html else "<p>Sem dados.</p>"
        else:
            general_html = f"<p>{stats}</p>"

        def counts_html(column: str) -> str:
            # Distribution of the recent window over one column.
            if recent_df.empty or column not in recent_df.columns:
                return "<p>Sem dados no período.</p>"
            counts = recent_df[column].value_counts()
            items = "".join(f"<li><b>{name}</b>: {count}</li>"
                            for name, count in counts.items())
            return f"<ul>{items}</ul>"

        html = f"""<div>
    <h3>📊 Estatísticas Gerais</h3>
    {general_html}
    <h3>📈 Distribuição por Nível</h3>
    {counts_html('level')}
    <h3>🏷️ Distribuição por Categoria</h3>
    {counts_html('category')}
</div>"""
        return html

    def create_performance_chart(self, hours_back: int) -> go.Figure:
        """Cria gráfico de métricas de performance.

        One scatter trace per distinct metric name over the lookback window.
        """
        start_time = (datetime.now() - timedelta(hours=hours_back)).isoformat()
        metrics = self.logger.get_performance_metrics(start_time=start_time, limit=1000)

        if not metrics:
            # Empty-state figure with a centered annotation instead of data.
            fig = go.Figure()
            fig.add_annotation(
                text="Nenhuma métrica de performance encontrada",
                xref="paper", yref="paper",
                x=0.5, y=0.5, showarrow=False
            )
            return fig

        df = pd.DataFrame(metrics)
        df['timestamp'] = pd.to_datetime(df['timestamp'])

        # One trace per metric name.
        fig = go.Figure()
        for metric_name in df['metric_name'].unique():
            metric_data = df[df['metric_name'] == metric_name]

            fig.add_trace(go.Scatter(
                x=metric_data['timestamp'],
                y=metric_data['metric_value'],
                mode='lines+markers',
                name=metric_name,
                # <extra></extra> suppresses the secondary hover box.
                hovertemplate='<b>%{fullData.name}</b><br>' +
                              'Tempo: %{x}<br>' +
                              'Valor: %{y}<br>' +
                              '<extra></extra>'
            ))

        fig.update_layout(
            title="Métricas de Performance ao Longo do Tempo",
            xaxis_title="Timestamp",
            yaxis_title="Valor",
            hovermode='closest'
        )

        return fig

    def create_log_timeline_chart(self, hours_back: int) -> go.Figure:
        """Cria gráfico de timeline dos logs.

        Stacked bar chart of log counts per hour, one bar segment per level.
        """
        start_time = (datetime.now() - timedelta(hours=hours_back)).isoformat()
        logs = self.logger.get_logs(start_time=start_time, limit=5000)

        if not logs:
            fig = go.Figure()
            fig.add_annotation(
                text="Nenhum log encontrado no período",
                xref="paper", yref="paper",
                x=0.5, y=0.5, showarrow=False
            )
            return fig

        df = pd.DataFrame(logs)
        df['timestamp'] = pd.to_datetime(df['timestamp'])

        # Bucket by hour ('h' — the uppercase 'H' alias is deprecated in
        # pandas 2.2+) and count logs per (hour, level).
        df['hour'] = df['timestamp'].dt.floor('h')
        log_counts = df.groupby(['hour', 'level']).size().reset_index(name='count')

        fig = go.Figure()

        for level in log_counts['level'].unique():
            level_data = log_counts[log_counts['level'] == level]
            fig.add_trace(go.Bar(
                x=level_data['hour'],
                y=level_data['count'],
                name=level,
                marker_color=LEVEL_COLORS.get(level, '#000000')
            ))

        fig.update_layout(
            title="Distribuição de Logs por Hora e Nível",
            xaxis_title="Hora",
            yaxis_title="Quantidade de Logs",
            barmode='stack'
        )

        return fig

    def search_logs(self, search_term: str, hours_back: int) -> str:
        """Busca logs por termo específico (case-insensitive).

        Matches against message, module and function, then renders an HTML
        table with every occurrence of the term highlighted.
        """
        start_time = (datetime.now() - timedelta(hours=hours_back)).isoformat()
        all_logs = self.logger.get_logs(start_time=start_time, limit=10000)

        if not all_logs:
            return (
                "<div style='text-align: center; padding: 20px; color: #6c757d;'>"
                "Nenhum log encontrado."
                "</div>"
            )

        # Case-insensitive containment; (field or '') guards against None
        # values in message/module/function, which previously raised
        # AttributeError on .lower().
        term = search_term.lower()
        filtered_logs = [
            log for log in all_logs
            if term in (log.get('message') or '').lower()
            or term in (log.get('module') or '').lower()
            or term in (log.get('function') or '').lower()
        ]

        if not filtered_logs:
            return (
                "<div style='text-align: center; padding: 20px; color: #6c757d;'>"
                f"Nenhum log encontrado com o termo '{search_term}'."
                "</div>"
            )

        df = pd.DataFrame(filtered_logs)
        df['timestamp'] = pd.to_datetime(df['timestamp'])

        display_df = df[['timestamp', 'level', 'category', 'message',
                         'module', 'function']].copy()
        display_df['timestamp'] = display_df['timestamp'].dt.strftime('%Y-%m-%d %H:%M:%S')

        # Case-insensitive highlighting, to match the case-insensitive filter
        # above (a plain str.replace would miss differently-cased matches).
        pattern = re.compile(re.escape(search_term), re.IGNORECASE)

        def highlight_term(text):
            if pd.isna(text):
                return text
            return pattern.sub(lambda m: f"<mark>{m.group(0)}</mark>", str(text))

        for col in ['message', 'module', 'function']:
            display_df[col] = display_df[col].apply(highlight_term)

        html_table = display_df.to_html(escape=False, index=False,
                                        classes='table table-striped')
        return f"""<div>
    <p>Encontrados {len(filtered_logs)} logs com o termo '{search_term}'</p>
    <div style='max-height: 600px; overflow-y: auto;'>
{html_table}
    </div>
</div>"""

    def export_logs(self, level_filter: str, category_filter: str,
                    hours_back: int, format_type: str) -> str:
        """Exporta logs em diferentes formatos (CSV, JSON ou HTML).

        Writes the export next to the working directory and returns a
        human-readable status message naming the created file.
        """
        df = self.get_log_data(level_filter, category_filter, hours_back, 10000)

        if df.empty:
            return "Nenhum log para exportar."

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if format_type == "CSV":
            filename = f"logs_export_{timestamp}.csv"
            df.to_csv(filename, index=False)
            # Previously reported "(unknown)" instead of the actual file name.
            return f"Logs exportados para {filename}"

        elif format_type == "JSON":
            filename = f"logs_export_{timestamp}.json"
            df.to_json(filename, orient='records', date_format='iso')
            return f"Logs exportados para {filename}"

        elif format_type == "HTML":
            filename = f"logs_export_{timestamp}.html"
            html_content = f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>Relatório de Logs - {timestamp}</title>
</head>
<body>
    <h1>Relatório de Logs</h1>
    <p>Gerado em: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
    <p>Total de registros: {len(df)}</p>
    {df.to_html(escape=False, index=False, classes='table')}
</body>
</html>
"""
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(html_content)
            return f"Relatório HTML exportado para {filename}"

        return "Formato não suportado."


def create_log_viewer_interface() -> gr.Blocks:
    """Cria a interface do visualizador de logs.

    Builds a Gradio Blocks app with five tabs (recent logs, statistics,
    charts, search, export) backed by a single LogViewer instance.
    """
    viewer = LogViewer()

    with gr.Blocks(title="📊 Visualizador de Logs") as interface:
        gr.Markdown("# 📊 Sistema de Visualização de Logs")

        with gr.Tab("📋 Logs Recentes"):
            with gr.Row():
                with gr.Column(scale=1):
                    level_filter = gr.Dropdown(
                        choices=["ALL"] + [level.value for level in LogLevel],
                        value="ALL",
                        label="Filtrar por Nível"
                    )
                    category_filter = gr.Dropdown(
                        choices=["ALL"] + [cat.value for cat in LogCategory],
                        value="ALL",
                        label="Filtrar por Categoria"
                    )
                    hours_back = gr.Slider(
                        minimum=1, maximum=168, value=24, step=1,
                        label="Horas Anteriores"
                    )
                    limit = gr.Slider(
                        minimum=10, maximum=5000, value=100, step=10,
                        label="Limite de Registros"
                    )
                    refresh_btn = gr.Button("🔄 Atualizar", variant="primary")

                with gr.Column(scale=3):
                    log_table = gr.HTML(label="Logs")

            refresh_btn.click(
                fn=viewer.create_log_table,
                inputs=[level_filter, category_filter, hours_back, limit],
                outputs=log_table
            )

        with gr.Tab("📊 Estatísticas"):
            with gr.Row():
                stats_hours = gr.Slider(
                    minimum=1, maximum=168, value=24, step=1,
                    label="Período (horas)"
                )
                stats_refresh_btn = gr.Button("🔄 Atualizar Estatísticas", variant="primary")

            stats_html = gr.HTML(label="Estatísticas")

            stats_refresh_btn.click(
                fn=viewer.create_log_statistics,
                inputs=[stats_hours],
                outputs=stats_html
            )

        with gr.Tab("📈 Gráficos"):
            with gr.Row():
                chart_hours = gr.Slider(
                    minimum=1, maximum=168, value=24, step=1,
                    label="Período (horas)"
                )
                chart_refresh_btn = gr.Button("🔄 Atualizar Gráficos", variant="primary")

            with gr.Row():
                timeline_chart = gr.Plot(label="Timeline de Logs")
                performance_chart = gr.Plot(label="Métricas de Performance")

            # One click refreshes both figures.
            chart_refresh_btn.click(
                fn=lambda hours: (viewer.create_log_timeline_chart(hours),
                                  viewer.create_performance_chart(hours)),
                inputs=[chart_hours],
                outputs=[timeline_chart, performance_chart]
            )

        with gr.Tab("🔍 Busca"):
            with gr.Row():
                search_term = gr.Textbox(label="Termo de Busca",
                                         placeholder="Digite o termo para buscar...")
                search_hours = gr.Slider(
                    minimum=1, maximum=168, value=24, step=1,
                    label="Período (horas)"
                )
                search_btn = gr.Button("🔍 Buscar", variant="primary")

            search_results = gr.HTML(label="Resultados da Busca")

            search_btn.click(
                fn=viewer.search_logs,
                inputs=[search_term, search_hours],
                outputs=search_results
            )

        with gr.Tab("📤 Exportar"):
            with gr.Row():
                export_level = gr.Dropdown(
                    choices=["ALL"] + [level.value for level in LogLevel],
                    value="ALL",
                    label="Filtrar por Nível"
                )
                export_category = gr.Dropdown(
                    choices=["ALL"] + [cat.value for cat in LogCategory],
                    value="ALL",
                    label="Filtrar por Categoria"
                )
                export_hours = gr.Slider(
                    minimum=1, maximum=168, value=24, step=1,
                    label="Período (horas)"
                )
                export_format = gr.Dropdown(
                    choices=["CSV", "JSON", "HTML"],
                    value="CSV",
                    label="Formato"
                )
            export_btn = gr.Button("📤 Exportar", variant="primary")
            export_result = gr.Textbox(label="Resultado da Exportação")

            export_btn.click(
                fn=viewer.export_logs,
                inputs=[export_level, export_category, export_hours, export_format],
                outputs=export_result
            )

        # Populate the first two tabs when the app loads.
        interface.load(
            fn=lambda: (viewer.create_log_table("ALL", "ALL", 24, 100),
                        viewer.create_log_statistics(24)),
            outputs=[log_table, stats_html]
        )

    return interface