|
|
|
|
|
""" |
|
|
Universal Multi-Agent Platform - Core Application (Production Ready) |
|
|
Auto-generated with Gradio 4.x compatibility |
|
|
""" |
|
|
|
|
|
import gradio as gr |
|
|
import pandas as pd |
|
|
from typing import Dict, Any, List, Optional, Tuple |
|
|
from pathlib import Path |
|
|
import json |
|
|
import os |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from plugins.processors.schema_detector import * |
|
|
from plugins.processors.text_processor import * |
|
|
from plugins.outputs.table_formatter import * |
|
|
from plugins.processors.date_normalizer import * |
|
|
from plugins.file_handlers.csv_handler import * |
|
|
from plugins.outputs.report_generator import * |
|
|
from plugins.file_handlers.excel_handler import * |
|
|
from plugins.memory.document_memory import * |
|
|
from plugins.processors.data_cleaner import * |
|
|
from plugins.analyzers.statistical_analyzer import * |
|
|
from plugins.analyzers.time_series_analyzer import * |
|
|
from plugins.outputs.chart_generator import * |
|
|
from plugins.memory.conversation_memory import * |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class PluginManager:
    """Manage all plugins and application state.

    Central registry for file handlers, processors, analyzers, memory and
    output plugins, plus the dataset state shared by the Gradio callbacks
    (``loaded_data``, ``cleaned_df``, ``last_chart_json``).
    """

    def __init__(self):
        # File handlers, tried in order by load_file().
        # (The original assigned each plugin via a dead `... if True else None`
        # conditional — generated boilerplate removed; values are identical.)
        self.file_handlers = [CSVHandler(), ExcelHandler()]

        # Processor / analyzer plugins.
        self.data_cleaner = DataCleaner()
        self.time_series_analyzer = TimeSeriesAnalyzer()
        self.statistical_analyzer = StatisticalAnalyzer()

        # Memory / output plugins.
        self.conversation_memory = ConversationMemory()
        self.table_formatter = TableFormatter()
        self.chart_generator = ChartGenerator()

        # Shared application state mutated by load_file()/the UI callbacks.
        self.loaded_data: Optional[Dict[str, Any]] = None
        self.cleaned_df: Optional[pd.DataFrame] = None
        self.last_chart_json: Optional[str] = None

    def load_file(self, file_path: str) -> Dict[str, Any]:
        """Load file using appropriate handler and automatically clean data.

        Args:
            file_path: Path to the file on the server.

        Returns:
            The handler's result dict (``{"success": bool, ...}``). On
            success, ``self.cleaned_df`` holds the cleaned DataFrame and
            ``result["metadata"]`` gains ``cleaned_shape``/``cleaned_cols``.
        """
        # Reset state first so a failed load never leaves stale data behind.
        self.loaded_data = None
        self.cleaned_df = None
        self.last_chart_json = None

        if not os.path.exists(file_path):
            return {"success": False, "error": "File not found on server"}

        for handler in self.file_handlers:
            if handler.can_handle(file_path):
                result = handler.load(file_path)
                if result.get("success"):
                    self.loaded_data = result

                    # Tabular payloads get cleaned + schema-enforced eagerly
                    # so later queries can rely on cleaned_df directly.
                    df = self._get_raw_df()
                    if df is not None and self.data_cleaner:
                        df = self.data_cleaner.clean_dataframe(df)
                        self.cleaned_df = self.data_cleaner.enforce_schema(df)

                        result.setdefault("metadata", {})
                        result["metadata"]["cleaned_shape"] = list(self.cleaned_df.shape)
                        result["metadata"]["cleaned_cols"] = list(self.cleaned_df.columns)

                return result

        return {"success": False, "error": "No handler found for this file type"}

    def _get_raw_df(self) -> Optional[pd.DataFrame]:
        """Internal method to extract a DataFrame from loaded_data.

        Prefers the multi-sheet "combined" frame (Excel handler) over the
        single "data" frame (CSV handler); returns None for non-tabular loads.
        """
        if not self.loaded_data:
            return None
        if "combined" in self.loaded_data and isinstance(self.loaded_data["combined"], pd.DataFrame):
            return self.loaded_data["combined"]
        elif "data" in self.loaded_data and isinstance(self.loaded_data["data"], pd.DataFrame):
            return self.loaded_data["data"]
        return None
|
|
|
|
|
|
|
|
# Single shared PluginManager instance; every Gradio callback below reads
# and mutates this module-level state (one session of state per process).
pm = PluginManager()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def upload_file(file):
    """Handle file upload (Gradio callback).

    Args:
        file: The Gradio file object (or None when nothing was uploaded);
              its ``.name`` attribute is the temp path on the server.

    Returns:
        Tuple of (status message for the Textbox, preview HTML or None).
    """
    if file is None:
        return "โ No file uploaded", None

    try:
        result = pm.load_file(file.name)

        if result.get("success"):
            # Ask the matching handler for a rich preview when it offers one;
            # fall back to a plain confirmation otherwise.
            preview_html = "Data loaded successfully"
            for handler in pm.file_handlers:
                if handler.can_handle(file.name) and hasattr(handler, 'preview'):
                    preview_html = handler.preview(result)
                    break

            shape_info = f"Shape: {pm.cleaned_df.shape}" if pm.cleaned_df is not None else "Non-tabular data"

            # FIX: the original string literal was split across two physical
            # source lines (a syntax error); re-joined into one literal.
            summary = "โ File loaded and processed successfully\n"
            summary += f"Type: {result.get('file_type', 'unknown')}\n"
            summary += f"Data: {shape_info}\n\n"
            summary += "Ready for conversational analysis!"

            return summary, preview_html

        return f"โ Error: {result.get('error')}", None

    except Exception as e:
        # Last-resort guard so a handler crash reaches the UI, not the logs only.
        return f"โ Critical Error: {str(e)}", None
|
|
|
|
|
|
|
|
def process_query(query: str, history: List) -> Tuple[List, str, Optional[str]]:
    """
    Executes conversational analytics.

    Args:
        query: Natural-language question about the loaded data.
        history: Current chat history as (user, assistant) tuples.

    Returns: updated history, empty query text, and chart JSON (or None).
    """
    # Reject blank queries before touching any plugin state.
    if not query or not query.strip():
        return history + [("", "โ Please enter a question")], "", None

    if pm.conversation_memory:
        pm.conversation_memory.add_message("user", query)

    df = pm.cleaned_df
    pm.last_chart_json = None

    # No tabular data: either summarize a loaded document or prompt for upload.
    if df is None or df.empty:

        if pm.loaded_data and pm.loaded_data.get('file_type') in ['pdf', 'docx']:
            # FIX: the original indexed pm.loaded_data.get('text_data', [{}])[0]
            # unconditionally, which raised IndexError when 'text_data' was
            # present but an empty list. Guard with `or [{}]`.
            text_records = pm.loaded_data.get('text_data') or [{}]
            document_text = pm.loaded_data.get('text', '') or str(text_records[0].get('text', 'No text'))
            response = "๐ **Document Content Loaded**\n\n"
            response += "The system has loaded a document. Advanced NLP analysis would be applied here.\n"
            response += f"Text Sample: {document_text[:200]}..."
        else:
            response = "โ No **data** loaded for analysis. Please upload a file first."

        if pm.conversation_memory:
            pm.conversation_memory.add_message("assistant", response)
        return history + [(query, response)], "", None

    try:
        # Prefer the time-series analyzer (query-aware); fall back to plain
        # whole-frame statistics when it is unavailable.
        if pm.time_series_analyzer:
            description, result_df = pm.time_series_analyzer.analyze_query(df, query)
        elif pm.statistical_analyzer:
            stats = pm.statistical_analyzer.analyze(df)
            description = "๐ Statistical Analysis Results"
            result_df = pd.DataFrame(stats.get('columns', {})).T
        else:
            description = "โ ๏ธ No analyzer available. Upload data and try basic queries."
            result_df = None

        final_response = f"**Query:** {query}\n\n{description}\n\n"
        chart_json = None

        if result_df is not None and not result_df.empty:

            if pm.table_formatter:
                table_markdown = pm.table_formatter.format_to_markdown(result_df.head(10))
                final_response += "### Results (Top 10 Rows):\n"
                final_response += table_markdown
                final_response += f"\n\n*Total Rows: {len(result_df):,}*"

            # Best-effort bar chart of the first two columns; charting failure
            # must never kill the textual reply.
            if pm.chart_generator and len(result_df.columns) >= 2:
                try:
                    x_col = result_df.columns[0]
                    y_col = result_df.columns[1]
                    chart_json = pm.chart_generator.create_chart_html(
                        result_df.head(20),
                        'bar',
                        x=x_col,
                        y=y_col,
                        title=description.split('\n')[0][:50]
                    )
                except Exception as chart_err:
                    print(f"Chart generation failed: {chart_err}")

        else:
            final_response = f"**Query:** {query}\n\n{description}"

        if pm.conversation_memory:
            pm.conversation_memory.add_message("assistant", final_response)

        return history + [(query, final_response)], "", chart_json

    except Exception as e:
        # Surface a truncated traceback in-chat for debuggability.
        import traceback
        error_trace = traceback.format_exc()
        response = f"โ Analysis Error: {str(e)}\n\nDebug Info:\n```\n{error_trace[:500]}\n```"
        return history + [(query, response)], "", None
|
|
|
|
|
|
|
|
def create_ui():
    """Create Gradio interface (Gradio 4.x compatible).

    Builds a two-tab Blocks app: file upload/preview, and a conversational
    analysis tab with an inline Plotly chart panel. Returns the Blocks object
    (launched by the __main__ guard).
    """

    with gr.Blocks(title="Universal AI Platform", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# ๐ค Universal Multi-Agent Platform")
        gr.Markdown("## AI-Powered Analysis & Conversational Intelligence")

        with gr.Tabs():

            # --- Tab 1: upload a file, show handler-generated preview ------
            with gr.Tab("๐ Upload & Process"):
                with gr.Row():
                    with gr.Column(scale=1):
                        file_upload = gr.File(
                            label="Upload Your File",
                            file_types=[".xlsx", ".xls", ".csv", ".pdf", ".docx", ".json", ".xml"],
                            interactive=True
                        )
                        upload_btn = gr.Button("๐ค Process File", variant="primary", size="lg")
                        upload_status = gr.Textbox(
                            label="Status",
                            lines=8,
                            value="Ready to process files. Supported: Excel, CSV, PDF, Word, JSON, XML",
                            interactive=False
                        )

                    with gr.Column(scale=2):
                        data_preview = gr.HTML(label="Data Preview")

                # Wires the button to the module-level upload_file callback.
                upload_btn.click(
                    fn=upload_file,
                    inputs=[file_upload],
                    outputs=[upload_status, data_preview]
                )

            # --- Tab 2: conversational analysis + chart ---------------------
            with gr.Tab("๐ฌ Ask Questions"):
                # NOTE(review): type='tuples' matches process_query's
                # (user, assistant) history format; newer Gradio deprecates
                # it in favor of 'messages' — confirm before upgrading.
                chatbot = gr.Chatbot(
                    height=450,
                    label="Conversational AI Assistant",
                    type='tuples',
                    show_copy_button=True
                )

                gr.Markdown("""
                ### ๐ Example Queries:
                - "Summarize the data"
                - "Show me aggregated statistics"
                - "Group by [column name]"
                - "Segment the data into categories"
                - "Analyze trends over time"
                - "Show correlation between columns"
                """)

                with gr.Row():
                    msg = gr.Textbox(
                        label="Your Query",
                        placeholder="Ask anything about your data...",
                        scale=4,
                        lines=2
                    )
                    submit_btn = gr.Button("Send", variant="primary", scale=1, size="lg")

                # Chart panel; filled with self-contained Plotly HTML below.
                chart_display = gr.HTML(
                    label="Visualization",
                    value=""
                )

                with gr.Row():
                    clear_btn = gr.Button("๐๏ธ Clear Chat", variant="secondary")

                def process_and_display(query: str, history: List) -> Tuple[List, str, str]:
                    """Process query and return chart HTML.

                    Wraps process_query(): converts its chart JSON (if any)
                    into a self-contained HTML fragment that loads Plotly
                    from CDN and renders into a fixed div.
                    """
                    updated_history, empty_msg, chart_json_str = process_query(query, history)

                    # Empty string clears the chart panel when no chart applies.
                    chart_html = ""
                    if chart_json_str:
                        # chart_json_str is embedded verbatim as a JS object
                        # literal; the try/catch reports render errors inline.
                        # NOTE(review): the element id is fixed — fine while
                        # there is a single chart_display component.
                        chart_html = (
                            '<div style="width: 100%; height: 500px; margin-top: 20px;">' +
                            '<script src="https://cdn.plot.ly/plotly-2.27.0.min.js"></script>' +
                            '<div id="plotly-chart-container"></div>' +
                            '<script>' +
                            '(function() {' +
                            'try {' +
                            'const chartData = ' + chart_json_str + ';' +
                            "Plotly.newPlot('plotly-chart-container', chartData.data, chartData.layout, {responsive: true, displayModeBar: true});" +
                            '} catch (e) {' +
                            "console.error('Chart rendering error:', e);" +
                            "document.getElementById('plotly-chart-container').innerHTML = '<p style=\"color: red; padding: 20px;\">Chart rendering failed: ' + e.message + '</p>';" +
                            '}' +
                            '})();' +
                            '</script>' +
                            '</div>'
                        )

                    return updated_history, empty_msg, chart_html

                # Enter key and Send button trigger the same handler.
                msg.submit(
                    process_and_display,
                    inputs=[msg, chatbot],
                    outputs=[chatbot, msg, chart_display]
                )

                submit_btn.click(
                    process_and_display,
                    inputs=[msg, chatbot],
                    outputs=[chatbot, msg, chart_display]
                )

                # Clears both the chat history and the chart panel.
                clear_btn.click(
                    lambda: ([], ""),
                    outputs=[chatbot, chart_display]
                )

        gr.Markdown("---")
        gr.Markdown(f"**Enabled Plugins:** Schema Detector, Text Processor, Table Formatter, Date Normalizer, CSV Handler, Report Generator, Excel Handler, Document Memory, Data Cleaner, Statistical Analyzer, Time Series Analyzer, Chart Generator, Conversation Memory")
        gr.Markdown("*Powered by Universal AI Agent Development Platform*")

    return demo
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # The key is optional: basic analytics run without it, so only warn.
    if not os.environ.get("OPENAI_API_KEY"):
        print("โ ๏ธ Warning: OPENAI_API_KEY not set (not required for basic analytics)")

    print("๐ Launching Universal AI Platform...")

    # Build the Blocks app and serve it on all interfaces, port 7860,
    # without a public share link; show_error surfaces tracebacks in the UI.
    app = create_ui()
    app.launch(server_name="0.0.0.0", server_port=7860, share=False, show_error=True)
|
|
|