# hlc_chatbot / app.py — DBA Diagnostic Copilot (Gradio app for Hugging Face Spaces)
# 1. Custom CSS injected into the Gradio app: widen the layout and make
# rendered markdown tables scroll horizontally instead of squishing to fit.
CSS: str = """
.container { max-width: 1200px; margin: auto; }
/* This forces tables to scroll horizontally instead of squishing */
.prose table {
display: block;
overflow-x: auto;
white-space: nowrap;
width: 100%;
}
.prose th, .prose td {
padding: 10px;
border: 1px solid #444;
min-width: 150px;
}
"""
import gradio as gr
import json
import os
# Import the engine and workflow logic: schema graph, linguistic engine,
# the multi-step orchestrator, and the shared conversational memory.
from engine import (
SchemaGraph, LinguisticEngine, run_multi_step_workflow, chat_memory
)
print("Initializing AI trained Models... (This might take a minute on HF)")

# 1. Load the Spider-format schema definitions and index them by db_id.
tables_path = './spider_data/tables.json'
# JSON is UTF-8 by specification; pin the encoding so reading does not
# depend on the platform's locale default (e.g. cp1252 on Windows).
with open(tables_path, encoding='utf-8') as f:
    tables_raw = json.load(f)
schema_map = {s['db_id']: s for s in tables_raw}

# 2. Instantiate one schema graph per known database.
graph_derby = SchemaGraph(spider_schema=schema_map['derby_system'])
graph_influx = SchemaGraph(spider_schema=schema_map['influx_system'])
# 3. Locations of the extracted fine-tuned model folders. All four live
#    under one root and are shared by both engines.
_MODEL_ROOT = './schema_linking_data'
table_model_path = _MODEL_ROOT + '/model_tables'
column_model_path = _MODEL_ROOT + '/model_columns'
value_model_path = _MODEL_ROOT + '/model_values'
skeleton_model_path = _MODEL_ROOT + '/model_skeleton'
# 4. Instantiate the linguistic engines — one per database, sharing the
#    same four model folders.
def _build_engine(graph, db_id):
    """Construct a LinguisticEngine wired to the shared model folders."""
    return LinguisticEngine(
        graph,
        table_model_path=table_model_path,
        column_model_path=column_model_path,
        value_model_path=value_model_path,
        skeleton_model_path=skeleton_model_path,
        db_id=db_id,
    )

print("Loading Derby Engine...")
engine_derby = _build_engine(graph_derby, 'derby_system')

print("Loading Influx Engine...")
engine_influx = _build_engine(graph_influx, 'influx_system')

print("Engine Ready! Launching UI...")
# 5. Gradio chat callback.
def respond_to_coworker(message, history):
    """Answer one chat turn via the multi-step workflow orchestrator.

    Args:
        message: The user's latest utterance.
        history: Prior turns supplied by gr.ChatInterface (unused here —
            conversational state lives in the shared ``chat_memory``).

    Returns:
        The workflow's reply string, or a short error message on failure.
    """
    try:
        reply = run_multi_step_workflow(
            message, engine_derby, engine_influx, chat_memory, debug=True
        )
    except Exception as e:
        # Top-level UI boundary: log the full traceback server-side and
        # surface a short error in the chat instead of crashing the app.
        import traceback
        traceback.print_exc()
        return f"🚨 Engine Error: {str(e)}"
    return reply
# 6. Build the UI
# demo = gr.ChatInterface(
# fn=respond_to_coworker,
# title="πŸ“Š DBA Diagnostic Copilot",
# css=CSS,
# description="Ask me about system targets, performance bottlenecks, templates, or request specific execution plans.",
# examples=[
# "Identify the single worst spike and show me its execution plan.",
# "What is the current status of all my targets?",
# "What are the top sql issue on 'MySQL_QUICK_1711_1' target",
# "List all targets running on Linux.",
# "Show me the database schema for the Derby system."
# ],
# )
# if __name__ == "__main__":
# demo.launch()
# Clickable example prompts shown beneath the chat input box.
_EXAMPLE_PROMPTS = [
    "Identify the single worst spike and show me its execution plan.",
    "List all targets running on Linux.",
    "What are the top sql issues on the production target?",
    "Show me the database schema for the Derby system.",
    "What is the current status of all my targets?",
]

# Assemble the UI: a title header plus the chat interface, with the custom
# CSS applied so wide result tables scroll instead of squishing.
with gr.Blocks(css=CSS) as demo:
    gr.Markdown("# 📊 DBA Diagnostic Copilot")
    gr.ChatInterface(fn=respond_to_coworker, examples=_EXAMPLE_PROMPTS)

if __name__ == "__main__":
    demo.launch()