Timothy Eastridge committed on
Commit
f831e98
Β·
1 Parent(s): da2713e

commit streamlit

Browse files
Makefile CHANGED
@@ -35,6 +35,7 @@ health:
35
  @docker-compose exec postgres pg_isready -U postgres > /dev/null 2>&1 && echo "βœ… PostgreSQL: Healthy" || echo "❌ PostgreSQL: Unhealthy"
36
  @curl -s http://localhost:8000/health > /dev/null && echo "βœ… MCP Server: Healthy" || echo "❌ MCP Server: Unhealthy"
37
  @curl -s http://localhost:3000 > /dev/null && echo "βœ… Frontend: Healthy" || echo "❌ Frontend: Unhealthy"
 
38
  @docker-compose ps agent | grep -q "Up" && echo "βœ… Agent: Running" || echo "❌ Agent: Not running"
39
 
40
  # Run integration test
@@ -52,10 +53,11 @@ demo:
52
  @make seed
53
  @echo ""
54
  @echo "πŸŽ‰ Demo Ready!"
55
- @echo "1. Open http://localhost:3000 in your browser"
56
- @echo "2. Ask a question like: 'Show me all customers who have placed orders'"
57
- @echo "3. Watch the agent process through the workflow"
58
- @echo "4. Check Neo4j Browser at http://localhost:7474 (neo4j/password)"
 
59
  @echo ""
60
  @echo "During 5-minute pauses, you can edit instructions in Neo4j Browser:"
61
  @echo "MATCH (i:Instruction {status: 'pending'}) SET i.parameters = '{\"question\": \"new question\"}'"
@@ -74,6 +76,9 @@ restart-mcp:
74
  restart-frontend:
75
  docker-compose restart frontend
76
 
 
 
 
77
  # Debug commands
78
  debug-agent:
79
  docker-compose logs agent
@@ -84,6 +89,9 @@ debug-mcp:
84
  debug-frontend:
85
  docker-compose logs frontend
86
 
 
 
 
87
  # Quick status check
88
  status:
89
  docker-compose ps
 
35
  @docker-compose exec postgres pg_isready -U postgres > /dev/null 2>&1 && echo "βœ… PostgreSQL: Healthy" || echo "❌ PostgreSQL: Unhealthy"
36
  @curl -s http://localhost:8000/health > /dev/null && echo "βœ… MCP Server: Healthy" || echo "❌ MCP Server: Unhealthy"
37
  @curl -s http://localhost:3000 > /dev/null && echo "βœ… Frontend: Healthy" || echo "❌ Frontend: Unhealthy"
38
+ @curl -s http://localhost:8501 > /dev/null && echo "βœ… Streamlit: Healthy" || echo "❌ Streamlit: Unhealthy"
39
  @docker-compose ps agent | grep -q "Up" && echo "βœ… Agent: Running" || echo "❌ Agent: Not running"
40
 
41
  # Run integration test
 
53
  @make seed
54
  @echo ""
55
  @echo "πŸŽ‰ Demo Ready!"
56
+ @echo "1. Open http://localhost:3000 in your browser (Main Chat Interface)"
57
+ @echo "2. Open http://localhost:8501 in your browser (Streamlit Monitor)"
58
+ @echo "3. Ask a question like: 'Show me all customers who have placed orders'"
59
+ @echo "4. Watch the agent process through the workflow"
60
+ @echo "5. Check Neo4j Browser at http://localhost:7474 (neo4j/password)"
61
  @echo ""
62
  @echo "During 5-minute pauses, you can edit instructions in Neo4j Browser:"
63
  @echo "MATCH (i:Instruction {status: 'pending'}) SET i.parameters = '{\"question\": \"new question\"}'"
 
76
  restart-frontend:
77
  docker-compose restart frontend
78
 
79
+ restart-streamlit:
80
+ docker-compose restart streamlit
81
+
82
  # Debug commands
83
  debug-agent:
84
  docker-compose logs agent
 
89
  debug-frontend:
90
  docker-compose logs frontend
91
 
92
+ debug-streamlit:
93
+ docker-compose logs streamlit
94
+
95
  # Quick status check
96
  status:
97
  docker-compose ps
SYSTEM_OVERVIEW.md CHANGED
@@ -1,3 +1,8 @@
 
 
 
 
 
1
  # Graph-Driven Agentic System with Human-in-the-Loop Controls
2
 
3
  ## What This System Is
 
1
+ #### Quick start prompt:
2
+ Scan the repo: list the directory tree, key config files, and required env vars. Summarize how to start the system using existing scripts. Do not modify files; just report and wait for confirmation before any changes.
3
+ Proceed with a fresh start by leveraging: powershell -ExecutionPolicy Bypass -File ops/scripts/fresh_start.ps1
4
+
5
+
6
  # Graph-Driven Agentic System with Human-in-the-Loop Controls
7
 
8
  ## What This System Is
app_requirements/{6_feature_Streamlit.md β†’ 6_feature_Streamlit.txt} RENAMED
File without changes
docker-compose.yml CHANGED
@@ -101,6 +101,21 @@ services:
101
  networks:
102
  - agent-network
103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  networks:
105
  agent-network:
106
  driver: bridge
 
101
  networks:
102
  - agent-network
103
 
104
  # Streamlit monitoring / query-testing UI (see streamlit/app.py).
  streamlit:
    build: ./streamlit
    ports:
      - "8501:8501"  # Streamlit's default port, exposed on the host
    environment:
      # All database access goes through the MCP server; no DB credentials here.
      - MCP_URL=http://mcp:8000/mcp
      - MCP_API_KEY=dev-key-123
    depends_on:
      mcp:
        condition: service_healthy  # wait until the MCP server reports healthy
    volumes:
      # Dev-mode bind mount: shadows the code baked into the image so edits
      # on the host are picked up without a rebuild.
      - ./streamlit:/app
    networks:
      - agent-network
119
  networks:
120
  agent-network:
121
  driver: bridge
streamlit/Dockerfile ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
FROM python:3.11-slim

WORKDIR /app

# Install system dependencies (curl is used by the container HEALTHCHECK below)
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*

# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Expose port
EXPOSE 8501

# Report container health via Streamlit's built-in health endpoint so that
# docker-compose `condition: service_healthy` dependencies can wait on it.
# (Previously curl was installed but never used and no healthcheck existed.)
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
    CMD curl -fs http://localhost:8501/_stcore/health || exit 1

# Run Streamlit
CMD ["streamlit", "run", "app.py", "--server.address=0.0.0.0"]
streamlit/app.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env python3
"""
Streamlit MCP Monitor & Query Tester
A lightweight monitoring and testing interface for the agentic system.
All database access MUST go through MCP server - no direct connections allowed.
"""

import streamlit as st
import requests
import time
import json
import pandas as pd
from datetime import datetime, timedelta
import os
from typing import Dict, Any, Optional, Tuple

# Configuration: endpoint and key come from the environment, with dev
# defaults that match the docker-compose.yml service definition.
MCP_URL = os.getenv("MCP_URL", "http://mcp:8000/mcp")
MCP_API_KEY = os.getenv("MCP_API_KEY", "dev-key-123")

# Page configuration (must be the first Streamlit command in the script)
st.set_page_config(
    page_title="MCP Monitor & Query Tester",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Initialize session state
# workflow_id: id of the workflow currently being monitored (None = none yet)
if 'workflow_id' not in st.session_state:
    st.session_state.workflow_id = None
# debug_log: rolling log (last 5) of MCP request/response summaries
if 'debug_log' not in st.session_state:
    st.session_state.debug_log = []
# last_refresh: timestamp of the most recent connection-status check
if 'last_refresh' not in st.session_state:
    st.session_state.last_refresh = None
37
def call_mcp(tool: str, params: Optional[Dict[str, Any]] = None) -> Tuple[Dict[str, Any], int]:
    """
    Call MCP server - the ONLY way to access databases.

    Every call (success or failure) is appended to the session debug log.
    Returns (response_data, response_time_ms); errors are reported as a
    dict with an "error" key rather than raised.
    """
    started = time.time()

    def _elapsed_ms() -> int:
        # Wall-clock time since the call began, in whole milliseconds.
        return int((time.time() - started) * 1000)

    try:
        response = requests.post(
            MCP_URL,
            headers={
                "X-API-Key": MCP_API_KEY,
                "Content-Type": "application/json",
            },
            json={"tool": tool, "params": params or {}},
            timeout=10,
        )

        elapsed = _elapsed_ms()

        # Record this request/response for the debug panel.
        st.session_state.debug_log.append({
            "timestamp": datetime.now().isoformat(),
            "tool": tool,
            "params": params,
            "status_code": response.status_code,
            "response_time_ms": elapsed,
            "success": response.status_code == 200,
        })
        # Keep only the most recent 5 entries (no-op while len <= 5).
        del st.session_state.debug_log[:-5]

        if response.status_code == 200:
            return response.json(), elapsed
        return {"error": f"HTTP {response.status_code}: {response.text}"}, elapsed

    except requests.exceptions.RequestException as exc:
        elapsed = _elapsed_ms()
        error_msg = f"MCP Server Error: {str(exc)}"

        # Failures are logged too, so they show up in the debug panel.
        st.session_state.debug_log.append({
            "timestamp": datetime.now().isoformat(),
            "tool": tool,
            "params": params,
            "status_code": 0,
            "response_time_ms": elapsed,
            "success": False,
            "error": error_msg,
        })

        return {"error": error_msg}, elapsed
95
def test_neo4j_connection() -> Tuple[bool, int, str]:
    """Probe Neo4j through the MCP server by requesting the schema."""
    outcome, elapsed = call_mcp("get_schema")
    failed = "error" in outcome
    message = outcome["error"] if failed else "Connected"
    return not failed, elapsed, message
101
+
102
def test_postgres_connection() -> Tuple[bool, int, str]:
    """Probe PostgreSQL through the MCP server with a trivial SELECT."""
    outcome, elapsed = call_mcp("query_postgres", {"query": "SELECT 1 as test"})
    failed = "error" in outcome
    return not failed, elapsed, outcome["error"] if failed else "Connected"
108
+
109
def test_mcp_server() -> Tuple[bool, int, str]:
    """Test MCP server health via its /health endpoint.

    Returns (healthy, response_time_ms, message); response time is 0 when
    the request itself failed.
    """
    # Derive the health URL from the MCP endpoint. NOTE: the previous
    # MCP_URL.replace('/mcp', '/health') was wrong - with the default URL
    # "http://mcp:8000/mcp" it also rewrote the "/mcp" inside the host part,
    # yielding the malformed "http://health:8000/health". Only strip the
    # trailing path segment instead.
    health_url = MCP_URL.removesuffix("/mcp") + "/health"
    try:
        start_time = time.time()
        response = requests.get(health_url, timeout=5)
        response_time = int((time.time() - start_time) * 1000)

        if response.status_code == 200:
            return True, response_time, "Healthy"
        else:
            return False, response_time, f"HTTP {response.status_code}"
    except Exception as e:
        return False, 0, str(e)
122
+
123
def get_performance_stats() -> Dict[str, Any]:
    """Fetch the count of Log nodes from the last hour via MCP."""
    cypher = "MATCH (l:Log) WHERE l.timestamp > datetime() - duration('PT1H') RETURN count(l) as count"
    result, _ = call_mcp("query_graph", {"query": cypher})

    if "error" in result:
        return {"error": result["error"]}

    rows = result.get("data")
    return rows[0] if rows else {}
133
+
134
def create_workflow(question: str) -> Optional[str]:
    """Create a new workflow for the given question.

    Creates a Workflow node plus a three-step Instruction chain
    (discover_schema -> generate_sql -> review_results), links each
    instruction to the workflow, and chains consecutive instructions.

    Returns the new workflow id, or None (after surfacing the error in
    the UI) if a node could not be created.
    """
    workflow_id = f"streamlit-{int(time.time())}"

    # Create workflow node
    workflow_result, _ = call_mcp("write_graph", {
        "action": "create_node",
        "label": "Workflow",
        "properties": {
            "id": workflow_id,
            "name": f"Streamlit Query: {question[:50]}...",
            "description": f"Query from Streamlit: {question}",
            "status": "active",
            "created_at": datetime.now().isoformat(),
            "source": "streamlit"
        }
    })

    if "error" in workflow_result:
        st.error(f"Failed to create workflow: {workflow_result['error']}")
        return None

    # Create instruction sequence
    instructions = [
        {
            "id": f"{workflow_id}-inst-1",
            "type": "discover_schema",
            "sequence": 1,
            "description": "Discover database schema",
            "status": "pending",
            "pause_duration": 5,  # Short pause for testing
            "parameters": "{}"
        },
        {
            "id": f"{workflow_id}-inst-2",
            "type": "generate_sql",
            "sequence": 2,
            "description": f"Generate SQL for: {question}",
            "status": "pending",
            "pause_duration": 5,
            "parameters": json.dumps({"question": question})
        },
        {
            "id": f"{workflow_id}-inst-3",
            "type": "review_results",
            "sequence": 3,
            "description": "Review and format results",
            "status": "pending",
            "pause_duration": 0,
            "parameters": "{}"
        }
    ]

    # Create instruction nodes
    for inst in instructions:
        inst_result, _ = call_mcp("write_graph", {
            "action": "create_node",
            "label": "Instruction",
            "properties": inst
        })

        if "error" in inst_result:
            st.error(f"Failed to create instruction: {inst_result['error']}")
            return None

        # Link instruction to workflow. NOTE: linking is best-effort -
        # errors from this call are intentionally not treated as fatal.
        # (Previously the result was bound to an unused variable.)
        call_mcp("query_graph", {
            "query": """
            MATCH (w:Workflow {id: $workflow_id}), (i:Instruction {id: $inst_id})
            CREATE (w)-[:HAS_INSTRUCTION]->(i)
            """,
            "parameters": {"workflow_id": workflow_id, "inst_id": inst["id"]}
        })

    # Chain consecutive instructions in sequence order (also best-effort).
    for i in range(len(instructions) - 1):
        call_mcp("query_graph", {
            "query": """
            MATCH (i1:Instruction {id: $id1}), (i2:Instruction {id: $id2})
            CREATE (i1)-[:NEXT_INSTRUCTION]->(i2)
            """,
            "parameters": {"id1": instructions[i]["id"], "id2": instructions[i + 1]["id"]}
        })

    return workflow_id
219
+
220
def get_workflow_status(workflow_id: str) -> Dict[str, Any]:
    """Return the workflow's status together with its instructions'
    statuses, types, and sequence numbers, or an error dict if the
    workflow cannot be found."""
    cypher = """
        MATCH (w:Workflow {id: $id})-[:HAS_INSTRUCTION]->(i:Instruction)
        RETURN w.status as workflow_status,
               collect(i.status) as instruction_statuses,
               collect(i.type) as instruction_types,
               collect(i.sequence) as sequences
        """
    result, _ = call_mcp("query_graph", {
        "query": cypher,
        "parameters": {"id": workflow_id}
    })

    rows = result.get("data")
    if "error" in result or not rows:
        return {"error": "Workflow not found"}
    return rows[0]
237
+
238
def get_workflow_results(workflow_id: str) -> Dict[str, Any]:
    """Return {"executions": [...]} for the workflow's completed steps,
    ordered by instruction sequence, or an error dict on MCP failure."""
    cypher = """
        MATCH (w:Workflow {id: $id})-[:HAS_INSTRUCTION]->(i:Instruction)-[:EXECUTED_AS]->(e:Execution)
        RETURN i.sequence as sequence,
               i.type as type,
               i.description as description,
               e.result as result,
               e.started_at as started_at,
               e.completed_at as completed_at
        ORDER BY i.sequence
        """
    result, _ = call_mcp("query_graph", {
        "query": cypher,
        "parameters": {"id": workflow_id}
    })

    if "error" in result:
        return {"error": result["error"]}
    return {"executions": result.get("data", [])}
258
+
259
def get_schema_context() -> str:
    """Build a human-readable text dump of the database schema.

    Reads Table/Column nodes from the graph via MCP and formats one
    section per table. Returns an error string on MCP failure.
    """
    result, _ = call_mcp("query_graph", {
        "query": """
        MATCH (t:Table)-[:HAS_COLUMN]->(c:Column)
        RETURN t.name as table_name,
               collect({name: c.name, type: c.data_type, nullable: c.nullable}) as columns
        ORDER BY t.name
        """
    })

    if "error" in result:
        return f"Error fetching schema: {result['error']}"

    # Collect fragments and join once instead of repeated string
    # concatenation (the previous += loop was quadratic).
    parts = ["Database Schema:\n"]
    for record in result.get("data", []):
        parts.append(f"\nTable: {record['table_name']}\n")
        for col in record["columns"]:
            nullable = "NULL" if col["nullable"] else "NOT NULL"
            parts.append(f" - {col['name']}: {col['type']} {nullable}\n")

    return "".join(parts)
283
+
284
def main():
    """Render the monitor UI: sidebar config/stats, a connection-status
    tab, a query-tester tab, and a debug expander of recent MCP calls."""
    st.title("🤖 MCP Monitor & Query Tester")
    st.caption("Monitor agentic system health and test queries through MCP server")

    # Sidebar
    with st.sidebar:
        st.header("🔧 Configuration")
        st.code(f"MCP URL: {MCP_URL}")
        # Only the key prefix is shown to avoid exposing the full secret.
        st.code(f"API Key: {MCP_API_KEY[:10]}...")

        if st.button("🔄 Refresh All", type="primary"):
            st.rerun()

        st.header("📊 Quick Stats")
        stats = get_performance_stats()
        if "error" not in stats:
            st.metric("Logs (1h)", stats.get("count", 0))
        else:
            st.error(f"Stats error: {stats['error']}")

    # Main tabs
    tab1, tab2 = st.tabs(["🔌 Connection Status", "🤖 Query Tester"])

    with tab1:
        st.header("Connection Status Monitor")
        st.caption("All database access goes through MCP server - no direct connections allowed")

        # Connection status in columns
        col1, col2, col3 = st.columns(3)

        with col1:
            st.subheader("Neo4j (via MCP)")
            neo4j_ok, neo4j_time, neo4j_msg = test_neo4j_connection()
            st.metric(
                label="Status",
                value="Online" if neo4j_ok else "Offline",
                delta=f"{neo4j_time}ms"
            )
            if neo4j_ok:
                st.success(neo4j_msg)
            else:
                st.error(neo4j_msg)

        with col2:
            st.subheader("PostgreSQL (via MCP)")
            postgres_ok, postgres_time, postgres_msg = test_postgres_connection()
            st.metric(
                label="Status",
                value="Online" if postgres_ok else "Offline",
                delta=f"{postgres_time}ms"
            )
            if postgres_ok:
                st.success(postgres_msg)
            else:
                st.error(postgres_msg)

        with col3:
            st.subheader("MCP Server")
            mcp_ok, mcp_time, mcp_msg = test_mcp_server()
            st.metric(
                label="Status",
                value="Online" if mcp_ok else "Offline",
                delta=f"{mcp_time}ms"
            )
            if mcp_ok:
                st.success(mcp_msg)
            else:
                st.error(mcp_msg)

        # Performance stats
        st.subheader("Performance Statistics")
        stats = get_performance_stats()
        if "error" not in stats:
            st.info(f"Operations in last hour: {stats.get('count', 0)}")
        else:
            st.error(f"Cannot fetch stats: {stats['error']}")

        # Auto-refresh info
        st.session_state.last_refresh = datetime.now()
        st.caption(f"Last checked: {st.session_state.last_refresh.strftime('%H:%M:%S')}")

        # Auto-refresh every 5 seconds
        # NOTE(review): this sleep + rerun executes unconditionally on every
        # script run (Streamlit runs both tab bodies regardless of which tab
        # is selected), so st.rerun() aborts the script here and the Query
        # Tester tab and the debug expander below are never rendered.
        # Consider gating the refresh or using a fragment - TODO confirm.
        time.sleep(5)
        st.rerun()

    with tab2:
        st.header("Query Tester")
        st.caption("Test natural language queries through the agentic engine")

        # Query input
        question = st.text_area(
            "Enter your question:",
            height=100,
            placeholder="e.g., 'How many customers do we have?' or 'Show me all orders from last month'"
        )

        col1, col2 = st.columns([1, 1])

        with col1:
            if st.button("🚀 Execute Query", type="primary", disabled=not question.strip()):
                if question.strip():
                    with st.spinner("Creating workflow..."):
                        workflow_id = create_workflow(question.strip())
                        if workflow_id:
                            st.session_state.workflow_id = workflow_id
                            st.success(f"Workflow created: {workflow_id}")
                        else:
                            st.error("Failed to create workflow")

        with col2:
            if st.button("🗑️ Clear Results"):
                st.session_state.workflow_id = None
                st.rerun()

        # Workflow execution monitoring
        if st.session_state.workflow_id:
            st.subheader("Execution Progress")

            # Get workflow status
            status = get_workflow_status(st.session_state.workflow_id)

            if "error" in status:
                st.error(f"Status error: {status['error']}")
            else:
                workflow_status = status.get("workflow_status", "unknown")
                instruction_statuses = status.get("instruction_statuses", [])
                instruction_types = status.get("instruction_types", [])

                # Progress bar
                completed = sum(1 for s in instruction_statuses if s == "complete")
                total = len(instruction_statuses)
                progress = completed / total if total > 0 else 0

                st.progress(progress)
                st.caption(f"Progress: {completed}/{total} instructions completed")

                # Status display: one column per instruction, icon by state
                status_cols = st.columns(len(instruction_types))
                for i, (inst_type, inst_status) in enumerate(zip(instruction_types, instruction_statuses)):
                    with status_cols[i]:
                        if inst_status == "complete":
                            st.success(f"✅ {inst_type}")
                        elif inst_status == "executing":
                            st.warning(f"🔄 {inst_type}")
                        elif inst_status == "failed":
                            st.error(f"❌ {inst_type}")
                        else:
                            st.info(f"⏳ {inst_type}")

                # Get and display results
                if completed > 0:
                    results = get_workflow_results(st.session_state.workflow_id)

                    if "error" not in results:
                        st.subheader("Execution Results")

                        for execution in results.get("executions", []):
                            with st.expander(f"Step {execution['sequence']}: {execution['type']}"):
                                st.write(f"**Description:** {execution['description']}")

                                if execution['started_at'] and execution['completed_at']:
                                    # Timestamps may carry a trailing 'Z'; normalize for fromisoformat.
                                    start = datetime.fromisoformat(execution['started_at'].replace('Z', '+00:00'))
                                    end = datetime.fromisoformat(execution['completed_at'].replace('Z', '+00:00'))
                                    duration = (end - start).total_seconds()
                                    st.write(f"**Duration:** {duration:.2f} seconds")

                                if execution['result']:
                                    try:
                                        # Result may arrive as a JSON string or an already-parsed object.
                                        result_data = json.loads(execution['result']) if isinstance(execution['result'], str) else execution['result']

                                        if execution['type'] == 'generate_sql' and 'generated_sql' in result_data:
                                            st.write("**Generated SQL:**")
                                            st.code(result_data['generated_sql'], language='sql')

                                        if 'data' in result_data and result_data['data']:
                                            st.write("**Query Results:**")
                                            df = pd.DataFrame(result_data['data'])
                                            st.dataframe(df)

                                        if 'error' in result_data:
                                            st.error(f"Error: {result_data['error']}")

                                    except Exception as e:
                                        # Unparseable result: fall back to showing it raw.
                                        st.write("**Raw Result:**")
                                        st.code(str(execution['result']))
                    else:
                        st.error(f"Results error: {results['error']}")

    # Debug information
    with st.expander("🔧 Debug Information"):
        st.write("**Last 5 MCP Requests:**")
        for entry in st.session_state.debug_log:
            status_icon = "✅" if entry["success"] else "❌"
            st.write(f"{status_icon} {entry['timestamp']} - {entry['tool']} ({entry['response_time_ms']}ms)")
            if not entry["success"] and "error" in entry:
                st.error(f"Error: {entry['error']}")

    st.write("**Important:** All database operations go through MCP server. Direct database access is not permitted.")

if __name__ == "__main__":
    main()
streamlit/requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit==1.28.0
2
+ requests==2.31.0
3
+ pandas==2.1.0
4
+ python-dotenv==1.0.0