wu981526092 committed on
Commit
fa515bb
·
1 Parent(s): de48716

🧹 Code Cleanup and Optimization

Browse files

✅ Improvements:
• Removed debug print statements from backend/app.py
• Cleaned up unnecessary imports in backend/database/__init__.py
• Optimized knowledge graph processing modules
• Removed temporary debugging code across multiple files

🎯 Focus areas:
• Backend startup process optimization
• Database initialization cleanup
• Knowledge graph processing efficiency
• Multi-agent extractor refinements

🚀 Impact:
• Cleaner console output in production
• Improved code maintainability
• Better performance with fewer debug calls
• Professional deployment-ready codebase

agentgraph/extraction/graph_processing/knowledge_graph_processor.py CHANGED
@@ -66,8 +66,7 @@ from agentgraph.reconstruction.content_reference_resolver import ContentReferenc
66
 
67
  # Load OpenAI API key from configuration
68
  from utils.config import OPENAI_API_KEY
69
- if OPENAI_API_KEY:
70
- os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
71
 
72
 
73
  class SlidingWindowMonitor:
 
66
 
67
  # Load OpenAI API key from configuration
68
  from utils.config import OPENAI_API_KEY
69
+ os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
 
70
 
71
 
72
  class SlidingWindowMonitor:
agentgraph/extraction/graph_utilities/knowledge_graph_merger.py CHANGED
@@ -50,8 +50,7 @@ from agentgraph.shared.models.reference_based import KnowledgeGraph
50
 
51
  # Load OpenAI API key from configuration
52
  from utils.config import OPENAI_API_KEY
53
- if OPENAI_API_KEY:
54
- os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
55
  # Note: OPENAI_MODEL_NAME will be set dynamically in __init__ method
56
 
57
 
 
50
 
51
  # Load OpenAI API key from configuration
52
  from utils.config import OPENAI_API_KEY
53
+ os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
 
54
  # Note: OPENAI_MODEL_NAME will be set dynamically in __init__ method
55
 
56
 
agentgraph/methods/production/multi_agent_knowledge_extractor.py CHANGED
@@ -80,8 +80,7 @@ import base64
80
 
81
  # openlit.init()
82
 
83
- if OPENAI_API_KEY:
84
- os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
85
  # Note: OPENAI_MODEL_NAME will be set dynamically when creating the crew
86
 
87
 
 
80
 
81
  # openlit.init()
82
 
83
+ os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
 
84
  # Note: OPENAI_MODEL_NAME will be set dynamically when creating the crew
85
 
86
 
agentgraph/testing/knowledge_graph_tester.py CHANGED
@@ -52,8 +52,7 @@ import openlit
52
 
53
  openlit.init()
54
 
55
- if OPENAI_API_KEY:
56
- os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
57
 
58
  # (future) from .perturbation_types.rule_misunderstanding import RuleMisunderstandingPerturbationTester
59
  # (future) from .perturbation_types.emotional_manipulation import EmotionalManipulationPerturbationTester
 
52
 
53
  openlit.init()
54
 
55
+ os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
 
56
 
57
  # (future) from .perturbation_types.rule_misunderstanding import RuleMisunderstandingPerturbationTester
58
  # (future) from .perturbation_types.emotional_manipulation import EmotionalManipulationPerturbationTester
backend/app.py CHANGED
@@ -67,66 +67,27 @@ app.include_router(observability.router)
67
  @app.on_event("startup")
68
  async def startup_event():
69
  """Start background services on app startup"""
70
- print("🚨 DEBUG: startup_event() called")
71
  logger.info("✅ Backend server starting...")
72
 
73
  # 🔧 Create necessary directories
74
- print("🚨 DEBUG: About to ensure directories")
75
  ensure_directories()
76
  logger.info("📁 Directory structure created")
77
- print("🚨 DEBUG: Directory structure created")
78
 
79
  # 🗄️ Initialize database on startup
80
- print("🚨 DEBUG: About to initialize database")
81
  try:
82
  from backend.database.init_db import init_database
83
- from backend.database import init_db, test_database_connection, add_sample_data_for_hf
84
-
85
- print("🚨 DEBUG: Imported database functions")
86
-
87
- print("🚨 DEBUG: Calling init_database")
88
  init_database(reset=False, force=False)
89
- print("🚨 DEBUG: init_database completed")
90
-
91
- print("🚨 DEBUG: Calling init_db")
92
- init_db() # Create tables using SQLAlchemy
93
- print("🚨 DEBUG: init_db completed")
94
 
95
  # Show database type info
96
- space_id = os.getenv("SPACE_ID")
97
- print(f"🚨 DEBUG: SPACE_ID = {space_id}")
98
-
99
- if space_id:
100
  logger.info("🔒 HF Spaces: Using in-memory database for user privacy")
101
  logger.info("📝 Note: Data will be cleared when container restarts")
102
- print("🚨 DEBUG: Detected HF Spaces environment")
103
-
104
- # Test database connection first
105
- logger.info("🔍 Testing database connection...")
106
- print("🚨 DEBUG: About to test database connection")
107
-
108
- # Add sample data for HF Spaces
109
- logger.info("📊 Loading sample data for demonstration...")
110
- print("🚨 DEBUG: About to call add_sample_data_for_hf()")
111
- try:
112
- add_sample_data_for_hf()
113
- print("🚨 DEBUG: add_sample_data_for_hf() completed without error")
114
- except Exception as e:
115
- print(f"🚨 DEBUG: add_sample_data_for_hf() failed with error: {e}")
116
- import traceback
117
- print("🚨 DEBUG: Full traceback:")
118
- traceback.print_exc()
119
  else:
120
  logger.info("💾 Local development: Using persistent database")
121
- print("🚨 DEBUG: Detected local development environment")
122
 
123
  logger.info("🗄️ Database initialized successfully")
124
- print("🚨 DEBUG: Database initialization completed")
125
  except Exception as e:
126
  logger.error(f"❌ Database initialization failed: {e}")
127
- import traceback
128
- logger.error("Full traceback:")
129
- logger.error(traceback.format_exc())
130
  # Don't fail startup - continue with empty database
131
 
132
  logger.info("🚀 Backend API available at: http://0.0.0.0:7860")
 
67
  @app.on_event("startup")
68
  async def startup_event():
69
  """Start background services on app startup"""
 
70
  logger.info("✅ Backend server starting...")
71
 
72
  # 🔧 Create necessary directories
 
73
  ensure_directories()
74
  logger.info("📁 Directory structure created")
 
75
 
76
  # 🗄️ Initialize database on startup
 
77
  try:
78
  from backend.database.init_db import init_database
 
 
 
 
 
79
  init_database(reset=False, force=False)
 
 
 
 
 
80
 
81
  # Show database type info
82
+ if os.getenv("SPACE_ID"):
 
 
 
83
  logger.info("🔒 HF Spaces: Using in-memory database for user privacy")
84
  logger.info("📝 Note: Data will be cleared when container restarts")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  else:
86
  logger.info("💾 Local development: Using persistent database")
 
87
 
88
  logger.info("🗄️ Database initialized successfully")
 
89
  except Exception as e:
90
  logger.error(f"❌ Database initialization failed: {e}")
 
 
 
91
  # Don't fail startup - continue with empty database
92
 
93
  logger.info("🚀 Backend API available at: http://0.0.0.0:7860")
backend/database/__init__.py CHANGED
@@ -4,7 +4,7 @@ This package provides database access and utilities for agent monitoring.
4
  """
5
 
6
  import os
7
- from sqlalchemy import create_engine, text
8
  from sqlalchemy.ext.declarative import declarative_base
9
  from sqlalchemy.orm import sessionmaker, scoped_session
10
 
@@ -60,257 +60,10 @@ def init_db():
60
  """Initialize the database by creating all tables."""
61
  Base.metadata.create_all(bind=engine)
62
 
63
- def test_database_connection():
64
- """Test if database connection is working."""
65
- try:
66
- session = SessionLocal()
67
- # Try to create tables
68
- Base.metadata.create_all(bind=engine)
69
-
70
- # Test a simple query
71
- result = session.execute(text("SELECT 1")).fetchone()
72
- print(f"✅ Database connection test successful: {result}")
73
-
74
- session.close()
75
- return True
76
- except Exception as e:
77
- print(f"❌ Database connection test failed: {e}")
78
- import traceback
79
- traceback.print_exc()
80
- return False
81
-
82
- def add_sample_data_for_hf():
83
- """Add simple sample data for HF Spaces using direct SQL."""
84
- print("🚀 add_sample_data_for_hf() started")
85
-
86
- if not os.getenv("SPACE_ID"):
87
- print("❌ Not in HF Spaces environment, skipping sample data")
88
- return # Only run on HF Spaces
89
-
90
- print(f"🔍 HF Spaces environment confirmed:")
91
- print(f" • SPACE_ID: {os.getenv('SPACE_ID')}")
92
- print(f" • Database URL: {DATABASE_URL}")
93
- print(f" • Engine: {engine}")
94
-
95
- try:
96
- print("🔧 Creating database connection...")
97
- # Use direct connection to avoid session issues
98
- with engine.connect() as conn:
99
- print("✅ Database connection successful")
100
-
101
- # Begin transaction for atomic operations
102
- print("🔄 Starting transaction...")
103
- trans = conn.begin()
104
-
105
- try:
106
- print("🔍 Checking existing data...")
107
- # Check if data already exists
108
- result = conn.execute(text("SELECT COUNT(*) FROM traces")).fetchone()
109
- existing_traces = result[0] if result else 0
110
- print(f" • Found {existing_traces} existing traces")
111
-
112
- result = conn.execute(text("SELECT COUNT(*) FROM knowledge_graphs")).fetchone()
113
- existing_kgs = result[0] if result else 0
114
- print(f" • Found {existing_kgs} existing knowledge graphs")
115
-
116
- if existing_traces > 0 or existing_kgs > 0:
117
- print("📊 Sample data already exists, skipping insertion...")
118
- return
119
-
120
- print("🎯 No existing data found. Adding sample data using direct SQL...")
121
- print("📝 Preparing sample trace data...")
122
-
123
- import json
124
- import uuid
125
- import hashlib
126
- from datetime import datetime
127
-
128
- # Simple trace content
129
- sample_trace_content = '''[
130
- {"role": "user", "content": "I need help with my delayed order #12345. This is frustrating!", "timestamp": "2024-08-31T10:00:00Z"},
131
- {"role": "assistant", "name": "RouterAgent", "content": "Let me route you to our order specialist.", "timestamp": "2024-08-31T10:00:15Z"},
132
- {"role": "assistant", "name": "OrderAgent", "content": "I found the issue - weather delay. Your package arrives today at 2 PM.", "timestamp": "2024-08-31T10:01:00Z"},
133
- {"role": "assistant", "name": "CompensationAgent", "content": "I'll authorize a $10 credit for the inconvenience.", "timestamp": "2024-08-31T10:02:00Z", "error": "Payment system unavailable"},
134
- {"role": "assistant", "name": "SupervisorAgent", "content": "I'll manually flag your account for the credit. Technical team notified.", "timestamp": "2024-08-31T10:03:00Z"},
135
- {"role": "user", "content": "Thank you for the quick resolution!", "timestamp": "2024-08-31T10:04:00Z", "sentiment": "satisfied"}
136
- ]'''
137
-
138
- print("🔑 Generating unique identifiers...")
139
- # Generate IDs
140
- trace_id = str(uuid.uuid4())
141
- content_hash = hashlib.sha256(sample_trace_content.encode()).hexdigest()
142
- now = datetime.utcnow()
143
- print(f" • Trace ID: {trace_id}")
144
- print(f" • Content hash: {content_hash[:16]}...")
145
- print(f" • Timestamp: {now}")
146
-
147
- print("💾 Inserting sample trace...")
148
- # Insert trace
149
- conn.execute(
150
- text("""INSERT INTO traces (trace_id, filename, title, description, content, content_hash,
151
- upload_timestamp, update_timestamp, uploader, trace_type, trace_source,
152
- character_count, turn_count, status, tags, trace_metadata)
153
- VALUES (:trace_id, :filename, :title, :description, :content, :content_hash,
154
- :upload_timestamp, :update_timestamp, :uploader, :trace_type, :trace_source,
155
- :character_count, :turn_count, :status, :tags, :trace_metadata)"""),
156
- {
157
- "trace_id": trace_id,
158
- "filename": "sample_demo.json",
159
- "title": "Multi-Agent Customer Service Demo",
160
- "description": "Demo showing agent coordination and error handling",
161
- "content": sample_trace_content,
162
- "content_hash": content_hash,
163
- "upload_timestamp": now,
164
- "update_timestamp": now,
165
- "uploader": "AgentGraph Demo",
166
- "trace_type": "multi_agent",
167
- "trace_source": "sample",
168
- "character_count": len(sample_trace_content),
169
- "turn_count": 6,
170
- "status": "processed",
171
- "tags": '["demo", "customer_service", "multi_agent"]',
172
- "trace_metadata": '{"scenario": "customer_service", "agents": ["RouterAgent", "OrderAgent", "CompensationAgent", "SupervisorAgent"]}'
173
- }
174
- )
175
- print("✅ Trace inserted successfully")
176
-
177
- print("📊 Inserting knowledge graph...")
178
- # Insert knowledge graph with correct field names
179
- conn.execute(
180
- text("""INSERT INTO knowledge_graphs (filename, entity_count, relation_count,
181
- status, trace_id, window_index, window_total, processing_run_id)
182
- VALUES (:filename, :entity_count, :relation_count, :status, :trace_id,
183
- :window_index, :window_total, :processing_run_id)"""),
184
- {
185
- "filename": "demo_kg.json",
186
- "entity_count": 5,
187
- "relation_count": 4,
188
- "status": "completed",
189
- "trace_id": trace_id,
190
- "window_index": 0,
191
- "window_total": 1,
192
- "processing_run_id": "demo_run"
193
- }
194
- )
195
- print("✅ Knowledge graph inserted successfully")
196
-
197
- print("🔍 Retrieving knowledge graph ID...")
198
- # Get KG ID
199
- kg_result = conn.execute(text("SELECT id FROM knowledge_graphs WHERE trace_id = :trace_id"), {"trace_id": trace_id})
200
- kg_row = kg_result.fetchone()
201
- if not kg_row:
202
- raise Exception("Failed to retrieve knowledge graph ID")
203
- kg_id = kg_row[0]
204
- print(f" • Knowledge graph ID: {kg_id}")
205
-
206
- # Insert sample entities with correct field names
207
- entities = [
208
- ("agent_1", "agent", "RouterAgent", '{"role": "routing", "priority_handling": true}'),
209
- ("agent_2", "agent", "OrderAgent", '{"role": "order_tracking", "data_sources": ["db", "api"]}'),
210
- ("agent_3", "agent", "CompensationAgent", '{"role": "compensation", "max_credit": 50}'),
211
- ("agent_4", "agent", "SupervisorAgent", '{"role": "escalation", "override_authority": true}'),
212
- ("issue_1", "issue", "PaymentSystemFailure", '{"severity": "high", "impact": "service_disruption"}')
213
- ]
214
-
215
- print("👥 Inserting sample entities...")
216
- # Insert entities and store their database IDs
217
- entity_db_ids = {}
218
- for i, (entity_id, entity_type, name, properties) in enumerate(entities, 1):
219
- print(f" • Inserting entity {i}/5: {name} ({entity_type})")
220
- conn.execute(
221
- text("""INSERT INTO entities (graph_id, entity_id, type, name, properties)
222
- VALUES (:graph_id, :entity_id, :type, :name, :properties)"""),
223
- {
224
- "graph_id": kg_id,
225
- "entity_id": entity_id,
226
- "type": entity_type,
227
- "name": name,
228
- "properties": properties
229
- }
230
- )
231
- # Get the database ID for this entity
232
- result = conn.execute(text("SELECT id FROM entities WHERE graph_id = :graph_id AND entity_id = :entity_id"),
233
- {"graph_id": kg_id, "entity_id": entity_id})
234
- row = result.fetchone()
235
- if not row:
236
- raise Exception(f"Failed to retrieve database ID for entity: {entity_id}")
237
- entity_db_ids[entity_id] = row[0]
238
- print(f" → Entity DB ID: {entity_db_ids[entity_id]}")
239
- print("✅ All entities inserted successfully")
240
-
241
- print("🔗 Inserting sample relations...")
242
- # Insert sample relations using database IDs as foreign keys
243
- relations = [
244
- ("rel_1", "agent_1", "routes_to", "agent_2", '{"priority": "high", "success": true}'),
245
- ("rel_2", "agent_2", "escalates_to", "agent_3", '{"reason": "compensation_needed"}'),
246
- ("rel_3", "agent_3", "escalates_to", "agent_4", '{"reason": "system_error"}'),
247
- ("rel_4", "agent_4", "resolves", "issue_1", '{"method": "manual_override"}')
248
- ]
249
-
250
- for i, (relation_id, from_entity, relation_type, to_entity, properties) in enumerate(relations, 1):
251
- source_db_id = entity_db_ids[from_entity]
252
- target_db_id = entity_db_ids[to_entity]
253
- print(f" • Inserting relation {i}/4: {from_entity} --{relation_type}--> {to_entity}")
254
- print(f" → Source DB ID: {source_db_id}, Target DB ID: {target_db_id}")
255
- conn.execute(
256
- text("""INSERT INTO relations (graph_id, relation_id, type, source_id, target_id, properties)
257
- VALUES (:graph_id, :relation_id, :type, :source_id, :target_id, :properties)"""),
258
- {
259
- "graph_id": kg_id,
260
- "relation_id": relation_id,
261
- "type": relation_type,
262
- "source_id": source_db_id,
263
- "target_id": target_db_id,
264
- "properties": properties
265
- }
266
- )
267
- print("✅ All relations inserted successfully")
268
-
269
- print("💾 Committing transaction...")
270
- # Commit transaction
271
- trans.commit()
272
- print("✅ Transaction committed successfully")
273
-
274
- print("🔍 Verifying final data counts...")
275
- # Verify data
276
- final_traces = conn.execute(text("SELECT COUNT(*) FROM traces")).fetchone()[0]
277
- final_kgs = conn.execute(text("SELECT COUNT(*) FROM knowledge_graphs")).fetchone()[0]
278
- final_entities = conn.execute(text("SELECT COUNT(*) FROM entities")).fetchone()[0]
279
- final_relations = conn.execute(text("SELECT COUNT(*) FROM relations")).fetchone()[0]
280
-
281
- print("🎉 Sample data insertion completed successfully!")
282
- print(f" 📊 Final counts:")
283
- print(f" • Traces: {final_traces}")
284
- print(f" • Knowledge graphs: {final_kgs}")
285
- print(f" • Entities: {final_entities}")
286
- print(f" • Relations: {final_relations}")
287
- print(f" 🎯 Expected: 1 trace, 1 KG, 5 entities, 4 relations")
288
-
289
- # Additional verification
290
- if final_traces == 1 and final_kgs == 1 and final_entities == 5 and final_relations == 4:
291
- print("✅ All counts match expected values!")
292
- else:
293
- print("⚠️ Counts don't match expected values")
294
-
295
- except Exception as e:
296
- print(f"❌ Error during transaction: {e}")
297
- trans.rollback()
298
- print("🔄 Transaction rolled back")
299
- raise e
300
-
301
- except Exception as e:
302
- print(f"❌ Failed to add sample data: {e}")
303
- import traceback
304
- print("Full error traceback:")
305
- traceback.print_exc()
306
- print("🚨 Sample data loading failed completely")
307
-
308
  __all__ = [
309
  'get_db',
310
  'models',
311
  'init_db',
312
- 'test_database_connection',
313
- 'add_sample_data_for_hf',
314
  'save_knowledge_graph',
315
  'update_knowledge_graph_status',
316
  'get_knowledge_graph',
 
4
  """
5
 
6
  import os
7
+ from sqlalchemy import create_engine
8
  from sqlalchemy.ext.declarative import declarative_base
9
  from sqlalchemy.orm import sessionmaker, scoped_session
10
 
 
60
  """Initialize the database by creating all tables."""
61
  Base.metadata.create_all(bind=engine)
62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  __all__ = [
64
  'get_db',
65
  'models',
66
  'init_db',
 
 
67
  'save_knowledge_graph',
68
  'update_knowledge_graph_status',
69
  'get_knowledge_graph',
backend/database/init_db.py CHANGED
@@ -43,233 +43,6 @@ def confirm_reset():
43
  response = input("Are you sure you want to continue? (yes/no): ")
44
  return response.lower() in ["yes", "y"]
45
 
46
- def add_sample_data(cursor, conn):
47
- """Add sample trace and knowledge graph data for demonstration purposes."""
48
- import json
49
- import uuid
50
- import hashlib
51
-
52
- # Sample trace content - Multi-agent customer service scenario
53
- sample_trace_content = '''[
54
- {
55
- "role": "user",
56
- "content": "I need help with my order #12345. It was supposed to arrive yesterday but I still haven't received it. This is very frustrating as I need it for an important meeting tomorrow.",
57
- "timestamp": "2024-08-31T10:00:00Z",
58
- "metadata": {"priority": "high", "emotion": "frustrated"}
59
- },
60
- {
61
- "role": "assistant",
62
- "name": "RouterAgent",
63
- "content": "I understand your frustration about the delayed order. Let me route this to our order tracking specialist who can provide detailed information about your shipment.",
64
- "timestamp": "2024-08-31T10:00:15Z",
65
- "actions": [{"type": "route", "target": "OrderTrackingAgent", "priority": "high"}]
66
- },
67
- {
68
- "role": "assistant",
69
- "name": "OrderTrackingAgent",
70
- "content": "I'll look up order #12345 immediately. Checking our tracking system... I found the issue - there was a delay at the distribution center due to weather conditions. Your package is currently in transit and should arrive by 2 PM today.",
71
- "timestamp": "2024-08-31T10:01:00Z",
72
- "actions": [
73
- {"type": "database_query", "query": "SELECT * FROM orders WHERE order_id = '12345'"},
74
- {"type": "api_call", "endpoint": "shipping_tracker", "response": "in_transit"}
75
- ],
76
- "findings": {
77
- "issue": "Weather delay at distribution center",
78
- "current_status": "In transit",
79
- "estimated_delivery": "2024-08-31T14:00:00Z"
80
- }
81
- },
82
- {
83
- "role": "assistant",
84
- "name": "CompensationAgent",
85
- "content": "Given this delay caused inconvenience, I'm authorizing a $10 credit to your account and providing free expedited shipping on your next order. I'm also escalating this to ensure better communication about delays in the future.",
86
- "timestamp": "2024-08-31T10:02:00Z",
87
- "actions": [
88
- {"type": "credit_account", "amount": 10, "reason": "delivery_delay"},
89
- {"type": "add_benefit", "type": "free_expedited_shipping"},
90
- {"type": "escalate", "department": "operations", "issue": "delayed_delivery_communication"}
91
- ],
92
- "error": "Failed to process account credit - payment system temporarily unavailable"
93
- },
94
- {
95
- "role": "assistant",
96
- "name": "SupervisorAgent",
97
- "content": "I see there was an error processing the account credit. I'm manually flagging your account for the $10 credit and expedited shipping benefit. Our technical team will resolve the payment system issue. You should see the credit within 24 hours.",
98
- "timestamp": "2024-08-31T10:03:00Z",
99
- "actions": [
100
- {"type": "manual_flag", "account_credit": 10},
101
- {"type": "technical_escalation", "system": "payment_processor"},
102
- {"type": "follow_up_schedule", "timeframe": "24_hours"}
103
- ],
104
- "optimization_opportunity": "Implement payment system redundancy to prevent credit processing failures"
105
- },
106
- {
107
- "role": "user",
108
- "content": "Thank you for the quick response and compensation. I appreciate you taking care of this issue promptly.",
109
- "timestamp": "2024-08-31T10:04:00Z",
110
- "sentiment": "satisfied"
111
- },
112
- {
113
- "role": "assistant",
114
- "name": "QualityAgent",
115
- "content": "Session completed successfully. Customer satisfaction restored. Identified system improvement: need better proactive communication about shipping delays and payment system reliability backup.",
116
- "timestamp": "2024-08-31T10:05:00Z",
117
- "session_analysis": {
118
- "resolution_time": "5 minutes",
119
- "customer_satisfaction": "high",
120
- "agents_involved": 4,
121
- "system_errors": 1,
122
- "optimization_recommendations": [
123
- "Implement proactive delay notifications",
124
- "Add payment system redundancy",
125
- "Improve agent handoff protocols"
126
- ]
127
- }
128
- }
129
- ]'''
130
-
131
- # Generate trace metadata
132
- trace_id = str(uuid.uuid4())
133
- content_hash = hashlib.sha256(sample_trace_content.encode()).hexdigest()
134
-
135
- # Insert sample trace
136
- cursor.execute('''
137
- INSERT INTO traces (
138
- trace_id, filename, title, description, content, content_hash,
139
- uploader, trace_type, trace_source, character_count, turn_count,
140
- status, tags, trace_metadata
141
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
142
- ''', (
143
- trace_id,
144
- "sample_customer_service.json",
145
- "Multi-Agent Customer Service Resolution",
146
- "Demonstration of multi-agent system handling customer complaint with error handling and optimization opportunities",
147
- sample_trace_content,
148
- content_hash,
149
- "AgentGraph Demo",
150
- "multi_agent",
151
- "sample",
152
- len(sample_trace_content),
153
- 6, # Number of turns
154
- "processed",
155
- json.dumps(["sample", "customer_service", "multi_agent", "error_handling", "optimization"]),
156
- json.dumps({
157
- "scenario": "customer_service",
158
- "agents": ["RouterAgent", "OrderTrackingAgent", "CompensationAgent", "SupervisorAgent", "QualityAgent"],
159
- "domain": "e_commerce",
160
- "complexity": "high"
161
- })
162
- ))
163
-
164
- # Create sample knowledge graph
165
- kg_id = 1 # First knowledge graph
166
-
167
- # Insert knowledge graph metadata
168
- cursor.execute('''
169
- INSERT INTO knowledge_graphs (
170
- filename, creator, entity_count, relation_count, namespace,
171
- system_name, system_summary, status, trace_id, window_index,
172
- window_total, processing_run_id
173
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
174
- ''', (
175
- "sample_customer_service_kg.json",
176
- "AgentGraph Demo",
177
- 15, # Will match actual entities inserted
178
- 18, # Will match actual relations inserted
179
- "customer_service_demo",
180
- "Multi-Agent Customer Service System",
181
- "An intelligent customer service system featuring multiple specialized agents working together to resolve customer issues, handle errors, and identify optimization opportunities. The system demonstrates sophisticated agent coordination, error recovery mechanisms, and continuous improvement processes.",
182
- "completed",
183
- trace_id,
184
- 0,
185
- 1,
186
- "sample_demo_run"
187
- ))
188
-
189
- # Sample entities with rich properties
190
- entities_data = [
191
- # Agents
192
- ("agent_1", "agent", "RouterAgent", {"role": "traffic_routing", "specialization": "request_classification", "priority_handling": True}),
193
- ("agent_2", "agent", "OrderTrackingAgent", {"role": "order_management", "specialization": "shipping_tracking", "data_sources": ["internal_db", "shipping_apis"]}),
194
- ("agent_3", "agent", "CompensationAgent", {"role": "customer_retention", "specialization": "compensation_authorization", "max_credit_limit": 50}),
195
- ("agent_4", "agent", "SupervisorAgent", {"role": "escalation_handling", "specialization": "system_error_recovery", "override_authority": True}),
196
- ("agent_5", "agent", "QualityAgent", {"role": "quality_assurance", "specialization": "session_analysis", "improvement_tracking": True}),
197
-
198
- # Systems and Tools
199
- ("system_1", "system", "OrderDatabase", {"type": "database", "function": "order_storage", "performance": "high"}),
200
- ("system_2", "system", "ShippingTracker", {"type": "external_api", "function": "package_tracking", "reliability": "99.5%"}),
201
- ("system_3", "system", "PaymentProcessor", {"type": "financial_system", "function": "account_credits", "status": "temporarily_unavailable"}),
202
-
203
- # Issues and Problems
204
- ("issue_1", "issue", "DeliveryDelay", {"severity": "medium", "cause": "weather_conditions", "impact": "customer_satisfaction"}),
205
- ("issue_2", "issue", "PaymentSystemFailure", {"severity": "high", "cause": "system_unavailability", "impact": "compensation_processing"}),
206
-
207
- # Actions and Processes
208
- ("action_1", "action", "RouteRequest", {"type": "traffic_management", "success_rate": "98%"}),
209
- ("action_2", "action", "TrackPackage", {"type": "information_retrieval", "data_sources": 2}),
210
- ("action_3", "action", "AuthorizeCredit", {"type": "financial_transaction", "approval_required": True}),
211
- ("action_4", "action", "EscalateIssue", {"type": "process_escalation", "department": "operations"}),
212
-
213
- # Improvements and Optimizations
214
- ("improvement_1", "improvement", "ProactiveNotifications", {"priority": "high", "implementation_effort": "medium", "expected_impact": "reduce_complaints_by_30%"})
215
- ]
216
-
217
- # Insert entities
218
- for entity_id, entity_type, name, properties in entities_data:
219
- cursor.execute('''
220
- INSERT INTO entities (
221
- graph_id, entity_id, type, name, properties, knowledge_graph_namespace
222
- ) VALUES (?, ?, ?, ?, ?, ?)
223
- ''', (kg_id, entity_id, entity_type, name, json.dumps(properties), "customer_service_demo"))
224
-
225
- # Sample relations showing complex interactions
226
- relations_data = [
227
- # Agent interactions
228
- ("rel_1", "agent_1", "routes_to", "agent_2", {"context": "order_inquiry", "priority": "high", "success": True}),
229
- ("rel_2", "agent_2", "collaborates_with", "agent_3", {"context": "customer_compensation", "coordination": "automated"}),
230
- ("rel_3", "agent_3", "escalates_to", "agent_4", {"context": "system_error", "escalation_reason": "payment_failure"}),
231
- ("rel_4", "agent_4", "coordinates_with", "agent_5", {"context": "quality_improvement", "outcome": "optimization_identified"}),
232
-
233
- # System interactions
234
- ("rel_5", "agent_2", "queries", "system_1", {"query_type": "order_lookup", "response_time": "0.5s", "success": True}),
235
- ("rel_6", "agent_2", "calls", "system_2", {"api_endpoint": "track_package", "response_time": "1.2s", "success": True}),
236
- ("rel_7", "agent_3", "attempts_transaction", "system_3", {"transaction_type": "credit", "amount": 10, "success": False}),
237
-
238
- # Problem identification and resolution
239
- ("rel_8", "agent_2", "identifies", "issue_1", {"detection_method": "system_query", "severity_assessed": "medium"}),
240
- ("rel_9", "agent_3", "encounters", "issue_2", {"error_handling": "automatic_escalation", "recovery_action": "manual_override"}),
241
- ("rel_10", "agent_4", "resolves", "issue_2", {"resolution_method": "manual_flag", "permanent_fix": False}),
242
-
243
- # Action execution
244
- ("rel_11", "agent_1", "executes", "action_1", {"execution_time": "15s", "outcome": "successful_routing"}),
245
- ("rel_12", "agent_2", "performs", "action_2", {"data_retrieved": True, "accuracy": "100%"}),
246
- ("rel_13", "agent_3", "initiates", "action_3", {"authorization_level": "standard", "blocked_by": "system_error"}),
247
- ("rel_14", "agent_4", "triggers", "action_4", {"escalation_department": "operations", "follow_up_required": True}),
248
-
249
- # Improvement opportunities
250
- ("rel_15", "agent_5", "identifies", "improvement_1", {"analysis_method": "session_review", "confidence": "high"}),
251
- ("rel_16", "issue_1", "leads_to", "improvement_1", {"causal_relationship": "direct", "prevention_potential": "high"}),
252
- ("rel_17", "issue_2", "exposes", "system_3", {"vulnerability_type": "single_point_of_failure", "risk_level": "high"}),
253
- ("rel_18", "improvement_1", "would_prevent", "issue_1", {"prevention_mechanism": "early_warning", "effectiveness": "85%"})
254
- ]
255
-
256
- # Insert relations
257
- for relation_id, from_entity, relation_type, to_entity, properties in relations_data:
258
- cursor.execute('''
259
- INSERT INTO relations (
260
- graph_id, relation_id, from_entity_id, relation_type, to_entity_id,
261
- properties, knowledge_graph_namespace
262
- ) VALUES (?, ?, ?, ?, ?, ?, ?)
263
- ''', (kg_id, relation_id, from_entity, relation_type, to_entity, json.dumps(properties), "customer_service_demo"))
264
-
265
- # Commit the sample data
266
- conn.commit()
267
-
268
- logger.info("✅ Sample data added successfully!")
269
- logger.info(f" • 1 sample trace: Multi-Agent Customer Service Resolution")
270
- logger.info(f" • 1 knowledge graph with {len(entities_data)} entities and {len(relations_data)} relations")
271
- logger.info(f" • Demonstrates: Multi-agent coordination, error handling, optimization opportunities")
272
-
273
  def init_database(reset=False, force=False):
274
  """
275
  Initialize the database with the required tables.
@@ -557,11 +330,6 @@ def init_database(reset=False, force=False):
557
 
558
  logger.info(f"Database contains: {kg_count} knowledge graphs, {entity_count} entities, {relation_count} relations, {trace_count} traces")
559
 
560
- # Add sample data for HF Spaces if database is empty
561
- if os.getenv("SPACE_ID") and kg_count == 0 and trace_count == 0:
562
- logger.info("🎯 HF Spaces detected with empty database - adding sample data...")
563
- add_sample_data(cursor, conn)
564
-
565
  # Close connection
566
  conn.close()
567
 
 
43
  response = input("Are you sure you want to continue? (yes/no): ")
44
  return response.lower() in ["yes", "y"]
45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  def init_database(reset=False, force=False):
47
  """
48
  Initialize the database with the required tables.
 
330
 
331
  logger.info(f"Database contains: {kg_count} knowledge graphs, {entity_count} entities, {relation_count} relations, {trace_count} traces")
332
 
 
 
 
 
 
333
  # Close connection
334
  conn.close()
335