#!/usr/bin/env python
"""
Database initialization and reset utility.

Creates or resets the database with the three required tables:
- knowledge_graphs (with status column)
- entities
- relations
"""
import os
import sys
import sqlite3
import argparse
import logging
import shutil
import time

# Configure logging (backend/database/init_db.py -> backend/database/ -> backend/ -> project_root/ -> logs/)
LOG_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "logs")

# Build the handler list defensively: if the logs directory cannot be created
# (read-only filesystem, sandboxed deployment), fall back to console-only
# logging instead of crashing at import time.
_handlers = [logging.StreamHandler(sys.stdout)]
try:
    os.makedirs(LOG_DIR, exist_ok=True)
    _handlers.append(logging.FileHandler(os.path.join(LOG_DIR, 'agent_monitoring.log')))
except OSError:
    pass

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=_handlers
)
logger = logging.getLogger(__name__)

# Get the absolute path to the project root directory
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Database path with absolute path
DB_PATH = os.path.join(ROOT_DIR, 'datasets/db/agent_monitoring.db')


def confirm_reset():
    """Ask for user confirmation before force resetting the database.

    Returns:
        bool: True if the user answered "yes" or "y" (case-insensitive).
    """
    print("\nWARNING: This will DELETE ALL data in the database.")
    print("All knowledge graphs, entities, and relations will be permanently lost.")
    response = input("Are you sure you want to continue? (yes/no): ")
    return response.lower() in ["yes", "y"]


def add_sample_data(cursor, conn):
    """Add sample trace and knowledge graph data for demonstration purposes.

    Inserts one demo trace, one knowledge graph, 15 entities and 18 relations
    into an already-created schema (see init_database). Commits on success.

    Args:
        cursor: sqlite3 cursor on the target database.
        conn: sqlite3 connection (used only to commit).
    """
    import json
    import uuid
    import hashlib

    # Sample trace content - Multi-agent customer service scenario.
    # NOTE: the duplicate "type" key in the add_benefit action is preserved
    # verbatim from the original sample payload.
    sample_trace_content = '''[
  {
    "role": "user",
    "content": "I need help with my order #12345. It was supposed to arrive yesterday but I still haven't received it. This is very frustrating as I need it for an important meeting tomorrow.",
    "timestamp": "2024-08-31T10:00:00Z",
    "metadata": {"priority": "high", "emotion": "frustrated"}
  },
  {
    "role": "assistant",
    "name": "RouterAgent",
    "content": "I understand your frustration about the delayed order. Let me route this to our order tracking specialist who can provide detailed information about your shipment.",
    "timestamp": "2024-08-31T10:00:15Z",
    "actions": [{"type": "route", "target": "OrderTrackingAgent", "priority": "high"}]
  },
  {
    "role": "assistant",
    "name": "OrderTrackingAgent",
    "content": "I'll look up order #12345 immediately. Checking our tracking system... I found the issue - there was a delay at the distribution center due to weather conditions. Your package is currently in transit and should arrive by 2 PM today.",
    "timestamp": "2024-08-31T10:01:00Z",
    "actions": [
      {"type": "database_query", "query": "SELECT * FROM orders WHERE order_id = '12345'"},
      {"type": "api_call", "endpoint": "shipping_tracker", "response": "in_transit"}
    ],
    "findings": {
      "issue": "Weather delay at distribution center",
      "current_status": "In transit",
      "estimated_delivery": "2024-08-31T14:00:00Z"
    }
  },
  {
    "role": "assistant",
    "name": "CompensationAgent",
    "content": "Given this delay caused inconvenience, I'm authorizing a $10 credit to your account and providing free expedited shipping on your next order. I'm also escalating this to ensure better communication about delays in the future.",
    "timestamp": "2024-08-31T10:02:00Z",
    "actions": [
      {"type": "credit_account", "amount": 10, "reason": "delivery_delay"},
      {"type": "add_benefit", "type": "free_expedited_shipping"},
      {"type": "escalate", "department": "operations", "issue": "delayed_delivery_communication"}
    ],
    "error": "Failed to process account credit - payment system temporarily unavailable"
  },
  {
    "role": "assistant",
    "name": "SupervisorAgent",
    "content": "I see there was an error processing the account credit. I'm manually flagging your account for the $10 credit and expedited shipping benefit. Our technical team will resolve the payment system issue. You should see the credit within 24 hours.",
    "timestamp": "2024-08-31T10:03:00Z",
    "actions": [
      {"type": "manual_flag", "account_credit": 10},
      {"type": "technical_escalation", "system": "payment_processor"},
      {"type": "follow_up_schedule", "timeframe": "24_hours"}
    ],
    "optimization_opportunity": "Implement payment system redundancy to prevent credit processing failures"
  },
  {
    "role": "user",
    "content": "Thank you for the quick response and compensation. I appreciate you taking care of this issue promptly.",
    "timestamp": "2024-08-31T10:04:00Z",
    "sentiment": "satisfied"
  },
  {
    "role": "assistant",
    "name": "QualityAgent",
    "content": "Session completed successfully. Customer satisfaction restored. Identified system improvement: need better proactive communication about shipping delays and payment system reliability backup.",
    "timestamp": "2024-08-31T10:05:00Z",
    "session_analysis": {
      "resolution_time": "5 minutes",
      "customer_satisfaction": "high",
      "agents_involved": 4,
      "system_errors": 1,
      "optimization_recommendations": [
        "Implement proactive delay notifications",
        "Add payment system redundancy",
        "Improve agent handoff protocols"
      ]
    }
  }
]'''

    # Generate trace metadata
    trace_id = str(uuid.uuid4())
    content_hash = hashlib.sha256(sample_trace_content.encode()).hexdigest()

    # Insert sample trace
    cursor.execute('''
        INSERT INTO traces (
            trace_id, filename, title, description, content, content_hash,
            uploader, trace_type, trace_source, character_count, turn_count,
            status, tags, trace_metadata
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    ''', (
        trace_id,
        "sample_customer_service.json",
        "Multi-Agent Customer Service Resolution",
        "Demonstration of multi-agent system handling customer complaint with error handling and optimization opportunities",
        sample_trace_content,
        content_hash,
        "AgentGraph Demo",
        "multi_agent",
        "sample",
        len(sample_trace_content),
        6,  # Number of turns
        "processed",
        json.dumps(["sample", "customer_service", "multi_agent", "error_handling", "optimization"]),
        json.dumps({
            "scenario": "customer_service",
            "agents": ["RouterAgent", "OrderTrackingAgent", "CompensationAgent", "SupervisorAgent", "QualityAgent"],
            "domain": "e_commerce",
            "complexity": "high"
        })
    ))

    # Insert knowledge graph metadata
    cursor.execute('''
        INSERT INTO knowledge_graphs (
            filename, creator, entity_count, relation_count, namespace,
            system_name, system_summary, status, trace_id, window_index,
            window_total, processing_run_id
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    ''', (
        "sample_customer_service_kg.json",
        "AgentGraph Demo",
        15,  # Will match actual entities inserted
        18,  # Will match actual relations inserted
        "customer_service_demo",
        "Multi-Agent Customer Service System",
        "An intelligent customer service system featuring multiple specialized agents working together to resolve customer issues, handle errors, and identify optimization opportunities. The system demonstrates sophisticated agent coordination, error recovery mechanisms, and continuous improvement processes.",
        "completed",
        trace_id,
        0,
        1,
        "sample_demo_run"
    ))

    # BUGFIX: was hard-coded to 1, which is wrong whenever the AUTOINCREMENT
    # counter has advanced (e.g. rows were previously inserted and deleted).
    kg_id = cursor.lastrowid

    # Sample entities with rich properties
    entities_data = [
        # Agents
        ("agent_1", "agent", "RouterAgent", {"role": "traffic_routing", "specialization": "request_classification", "priority_handling": True}),
        ("agent_2", "agent", "OrderTrackingAgent", {"role": "order_management", "specialization": "shipping_tracking", "data_sources": ["internal_db", "shipping_apis"]}),
        ("agent_3", "agent", "CompensationAgent", {"role": "customer_retention", "specialization": "compensation_authorization", "max_credit_limit": 50}),
        ("agent_4", "agent", "SupervisorAgent", {"role": "escalation_handling", "specialization": "system_error_recovery", "override_authority": True}),
        ("agent_5", "agent", "QualityAgent", {"role": "quality_assurance", "specialization": "session_analysis", "improvement_tracking": True}),
        # Systems and Tools
        ("system_1", "system", "OrderDatabase", {"type": "database", "function": "order_storage", "performance": "high"}),
        ("system_2", "system", "ShippingTracker", {"type": "external_api", "function": "package_tracking", "reliability": "99.5%"}),
        ("system_3", "system", "PaymentProcessor", {"type": "financial_system", "function": "account_credits", "status": "temporarily_unavailable"}),
        # Issues and Problems
        ("issue_1", "issue", "DeliveryDelay", {"severity": "medium", "cause": "weather_conditions", "impact": "customer_satisfaction"}),
        ("issue_2", "issue", "PaymentSystemFailure", {"severity": "high", "cause": "system_unavailability", "impact": "compensation_processing"}),
        # Actions and Processes
        ("action_1", "action", "RouteRequest", {"type": "traffic_management", "success_rate": "98%"}),
        ("action_2", "action", "TrackPackage", {"type": "information_retrieval", "data_sources": 2}),
        ("action_3", "action", "AuthorizeCredit", {"type": "financial_transaction", "approval_required": True}),
        ("action_4", "action", "EscalateIssue", {"type": "process_escalation", "department": "operations"}),
        # Improvements and Optimizations
        ("improvement_1", "improvement", "ProactiveNotifications", {"priority": "high", "implementation_effort": "medium", "expected_impact": "reduce_complaints_by_30%"})
    ]

    # Insert entities, remembering each row's database id so relations can
    # reference entities through the integer FK columns declared on the
    # relations table (source_id/target_id -> entities.id).
    entity_row_ids = {}
    for entity_id, entity_type, name, properties in entities_data:
        cursor.execute('''
            INSERT INTO entities (
                graph_id, entity_id, type, name, properties, knowledge_graph_namespace
            ) VALUES (?, ?, ?, ?, ?, ?)
        ''', (kg_id, entity_id, entity_type, name, json.dumps(properties), "customer_service_demo"))
        entity_row_ids[entity_id] = cursor.lastrowid

    # Sample relations showing complex interactions
    # (relation_id, from entity_id, relation type, to entity_id, properties)
    relations_data = [
        # Agent interactions
        ("rel_1", "agent_1", "routes_to", "agent_2", {"context": "order_inquiry", "priority": "high", "success": True}),
        ("rel_2", "agent_2", "collaborates_with", "agent_3", {"context": "customer_compensation", "coordination": "automated"}),
        ("rel_3", "agent_3", "escalates_to", "agent_4", {"context": "system_error", "escalation_reason": "payment_failure"}),
        ("rel_4", "agent_4", "coordinates_with", "agent_5", {"context": "quality_improvement", "outcome": "optimization_identified"}),
        # System interactions
        ("rel_5", "agent_2", "queries", "system_1", {"query_type": "order_lookup", "response_time": "0.5s", "success": True}),
        ("rel_6", "agent_2", "calls", "system_2", {"api_endpoint": "track_package", "response_time": "1.2s", "success": True}),
        ("rel_7", "agent_3", "attempts_transaction", "system_3", {"transaction_type": "credit", "amount": 10, "success": False}),
        # Problem identification and resolution
        ("rel_8", "agent_2", "identifies", "issue_1", {"detection_method": "system_query", "severity_assessed": "medium"}),
        ("rel_9", "agent_3", "encounters", "issue_2", {"error_handling": "automatic_escalation", "recovery_action": "manual_override"}),
        ("rel_10", "agent_4", "resolves", "issue_2", {"resolution_method": "manual_flag", "permanent_fix": False}),
        # Action execution
        ("rel_11", "agent_1", "executes", "action_1", {"execution_time": "15s", "outcome": "successful_routing"}),
        ("rel_12", "agent_2", "performs", "action_2", {"data_retrieved": True, "accuracy": "100%"}),
        ("rel_13", "agent_3", "initiates", "action_3", {"authorization_level": "standard", "blocked_by": "system_error"}),
        ("rel_14", "agent_4", "triggers", "action_4", {"escalation_department": "operations", "follow_up_required": True}),
        # Improvement opportunities
        ("rel_15", "agent_5", "identifies", "improvement_1", {"analysis_method": "session_review", "confidence": "high"}),
        ("rel_16", "issue_1", "leads_to", "improvement_1", {"causal_relationship": "direct", "prevention_potential": "high"}),
        ("rel_17", "issue_2", "exposes", "system_3", {"vulnerability_type": "single_point_of_failure", "risk_level": "high"}),
        ("rel_18", "improvement_1", "would_prevent", "issue_1", {"prevention_mechanism": "early_warning", "effectiveness": "85%"})
    ]

    # Insert relations.
    # BUGFIX: the previous version inserted into columns named
    # from_entity_id/relation_type/to_entity_id, which the relations table
    # created by init_database does not define (it has type, source_id and
    # target_id) — every sample-relation insert raised OperationalError on a
    # freshly initialized database. We now insert into the declared columns,
    # resolving entity string ids to their database row ids.
    for relation_id, from_entity, relation_type, to_entity, properties in relations_data:
        cursor.execute('''
            INSERT INTO relations (
                graph_id, relation_id, type, source_id, target_id,
                properties, knowledge_graph_namespace
            ) VALUES (?, ?, ?, ?, ?, ?, ?)
        ''', (kg_id, relation_id, relation_type,
              entity_row_ids[from_entity], entity_row_ids[to_entity],
              json.dumps(properties), "customer_service_demo"))

    # Commit the sample data
    conn.commit()

    logger.info("✅ Sample data added successfully!")
    logger.info(f"   • 1 sample trace: Multi-Agent Customer Service Resolution")
    logger.info(f"   • 1 knowledge graph with {len(entities_data)} entities and {len(relations_data)} relations")
    logger.info(f"   • Demonstrates: Multi-agent coordination, error handling, optimization opportunities")


def init_database(reset=False, force=False):
    """
    Initialize the database with the required tables.

    Args:
        reset: If True, drop and recreate the tables
        force: If True, delete the database file completely
    """
    # Make sure the directory exists
    os.makedirs(os.path.dirname(DB_PATH), exist_ok=True)

    # Check if database exists
    db_exists = os.path.exists(DB_PATH) and os.path.getsize(DB_PATH) > 0

    # Handle reset with file deletion if requested
    if db_exists and reset and force:
        logger.info(f"Found existing database at {DB_PATH}")

        # Create backup
        backup_path = f"{DB_PATH}.backup_{int(time.time())}"
        logger.info(f"Creating backup at {backup_path}")
        shutil.copy2(DB_PATH, backup_path)

        # Delete the database file
        logger.info("Deleting database file")
        os.remove(DB_PATH)
        db_exists = False
        logger.info("Database file deleted")

    # Connect to the database; close it in all cases (previously the
    # connection leaked if any statement below raised).
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    try:
        # Drop tables if reset requested
        if reset and db_exists:
            logger.info("Dropping existing tables")
            cursor.execute("DROP TABLE IF EXISTS relations")
            cursor.execute("DROP TABLE IF EXISTS entities")
            cursor.execute("DROP TABLE IF EXISTS knowledge_graphs")

        # Create tables
        logger.info("Creating tables")
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS knowledge_graphs (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                filename VARCHAR(255) UNIQUE,
                creation_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                update_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                creator VARCHAR(255),
                entity_count INTEGER DEFAULT 0,
                relation_count INTEGER DEFAULT 0,
                namespace VARCHAR(255),
                system_name VARCHAR(255),
                system_summary TEXT,
                graph_data TEXT,
                status VARCHAR(50) DEFAULT 'created',
                trace_id VARCHAR(36),
                window_index INTEGER,
                window_total INTEGER,
                window_start_char INTEGER,
                window_end_char INTEGER,
                processing_run_id VARCHAR(255)
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS traces (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                trace_id VARCHAR(36) UNIQUE,
                filename VARCHAR(255),
                title VARCHAR(255),
                description TEXT,
                content TEXT,
                content_hash VARCHAR(64),
                upload_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                update_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                uploader VARCHAR(255),
                trace_type VARCHAR(50),
                trace_source VARCHAR(50),
                character_count INTEGER DEFAULT 0,
                turn_count INTEGER DEFAULT 0,
                status VARCHAR(50) DEFAULT 'uploaded',
                processing_method VARCHAR(50),
                tags TEXT,
                trace_metadata TEXT
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS entities (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                graph_id INTEGER,
                entity_id VARCHAR(255),
                type VARCHAR(255),
                name VARCHAR(255),
                properties TEXT,
                knowledge_graph_namespace VARCHAR(255),
                FOREIGN KEY (graph_id) REFERENCES knowledge_graphs(id),
                UNIQUE (graph_id, entity_id)
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS relations (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                graph_id INTEGER,
                relation_id VARCHAR(255),
                type VARCHAR(255),
                source_id INTEGER,
                target_id INTEGER,
                properties TEXT,
                knowledge_graph_namespace VARCHAR(255),
                FOREIGN KEY (graph_id) REFERENCES knowledge_graphs(id),
                FOREIGN KEY (source_id) REFERENCES entities(id),
                FOREIGN KEY (target_id) REFERENCES entities(id),
                UNIQUE (graph_id, relation_id)
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS prompt_reconstructions (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                knowledge_graph_id INTEGER,
                relation_id VARCHAR(255),
                reconstructed_prompt TEXT,
                dependencies TEXT,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (knowledge_graph_id) REFERENCES knowledge_graphs(id)
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS perturbation_tests (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                knowledge_graph_id INTEGER NOT NULL,
                prompt_reconstruction_id INTEGER NOT NULL,
                relation_id VARCHAR(255) NOT NULL,
                perturbation_type VARCHAR(50) NOT NULL,
                perturbation_set_id VARCHAR(64),
                test_result JSON,
                perturbation_score FLOAT,
                test_metadata JSON,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (knowledge_graph_id) REFERENCES knowledge_graphs(id),
                FOREIGN KEY (prompt_reconstruction_id) REFERENCES prompt_reconstructions(id)
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS observability_connections (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                connection_id VARCHAR(36) UNIQUE,
                platform VARCHAR(50) NOT NULL,
                public_key TEXT NOT NULL,
                secret_key TEXT,
                host VARCHAR(255),
                projects TEXT,
                status VARCHAR(50) DEFAULT 'connected',
                connected_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                last_sync DATETIME,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS fetched_traces (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                trace_id VARCHAR(255) NOT NULL,
                name VARCHAR(255) NOT NULL,
                platform VARCHAR(50) NOT NULL,
                project_name VARCHAR(255),
                connection_id VARCHAR(36) NOT NULL,
                data TEXT,
                fetched_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                imported BOOLEAN DEFAULT 0,
                imported_at DATETIME,
                imported_trace_id VARCHAR(36),
                FOREIGN KEY (connection_id) REFERENCES observability_connections(connection_id),
                UNIQUE (trace_id, connection_id)
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS causal_analyses (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                knowledge_graph_id INTEGER NOT NULL,
                perturbation_set_id VARCHAR(64) NOT NULL,
                analysis_method VARCHAR(50) NOT NULL,
                analysis_result JSON,
                causal_score FLOAT,
                analysis_metadata JSON,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (knowledge_graph_id) REFERENCES knowledge_graphs(id)
            )
        ''')

        # Create indexes
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_knowledge_graphs_filename ON knowledge_graphs(filename)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_knowledge_graphs_namespace ON knowledge_graphs(namespace)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_knowledge_graphs_trace_id ON knowledge_graphs(trace_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_entities_entity_id ON entities(entity_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_relations_relation_id ON relations(relation_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_traces_trace_id ON traces(trace_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_traces_content_hash ON traces(content_hash)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_traces_filename ON traces(filename)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_traces_status ON traces(status)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_prompt_reconstructions_kgid ON prompt_reconstructions(knowledge_graph_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_prompt_reconstructions_relation_id ON prompt_reconstructions(relation_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_perturbation_tests_kgid ON perturbation_tests(knowledge_graph_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_perturbation_tests_prid ON perturbation_tests(prompt_reconstruction_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_perturbation_tests_relation ON perturbation_tests(relation_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_perturbation_tests_type ON perturbation_tests(perturbation_type)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_causal_analyses_kgid ON causal_analyses(knowledge_graph_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_causal_analyses_method ON causal_analyses(analysis_method)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_causal_analyses_setid ON causal_analyses(perturbation_set_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_observability_connections_connection_id ON observability_connections(connection_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_observability_connections_platform ON observability_connections(platform)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_fetched_traces_trace_id ON fetched_traces(trace_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_fetched_traces_connection_id ON fetched_traces(connection_id)')
        cursor.execute('CREATE INDEX IF NOT EXISTS idx_fetched_traces_platform ON fetched_traces(platform)')

        # Commit changes
        conn.commit()

        # Handle column migrations for existing tables (no-ops on a fresh DB,
        # since the CREATE TABLE statements above already include the columns).
        logger.info("Checking for column migrations...")

        # Check if project_name column exists in fetched_traces
        cursor.execute("PRAGMA table_info(fetched_traces)")
        fetched_traces_columns = [column[1] for column in cursor.fetchall()]

        if 'project_name' not in fetched_traces_columns:
            logger.info("Adding project_name column to fetched_traces table...")
            cursor.execute("ALTER TABLE fetched_traces ADD COLUMN project_name TEXT")
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_fetched_traces_project_name ON fetched_traces(project_name)')
            conn.commit()
            logger.info("Successfully added project_name column")

        # Check if projects column exists in observability_connections
        cursor.execute("PRAGMA table_info(observability_connections)")
        obs_conn_columns = [column[1] for column in cursor.fetchall()]

        if 'projects' not in obs_conn_columns:
            logger.info("Adding projects column to observability_connections table...")
            cursor.execute("ALTER TABLE observability_connections ADD COLUMN projects TEXT")
            conn.commit()
            logger.info("Successfully added projects column")

        # Check if we need to rename preview_data to data in fetched_traces
        if 'preview_data' in fetched_traces_columns and 'data' not in fetched_traces_columns:
            logger.info("Renaming preview_data column to data in fetched_traces table...")
            cursor.execute("ALTER TABLE fetched_traces RENAME COLUMN preview_data TO data")
            conn.commit()
            logger.info("Successfully renamed preview_data column to data")

        # Verify tables were created
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
        tables = cursor.fetchall()
        table_names = [table[0] for table in tables]

        # Report results
        if reset:
            logger.info(f"Database reset completed. Tables created: {table_names}")
        else:
            logger.info(f"Database initialization completed. Tables created: {table_names}")

        # Check if the tables are empty
        cursor.execute("SELECT count(*) FROM knowledge_graphs")
        kg_count = cursor.fetchone()[0]
        cursor.execute("SELECT count(*) FROM entities")
        entity_count = cursor.fetchone()[0]
        cursor.execute("SELECT count(*) FROM relations")
        relation_count = cursor.fetchone()[0]

        trace_count = 0
        if 'traces' in table_names:
            cursor.execute("SELECT count(*) FROM traces")
            trace_count = cursor.fetchone()[0]

        logger.info(f"Database contains: {kg_count} knowledge graphs, {entity_count} entities, {relation_count} relations, {trace_count} traces")

        # Add sample data for HF Spaces if database is empty
        if os.getenv("SPACE_ID") and kg_count == 0 and trace_count == 0:
            logger.info("🎯 HF Spaces detected with empty database - adding sample data...")
            add_sample_data(cursor, conn)
    finally:
        # Close connection
        conn.close()


def main():
    """Parse arguments and initialize database.

    Returns:
        int: 0 on success or user cancellation, 1 on error.
    """
    parser = argparse.ArgumentParser(description='Initialize or reset the database')
    parser.add_argument('--reset', action='store_true',
                        help='Reset the database by dropping and recreating tables')
    parser.add_argument('--force', action='store_true',
                        help='Force reset by deleting the database file')
    args = parser.parse_args()

    try:
        if args.reset and not args.force and not confirm_reset():
            print("Database reset canceled.")
            return 0
        if args.force and not confirm_reset():
            print("Database force reset canceled.")
            return 0

        init_database(reset=args.reset, force=args.force)
        print("Database operation completed successfully.")
        return 0
    except Exception as e:
        # logger.exception keeps the traceback in the log for debugging.
        logger.exception(f"Error: {str(e)}")
        return 1


if __name__ == "__main__":
    sys.exit(main())