HKUDS/LightRAG:lightrag/tools/clean_llm_query_cache.py
#!/usr/bin/env python3
"""
LLM Query Cache Cleanup Tool for LightRAG

This tool cleans up LLM query cache (mix:*, hybrid:*, local:*, global:*)
from KV storage implementations while preserving workspace isolation.

Usage:
    python -m lightrag.tools.clean_llm_query_cache
    # or
    python lightrag/tools/clean_llm_query_cache.py

Supported KV Storage Types:
- JsonKVStorage
- RedisKVStorage
- PGKVStorage
- MongoKVStorage
"""

import asyncio
import os
import sys
import time
from typing import Any, Dict, List
from dataclasses import dataclass, field

from dotenv import load_dotenv

# Add project root to path for imports
sys.path.insert(
    0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)

from lightrag.kg import STORAGE_ENV_REQUIREMENTS
from lightrag.kg.shared_storage import set_all_update_flags
from lightrag.namespace import NameSpace
from lightrag.utils import setup_logger

# Load environment variables
load_dotenv(dotenv_path=".env", override=False)

# Setup logger
setup_logger("lightrag", level="INFO")

# Storage type configurations
STORAGE_TYPES = {
    "1": "JsonKVStorage",
    "2": "RedisKVStorage",
    "3": "PGKVStorage",
    "4": "MongoKVStorage",
}

# Workspace environment variable mapping
WORKSPACE_ENV_MAP = {
    "PGKVStorage": "POSTGRES_WORKSPACE",
    "MongoKVStorage": "MONGODB_WORKSPACE",
    "RedisKVStorage": "REDIS_WORKSPACE",
}

# Query cache modes
QUERY_MODES = ["mix", "hybrid", "local", "global"]

# Query cache types
CACHE_TYPES = ["query", "keywords"]

# Default batch size for deletion
DEFAULT_BATCH_SIZE = 1000

# ANSI color codes for terminal output
BOLD_CYAN = "\033[1;36m"
BOLD_RED = "\033[1;31m"
BOLD_GREEN = "\033[1;32m"
RESET = "\033[0m"


@dataclass
class CleanupStats:
    """Cleanup statistics and error tracking"""

    # Count by mode and cache_type before cleanup
    counts_before: Dict[str, Dict[str, int]] = field(default_factory=dict)

    # Deletion statistics
    total_to_delete: int = 0
    total_batches: int = 0
    successful_batches: int = 0
    failed_batches: int = 0
    successfully_deleted: int = 0
    failed_to_delete: int = 0

    # Count by mode and cache_type after cleanup
    counts_after: Dict[str, Dict[str, int]] = field(default_factory=dict)

    # Error tracking
    errors: List[Dict[str, Any]] = field(default_factory=list)

    def add_error(self, batch_idx: int, error: Exception, batch_size: int):
        """Record batch error"""
        self.errors.append(
            {
                "batch": batch_idx,
                "error_type": type(error).__name__,
                "error_msg": str(error),
                "records_lost": batch_size,
                "timestamp": time.time(),
            }
        )
        self.failed_batches += 1
        self.failed_to_delete += batch_size

    def initialize_counts(self):
        """Initialize count dictionaries"""
        for mode in QUERY_MODES:
            self.counts_before[mode] = {"query": 0, "keywords": 0}
            self.counts_after[mode] = {"query": 0, "keywords": 0}


class CleanupTool:
    """LLM Query Cache Cleanup Tool"""

    def __init__(self):
        self.storage = None
        self.workspace = ""
        self.batch_size = DEFAULT_BATCH_SIZE

    def get_workspace_for_storage(self, storage_name: str) -> str:
        """Get workspace for a specific storage type

        Priority: Storage-specific env var > WORKSPACE env var > empty string

        Args:
            storage_name: Storage implementation name

        Returns:
            Workspace name
        """
        # Check storage-specific workspace
        if storage_name in WORKSPACE_ENV_MAP:
            specific_workspace = os.getenv(WORKSPACE_ENV_MAP[storage_name])
            if specific_workspace:
                return specific_workspace

        # Check generic WORKSPACE
        workspace = os.getenv("WORKSPACE", "")
        return workspace
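    # Illustrative resolution order (hypothetical values): with both
    # POSTGRES_WORKSPACE=prod and WORKSPACE=dev set,
    #   get_workspace_for_storage("PGKVStorage")   -> "prod"  (specific var wins)
    #   get_workspace_for_storage("JsonKVStorage") -> "dev"   (generic fallback)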
    def check_config_ini_for_storage(self, storage_name: str) -> bool:
        """Check if config.ini has configuration for the storage type

        Args:
            storage_name: Storage implementation name

        Returns:
            True if config.ini has the necessary configuration
        """
        try:
            import configparser

            config = configparser.ConfigParser()
            config.read("config.ini", "utf-8")

            if storage_name == "RedisKVStorage":
                return config.has_option("redis", "uri")
            elif storage_name == "PGKVStorage":
                return (
                    config.has_option("postgres", "user")
                    and config.has_option("postgres", "password")
                    and config.has_option("postgres", "database")
                )
            elif storage_name == "MongoKVStorage":
                return config.has_option("mongodb", "uri") and config.has_option(
                    "mongodb", "database"
                )
            return False
        except Exception:
            return False

    def check_env_vars(self, storage_name: str) -> bool:
        """Check environment variables, show warnings if missing but don't fail

        Args:
            storage_name: Storage implementation name

        Returns:
            Always returns True (warnings only, no hard failure)
        """
        required_vars = STORAGE_ENV_REQUIREMENTS.get(storage_name, [])
        if not required_vars:
            print("✓ No environment variables required")
            return True

        missing_vars = [var for var in required_vars if var not in os.environ]

        if missing_vars:
            print(
                f"⚠️ Warning: Missing environment variables: {', '.join(missing_vars)}"
            )
            # Check if config.ini has configuration
            has_config = self.check_config_ini_for_storage(storage_name)
            if has_config:
                print(" ✓ Found configuration in config.ini")
            else:
                print(f" Will attempt to use defaults for {storage_name}")
            return True

        print("✓ All required environment variables are set")
        return True

    def get_storage_class(self, storage_name: str):
        """Dynamically import and return storage class

        Args:
            storage_name: Storage implementation name

        Returns:
            Storage class
        """
        if storage_name == "JsonKVStorage":
            from lightrag.kg.json_kv_impl import JsonKVStorage

            return JsonKVStorage
        elif storage_name == "RedisKVStorage":
            from lightrag.kg.redis_impl import RedisKVStorage

            return RedisKVStorage
        elif storage_name == "PGKVStorage":
            from lightrag.kg.postgres_impl import PGKVStorage

            return PGKVStorage
        elif storage_name == "MongoKVStorage":
            from lightrag.kg.mongo_impl import MongoKVStorage

            return MongoKVStorage
        else:
            raise ValueError(f"Unsupported storage type: {storage_name}")

    async def initialize_storage(self, storage_name: str, workspace: str):
        """Initialize storage instance with fallback to config.ini and defaults

        Args:
            storage_name: Storage implementation name
            workspace: Workspace name

        Returns:
            Initialized storage instance

        Raises:
            Exception: If initialization fails
        """
        storage_class = self.get_storage_class(storage_name)

        # Create global config
        global_config = {
            "working_dir": os.getenv("WORKING_DIR", "./rag_storage"),
            "embedding_batch_num": 10,
        }

        # Initialize storage
        storage = storage_class(
            namespace=NameSpace.KV_STORE_LLM_RESPONSE_CACHE,
            workspace=workspace,
            global_config=global_config,
            embedding_func=None,
        )

        # Initialize the storage (may raise exception if connection fails)
        await storage.initialize()
        return storage

    async def count_query_caches_json(self, storage) -> Dict[str, Dict[str, int]]:
        """Count query caches in JsonKVStorage by mode and cache_type

        Args:
            storage: JsonKVStorage instance

        Returns:
            Dictionary with counts for each mode and cache_type
        """
        counts = {mode: {"query": 0, "keywords": 0} for mode in QUERY_MODES}

        async with storage._storage_lock:
            for key in storage._data.keys():
                for mode in QUERY_MODES:
                    if key.startswith(f"{mode}:query:"):
                        counts[mode]["query"] += 1
                    elif key.startswith(f"{mode}:keywords:"):
                        counts[mode]["keywords"] += 1

        return counts
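    # Cache keys follow the "<mode>:<cache_type>:<hash>" convention, so a
    # (hypothetical) key such as "mix:query:abc123" is tallied under
    # counts["mix"]["query"] by the method above.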
"""Count query caches in RedisKVStorage by mode and cache_type Args: storage: RedisKVStorage instance Returns: Dictionary with counts for each mode and cache_type """ counts = {mode: {"query": 0, "keywords": 0} for mode in QUERY_MODES} print("Scanning Redis keys...", end="", flush=True) async with storage._get_redis_connection() as redis: for mode in QUERY_MODES: for cache_type in CACHE_TYPES: pattern = f"{mode}:{cache_type}:*" prefixed_pattern = f"{storage.final_namespace}:{pattern}" cursor = 0 while True: cursor, keys = await redis.scan( cursor, match=prefixed_pattern, count=DEFAULT_BATCH_SIZE ) counts[mode][cache_type] += len(keys) if cursor == 0: break print() # New line after progress return counts async def count_query_caches_pg(self, storage) -> Dict[str, Dict[str, int]]: """Count query caches in PostgreSQL by mode and cache_type Args: storage: PGKVStorage instance Returns: Dictionary with counts for each mode and cache_type """ from lightrag.kg.postgres_impl import namespace_to_table_name counts = {mode: {"query": 0, "keywords": 0} for mode in QUERY_MODES} table_name = namespace_to_table_name(storage.namespace) print("Counting PostgreSQL records...", end="", flush=True) start_time = time.time() for mode in QUERY_MODES: for cache_type in CACHE_TYPES: query = f""" SELECT COUNT(*) as count FROM {table_name} WHERE workspace = $1 AND id LIKE $2 """ pattern = f"{mode}:{cache_type}:%" result = await storage.db.query(query, [storage.workspace, pattern]) counts[mode][cache_type] = result["count"] if result else 0 elapsed = time.time() - start_time if elapsed > 1: print(f" (took {elapsed:.1f}s)", end="") print() # New line return counts async def count_query_caches_mongo(self, storage) -> Dict[str, Dict[str, int]]: """Count query caches in MongoDB by mode and cache_type Args: storage: MongoKVStorage instance Returns: Dictionary with counts for each mode and cache_type """ counts = {mode: {"query": 0, "keywords": 0} for mode in QUERY_MODES} print("Counting MongoDB documents...", end="", flush=True) start_time = time.time() for mode in QUERY_MODES: for cache_type in CACHE_TYPES: pattern = f"^{mode}:{cache_type}:" query = {"_id": {"$regex": pattern}} count = await storage._data.count_documents(query) counts[mode][cache_type] = count elapsed = time.time() - start_time if elapsed > 1: print(f" (took {elapsed:.1f}s)", end="") print() # New line return counts async def count_query_caches( self, storage, storage_name: str ) -> Dict[str, Dict[str, int]]: """Count query caches from any storage type efficiently Args: storage: Storage instance storage_name: Storage type name Returns: Dictionary with counts for each mode and cache_type """ if storage_name == "JsonKVStorage": return await self.count_query_caches_json(storage) elif storage_name == "RedisKVStorage": return await self.count_query_caches_redis(storage) elif storage_name == "PGKVStorage": return await self.count_query_caches_pg(storage) elif storage_name == "MongoKVStorage": return await self.count_query_caches_mongo(storage) else: raise ValueError(f"Unsupported storage type: {storage_name}") async def delete_query_caches_json( self, storage, cleanup_type: str, stats: CleanupStats ): """Delete query caches from JsonKVStorage Args: storage: JsonKVStorage instance cleanup_type: 'all', 'query', or 'keywords' stats: CleanupStats object to track progress """ # Collect keys to delete async with storage._storage_lock: keys_to_delete = [] for key in storage._data.keys(): should_delete = False for mode in QUERY_MODES: if cleanup_type == "all": if 
key.startswith(f"{mode}:query:") or key.startswith( f"{mode}:keywords:" ): should_delete = True elif cleanup_type == "query": if key.startswith(f"{mode}:query:"): should_delete = True elif cleanup_type == "keywords": if key.startswith(f"{mode}:keywords:"): should_delete = True if should_delete: keys_to_delete.append(key) # Delete in batches total_keys = len(keys_to_delete) stats.total_batches = (total_keys + self.batch_size - 1) // self.batch_size print("\n=== Starting Cleanup ===") print( f"💡 Processing {self.batch_size:,} records at a time from JsonKVStorage\n" ) for batch_idx in range(stats.total_batches): start_idx = batch_idx * self.batch_size end_idx = min((batch_idx + 1) * self.batch_size, total_keys) batch_keys = keys_to_delete[start_idx:end_idx] try: async with storage._storage_lock: for key in batch_keys: del storage._data[key] # CRITICAL: Set update flag so changes persist to disk # Without this, deletions remain in-memory only and are lost on exit await set_all_update_flags( storage.namespace, workspace=storage.workspace ) # Success stats.successful_batches += 1 stats.successfully_deleted += len(batch_keys) # Calculate progress progress = (stats.successfully_deleted / total_keys) * 100 bar_length = 20 filled_length = int( bar_length * stats.successfully_deleted // total_keys ) bar = "█" * filled_length + "░" * (bar_length - filled_length) print( f"Batch {batch_idx + 1}/{stats.total_batches}: {bar} " f"{stats.successfully_deleted:,}/{total_keys:,} ({progress:.1f}%) ✓" ) except Exception as e: stats.add_error(batch_idx + 1, e, len(batch_keys)) print( f"Batch {batch_idx + 1}/{stats.total_batches}: ✗ FAILED - " f"{type(e).__name__}: {str(e)}" ) async def delete_query_caches_redis( self, storage, cleanup_type: str, stats: CleanupStats ): """Delete query caches from RedisKVStorage Args: storage: RedisKVStorage instance cleanup_type: 'all', 'query', or 'keywords' stats: CleanupStats object to track progress """ # Build patterns to delete patterns = [] for mode in QUERY_MODES: if cleanup_type == "all": patterns.append(f"{mode}:query:*") patterns.append(f"{mode}:keywords:*") elif cleanup_type == "query": patterns.append(f"{mode}:query:*") elif cleanup_type == "keywords": patterns.append(f"{mode}:keywords:*") print("\n=== Starting Cleanup ===") print(f"💡 Processing Redis keys in batches of {self.batch_size:,}\n") batch_idx = 0 total_deleted = 0 async with storage._get_redis_connection() as redis: for pattern in patterns: prefixed_pattern = f"{storage.final_namespace}:{pattern}" cursor = 0 while True: cursor, keys = await redis.scan( cursor, match=prefixed_pattern, count=self.batch_size ) if keys: batch_idx += 1 stats.total_batches += 1 try: # Delete batch using pipeline pipe = redis.pipeline() for key in keys: pipe.delete(key) await pipe.execute() # Success stats.successful_batches += 1 stats.successfully_deleted += len(keys) total_deleted += len(keys) # Progress print( f"Batch {batch_idx}: Deleted {len(keys):,} keys " f"(Total: {total_deleted:,}) ✓" ) except Exception as e: stats.add_error(batch_idx, e, len(keys)) print( f"Batch {batch_idx}: ✗ FAILED - " f"{type(e).__name__}: {str(e)}" ) if cursor == 0: break await asyncio.sleep(0) async def delete_query_caches_pg( self, storage, cleanup_type: str, stats: CleanupStats ): """Delete query caches from PostgreSQL Args: storage: PGKVStorage instance cleanup_type: 'all', 'query', or 'keywords' stats: CleanupStats object to track progress """ from lightrag.kg.postgres_impl import namespace_to_table_name table_name = 
    async def delete_query_caches_pg(
        self, storage, cleanup_type: str, stats: CleanupStats
    ):
        """Delete query caches from PostgreSQL

        Args:
            storage: PGKVStorage instance
            cleanup_type: 'all', 'query', or 'keywords'
            stats: CleanupStats object to track progress
        """
        from lightrag.kg.postgres_impl import namespace_to_table_name

        table_name = namespace_to_table_name(storage.namespace)

        # Build WHERE conditions
        conditions = []
        for mode in QUERY_MODES:
            if cleanup_type == "all":
                conditions.append(f"id LIKE '{mode}:query:%'")
                conditions.append(f"id LIKE '{mode}:keywords:%'")
            elif cleanup_type == "query":
                conditions.append(f"id LIKE '{mode}:query:%'")
            elif cleanup_type == "keywords":
                conditions.append(f"id LIKE '{mode}:keywords:%'")

        where_clause = " OR ".join(conditions)

        print("\n=== Starting Cleanup ===")
        print("💡 Executing PostgreSQL DELETE query\n")

        try:
            query = f"""
                DELETE FROM {table_name}
                WHERE workspace = $1 AND ({where_clause})
            """
            start_time = time.time()
            # Fix: Pass dict instead of list for execute() method
            await storage.db.execute(query, {"workspace": storage.workspace})
            elapsed = time.time() - start_time

            # Assume all targeted records were deleted
            # (execute() does not return a row count here)
            stats.total_batches = 1
            stats.successful_batches = 1
            stats.successfully_deleted = stats.total_to_delete

            print(f"✓ Deleted {stats.successfully_deleted:,} records in {elapsed:.2f}s")

        except Exception as e:
            stats.add_error(1, e, stats.total_to_delete)
            print(f"✗ DELETE failed: {type(e).__name__}: {str(e)}")

    async def delete_query_caches_mongo(
        self, storage, cleanup_type: str, stats: CleanupStats
    ):
        """Delete query caches from MongoDB

        Args:
            storage: MongoKVStorage instance
            cleanup_type: 'all', 'query', or 'keywords'
            stats: CleanupStats object to track progress
        """
        # Build regex patterns
        patterns = []
        for mode in QUERY_MODES:
            if cleanup_type == "all":
                patterns.append(f"^{mode}:query:")
                patterns.append(f"^{mode}:keywords:")
            elif cleanup_type == "query":
                patterns.append(f"^{mode}:query:")
            elif cleanup_type == "keywords":
                patterns.append(f"^{mode}:keywords:")

        print("\n=== Starting Cleanup ===")
        print("💡 Executing MongoDB deleteMany operations\n")

        total_deleted = 0

        for idx, pattern in enumerate(patterns, 1):
            try:
                query = {"_id": {"$regex": pattern}}
                result = await storage._data.delete_many(query)
                deleted_count = result.deleted_count

                stats.total_batches += 1
                stats.successful_batches += 1
                stats.successfully_deleted += deleted_count
                total_deleted += deleted_count

                print(
                    f"Pattern {idx}/{len(patterns)}: Deleted {deleted_count:,} records ✓"
                )

            except Exception as e:
                stats.add_error(idx, e, 0)
                print(
                    f"Pattern {idx}/{len(patterns)}: ✗ FAILED - "
                    f"{type(e).__name__}: {str(e)}"
                )

        print(f"\nTotal deleted: {total_deleted:,} records")

    async def delete_query_caches(
        self, storage, storage_name: str, cleanup_type: str, stats: CleanupStats
    ):
        """Delete query caches from any storage type

        Args:
            storage: Storage instance
            storage_name: Storage type name
            cleanup_type: 'all', 'query', or 'keywords'
            stats: CleanupStats object to track progress
        """
        if storage_name == "JsonKVStorage":
            await self.delete_query_caches_json(storage, cleanup_type, stats)
        elif storage_name == "RedisKVStorage":
            await self.delete_query_caches_redis(storage, cleanup_type, stats)
        elif storage_name == "PGKVStorage":
            await self.delete_query_caches_pg(storage, cleanup_type, stats)
        elif storage_name == "MongoKVStorage":
            await self.delete_query_caches_mongo(storage, cleanup_type, stats)
        else:
            raise ValueError(f"Unsupported storage type: {storage_name}")

    def print_header(self):
        """Print tool header"""
        print("\n" + "=" * 60)
        print("LLM Query Cache Cleanup Tool - LightRAG")
        print("=" * 60)

    def print_storage_types(self):
        """Print available storage types"""
        print("\nSupported KV Storage Types:")
        for key, value in STORAGE_TYPES.items():
            print(f"[{key}] {value}")

    def format_workspace(self, workspace: str) -> str:
        """Format workspace name with highlighting

        Args:
            workspace: Workspace name (may be empty)

        Returns:
            Formatted workspace string with ANSI color codes
        """
        if workspace:
            return f"{BOLD_CYAN}{workspace}{RESET}"
        else:
            return f"{BOLD_CYAN}(default){RESET}"

    def print_cache_statistics(self, counts: Dict[str, Dict[str, int]], title: str):
        """Print cache statistics in a formatted table

        Args:
            counts: Dictionary with counts for each mode and cache_type
            title: Title for the statistics display
        """
        print(f"\n{title}")
        print("┌" + "─" * 12 + "┬" + "─" * 12 + "┬" + "─" * 12 + "┬" + "─" * 12 + "┐")
        print(f"│ {'Mode':<10} │ {'Query':>10} │ {'Keywords':>10} │ {'Total':>10} │")
        print("├" + "─" * 12 + "┼" + "─" * 12 + "┼" + "─" * 12 + "┼" + "─" * 12 + "┤")

        total_query = 0
        total_keywords = 0

        for mode in QUERY_MODES:
            query_count = counts[mode]["query"]
            keywords_count = counts[mode]["keywords"]
            mode_total = query_count + keywords_count
            total_query += query_count
            total_keywords += keywords_count
            print(
                f"│ {mode:<10} │ {query_count:>10,} │ {keywords_count:>10,} │ {mode_total:>10,} │"
            )

        print("├" + "─" * 12 + "┼" + "─" * 12 + "┼" + "─" * 12 + "┼" + "─" * 12 + "┤")
        grand_total = total_query + total_keywords
        print(
            f"│ {'Total':<10} │ {total_query:>10,} │ {total_keywords:>10,} │ {grand_total:>10,} │"
        )
        print("└" + "─" * 12 + "┴" + "─" * 12 + "┴" + "─" * 12 + "┴" + "─" * 12 + "┘")

    def calculate_total_to_delete(
        self, counts: Dict[str, Dict[str, int]], cleanup_type: str
    ) -> int:
        """Calculate total number of records to delete

        Args:
            counts: Dictionary with counts for each mode and cache_type
            cleanup_type: 'all', 'query', or 'keywords'

        Returns:
            Total number of records to delete
        """
        total = 0
        for mode in QUERY_MODES:
            if cleanup_type == "all":
                total += counts[mode]["query"] + counts[mode]["keywords"]
            elif cleanup_type == "query":
                total += counts[mode]["query"]
            elif cleanup_type == "keywords":
                total += counts[mode]["keywords"]
        return total
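    # Worked example (hypothetical counts): if counts["mix"] == {"query": 3,
    # "keywords": 1} and cleanup_type == "query", the "mix" mode contributes 3
    # to the total and its keywords entries are left untouched.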
errors:") for i, error in enumerate(stats.errors[:5], 1): print(f"\n {i}. Batch {error['batch']}") print(f" Type: {error['error_type']}") print(f" Message: {error['error_msg']}") print(f" Records lost: {error['records_lost']:,}") if len(stats.errors) > 5: print(f"\n ... and {len(stats.errors) - 5} more errors") print("\n" + "=" * 60) print(f"{BOLD_RED}⚠️ WARNING: Cleanup completed with errors!{RESET}") print(" Please review the error details above.") print("=" * 60) else: print("\n" + "=" * 60) print(f"{BOLD_GREEN}✓ SUCCESS: All records cleaned up successfully!{RESET}") print("=" * 60) async def setup_storage(self) -> tuple: """Setup and initialize storage Returns: Tuple of (storage_instance, storage_name, workspace) Returns (None, None, None) if user chooses to exit """ print("\n=== Storage Setup ===") self.print_storage_types() # Custom input handling with exit support while True: choice = input( "\nSelect storage type (1-4) (Press Enter to exit): " ).strip() # Check for exit if choice == "" or choice == "0": print("\n✓ Cleanup cancelled by user") return None, None, None # Check if choice is valid if choice in STORAGE_TYPES: break print( f"✗ Invalid choice. Please enter one of: {', '.join(STORAGE_TYPES.keys())}" ) storage_name = STORAGE_TYPES[choice] # Special warning for JsonKVStorage about concurrent access if storage_name == "JsonKVStorage": print("\n" + "=" * 60) print(f"{BOLD_RED}⚠️ IMPORTANT WARNING - JsonKVStorage Concurrency{RESET}") print("=" * 60) print("\nJsonKVStorage is an in-memory database that does NOT support") print("concurrent access to the same file by multiple programs.") print("\nBefore proceeding, please ensure that:") print(" • LightRAG Server is completely shut down") print(" • No other programs are accessing the storage files") print("\n" + "=" * 60) confirm = ( input("\nHas LightRAG Server been shut down? (yes/no): ") .strip() .lower() ) if confirm != "yes": print( "\n✓ Operation cancelled - Please shut down LightRAG Server first" ) return None, None, None print("✓ Proceeding with JsonKVStorage cleanup...") # Check configuration (warnings only, doesn't block) print("\nChecking configuration...") self.check_env_vars(storage_name) # Get workspace workspace = self.get_workspace_for_storage(storage_name) # Initialize storage (real validation point) print("\nInitializing storage...") try: storage = await self.initialize_storage(storage_name, workspace) print(f"- Storage Type: {storage_name}") print(f"- Workspace: {workspace if workspace else '(default)'}") print("- Connection Status: ✓ Success") except Exception as e: print(f"✗ Initialization failed: {e}") print(f"\nFor {storage_name}, you can configure using:") print(" 1. Environment variables (highest priority)") # Show specific environment variable requirements if storage_name in STORAGE_ENV_REQUIREMENTS: for var in STORAGE_ENV_REQUIREMENTS[storage_name]: print(f" - {var}") print(" 2. 
config.ini file (medium priority)") if storage_name == "RedisKVStorage": print(" [redis]") print(" uri = redis://localhost:6379") elif storage_name == "PGKVStorage": print(" [postgres]") print(" host = localhost") print(" port = 5432") print(" user = postgres") print(" password = yourpassword") print(" database = lightrag") elif storage_name == "MongoKVStorage": print(" [mongodb]") print(" uri = mongodb://root:root@localhost:27017/") print(" database = LightRAG") return None, None, None return storage, storage_name, workspace async def run(self): """Run the cleanup tool""" try: # Initialize shared storage (REQUIRED for storage classes to work) from lightrag.kg.shared_storage import initialize_share_data initialize_share_data(workers=1) # Print header self.print_header() # Setup storage self.storage, storage_name, self.workspace = await self.setup_storage() # Check if user cancelled if self.storage is None: return # Count query caches print("\nCounting query cache records...") try: counts = await self.count_query_caches(self.storage, storage_name) except Exception as e: print(f"✗ Counting failed: {e}") await self.storage.finalize() return # Initialize stats stats = CleanupStats() stats.initialize_counts() stats.counts_before = counts # Print statistics self.print_cache_statistics( counts, "📊 Query Cache Statistics (Before Cleanup):" ) # Calculate total total_caches = sum( counts[mode]["query"] + counts[mode]["keywords"] for mode in QUERY_MODES ) if total_caches == 0: print("\n⚠️ No query caches found in storage") await self.storage.finalize() return # Select cleanup type print("\n=== Cleanup Options ===") print("[1] Delete all query caches (both query and keywords)") print("[2] Delete query caches only (keep keywords)") print("[3] Delete keywords caches only (keep query)") print("[0] Cancel") while True: choice = input("\nSelect cleanup option (0-3): ").strip() if choice == "0" or choice == "": print("\n✓ Cleanup cancelled") await self.storage.finalize() return elif choice == "1": cleanup_type = "all" elif choice == "2": cleanup_type = "query" elif choice == "3": cleanup_type = "keywords" else: print("✗ Invalid choice. Please enter 0, 1, 2, or 3") continue # Calculate total to delete for the selected type stats.total_to_delete = self.calculate_total_to_delete( counts, cleanup_type ) # Check if there are any records to delete if stats.total_to_delete == 0: if cleanup_type == "all": print(f"\n{BOLD_RED}⚠️ No query caches found to delete!{RESET}") elif cleanup_type == "query": print( f"\n{BOLD_RED}⚠️ No query caches found to delete! (Only keywords exist){RESET}" ) elif cleanup_type == "keywords": print( f"\n{BOLD_RED}⚠️ No keywords caches found to delete! 
(Only query caches exist){RESET}" ) print(" Please select a different cleanup option.\n") continue # Valid selection with records to delete break # Confirm deletion print("\n" + "=" * 60) print("Cleanup Confirmation") print("=" * 60) print( f"Storage: {BOLD_CYAN}{storage_name}{RESET} " f"(workspace: {self.format_workspace(self.workspace)})" ) print(f"Cleanup Type: {BOLD_CYAN}{cleanup_type}{RESET}") print( f"Records to Delete: {BOLD_RED}{stats.total_to_delete:,}{RESET} / {total_caches:,}" ) if cleanup_type == "all": print( f"\n{BOLD_RED}⚠️ WARNING: This will delete ALL query caches across all modes!{RESET}" ) elif cleanup_type == "query": print("\n⚠️ This will delete query caches only (keywords will be kept)") elif cleanup_type == "keywords": print("\n⚠️ This will delete keywords caches only (query will be kept)") confirm = input("\nContinue with deletion? (y/n): ").strip().lower() if confirm != "y": print("\n✓ Cleanup cancelled") await self.storage.finalize() return # Perform deletion await self.delete_query_caches( self.storage, storage_name, cleanup_type, stats ) # Persist changes print("\nPersisting changes to storage...") try: await self.storage.index_done_callback() print("✓ Changes persisted successfully") except Exception as e: print(f"✗ Persist failed: {e}") stats.add_error(0, e, 0) # Count again to verify print("\nVerifying cleanup results...") try: stats.counts_after = await self.count_query_caches( self.storage, storage_name ) except Exception as e: print(f"⚠️ Verification failed: {e}") # Use zero counts if verification fails stats.counts_after = { mode: {"query": 0, "keywords": 0} for mode in QUERY_MODES } # Print final report self.print_cleanup_report(stats) # Print after statistics self.print_cache_statistics( stats.counts_after, "\n📊 Query Cache Statistics (After Cleanup):" ) # Cleanup await self.storage.finalize() except KeyboardInterrupt: print("\n\n✗ Cleanup interrupted by user") except Exception as e: print(f"\n✗ Cleanup failed: {e}") import traceback traceback.print_exc() finally: # Ensure cleanup if self.storage: try: await self.storage.finalize() except Exception: pass # Finalize shared storage try: from lightrag.kg.shared_storage import finalize_share_data finalize_share_data() except Exception: pass async def async_main(): """Async main entry point""" tool = CleanupTool() await tool.run() def main(): """Synchronous entry point for CLI command""" asyncio.run(async_main()) if __name__ == "__main__": main()
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/tools/clean_llm_query_cache.py", "license": "MIT License", "lines": 928, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:lightrag/tools/migrate_llm_cache.py
#!/usr/bin/env python3
"""
LLM Cache Migration Tool for LightRAG

This tool migrates LLM response cache (default:extract:* and default:summary:*)
between different KV storage implementations while preserving workspace isolation.

Usage:
    python -m lightrag.tools.migrate_llm_cache
    # or
    python lightrag/tools/migrate_llm_cache.py

Supported KV Storage Types:
- JsonKVStorage
- RedisKVStorage
- PGKVStorage
- MongoKVStorage
"""

import asyncio
import os
import sys
import time
from typing import Any, Dict, List
from dataclasses import dataclass, field

from dotenv import load_dotenv

# Add project root to path for imports
sys.path.insert(
    0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)

from lightrag.kg import STORAGE_ENV_REQUIREMENTS
from lightrag.namespace import NameSpace
from lightrag.utils import setup_logger

# Load environment variables
# use the .env that is inside the current folder
# allows to use different .env file for each lightrag instance
# the OS environment variables take precedence over the .env file
load_dotenv(dotenv_path=".env", override=False)

# Setup logger
setup_logger("lightrag", level="INFO")

# Storage type configurations
STORAGE_TYPES = {
    "1": "JsonKVStorage",
    "2": "RedisKVStorage",
    "3": "PGKVStorage",
    "4": "MongoKVStorage",
}

# Workspace environment variable mapping
WORKSPACE_ENV_MAP = {
    "PGKVStorage": "POSTGRES_WORKSPACE",
    "MongoKVStorage": "MONGODB_WORKSPACE",
    "RedisKVStorage": "REDIS_WORKSPACE",
}

# Default batch size for migration
DEFAULT_BATCH_SIZE = 1000

# Default count batch size for efficient counting
DEFAULT_COUNT_BATCH_SIZE = 1000

# ANSI color codes for terminal output
BOLD_CYAN = "\033[1;36m"
RESET = "\033[0m"


@dataclass
class MigrationStats:
    """Migration statistics and error tracking"""

    total_source_records: int = 0
    total_batches: int = 0
    successful_batches: int = 0
    failed_batches: int = 0
    successful_records: int = 0
    failed_records: int = 0
    errors: List[Dict[str, Any]] = field(default_factory=list)

    def add_error(self, batch_idx: int, error: Exception, batch_size: int):
        """Record batch error"""
        self.errors.append(
            {
                "batch": batch_idx,
                "error_type": type(error).__name__,
                "error_msg": str(error),
                "records_lost": batch_size,
                "timestamp": time.time(),
            }
        )
        self.failed_batches += 1
        self.failed_records += batch_size


class MigrationTool:
    """LLM Cache Migration Tool"""

    def __init__(self):
        self.source_storage = None
        self.target_storage = None
        self.source_workspace = ""
        self.target_workspace = ""
        self.batch_size = DEFAULT_BATCH_SIZE

    def get_workspace_for_storage(self, storage_name: str) -> str:
        """Get workspace for a specific storage type

        Priority: Storage-specific env var > WORKSPACE env var > empty string

        Args:
            storage_name: Storage implementation name

        Returns:
            Workspace name
        """
        # Check storage-specific workspace
        if storage_name in WORKSPACE_ENV_MAP:
            specific_workspace = os.getenv(WORKSPACE_ENV_MAP[storage_name])
            if specific_workspace:
                return specific_workspace

        # Check generic WORKSPACE
        workspace = os.getenv("WORKSPACE", "")
        return workspace

    def check_config_ini_for_storage(self, storage_name: str) -> bool:
        """Check if config.ini has configuration for the storage type

        Args:
            storage_name: Storage implementation name

        Returns:
            True if config.ini has the necessary configuration
        """
        try:
            import configparser

            config = configparser.ConfigParser()
            config.read("config.ini", "utf-8")

            if storage_name == "RedisKVStorage":
                return config.has_option("redis", "uri")
            elif storage_name == "PGKVStorage":
                return (
                    config.has_option("postgres", "user")
                    and config.has_option("postgres", "password")
                    and config.has_option("postgres", "database")
                )
            elif storage_name == "MongoKVStorage":
                return config.has_option("mongodb", "uri") and config.has_option(
                    "mongodb", "database"
                )
            return False
        except Exception:
            return False

    def check_env_vars(self, storage_name: str) -> bool:
        """Check environment variables, show warnings if missing but don't fail

        Args:
            storage_name: Storage implementation name

        Returns:
            Always returns True (warnings only, no hard failure)
        """
        required_vars = STORAGE_ENV_REQUIREMENTS.get(storage_name, [])
        if not required_vars:
            print("✓ No environment variables required")
            return True

        missing_vars = [var for var in required_vars if var not in os.environ]

        if missing_vars:
            print(
                f"⚠️ Warning: Missing environment variables: {', '.join(missing_vars)}"
            )
            # Check if config.ini has configuration
            has_config = self.check_config_ini_for_storage(storage_name)
            if has_config:
                print(" ✓ Found configuration in config.ini")
            else:
                print(f" Will attempt to use defaults for {storage_name}")
            return True

        print("✓ All required environment variables are set")
        return True

    def count_available_storage_types(self) -> int:
        """Count available storage types (with env vars, config.ini, or defaults)

        Returns:
            Number of available storage types
        """
        available_count = 0

        for storage_name in STORAGE_TYPES.values():
            # Check if storage requires configuration
            required_vars = STORAGE_ENV_REQUIREMENTS.get(storage_name, [])

            if not required_vars:
                # JsonKVStorage, MongoKVStorage etc. - no config needed
                available_count += 1
            else:
                # Check if has environment variables
                has_env = all(var in os.environ for var in required_vars)
                if has_env:
                    available_count += 1
                else:
                    # Check if has config.ini configuration
                    has_config = self.check_config_ini_for_storage(storage_name)
                    if has_config:
                        available_count += 1

        return available_count

    def get_storage_class(self, storage_name: str):
        """Dynamically import and return storage class

        Args:
            storage_name: Storage implementation name

        Returns:
            Storage class
        """
        if storage_name == "JsonKVStorage":
            from lightrag.kg.json_kv_impl import JsonKVStorage

            return JsonKVStorage
        elif storage_name == "RedisKVStorage":
            from lightrag.kg.redis_impl import RedisKVStorage

            return RedisKVStorage
        elif storage_name == "PGKVStorage":
            from lightrag.kg.postgres_impl import PGKVStorage

            return PGKVStorage
        elif storage_name == "MongoKVStorage":
            from lightrag.kg.mongo_impl import MongoKVStorage

            return MongoKVStorage
        else:
            raise ValueError(f"Unsupported storage type: {storage_name}")

    async def initialize_storage(self, storage_name: str, workspace: str):
        """Initialize storage instance with fallback to config.ini and defaults

        Args:
            storage_name: Storage implementation name
            workspace: Workspace name

        Returns:
            Initialized storage instance

        Raises:
            Exception: If initialization fails
        """
        storage_class = self.get_storage_class(storage_name)

        # Create global config
        global_config = {
            "working_dir": os.getenv("WORKING_DIR", "./rag_storage"),
            "embedding_batch_num": 10,
        }

        # Initialize storage
        storage = storage_class(
            namespace=NameSpace.KV_STORE_LLM_RESPONSE_CACHE,
            workspace=workspace,
            global_config=global_config,
            embedding_func=None,
        )

        # Initialize the storage (may raise exception if connection fails)
        await storage.initialize()
        return storage

    async def get_default_caches_json(self, storage) -> Dict[str, Any]:
        """Get default caches from JsonKVStorage

        Args:
            storage: JsonKVStorage instance

        Returns:
            Dictionary of cache entries with default:extract:* or default:summary:* keys
        """
        # Access _data directly - it's a dict from shared_storage
        async with storage._storage_lock:
            filtered = {}
            for key, value in storage._data.items():
                if key.startswith("default:extract:") or key.startswith(
                    "default:summary:"
                ):
                    filtered[key] = value.copy()
            return filtered

    async def get_default_caches_redis(
        self, storage, batch_size: int = 1000
    ) -> Dict[str, Any]:
        """Get default caches from RedisKVStorage with pagination

        Args:
            storage: RedisKVStorage instance
            batch_size: Number of keys to process per batch

        Returns:
            Dictionary of cache entries with default:extract:* or default:summary:* keys
        """
        import json

        cache_data = {}

        # Use _get_redis_connection() context manager
        async with storage._get_redis_connection() as redis:
            for pattern in ["default:extract:*", "default:summary:*"]:
                # Add namespace prefix to pattern
                prefixed_pattern = f"{storage.final_namespace}:{pattern}"
                cursor = 0

                while True:
                    # SCAN already implements cursor-based pagination
                    cursor, keys = await redis.scan(
                        cursor, match=prefixed_pattern, count=batch_size
                    )

                    if keys:
                        # Process this batch using pipeline with error handling
                        try:
                            pipe = redis.pipeline()
                            for key in keys:
                                pipe.get(key)
                            values = await pipe.execute()

                            for key, value in zip(keys, values):
                                if value:
                                    key_str = (
                                        key.decode() if isinstance(key, bytes) else key
                                    )
                                    # Remove namespace prefix to get original key
                                    original_key = key_str.replace(
                                        f"{storage.final_namespace}:", "", 1
                                    )
                                    cache_data[original_key] = json.loads(value)
                        except Exception as e:
                            # Pipeline execution failed, fall back to individual gets
                            print(
                                f"⚠️ Pipeline execution failed for batch, using individual gets: {e}"
                            )
                            for key in keys:
                                try:
                                    value = await redis.get(key)
                                    if value:
                                        key_str = (
                                            key.decode()
                                            if isinstance(key, bytes)
                                            else key
                                        )
                                        original_key = key_str.replace(
                                            f"{storage.final_namespace}:", "", 1
                                        )
                                        cache_data[original_key] = json.loads(value)
                                except Exception as individual_error:
                                    print(
                                        f"⚠️ Failed to get individual key {key}: {individual_error}"
                                    )
                                    continue

                    if cursor == 0:
                        break

                # Yield control periodically to avoid blocking
                await asyncio.sleep(0)

        return cache_data

    async def get_default_caches_pg(
        self, storage, batch_size: int = 1000
    ) -> Dict[str, Any]:
        """Get default caches from PGKVStorage with pagination

        Args:
            storage: PGKVStorage instance
            batch_size: Number of records to fetch per batch

        Returns:
            Dictionary of cache entries with default:extract:* or default:summary:* keys
        """
        from lightrag.kg.postgres_impl import namespace_to_table_name

        cache_data = {}
        table_name = namespace_to_table_name(storage.namespace)
        offset = 0

        while True:
            # Use LIMIT and OFFSET for pagination
            query = f"""
                SELECT id as key, original_prompt, return_value, chunk_id,
                       cache_type, queryparam,
                       EXTRACT(EPOCH FROM create_time)::BIGINT as create_time,
                       EXTRACT(EPOCH FROM update_time)::BIGINT as update_time
                FROM {table_name}
                WHERE workspace = $1
                  AND (id LIKE 'default:extract:%' OR id LIKE 'default:summary:%')
                ORDER BY id
                LIMIT $2 OFFSET $3
            """

            results = await storage.db.query(
                query, [storage.workspace, batch_size, offset], multirows=True
            )

            if not results:
                break

            for row in results:
                # Map PostgreSQL fields to cache format
                cache_entry = {
                    "return": row.get("return_value", ""),
                    "cache_type": row.get("cache_type"),
                    "original_prompt": row.get("original_prompt", ""),
                    "chunk_id": row.get("chunk_id"),
                    "queryparam": row.get("queryparam"),
                    "create_time": row.get("create_time", 0),
                    "update_time": row.get("update_time", 0),
                }
                cache_data[row["key"]] = cache_entry

            # If we got fewer results than batch_size, we're done
            if len(results) < batch_size:
                break

            offset += batch_size

            # Yield control periodically
            await asyncio.sleep(0)

        return cache_data
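    # Illustrative page math: with batch_size=1000 the loop above issues
    # OFFSET 0, 1000, 2000, ... and stops early at the first short page
    # (len(results) < batch_size), avoiding one final empty round trip.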
    async def get_default_caches_mongo(
        self, storage, batch_size: int = 1000
    ) -> Dict[str, Any]:
        """Get default caches from MongoKVStorage with cursor-based pagination

        Args:
            storage: MongoKVStorage instance
            batch_size: Number of documents to process per batch

        Returns:
            Dictionary of cache entries with default:extract:* or default:summary:* keys
        """
        cache_data = {}

        # MongoDB query with regex - use _data not collection
        query = {"_id": {"$regex": "^default:(extract|summary):"}}

        # Use cursor without to_list() - process in batches
        cursor = storage._data.find(query).batch_size(batch_size)

        async for doc in cursor:
            # Process each document as it comes
            doc_copy = doc.copy()
            key = doc_copy.pop("_id")

            # Filter ALL MongoDB/database-specific fields
            # Following .clinerules: "Always filter deprecated/incompatible fields during deserialization"
            for field_name in ["namespace", "workspace", "_id", "content"]:
                doc_copy.pop(field_name, None)

            cache_data[key] = doc_copy.copy()

            # Periodically yield control (every batch_size documents)
            if len(cache_data) % batch_size == 0:
                await asyncio.sleep(0)

        return cache_data

    async def get_default_caches(self, storage, storage_name: str) -> Dict[str, Any]:
        """Get default caches from any storage type

        Args:
            storage: Storage instance
            storage_name: Storage type name

        Returns:
            Dictionary of cache entries
        """
        if storage_name == "JsonKVStorage":
            return await self.get_default_caches_json(storage)
        elif storage_name == "RedisKVStorage":
            return await self.get_default_caches_redis(storage)
        elif storage_name == "PGKVStorage":
            return await self.get_default_caches_pg(storage)
        elif storage_name == "MongoKVStorage":
            return await self.get_default_caches_mongo(storage)
        else:
            raise ValueError(f"Unsupported storage type: {storage_name}")

    async def count_default_caches_json(self, storage) -> int:
        """Count default caches in JsonKVStorage - O(N) but very fast in-memory

        Args:
            storage: JsonKVStorage instance

        Returns:
            Total count of cache records
        """
        async with storage._storage_lock:
            return sum(
                1
                for key in storage._data.keys()
                if key.startswith("default:extract:")
                or key.startswith("default:summary:")
            )

    async def count_default_caches_redis(self, storage) -> int:
        """Count default caches in RedisKVStorage using SCAN with progress display

        Args:
            storage: RedisKVStorage instance

        Returns:
            Total count of cache records
        """
        count = 0
        print("Scanning Redis keys...", end="", flush=True)

        async with storage._get_redis_connection() as redis:
            for pattern in ["default:extract:*", "default:summary:*"]:
                prefixed_pattern = f"{storage.final_namespace}:{pattern}"
                cursor = 0
                while True:
                    cursor, keys = await redis.scan(
                        cursor, match=prefixed_pattern, count=DEFAULT_COUNT_BATCH_SIZE
                    )
                    count += len(keys)
                    # Show progress
                    print(
                        f"\rScanning Redis keys... found {count:,} records",
                        end="",
                        flush=True,
                    )
                    if cursor == 0:
                        break

        print()  # New line after progress
        return count

    async def count_default_caches_pg(self, storage) -> int:
        """Count default caches in PostgreSQL using COUNT(*) with progress indicator

        Args:
            storage: PGKVStorage instance

        Returns:
            Total count of cache records
        """
        from lightrag.kg.postgres_impl import namespace_to_table_name

        table_name = namespace_to_table_name(storage.namespace)
        query = f"""
            SELECT COUNT(*) as count FROM {table_name}
            WHERE workspace = $1
              AND (id LIKE 'default:extract:%' OR id LIKE 'default:summary:%')
        """

        print("Counting PostgreSQL records...", end="", flush=True)
        start_time = time.time()

        result = await storage.db.query(query, [storage.workspace])

        elapsed = time.time() - start_time
        if elapsed > 1:
            print(f" (took {elapsed:.1f}s)", end="")
        print()  # New line

        return result["count"] if result else 0

    async def count_default_caches_mongo(self, storage) -> int:
        """Count default caches in MongoDB using count_documents with progress indicator

        Args:
            storage: MongoKVStorage instance

        Returns:
            Total count of cache records
        """
        query = {"_id": {"$regex": "^default:(extract|summary):"}}

        print("Counting MongoDB documents...", end="", flush=True)
        start_time = time.time()

        count = await storage._data.count_documents(query)

        elapsed = time.time() - start_time
        if elapsed > 1:
            print(f" (took {elapsed:.1f}s)", end="")
        print()  # New line

        return count

    async def count_default_caches(self, storage, storage_name: str) -> int:
        """Count default caches from any storage type efficiently

        Args:
            storage: Storage instance
            storage_name: Storage type name

        Returns:
            Total count of cache records
        """
        if storage_name == "JsonKVStorage":
            return await self.count_default_caches_json(storage)
        elif storage_name == "RedisKVStorage":
            return await self.count_default_caches_redis(storage)
        elif storage_name == "PGKVStorage":
            return await self.count_default_caches_pg(storage)
        elif storage_name == "MongoKVStorage":
            return await self.count_default_caches_mongo(storage)
        else:
            raise ValueError(f"Unsupported storage type: {storage_name}")
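    # Counting up front (instead of loading everything) lets the streaming
    # migration report progress against a known total while keeping at most
    # one batch of cache entries in memory at a time.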
""" # Create a snapshot of matching items while holding the lock async with storage._storage_lock: matching_items = [ (key, value) for key, value in storage._data.items() if key.startswith("default:extract:") or key.startswith("default:summary:") ] # Now iterate over snapshot without holding lock batch = {} for key, value in matching_items: batch[key] = value.copy() if len(batch) >= batch_size: yield batch batch = {} # Yield remaining items if batch: yield batch async def stream_default_caches_redis(self, storage, batch_size: int): """Stream default caches from RedisKVStorage - yields batches Args: storage: RedisKVStorage instance batch_size: Size of each batch to yield Yields: Dictionary batches of cache entries """ import json async with storage._get_redis_connection() as redis: for pattern in ["default:extract:*", "default:summary:*"]: prefixed_pattern = f"{storage.final_namespace}:{pattern}" cursor = 0 while True: cursor, keys = await redis.scan( cursor, match=prefixed_pattern, count=batch_size ) if keys: try: pipe = redis.pipeline() for key in keys: pipe.get(key) values = await pipe.execute() batch = {} for key, value in zip(keys, values): if value: key_str = ( key.decode() if isinstance(key, bytes) else key ) original_key = key_str.replace( f"{storage.final_namespace}:", "", 1 ) batch[original_key] = json.loads(value) if batch: yield batch except Exception as e: print(f"⚠️ Pipeline execution failed for batch: {e}") # Fall back to individual gets batch = {} for key in keys: try: value = await redis.get(key) if value: key_str = ( key.decode() if isinstance(key, bytes) else key ) original_key = key_str.replace( f"{storage.final_namespace}:", "", 1 ) batch[original_key] = json.loads(value) except Exception as individual_error: print( f"⚠️ Failed to get individual key {key}: {individual_error}" ) continue if batch: yield batch if cursor == 0: break await asyncio.sleep(0) async def stream_default_caches_pg(self, storage, batch_size: int): """Stream default caches from PostgreSQL - yields batches Args: storage: PGKVStorage instance batch_size: Size of each batch to yield Yields: Dictionary batches of cache entries """ from lightrag.kg.postgres_impl import namespace_to_table_name table_name = namespace_to_table_name(storage.namespace) offset = 0 while True: query = f""" SELECT id as key, original_prompt, return_value, chunk_id, cache_type, queryparam, EXTRACT(EPOCH FROM create_time)::BIGINT as create_time, EXTRACT(EPOCH FROM update_time)::BIGINT as update_time FROM {table_name} WHERE workspace = $1 AND (id LIKE 'default:extract:%' OR id LIKE 'default:summary:%') ORDER BY id LIMIT $2 OFFSET $3 """ results = await storage.db.query( query, [storage.workspace, batch_size, offset], multirows=True ) if not results: break batch = {} for row in results: cache_entry = { "return": row.get("return_value", ""), "cache_type": row.get("cache_type"), "original_prompt": row.get("original_prompt", ""), "chunk_id": row.get("chunk_id"), "queryparam": row.get("queryparam"), "create_time": row.get("create_time", 0), "update_time": row.get("update_time", 0), } batch[row["key"]] = cache_entry if batch: yield batch if len(results) < batch_size: break offset += batch_size await asyncio.sleep(0) async def stream_default_caches_mongo(self, storage, batch_size: int): """Stream default caches from MongoDB - yields batches Args: storage: MongoKVStorage instance batch_size: Size of each batch to yield Yields: Dictionary batches of cache entries """ query = {"_id": {"$regex": "^default:(extract|summary):"}} cursor = 
storage._data.find(query).batch_size(batch_size) batch = {} async for doc in cursor: doc_copy = doc.copy() key = doc_copy.pop("_id") # Filter MongoDB/database-specific fields for field_name in ["namespace", "workspace", "_id", "content"]: doc_copy.pop(field_name, None) batch[key] = doc_copy.copy() if len(batch) >= batch_size: yield batch batch = {} # Yield remaining items if batch: yield batch async def stream_default_caches( self, storage, storage_name: str, batch_size: int = None ): """Stream default caches from any storage type - unified interface Args: storage: Storage instance storage_name: Storage type name batch_size: Size of each batch to yield (defaults to self.batch_size) Yields: Dictionary batches of cache entries """ if batch_size is None: batch_size = self.batch_size if storage_name == "JsonKVStorage": async for batch in self.stream_default_caches_json(storage, batch_size): yield batch elif storage_name == "RedisKVStorage": async for batch in self.stream_default_caches_redis(storage, batch_size): yield batch elif storage_name == "PGKVStorage": async for batch in self.stream_default_caches_pg(storage, batch_size): yield batch elif storage_name == "MongoKVStorage": async for batch in self.stream_default_caches_mongo(storage, batch_size): yield batch else: raise ValueError(f"Unsupported storage type: {storage_name}") async def count_cache_types(self, cache_data: Dict[str, Any]) -> Dict[str, int]: """Count cache entries by type Args: cache_data: Dictionary of cache entries Returns: Dictionary with counts for each cache type """ counts = { "extract": 0, "summary": 0, } for key in cache_data.keys(): if key.startswith("default:extract:"): counts["extract"] += 1 elif key.startswith("default:summary:"): counts["summary"] += 1 return counts def print_header(self): """Print tool header""" print("\n" + "=" * 50) print("LLM Cache Migration Tool - LightRAG") print("=" * 50) def print_storage_types(self): """Print available storage types""" print("\nSupported KV Storage Types:") for key, value in STORAGE_TYPES.items(): print(f"[{key}] {value}") def format_workspace(self, workspace: str) -> str: """Format workspace name with highlighting Args: workspace: Workspace name (may be empty) Returns: Formatted workspace string with ANSI color codes """ if workspace: return f"{BOLD_CYAN}{workspace}{RESET}" else: return f"{BOLD_CYAN}(default){RESET}" def format_storage_name(self, storage_name: str) -> str: """Format storage type name with highlighting Args: storage_name: Storage type name Returns: Formatted storage name string with ANSI color codes """ return f"{BOLD_CYAN}{storage_name}{RESET}" async def setup_storage( self, storage_type: str, use_streaming: bool = False, exclude_storage_name: str = None, ) -> tuple: """Setup and initialize storage with config.ini fallback support Args: storage_type: Type label (source/target) use_streaming: If True, only count records without loading. If False, load all data (legacy mode) exclude_storage_name: Storage type to exclude from selection (e.g., to prevent selecting same as source) Returns: Tuple of (storage_instance, storage_name, workspace, total_count) Returns (None, None, None, 0) if user chooses to exit """ print(f"\n=== {storage_type} Storage Setup ===") # Filter and remap available storage types if exclusion is specified if exclude_storage_name: # Get available storage types (excluding source) available_list = [ (k, v) for k, v in STORAGE_TYPES.items() if v != exclude_storage_name ] # Remap to sequential numbering (1, 2, 3...) 
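    # setup_storage() below serves both source and target selection; passing
    # exclude_storage_name removes the source backend from the menu and remaps
    # the remaining choices to a contiguous 1..N numbering.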
    async def setup_storage(
        self,
        storage_type: str,
        use_streaming: bool = False,
        exclude_storage_name: str = None,
    ) -> tuple:
        """Setup and initialize storage with config.ini fallback support

        Args:
            storage_type: Type label (source/target)
            use_streaming: If True, only count records without loading.
                If False, load all data (legacy mode)
            exclude_storage_name: Storage type to exclude from selection
                (e.g., to prevent selecting same as source)

        Returns:
            Tuple of (storage_instance, storage_name, workspace, total_count)
            Returns (None, None, None, 0) if user chooses to exit
        """
        print(f"\n=== {storage_type} Storage Setup ===")

        # Filter and remap available storage types if exclusion is specified
        if exclude_storage_name:
            # Get available storage types (excluding source)
            available_list = [
                (k, v) for k, v in STORAGE_TYPES.items() if v != exclude_storage_name
            ]

            # Remap to sequential numbering (1, 2, 3...)
            remapped_types = {
                str(i + 1): name for i, (_, name) in enumerate(available_list)
            }

            # Print available types with new sequential numbers
            print(
                f"\nAvailable Storage Types for Target (source: {exclude_storage_name} excluded):"
            )
            for key, value in remapped_types.items():
                print(f"[{key}] {value}")

            available_types = remapped_types
        else:
            # For source storage, use original numbering
            available_types = STORAGE_TYPES.copy()
            self.print_storage_types()

        # Generate dynamic prompt based on number of options
        num_options = len(available_types)
        if num_options == 1:
            prompt_range = "1"
        else:
            prompt_range = f"1-{num_options}"

        # Custom input handling with exit support
        while True:
            choice = input(
                f"\nSelect {storage_type} storage type ({prompt_range}) (Press Enter to exit): "
            ).strip()

            # Check for exit
            if choice == "" or choice == "0":
                print("\n✓ Migration cancelled by user")
                return None, None, None, 0

            # Check if choice is valid
            if choice in available_types:
                break

            print(
                f"✗ Invalid choice. Please enter one of: {', '.join(available_types.keys())}"
            )

        storage_name = available_types[choice]

        # Check configuration (warnings only, doesn't block)
        print("\nChecking configuration...")
        self.check_env_vars(storage_name)

        # Get workspace
        workspace = self.get_workspace_for_storage(storage_name)

        # Initialize storage (real validation point)
        print(f"\nInitializing {storage_type} storage...")
        try:
            storage = await self.initialize_storage(storage_name, workspace)
            print(f"- Storage Type: {storage_name}")
            print(f"- Workspace: {workspace if workspace else '(default)'}")
            print("- Connection Status: ✓ Success")

            # Show configuration source for transparency
            if storage_name == "RedisKVStorage":
                config_source = (
                    "environment variable"
                    if "REDIS_URI" in os.environ
                    else "config.ini or default"
                )
                print(f"- Configuration Source: {config_source}")
            elif storage_name == "PGKVStorage":
                config_source = (
                    "environment variables"
                    if all(
                        var in os.environ
                        for var in STORAGE_ENV_REQUIREMENTS[storage_name]
                    )
                    else "config.ini or defaults"
                )
                print(f"- Configuration Source: {config_source}")
            elif storage_name == "MongoKVStorage":
                config_source = (
                    "environment variables"
                    if all(
                        var in os.environ
                        for var in STORAGE_ENV_REQUIREMENTS[storage_name]
                    )
                    else "config.ini or defaults"
                )
                print(f"- Configuration Source: {config_source}")
        except Exception as e:
            print(f"✗ Initialization failed: {e}")
            print(f"\nFor {storage_name}, you can configure using:")
            print(" 1. Environment variables (highest priority)")

            # Show specific environment variable requirements
            if storage_name in STORAGE_ENV_REQUIREMENTS:
                for var in STORAGE_ENV_REQUIREMENTS[storage_name]:
                    print(f" - {var}")

            print(" 2. config.ini file (medium priority)")
            if storage_name == "RedisKVStorage":
                print(" [redis]")
                print(" uri = redis://localhost:6379")
            elif storage_name == "PGKVStorage":
                print(" [postgres]")
                print(" host = localhost")
                print(" port = 5432")
                print(" user = postgres")
                print(" password = yourpassword")
                print(" database = lightrag")
            elif storage_name == "MongoKVStorage":
                print(" [mongodb]")
                print(" uri = mongodb://root:root@localhost:27017/")
                print(" database = LightRAG")

            return None, None, None, 0

        # Count cache records efficiently
        print(f"\n{'Counting' if use_streaming else 'Loading'} cache records...")
        try:
            if use_streaming:
                # Use efficient counting without loading data
                total_count = await self.count_default_caches(storage, storage_name)
                print(f"- Total: {total_count:,} records")
            else:
                # Legacy mode: load all data
                cache_data = await self.get_default_caches(storage, storage_name)
                counts = await self.count_cache_types(cache_data)
                total_count = len(cache_data)
                print(f"- default:extract: {counts['extract']:,} records")
                print(f"- default:summary: {counts['summary']:,} records")
                print(f"- Total: {total_count:,} records")
        except Exception as e:
            print(f"✗ {'Counting' if use_streaming else 'Loading'} failed: {e}")
            return None, None, None, 0

        return storage, storage_name, workspace, total_count

    async def migrate_caches(
        self, source_data: Dict[str, Any], target_storage, target_storage_name: str
    ) -> MigrationStats:
        """Migrate caches in batches with error tracking (Legacy mode - loads all data)

        Args:
            source_data: Source cache data
            target_storage: Target storage instance
            target_storage_name: Target storage type name

        Returns:
            MigrationStats object with migration results and errors
        """
        stats = MigrationStats()
        stats.total_source_records = len(source_data)

        if stats.total_source_records == 0:
            print("\nNo records to migrate")
            return stats

        # Convert to list for batching
        items = list(source_data.items())
        stats.total_batches = (
            stats.total_source_records + self.batch_size - 1
        ) // self.batch_size

        print("\n=== Starting Migration ===")

        for batch_idx in range(stats.total_batches):
            start_idx = batch_idx * self.batch_size
            end_idx = min((batch_idx + 1) * self.batch_size, stats.total_source_records)
            batch_items = items[start_idx:end_idx]
            batch_data = dict(batch_items)

            # Determine current cache type for display
            current_key = batch_items[0][0]
            cache_type = "extract" if "extract" in current_key else "summary"

            try:
                # Attempt to write batch
                await target_storage.upsert(batch_data)

                # Success - update stats
                stats.successful_batches += 1
                stats.successful_records += len(batch_data)

                # Calculate progress
                progress = (end_idx / stats.total_source_records) * 100
                bar_length = 20
                filled_length = int(bar_length * end_idx // stats.total_source_records)
                bar = "█" * filled_length + "░" * (bar_length - filled_length)

                print(
                    f"Batch {batch_idx + 1}/{stats.total_batches}: {bar} "
                    f"{end_idx:,}/{stats.total_source_records:,} ({progress:.0f}%) - "
                    f"default:{cache_type} ✓"
                )

            except Exception as e:
                # Error - record and continue
                stats.add_error(batch_idx + 1, e, len(batch_data))
                print(
                    f"Batch {batch_idx + 1}/{stats.total_batches}: ✗ FAILED - "
                    f"{type(e).__name__}: {str(e)}"
                )

        # Final persist
        print("\nPersisting data to disk...")
        try:
            await target_storage.index_done_callback()
            print("✓ Data persisted successfully")
        except Exception as e:
            print(f"✗ Persist failed: {e}")
            stats.add_error(0, e, 0)  # batch 0 = persist error

        return stats
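    # Batch-count math used above and below is ceiling division:
    # (total + batch_size - 1) // batch_size, e.g. 2,500 records with
    # batch_size=1,000 -> 3 batches.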
target_storage_name: str, total_records: int, ) -> MigrationStats: """Migrate caches using streaming approach - minimal memory footprint Args: source_storage: Source storage instance source_storage_name: Source storage type name target_storage: Target storage instance target_storage_name: Target storage type name total_records: Total number of records to migrate Returns: MigrationStats object with migration results and errors """ stats = MigrationStats() stats.total_source_records = total_records if stats.total_source_records == 0: print("\nNo records to migrate") return stats # Calculate total batches stats.total_batches = (total_records + self.batch_size - 1) // self.batch_size print("\n=== Starting Streaming Migration ===") print( f"💡 Memory-optimized mode: Processing {self.batch_size:,} records at a time\n" ) batch_idx = 0 # Stream batches from source and write to target immediately async for batch in self.stream_default_caches( source_storage, source_storage_name ): batch_idx += 1 # Determine current cache type for display if batch: first_key = next(iter(batch.keys())) cache_type = "extract" if "extract" in first_key else "summary" else: cache_type = "unknown" try: # Write batch to target storage await target_storage.upsert(batch) # Success - update stats stats.successful_batches += 1 stats.successful_records += len(batch) # Calculate progress with known total progress = (stats.successful_records / total_records) * 100 bar_length = 20 filled_length = int( bar_length * stats.successful_records // total_records ) bar = "█" * filled_length + "░" * (bar_length - filled_length) print( f"Batch {batch_idx}/{stats.total_batches}: {bar} " f"{stats.successful_records:,}/{total_records:,} ({progress:.1f}%) - " f"default:{cache_type} ✓" ) except Exception as e: # Error - record and continue stats.add_error(batch_idx, e, len(batch)) print( f"Batch {batch_idx}/{stats.total_batches}: ✗ FAILED - " f"{type(e).__name__}: {str(e)}" ) # Final persist print("\nPersisting data to disk...") try: await target_storage.index_done_callback() print("✓ Data persisted successfully") except Exception as e: print(f"✗ Persist failed: {e}") stats.add_error(0, e, 0) # batch 0 = persist error return stats def print_migration_report(self, stats: MigrationStats): """Print comprehensive migration report Args: stats: MigrationStats object with migration results """ print("\n" + "=" * 60) print("Migration Complete - Final Report") print("=" * 60) # Overall statistics print("\n📊 Statistics:") print(f" Total source records: {stats.total_source_records:,}") print(f" Total batches: {stats.total_batches:,}") print(f" Successful batches: {stats.successful_batches:,}") print(f" Failed batches: {stats.failed_batches:,}") print(f" Successfully migrated: {stats.successful_records:,}") print(f" Failed to migrate: {stats.failed_records:,}") # Success rate success_rate = ( (stats.successful_records / stats.total_source_records * 100) if stats.total_source_records > 0 else 0 ) print(f" Success rate: {success_rate:.2f}%") # Error details if stats.errors: print(f"\n⚠️ Errors encountered: {len(stats.errors)}") print("\nError Details:") print("-" * 60) # Group errors by type error_types = {} for error in stats.errors: err_type = error["error_type"] error_types[err_type] = error_types.get(err_type, 0) + 1 print("\nError Summary:") for err_type, count in sorted(error_types.items(), key=lambda x: -x[1]): print(f" - {err_type}: {count} occurrence(s)") print("\nFirst 5 errors:") for i, error in enumerate(stats.errors[:5], 1): print(f"\n {i}. 
Batch {error['batch']}") print(f" Type: {error['error_type']}") print(f" Message: {error['error_msg']}") print(f" Records lost: {error['records_lost']:,}") if len(stats.errors) > 5: print(f"\n ... and {len(stats.errors) - 5} more errors") print("\n" + "=" * 60) print("⚠️ WARNING: Migration completed with errors!") print(" Please review the error details above.") print("=" * 60) else: print("\n" + "=" * 60) print("✓ SUCCESS: All records migrated successfully!") print("=" * 60) async def run(self): """Run the migration tool with streaming approach and early validation""" try: # Initialize shared storage (REQUIRED for storage classes to work) from lightrag.kg.shared_storage import initialize_share_data initialize_share_data(workers=1) # Print header self.print_header() # Setup source storage with streaming (only count, don't load all data) ( self.source_storage, source_storage_name, self.source_workspace, source_count, ) = await self.setup_storage("Source", use_streaming=True) # Check if user cancelled (setup_storage returns None for all fields) if self.source_storage is None: return # Check if there are at least 2 storage types available available_count = self.count_available_storage_types() if available_count <= 1: print("\n" + "=" * 60) print("⚠️ Warning: Migration Not Possible") print("=" * 60) print(f"Only {available_count} storage type(s) available.") print("Migration requires at least 2 different storage types.") print("\nTo enable migration, configure additional storage:") print(" 1. Set environment variables, OR") print(" 2. Update config.ini file") print("\nSupported storage types:") for name in STORAGE_TYPES.values(): if name != source_storage_name: print(f" - {name}") if name in STORAGE_ENV_REQUIREMENTS: for var in STORAGE_ENV_REQUIREMENTS[name]: print(f" Required: {var}") print("=" * 60) # Cleanup await self.source_storage.finalize() return if source_count == 0: print("\n⚠️ Source storage has no cache records to migrate") # Cleanup await self.source_storage.finalize() return # Setup target storage with streaming (only count, don't load all data) # Exclude source storage type from target selection ( self.target_storage, target_storage_name, self.target_workspace, target_count, ) = await self.setup_storage( "Target", use_streaming=True, exclude_storage_name=source_storage_name ) if not self.target_storage: print("\n✗ Target storage setup failed") # Cleanup source await self.source_storage.finalize() return # Show migration summary print("\n" + "=" * 50) print("Migration Confirmation") print("=" * 50) print( f"Source: {self.format_storage_name(source_storage_name)} (workspace: {self.format_workspace(self.source_workspace)}) - {source_count:,} records" ) print( f"Target: {self.format_storage_name(target_storage_name)} (workspace: {self.format_workspace(self.target_workspace)}) - {target_count:,} records" ) print(f"Batch Size: {self.batch_size:,} records/batch") print("Memory Mode: Streaming (memory-optimized)") if target_count > 0: print( f"\n⚠️ Warning: Target storage already has {target_count:,} records" ) print("Migration will overwrite records with the same keys") # Confirm migration confirm = input("\nContinue? 
(y/n): ").strip().lower() if confirm != "y": print("\n✗ Migration cancelled") # Cleanup await self.source_storage.finalize() await self.target_storage.finalize() return # Perform streaming migration with error tracking stats = await self.migrate_caches_streaming( self.source_storage, source_storage_name, self.target_storage, target_storage_name, source_count, ) # Print comprehensive migration report self.print_migration_report(stats) # Cleanup await self.source_storage.finalize() await self.target_storage.finalize() except KeyboardInterrupt: print("\n\n✗ Migration interrupted by user") except Exception as e: print(f"\n✗ Migration failed: {e}") import traceback traceback.print_exc() finally: # Ensure cleanup if self.source_storage: try: await self.source_storage.finalize() except Exception: pass if self.target_storage: try: await self.target_storage.finalize() except Exception: pass # Finalize shared storage try: from lightrag.kg.shared_storage import finalize_share_data finalize_share_data() except Exception: pass async def main(): """Main entry point""" tool = MigrationTool() await tool.run() if __name__ == "__main__": asyncio.run(main())
# ---------------------------------------------------------------------------
# HKUDS/LightRAG: lightrag/llm/gemini.py (MIT License)
# ---------------------------------------------------------------------------
""" Gemini LLM binding for LightRAG. This module provides asynchronous helpers that adapt Google's Gemini models to the same interface used by the rest of the LightRAG LLM bindings. The implementation mirrors the OpenAI helpers while relying on the official ``google-genai`` client under the hood. """ from __future__ import annotations import os from collections.abc import AsyncIterator from functools import lru_cache from typing import Any import numpy as np from tenacity import ( retry, stop_after_attempt, wait_exponential, retry_if_exception_type, ) from lightrag.utils import ( logger, remove_think_tags, safe_unicode_decode, wrap_embedding_func_with_attrs, ) import pipmaster as pm # Install the Google Gemini client and its dependencies on demand if not pm.is_installed("google-genai"): pm.install("google-genai") if not pm.is_installed("google-api-core"): pm.install("google-api-core") from google import genai # type: ignore from google.genai import types # type: ignore from google.api_core import exceptions as google_api_exceptions # type: ignore class InvalidResponseError(Exception): """Custom exception class for triggering retry mechanism when Gemini returns empty responses""" pass @lru_cache(maxsize=8) def _get_gemini_client( api_key: str, base_url: str | None, timeout: int | None = None ) -> genai.Client: """ Create (or fetch cached) Gemini client. Args: api_key: Google Gemini API key (not used in Vertex AI mode). base_url: Optional custom API endpoint. timeout: Optional request timeout in milliseconds. Returns: genai.Client: Configured Gemini client instance. """ client_kwargs: dict[str, Any] = {} # Add Vertex AI support use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true" if use_vertexai: # Vertex AI mode: use project/location, NOT api_key client_kwargs["vertexai"] = True project = os.getenv("GOOGLE_CLOUD_PROJECT") if project: location = os.getenv("GOOGLE_CLOUD_LOCATION", "us-central1") client_kwargs["project"] = project if location: client_kwargs["location"] = location else: raise ValueError( "GOOGLE_CLOUD_PROJECT must be set when using Vertex AI mode" ) else: # Standard Gemini API mode: use api_key client_kwargs["api_key"] = api_key if base_url and base_url != "DEFAULT_GEMINI_ENDPOINT" or timeout is not None: try: http_options_kwargs = {} if base_url and base_url != "DEFAULT_GEMINI_ENDPOINT": http_options_kwargs["base_url"] = base_url if timeout is not None: http_options_kwargs["timeout"] = timeout client_kwargs["http_options"] = types.HttpOptions(**http_options_kwargs) except Exception as e: logger.error("Failed to apply custom Gemini http_options: %s", e) raise e return genai.Client(**client_kwargs) def _ensure_api_key(api_key: str | None) -> str: # In Vertex AI mode, API key is not required use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true" if use_vertexai: # Return empty string for Vertex AI mode (not used) return "" key = api_key or os.getenv("LLM_BINDING_API_KEY") or os.getenv("GEMINI_API_KEY") if not key: raise ValueError( "Gemini API key not provided. " "Set LLM_BINDING_API_KEY or GEMINI_API_KEY in the environment." 
) return key def _build_generation_config( base_config: dict[str, Any] | None, system_prompt: str | None, keyword_extraction: bool, ) -> types.GenerateContentConfig | None: config_data = dict(base_config or {}) if system_prompt: if config_data.get("system_instruction"): config_data["system_instruction"] = ( f"{config_data['system_instruction']}\n{system_prompt}" ) else: config_data["system_instruction"] = system_prompt if keyword_extraction and not config_data.get("response_mime_type"): config_data["response_mime_type"] = "application/json" # Remove entries that are explicitly set to None to avoid type errors sanitized = { key: value for key, value in config_data.items() if value is not None and value != "" } if not sanitized: return None return types.GenerateContentConfig(**sanitized) def _format_history_messages(history_messages: list[dict[str, Any]] | None) -> str: if not history_messages: return "" history_lines: list[str] = [] for message in history_messages: role = message.get("role", "user") content = message.get("content", "") history_lines.append(f"[{role}] {content}") return "\n".join(history_lines) def _extract_response_text( response: Any, extract_thoughts: bool = False ) -> tuple[str, str]: """ Extract text content from Gemini response, separating regular content from thoughts. Args: response: Gemini API response object extract_thoughts: Whether to extract thought content separately Returns: Tuple of (regular_text, thought_text) """ candidates = getattr(response, "candidates", None) if not candidates: return ("", "") regular_parts: list[str] = [] thought_parts: list[str] = [] for candidate in candidates: if not getattr(candidate, "content", None): continue # Use 'or []' to handle None values from parts attribute for part in getattr(candidate.content, "parts", None) or []: text = getattr(part, "text", None) if not text: continue # Check if this part is thought content using the 'thought' attribute is_thought = getattr(part, "thought", False) if is_thought and extract_thoughts: thought_parts.append(text) elif not is_thought: regular_parts.append(text) return ("\n".join(regular_parts), "\n".join(thought_parts)) @retry( stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=60), retry=( retry_if_exception_type(google_api_exceptions.InternalServerError) | retry_if_exception_type(google_api_exceptions.ServiceUnavailable) | retry_if_exception_type(google_api_exceptions.ResourceExhausted) | retry_if_exception_type(google_api_exceptions.GatewayTimeout) | retry_if_exception_type(google_api_exceptions.BadGateway) | retry_if_exception_type(google_api_exceptions.DeadlineExceeded) | retry_if_exception_type(google_api_exceptions.Aborted) | retry_if_exception_type(google_api_exceptions.Unknown) | retry_if_exception_type(InvalidResponseError) ), ) async def gemini_complete_if_cache( model: str, prompt: str, system_prompt: str | None = None, history_messages: list[dict[str, Any]] | None = None, enable_cot: bool = False, base_url: str | None = None, api_key: str | None = None, token_tracker: Any | None = None, stream: bool | None = None, keyword_extraction: bool = False, generation_config: dict[str, Any] | None = None, timeout: int | None = None, **_: Any, ) -> str | AsyncIterator[str]: """ Complete a prompt using Gemini's API with Chain of Thought (COT) support. This function supports automatic integration of reasoning content from Gemini models that provide Chain of Thought capabilities via the thinking_config API feature. 
COT Integration: - When enable_cot=True: Thought content is wrapped in <think>...</think> tags - When enable_cot=False: Thought content is filtered out, only regular content returned - Thought content is identified by the 'thought' attribute on response parts - Requires thinking_config to be enabled in generation_config for API to return thoughts Args: model: The Gemini model to use. prompt: The prompt to complete. system_prompt: Optional system prompt to include. history_messages: Optional list of previous messages in the conversation. api_key: Optional Gemini API key. If None, uses environment variable. base_url: Optional custom API endpoint. generation_config: Optional generation configuration dict. keyword_extraction: Whether to use JSON response format. token_tracker: Optional token usage tracker for monitoring API usage. stream: Whether to stream the response. hashing_kv: Storage interface (for interface parity with other bindings). enable_cot: Whether to include Chain of Thought content in the response. timeout: Request timeout in seconds (will be converted to milliseconds for Gemini API). **_: Additional keyword arguments (ignored). Returns: The completed text (with COT content if enable_cot=True) or an async iterator of text chunks if streaming. COT content is wrapped in <think>...</think> tags. Raises: RuntimeError: If the response from Gemini is empty. ValueError: If API key is not provided or configured. """ key = _ensure_api_key(api_key) # Convert timeout from seconds to milliseconds for Gemini API timeout_ms = timeout * 1000 if timeout else None client = _get_gemini_client(key, base_url, timeout_ms) history_block = _format_history_messages(history_messages) prompt_sections = [] if history_block: prompt_sections.append(history_block) prompt_sections.append(f"[user] {prompt}") combined_prompt = "\n".join(prompt_sections) config_obj = _build_generation_config( generation_config, system_prompt=system_prompt, keyword_extraction=keyword_extraction, ) request_kwargs: dict[str, Any] = { "model": model, "contents": [combined_prompt], } if config_obj is not None: request_kwargs["config"] = config_obj if stream: async def _async_stream() -> AsyncIterator[str]: # COT state tracking for streaming cot_active = False cot_started = False initial_content_seen = False usage_metadata = None try: # Use native async streaming from genai SDK # Note: generate_content_stream returns Awaitable[AsyncIterator], need to await first stream = await client.aio.models.generate_content_stream( **request_kwargs ) async for chunk in stream: usage = getattr(chunk, "usage_metadata", None) if usage is not None: usage_metadata = usage # Extract both regular and thought content regular_text, thought_text = _extract_response_text( chunk, extract_thoughts=True ) if enable_cot: # Process regular content if regular_text: if not initial_content_seen: initial_content_seen = True # Close COT section if it was active if cot_active: yield "</think>" cot_active = False # Process and yield regular content if "\\u" in regular_text: regular_text = safe_unicode_decode( regular_text.encode("utf-8") ) yield regular_text # Process thought content if thought_text: if not initial_content_seen and not cot_started: # Start COT section yield "<think>" cot_active = True cot_started = True # Yield thought content if COT is active if cot_active: if "\\u" in thought_text: thought_text = safe_unicode_decode( thought_text.encode("utf-8") ) yield thought_text else: # COT disabled - only yield regular content if regular_text: if "\\u" in 
regular_text: regular_text = safe_unicode_decode( regular_text.encode("utf-8") ) yield regular_text # Ensure COT is properly closed if still active if cot_active: yield "</think>" cot_active = False except Exception as exc: # Try to close COT tag before re-raising if cot_active: try: yield "</think>" except Exception: pass raise exc finally: # Track token usage after streaming completes if token_tracker and usage_metadata: token_tracker.add_usage( { "prompt_tokens": getattr( usage_metadata, "prompt_token_count", 0 ), "completion_tokens": getattr( usage_metadata, "candidates_token_count", 0 ), "total_tokens": getattr( usage_metadata, "total_token_count", 0 ), } ) return _async_stream() # Non-streaming: use native async client response = await client.aio.models.generate_content(**request_kwargs) # Extract both regular text and thought text regular_text, thought_text = _extract_response_text(response, extract_thoughts=True) # Apply COT filtering logic based on enable_cot parameter if enable_cot: # Include thought content wrapped in <think> tags if thought_text and thought_text.strip(): if not regular_text or regular_text.strip() == "": # Only thought content available final_text = f"<think>{thought_text}</think>" else: # Both content types present: prepend thought to regular content final_text = f"<think>{thought_text}</think>{regular_text}" else: # No thought content, use regular content only final_text = regular_text or "" else: # Filter out thought content, return only regular content final_text = regular_text or "" if not final_text: raise InvalidResponseError("Gemini response did not contain any text content.") if "\\u" in final_text: final_text = safe_unicode_decode(final_text.encode("utf-8")) final_text = remove_think_tags(final_text) usage = getattr(response, "usage_metadata", None) if token_tracker and usage: token_tracker.add_usage( { "prompt_tokens": getattr(usage, "prompt_token_count", 0), "completion_tokens": getattr(usage, "candidates_token_count", 0), "total_tokens": getattr(usage, "total_token_count", 0), } ) logger.debug("Gemini response length: %s", len(final_text)) return final_text async def gemini_model_complete( prompt: str, system_prompt: str | None = None, history_messages: list[dict[str, Any]] | None = None, keyword_extraction: bool = False, **kwargs: Any, ) -> str | AsyncIterator[str]: hashing_kv = kwargs.get("hashing_kv") model_name = None if hashing_kv is not None: model_name = hashing_kv.global_config.get("llm_model_name") if model_name is None: model_name = kwargs.pop("model_name", None) if model_name is None: raise ValueError("Gemini model name not provided in configuration.") return await gemini_complete_if_cache( model_name, prompt, system_prompt=system_prompt, history_messages=history_messages, keyword_extraction=keyword_extraction, **kwargs, ) @wrap_embedding_func_with_attrs( embedding_dim=1536, max_token_size=2048, model_name="gemini-embedding-001" ) @retry( stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=60), retry=( retry_if_exception_type(google_api_exceptions.InternalServerError) | retry_if_exception_type(google_api_exceptions.ServiceUnavailable) | retry_if_exception_type(google_api_exceptions.ResourceExhausted) | retry_if_exception_type(google_api_exceptions.GatewayTimeout) | retry_if_exception_type(google_api_exceptions.BadGateway) | retry_if_exception_type(google_api_exceptions.DeadlineExceeded) | retry_if_exception_type(google_api_exceptions.Aborted) | retry_if_exception_type(google_api_exceptions.Unknown) ), ) async def 
gemini_embed( texts: list[str], model: str = "gemini-embedding-001", base_url: str | None = None, api_key: str | None = None, embedding_dim: int | None = None, max_token_size: int | None = None, task_type: str = "RETRIEVAL_DOCUMENT", timeout: int | None = None, token_tracker: Any | None = None, ) -> np.ndarray: """Generate embeddings for a list of texts using Gemini's API. This function uses Google's Gemini embedding model to generate text embeddings. It supports dynamic dimension control and automatic normalization for dimensions less than 3072. Args: texts: List of texts to embed. model: The Gemini embedding model to use. Default is "gemini-embedding-001". base_url: Optional custom API endpoint. api_key: Optional Gemini API key. If None, uses environment variables. embedding_dim: Optional embedding dimension for dynamic dimension reduction. **IMPORTANT**: This parameter is automatically injected by the EmbeddingFunc wrapper. Do NOT manually pass this parameter when calling the function directly. The dimension is controlled by the @wrap_embedding_func_with_attrs decorator or the EMBEDDING_DIM environment variable. Supported range: 128-3072. Recommended values: 768, 1536, 3072. max_token_size: Maximum tokens per text. This parameter is automatically injected by the EmbeddingFunc wrapper when the underlying function signature supports it (via inspect.signature check). Gemini API will automatically truncate texts exceeding this limit (autoTruncate=True by default), so no client-side truncation is needed. task_type: Task type for embedding optimization. Default is "RETRIEVAL_DOCUMENT". Supported types: SEMANTIC_SIMILARITY, CLASSIFICATION, CLUSTERING, RETRIEVAL_DOCUMENT, RETRIEVAL_QUERY, CODE_RETRIEVAL_QUERY, QUESTION_ANSWERING, FACT_VERIFICATION. timeout: Request timeout in seconds (will be converted to milliseconds for Gemini API). token_tracker: Optional token usage tracker for monitoring API usage. Returns: A numpy array of embeddings, one per input text. For dimensions < 3072, the embeddings are L2-normalized to ensure optimal semantic similarity performance. Raises: ValueError: If API key is not provided or configured. RuntimeError: If the response from Gemini is invalid or empty. Note: - For dimension 3072: Embeddings are already normalized by the API - For dimensions < 3072: Embeddings are L2-normalized after retrieval - Normalization ensures accurate semantic similarity via cosine distance - Gemini API automatically truncates texts exceeding max_token_size (autoTruncate=True) """ # Note: max_token_size is received but not used for client-side truncation. # Gemini API handles truncation automatically with autoTruncate=True (default). 
_ = max_token_size # Acknowledge parameter to avoid unused variable warning key = _ensure_api_key(api_key) # Convert timeout from seconds to milliseconds for Gemini API timeout_ms = timeout * 1000 if timeout else None client = _get_gemini_client(key, base_url, timeout_ms) # Prepare embedding configuration config_kwargs: dict[str, Any] = {} # Add task_type to config if task_type: config_kwargs["task_type"] = task_type # Add output_dimensionality if embedding_dim is provided if embedding_dim is not None: config_kwargs["output_dimensionality"] = embedding_dim # Create config object if we have parameters config_obj = types.EmbedContentConfig(**config_kwargs) if config_kwargs else None request_kwargs: dict[str, Any] = { "model": model, "contents": texts, } if config_obj is not None: request_kwargs["config"] = config_obj # Use native async client for embedding response = await client.aio.models.embed_content(**request_kwargs) # Extract embeddings from response if not hasattr(response, "embeddings") or not response.embeddings: raise RuntimeError("Gemini response did not contain embeddings.") # Convert embeddings to numpy array embeddings = np.array( [np.array(e.values, dtype=np.float32) for e in response.embeddings] ) # Apply L2 normalization for dimensions < 3072 # The 3072 dimension embedding is already normalized by Gemini API if embedding_dim and embedding_dim < 3072: # Normalize each embedding vector to unit length norms = np.linalg.norm(embeddings, axis=1, keepdims=True) # Avoid division by zero norms = np.where(norms == 0, 1, norms) embeddings = embeddings / norms logger.debug( f"Applied L2 normalization to {len(embeddings)} embeddings of dimension {embedding_dim}" ) # Track token usage if tracker is provided # Note: Gemini embedding API may not provide usage metadata if token_tracker and hasattr(response, "usage_metadata"): usage = response.usage_metadata token_counts = { "prompt_tokens": getattr(usage, "prompt_token_count", 0), "total_tokens": getattr(usage, "total_token_count", 0), } token_tracker.add_usage(token_counts) logger.debug( f"Generated {len(embeddings)} Gemini embeddings with dimension {embeddings.shape[1]}" ) return embeddings __all__ = [ "gemini_complete_if_cache", "gemini_model_complete", "gemini_embed", ]
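# ---------------------------------------------------------------------------
# A minimal async driver for the two public helpers above, assuming
# GEMINI_API_KEY is set in the environment; the model names and prompts are
# placeholders and error handling is omitted. Note that embedding_dim is
# normally injected by the EmbeddingFunc wrapper, so it is not passed here.
import asyncio

from lightrag.llm.gemini import gemini_complete_if_cache, gemini_embed


async def main() -> None:
    # Non-streaming completion; thought content is filtered unless enable_cot=True
    answer = await gemini_complete_if_cache(
        "gemini-2.0-flash",  # placeholder model name
        "Summarise LightRAG in one sentence.",
        system_prompt="You are a concise assistant.",
        generation_config={"temperature": 0.2},
    )
    print(answer)

    # Embeddings come back as a float32 numpy array, one row per input text
    vectors = await gemini_embed(
        ["hello world", "knowledge graphs"],
        model="gemini-embedding-001",
    )
    print(vectors.shape)


if __name__ == "__main__":
    asyncio.run(main())
# ---------------------------------------------------------------------------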
# ---------------------------------------------------------------------------
# HKUDS/LightRAG: lightrag/evaluation/eval_rag_quality.py (MIT License)
# ---------------------------------------------------------------------------
#!/usr/bin/env python3 """ RAGAS Evaluation Script for LightRAG System Evaluates RAG response quality using RAGAS metrics: - Faithfulness: Is the answer factually accurate based on context? - Answer Relevance: Is the answer relevant to the question? - Context Recall: Is all relevant information retrieved? - Context Precision: Is retrieved context clean without noise? Usage: # Use defaults (sample_dataset.json, http://localhost:9621) python lightrag/evaluation/eval_rag_quality.py # Specify custom dataset python lightrag/evaluation/eval_rag_quality.py --dataset my_test.json python lightrag/evaluation/eval_rag_quality.py -d my_test.json # Specify custom RAG endpoint python lightrag/evaluation/eval_rag_quality.py --ragendpoint http://my-server.com:9621 python lightrag/evaluation/eval_rag_quality.py -r http://my-server.com:9621 # Specify both python lightrag/evaluation/eval_rag_quality.py -d my_test.json -r http://localhost:9621 # Get help python lightrag/evaluation/eval_rag_quality.py --help Results are saved to: lightrag/evaluation/results/ - results_YYYYMMDD_HHMMSS.csv (CSV export for analysis) - results_YYYYMMDD_HHMMSS.json (Full results with details) Technical Notes: - Uses stable RAGAS API (LangchainLLMWrapper) for maximum compatibility - Supports custom OpenAI-compatible endpoints via EVAL_LLM_BINDING_HOST - Enables bypass_n mode for endpoints that don't support 'n' parameter - Deprecation warnings are suppressed for cleaner output """ import argparse import asyncio import csv import json import math import os import sys import time import warnings from datetime import datetime from pathlib import Path from typing import Any, Dict, List import httpx from dotenv import load_dotenv from lightrag.utils import logger # Suppress LangchainLLMWrapper deprecation warning # We use LangchainLLMWrapper for stability and compatibility with all RAGAS versions warnings.filterwarnings( "ignore", message=".*LangchainLLMWrapper is deprecated.*", category=DeprecationWarning, ) # Suppress token usage warning for custom OpenAI-compatible endpoints # Custom endpoints (vLLM, SGLang, etc.) 
often don't return usage information # This is non-critical as token tracking is not required for RAGAS evaluation warnings.filterwarnings( "ignore", message=".*Unexpected type for token usage.*", category=UserWarning, ) # Add parent directory to path sys.path.insert(0, str(Path(__file__).parent.parent.parent)) # use the .env that is inside the current folder # allows to use different .env file for each lightrag instance # the OS environment variables take precedence over the .env file load_dotenv(dotenv_path=".env", override=False) # Conditional imports - will raise ImportError if dependencies not installed try: from datasets import Dataset from ragas import evaluate from ragas.metrics import ( AnswerRelevancy, ContextPrecision, ContextRecall, Faithfulness, ) from ragas.llms import LangchainLLMWrapper from langchain_openai import ChatOpenAI, OpenAIEmbeddings from tqdm.auto import tqdm RAGAS_AVAILABLE = True except ImportError: RAGAS_AVAILABLE = False Dataset = None evaluate = None LangchainLLMWrapper = None CONNECT_TIMEOUT_SECONDS = 180.0 READ_TIMEOUT_SECONDS = 300.0 TOTAL_TIMEOUT_SECONDS = 180.0 def _is_nan(value: Any) -> bool: """Return True when value is a float NaN.""" return isinstance(value, float) and math.isnan(value) class RAGEvaluator: """Evaluate RAG system quality using RAGAS metrics""" def __init__(self, test_dataset_path: str = None, rag_api_url: str = None): """ Initialize evaluator with test dataset Args: test_dataset_path: Path to test dataset JSON file rag_api_url: Base URL of LightRAG API (e.g., http://localhost:9621) If None, will try to read from environment or use default Environment Variables: EVAL_LLM_MODEL: LLM model for evaluation (default: gpt-4o-mini) EVAL_EMBEDDING_MODEL: Embedding model for evaluation (default: text-embedding-3-small) EVAL_LLM_BINDING_API_KEY: API key for LLM (fallback to OPENAI_API_KEY) EVAL_LLM_BINDING_HOST: Custom endpoint URL for LLM (optional) EVAL_EMBEDDING_BINDING_API_KEY: API key for embeddings (fallback: EVAL_LLM_BINDING_API_KEY -> OPENAI_API_KEY) EVAL_EMBEDDING_BINDING_HOST: Custom endpoint URL for embeddings (fallback: EVAL_LLM_BINDING_HOST) Raises: ImportError: If ragas or datasets packages are not installed EnvironmentError: If EVAL_LLM_BINDING_API_KEY and OPENAI_API_KEY are both not set """ # Validate RAGAS dependencies are installed if not RAGAS_AVAILABLE: raise ImportError( "RAGAS dependencies not installed. " "Install with: pip install ragas datasets" ) # Configure evaluation LLM (for RAGAS scoring) eval_llm_api_key = os.getenv("EVAL_LLM_BINDING_API_KEY") or os.getenv( "OPENAI_API_KEY" ) if not eval_llm_api_key: raise EnvironmentError( "EVAL_LLM_BINDING_API_KEY or OPENAI_API_KEY is required for evaluation. " "Set EVAL_LLM_BINDING_API_KEY to use a custom API key, " "or ensure OPENAI_API_KEY is set." 
) eval_model = os.getenv("EVAL_LLM_MODEL", "gpt-4o-mini") eval_llm_base_url = os.getenv("EVAL_LLM_BINDING_HOST") # Configure evaluation embeddings (for RAGAS scoring) # Fallback chain: EVAL_EMBEDDING_BINDING_API_KEY -> EVAL_LLM_BINDING_API_KEY -> OPENAI_API_KEY eval_embedding_api_key = ( os.getenv("EVAL_EMBEDDING_BINDING_API_KEY") or os.getenv("EVAL_LLM_BINDING_API_KEY") or os.getenv("OPENAI_API_KEY") ) eval_embedding_model = os.getenv( "EVAL_EMBEDDING_MODEL", "text-embedding-3-large" ) # Fallback chain: EVAL_EMBEDDING_BINDING_HOST -> EVAL_LLM_BINDING_HOST -> None eval_embedding_base_url = os.getenv("EVAL_EMBEDDING_BINDING_HOST") or os.getenv( "EVAL_LLM_BINDING_HOST" ) # Create LLM and Embeddings instances for RAGAS llm_kwargs = { "model": eval_model, "api_key": eval_llm_api_key, "max_retries": int(os.getenv("EVAL_LLM_MAX_RETRIES", "5")), "request_timeout": int(os.getenv("EVAL_LLM_TIMEOUT", "180")), } embedding_kwargs = { "model": eval_embedding_model, "api_key": eval_embedding_api_key, } if eval_llm_base_url: llm_kwargs["base_url"] = eval_llm_base_url if eval_embedding_base_url: embedding_kwargs["base_url"] = eval_embedding_base_url # Create base LangChain LLM base_llm = ChatOpenAI(**llm_kwargs) self.eval_embeddings = OpenAIEmbeddings(**embedding_kwargs) # Wrap LLM with LangchainLLMWrapper and enable bypass_n mode for custom endpoints # This ensures compatibility with endpoints that don't support the 'n' parameter # by generating multiple outputs through repeated prompts instead of using 'n' parameter try: self.eval_llm = LangchainLLMWrapper( langchain_llm=base_llm, bypass_n=True, # Enable bypass_n to avoid passing 'n' to OpenAI API ) logger.debug("Successfully configured bypass_n mode for LLM wrapper") except Exception as e: logger.warning( "Could not configure LangchainLLMWrapper with bypass_n: %s. 
" "Using base LLM directly, which may cause warnings with custom endpoints.", e, ) self.eval_llm = base_llm if test_dataset_path is None: test_dataset_path = Path(__file__).parent / "sample_dataset.json" if rag_api_url is None: rag_api_url = os.getenv("LIGHTRAG_API_URL", "http://localhost:9621") self.test_dataset_path = Path(test_dataset_path) self.rag_api_url = rag_api_url.rstrip("/") self.results_dir = Path(__file__).parent / "results" self.results_dir.mkdir(exist_ok=True) # Load test dataset self.test_cases = self._load_test_dataset() # Store configuration values for display self.eval_model = eval_model self.eval_embedding_model = eval_embedding_model self.eval_llm_base_url = eval_llm_base_url self.eval_embedding_base_url = eval_embedding_base_url self.eval_max_retries = llm_kwargs["max_retries"] self.eval_timeout = llm_kwargs["request_timeout"] # Display configuration self._display_configuration() def _display_configuration(self): """Display all evaluation configuration settings""" logger.info("Evaluation Models:") logger.info(" • LLM Model: %s", self.eval_model) logger.info(" • Embedding Model: %s", self.eval_embedding_model) # Display LLM endpoint if self.eval_llm_base_url: logger.info(" • LLM Endpoint: %s", self.eval_llm_base_url) logger.info( " • Bypass N-Parameter: Enabled (use LangchainLLMWrapper for compatibility)" ) else: logger.info(" • LLM Endpoint: OpenAI Official API") # Display Embedding endpoint (only if different from LLM) if self.eval_embedding_base_url: if self.eval_embedding_base_url != self.eval_llm_base_url: logger.info( " • Embedding Endpoint: %s", self.eval_embedding_base_url ) # If same as LLM endpoint, no need to display separately elif not self.eval_llm_base_url: # Both using OpenAI - already displayed above pass else: # LLM uses custom endpoint, but embeddings use OpenAI logger.info(" • Embedding Endpoint: OpenAI Official API") logger.info("Concurrency & Rate Limiting:") query_top_k = int(os.getenv("EVAL_QUERY_TOP_K", "10")) logger.info(" • Query Top-K: %s Entities/Relations", query_top_k) logger.info(" • LLM Max Retries: %s", self.eval_max_retries) logger.info(" • LLM Timeout: %s seconds", self.eval_timeout) logger.info("Test Configuration:") logger.info(" • Total Test Cases: %s", len(self.test_cases)) logger.info(" • Test Dataset: %s", self.test_dataset_path.name) logger.info(" • LightRAG API: %s", self.rag_api_url) logger.info(" • Results Directory: %s", self.results_dir.name) def _load_test_dataset(self) -> List[Dict[str, str]]: """Load test cases from JSON file""" if not self.test_dataset_path.exists(): raise FileNotFoundError(f"Test dataset not found: {self.test_dataset_path}") with open(self.test_dataset_path) as f: data = json.load(f) return data.get("test_cases", []) async def generate_rag_response( self, question: str, client: httpx.AsyncClient, ) -> Dict[str, Any]: """ Generate RAG response by calling LightRAG API. Args: question: The user query. client: Shared httpx AsyncClient for connection pooling. Returns: Dictionary with 'answer' and 'contexts' keys. 'contexts' is a list of strings (one per retrieved document). Raises: Exception: If LightRAG API is unavailable. 
""" try: payload = { "query": question, "mode": "mix", "include_references": True, "include_chunk_content": True, # NEW: Request chunk content in references "response_type": "Multiple Paragraphs", "top_k": int(os.getenv("EVAL_QUERY_TOP_K", "10")), } # Get API key from environment for authentication api_key = os.getenv("LIGHTRAG_API_KEY") # Prepare headers with optional authentication headers = {} if api_key: headers["X-API-Key"] = api_key # Single optimized API call - gets both answer AND chunk content response = await client.post( f"{self.rag_api_url}/query", json=payload, headers=headers if headers else None, ) response.raise_for_status() result = response.json() answer = result.get("response", "No response generated") references = result.get("references", []) # DEBUG: Inspect the API response logger.debug("🔍 References Count: %s", len(references)) if references: first_ref = references[0] logger.debug("🔍 First Reference Keys: %s", list(first_ref.keys())) if "content" in first_ref: content_preview = first_ref["content"] if isinstance(content_preview, list) and content_preview: logger.debug( "🔍 Content Preview (first chunk): %s...", content_preview[0][:100], ) elif isinstance(content_preview, str): logger.debug("🔍 Content Preview: %s...", content_preview[:100]) # Extract chunk content from enriched references # Note: content is now a list of chunks per reference (one file may have multiple chunks) contexts = [] for ref in references: content = ref.get("content", []) if isinstance(content, list): # Flatten the list: each chunk becomes a separate context contexts.extend(content) elif isinstance(content, str): # Backward compatibility: if content is still a string (shouldn't happen) contexts.append(content) return { "answer": answer, "contexts": contexts, # List of strings from actual retrieved chunks } except httpx.ConnectError as e: raise Exception( f"❌ Cannot connect to LightRAG API at {self.rag_api_url}\n" f" Make sure LightRAG server is running:\n" f" python -m lightrag.api.lightrag_server\n" f" Error: {str(e)}" ) except httpx.HTTPStatusError as e: raise Exception( f"LightRAG API error {e.response.status_code}: {e.response.text}" ) except httpx.ReadTimeout as e: raise Exception( f"Request timeout after waiting for response\n" f" Question: {question[:100]}...\n" f" Error: {str(e)}" ) except Exception as e: raise Exception(f"Error calling LightRAG API: {type(e).__name__}: {str(e)}") async def evaluate_single_case( self, idx: int, test_case: Dict[str, str], rag_semaphore: asyncio.Semaphore, eval_semaphore: asyncio.Semaphore, client: httpx.AsyncClient, progress_counter: Dict[str, int], position_pool: asyncio.Queue, pbar_creation_lock: asyncio.Lock, ) -> Dict[str, Any]: """ Evaluate a single test case with two-stage pipeline concurrency control Args: idx: Test case index (1-based) test_case: Test case dictionary with question and ground_truth rag_semaphore: Semaphore to control overall concurrency (covers entire function) eval_semaphore: Semaphore to control RAGAS evaluation concurrency (Stage 2) client: Shared httpx AsyncClient for connection pooling progress_counter: Shared dictionary for progress tracking position_pool: Queue of available tqdm position indices pbar_creation_lock: Lock to serialize tqdm creation and prevent race conditions Returns: Evaluation result dictionary """ # rag_semaphore controls the entire evaluation process to prevent # all RAG responses from being generated at once when eval is slow async with rag_semaphore: question = test_case["question"] ground_truth = 
test_case["ground_truth"] # Stage 1: Generate RAG response try: rag_response = await self.generate_rag_response( question=question, client=client ) except Exception as e: logger.error("Error generating response for test %s: %s", idx, str(e)) progress_counter["completed"] += 1 return { "test_number": idx, "question": question, "error": str(e), "metrics": {}, "ragas_score": 0, "timestamp": datetime.now().isoformat(), } # *** CRITICAL FIX: Use actual retrieved contexts, NOT ground_truth *** retrieved_contexts = rag_response["contexts"] # Prepare dataset for RAGAS evaluation with CORRECT contexts eval_dataset = Dataset.from_dict( { "question": [question], "answer": [rag_response["answer"]], "contexts": [retrieved_contexts], "ground_truth": [ground_truth], } ) # Stage 2: Run RAGAS evaluation (controlled by eval_semaphore) # IMPORTANT: Create fresh metric instances for each evaluation to avoid # concurrent state conflicts when multiple tasks run in parallel async with eval_semaphore: pbar = None position = None try: # Acquire a position from the pool for this tqdm progress bar position = await position_pool.get() # Serialize tqdm creation to prevent race conditions # Multiple tasks creating tqdm simultaneously can cause display conflicts async with pbar_creation_lock: # Create tqdm progress bar with assigned position to avoid overlapping # leave=False ensures the progress bar is cleared after completion, # preventing accumulation of completed bars and allowing position reuse pbar = tqdm( total=4, desc=f"Eval-{idx:02d}", position=position, leave=False, ) # Give tqdm time to initialize and claim its screen position await asyncio.sleep(0.05) eval_results = evaluate( dataset=eval_dataset, metrics=[ Faithfulness(), AnswerRelevancy(), ContextRecall(), ContextPrecision(), ], llm=self.eval_llm, embeddings=self.eval_embeddings, _pbar=pbar, ) # Convert to DataFrame (RAGAS v0.3+ API) df = eval_results.to_pandas() # Extract scores from first row scores_row = df.iloc[0] # Extract scores (RAGAS v0.3+ uses .to_pandas()) result = { "test_number": idx, "question": question, "answer": rag_response["answer"][:200] + "..." if len(rag_response["answer"]) > 200 else rag_response["answer"], "ground_truth": ground_truth[:200] + "..." 
if len(ground_truth) > 200 else ground_truth, "project": test_case.get("project", "unknown"), "metrics": { "faithfulness": float(scores_row.get("faithfulness", 0)), "answer_relevance": float( scores_row.get("answer_relevancy", 0) ), "context_recall": float( scores_row.get("context_recall", 0) ), "context_precision": float( scores_row.get("context_precision", 0) ), }, "timestamp": datetime.now().isoformat(), } # Calculate RAGAS score (average of all metrics, excluding NaN values) metrics = result["metrics"] valid_metrics = [v for v in metrics.values() if not _is_nan(v)] ragas_score = ( sum(valid_metrics) / len(valid_metrics) if valid_metrics else 0 ) result["ragas_score"] = round(ragas_score, 4) # Update progress counter progress_counter["completed"] += 1 return result except Exception as e: logger.error("Error evaluating test %s: %s", idx, str(e)) progress_counter["completed"] += 1 return { "test_number": idx, "question": question, "error": str(e), "metrics": {}, "ragas_score": 0, "timestamp": datetime.now().isoformat(), } finally: # Force close progress bar to ensure completion if pbar is not None: pbar.close() # Release the position back to the pool for reuse if position is not None: await position_pool.put(position) async def evaluate_responses(self) -> List[Dict[str, Any]]: """ Evaluate all test cases in parallel with two-stage pipeline and return metrics Returns: List of evaluation results with metrics """ # Get evaluation concurrency from environment (default to 2 for parallel evaluation) max_async = int(os.getenv("EVAL_MAX_CONCURRENT", "2")) logger.info("%s", "=" * 70) logger.info("🚀 Starting RAGAS Evaluation of LightRAG System") logger.info("🔧 RAGAS Evaluation (Stage 2): %s concurrent", max_async) logger.info("%s", "=" * 70) # Create two-stage pipeline semaphores # Stage 1: RAG generation - allow x2 concurrency to keep evaluation fed rag_semaphore = asyncio.Semaphore(max_async * 2) # Stage 2: RAGAS evaluation - primary bottleneck eval_semaphore = asyncio.Semaphore(max_async) # Create progress counter (shared across all tasks) progress_counter = {"completed": 0} # Create position pool for tqdm progress bars # Positions range from 0 to max_async-1, ensuring no overlapping displays position_pool = asyncio.Queue() for i in range(max_async): await position_pool.put(i) # Create lock to serialize tqdm creation and prevent race conditions # This ensures progress bars are created one at a time, avoiding display conflicts pbar_creation_lock = asyncio.Lock() # Create shared HTTP client with connection pooling and proper timeouts # Timeout: 3 minutes for connect, 5 minutes for read (LLM can be slow) timeout = httpx.Timeout( TOTAL_TIMEOUT_SECONDS, connect=CONNECT_TIMEOUT_SECONDS, read=READ_TIMEOUT_SECONDS, ) limits = httpx.Limits( max_connections=(max_async + 1) * 2, # Allow buffer for RAG stage max_keepalive_connections=max_async + 1, ) async with httpx.AsyncClient(timeout=timeout, limits=limits) as client: # Create tasks for all test cases tasks = [ self.evaluate_single_case( idx, test_case, rag_semaphore, eval_semaphore, client, progress_counter, position_pool, pbar_creation_lock, ) for idx, test_case in enumerate(self.test_cases, 1) ] # Run all evaluations in parallel (limited by two-stage semaphores) results = await asyncio.gather(*tasks) return list(results) def _export_to_csv(self, results: List[Dict[str, Any]]) -> Path: """ Export evaluation results to CSV file Args: results: List of evaluation results Returns: Path to the CSV file CSV Format: - question: The test question - project: 
Project context - faithfulness: Faithfulness score (0-1) - answer_relevance: Answer relevance score (0-1) - context_recall: Context recall score (0-1) - context_precision: Context precision score (0-1) - ragas_score: Overall RAGAS score (0-1) - timestamp: When evaluation was run """ csv_path = ( self.results_dir / f"results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv" ) with open(csv_path, "w", newline="", encoding="utf-8") as f: fieldnames = [ "test_number", "question", "project", "faithfulness", "answer_relevance", "context_recall", "context_precision", "ragas_score", "status", "timestamp", ] writer = csv.DictWriter(f, fieldnames=fieldnames) writer.writeheader() for idx, result in enumerate(results, 1): metrics = result.get("metrics", {}) writer.writerow( { "test_number": idx, "question": result.get("question", ""), "project": result.get("project", "unknown"), "faithfulness": f"{metrics.get('faithfulness', 0):.4f}", "answer_relevance": f"{metrics.get('answer_relevance', 0):.4f}", "context_recall": f"{metrics.get('context_recall', 0):.4f}", "context_precision": f"{metrics.get('context_precision', 0):.4f}", "ragas_score": f"{result.get('ragas_score', 0):.4f}", "status": "success" if metrics else "error", "timestamp": result.get("timestamp", ""), } ) return csv_path def _format_metric(self, value: float, width: int = 6) -> str: """ Format a metric value for display, handling NaN gracefully Args: value: The metric value to format width: The width of the formatted string Returns: Formatted string (e.g., "0.8523" or " N/A ") """ if _is_nan(value): return "N/A".center(width) return f"{value:.4f}".rjust(width) def _display_results_table(self, results: List[Dict[str, Any]]): """ Display evaluation results in a formatted table Args: results: List of evaluation results """ logger.info("") logger.info("%s", "=" * 115) logger.info("📊 EVALUATION RESULTS SUMMARY") logger.info("%s", "=" * 115) # Table header logger.info( "%-4s | %-50s | %6s | %7s | %6s | %7s | %6s | %6s", "#", "Question", "Faith", "AnswRel", "CtxRec", "CtxPrec", "RAGAS", "Status", ) logger.info("%s", "-" * 115) # Table rows for result in results: test_num = result.get("test_number", 0) question = result.get("question", "") # Truncate question to 50 chars question_display = ( (question[:47] + "...") if len(question) > 50 else question ) metrics = result.get("metrics", {}) if metrics: # Success case - format each metric, handling NaN values faith = metrics.get("faithfulness", 0) ans_rel = metrics.get("answer_relevance", 0) ctx_rec = metrics.get("context_recall", 0) ctx_prec = metrics.get("context_precision", 0) ragas = result.get("ragas_score", 0) status = "✓" logger.info( "%-4d | %-50s | %s | %s | %s | %s | %s | %6s", test_num, question_display, self._format_metric(faith, 6), self._format_metric(ans_rel, 7), self._format_metric(ctx_rec, 6), self._format_metric(ctx_prec, 7), self._format_metric(ragas, 6), status, ) else: # Error case error = result.get("error", "Unknown error") error_display = (error[:20] + "...") if len(error) > 23 else error logger.info( "%-4d | %-50s | %6s | %7s | %6s | %7s | %6s | ✗ %s", test_num, question_display, "N/A", "N/A", "N/A", "N/A", "N/A", error_display, ) logger.info("%s", "=" * 115) def _calculate_benchmark_stats( self, results: List[Dict[str, Any]] ) -> Dict[str, Any]: """ Calculate benchmark statistics from evaluation results Args: results: List of evaluation results Returns: Dictionary with benchmark statistics """ # Filter out results with errors valid_results = [r for r in results if 
r.get("metrics")] total_tests = len(results) successful_tests = len(valid_results) failed_tests = total_tests - successful_tests if not valid_results: return { "total_tests": total_tests, "successful_tests": 0, "failed_tests": failed_tests, "success_rate": 0.0, } # Calculate averages for each metric (handling NaN values correctly) # Track both sum and count for each metric to handle NaN values properly metrics_data = { "faithfulness": {"sum": 0.0, "count": 0}, "answer_relevance": {"sum": 0.0, "count": 0}, "context_recall": {"sum": 0.0, "count": 0}, "context_precision": {"sum": 0.0, "count": 0}, "ragas_score": {"sum": 0.0, "count": 0}, } for result in valid_results: metrics = result.get("metrics", {}) # For each metric, sum non-NaN values and count them faithfulness = metrics.get("faithfulness", 0) if not _is_nan(faithfulness): metrics_data["faithfulness"]["sum"] += faithfulness metrics_data["faithfulness"]["count"] += 1 answer_relevance = metrics.get("answer_relevance", 0) if not _is_nan(answer_relevance): metrics_data["answer_relevance"]["sum"] += answer_relevance metrics_data["answer_relevance"]["count"] += 1 context_recall = metrics.get("context_recall", 0) if not _is_nan(context_recall): metrics_data["context_recall"]["sum"] += context_recall metrics_data["context_recall"]["count"] += 1 context_precision = metrics.get("context_precision", 0) if not _is_nan(context_precision): metrics_data["context_precision"]["sum"] += context_precision metrics_data["context_precision"]["count"] += 1 ragas_score = result.get("ragas_score", 0) if not _is_nan(ragas_score): metrics_data["ragas_score"]["sum"] += ragas_score metrics_data["ragas_score"]["count"] += 1 # Calculate averages using actual counts for each metric avg_metrics = {} for metric_name, data in metrics_data.items(): if data["count"] > 0: avg_val = data["sum"] / data["count"] avg_metrics[metric_name] = ( round(avg_val, 4) if not _is_nan(avg_val) else 0.0 ) else: avg_metrics[metric_name] = 0.0 # Find min and max RAGAS scores (filter out NaN) ragas_scores = [] for r in valid_results: score = r.get("ragas_score", 0) if _is_nan(score): continue # Skip NaN values ragas_scores.append(score) min_score = min(ragas_scores) if ragas_scores else 0 max_score = max(ragas_scores) if ragas_scores else 0 return { "total_tests": total_tests, "successful_tests": successful_tests, "failed_tests": failed_tests, "success_rate": round(successful_tests / total_tests * 100, 2), "average_metrics": avg_metrics, "min_ragas_score": round(min_score, 4), "max_ragas_score": round(max_score, 4), } async def run(self) -> Dict[str, Any]: """Run complete evaluation pipeline""" start_time = time.time() # Evaluate responses results = await self.evaluate_responses() elapsed_time = time.time() - start_time # Calculate benchmark statistics benchmark_stats = self._calculate_benchmark_stats(results) # Save results summary = { "timestamp": datetime.now().isoformat(), "total_tests": len(results), "elapsed_time_seconds": round(elapsed_time, 2), "benchmark_stats": benchmark_stats, "results": results, } # Display results table self._display_results_table(results) # Save JSON results json_path = ( self.results_dir / f"results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" ) with open(json_path, "w") as f: json.dump(summary, f, indent=2) # Export to CSV csv_path = self._export_to_csv(results) # Print summary logger.info("") logger.info("%s", "=" * 70) logger.info("📊 EVALUATION COMPLETE") logger.info("%s", "=" * 70) logger.info("Total Tests: %s", len(results)) logger.info("Successful: 
%s", benchmark_stats["successful_tests"]) logger.info("Failed: %s", benchmark_stats["failed_tests"]) logger.info("Success Rate: %.2f%%", benchmark_stats["success_rate"]) logger.info("Elapsed Time: %.2f seconds", elapsed_time) logger.info("Avg Time/Test: %.2f seconds", elapsed_time / len(results)) # Print benchmark metrics logger.info("") logger.info("%s", "=" * 70) logger.info("📈 BENCHMARK RESULTS (Average)") logger.info("%s", "=" * 70) avg = benchmark_stats["average_metrics"] logger.info("Average Faithfulness: %.4f", avg["faithfulness"]) logger.info("Average Answer Relevance: %.4f", avg["answer_relevance"]) logger.info("Average Context Recall: %.4f", avg["context_recall"]) logger.info("Average Context Precision: %.4f", avg["context_precision"]) logger.info("Average RAGAS Score: %.4f", avg["ragas_score"]) logger.info("%s", "-" * 70) logger.info( "Min RAGAS Score: %.4f", benchmark_stats["min_ragas_score"], ) logger.info( "Max RAGAS Score: %.4f", benchmark_stats["max_ragas_score"], ) logger.info("") logger.info("%s", "=" * 70) logger.info("📁 GENERATED FILES") logger.info("%s", "=" * 70) logger.info("Results Dir: %s", self.results_dir.absolute()) logger.info(" • CSV: %s", csv_path.name) logger.info(" • JSON: %s", json_path.name) logger.info("%s", "=" * 70) return summary async def main(): """ Main entry point for RAGAS evaluation Command-line arguments: --dataset, -d: Path to test dataset JSON file (default: sample_dataset.json) --ragendpoint, -r: LightRAG API endpoint URL (default: http://localhost:9621 or $LIGHTRAG_API_URL) Usage: python lightrag/evaluation/eval_rag_quality.py python lightrag/evaluation/eval_rag_quality.py --dataset my_test.json python lightrag/evaluation/eval_rag_quality.py -d my_test.json -r http://localhost:9621 """ try: # Parse command-line arguments parser = argparse.ArgumentParser( description="RAGAS Evaluation Script for LightRAG System", formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: # Use defaults python lightrag/evaluation/eval_rag_quality.py # Specify custom dataset python lightrag/evaluation/eval_rag_quality.py --dataset my_test.json # Specify custom RAG endpoint python lightrag/evaluation/eval_rag_quality.py --ragendpoint http://my-server.com:9621 # Specify both python lightrag/evaluation/eval_rag_quality.py -d my_test.json -r http://localhost:9621 """, ) parser.add_argument( "--dataset", "-d", type=str, default=None, help="Path to test dataset JSON file (default: sample_dataset.json in evaluation directory)", ) parser.add_argument( "--ragendpoint", "-r", type=str, default=None, help="LightRAG API endpoint URL (default: http://localhost:9621 or $LIGHTRAG_API_URL environment variable)", ) args = parser.parse_args() logger.info("%s", "=" * 70) logger.info("🔍 RAGAS Evaluation - Using Real LightRAG API") logger.info("%s", "=" * 70) evaluator = RAGEvaluator( test_dataset_path=args.dataset, rag_api_url=args.ragendpoint ) await evaluator.run() except Exception as e: logger.exception("❌ Error: %s", e) sys.exit(1) if __name__ == "__main__": asyncio.run(main())
HKUDS/LightRAG:lightrag/tools/download_cache.py
""" Download all necessary cache files for offline deployment. This module provides a CLI command to download tiktoken model cache files for offline environments where internet access is not available. """ import os import sys from pathlib import Path # Known tiktoken encoding names (not model names) # These need to be loaded with tiktoken.get_encoding() instead of tiktoken.encoding_for_model() TIKTOKEN_ENCODING_NAMES = {"cl100k_base", "p50k_base", "r50k_base", "o200k_base"} def download_tiktoken_cache(cache_dir: str = None, models: list = None): """Download tiktoken models to local cache Args: cache_dir: Directory to store the cache files. If None, uses tiktoken's default location. models: List of model names or encoding names to download. If None, downloads common ones. Returns: Tuple of (success_count, failed_models, actual_cache_dir) """ # If user specified a cache directory, set it BEFORE importing tiktoken # tiktoken reads TIKTOKEN_CACHE_DIR at import time user_specified_cache = cache_dir is not None if user_specified_cache: cache_dir = os.path.abspath(cache_dir) os.environ["TIKTOKEN_CACHE_DIR"] = cache_dir cache_path = Path(cache_dir) cache_path.mkdir(parents=True, exist_ok=True) print(f"Using specified cache directory: {cache_dir}") else: # Check if TIKTOKEN_CACHE_DIR is already set in environment env_cache_dir = os.environ.get("TIKTOKEN_CACHE_DIR") if env_cache_dir: cache_dir = env_cache_dir print(f"Using TIKTOKEN_CACHE_DIR from environment: {cache_dir}") else: # Use tiktoken's default location (tempdir/data-gym-cache) import tempfile cache_dir = os.path.join(tempfile.gettempdir(), "data-gym-cache") print(f"Using tiktoken default cache directory: {cache_dir}") # Now import tiktoken (it will use the cache directory we determined) try: import tiktoken except ImportError: print("Error: tiktoken is not installed.") print("Install with: pip install tiktoken") sys.exit(1) # Common models used by LightRAG and OpenAI if models is None: models = [ "gpt-4o-mini", # Default model for LightRAG "gpt-4o", # GPT-4 Omni "gpt-4", # GPT-4 "gpt-3.5-turbo", # GPT-3.5 Turbo "text-embedding-ada-002", # Legacy embedding model "text-embedding-3-small", # Small embedding model "text-embedding-3-large", # Large embedding model "cl100k_base", # Default encoding for LightRAG ] print(f"\nDownloading {len(models)} tiktoken models...") print("=" * 70) success_count = 0 failed_models = [] for i, model in enumerate(models, 1): try: print(f"[{i}/{len(models)}] Downloading {model}...", end=" ", flush=True) # Use get_encoding for encoding names, encoding_for_model for model names if model in TIKTOKEN_ENCODING_NAMES: encoding = tiktoken.get_encoding(model) else: encoding = tiktoken.encoding_for_model(model) # Trigger download by encoding a test string encoding.encode("test") print("✓ Done") success_count += 1 except KeyError as e: print(f"✗ Failed: Unknown model or encoding '{model}'") failed_models.append((model, str(e))) except Exception as e: print(f"✗ Failed: {e}") failed_models.append((model, str(e))) print("=" * 70) print(f"\n✓ Successfully cached {success_count}/{len(models)} models") if failed_models: print(f"\n✗ Failed to download {len(failed_models)} models:") for model, error in failed_models: print(f" - {model}: {error}") print(f"\nCache location: {cache_dir}") print("\nFor offline deployment:") print(" 1. Copy directory to offline server:") print(f" tar -czf tiktoken_cache.tar.gz {cache_dir}") print(" scp tiktoken_cache.tar.gz user@offline-server:/path/to/") print("") print(" 2. 
On offline server, extract and set environment variable:") print(" tar -xzf tiktoken_cache.tar.gz") print(" export TIKTOKEN_CACHE_DIR=/path/to/tiktoken_cache") print("") print(" 3. Or copy to default location:") print(f" cp -r {cache_dir} ~/.tiktoken_cache/") return success_count, failed_models def main(): """Main entry point for the CLI command""" import argparse parser = argparse.ArgumentParser( prog="lightrag-download-cache", description="Download cache files for LightRAG offline deployment", formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: # Download to default location (~/.tiktoken_cache) lightrag-download-cache # Download to specific directory lightrag-download-cache --cache-dir ./offline_cache/tiktoken # Download specific models only lightrag-download-cache --models gpt-4o-mini gpt-4 For more information, visit: https://github.com/HKUDS/LightRAG """, ) parser.add_argument( "--cache-dir", help="Cache directory path (default: ~/.tiktoken_cache)", default=None, ) parser.add_argument( "--models", nargs="+", help="Specific models to download (default: common models)", default=None, ) parser.add_argument( "--version", action="version", version="%(prog)s (LightRAG cache downloader)" ) args = parser.parse_args() print("=" * 70) print("LightRAG Offline Cache Downloader") print("=" * 70) try: success_count, failed_models = download_tiktoken_cache( args.cache_dir, args.models ) print("\n" + "=" * 70) print("Download Complete") print("=" * 70) # Exit with error code if all downloads failed if success_count == 0: print("\n✗ All downloads failed. Please check your internet connection.") sys.exit(1) # Exit with warning code if some downloads failed elif failed_models: print( f"\n⚠ Some downloads failed ({len(failed_models)}/{success_count + len(failed_models)})" ) sys.exit(2) else: print("\n✓ All cache files downloaded successfully!") sys.exit(0) except KeyboardInterrupt: print("\n\n✗ Download interrupted by user") sys.exit(130) except Exception as e: print(f"\n\n✗ Error: {e}") import traceback traceback.print_exc() sys.exit(1) if __name__ == "__main__": main()
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/tools/download_cache.py", "license": "MIT License", "lines": 165, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
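Because tiktoken resolves TIKTOKEN_CACHE_DIR at import time (the constraint download_tiktoken_cache works around above), an offline consumer must export the variable before the first import. A short smoke-test sketch; the cache path is a hypothetical example:

import os

# Hypothetical path where the archive produced above was extracted.
os.environ["TIKTOKEN_CACHE_DIR"] = "/srv/offline_cache/tiktoken"

import tiktoken  # imported only after the env var is set, so the cache dir applies

encoding = tiktoken.get_encoding("cl100k_base")
print(f"cl100k_base loaded offline, {len(encoding.encode('smoke test'))} tokens")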
HKUDS/LightRAG:tests/test_postgres_retry_integration.py
""" Integration test suite for PostgreSQL retry mechanism using real database. This test suite connects to a real PostgreSQL database using credentials from .env and tests the retry mechanism with actual network failures. Prerequisites: 1. PostgreSQL server running and accessible 2. .env file with POSTGRES_* configuration 3. asyncpg installed: pip install asyncpg """ import pytest import asyncio import os import time from dotenv import load_dotenv from unittest.mock import patch from lightrag.kg.postgres_impl import PostgreSQLDB asyncpg = pytest.importorskip("asyncpg") # Load environment variables load_dotenv(dotenv_path=".env", override=False) @pytest.mark.integration @pytest.mark.requires_db class TestPostgresRetryIntegration: """Integration tests for PostgreSQL retry mechanism with real database.""" @pytest.fixture def db_config(self): """Load database configuration from environment variables. Uses new HA-optimized defaults that match postgres_impl.py ClientManager.get_config(): - 10 retry attempts (up from 3) - 3.0s initial backoff (up from 0.5s) - 30.0s max backoff (up from 5.0s) """ return { "host": os.getenv("POSTGRES_HOST", "localhost"), "port": int(os.getenv("POSTGRES_PORT", "5432")), "user": os.getenv("POSTGRES_USER", "postgres"), "password": os.getenv("POSTGRES_PASSWORD", ""), "database": os.getenv("POSTGRES_DATABASE", "postgres"), "workspace": os.getenv("POSTGRES_WORKSPACE", "test_retry"), "max_connections": int(os.getenv("POSTGRES_MAX_CONNECTIONS", "10")), # Connection retry configuration - mirrors postgres_impl.py ClientManager.get_config() # NEW DEFAULTS optimized for HA deployments "connection_retry_attempts": min( 100, int(os.getenv("POSTGRES_CONNECTION_RETRIES", "10")), # 3 → 10 ), "connection_retry_backoff": min( 300.0, float( os.getenv("POSTGRES_CONNECTION_RETRY_BACKOFF", "3.0") ), # 0.5 → 3.0 ), "connection_retry_backoff_max": min( 600.0, float( os.getenv("POSTGRES_CONNECTION_RETRY_BACKOFF_MAX", "30.0") ), # 5.0 → 30.0 ), "pool_close_timeout": min( 30.0, float(os.getenv("POSTGRES_POOL_CLOSE_TIMEOUT", "5.0")) ), } @pytest.mark.asyncio async def test_real_connection_success(self, db_config): """ Test successful connection to real PostgreSQL database. This validates that: 1. Database credentials are correct 2. Connection pool initializes properly 3. Basic query works """ print("\n" + "=" * 80) print("INTEGRATION TEST 1: Real Database Connection") print("=" * 80) print( f" → Connecting to {db_config['host']}:{db_config['port']}/{db_config['database']}" ) db = PostgreSQLDB(db_config) try: # Initialize database connection await db.initdb() print(" ✓ Connection successful") # Test simple query result = await db.query("SELECT 1 as test", multirows=False) assert result is not None assert result.get("test") == 1 print(" ✓ Query executed successfully") print("\n✅ Test passed: Real database connection works") print("=" * 80) finally: if db.pool: await db.pool.close() @pytest.mark.asyncio async def test_simulated_transient_error_with_real_db(self, db_config): """ Test retry mechanism with simulated transient errors on real database. Simulates connection failures on first 2 attempts, then succeeds. Uses new HA defaults (10 retries, 3s backoff). 
""" print("\n" + "=" * 80) print("INTEGRATION TEST 2: Simulated Transient Errors") print("=" * 80) db = PostgreSQLDB(db_config) attempt_count = {"value": 0} # Original create_pool function original_create_pool = asyncpg.create_pool async def mock_create_pool_with_failures(*args, **kwargs): """Mock that fails first 2 times, then calls real create_pool.""" attempt_count["value"] += 1 print(f" → Connection attempt {attempt_count['value']}") if attempt_count["value"] <= 2: print(" ✗ Simulating connection failure") raise asyncpg.exceptions.ConnectionFailureError( f"Simulated failure on attempt {attempt_count['value']}" ) print(" ✓ Allowing real connection") return await original_create_pool(*args, **kwargs) try: # Patch create_pool to simulate failures with patch( "asyncpg.create_pool", side_effect=mock_create_pool_with_failures ): await db.initdb() assert ( attempt_count["value"] == 3 ), f"Expected 3 attempts, got {attempt_count['value']}" assert db.pool is not None, "Pool should be initialized after retries" # Verify database is actually working result = await db.query("SELECT 1 as test", multirows=False) assert result.get("test") == 1 print( f"\n✅ Test passed: Retry mechanism worked, connected after {attempt_count['value']} attempts" ) print("=" * 80) finally: if db.pool: await db.pool.close() @pytest.mark.asyncio async def test_query_retry_with_real_db(self, db_config): """ Test query-level retry with simulated connection issues. Tests that queries retry on transient failures by simulating a temporary database unavailability. Uses new HA defaults (10 retries, 3s backoff). """ print("\n" + "=" * 80) print("INTEGRATION TEST 3: Query-Level Retry") print("=" * 80) db = PostgreSQLDB(db_config) try: # First initialize normally await db.initdb() print(" ✓ Database initialized") # Close the pool to simulate connection loss print(" → Simulating connection loss (closing pool)...") await db.pool.close() db.pool = None # Now query should trigger pool recreation and retry print(" → Attempting query (should auto-reconnect)...") result = await db.query("SELECT 1 as test", multirows=False) assert result.get("test") == 1, "Query should succeed after reconnection" assert db.pool is not None, "Pool should be recreated" print(" ✓ Query succeeded after automatic reconnection") print("\n✅ Test passed: Auto-reconnection works correctly") print("=" * 80) finally: if db.pool: await db.pool.close() @pytest.mark.asyncio async def test_concurrent_queries_with_real_db(self, db_config): """ Test concurrent queries to validate thread safety and connection pooling. Runs multiple concurrent queries to ensure no deadlocks or race conditions. Uses new HA defaults (10 retries, 3s backoff). 
""" print("\n" + "=" * 80) print("INTEGRATION TEST 4: Concurrent Queries") print("=" * 80) db = PostgreSQLDB(db_config) try: await db.initdb() print(" ✓ Database initialized") # Launch 10 concurrent queries num_queries = 10 print(f" → Launching {num_queries} concurrent queries...") async def run_query(query_id): result = await db.query( f"SELECT {query_id} as id, pg_sleep(0.1)", multirows=False ) return result.get("id") start_time = time.time() tasks = [run_query(i) for i in range(num_queries)] results = await asyncio.gather(*tasks, return_exceptions=True) elapsed = time.time() - start_time # Check results successful = sum(1 for r in results if not isinstance(r, Exception)) failed = sum(1 for r in results if isinstance(r, Exception)) print(f" → Completed in {elapsed:.2f}s") print(f" → Results: {successful} successful, {failed} failed") assert ( successful == num_queries ), f"All {num_queries} queries should succeed" assert failed == 0, "No queries should fail" print("\n✅ Test passed: All concurrent queries succeeded, no deadlocks") print("=" * 80) finally: if db.pool: await db.pool.close() @pytest.mark.asyncio async def test_pool_close_timeout_real(self, db_config): """ Test pool close timeout protection with real database. Uses new HA defaults (10 retries, 3s backoff). """ print("\n" + "=" * 80) print("INTEGRATION TEST 5: Pool Close Timeout") print("=" * 80) db = PostgreSQLDB(db_config) try: await db.initdb() print(" ✓ Database initialized") # Trigger pool reset (which includes close) print(" → Triggering pool reset...") start_time = time.time() await db._reset_pool() elapsed = time.time() - start_time print(f" ✓ Pool reset completed in {elapsed:.2f}s") assert db.pool is None, "Pool should be None after reset" assert ( elapsed < db.pool_close_timeout + 1 ), "Reset should complete within timeout" print("\n✅ Test passed: Pool reset handled correctly") print("=" * 80) finally: # Already closed in test pass @pytest.mark.asyncio async def test_configuration_from_env(self, db_config): """ Test that configuration is correctly loaded from environment variables. 
""" print("\n" + "=" * 80) print("INTEGRATION TEST 6: Environment Configuration") print("=" * 80) db = PostgreSQLDB(db_config) print(" → Configuration loaded:") print(f" • Host: {db.host}") print(f" • Port: {db.port}") print(f" • Database: {db.database}") print(f" • User: {db.user}") print(f" • Workspace: {db.workspace}") print(f" • Max Connections: {db.max}") print(f" • Retry Attempts: {db.connection_retry_attempts}") print(f" • Retry Backoff: {db.connection_retry_backoff}s") print(f" • Max Backoff: {db.connection_retry_backoff_max}s") print(f" • Pool Close Timeout: {db.pool_close_timeout}s") # Verify required fields are present assert db.host, "Host should be configured" assert db.port, "Port should be configured" assert db.user, "User should be configured" assert db.database, "Database should be configured" print("\n✅ Test passed: All configuration loaded correctly from .env") print("=" * 80) def run_integration_tests(): """Run all integration tests with detailed output.""" print("\n" + "=" * 80) print("POSTGRESQL RETRY MECHANISM - INTEGRATION TESTS") print("Testing with REAL database from .env configuration") print("=" * 80) # Check if database configuration exists if not os.getenv("POSTGRES_HOST"): print("\n⚠️ WARNING: No POSTGRES_HOST in .env file") print("Please ensure .env file exists with PostgreSQL configuration.") return print("\nRunning integration tests...\n") # Run pytest with verbose output pytest.main( [ __file__, "-v", "-s", # Don't capture output "--tb=short", # Short traceback format "--color=yes", "-x", # Stop on first failure ] ) if __name__ == "__main__": run_integration_tests()
{ "repo_id": "HKUDS/LightRAG", "file_path": "tests/test_postgres_retry_integration.py", "license": "MIT License", "lines": 287, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
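The tests above exercise a capped exponential backoff: with the HA defaults, up to 10 attempts starting at a 3 s delay and doubling toward a 30 s ceiling. A minimal sketch of that retry shape, not PostgreSQLDB's actual API; the function name and parameters are illustrative, and ConnectionError stands in for asyncpg's connection exceptions:

import asyncio

async def connect_with_retry(connect, retries=10, backoff=3.0, backoff_max=30.0):
    """Call an async connect() with capped exponential backoff between attempts."""
    delay = backoff
    for attempt in range(1, retries + 1):
        try:
            return await connect()
        except ConnectionError:
            if attempt == retries:
                raise  # exhausted all attempts; surface the last error
            await asyncio.sleep(delay)
            delay = min(delay * 2, backoff_max)  # 3 -> 6 -> 12 -> 24 -> 30 -> 30 ...

# Mirrors INTEGRATION TEST 2 above: fail twice, then succeed on the third attempt.
attempts = {"n": 0}

async def flaky_connect():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ConnectionError("simulated transient failure")
    return "pool"

print(asyncio.run(connect_with_retry(flaky_connect, backoff=0.01)))  # -> pool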
HKUDS/LightRAG:tests/test_aquery_data_endpoint.py
#!/usr/bin/env python3 """ Test script: Demonstrates usage of aquery_data FastAPI endpoint Query content: Who is the author of LightRAG Updated to handle the new data format where: - Response includes status, message, data, and metadata fields at top level - Actual query results (entities, relationships, chunks, references) are nested under 'data' field - Includes backward compatibility with legacy format """ import pytest import requests import time import json from typing import Dict, Any, List, Optional # API configuration API_KEY = "your-secure-api-key-here-123" BASE_URL = "http://localhost:9621" # Unified authentication headers AUTH_HEADERS = {"Content-Type": "application/json", "X-API-Key": API_KEY} def validate_references_format(references: List[Dict[str, Any]]) -> bool: """Validate the format of references list""" if not isinstance(references, list): print(f"❌ References should be a list, got {type(references)}") return False for i, ref in enumerate(references): if not isinstance(ref, dict): print(f"❌ Reference {i} should be a dict, got {type(ref)}") return False required_fields = ["reference_id", "file_path"] for field in required_fields: if field not in ref: print(f"❌ Reference {i} missing required field: {field}") return False if not isinstance(ref[field], str): print( f"❌ Reference {i} field '{field}' should be string, got {type(ref[field])}" ) return False return True def parse_streaming_response( response_text: str, ) -> tuple[Optional[List[Dict]], List[str], List[str]]: """Parse streaming response and extract references, response chunks, and errors""" references = None response_chunks = [] errors = [] lines = response_text.strip().split("\n") for line in lines: line = line.strip() if not line or line.startswith("data: "): if line.startswith("data: "): line = line[6:] # Remove 'data: ' prefix if not line: continue try: data = json.loads(line) if "references" in data: references = data["references"] if "response" in data: response_chunks.append(data["response"]) if "error" in data: errors.append(data["error"]) except json.JSONDecodeError: # Skip non-JSON lines (like SSE comments) continue return references, response_chunks, errors @pytest.mark.integration @pytest.mark.requires_api def test_query_endpoint_references(): """Test /query endpoint references functionality""" print("\n" + "=" * 60) print("Testing /query endpoint references functionality") print("=" * 60) query_text = "who authored LightRAG" endpoint = f"{BASE_URL}/query" # Test 1: References enabled (default) print("\n🧪 Test 1: References enabled (default)") print("-" * 40) try: response = requests.post( endpoint, json={"query": query_text, "mode": "mix", "include_references": True}, headers=AUTH_HEADERS, timeout=30, ) if response.status_code == 200: data = response.json() # Check response structure if "response" not in data: print("❌ Missing 'response' field") return False if "references" not in data: print("❌ Missing 'references' field when include_references=True") return False references = data["references"] if references is None: print("❌ References should not be None when include_references=True") return False if not validate_references_format(references): return False print(f"✅ References enabled: Found {len(references)} references") print(f" Response length: {len(data['response'])} characters") # Display reference list if references: print(" 📚 Reference List:") for i, ref in enumerate(references, 1): ref_id = ref.get("reference_id", "Unknown") file_path = ref.get("file_path", "Unknown") print(f" {i}. 
ID: {ref_id} | File: {file_path}") else: print(f"❌ Request failed: {response.status_code}") print(f" Error: {response.text}") return False except Exception as e: print(f"❌ Test 1 failed: {str(e)}") return False # Test 2: References disabled print("\n🧪 Test 2: References disabled") print("-" * 40) try: response = requests.post( endpoint, json={"query": query_text, "mode": "mix", "include_references": False}, headers=AUTH_HEADERS, timeout=30, ) if response.status_code == 200: data = response.json() # Check response structure if "response" not in data: print("❌ Missing 'response' field") return False references = data.get("references") if references is not None: print("❌ References should be None when include_references=False") return False print("✅ References disabled: No references field present") print(f" Response length: {len(data['response'])} characters") else: print(f"❌ Request failed: {response.status_code}") print(f" Error: {response.text}") return False except Exception as e: print(f"❌ Test 2 failed: {str(e)}") return False print("\n✅ /query endpoint references tests passed!") return True @pytest.mark.integration @pytest.mark.requires_api def test_query_stream_endpoint_references(): """Test /query/stream endpoint references functionality""" print("\n" + "=" * 60) print("Testing /query/stream endpoint references functionality") print("=" * 60) query_text = "who authored LightRAG" endpoint = f"{BASE_URL}/query/stream" # Test 1: Streaming with references enabled print("\n🧪 Test 1: Streaming with references enabled") print("-" * 40) try: response = requests.post( endpoint, json={"query": query_text, "mode": "mix", "include_references": True}, headers=AUTH_HEADERS, timeout=30, stream=True, ) if response.status_code == 200: # Collect streaming response full_response = "" for chunk in response.iter_content(chunk_size=1024, decode_unicode=True): if chunk: # Ensure chunk is string type if isinstance(chunk, bytes): chunk = chunk.decode("utf-8") full_response += chunk # Parse streaming response references, response_chunks, errors = parse_streaming_response( full_response ) if errors: print(f"❌ Errors in streaming response: {errors}") return False if references is None: print("❌ No references found in streaming response") return False if not validate_references_format(references): return False if not response_chunks: print("❌ No response chunks found in streaming response") return False print(f"✅ Streaming with references: Found {len(references)} references") print(f" Response chunks: {len(response_chunks)}") print( f" Total response length: {sum(len(chunk) for chunk in response_chunks)} characters" ) # Display reference list if references: print(" 📚 Reference List:") for i, ref in enumerate(references, 1): ref_id = ref.get("reference_id", "Unknown") file_path = ref.get("file_path", "Unknown") print(f" {i}. 
ID: {ref_id} | File: {file_path}") else: print(f"❌ Request failed: {response.status_code}") print(f" Error: {response.text}") return False except Exception as e: print(f"❌ Test 1 failed: {str(e)}") return False # Test 2: Streaming with references disabled print("\n🧪 Test 2: Streaming with references disabled") print("-" * 40) try: response = requests.post( endpoint, json={"query": query_text, "mode": "mix", "include_references": False}, headers=AUTH_HEADERS, timeout=30, stream=True, ) if response.status_code == 200: # Collect streaming response full_response = "" for chunk in response.iter_content(chunk_size=1024, decode_unicode=True): if chunk: # Ensure chunk is string type if isinstance(chunk, bytes): chunk = chunk.decode("utf-8") full_response += chunk # Parse streaming response references, response_chunks, errors = parse_streaming_response( full_response ) if errors: print(f"❌ Errors in streaming response: {errors}") return False if references is not None: print("❌ References should be None when include_references=False") return False if not response_chunks: print("❌ No response chunks found in streaming response") return False print("✅ Streaming without references: No references present") print(f" Response chunks: {len(response_chunks)}") print( f" Total response length: {sum(len(chunk) for chunk in response_chunks)} characters" ) else: print(f"❌ Request failed: {response.status_code}") print(f" Error: {response.text}") return False except Exception as e: print(f"❌ Test 2 failed: {str(e)}") return False print("\n✅ /query/stream endpoint references tests passed!") return True @pytest.mark.integration @pytest.mark.requires_api def test_references_consistency(): """Test references consistency across all endpoints""" print("\n" + "=" * 60) print("Testing references consistency across endpoints") print("=" * 60) query_text = "who authored LightRAG" query_params = { "query": query_text, "mode": "mix", "top_k": 10, "chunk_top_k": 8, "include_references": True, } references_data = {} # Test /query endpoint print("\n🧪 Testing /query endpoint") print("-" * 40) try: response = requests.post( f"{BASE_URL}/query", json=query_params, headers=AUTH_HEADERS, timeout=30 ) if response.status_code == 200: data = response.json() references_data["query"] = data.get("references", []) print(f"✅ /query: {len(references_data['query'])} references") else: print(f"❌ /query failed: {response.status_code}") return False except Exception as e: print(f"❌ /query test failed: {str(e)}") return False # Test /query/stream endpoint print("\n🧪 Testing /query/stream endpoint") print("-" * 40) try: response = requests.post( f"{BASE_URL}/query/stream", json=query_params, headers=AUTH_HEADERS, timeout=30, stream=True, ) if response.status_code == 200: full_response = "" for chunk in response.iter_content(chunk_size=1024, decode_unicode=True): if chunk: # Ensure chunk is string type if isinstance(chunk, bytes): chunk = chunk.decode("utf-8") full_response += chunk references, _, errors = parse_streaming_response(full_response) if errors: print(f"❌ Errors: {errors}") return False references_data["stream"] = references or [] print(f"✅ /query/stream: {len(references_data['stream'])} references") else: print(f"❌ /query/stream failed: {response.status_code}") return False except Exception as e: print(f"❌ /query/stream test failed: {str(e)}") return False # Test /query/data endpoint print("\n🧪 Testing /query/data endpoint") print("-" * 40) try: response = requests.post( f"{BASE_URL}/query/data", json=query_params, 
headers=AUTH_HEADERS, timeout=30, ) if response.status_code == 200: data = response.json() query_data = data.get("data", {}) references_data["data"] = query_data.get("references", []) print(f"✅ /query/data: {len(references_data['data'])} references") else: print(f"❌ /query/data failed: {response.status_code}") return False except Exception as e: print(f"❌ /query/data test failed: {str(e)}") return False # Compare references consistency print("\n🔍 Comparing references consistency") print("-" * 40) # Convert to sets of (reference_id, file_path) tuples for comparison def refs_to_set(refs): return set( (ref.get("reference_id", ""), ref.get("file_path", "")) for ref in refs ) query_refs = refs_to_set(references_data["query"]) stream_refs = refs_to_set(references_data["stream"]) data_refs = refs_to_set(references_data["data"]) # Check consistency consistency_passed = True if query_refs != stream_refs: print("❌ References mismatch between /query and /query/stream") print(f" /query only: {query_refs - stream_refs}") print(f" /query/stream only: {stream_refs - query_refs}") consistency_passed = False if query_refs != data_refs: print("❌ References mismatch between /query and /query/data") print(f" /query only: {query_refs - data_refs}") print(f" /query/data only: {data_refs - query_refs}") consistency_passed = False if stream_refs != data_refs: print("❌ References mismatch between /query/stream and /query/data") print(f" /query/stream only: {stream_refs - data_refs}") print(f" /query/data only: {data_refs - stream_refs}") consistency_passed = False if consistency_passed: print("✅ All endpoints return consistent references") print(f" Common references count: {len(query_refs)}") # Display common reference list if query_refs: print(" 📚 Common Reference List:") for i, (ref_id, file_path) in enumerate(sorted(query_refs), 1): print(f" {i}. 
ID: {ref_id} | File: {file_path}")

    return consistency_passed


@pytest.mark.integration
@pytest.mark.requires_api
def test_aquery_data_endpoint():
    """Test the /query/data endpoint"""
    # Use unified configuration
    endpoint = f"{BASE_URL}/query/data"

    # Query request
    query_request = {
        "query": "who authored LightRAG",
        "mode": "mix",  # Use mixed mode to get the most comprehensive results
        "top_k": 20,
        "chunk_top_k": 15,
        "max_entity_tokens": 4000,
        "max_relation_tokens": 4000,
        "max_total_tokens": 16000,
        "enable_rerank": True,
    }

    print("=" * 60)
    print("LightRAG aquery_data endpoint test")
    print(
        "  Returns structured data including entities, relationships and text chunks"
    )
    print("  Can be used for custom processing and analysis")
    print("=" * 60)
    print(f"Query content: {query_request['query']}")
    print(f"Query mode: {query_request['mode']}")
    print(f"API endpoint: {endpoint}")
    print("-" * 60)

    try:
        # Send request
        print("Sending request...")
        start_time = time.time()

        response = requests.post(
            endpoint, json=query_request, headers=AUTH_HEADERS, timeout=30
        )

        end_time = time.time()
        response_time = end_time - start_time

        print(f"Response time: {response_time:.2f} seconds")
        print(f"HTTP status code: {response.status_code}")

        if response.status_code == 200:
            data = response.json()
            print_query_results(data)
        else:
            print(f"Request failed: {response.status_code}")
            print(f"Error message: {response.text}")

    except requests.exceptions.ConnectionError:
        print("❌ Connection failed: Please ensure LightRAG API service is running")
        print("   Start command: python -m lightrag.api.lightrag_server")
    except requests.exceptions.Timeout:
        print("❌ Request timeout: Query processing took too long")
    except Exception as e:
        print(f"❌ Error occurred: {str(e)}")


def print_query_results(data: Dict[str, Any]):
    """Format and print query results"""
    # Check for new data format with status and message
    status = data.get("status", "unknown")
    message = data.get("message", "")

    print(f"\n📋 Query Status: {status}")
    if message:
        print(f"📋 Message: {message}")

    # Handle new nested data format
    query_data = data.get("data", {})

    # Fallback to old format if new format is not present
    if not query_data and any(
        key in data for key in ["entities", "relationships", "chunks"]
    ):
        print("  (Using legacy data format)")
        query_data = data

    entities = query_data.get("entities", [])
    relationships = query_data.get("relationships", [])
    chunks = query_data.get("chunks", [])
    references = query_data.get("references", [])

    print("\n📊 Query result statistics:")
    print(f"  Entity count: {len(entities)}")
    print(f"  Relationship count: {len(relationships)}")
    print(f"  Text chunk count: {len(chunks)}")
    print(f"  Reference count: {len(references)}")

    # Print metadata (now at top level in new format)
    metadata = data.get("metadata", {})
    if metadata:
        print("\n🔍 Query metadata:")
        print(f"  Query mode: {metadata.get('query_mode', 'unknown')}")

        keywords = metadata.get("keywords", {})
        if keywords:
            high_level = keywords.get("high_level", [])
            low_level = keywords.get("low_level", [])
            if high_level:
                print(f"  High-level keywords: {', '.join(high_level)}")
            if low_level:
                print(f"  Low-level keywords: {', '.join(low_level)}")

        processing_info = metadata.get("processing_info", {})
        if processing_info:
            print("  Processing info:")
            for key, value in processing_info.items():
                print(f"    {key}: {value}")

    # Print entity information
    if entities:
        print("\n👥 Retrieved entities (first 5):")
        for i, entity in enumerate(entities[:5]):
            entity_name = entity.get("entity_name", "Unknown")
            entity_type = entity.get("entity_type", "Unknown")
description = entity.get("description", "No description") file_path = entity.get("file_path", "Unknown source") reference_id = entity.get("reference_id", "No reference") print(f" {i+1}. {entity_name} ({entity_type})") print( f" Description: {description[:100]}{'...' if len(description) > 100 else ''}" ) print(f" Source: {file_path}") print(f" Reference ID: {reference_id}") print() # Print relationship information if relationships: print("🔗 Retrieved relationships (first 5):") for i, rel in enumerate(relationships[:5]): src = rel.get("src_id", "Unknown") tgt = rel.get("tgt_id", "Unknown") description = rel.get("description", "No description") keywords = rel.get("keywords", "No keywords") file_path = rel.get("file_path", "Unknown source") reference_id = rel.get("reference_id", "No reference") print(f" {i+1}. {src} → {tgt}") print(f" Keywords: {keywords}") print( f" Description: {description[:100]}{'...' if len(description) > 100 else ''}" ) print(f" Source: {file_path}") print(f" Reference ID: {reference_id}") print() # Print text chunk information if chunks: print("📄 Retrieved text chunks (first 3):") for i, chunk in enumerate(chunks[:3]): content = chunk.get("content", "No content") file_path = chunk.get("file_path", "Unknown source") chunk_id = chunk.get("chunk_id", "Unknown ID") reference_id = chunk.get("reference_id", "No reference") print(f" {i+1}. Text chunk ID: {chunk_id}") print(f" Source: {file_path}") print(f" Reference ID: {reference_id}") print( f" Content: {content[:200]}{'...' if len(content) > 200 else ''}" ) print() # Print references information (new in updated format) if references: print("📚 References:") for i, ref in enumerate(references): reference_id = ref.get("reference_id", "Unknown ID") file_path = ref.get("file_path", "Unknown source") print(f" {i+1}. Reference ID: {reference_id}") print(f" File Path: {file_path}") print() print("=" * 60) @pytest.mark.integration @pytest.mark.requires_api def compare_with_regular_query(): """Compare results between regular query and data query""" query_text = "LightRAG的作者是谁" print("\n🔄 Comparison test: Regular query vs Data query") print("-" * 60) # Regular query try: print("1. Regular query (/query):") regular_response = requests.post( f"{BASE_URL}/query", json={"query": query_text, "mode": "mix"}, headers=AUTH_HEADERS, timeout=30, ) if regular_response.status_code == 200: regular_data = regular_response.json() response_text = regular_data.get("response", "No response") print( f" Generated answer: {response_text[:300]}{'...' 
if len(response_text) > 300 else ''}" ) else: print(f" Regular query failed: {regular_response.status_code}") if regular_response.status_code == 403: print(" Authentication failed - Please check API Key configuration") elif regular_response.status_code == 401: print(" Unauthorized - Please check authentication information") print(f" Error details: {regular_response.text}") except Exception as e: print(f" Regular query error: {str(e)}") @pytest.mark.integration @pytest.mark.requires_api def run_all_reference_tests(): """Run all reference-related tests""" print("\n" + "🚀" * 20) print("LightRAG References Test Suite") print("🚀" * 20) all_tests_passed = True # Test 1: /query endpoint references try: if not test_query_endpoint_references(): all_tests_passed = False except Exception as e: print(f"❌ /query endpoint test failed with exception: {str(e)}") all_tests_passed = False # Test 2: /query/stream endpoint references try: if not test_query_stream_endpoint_references(): all_tests_passed = False except Exception as e: print(f"❌ /query/stream endpoint test failed with exception: {str(e)}") all_tests_passed = False # Test 3: References consistency across endpoints try: if not test_references_consistency(): all_tests_passed = False except Exception as e: print(f"❌ References consistency test failed with exception: {str(e)}") all_tests_passed = False # Final summary print("\n" + "=" * 60) print("TEST SUITE SUMMARY") print("=" * 60) if all_tests_passed: print("🎉 ALL TESTS PASSED!") print("✅ /query endpoint references functionality works correctly") print("✅ /query/stream endpoint references functionality works correctly") print("✅ References are consistent across all endpoints") print("\n🔧 System is ready for production use with reference support!") else: print("❌ SOME TESTS FAILED!") print("Please check the error messages above and fix the issues.") print("\n🔧 System needs attention before production deployment.") return all_tests_passed if __name__ == "__main__": import sys if len(sys.argv) > 1 and sys.argv[1] == "--references-only": # Run only the new reference tests success = run_all_reference_tests() sys.exit(0 if success else 1) else: # Run original tests plus new reference tests print("Running original aquery_data endpoint test...") test_aquery_data_endpoint() print("\nRunning comparison test...") compare_with_regular_query() print("\nRunning new reference tests...") run_all_reference_tests() print("\n💡 Usage tips:") print("1. Ensure LightRAG API service is running") print("2. Adjust base_url and authentication information as needed") print("3. Modify query parameters to test different retrieval strategies") print("4. Data query results can be used for further analysis and processing") print("5. Run with --references-only flag to test only reference functionality")
{ "repo_id": "HKUDS/LightRAG", "file_path": "tests/test_aquery_data_endpoint.py", "license": "MIT License", "lines": 628, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
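Condensed to a minimal client, the new response shape these tests validate nests the retrieval results under "data" while "status", "message", and "metadata" sit at the top level. The URL and API key below mirror the test configuration rather than a guaranteed deployment:

import requests

resp = requests.post(
    "http://localhost:9621/query/data",
    json={"query": "who authored LightRAG", "mode": "mix", "include_references": True},
    headers={"Content-Type": "application/json", "X-API-Key": "your-secure-api-key-here-123"},
    timeout=30,
)
payload = resp.json()
data = payload.get("data") or payload  # fall back to the legacy flat format
print(payload.get("status"), "-", len(data.get("entities", [])), "entities,",
      len(data.get("references", [])), "references")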
HKUDS/LightRAG:lightrag/tools/check_initialization.py
#!/usr/bin/env python3 """ Diagnostic tool to check LightRAG initialization status. This tool helps developers verify that their LightRAG instance is properly initialized and ready to use. It should be called AFTER initialize_storages() to validate that all components are correctly set up. Usage: # Basic usage in your code: rag = LightRAG(...) await rag.initialize_storages() await check_lightrag_setup(rag, verbose=True) # Run demo from command line: python -m lightrag.tools.check_initialization --demo """ import asyncio import sys from pathlib import Path # Add parent directory to path for imports sys.path.insert(0, str(Path(__file__).parent.parent.parent)) from lightrag import LightRAG from lightrag.base import StoragesStatus async def check_lightrag_setup(rag_instance: LightRAG, verbose: bool = False) -> bool: """ Check if a LightRAG instance is properly initialized. Args: rag_instance: The LightRAG instance to check verbose: If True, print detailed diagnostic information Returns: True if properly initialized, False otherwise """ issues = [] warnings = [] print("🔍 Checking LightRAG initialization status...\n") # Check storage initialization status if not hasattr(rag_instance, "_storages_status"): issues.append("LightRAG instance missing _storages_status attribute") elif rag_instance._storages_status != StoragesStatus.INITIALIZED: issues.append( f"Storages not initialized (status: {rag_instance._storages_status.name})" ) else: print("✅ Storage status: INITIALIZED") # Check individual storage components storage_components = [ ("full_docs", "Document storage"), ("text_chunks", "Text chunks storage"), ("entities_vdb", "Entity vector database"), ("relationships_vdb", "Relationship vector database"), ("chunks_vdb", "Chunks vector database"), ("doc_status", "Document status tracker"), ("llm_response_cache", "LLM response cache"), ("full_entities", "Entity storage"), ("full_relations", "Relation storage"), ("chunk_entity_relation_graph", "Graph storage"), ] if verbose: print("\n📦 Storage Components:") for component, description in storage_components: if not hasattr(rag_instance, component): issues.append(f"Missing storage component: {component} ({description})") else: storage = getattr(rag_instance, component) if storage is None: warnings.append(f"Storage {component} is None (might be optional)") elif hasattr(storage, "_storage_lock"): if storage._storage_lock is None: issues.append(f"Storage {component} not initialized (lock is None)") elif verbose: print(f" ✅ {description}: Ready") elif verbose: print(f" ✅ {description}: Ready") # Check pipeline status try: from lightrag.kg.shared_storage import get_namespace_data get_namespace_data("pipeline_status", workspace=rag_instance.workspace) print("✅ Pipeline status: INITIALIZED") except KeyError: issues.append( "Pipeline status not initialized - call rag.initialize_storages() first" ) except Exception as e: issues.append(f"Error checking pipeline status: {str(e)}") # Print results print("\n" + "=" * 50) if issues: print("❌ Issues found:\n") for issue in issues: print(f" • {issue}") print("\n📝 To fix, run this initialization sequence:\n") print(" await rag.initialize_storages()") print( "\n📚 Documentation: https://github.com/HKUDS/LightRAG#important-initialization-requirements" ) if warnings and verbose: print("\n⚠️ Warnings (might be normal):") for warning in warnings: print(f" • {warning}") return False else: print("✅ LightRAG is properly initialized and ready to use!") if warnings and verbose: print("\n⚠️ Warnings (might be normal):") for warning 
in warnings: print(f" • {warning}") return True async def demo(): """Demonstrate the diagnostic tool with a test instance.""" from lightrag.llm.openai import openai_embed, gpt_4o_mini_complete print("=" * 50) print("LightRAG Initialization Diagnostic Tool") print("=" * 50) # Create test instance rag = LightRAG( working_dir="./test_diagnostic", embedding_func=openai_embed, llm_model_func=gpt_4o_mini_complete, ) print("\n🔄 Initializing storages...\n") await rag.initialize_storages() # Auto-initializes pipeline_status print("\n🔍 Checking initialization status:\n") await check_lightrag_setup(rag, verbose=True) # Cleanup import shutil shutil.rmtree("./test_diagnostic", ignore_errors=True) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Check LightRAG initialization status") parser.add_argument( "--demo", action="store_true", help="Run a demonstration with a test instance" ) parser.add_argument( "--verbose", "-v", action="store_true", help="Show detailed diagnostic information", ) args = parser.parse_args() if args.demo: asyncio.run(demo()) else: print("Run with --demo to see the diagnostic tool in action") print("Or import this module and use check_lightrag_setup() with your instance")
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/tools/check_initialization.py", "license": "MIT License", "lines": 142, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
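For completeness, the initialization sequence the checker validates looks like this in application code, following the demo above; the OpenAI model functions and working directory are assumptions carried over from that demo:

import asyncio

from lightrag import LightRAG
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.tools.check_initialization import check_lightrag_setup

async def main():
    rag = LightRAG(
        working_dir="./rag_storage",  # hypothetical working directory
        embedding_func=openai_embed,
        llm_model_func=gpt_4o_mini_complete,
    )
    await rag.initialize_storages()  # must run before any insert or query
    assert await check_lightrag_setup(rag, verbose=True)

asyncio.run(main())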
HKUDS/LightRAG:lightrag/llm/binding_options.py
""" Module that implements containers for specific LLM bindings. This module provides container implementations for various Large Language Model bindings and integrations. """ from argparse import ArgumentParser, Namespace import argparse import json from dataclasses import asdict, dataclass, field from typing import Any, ClassVar, List, get_args, get_origin from lightrag.utils import get_env_value from lightrag.constants import DEFAULT_TEMPERATURE def _resolve_optional_type(field_type: Any) -> Any: """Return the concrete type for Optional/Union annotations.""" origin = get_origin(field_type) if origin in (list, dict, tuple): return field_type args = get_args(field_type) if args: non_none_args = [arg for arg in args if arg is not type(None)] if len(non_none_args) == 1: return non_none_args[0] return field_type # ============================================================================= # BindingOptions Base Class # ============================================================================= # # The BindingOptions class serves as the foundation for all LLM provider bindings # in LightRAG. It provides a standardized framework for: # # 1. Configuration Management: # - Defines how each LLM provider's configuration parameters are structured # - Handles default values and type information for each parameter # - Maps configuration options to command-line arguments and environment variables # # 2. Environment Integration: # - Automatically generates environment variable names from binding parameters # - Provides methods to create sample .env files for easy configuration # - Supports configuration via environment variables with fallback to defaults # # 3. Command-Line Interface: # - Dynamically generates command-line arguments for all registered bindings # - Maintains consistent naming conventions across different LLM providers # - Provides help text and type validation for each configuration option # # 4. Extensibility: # - Uses class introspection to automatically discover all binding subclasses # - Requires minimal boilerplate code when adding new LLM provider bindings # - Maintains separation of concerns between different provider configurations # # This design pattern ensures that adding support for a new LLM provider requires # only defining the provider-specific parameters and help text, while the base # class handles all the common functionality for argument parsing, environment # variable handling, and configuration management. # # Instances of a derived class of BindingOptions can be used to store multiple # runtime configurations of options for a single LLM provider. using the # asdict() method to convert the options to a dictionary. 
# # ============================================================================= @dataclass class BindingOptions: """Base class for binding options.""" # mandatory name of binding _binding_name: ClassVar[str] # optional help message for each option _help: ClassVar[dict[str, str]] @staticmethod def _all_class_vars(klass: type, include_inherited=True) -> dict[str, Any]: """Print class variables, optionally including inherited ones""" if include_inherited: # Get all class variables from MRO vars_dict = {} for base in reversed(klass.__mro__[:-1]): # Exclude 'object' vars_dict.update( { k: v for k, v in base.__dict__.items() if ( not k.startswith("_") and not callable(v) and not isinstance(v, classmethod) ) } ) else: # Only direct class variables vars_dict = { k: v for k, v in klass.__dict__.items() if ( not k.startswith("_") and not callable(v) and not isinstance(v, classmethod) ) } return vars_dict @classmethod def add_args(cls, parser: ArgumentParser): group = parser.add_argument_group(f"{cls._binding_name} binding options") for arg_item in cls.args_env_name_type_value(): # Handle JSON parsing for list types if arg_item["type"] is List[str]: def json_list_parser(value): try: parsed = json.loads(value) if not isinstance(parsed, list): raise argparse.ArgumentTypeError( f"Expected JSON array, got {type(parsed).__name__}" ) return parsed except json.JSONDecodeError as e: raise argparse.ArgumentTypeError(f"Invalid JSON: {e}") # Get environment variable with JSON parsing env_value = get_env_value(f"{arg_item['env_name']}", argparse.SUPPRESS) if env_value is not argparse.SUPPRESS: try: env_value = json_list_parser(env_value) except argparse.ArgumentTypeError: env_value = argparse.SUPPRESS group.add_argument( f"--{arg_item['argname']}", type=json_list_parser, default=env_value, help=arg_item["help"], ) # Handle JSON parsing for dict types elif arg_item["type"] is dict: def json_dict_parser(value): try: parsed = json.loads(value) if not isinstance(parsed, dict): raise argparse.ArgumentTypeError( f"Expected JSON object, got {type(parsed).__name__}" ) return parsed except json.JSONDecodeError as e: raise argparse.ArgumentTypeError(f"Invalid JSON: {e}") # Get environment variable with JSON parsing env_value = get_env_value(f"{arg_item['env_name']}", argparse.SUPPRESS) if env_value is not argparse.SUPPRESS: try: env_value = json_dict_parser(env_value) except argparse.ArgumentTypeError: env_value = argparse.SUPPRESS group.add_argument( f"--{arg_item['argname']}", type=json_dict_parser, default=env_value, help=arg_item["help"], ) # Handle boolean types specially to avoid argparse bool() constructor issues elif arg_item["type"] is bool: def bool_parser(value): """Custom boolean parser that handles string representations correctly""" if isinstance(value, bool): return value if isinstance(value, str): return value.lower() in ("true", "1", "yes", "t", "on") return bool(value) # Get environment variable with proper type conversion env_value = get_env_value( f"{arg_item['env_name']}", argparse.SUPPRESS, bool ) group.add_argument( f"--{arg_item['argname']}", type=bool_parser, default=env_value, help=arg_item["help"], ) else: resolved_type = arg_item["type"] if resolved_type is not None: resolved_type = _resolve_optional_type(resolved_type) group.add_argument( f"--{arg_item['argname']}", type=resolved_type, default=get_env_value(f"{arg_item['env_name']}", argparse.SUPPRESS), help=arg_item["help"], ) @classmethod def args_env_name_type_value(cls): import dataclasses args_prefix = 
f"{cls._binding_name}".replace("_", "-") env_var_prefix = f"{cls._binding_name}_".upper() help = cls._help # Check if this is a dataclass and use dataclass fields if dataclasses.is_dataclass(cls): for field in dataclasses.fields(cls): # Skip private fields if field.name.startswith("_"): continue # Get default value if field.default is not dataclasses.MISSING: default_value = field.default elif field.default_factory is not dataclasses.MISSING: default_value = field.default_factory() else: default_value = None argdef = { "argname": f"{args_prefix}-{field.name}", "env_name": f"{env_var_prefix}{field.name.upper()}", "type": _resolve_optional_type(field.type), "default": default_value, "help": f"{cls._binding_name} -- " + help.get(field.name, ""), } yield argdef else: # Fallback to old method for non-dataclass classes class_vars = { key: value for key, value in cls._all_class_vars(cls).items() if not callable(value) and not key.startswith("_") } # Get type hints to properly detect List[str] types type_hints = {} for base in cls.__mro__: if hasattr(base, "__annotations__"): type_hints.update(base.__annotations__) for class_var in class_vars: # Use type hint if available, otherwise fall back to type of value var_type = type_hints.get(class_var, type(class_vars[class_var])) argdef = { "argname": f"{args_prefix}-{class_var}", "env_name": f"{env_var_prefix}{class_var.upper()}", "type": var_type, "default": class_vars[class_var], "help": f"{cls._binding_name} -- " + help.get(class_var, ""), } yield argdef @classmethod def generate_dot_env_sample(cls): """ Generate a sample .env file for all LightRAG binding options. This method creates a .env file that includes all the binding options defined by the subclasses of BindingOptions. It uses the args_env_name_type_value() method to get the list of all options and their default values. Returns: str: A string containing the contents of the sample .env file. """ from io import StringIO sample_top = ( "#" * 80 + "\n" + ( "# Autogenerated .env entries list for LightRAG binding options\n" "#\n" "# To generate run:\n" "# $ python -m lightrag.llm.binding_options\n" ) + "#" * 80 + "\n" ) sample_bottom = ( ("#\n# End of .env entries for LightRAG binding options\n") + "#" * 80 + "\n" ) sample_stream = StringIO() sample_stream.write(sample_top) for klass in cls.__subclasses__(): for arg_item in klass.args_env_name_type_value(): if arg_item["help"]: sample_stream.write(f"# {arg_item['help']}\n") # Handle JSON formatting for list and dict types if arg_item["type"] is List[str] or arg_item["type"] is dict: default_value = json.dumps(arg_item["default"]) else: default_value = arg_item["default"] sample_stream.write(f"# {arg_item['env_name']}={default_value}\n\n") sample_stream.write(sample_bottom) return sample_stream.getvalue() @classmethod def options_dict(cls, args: Namespace) -> dict[str, Any]: """ Extract options dictionary for a specific binding from parsed arguments. This method filters the parsed command-line arguments to return only those that belong to the specific binding class. It removes the binding prefix from argument names to create a clean options dictionary. 
Args: args (Namespace): Parsed command-line arguments containing all binding options Returns: dict[str, Any]: Dictionary mapping option names (without prefix) to their values Example: If args contains {'ollama_num_ctx': 512, 'other_option': 'value'} and this is called on OllamaOptions, it returns {'num_ctx': 512} """ prefix = cls._binding_name + "_" skipchars = len(prefix) options = { key[skipchars:]: value for key, value in vars(args).items() if key.startswith(prefix) } return options def asdict(self) -> dict[str, Any]: """ Convert an instance of binding options to a dictionary. This method uses dataclasses.asdict() to convert the dataclass instance into a dictionary representation, including all its fields and values. Returns: dict[str, Any]: Dictionary representation of the binding options instance """ return asdict(self) # ============================================================================= # Binding Options for Ollama # ============================================================================= # # Ollama binding options provide configuration for the Ollama local LLM server. # These options control model behavior, sampling parameters, hardware utilization, # and performance settings. The parameters are based on Ollama's API specification # and provide fine-grained control over model inference and generation. # # The _OllamaOptionsMixin defines the complete set of available options, while # OllamaEmbeddingOptions and OllamaLLMOptions provide specialized configurations # for embedding and language model tasks respectively. # ============================================================================= @dataclass class _OllamaOptionsMixin: """Options for Ollama bindings.""" # Core context and generation parameters num_ctx: int = 32768 # Context window size (number of tokens) num_predict: int = 128 # Maximum number of tokens to predict num_keep: int = 0 # Number of tokens to keep from the initial prompt seed: int = -1 # Random seed for generation (-1 for random) # Sampling parameters temperature: float = DEFAULT_TEMPERATURE # Controls randomness (0.0-2.0) top_k: int = 40 # Top-k sampling parameter top_p: float = 0.9 # Top-p (nucleus) sampling parameter tfs_z: float = 1.0 # Tail free sampling parameter typical_p: float = 1.0 # Typical probability mass min_p: float = 0.0 # Minimum probability threshold # Repetition control repeat_last_n: int = 64 # Number of tokens to consider for repetition penalty repeat_penalty: float = 1.1 # Penalty for repetition presence_penalty: float = 0.0 # Penalty for token presence frequency_penalty: float = 0.0 # Penalty for token frequency # Mirostat sampling mirostat: int = ( # Mirostat sampling algorithm (0=disabled, 1=Mirostat 1.0, 2=Mirostat 2.0) 0 ) mirostat_tau: float = 5.0 # Mirostat target entropy mirostat_eta: float = 0.1 # Mirostat learning rate # Hardware and performance parameters numa: bool = False # Enable NUMA optimization num_batch: int = 512 # Batch size for processing num_gpu: int = -1 # Number of GPUs to use (-1 for auto) main_gpu: int = 0 # Main GPU index low_vram: bool = False # Optimize for low VRAM num_thread: int = 0 # Number of CPU threads (0 for auto) # Memory and model parameters f16_kv: bool = True # Use half-precision for key/value cache logits_all: bool = False # Return logits for all tokens vocab_only: bool = False # Only load vocabulary use_mmap: bool = True # Use memory mapping for model files use_mlock: bool = False # Lock model in memory embedding_only: bool = False # Only use for embeddings # Output control 
    penalize_newline: bool = True  # Penalize newline tokens
    stop: List[str] = field(default_factory=list)  # Stop sequences

    # optional help strings
    _help: ClassVar[dict[str, str]] = {
        "num_ctx": "Context window size (number of tokens)",
        "num_predict": "Maximum number of tokens to predict",
        "num_keep": "Number of tokens to keep from the initial prompt",
        "seed": "Random seed for generation (-1 for random)",
        "temperature": "Controls randomness (0.0-2.0, higher = more creative)",
        "top_k": "Top-k sampling parameter (0 = disabled)",
        "top_p": "Top-p (nucleus) sampling parameter (0.0-1.0)",
        "tfs_z": "Tail free sampling parameter (1.0 = disabled)",
        "typical_p": "Typical probability mass (1.0 = disabled)",
        "min_p": "Minimum probability threshold (0.0 = disabled)",
        "repeat_last_n": "Number of tokens to consider for repetition penalty",
        "repeat_penalty": "Penalty for repetition (1.0 = no penalty)",
        "presence_penalty": "Penalty for token presence (-2.0 to 2.0)",
        "frequency_penalty": "Penalty for token frequency (-2.0 to 2.0)",
        "mirostat": "Mirostat sampling algorithm (0=disabled, 1=Mirostat 1.0, 2=Mirostat 2.0)",
        "mirostat_tau": "Mirostat target entropy",
        "mirostat_eta": "Mirostat learning rate",
        "numa": "Enable NUMA optimization",
        "num_batch": "Batch size for processing",
        "num_gpu": "Number of GPUs to use (-1 for auto)",
        "main_gpu": "Main GPU index",
        "low_vram": "Optimize for low VRAM",
        "num_thread": "Number of CPU threads (0 for auto)",
        "f16_kv": "Use half-precision for key/value cache",
        "logits_all": "Return logits for all tokens",
        "vocab_only": "Only load vocabulary",
        "use_mmap": "Use memory mapping for model files",
        "use_mlock": "Lock model in memory",
        "embedding_only": "Only use for embeddings",
        "penalize_newline": "Penalize newline tokens",
        "stop": 'Stop sequences (JSON array of strings, e.g., \'["</s>", "\\n\\n"]\')',
    }


@dataclass
class OllamaEmbeddingOptions(_OllamaOptionsMixin, BindingOptions):
    """Options for Ollama embeddings with specialized configuration for embedding tasks."""

    # mandatory name of binding
    _binding_name: ClassVar[str] = "ollama_embedding"


@dataclass
class OllamaLLMOptions(_OllamaOptionsMixin, BindingOptions):
    """Options for Ollama LLM with specialized configuration for LLM tasks."""

    # mandatory name of binding
    _binding_name: ClassVar[str] = "ollama_llm"


# =============================================================================
# Binding Options for Gemini
# =============================================================================
@dataclass
class GeminiLLMOptions(BindingOptions):
    """Options for Google Gemini models."""

    _binding_name: ClassVar[str] = "gemini_llm"

    temperature: float = DEFAULT_TEMPERATURE
    top_p: float = 0.95
    top_k: int = 40
    max_output_tokens: int | None = None
    candidate_count: int = 1
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0
    stop_sequences: List[str] = field(default_factory=list)
    seed: int | None = None
    thinking_config: dict | None = None
    safety_settings: dict | None = None

    _help: ClassVar[dict[str, str]] = {
        "temperature": "Controls randomness (0.0-2.0, higher = more creative)",
        "top_p": "Nucleus sampling parameter (0.0-1.0)",
        "top_k": "Limits sampling to the top K tokens (1 disables the limit)",
        "max_output_tokens": "Maximum tokens generated in the response",
        "candidate_count": "Number of candidates returned per request",
        "presence_penalty": "Penalty for token presence (-2.0 to 2.0)",
        "frequency_penalty": "Penalty for token frequency (-2.0 to 2.0)",
        "stop_sequences": "Stop sequences (JSON array of strings, e.g., '[\"END\"]')",
        "seed": "Random seed for reproducible generation (leave empty for random)",
        "thinking_config": "Thinking configuration (JSON dict, e.g., '{\"thinking_budget\": 1024}' or '{\"include_thoughts\": true}')",
        "safety_settings": "JSON object with Gemini safety settings overrides",
    }


@dataclass
class GeminiEmbeddingOptions(BindingOptions):
    """Options for Google Gemini embedding models."""

    _binding_name: ClassVar[str] = "gemini_embedding"

    task_type: str = "RETRIEVAL_DOCUMENT"

    _help: ClassVar[dict[str, str]] = {
        "task_type": "Task type for embedding optimization (RETRIEVAL_DOCUMENT, RETRIEVAL_QUERY, SEMANTIC_SIMILARITY, CLASSIFICATION, CLUSTERING, CODE_RETRIEVAL_QUERY, QUESTION_ANSWERING, FACT_VERIFICATION)",
    }


# =============================================================================
# Binding Options for OpenAI
# =============================================================================
#
# OpenAI binding options provide configuration for OpenAI's API and Azure OpenAI.
# These options control model behavior, sampling parameters, and generation settings.
# The parameters are based on OpenAI's API specification and provide fine-grained
# control over model inference and generation.
#
# =============================================================================
@dataclass
class OpenAILLMOptions(BindingOptions):
    """Options for OpenAI LLM with configuration for OpenAI and Azure OpenAI API calls."""

    # mandatory name of binding
    _binding_name: ClassVar[str] = "openai_llm"

    # Sampling and generation parameters
    frequency_penalty: float = 0.0  # Penalty for token frequency (-2.0 to 2.0)
    max_completion_tokens: int | None = None  # Maximum number of tokens to generate
    presence_penalty: float = 0.0  # Penalty for token presence (-2.0 to 2.0)
    reasoning_effort: str = "medium"  # Reasoning effort level (low, medium, high)
    safety_identifier: str = ""  # Safety identifier for content filtering
    service_tier: str = ""  # Service tier for API usage
    stop: List[str] = field(default_factory=list)  # Stop sequences
    temperature: float = DEFAULT_TEMPERATURE  # Controls randomness (0.0 to 2.0)
    top_p: float = 1.0  # Nucleus sampling parameter (0.0 to 1.0)
    max_tokens: int | None = None  # Deprecated; use max_completion_tokens instead
    extra_body: dict | None = None  # Extra body parameters for OpenRouter or vLLM

    # Help descriptions
    _help: ClassVar[dict[str, str]] = {
        "frequency_penalty": "Penalty for token frequency (-2.0 to 2.0, positive values discourage repetition)",
        "max_completion_tokens": "Maximum number of tokens to generate (optional, leave empty for model default)",
        "presence_penalty": "Penalty for token presence (-2.0 to 2.0, positive values encourage new topics)",
        "reasoning_effort": "Reasoning effort level for o1 models (low, medium, high)",
        "safety_identifier": "Safety identifier for content filtering (optional)",
        "service_tier": "Service tier for API usage (optional)",
        "stop": 'Stop sequences (JSON array of strings, e.g., \'["</s>", "\\n\\n"]\')',
        "temperature": "Controls randomness (0.0-2.0, higher = more creative)",
        "top_p": "Nucleus sampling parameter (0.0-1.0, lower = more focused)",
        "max_tokens": "Maximum number of tokens to generate (deprecated, use max_completion_tokens instead)",
        "extra_body": 'Extra body parameters for OpenRouter or vLLM (JSON dict, e.g., \'{"reasoning": {"enabled": false}}\')',
    }


# =============================================================================
# Main Section - For Testing and Sample Generation
# =============================================================================
#
# When run as a script, this module:
# 1. Generates and prints a sample .env file with all binding options
# 2. If "test" argument is provided, demonstrates argument parsing with Ollama binding
#
# Usage:
#   python -m lightrag.llm.binding_options        # Generate .env sample
#   python -m lightrag.llm.binding_options test   # Test argument parsing
#
# =============================================================================
if __name__ == "__main__":
    import sys

    import dotenv

    # from io import StringIO

    dotenv.load_dotenv(dotenv_path=".env", override=False)

    # env_strstream = StringIO(
    #     ("OLLAMA_LLM_TEMPERATURE=0.1\nOLLAMA_EMBEDDING_TEMPERATURE=0.2\n")
    # )
    # # Load environment variables from .env file
    # dotenv.load_dotenv(stream=env_strstream)

    if len(sys.argv) > 1 and sys.argv[1] == "test":
        # Add arguments for OllamaEmbeddingOptions, OllamaLLMOptions, and OpenAILLMOptions
        parser = ArgumentParser(description="Test binding options")
        OllamaEmbeddingOptions.add_args(parser)
        OllamaLLMOptions.add_args(parser)
        OpenAILLMOptions.add_args(parser)

        # Parse test arguments
        args = parser.parse_args(
            [
                "--ollama-embedding-num_ctx",
                "1024",
                "--ollama-llm-num_ctx",
                "2048",
                "--openai-llm-temperature",
                "0.7",
                "--openai-llm-max_completion_tokens",
                "1000",
                "--openai-llm-stop",
                '["</s>", "\\n\\n"]',
                "--openai-llm-extra_body",
                '{"reasoning": {"effort": "high", "max_tokens": 2000, "exclude": false, "enabled": true}}',
            ]
        )

        print("Final args for LLM and Embedding:")
        print(f"{args}\n")
        print("Ollama LLM options:")
        print(OllamaLLMOptions.options_dict(args))
        print("\nOllama Embedding options:")
        print(OllamaEmbeddingOptions.options_dict(args))
        print("\nOpenAI LLM options:")
        print(OpenAILLMOptions.options_dict(args))

        # Test creating OpenAI options instance
        openai_options = OpenAILLMOptions(
            temperature=0.8,
            max_completion_tokens=1500,
            frequency_penalty=0.1,
            presence_penalty=0.2,
            stop=["<|end|>", "\n\n"],
        )
        print("\nOpenAI LLM options instance:")
        print(openai_options.asdict())

        # Test creating OpenAI options instance with a reasoning payload in extra_body
        openai_options_with_reasoning = OpenAILLMOptions(
            temperature=0.9,
            max_completion_tokens=2000,
            extra_body={
                "reasoning": {
                    "effort": "medium",
                    "max_tokens": 1500,
                    "exclude": True,
                    "enabled": True,
                },
            },
        )
        print("\nOpenAI LLM options instance with reasoning:")
        print(openai_options_with_reasoning.asdict())

        # Test dict parsing functionality
        print("\n" + "=" * 50)
        print("TESTING DICT PARSING FUNCTIONALITY")
        print("=" * 50)

        # Test valid JSON dict parsing
        test_parser = ArgumentParser(description="Test dict parsing")
        OpenAILLMOptions.add_args(test_parser)

        try:
            test_args = test_parser.parse_args(
                ["--openai-llm-extra_body", '{"effort": "low", "max_tokens": 1000}']
            )
            print("✓ Valid JSON dict parsing successful:")
            print(
                f"  Parsed extra_body: {OpenAILLMOptions.options_dict(test_args)['extra_body']}"
            )
        except Exception as e:
            print(f"✗ Valid JSON dict parsing failed: {e}")

        # Test invalid JSON dict parsing
        try:
            test_args = test_parser.parse_args(
                [
                    "--openai-llm-extra_body",
                    '{"effort": "low", "max_tokens": 1000',  # Missing closing brace
                ]
            )
            print("✗ Invalid JSON should have failed but didn't")
        except SystemExit:
            print("✓ Invalid JSON dict parsing correctly rejected")
        except Exception as e:
            print(f"✓ Invalid JSON dict parsing correctly rejected: {e}")

        # Test non-dict JSON parsing
        try:
            test_args = test_parser.parse_args(
                [
                    "--openai-llm-extra_body",
                    '["not", "a", "dict"]',  # Array instead of dict
                ]
            )
            print("✗ Non-dict JSON should have failed but didn't")
        except SystemExit:
            print("✓ Non-dict JSON parsing correctly rejected")
        except Exception as e:
            print(f"✓ Non-dict JSON parsing correctly rejected: {e}")

        print("\n" + "=" * 50)
        print("TESTING ENVIRONMENT VARIABLE SUPPORT")
        print("=" * 50)

        # Test environment variable support for dict options
        import os

        os.environ["OPENAI_LLM_EXTRA_BODY"] = (
            '{"effort": "high", "max_tokens": 3000, "exclude": false}'
        )

        env_parser = ArgumentParser(description="Test env var dict parsing")
        OpenAILLMOptions.add_args(env_parser)

        try:
            env_args = env_parser.parse_args([])  # No command-line args; should use env var
            extra_body_from_env = OpenAILLMOptions.options_dict(env_args).get(
                "extra_body"
            )
            if extra_body_from_env:
                print("✓ Environment variable dict parsing successful:")
                print(f"  Parsed extra_body from env: {extra_body_from_env}")
            else:
                print("✗ Environment variable dict parsing failed: No extra_body found")
        except Exception as e:
            print(f"✗ Environment variable dict parsing failed: {e}")
        finally:
            # Clean up environment variable
            if "OPENAI_LLM_EXTRA_BODY" in os.environ:
                del os.environ["OPENAI_LLM_EXTRA_BODY"]
    else:
        print(BindingOptions.generate_dot_env_sample())
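A minimal sketch of consuming these option classes from application code, assuming only the add_args()/options_dict() behavior exercised in the __main__ test section above; the final hand-off to an OpenAI client is illustrative, not something this module prescribes.

# Sketch: resolve OpenAI LLM options from env vars and CLI flags.
from argparse import ArgumentParser

from lightrag.llm.binding_options import OpenAILLMOptions

parser = ArgumentParser(description="My LightRAG-based app")
OpenAILLMOptions.add_args(parser)  # adds flags such as --openai-llm-temperature

# With an empty argv, values fall back to env vars (e.g. OPENAI_LLM_TEMPERATURE)
# and then to the dataclass defaults declared above.
args = parser.parse_args([])
llm_kwargs = OpenAILLMOptions.options_dict(args)
print(llm_kwargs)  # a plain dict, ready to merge into an LLM call's kwargs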
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/llm/binding_options.py", "license": "MIT License", "lines": 635, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:examples/unofficial-sample/lightrag_cloudflare_demo.py
import asyncio
import os
import inspect
import logging
import logging.config
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc, logger, set_verbose_debug
import requests
import numpy as np
from dotenv import load_dotenv

"""This code is a modified version of lightrag_openai_demo.py"""

# ideally, as always, use the env!
load_dotenv(dotenv_path=".env", override=False)

""" ----========= IMPORTANT: CHANGE THIS! =========---- """
cloudflare_api_key = "YOUR_API_KEY"
account_id = "YOUR_ACCOUNT_ID"  # This is unique to your Cloudflare account

# Derived automatically from the account id
api_base_url = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/"

# choose an embedding model
EMBEDDING_MODEL = "@cf/baai/bge-m3"
# choose a generative model
LLM_MODEL = "@cf/meta/llama-3.2-3b-instruct"

WORKING_DIR = "../dickens"  # you can change the output directory as desired


# Cloudflare init
class CloudflareWorker:
    def __init__(
        self,
        cloudflare_api_key: str,
        api_base_url: str,
        llm_model_name: str,
        embedding_model_name: str,
        max_tokens: int = 4080,
        max_response_tokens: int = 4080,
    ):
        self.cloudflare_api_key = cloudflare_api_key
        self.api_base_url = api_base_url
        self.llm_model_name = llm_model_name
        self.embedding_model_name = embedding_model_name
        self.max_tokens = max_tokens
        self.max_response_tokens = max_response_tokens

    async def _send_request(self, model_name: str, input_: dict, debug_log: str):
        headers = {"Authorization": f"Bearer {self.cloudflare_api_key}"}

        print(f"""
        data sent to Cloudflare
        ~~~~~~~~~~~
        {debug_log}
        """)

        try:
            # NOTE: requests is synchronous; acceptable for a demo script
            response_raw = requests.post(
                f"{self.api_base_url}{model_name}", headers=headers, json=input_
            ).json()

            print(f"""
            Cloudflare worker responded with:
            ~~~~~~~~~~~
            {str(response_raw)}
            """)

            result = response_raw.get("result", {})

            if "data" in result:  # Embedding case
                return np.array(result["data"])
            if "response" in result:  # LLM response
                return result["response"]

            raise ValueError("Unexpected Cloudflare response format")

        except Exception as e:
            print(f"""
            Cloudflare API returned:
            ~~~~~~~~~
            Error: {e}
            """)
            input("Press Enter to continue...")
            return None

    async def query(self, prompt, system_prompt: str = "", **kwargs) -> str:
        # LightRAG passes a hashing_kv kwarg for caching; this demo does no caching, so drop it
        kwargs.pop("hashing_kv", None)
        message = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]

        input_ = {
            "messages": message,
            "max_tokens": self.max_tokens,
            "response_token_limit": self.max_response_tokens,
        }

        return await self._send_request(
            self.llm_model_name,
            input_,
            debug_log=f"\n- model used {self.llm_model_name}\n- system prompt: {system_prompt}\n- query: {prompt}",
        )

    async def embedding_chunk(self, texts: list[str]) -> np.ndarray:
        print(f"""
        TEXT inputted
        ~~~~~
        {texts}
        """)
        input_ = {
            "text": texts,
            "max_tokens": self.max_tokens,
            "response_token_limit": self.max_response_tokens,
        }

        return await self._send_request(
            self.embedding_model_name,
            input_,
            debug_log=f"\n- embedding model name {self.embedding_model_name}\n- texts: {texts}",
        )


def configure_logging():
    """Configure logging for the application"""
    # Reset any existing handlers to ensure clean configuration
    for logger_name in ["uvicorn", "uvicorn.access", "uvicorn.error", "lightrag"]:
        logger_instance = logging.getLogger(logger_name)
        logger_instance.handlers = []
        logger_instance.filters = []

    # Get log directory path from environment variable or use current directory
    log_dir = os.getenv("LOG_DIR", os.getcwd())
    log_file_path = os.path.abspath(
        os.path.join(log_dir, "lightrag_cloudflare_worker_demo.log")
    )

    print(f"\nLightRAG compatible demo log file: {log_file_path}\n")
    os.makedirs(os.path.dirname(log_file_path), exist_ok=True)

    # Get log file max size and backup count from environment variables
    log_max_bytes = int(os.getenv("LOG_MAX_BYTES", 10485760))  # Default 10MB
    log_backup_count = int(os.getenv("LOG_BACKUP_COUNT", 5))  # Default 5 backups

    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "default": {
                    "format": "%(levelname)s: %(message)s",
                },
                "detailed": {
                    "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
                },
            },
            "handlers": {
                "console": {
                    "formatter": "default",
                    "class": "logging.StreamHandler",
                    "stream": "ext://sys.stderr",
                },
                "file": {
                    "formatter": "detailed",
                    "class": "logging.handlers.RotatingFileHandler",
                    "filename": log_file_path,
                    "maxBytes": log_max_bytes,
                    "backupCount": log_backup_count,
                    "encoding": "utf-8",
                },
            },
            "loggers": {
                "lightrag": {
                    "handlers": ["console", "file"],
                    "level": "INFO",
                    "propagate": False,
                },
            },
        }
    )

    # Set the logger level to INFO
    logger.setLevel(logging.INFO)
    # Enable verbose debug if needed
    set_verbose_debug(os.getenv("VERBOSE_DEBUG", "false").lower() == "true")


if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)


async def initialize_rag():
    cloudflare_worker = CloudflareWorker(
        cloudflare_api_key=cloudflare_api_key,
        api_base_url=api_base_url,
        embedding_model_name=EMBEDDING_MODEL,
        llm_model_name=LLM_MODEL,
    )
    rag = LightRAG(
        working_dir=WORKING_DIR,
        max_parallel_insert=2,
        llm_model_func=cloudflare_worker.query,
        llm_model_name=os.getenv("LLM_MODEL", LLM_MODEL),
        summary_max_tokens=4080,
        embedding_func=EmbeddingFunc(
            embedding_dim=int(os.getenv("EMBEDDING_DIM", "1024")),
            max_token_size=int(os.getenv("MAX_EMBED_TOKENS", "2048")),
            func=lambda texts: cloudflare_worker.embedding_chunk(texts),
        ),
    )

    await rag.initialize_storages()  # Auto-initializes pipeline_status

    return rag


async def print_stream(stream):
    async for chunk in stream:
        print(chunk, end="", flush=True)


async def main():
    rag = None  # ensure the name exists for the finally block
    try:
        # Clear old data files
        files_to_delete = [
            "graph_chunk_entity_relation.graphml",
            "kv_store_doc_status.json",
            "kv_store_full_docs.json",
            "kv_store_text_chunks.json",
            "vdb_chunks.json",
            "vdb_entities.json",
            "vdb_relationships.json",
        ]

        for file in files_to_delete:
            file_path = os.path.join(WORKING_DIR, file)
            if os.path.exists(file_path):
                os.remove(file_path)
                print(f"Deleting old file: {file_path}")

        # Initialize RAG instance
        rag = await initialize_rag()

        # Test embedding function
        test_text = ["This is a test string for embedding."]
        embedding = await rag.embedding_func(test_text)
        embedding_dim = embedding.shape[1]
        print("\n=======================")
        print("Test embedding function")
        print("========================")
        print(f"Test text: {test_text}")
        print(f"Detected embedding dimension: {embedding_dim}\n\n")

        # Insert the document to build the knowledge base
        # (several texts can be inserted at once by modifying this call)
        with open("./book.txt", "r", encoding="utf-8") as f:
            await rag.ainsert(f.read())

        # Perform naive search
        print("\n=====================")
        print("Query mode: naive")
        print("=====================")
        resp = await rag.aquery(
            "What are the top themes in this story?",
            param=QueryParam(mode="naive", stream=True),
        )
        if inspect.isasyncgen(resp):
            await print_stream(resp)
        else:
            print(resp)

        # Perform local search
        print("\n=====================")
        print("Query mode: local")
        print("=====================")
        resp = await rag.aquery(
            "What are the top themes in this story?",
            param=QueryParam(mode="local", stream=True),
        )
        if inspect.isasyncgen(resp):
            await print_stream(resp)
        else:
            print(resp)

        # Perform global search
        print("\n=====================")
        print("Query mode: global")
        print("=====================")
        resp = await rag.aquery(
            "What are the top themes in this story?",
            param=QueryParam(mode="global", stream=True),
        )
        if inspect.isasyncgen(resp):
            await print_stream(resp)
        else:
            print(resp)

        # Perform hybrid search
        print("\n=====================")
        print("Query mode: hybrid")
        print("=====================")
        resp = await rag.aquery(
            "What are the top themes in this story?",
            param=QueryParam(mode="hybrid", stream=True),
        )
        if inspect.isasyncgen(resp):
            await print_stream(resp)
        else:
            print(resp)

        """FOR TESTING (if you want to test straight away after building): uncomment this part"""
        """
        print("\n" + "=" * 60)
        print("AI ASSISTANT READY!")
        print("Ask questions about (your uploaded) regulations")
        print("Type 'quit' to exit")
        print("=" * 60)

        while True:
            question = input("\n🔥 Your question: ")
            if question.lower() in ['quit', 'exit', 'bye']:
                break

            print("\nThinking...")
            response = await rag.aquery(question, param=QueryParam(mode="hybrid"))
            print(f"\nAnswer: {response}")
        """

    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        if rag:
            await rag.llm_response_cache.index_done_callback()
            await rag.finalize_storages()


if __name__ == "__main__":
    # Configure logging before running the main function
    configure_logging()
    asyncio.run(main())
    print("\nDone!")
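Before wiring CloudflareWorker into LightRAG it can be worth smoke-testing the worker on its own; a minimal sketch using only the constants and methods defined in this demo (the expected embedding shape is an assumption about @cf/baai/bge-m3):

import asyncio

async def smoke_test():
    worker = CloudflareWorker(
        cloudflare_api_key=cloudflare_api_key,
        api_base_url=api_base_url,
        llm_model_name=LLM_MODEL,
        embedding_model_name=EMBEDDING_MODEL,
    )
    # One LLM round trip and one embedding round trip against Cloudflare AI
    answer = await worker.query("Reply with a single short sentence.")
    print(answer)
    vectors = await worker.embedding_chunk(["hello world"])
    print(vectors.shape)  # assumed (1, 1024) for bge-m3

asyncio.run(smoke_test())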
{ "repo_id": "HKUDS/LightRAG", "file_path": "examples/unofficial-sample/lightrag_cloudflare_demo.py", "license": "MIT License", "lines": 294, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:examples/rerank_example.py
""" LightRAG Rerank Integration Example This example demonstrates how to use rerank functionality with LightRAG to improve retrieval quality across different query modes. Configuration Required: 1. Set your OpenAI LLM API key and base URL with env vars LLM_MODEL LLM_BINDING_HOST LLM_BINDING_API_KEY 2. Set your OpenAI embedding API key and base URL with env vars: EMBEDDING_MODEL EMBEDDING_DIM EMBEDDING_BINDING_HOST EMBEDDING_BINDING_API_KEY 3. Set your vLLM deployed AI rerank model setting with env vars: RERANK_BINDING=cohere RERANK_MODEL (e.g., answerai-colbert-small-v1 or rerank-v3.5) RERANK_BINDING_HOST (e.g., https://api.cohere.com/v2/rerank or LiteLLM proxy) RERANK_BINDING_API_KEY RERANK_ENABLE_CHUNKING=true (optional, for models with token limits) RERANK_MAX_TOKENS_PER_DOC=480 (optional, default 4096) Note: Rerank is controlled per query via the 'enable_rerank' parameter (default: True) """ import asyncio import os import numpy as np from lightrag import LightRAG, QueryParam from lightrag.llm.openai import openai_complete_if_cache, openai_embed from lightrag.utils import EmbeddingFunc, setup_logger from functools import partial from lightrag.rerank import cohere_rerank # Set up your working directory WORKING_DIR = "./test_rerank" setup_logger("test_rerank") if not os.path.exists(WORKING_DIR): os.mkdir(WORKING_DIR) async def llm_model_func( prompt, system_prompt=None, history_messages=[], **kwargs ) -> str: return await openai_complete_if_cache( os.getenv("LLM_MODEL"), prompt, system_prompt=system_prompt, history_messages=history_messages, api_key=os.getenv("LLM_BINDING_API_KEY"), base_url=os.getenv("LLM_BINDING_HOST"), **kwargs, ) async def embedding_func(texts: list[str]) -> np.ndarray: return await openai_embed( texts, model=os.getenv("EMBEDDING_MODEL"), api_key=os.getenv("EMBEDDING_BINDING_API_KEY"), base_url=os.getenv("EMBEDDING_BINDING_HOST"), ) rerank_model_func = partial( cohere_rerank, model=os.getenv("RERANK_MODEL", "rerank-v3.5"), api_key=os.getenv("RERANK_BINDING_API_KEY"), base_url=os.getenv("RERANK_BINDING_HOST", "https://api.cohere.com/v2/rerank"), enable_chunking=os.getenv("RERANK_ENABLE_CHUNKING", "false").lower() == "true", max_tokens_per_doc=int(os.getenv("RERANK_MAX_TOKENS_PER_DOC", "4096")), ) async def create_rag_with_rerank(): """Create LightRAG instance with rerank configuration""" # Get embedding dimension test_embedding = await embedding_func(["test"]) embedding_dim = test_embedding.shape[1] print(f"Detected embedding dimension: {embedding_dim}") # Method 1: Using custom rerank function rag = LightRAG( working_dir=WORKING_DIR, llm_model_func=llm_model_func, embedding_func=EmbeddingFunc( embedding_dim=embedding_dim, max_token_size=8192, func=embedding_func, ), # Rerank Configuration - provide the rerank function rerank_model_func=rerank_model_func, ) await rag.initialize_storages() # Auto-initializes pipeline_status return rag async def test_rerank_with_different_settings(): """ Test rerank functionality with different enable_rerank settings """ print("\n\n🚀 Setting up LightRAG with Rerank functionality...") rag = await create_rag_with_rerank() # Insert sample documents sample_docs = [ "Reranking improves retrieval quality by re-ordering documents based on relevance.", "LightRAG is a powerful retrieval-augmented generation system with multiple query modes.", "Vector databases enable efficient similarity search in high-dimensional embedding spaces.", "Natural language processing has evolved with large language models and transformers.", "Machine learning 
algorithms can learn patterns from data without explicit programming.", ] print("📄 Inserting sample documents...") await rag.ainsert(sample_docs) query = "How does reranking improve retrieval quality?" print(f"\n🔍 Testing query: '{query}'") print("=" * 80) # Test with rerank enabled (default) print("\n📊 Testing with enable_rerank=True (default):") result_with_rerank = await rag.aquery( query, param=QueryParam( mode="naive", top_k=10, chunk_top_k=5, enable_rerank=True, # Explicitly enable rerank ), ) print(f" Result length: {len(result_with_rerank)} characters") print(f" Preview: {result_with_rerank[:100]}...") # Test with rerank disabled print("\n📊 Testing with enable_rerank=False:") result_without_rerank = await rag.aquery( query, param=QueryParam( mode="naive", top_k=10, chunk_top_k=5, enable_rerank=False, # Disable rerank ), ) print(f" Result length: {len(result_without_rerank)} characters") print(f" Preview: {result_without_rerank[:100]}...") # Test with default settings (enable_rerank defaults to True) print("\n📊 Testing with default settings (enable_rerank defaults to True):") result_default = await rag.aquery( query, param=QueryParam(mode="naive", top_k=10, chunk_top_k=5) ) print(f" Result length: {len(result_default)} characters") print(f" Preview: {result_default[:100]}...") async def test_direct_rerank(): """Test rerank function directly""" print("\n🔧 Direct Rerank API Test") print("=" * 40) documents = [ "Vector search finds semantically similar documents", "LightRAG supports advanced reranking capabilities", "Reranking significantly improves retrieval quality", "Natural language processing with modern transformers", "The quick brown fox jumps over the lazy dog", ] query = "rerank improve quality" print(f"Query: '{query}'") print(f"Documents: {len(documents)}") try: reranked_results = await rerank_model_func( query=query, documents=documents, top_n=4, ) print("\n✅ Rerank Results:") i = 0 for result in reranked_results: index = result["index"] score = result["relevance_score"] content = documents[index] print(f" {index}. Score: {score:.4f} | {content}...") i += 1 except Exception as e: print(f"❌ Rerank failed: {e}") async def main(): """Main example function""" print("🎯 LightRAG Rerank Integration Example") print("=" * 60) try: # Test direct rerank await test_direct_rerank() # Test rerank with different enable_rerank settings await test_rerank_with_different_settings() print("\n✅ Example completed successfully!") print("\n💡 Key Points:") print(" ✓ Rerank is now controlled per query via 'enable_rerank' parameter") print(" ✓ Default value for enable_rerank is True") print(" ✓ Rerank function is configured at LightRAG initialization") print(" ✓ Per-query enable_rerank setting overrides default behavior") print( " ✓ If enable_rerank=True but no rerank model is configured, a warning is issued" ) print(" ✓ Monitor API usage and costs when using rerank services") except Exception as e: print(f"\n❌ Example failed: {e}") import traceback traceback.print_exc() if __name__ == "__main__": asyncio.run(main())
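The same partial-binding pattern works for the other providers shipped in lightrag.rerank; a sketch for Jina, reusing the generic RERANK_* env vars from this example (sharing them across providers is an assumption, not a requirement):

from functools import partial
import os

from lightrag.rerank import jina_rerank

jina_rerank_func = partial(
    jina_rerank,
    model=os.getenv("RERANK_MODEL", "jina-reranker-v2-base-multilingual"),
    api_key=os.getenv("RERANK_BINDING_API_KEY"),
)
# Pass rerank_model_func=jina_rerank_func to LightRAG(...) exactly as with cohere_rerank.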
{ "repo_id": "HKUDS/LightRAG", "file_path": "examples/rerank_example.py", "license": "MIT License", "lines": 190, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:lightrag/rerank.py
from __future__ import annotations import os import aiohttp from typing import Any, List, Dict, Optional, Tuple from tenacity import ( retry, stop_after_attempt, wait_exponential, retry_if_exception_type, ) from .utils import logger from dotenv import load_dotenv # use the .env that is inside the current folder # allows to use different .env file for each lightrag instance # the OS environment variables take precedence over the .env file load_dotenv(dotenv_path=".env", override=False) def chunk_documents_for_rerank( documents: List[str], max_tokens: int = 480, overlap_tokens: int = 32, tokenizer_model: str = "gpt-4o-mini", ) -> Tuple[List[str], List[int]]: """ Chunk documents that exceed token limit for reranking. Args: documents: List of document strings to chunk max_tokens: Maximum tokens per chunk (default 480 to leave margin for 512 limit) overlap_tokens: Number of tokens to overlap between chunks tokenizer_model: Model name for tiktoken tokenizer Returns: Tuple of (chunked_documents, original_doc_indices) - chunked_documents: List of document chunks (may be more than input) - original_doc_indices: Maps each chunk back to its original document index """ # Clamp overlap_tokens to ensure the loop always advances # If overlap_tokens >= max_tokens, the chunking loop would hang if overlap_tokens >= max_tokens: original_overlap = overlap_tokens # Ensure overlap is at least 1 token less than max to guarantee progress # For very small max_tokens (e.g., 1), set overlap to 0 overlap_tokens = max(0, max_tokens - 1) logger.warning( f"overlap_tokens ({original_overlap}) must be less than max_tokens ({max_tokens}). " f"Clamping to {overlap_tokens} to prevent infinite loop." ) try: from .utils import TiktokenTokenizer tokenizer = TiktokenTokenizer(model_name=tokenizer_model) except Exception as e: logger.warning( f"Failed to initialize tokenizer: {e}. Using character-based approximation." ) # Fallback: approximate 1 token ≈ 4 characters max_chars = max_tokens * 4 overlap_chars = overlap_tokens * 4 chunked_docs = [] doc_indices = [] for idx, doc in enumerate(documents): if len(doc) <= max_chars: chunked_docs.append(doc) doc_indices.append(idx) else: # Split into overlapping chunks start = 0 while start < len(doc): end = min(start + max_chars, len(doc)) chunk = doc[start:end] chunked_docs.append(chunk) doc_indices.append(idx) if end >= len(doc): break start = end - overlap_chars return chunked_docs, doc_indices # Use tokenizer for accurate chunking chunked_docs = [] doc_indices = [] for idx, doc in enumerate(documents): tokens = tokenizer.encode(doc) if len(tokens) <= max_tokens: # Document fits in one chunk chunked_docs.append(doc) doc_indices.append(idx) else: # Split into overlapping chunks start = 0 while start < len(tokens): end = min(start + max_tokens, len(tokens)) chunk_tokens = tokens[start:end] chunk_text = tokenizer.decode(chunk_tokens) chunked_docs.append(chunk_text) doc_indices.append(idx) if end >= len(tokens): break start = end - overlap_tokens return chunked_docs, doc_indices def aggregate_chunk_scores( chunk_results: List[Dict[str, Any]], doc_indices: List[int], num_original_docs: int, aggregation: str = "max", ) -> List[Dict[str, Any]]: """ Aggregate rerank scores from document chunks back to original documents. Args: chunk_results: Rerank results for chunks [{"index": chunk_idx, "relevance_score": score}, ...] 
doc_indices: Maps each chunk index to original document index num_original_docs: Total number of original documents aggregation: Strategy for aggregating scores ("max", "mean", "first") Returns: List of results for original documents [{"index": doc_idx, "relevance_score": score}, ...] """ # Group scores by original document index doc_scores: Dict[int, List[float]] = {i: [] for i in range(num_original_docs)} for result in chunk_results: chunk_idx = result["index"] score = result["relevance_score"] if 0 <= chunk_idx < len(doc_indices): original_doc_idx = doc_indices[chunk_idx] doc_scores[original_doc_idx].append(score) # Aggregate scores aggregated_results = [] for doc_idx, scores in doc_scores.items(): if not scores: continue if aggregation == "max": final_score = max(scores) elif aggregation == "mean": final_score = sum(scores) / len(scores) elif aggregation == "first": final_score = scores[0] else: logger.warning(f"Unknown aggregation strategy: {aggregation}, using max") final_score = max(scores) aggregated_results.append( { "index": doc_idx, "relevance_score": final_score, } ) # Sort by relevance score (descending) aggregated_results.sort(key=lambda x: x["relevance_score"], reverse=True) return aggregated_results @retry( stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=60), retry=( retry_if_exception_type(aiohttp.ClientError) | retry_if_exception_type(aiohttp.ClientResponseError) ), ) async def generic_rerank_api( query: str, documents: List[str], model: str, base_url: str, api_key: Optional[str], top_n: Optional[int] = None, return_documents: Optional[bool] = None, extra_body: Optional[Dict[str, Any]] = None, response_format: str = "standard", # "standard" (Jina/Cohere) or "aliyun" request_format: str = "standard", # "standard" (Jina/Cohere) or "aliyun" enable_chunking: bool = False, max_tokens_per_doc: int = 480, ) -> List[Dict[str, Any]]: """ Generic rerank API call for Jina/Cohere/Aliyun models. 
Args: query: The search query documents: List of strings to rerank model: Model name to use base_url: API endpoint URL api_key: API key for authentication top_n: Number of top results to return return_documents: Whether to return document text (Jina only) extra_body: Additional body parameters response_format: Response format type ("standard" for Jina/Cohere, "aliyun" for Aliyun) request_format: Request format type enable_chunking: Whether to chunk documents exceeding token limit max_tokens_per_doc: Maximum tokens per document for chunking Returns: List of dictionary of ["index": int, "relevance_score": float] """ if not base_url: raise ValueError("Base URL is required") headers = {"Content-Type": "application/json"} if api_key is not None: headers["Authorization"] = f"Bearer {api_key}" # Handle document chunking if enabled original_documents = documents doc_indices = None original_top_n = top_n # Save original top_n for post-aggregation limiting if enable_chunking: documents, doc_indices = chunk_documents_for_rerank( documents, max_tokens=max_tokens_per_doc ) logger.debug( f"Chunked {len(original_documents)} documents into {len(documents)} chunks" ) # When chunking is enabled, disable top_n at API level to get all chunk scores # This ensures proper document-level coverage after aggregation # We'll apply top_n to aggregated document results instead if top_n is not None: logger.debug( f"Chunking enabled: disabled API-level top_n={top_n} to ensure complete document coverage" ) top_n = None # Build request payload based on request format if request_format == "aliyun": # Aliyun format: nested input/parameters structure payload = { "model": model, "input": { "query": query, "documents": documents, }, "parameters": {}, } # Add optional parameters to parameters object if top_n is not None: payload["parameters"]["top_n"] = top_n if return_documents is not None: payload["parameters"]["return_documents"] = return_documents # Add extra parameters to parameters object if extra_body: payload["parameters"].update(extra_body) else: # Standard format for Jina/Cohere/OpenAI payload = { "model": model, "query": query, "documents": documents, } # Add optional parameters if top_n is not None: payload["top_n"] = top_n # Only Jina API supports return_documents parameter if return_documents is not None and response_format in ("standard",): payload["return_documents"] = return_documents # Add extra parameters if extra_body: payload.update(extra_body) logger.debug( f"Rerank request: {len(documents)} documents, model: {model}, format: {response_format}" ) async with aiohttp.ClientSession() as session: async with session.post(base_url, headers=headers, json=payload) as response: if response.status != 200: error_text = await response.text() content_type = response.headers.get("content-type", "").lower() is_html_error = ( error_text.strip().startswith("<!DOCTYPE html>") or "text/html" in content_type ) if is_html_error: if response.status == 502: clean_error = "Bad Gateway (502) - Rerank service temporarily unavailable. Please try again in a few minutes." elif response.status == 503: clean_error = "Service Unavailable (503) - Rerank service is temporarily overloaded. Please try again later." elif response.status == 504: clean_error = "Gateway Timeout (504) - Rerank service request timed out. Please try again." else: clean_error = f"HTTP {response.status} - Rerank service error. Please try again later." 
else: clean_error = error_text logger.error(f"Rerank API error {response.status}: {clean_error}") raise aiohttp.ClientResponseError( request_info=response.request_info, history=response.history, status=response.status, message=f"Rerank API error: {clean_error}", ) response_json = await response.json() if response_format == "aliyun": # Aliyun format: {"output": {"results": [...]}} results = response_json.get("output", {}).get("results", []) if not isinstance(results, list): logger.warning( f"Expected 'output.results' to be list, got {type(results)}: {results}" ) results = [] elif response_format == "standard": # Standard format: {"results": [...]} results = response_json.get("results", []) if not isinstance(results, list): logger.warning( f"Expected 'results' to be list, got {type(results)}: {results}" ) results = [] else: raise ValueError(f"Unsupported response format: {response_format}") if not results: logger.warning("Rerank API returned empty results") return [] # Standardize return format standardized_results = [ {"index": result["index"], "relevance_score": result["relevance_score"]} for result in results ] # Aggregate chunk scores back to original documents if chunking was enabled if enable_chunking and doc_indices: standardized_results = aggregate_chunk_scores( standardized_results, doc_indices, len(original_documents), aggregation="max", ) # Apply original top_n limit at document level (post-aggregation) # This preserves document-level semantics: top_n limits documents, not chunks if ( original_top_n is not None and len(standardized_results) > original_top_n ): standardized_results = standardized_results[:original_top_n] return standardized_results async def cohere_rerank( query: str, documents: List[str], top_n: Optional[int] = None, api_key: Optional[str] = None, model: str = "rerank-v3.5", base_url: str = "https://api.cohere.com/v2/rerank", extra_body: Optional[Dict[str, Any]] = None, enable_chunking: bool = False, max_tokens_per_doc: int = 4096, ) -> List[Dict[str, Any]]: """ Rerank documents using Cohere API. Supports both standard Cohere API and Cohere-compatible proxies Args: query: The search query documents: List of strings to rerank top_n: Number of top results to return api_key: API key for authentication model: rerank model name (default: rerank-v3.5) base_url: API endpoint extra_body: Additional body for http request(reserved for extra params) enable_chunking: Whether to chunk documents exceeding max_tokens_per_doc max_tokens_per_doc: Maximum tokens per document (default: 4096 for Cohere v3.5) Returns: List of dictionary of ["index": int, "relevance_score": float] Example: >>> # Standard Cohere API >>> results = await cohere_rerank( ... query="What is the meaning of life?", ... documents=["Doc1", "Doc2"], ... api_key="your-cohere-key" ... ) >>> # LiteLLM proxy with user authentication >>> results = await cohere_rerank( ... query="What is vector search?", ... documents=["Doc1", "Doc2"], ... model="answerai-colbert-small-v1", ... base_url="https://llm-proxy.example.com/v2/rerank", ... api_key="your-proxy-key", ... enable_chunking=True, ... max_tokens_per_doc=480 ... 
) """ if api_key is None: api_key = os.getenv("COHERE_API_KEY") or os.getenv("RERANK_BINDING_API_KEY") return await generic_rerank_api( query=query, documents=documents, model=model, base_url=base_url, api_key=api_key, top_n=top_n, return_documents=None, # Cohere doesn't support this parameter extra_body=extra_body, response_format="standard", enable_chunking=enable_chunking, max_tokens_per_doc=max_tokens_per_doc, ) async def jina_rerank( query: str, documents: List[str], top_n: Optional[int] = None, api_key: Optional[str] = None, model: str = "jina-reranker-v2-base-multilingual", base_url: str = "https://api.jina.ai/v1/rerank", extra_body: Optional[Dict[str, Any]] = None, ) -> List[Dict[str, Any]]: """ Rerank documents using Jina AI API. Args: query: The search query documents: List of strings to rerank top_n: Number of top results to return api_key: API key model: rerank model name base_url: API endpoint extra_body: Additional body for http request(reserved for extra params) Returns: List of dictionary of ["index": int, "relevance_score": float] """ if api_key is None: api_key = os.getenv("JINA_API_KEY") or os.getenv("RERANK_BINDING_API_KEY") return await generic_rerank_api( query=query, documents=documents, model=model, base_url=base_url, api_key=api_key, top_n=top_n, return_documents=False, extra_body=extra_body, response_format="standard", ) async def ali_rerank( query: str, documents: List[str], top_n: Optional[int] = None, api_key: Optional[str] = None, model: str = "gte-rerank-v2", base_url: str = "https://dashscope.aliyuncs.com/api/v1/services/rerank/text-rerank/text-rerank", extra_body: Optional[Dict[str, Any]] = None, ) -> List[Dict[str, Any]]: """ Rerank documents using Aliyun DashScope API. Args: query: The search query documents: List of strings to rerank top_n: Number of top results to return api_key: Aliyun API key model: rerank model name base_url: API endpoint extra_body: Additional body for http request(reserved for extra params) Returns: List of dictionary of ["index": int, "relevance_score": float] """ if api_key is None: api_key = os.getenv("DASHSCOPE_API_KEY") or os.getenv("RERANK_BINDING_API_KEY") return await generic_rerank_api( query=query, documents=documents, model=model, base_url=base_url, api_key=api_key, top_n=top_n, return_documents=False, # Aliyun doesn't need this parameter extra_body=extra_body, response_format="aliyun", request_format="aliyun", ) """Please run this test as a module: python -m lightrag.rerank """ if __name__ == "__main__": import asyncio async def main(): # Example usage - documents should be strings, not dictionaries docs = [ "The capital of France is Paris.", "Tokyo is the capital of Japan.", "London is the capital of England.", ] query = "What is the capital of France?" 
# Test Jina rerank try: print("=== Jina Rerank ===") result = await jina_rerank( query=query, documents=docs, top_n=2, ) print("Results:") for item in result: print(f"Index: {item['index']}, Score: {item['relevance_score']:.4f}") print(f"Document: {docs[item['index']]}") except Exception as e: print(f"Jina Error: {e}") # Test Cohere rerank try: print("\n=== Cohere Rerank ===") result = await cohere_rerank( query=query, documents=docs, top_n=2, ) print("Results:") for item in result: print(f"Index: {item['index']}, Score: {item['relevance_score']:.4f}") print(f"Document: {docs[item['index']]}") except Exception as e: print(f"Cohere Error: {e}") # Test Aliyun rerank try: print("\n=== Aliyun Rerank ===") result = await ali_rerank( query=query, documents=docs, top_n=2, ) print("Results:") for item in result: print(f"Index: {item['index']}, Score: {item['relevance_score']:.4f}") print(f"Document: {docs[item['index']]}") except Exception as e: print(f"Aliyun Error: {e}") asyncio.run(main())
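The chunk-and-aggregate path used by generic_rerank_api can also be exercised offline; a small sketch feeding synthetic scores through the two helpers defined above (document sizes and scores are arbitrary):

from lightrag.rerank import aggregate_chunk_scores, chunk_documents_for_rerank

docs = ["a short document", "a long document " * 400]  # second doc exceeds the limit
chunks, doc_indices = chunk_documents_for_rerank(docs, max_tokens=64, overlap_tokens=8)
print(f"{len(docs)} docs -> {len(chunks)} chunks; mapping head: {doc_indices[:5]}")

# Synthetic per-chunk scores standing in for a rerank API response
chunk_scores = [
    {"index": i, "relevance_score": 1.0 / (i + 1)} for i in range(len(chunks))
]
doc_scores = aggregate_chunk_scores(chunk_scores, doc_indices, len(docs), aggregation="max")
print(doc_scores)  # one entry per original document, sorted by score descending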
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/rerank.py", "license": "MIT License", "lines": 500, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:lightrag/kg/memgraph_impl.py
import os import asyncio import random from dataclasses import dataclass from typing import final import configparser from ..utils import logger from ..base import BaseGraphStorage from ..types import KnowledgeGraph, KnowledgeGraphNode, KnowledgeGraphEdge from ..kg.shared_storage import get_data_init_lock import pipmaster as pm if not pm.is_installed("neo4j"): pm.install("neo4j") from neo4j import ( AsyncGraphDatabase, AsyncManagedTransaction, ) from neo4j.exceptions import TransientError, ResultFailedError from dotenv import load_dotenv # use the .env that is inside the current folder load_dotenv(dotenv_path=".env", override=False) MAX_GRAPH_NODES = int(os.getenv("MAX_GRAPH_NODES", 1000)) config = configparser.ConfigParser() config.read("config.ini", "utf-8") @final @dataclass class MemgraphStorage(BaseGraphStorage): def __init__(self, namespace, global_config, embedding_func, workspace=None): # Priority: 1) MEMGRAPH_WORKSPACE env 2) user arg 3) default 'base' memgraph_workspace = os.environ.get("MEMGRAPH_WORKSPACE") original_workspace = workspace # Save original value for logging if memgraph_workspace and memgraph_workspace.strip(): workspace = memgraph_workspace if not workspace or not str(workspace).strip(): workspace = "base" super().__init__( namespace=namespace, workspace=workspace, global_config=global_config, embedding_func=embedding_func, ) # Log after super().__init__() to ensure self.workspace is initialized if memgraph_workspace and memgraph_workspace.strip(): logger.info( f"Using MEMGRAPH_WORKSPACE environment variable: '{memgraph_workspace}' (overriding '{original_workspace}/{namespace}')" ) self._driver = None def _get_workspace_label(self) -> str: """Return workspace label (guaranteed non-empty during initialization)""" return self.workspace async def initialize(self): async with get_data_init_lock(): URI = os.environ.get( "MEMGRAPH_URI", config.get("memgraph", "uri", fallback="bolt://localhost:7687"), ) USERNAME = os.environ.get( "MEMGRAPH_USERNAME", config.get("memgraph", "username", fallback="") ) PASSWORD = os.environ.get( "MEMGRAPH_PASSWORD", config.get("memgraph", "password", fallback="") ) DATABASE = os.environ.get( "MEMGRAPH_DATABASE", config.get("memgraph", "database", fallback="memgraph"), ) self._driver = AsyncGraphDatabase.driver( URI, auth=(USERNAME, PASSWORD), ) self._DATABASE = DATABASE try: async with self._driver.session(database=DATABASE) as session: # Create index for base nodes on entity_id if it doesn't exist try: workspace_label = self._get_workspace_label() await session.run( f"""CREATE INDEX ON :{workspace_label}(entity_id)""" ) logger.info( f"[{self.workspace}] Created index on :{workspace_label}(entity_id) in Memgraph." ) except Exception as e: # Index may already exist, which is not an error logger.warning( f"[{self.workspace}] Index creation on :{workspace_label}(entity_id) may have failed or already exists: {e}" ) await session.run("RETURN 1") logger.info(f"[{self.workspace}] Connected to Memgraph at {URI}") except Exception as e: logger.error( f"[{self.workspace}] Failed to connect to Memgraph at {URI}: {e}" ) raise async def finalize(self): if self._driver is not None: await self._driver.close() self._driver = None async def __aexit__(self, exc_type, exc, tb): await self.finalize() async def index_done_callback(self): # Memgraph handles persistence automatically pass async def has_node(self, node_id: str) -> bool: """ Check if a node exists in the graph. Args: node_id: The ID of the node to check. 
Returns: bool: True if the node exists, False otherwise. Raises: Exception: If there is an error checking the node existence. """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: result = None try: workspace_label = self._get_workspace_label() query = f"MATCH (n:`{workspace_label}` {{entity_id: $entity_id}}) RETURN count(n) > 0 AS node_exists" result = await session.run(query, entity_id=node_id) single_result = await result.single() await result.consume() # Ensure result is fully consumed return ( single_result["node_exists"] if single_result is not None else False ) except Exception as e: logger.error( f"[{self.workspace}] Error checking node existence for {node_id}: {str(e)}" ) if result is not None: await ( result.consume() ) # Ensure the result is consumed even on error raise async def has_edge(self, source_node_id: str, target_node_id: str) -> bool: """ Check if an edge exists between two nodes in the graph. Args: source_node_id: The ID of the source node. target_node_id: The ID of the target node. Returns: bool: True if the edge exists, False otherwise. Raises: Exception: If there is an error checking the edge existence. """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: result = None try: workspace_label = self._get_workspace_label() query = ( f"MATCH (a:`{workspace_label}` {{entity_id: $source_entity_id}})-[r]-(b:`{workspace_label}` {{entity_id: $target_entity_id}}) " "RETURN COUNT(r) > 0 AS edgeExists" ) result = await session.run( query, source_entity_id=source_node_id, target_entity_id=target_node_id, ) # type: ignore single_result = await result.single() await result.consume() # Ensure result is fully consumed return ( single_result["edgeExists"] if single_result is not None else False ) except Exception as e: logger.error( f"[{self.workspace}] Error checking edge existence between {source_node_id} and {target_node_id}: {str(e)}" ) if result is not None: await ( result.consume() ) # Ensure the result is consumed even on error raise async def get_node(self, node_id: str) -> dict[str, str] | None: """Get node by its label identifier, return only node properties Args: node_id: The node label to look up Returns: dict: Node properties if found None: If node not found Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: try: workspace_label = self._get_workspace_label() query = ( f"MATCH (n:`{workspace_label}` {{entity_id: $entity_id}}) RETURN n" ) result = await session.run(query, entity_id=node_id) try: records = await result.fetch( 2 ) # Get 2 records for duplication check if len(records) > 1: logger.warning( f"[{self.workspace}] Multiple nodes found with label '{node_id}'. Using first node." 
) if records: node = records[0]["n"] node_dict = dict(node) # Remove workspace label from labels list if it exists if "labels" in node_dict: node_dict["labels"] = [ label for label in node_dict["labels"] if label != workspace_label ] return node_dict return None finally: await result.consume() # Ensure result is fully consumed except Exception as e: logger.error( f"[{self.workspace}] Error getting node for {node_id}: {str(e)}" ) raise async def node_degree(self, node_id: str) -> int: """Get the degree (number of relationships) of a node with the given label. If multiple nodes have the same label, returns the degree of the first node. If no node is found, returns 0. Args: node_id: The label of the node Returns: int: The number of relationships the node has, or 0 if no node found Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: try: workspace_label = self._get_workspace_label() query = f""" MATCH (n:`{workspace_label}` {{entity_id: $entity_id}}) OPTIONAL MATCH (n)-[r]-() RETURN COUNT(r) AS degree """ result = await session.run(query, entity_id=node_id) try: record = await result.single() if not record: logger.warning( f"[{self.workspace}] No node found with label '{node_id}'" ) return 0 degree = record["degree"] return degree finally: await result.consume() # Ensure result is fully consumed except Exception as e: logger.error( f"[{self.workspace}] Error getting node degree for {node_id}: {str(e)}" ) raise async def get_all_labels(self) -> list[str]: """ Get all existing node labels(entity names) in the database Returns: ["Person", "Company", ...] # Alphabetically sorted label list Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: result = None try: workspace_label = self._get_workspace_label() query = f""" MATCH (n:`{workspace_label}`) WHERE n.entity_id IS NOT NULL RETURN DISTINCT n.entity_id AS label ORDER BY label """ result = await session.run(query) labels = [] async for record in result: labels.append(record["label"]) await result.consume() return labels except Exception as e: logger.error(f"[{self.workspace}] Error getting all labels: {str(e)}") if result is not None: await ( result.consume() ) # Ensure the result is consumed even on error raise async def get_node_edges(self, source_node_id: str) -> list[tuple[str, str]] | None: """Retrieves all edges (relationships) for a particular node identified by its label. Args: source_node_id: Label of the node to get edges for Returns: list[tuple[str, str]]: List of (source_label, target_label) tuples representing edges None: If no edges found Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." 
) try: async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: results = None try: workspace_label = self._get_workspace_label() query = f"""MATCH (n:`{workspace_label}` {{entity_id: $entity_id}}) OPTIONAL MATCH (n)-[r]-(connected:`{workspace_label}`) WHERE connected.entity_id IS NOT NULL RETURN n, r, connected""" results = await session.run(query, entity_id=source_node_id) edges = [] async for record in results: source_node = record["n"] connected_node = record["connected"] # Skip if either node is None if not source_node or not connected_node: continue source_label = ( source_node.get("entity_id") if source_node.get("entity_id") else None ) target_label = ( connected_node.get("entity_id") if connected_node.get("entity_id") else None ) if source_label and target_label: edges.append((source_label, target_label)) await results.consume() # Ensure results are consumed return edges except Exception as e: logger.error( f"[{self.workspace}] Error getting edges for node {source_node_id}: {str(e)}" ) if results is not None: await ( results.consume() ) # Ensure results are consumed even on error raise except Exception as e: logger.error( f"[{self.workspace}] Error in get_node_edges for {source_node_id}: {str(e)}" ) raise async def get_edge( self, source_node_id: str, target_node_id: str ) -> dict[str, str] | None: """Get edge properties between two nodes. Args: source_node_id: Label of the source node target_node_id: Label of the target node Returns: dict: Edge properties if found, default properties if not found or on error Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: result = None try: workspace_label = self._get_workspace_label() query = f""" MATCH (start:`{workspace_label}` {{entity_id: $source_entity_id}})-[r]-(end:`{workspace_label}` {{entity_id: $target_entity_id}}) RETURN properties(r) as edge_properties """ result = await session.run( query, source_entity_id=source_node_id, target_entity_id=target_node_id, ) records = await result.fetch(2) await result.consume() if records: edge_result = dict(records[0]["edge_properties"]) for key, default_value in { "weight": 1.0, "source_id": None, "description": None, "keywords": None, }.items(): if key not in edge_result: edge_result[key] = default_value logger.warning( f"[{self.workspace}] Edge between {source_node_id} and {target_node_id} is missing property: {key}. Using default value: {default_value}" ) return edge_result return None except Exception as e: logger.error( f"[{self.workspace}] Error getting edge between {source_node_id} and {target_node_id}: {str(e)}" ) if result is not None: await ( result.consume() ) # Ensure the result is consumed even on error raise async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None: """ Upsert a node in the Memgraph database with manual transaction-level retry logic for transient errors. Args: node_id: The unique identifier for the node (used as label) node_data: Dictionary of node properties """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." 
) properties = node_data entity_type = properties["entity_type"] if "entity_id" not in properties: raise ValueError( "Memgraph: node properties must contain an 'entity_id' field" ) # Manual transaction-level retry following official Memgraph documentation max_retries = 100 initial_wait_time = 0.2 backoff_factor = 1.1 jitter_factor = 0.1 for attempt in range(max_retries): try: logger.debug( f"[{self.workspace}] Attempting node upsert, attempt {attempt + 1}/{max_retries}" ) async with self._driver.session(database=self._DATABASE) as session: workspace_label = self._get_workspace_label() async def execute_upsert(tx: AsyncManagedTransaction): query = f""" MERGE (n:`{workspace_label}` {{entity_id: $entity_id}}) SET n += $properties SET n:`{entity_type}` """ result = await tx.run( query, entity_id=node_id, properties=properties ) await result.consume() # Ensure result is fully consumed await session.execute_write(execute_upsert) break # Success - exit retry loop except (TransientError, ResultFailedError) as e: # Check if the root cause is a TransientError root_cause = e while hasattr(root_cause, "__cause__") and root_cause.__cause__: root_cause = root_cause.__cause__ # Check if this is a transient error that should be retried is_transient = ( isinstance(root_cause, TransientError) or isinstance(e, TransientError) or "TransientError" in str(e) or "Cannot resolve conflicting transactions" in str(e) ) if is_transient: if attempt < max_retries - 1: # Calculate wait time with exponential backoff and jitter jitter = random.uniform(0, jitter_factor) * initial_wait_time wait_time = ( initial_wait_time * (backoff_factor**attempt) + jitter ) logger.warning( f"[{self.workspace}] Node upsert failed. Attempt #{attempt + 1} retrying in {wait_time:.3f} seconds... Error: {str(e)}" ) await asyncio.sleep(wait_time) else: logger.error( f"[{self.workspace}] Memgraph transient error during node upsert after {max_retries} retries: {str(e)}" ) raise else: # Non-transient error, don't retry logger.error( f"[{self.workspace}] Non-transient error during node upsert: {str(e)}" ) raise except Exception as e: logger.error( f"[{self.workspace}] Unexpected error during node upsert: {str(e)}" ) raise async def upsert_edge( self, source_node_id: str, target_node_id: str, edge_data: dict[str, str] ) -> None: """ Upsert an edge and its properties between two nodes identified by their labels with manual transaction-level retry logic for transient errors. Ensures both source and target nodes exist and are unique before creating the edge. Uses entity_id property to uniquely identify nodes. Args: source_node_id (str): Label of the source node (used as identifier) target_node_id (str): Label of the target node (used as identifier) edge_data (dict): Dictionary of properties to set on the edge Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." 
) edge_properties = edge_data # Manual transaction-level retry following official Memgraph documentation max_retries = 100 initial_wait_time = 0.2 backoff_factor = 1.1 jitter_factor = 0.1 for attempt in range(max_retries): try: logger.debug( f"[{self.workspace}] Attempting edge upsert, attempt {attempt + 1}/{max_retries}" ) async with self._driver.session(database=self._DATABASE) as session: async def execute_upsert(tx: AsyncManagedTransaction): workspace_label = self._get_workspace_label() query = f""" MATCH (source:`{workspace_label}` {{entity_id: $source_entity_id}}) WITH source MATCH (target:`{workspace_label}` {{entity_id: $target_entity_id}}) MERGE (source)-[r:DIRECTED]-(target) SET r += $properties RETURN r, source, target """ result = await tx.run( query, source_entity_id=source_node_id, target_entity_id=target_node_id, properties=edge_properties, ) try: await result.fetch(2) finally: await result.consume() # Ensure result is consumed await session.execute_write(execute_upsert) break # Success - exit retry loop except (TransientError, ResultFailedError) as e: # Check if the root cause is a TransientError root_cause = e while hasattr(root_cause, "__cause__") and root_cause.__cause__: root_cause = root_cause.__cause__ # Check if this is a transient error that should be retried is_transient = ( isinstance(root_cause, TransientError) or isinstance(e, TransientError) or "TransientError" in str(e) or "Cannot resolve conflicting transactions" in str(e) ) if is_transient: if attempt < max_retries - 1: # Calculate wait time with exponential backoff and jitter jitter = random.uniform(0, jitter_factor) * initial_wait_time wait_time = ( initial_wait_time * (backoff_factor**attempt) + jitter ) logger.warning( f"[{self.workspace}] Edge upsert failed. Attempt #{attempt + 1} retrying in {wait_time:.3f} seconds... Error: {str(e)}" ) await asyncio.sleep(wait_time) else: logger.error( f"[{self.workspace}] Memgraph transient error during edge upsert after {max_retries} retries: {str(e)}" ) raise else: # Non-transient error, don't retry logger.error( f"[{self.workspace}] Non-transient error during edge upsert: {str(e)}" ) raise except Exception as e: logger.error( f"[{self.workspace}] Unexpected error during edge upsert: {str(e)}" ) raise async def delete_node(self, node_id: str) -> None: """Delete a node with the specified label Args: node_id: The label of the node to delete Raises: Exception: If there is an error executing the query """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) async def _do_delete(tx: AsyncManagedTransaction): workspace_label = self._get_workspace_label() query = f""" MATCH (n:`{workspace_label}` {{entity_id: $entity_id}}) DETACH DELETE n """ result = await tx.run(query, entity_id=node_id) logger.debug(f"[{self.workspace}] Deleted node with label {node_id}") await result.consume() try: async with self._driver.session(database=self._DATABASE) as session: await session.execute_write(_do_delete) except Exception as e: logger.error(f"[{self.workspace}] Error during node deletion: {str(e)}") raise async def remove_nodes(self, nodes: list[str]): """Delete multiple nodes Args: nodes: List of node labels to be deleted """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." 
            )

        for node in nodes:
            await self.delete_node(node)

    async def remove_edges(self, edges: list[tuple[str, str]]):
        """Delete multiple edges

        Args:
            edges: List of edges to be deleted, each edge is a (source, target) tuple

        Raises:
            Exception: If there is an error executing the query
        """
        if self._driver is None:
            raise RuntimeError(
                "Memgraph driver is not initialized. Call 'await initialize()' first."
            )
        for source, target in edges:

            async def _do_delete_edge(tx: AsyncManagedTransaction):
                workspace_label = self._get_workspace_label()
                query = f"""
                MATCH (source:`{workspace_label}` {{entity_id: $source_entity_id}})-[r]-(target:`{workspace_label}` {{entity_id: $target_entity_id}})
                DELETE r
                """
                result = await tx.run(
                    query, source_entity_id=source, target_entity_id=target
                )
                logger.debug(
                    f"[{self.workspace}] Deleted edge from '{source}' to '{target}'"
                )
                await result.consume()  # Ensure result is fully consumed

            try:
                async with self._driver.session(database=self._DATABASE) as session:
                    await session.execute_write(_do_delete_edge)
            except Exception as e:
                logger.error(
                    f"[{self.workspace}] Error during edge deletion: {str(e)}"
                )
                raise

    async def drop(self) -> dict[str, str]:
        """Drop all data from the current workspace and clean up resources

        This method will delete all nodes and relationships in the Memgraph database.

        Returns:
            dict[str, str]: Operation status and message
            - On success: {"status": "success", "message": "workspace data dropped"}
            - On failure: {"status": "error", "message": "<error details>"}

        Raises:
            Exception: If there is an error executing the query
        """
        if self._driver is None:
            raise RuntimeError(
                "Memgraph driver is not initialized. Call 'await initialize()' first."
            )
        # Resolve the label before entering the try block so the except clause
        # can reference it even when session creation itself fails.
        workspace_label = self._get_workspace_label()
        try:
            async with self._driver.session(database=self._DATABASE) as session:
                query = f"MATCH (n:`{workspace_label}`) DETACH DELETE n"
                result = await session.run(query)
                await result.consume()
                logger.info(
                    f"[{self.workspace}] Dropped workspace {workspace_label} from Memgraph database {self._DATABASE}"
                )
                return {"status": "success", "message": "workspace data dropped"}
        except Exception as e:
            logger.error(
                f"[{self.workspace}] Error dropping workspace {workspace_label} from Memgraph database {self._DATABASE}: {e}"
            )
            return {"status": "error", "message": str(e)}

    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
        """Get the total degree (sum of relationships) of two nodes.

        Args:
            src_id: Label of the source node
            tgt_id: Label of the target node

        Returns:
            int: Sum of the degrees of both nodes
        """
        if self._driver is None:
            raise RuntimeError(
                "Memgraph driver is not initialized. Call 'await initialize()' first."
            )
        src_degree = await self.node_degree(src_id)
        trg_degree = await self.node_degree(tgt_id)

        # Convert None to 0 for addition
        src_degree = 0 if src_degree is None else src_degree
        trg_degree = 0 if trg_degree is None else trg_degree

        degrees = int(src_degree) + int(trg_degree)
        return degrees

    async def get_knowledge_graph(
        self,
        node_label: str,
        max_depth: int = 3,
        max_nodes: int | None = None,
    ) -> KnowledgeGraph:
        """
        Retrieve a connected subgraph of nodes where the label includes the specified `node_label`.
Args: node_label: Label of the starting node, * means all nodes max_depth: Maximum depth of the subgraph, Defaults to 3 max_nodes: Maximum nodes to return by BFS, Defaults to 1000 Returns: KnowledgeGraph object containing nodes and edges, with an is_truncated flag indicating whether the graph was truncated due to max_nodes limit """ # Get max_nodes from global_config if not provided if max_nodes is None: max_nodes = self.global_config.get("max_graph_nodes", 1000) else: # Limit max_nodes to not exceed global_config max_graph_nodes max_nodes = min(max_nodes, self.global_config.get("max_graph_nodes", 1000)) workspace_label = self._get_workspace_label() result = KnowledgeGraph() seen_nodes = set() seen_edges = set() async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: try: if node_label == "*": # First check total node count to determine if graph is truncated count_query = ( f"MATCH (n:`{workspace_label}`) RETURN count(n) as total" ) count_result = None try: count_result = await session.run(count_query) count_record = await count_result.single() if count_record and count_record["total"] > max_nodes: result.is_truncated = True logger.info( f"Graph truncated: {count_record['total']} nodes found, limited to {max_nodes}" ) finally: if count_result: await count_result.consume() # Run main query to get nodes with highest degree main_query = f""" MATCH (n:`{workspace_label}`) OPTIONAL MATCH (n)-[r]-() WITH n, COALESCE(count(r), 0) AS degree ORDER BY degree DESC LIMIT $max_nodes WITH collect({{node: n}}) AS filtered_nodes UNWIND filtered_nodes AS node_info WITH collect(node_info.node) AS kept_nodes, filtered_nodes OPTIONAL MATCH (a)-[r]-(b) WHERE a IN kept_nodes AND b IN kept_nodes RETURN filtered_nodes AS node_info, collect(DISTINCT r) AS relationships """ result_set = None try: result_set = await session.run( main_query, {"max_nodes": max_nodes}, ) record = await result_set.single() finally: if result_set: await result_set.consume() else: # Run subgraph query for specific node_label subgraph_query = f""" MATCH (start:`{workspace_label}`) WHERE start.entity_id = $entity_id MATCH path = (start)-[*BFS 0..{max_depth}]-(end:`{workspace_label}`) WHERE ALL(n IN nodes(path) WHERE '{workspace_label}' IN labels(n)) WITH collect(DISTINCT end) + start AS all_nodes_unlimited WITH CASE WHEN size(all_nodes_unlimited) <= $max_nodes THEN all_nodes_unlimited ELSE all_nodes_unlimited[0..$max_nodes] END AS limited_nodes, size(all_nodes_unlimited) > $max_nodes AS is_truncated UNWIND limited_nodes AS n MATCH (n)-[r]-(m) WHERE m IN limited_nodes WITH collect(DISTINCT n) AS limited_nodes, collect(DISTINCT r) AS relationships, is_truncated RETURN [node IN limited_nodes | {{node: node}}] AS node_info, relationships, is_truncated """ result_set = None try: result_set = await session.run( subgraph_query, { "entity_id": node_label, "max_nodes": max_nodes, }, ) record = await result_set.single() # If no record found, return empty KnowledgeGraph if not record: logger.debug( f"[{self.workspace}] No nodes found for entity_id: {node_label}" ) return result # Check if the result was truncated if record.get("is_truncated"): result.is_truncated = True logger.info( f"[{self.workspace}] Graph truncated: breadth-first search limited to {max_nodes} nodes" ) finally: if result_set: await result_set.consume() if record: for node_info in record["node_info"]: node = node_info["node"] node_id = node.id if node_id not in seen_nodes: result.nodes.append( KnowledgeGraphNode( id=f"{node_id}", 
labels=[node.get("entity_id")], properties=dict(node), ) ) seen_nodes.add(node_id) for rel in record["relationships"]: edge_id = rel.id if edge_id not in seen_edges: start = rel.start_node end = rel.end_node result.edges.append( KnowledgeGraphEdge( id=f"{edge_id}", type=rel.type, source=f"{start.id}", target=f"{end.id}", properties=dict(rel), ) ) seen_edges.add(edge_id) logger.info( f"[{self.workspace}] Subgraph query successful | Node count: {len(result.nodes)} | Edge count: {len(result.edges)}" ) except Exception as e: logger.warning( f"[{self.workspace}] Memgraph error during subgraph query: {str(e)}" ) return result async def get_all_nodes(self) -> list[dict]: """Get all nodes in the graph. Returns: A list of all nodes, where each node is a dictionary of its properties """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) workspace_label = self._get_workspace_label() async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: query = f""" MATCH (n:`{workspace_label}`) RETURN n """ result = await session.run(query) nodes = [] async for record in result: node = record["n"] node_dict = dict(node) # Add node id (entity_id) to the dictionary for easier access node_dict["id"] = node_dict.get("entity_id") nodes.append(node_dict) await result.consume() return nodes async def get_all_edges(self) -> list[dict]: """Get all edges in the graph. Returns: A list of all edges, where each edge is a dictionary of its properties """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) workspace_label = self._get_workspace_label() async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: query = f""" MATCH (a:`{workspace_label}`)-[r]-(b:`{workspace_label}`) RETURN DISTINCT a.entity_id AS source, b.entity_id AS target, properties(r) AS properties """ result = await session.run(query) edges = [] async for record in result: edge_properties = record["properties"] edge_properties["source"] = record["source"] edge_properties["target"] = record["target"] edges.append(edge_properties) await result.consume() return edges async def get_popular_labels(self, limit: int = 300) -> list[str]: """Get popular labels by node degree (most connected entities) Args: limit: Maximum number of labels(entity names) to return Returns: List of labels(entity names) sorted by degree (highest first) """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." 
) result = None try: workspace_label = self._get_workspace_label() async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: query = f""" MATCH (n:`{workspace_label}`) WHERE n.entity_id IS NOT NULL OPTIONAL MATCH (n)-[r]-() WITH n.entity_id AS label, count(r) AS degree ORDER BY degree DESC, label ASC LIMIT {limit} RETURN label """ result = await session.run(query) labels = [] async for record in result: labels.append(record["label"]) await result.consume() logger.debug( f"[{self.workspace}] Retrieved {len(labels)} popular labels (limit: {limit})" ) return labels except Exception as e: logger.error(f"[{self.workspace}] Error getting popular labels: {str(e)}") if result is not None: await result.consume() return [] async def search_labels(self, query: str, limit: int = 50) -> list[str]: """Search labels(entity names) with fuzzy matching Args: query: Search query string limit: Maximum number of results to return Returns: List of matching labels(entity names) sorted by relevance """ if self._driver is None: raise RuntimeError( "Memgraph driver is not initialized. Call 'await initialize()' first." ) query_lower = query.lower().strip() if not query_lower: return [] result = None try: workspace_label = self._get_workspace_label() async with self._driver.session( database=self._DATABASE, default_access_mode="READ" ) as session: cypher_query = f""" MATCH (n:`{workspace_label}`) WHERE n.entity_id IS NOT NULL WITH n.entity_id AS label, toLower(n.entity_id) AS label_lower WHERE label_lower CONTAINS $query_lower WITH label, label_lower, CASE WHEN label_lower = $query_lower THEN 1000 WHEN label_lower STARTS WITH $query_lower THEN 500 ELSE 100 - size(label) END AS score ORDER BY score DESC, label ASC LIMIT {limit} RETURN label """ result = await session.run(cypher_query, query_lower=query_lower) labels = [] async for record in result: labels.append(record["label"]) await result.consume() logger.debug( f"[{self.workspace}] Search query '{query}' returned {len(labels)} results (limit: {limit})" ) return labels except Exception as e: logger.error(f"[{self.workspace}] Error searching labels: {str(e)}") if result is not None: await result.consume() return []
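

# --- Illustrative sketch (not part of memgraph_impl.py) ----------------------
# The upsert methods above share one retry recipe: exponential backoff with
# jitter wrapped around a transient-error check. A minimal standalone sketch of
# that recipe, using the same constants; `do_write` is a hypothetical coroutine
# standing in for the Memgraph write transaction, and ConnectionError stands in
# for TransientError:
#
#   import asyncio
#   import random
#
#   async def retry_transient(do_write, max_retries: int = 100) -> None:
#       initial_wait_time, backoff_factor, jitter_factor = 0.2, 1.1, 0.1
#       for attempt in range(max_retries):
#           try:
#               await do_write()
#               return  # success - exit the retry loop
#           except ConnectionError:
#               if attempt == max_retries - 1:
#                   raise  # retries exhausted - surface the error
#               jitter = random.uniform(0, jitter_factor) * initial_wait_time
#               wait = initial_wait_time * (backoff_factor**attempt) + jitter
#               await asyncio.sleep(wait)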
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/kg/memgraph_impl.py", "license": "MIT License", "lines": 1016, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:examples/modalprocessors_example.py
""" Example of directly using modal processors This example demonstrates how to use LightRAG's modal processors directly without going through MinerU. """ import asyncio import argparse from lightrag.llm.openai import openai_complete_if_cache, openai_embed from lightrag import LightRAG from lightrag.utils import EmbeddingFunc from raganything.modalprocessors import ( ImageModalProcessor, TableModalProcessor, EquationModalProcessor, ) WORKING_DIR = "./rag_storage" def get_llm_model_func(api_key: str, base_url: str = None): return ( lambda prompt, system_prompt=None, history_messages=[], **kwargs: openai_complete_if_cache( "gpt-4o-mini", prompt, system_prompt=system_prompt, history_messages=history_messages, api_key=api_key, base_url=base_url, **kwargs, ) ) def get_vision_model_func(api_key: str, base_url: str = None): return ( lambda prompt, system_prompt=None, history_messages=[], image_data=None, **kwargs: openai_complete_if_cache( "gpt-4o", "", system_prompt=None, history_messages=[], messages=[ {"role": "system", "content": system_prompt} if system_prompt else None, { "role": "user", "content": [ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": { "url": f"data:image/jpeg;base64,{image_data}" }, }, ], } if image_data else {"role": "user", "content": prompt}, ], api_key=api_key, base_url=base_url, **kwargs, ) if image_data else openai_complete_if_cache( "gpt-4o-mini", prompt, system_prompt=system_prompt, history_messages=history_messages, api_key=api_key, base_url=base_url, **kwargs, ) ) async def process_image_example(lightrag: LightRAG, vision_model_func): """Example of processing an image""" # Create image processor image_processor = ImageModalProcessor( lightrag=lightrag, modal_caption_func=vision_model_func ) # Prepare image content image_content = { "img_path": "image.jpg", "img_caption": ["Example image caption"], "img_footnote": ["Example image footnote"], } # Process image description, entity_info = await image_processor.process_multimodal_content( modal_content=image_content, content_type="image", file_path="image_example.jpg", entity_name="Example Image", ) print("Image Processing Results:") print(f"Description: {description}") print(f"Entity Info: {entity_info}") async def process_table_example(lightrag: LightRAG, llm_model_func): """Example of processing a table""" # Create table processor table_processor = TableModalProcessor( lightrag=lightrag, modal_caption_func=llm_model_func ) # Prepare table content table_content = { "table_body": """ | Name | Age | Occupation | |------|-----|------------| | John | 25 | Engineer | | Mary | 30 | Designer | """, "table_caption": ["Employee Information Table"], "table_footnote": ["Data updated as of 2024"], } # Process table description, entity_info = await table_processor.process_multimodal_content( modal_content=table_content, content_type="table", file_path="table_example.md", entity_name="Employee Table", ) print("\nTable Processing Results:") print(f"Description: {description}") print(f"Entity Info: {entity_info}") async def process_equation_example(lightrag: LightRAG, llm_model_func): """Example of processing a mathematical equation""" # Create equation processor equation_processor = EquationModalProcessor( lightrag=lightrag, modal_caption_func=llm_model_func ) # Prepare equation content equation_content = {"text": "E = mc^2", "text_format": "LaTeX"} # Process equation description, entity_info = await equation_processor.process_multimodal_content( modal_content=equation_content, content_type="equation", 
file_path="equation_example.txt", entity_name="Mass-Energy Equivalence", ) print("\nEquation Processing Results:") print(f"Description: {description}") print(f"Entity Info: {entity_info}") async def initialize_rag(api_key: str, base_url: str = None): rag = LightRAG( working_dir=WORKING_DIR, embedding_func=EmbeddingFunc( embedding_dim=3072, max_token_size=8192, func=lambda texts: openai_embed( texts, model="text-embedding-3-large", api_key=api_key, base_url=base_url, ), ), llm_model_func=lambda prompt, system_prompt=None, history_messages=[], **kwargs: openai_complete_if_cache( "gpt-4o-mini", prompt, system_prompt=system_prompt, history_messages=history_messages, api_key=api_key, base_url=base_url, **kwargs, ), ) await rag.initialize_storages() # Auto-initializes pipeline_status return rag def main(): """Main function to run the example""" parser = argparse.ArgumentParser(description="Modal Processors Example") parser.add_argument("--api-key", required=True, help="OpenAI API key") parser.add_argument("--base-url", help="Optional base URL for API") parser.add_argument( "--working-dir", "-w", default=WORKING_DIR, help="Working directory path" ) args = parser.parse_args() # Run examples asyncio.run(main_async(args.api_key, args.base_url)) async def main_async(api_key: str, base_url: str = None): # Initialize LightRAG lightrag = await initialize_rag(api_key, base_url) # Get model functions llm_model_func = get_llm_model_func(api_key, base_url) vision_model_func = get_vision_model_func(api_key, base_url) # Run examples await process_image_example(lightrag, vision_model_func) await process_table_example(lightrag, llm_model_func) await process_equation_example(lightrag, llm_model_func) if __name__ == "__main__": main()
{ "repo_id": "HKUDS/LightRAG", "file_path": "examples/modalprocessors_example.py", "license": "MIT License", "lines": 191, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/LightRAG:examples/raganything_example.py
#!/usr/bin/env python
"""
Example script demonstrating the integration of MinerU parser with RAGAnything

This example shows how to:
1. Process parsed documents with RAGAnything
2. Perform multimodal queries on the processed documents
3. Handle different types of content (text, images, tables)
"""

import os
import argparse
import asyncio
import logging
import logging.config
from pathlib import Path

# Add project root directory to Python path
import sys

sys.path.append(str(Path(__file__).parent.parent))

from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc, logger, set_verbose_debug
from raganything import RAGAnything, RAGAnythingConfig


def configure_logging():
    """Configure logging for the application"""
    # Get log directory path from environment variable or use current directory
    log_dir = os.getenv("LOG_DIR", os.getcwd())
    log_file_path = os.path.abspath(os.path.join(log_dir, "raganything_example.log"))

    print(f"\nRAGAnything example log file: {log_file_path}\n")
    # Ensure the directory that will hold the log file exists
    os.makedirs(os.path.dirname(log_file_path), exist_ok=True)

    # Get log file max size and backup count from environment variables
    log_max_bytes = int(os.getenv("LOG_MAX_BYTES", 10485760))  # Default 10MB
    log_backup_count = int(os.getenv("LOG_BACKUP_COUNT", 5))  # Default 5 backups

    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "default": {
                    "format": "%(levelname)s: %(message)s",
                },
                "detailed": {
                    "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
                },
            },
            "handlers": {
                "console": {
                    "formatter": "default",
                    "class": "logging.StreamHandler",
                    "stream": "ext://sys.stderr",
                },
                "file": {
                    "formatter": "detailed",
                    "class": "logging.handlers.RotatingFileHandler",
                    "filename": log_file_path,
                    "maxBytes": log_max_bytes,
                    "backupCount": log_backup_count,
                    "encoding": "utf-8",
                },
            },
            "loggers": {
                "lightrag": {
                    "handlers": ["console", "file"],
                    "level": "INFO",
                    "propagate": False,
                },
            },
        }
    )

    # Set the logger level to INFO
    logger.setLevel(logging.INFO)
    # Enable verbose debug if needed
    set_verbose_debug(os.getenv("VERBOSE", "false").lower() == "true")


async def process_with_rag(
    file_path: str,
    output_dir: str,
    api_key: str,
    base_url: str = None,
    working_dir: str = None,
):
    """
    Process document with RAGAnything

    Args:
        file_path: Path to the document
        output_dir: Output directory for RAG results
        api_key: OpenAI API key
        base_url: Optional base URL for API
        working_dir: Working directory for RAG storage
    """
    try:
        # Create RAGAnything configuration
        config = RAGAnythingConfig(
            working_dir=working_dir or "./rag_storage",
            mineru_parse_method="auto",
            enable_image_processing=True,
            enable_table_processing=True,
            enable_equation_processing=True,
        )

        # Define LLM model function
        def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
            return openai_complete_if_cache(
                "gpt-4o-mini",
                prompt,
                system_prompt=system_prompt,
                history_messages=history_messages,
                api_key=api_key,
                base_url=base_url,
                **kwargs,
            )

        # Define vision model function for image processing
        def vision_model_func(
            prompt, system_prompt=None, history_messages=[], image_data=None, **kwargs
        ):
            if image_data:
                return openai_complete_if_cache(
                    "gpt-4o",
                    "",
                    system_prompt=None,
                    history_messages=[],
                    messages=[
                        msg
                        for msg in (
                            # Skip the system entry when no system prompt is
                            # given; a literal None element would be rejected.
                            {"role": "system", "content": system_prompt}
                            if system_prompt
                            else None,
                            {
                                "role": "user",
                                "content": [
                                    {"type": "text", "text": prompt},
                                    {
                                        "type": "image_url",
                                        "image_url": {
                                            "url": f"data:image/jpeg;base64,{image_data}"
                                        },
                                    },
                                ],
                            },
                        )
                        if msg is not None
                    ],
api_key=api_key, base_url=base_url, **kwargs, ) else: return llm_model_func(prompt, system_prompt, history_messages, **kwargs) # Define embedding function embedding_func = EmbeddingFunc( embedding_dim=3072, max_token_size=8192, func=lambda texts: openai_embed( texts, model="text-embedding-3-large", api_key=api_key, base_url=base_url, ), ) # Initialize RAGAnything with new dataclass structure rag = RAGAnything( config=config, llm_model_func=llm_model_func, vision_model_func=vision_model_func, embedding_func=embedding_func, ) # Process document await rag.process_document_complete( file_path=file_path, output_dir=output_dir, parse_method="auto" ) # Example queries - demonstrating different query approaches logger.info("\nQuerying processed document:") # 1. Pure text queries using aquery() text_queries = [ "What is the main content of the document?", "What are the key topics discussed?", ] for query in text_queries: logger.info(f"\n[Text Query]: {query}") result = await rag.aquery(query, mode="hybrid") logger.info(f"Answer: {result}") # 2. Multimodal query with specific multimodal content using aquery_with_multimodal() logger.info( "\n[Multimodal Query]: Analyzing performance data in context of document" ) multimodal_result = await rag.aquery_with_multimodal( "Compare this performance data with any similar results mentioned in the document", multimodal_content=[ { "type": "table", "table_data": """Method,Accuracy,Processing_Time RAGAnything,95.2%,120ms Traditional_RAG,87.3%,180ms Baseline,82.1%,200ms""", "table_caption": "Performance comparison results", } ], mode="hybrid", ) logger.info(f"Answer: {multimodal_result}") # 3. Another multimodal query with equation content logger.info("\n[Multimodal Query]: Mathematical formula analysis") equation_result = await rag.aquery_with_multimodal( "Explain this formula and relate it to any mathematical concepts in the document", multimodal_content=[ { "type": "equation", "latex": "F1 = 2 \\cdot \\frac{precision \\cdot recall}{precision + recall}", "equation_caption": "F1-score calculation formula", } ], mode="hybrid", ) logger.info(f"Answer: {equation_result}") except Exception as e: logger.error(f"Error processing with RAG: {str(e)}") import traceback logger.error(traceback.format_exc()) def main(): """Main function to run the example""" parser = argparse.ArgumentParser(description="MinerU RAG Example") parser.add_argument("file_path", help="Path to the document to process") parser.add_argument( "--working_dir", "-w", default="./rag_storage", help="Working directory path" ) parser.add_argument( "--output", "-o", default="./output", help="Output directory path" ) parser.add_argument( "--api-key", default=os.getenv("OPENAI_API_KEY"), help="OpenAI API key (defaults to OPENAI_API_KEY env var)", ) parser.add_argument("--base-url", help="Optional base URL for API") args = parser.parse_args() # Check if API key is provided if not args.api_key: logger.error("Error: OpenAI API key is required") logger.error("Set OPENAI_API_KEY environment variable or use --api-key option") return # Create output directory if specified if args.output: os.makedirs(args.output, exist_ok=True) # Process with RAG asyncio.run( process_with_rag( args.file_path, args.output, args.api_key, args.base_url, args.working_dir ) ) if __name__ == "__main__": # Configure logging first configure_logging() print("RAGAnything Example") print("=" * 30) print("Processing document with multimodal RAG pipeline") print("=" * 30) main()
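

# --- Illustrative reference (not part of the example) -------------------------
# aquery_with_multimodal() above takes a list of typed content dicts. The two
# shapes used in this file, collected for quick reference (keys copied from the
# calls above; other types may exist in raganything):
#
#   table_item = {
#       "type": "table",
#       "table_data": "Method,Accuracy\nRAGAnything,95.2%\nBaseline,82.1%",
#       "table_caption": "Performance comparison results",
#   }
#   equation_item = {
#       "type": "equation",
#       "latex": "F1 = 2 \\cdot \\frac{precision \\cdot recall}{precision + recall}",
#       "equation_caption": "F1-score calculation formula",
#   }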
{ "repo_id": "HKUDS/LightRAG", "file_path": "examples/raganything_example.py", "license": "MIT License", "lines": 250, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/LightRAG:examples/unofficial-sample/lightrag_llamaindex_litellm_opik_demo.py
import os from lightrag import LightRAG, QueryParam from lightrag.llm.llama_index_impl import ( llama_index_complete_if_cache, llama_index_embed, ) from lightrag.utils import EmbeddingFunc from llama_index.llms.litellm import LiteLLM from llama_index.embeddings.litellm import LiteLLMEmbedding import asyncio import nest_asyncio nest_asyncio.apply() # Configure working directory WORKING_DIR = "./index_default" print(f"WORKING_DIR: {WORKING_DIR}") # Model configuration LLM_MODEL = os.environ.get("LLM_MODEL", "gemma-3-4b") print(f"LLM_MODEL: {LLM_MODEL}") EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "arctic-embed") print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}") EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192)) print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}") # LiteLLM configuration LITELLM_URL = os.environ.get("LITELLM_URL", "http://localhost:4000") print(f"LITELLM_URL: {LITELLM_URL}") LITELLM_KEY = os.environ.get("LITELLM_KEY", "sk-4JdvGFKqSA3S0k_5p0xufw") if not os.path.exists(WORKING_DIR): os.mkdir(WORKING_DIR) # Initialize LLM function async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs): try: # Initialize LiteLLM if not in kwargs if "llm_instance" not in kwargs: llm_instance = LiteLLM( model=f"openai/{LLM_MODEL}", # Format: "provider/model_name" api_base=LITELLM_URL, api_key=LITELLM_KEY, temperature=0.7, ) kwargs["llm_instance"] = llm_instance chat_kwargs = {} chat_kwargs["litellm_params"] = { "metadata": { "opik": { "project_name": "lightrag_llamaindex_litellm_opik_demo", "tags": ["lightrag", "litellm"], } } } response = await llama_index_complete_if_cache( kwargs["llm_instance"], prompt, system_prompt=system_prompt, history_messages=history_messages, chat_kwargs=chat_kwargs, ) return response except Exception as e: print(f"LLM request failed: {str(e)}") raise # Initialize embedding function async def embedding_func(texts): try: embed_model = LiteLLMEmbedding( model_name=f"openai/{EMBEDDING_MODEL}", api_base=LITELLM_URL, api_key=LITELLM_KEY, ) return await llama_index_embed(texts, embed_model=embed_model) except Exception as e: print(f"Embedding failed: {str(e)}") raise # Get embedding dimension async def get_embedding_dim(): test_text = ["This is a test sentence."] embedding = await embedding_func(test_text) embedding_dim = embedding.shape[1] print(f"embedding_dim={embedding_dim}") return embedding_dim async def initialize_rag(): embedding_dimension = await get_embedding_dim() rag = LightRAG( working_dir=WORKING_DIR, llm_model_func=llm_model_func, embedding_func=EmbeddingFunc( embedding_dim=embedding_dimension, max_token_size=EMBEDDING_MAX_TOKEN_SIZE, func=embedding_func, ), ) await rag.initialize_storages() # Auto-initializes pipeline_status return rag def main(): # Initialize RAG instance rag = asyncio.run(initialize_rag()) # Insert example text with open("./book.txt", "r", encoding="utf-8") as f: rag.insert(f.read()) # Test different query modes print("\nNaive Search:") print( rag.query( "What are the top themes in this story?", param=QueryParam(mode="naive") ) ) print("\nLocal Search:") print( rag.query( "What are the top themes in this story?", param=QueryParam(mode="local") ) ) print("\nGlobal Search:") print( rag.query( "What are the top themes in this story?", param=QueryParam(mode="global") ) ) print("\nHybrid Search:") print( rag.query( "What are the top themes in this story?", param=QueryParam(mode="hybrid") ) ) if __name__ == "__main__": main()
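

# --- Illustrative smoke test (not part of the demo) ----------------------------
# llm_model_func() can be exercised on its own before wiring it into LightRAG,
# which is a quick way to confirm the LiteLLM proxy and the Opik metadata in
# chat_kwargs are set up correctly. A minimal sketch, assuming the proxy at
# LITELLM_URL is reachable:
#
#   async def smoke_test() -> None:
#       reply = await llm_model_func("Reply with the single word: pong")
#       print(reply)
#
#   # asyncio.run(smoke_test())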
{ "repo_id": "HKUDS/LightRAG", "file_path": "examples/unofficial-sample/lightrag_llamaindex_litellm_opik_demo.py", "license": "MIT License", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/LightRAG:reproduce/batch_eval.py
import re
import json
import sys

import jsonlines

from openai import OpenAI


def batch_eval(query_file, result1_file, result2_file, output_file_path):
    client = OpenAI()

    with open(query_file, "r") as f:
        data = f.read()

    queries = re.findall(r"- Question \d+: (.+)", data)

    with open(result1_file, "r") as f:
        answers1 = json.load(f)
    answers1 = [i["result"] for i in answers1]

    with open(result2_file, "r") as f:
        answers2 = json.load(f)
    answers2 = [i["result"] for i in answers2]

    requests = []
    for i, (query, answer1, answer2) in enumerate(zip(queries, answers1, answers2)):
        sys_prompt = """
        ---Role---
        You are an expert tasked with evaluating two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
        """

        prompt = f"""
        You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.

        - **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
        - **Diversity**: How varied and rich is the answer in providing different perspectives and insights on the question?
        - **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic?

        For each criterion, choose the better answer (either Answer 1 or Answer 2) and explain why. Then, select an overall winner based on these three categories.

        Here is the question:
        {query}

        Here are the two answers:

        **Answer 1:**
        {answer1}

        **Answer 2:**
        {answer2}

        Evaluate both answers using the three criteria listed above and provide detailed explanations for each criterion.

        Output your evaluation in the following JSON format:

        {{
            "Comprehensiveness": {{
                "Winner": "[Answer 1 or Answer 2]",
                "Explanation": "[Provide explanation here]"
            }},
            "Diversity": {{
                "Winner": "[Answer 1 or Answer 2]",
                "Explanation": "[Provide explanation here]"
            }},
            "Empowerment": {{
                "Winner": "[Answer 1 or Answer 2]",
                "Explanation": "[Provide explanation here]"
            }},
            "Overall Winner": {{
                "Winner": "[Answer 1 or Answer 2]",
                "Explanation": "[Summarize why this answer is the overall winner based on the three criteria]"
            }}
        }}
        """

        request_data = {
            "custom_id": f"request-{i + 1}",
            "method": "POST",
            "url": "/v1/chat/completions",
            "body": {
                "model": "gpt-4o-mini",
                "messages": [
                    {"role": "system", "content": sys_prompt},
                    {"role": "user", "content": prompt},
                ],
            },
        }

        requests.append(request_data)

    with jsonlines.open(output_file_path, mode="w") as writer:
        for request in requests:
            writer.write(request)

    print(f"Batch API requests written to {output_file_path}")

    batch_input_file = client.files.create(
        file=open(output_file_path, "rb"), purpose="batch"
    )
    batch_input_file_id = batch_input_file.id

    batch = client.batches.create(
        input_file_id=batch_input_file_id,
        endpoint="/v1/chat/completions",
        completion_window="24h",
        metadata={"description": "nightly eval job"},
    )

    print(f"Batch {batch.id} has been created.")


if __name__ == "__main__":
    # The four required file paths are taken from the command line.
    if len(sys.argv) != 5:
        sys.exit(
            "usage: python batch_eval.py <query_file> <result1_file> <result2_file> <output_file>"
        )
    batch_eval(*sys.argv[1:])
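

# --- Illustrative follow-up (not part of the original script) ------------------
# Batches complete asynchronously within the 24h window requested above. A
# minimal sketch for collecting results later, assuming `batch_id` was saved
# from the final print statement:
#
#   client = OpenAI()
#   batch = client.batches.retrieve(batch_id)
#   if batch.status == "completed":
#       output = client.files.content(batch.output_file_id)
#       for line in output.text.splitlines():
#           record = json.loads(line)
#           print(record["custom_id"])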
{ "repo_id": "HKUDS/LightRAG", "file_path": "reproduce/batch_eval.py", "license": "MIT License", "lines": 85, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
HKUDS/LightRAG:lightrag/constants.py
""" Centralized configuration constants for LightRAG. This module defines default values for configuration constants used across different parts of the LightRAG system. Centralizing these values ensures consistency and makes maintenance easier. """ # Default values for server settings DEFAULT_WOKERS = 2 DEFAULT_MAX_GRAPH_NODES = 1000 # Default values for extraction settings DEFAULT_SUMMARY_LANGUAGE = "English" # Default language for document processing DEFAULT_MAX_GLEANING = 1 DEFAULT_ENTITY_NAME_MAX_LENGTH = 256 # Number of description fragments to trigger LLM summary DEFAULT_FORCE_LLM_SUMMARY_ON_MERGE = 8 # Max description token size to trigger LLM summary DEFAULT_SUMMARY_MAX_TOKENS = 1200 # Recommended LLM summary output length in tokens DEFAULT_SUMMARY_LENGTH_RECOMMENDED = 600 # Maximum token size sent to LLM for summary DEFAULT_SUMMARY_CONTEXT_SIZE = 12000 # Maximum token size allowed for entity extraction input context DEFAULT_MAX_EXTRACT_INPUT_TOKENS = 20480 # Default entities to extract if ENTITY_TYPES is not specified in .env DEFAULT_ENTITY_TYPES = [ "Person", "Creature", "Organization", "Location", "Event", "Concept", "Method", "Content", "Data", "Artifact", "NaturalObject", ] # Separator for: description, source_id and relation-key fields(Can not be changed after data inserted) GRAPH_FIELD_SEP = "<SEP>" # Query and retrieval configuration defaults DEFAULT_TOP_K = 40 DEFAULT_CHUNK_TOP_K = 20 DEFAULT_MAX_ENTITY_TOKENS = 6000 DEFAULT_MAX_RELATION_TOKENS = 8000 DEFAULT_MAX_TOTAL_TOKENS = 30000 DEFAULT_COSINE_THRESHOLD = 0.2 DEFAULT_RELATED_CHUNK_NUMBER = 5 DEFAULT_KG_CHUNK_PICK_METHOD = "VECTOR" # TODO: Deprated. All conversation_history messages is send to LLM. DEFAULT_HISTORY_TURNS = 0 # Rerank configuration defaults DEFAULT_MIN_RERANK_SCORE = 0.0 DEFAULT_RERANK_BINDING = "null" # Default source ids limit in meta data for entity and relation DEFAULT_MAX_SOURCE_IDS_PER_ENTITY = 300 DEFAULT_MAX_SOURCE_IDS_PER_RELATION = 300 ### control chunk_ids limitation method: FIFO, FIFO ### FIFO: First in first out ### KEEP: Keep oldest (less merge action and faster) SOURCE_IDS_LIMIT_METHOD_KEEP = "KEEP" SOURCE_IDS_LIMIT_METHOD_FIFO = "FIFO" DEFAULT_SOURCE_IDS_LIMIT_METHOD = SOURCE_IDS_LIMIT_METHOD_FIFO VALID_SOURCE_IDS_LIMIT_METHODS = { SOURCE_IDS_LIMIT_METHOD_KEEP, SOURCE_IDS_LIMIT_METHOD_FIFO, } # Maximum number of file paths stored in entity/relation file_path field (For displayed only, does not affect query performance) DEFAULT_MAX_FILE_PATHS = 100 # Field length of file_path in Milvus Schema for entity and relation (Should not be changed) # file_path must store all file paths up to the DEFAULT_MAX_FILE_PATHS limit within the metadata. 
DEFAULT_MAX_FILE_PATH_LENGTH = 32768 # Placeholder for more file paths in meta data for entity and relation (Should not be changed) DEFAULT_FILE_PATH_MORE_PLACEHOLDER = "truncated" # Default temperature for LLM DEFAULT_TEMPERATURE = 1.0 # Async configuration defaults DEFAULT_MAX_ASYNC = 4 # Default maximum async operations DEFAULT_MAX_PARALLEL_INSERT = 2 # Default maximum parallel insert operations # Embedding configuration defaults DEFAULT_EMBEDDING_FUNC_MAX_ASYNC = 8 # Default max async for embedding functions DEFAULT_EMBEDDING_BATCH_NUM = 10 # Default batch size for embedding computations # Gunicorn worker timeout DEFAULT_TIMEOUT = 300 # Default llm and embedding timeout DEFAULT_LLM_TIMEOUT = 180 DEFAULT_EMBEDDING_TIMEOUT = 30 # Logging configuration defaults DEFAULT_LOG_MAX_BYTES = 10485760 # Default 10MB DEFAULT_LOG_BACKUP_COUNT = 5 # Default 5 backups DEFAULT_LOG_FILENAME = "lightrag.log" # Default log filename # Ollama server configuration defaults DEFAULT_OLLAMA_MODEL_NAME = "lightrag" DEFAULT_OLLAMA_MODEL_TAG = "latest" DEFAULT_OLLAMA_MODEL_SIZE = 7365960935 DEFAULT_OLLAMA_CREATED_AT = "2024-01-15T00:00:00Z" DEFAULT_OLLAMA_DIGEST = "sha256:lightrag"
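

# --- Illustrative usage (not part of constants.py) -----------------------------
# Elsewhere in LightRAG these defaults are typically combined with environment
# overrides; a minimal sketch of that pattern (the env var name here is an
# assumption, not a documented setting):
#
#   import os
#   from lightrag.constants import DEFAULT_TOP_K
#
#   top_k = int(os.getenv("TOP_K", DEFAULT_TOP_K))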
{ "repo_id": "HKUDS/LightRAG", "file_path": "lightrag/constants.py", "license": "MIT License", "lines": 96, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:tests/test_loop_save_turn.py
from nanobot.agent.context import ContextBuilder from nanobot.agent.loop import AgentLoop from nanobot.session.manager import Session def _mk_loop() -> AgentLoop: loop = AgentLoop.__new__(AgentLoop) loop._TOOL_RESULT_MAX_CHARS = 500 return loop def test_save_turn_skips_multimodal_user_when_only_runtime_context() -> None: loop = _mk_loop() session = Session(key="test:runtime-only") runtime = ContextBuilder._RUNTIME_CONTEXT_TAG + "\nCurrent Time: now (UTC)" loop._save_turn( session, [{"role": "user", "content": [{"type": "text", "text": runtime}]}], skip=0, ) assert session.messages == [] def test_save_turn_keeps_image_placeholder_after_runtime_strip() -> None: loop = _mk_loop() session = Session(key="test:image") runtime = ContextBuilder._RUNTIME_CONTEXT_TAG + "\nCurrent Time: now (UTC)" loop._save_turn( session, [{ "role": "user", "content": [ {"type": "text", "text": runtime}, {"type": "image_url", "image_url": {"url": "data:image/png;base64,abc"}}, ], }], skip=0, ) assert session.messages[0]["content"] == [{"type": "text", "text": "[image]"}]
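

# --- Note on the construction trick above (illustrative) -----------------------
# _mk_loop() uses AgentLoop.__new__ to allocate an instance without running
# __init__, so only the attributes that _save_turn reads need to be set. The
# same pattern on a hypothetical class, for reference:
#
#   class Expensive:
#       def __init__(self) -> None:
#           raise RuntimeError("heavy setup")  # deliberately never invoked
#
#   obj = Expensive.__new__(Expensive)  # allocated; __init__ skipped
#   obj.ready = True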
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_loop_save_turn.py", "license": "MIT License", "lines": 33, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:nanobot/channels/matrix.py
"""Matrix (Element) channel — inbound sync + outbound message/media delivery.""" import asyncio import logging import mimetypes from pathlib import Path from typing import Any, TypeAlias from loguru import logger try: import nh3 from mistune import create_markdown from nio import ( AsyncClient, AsyncClientConfig, ContentRepositoryConfigError, DownloadError, InviteEvent, JoinError, MatrixRoom, MemoryDownloadResponse, RoomEncryptedMedia, RoomMessage, RoomMessageMedia, RoomMessageText, RoomSendError, RoomTypingError, SyncError, UploadError, ) from nio.crypto.attachments import decrypt_attachment from nio.exceptions import EncryptionError except ImportError as e: raise ImportError( "Matrix dependencies not installed. Run: pip install nanobot-ai[matrix]" ) from e from nanobot.bus.events import OutboundMessage from nanobot.channels.base import BaseChannel from nanobot.config.loader import get_data_dir from nanobot.utils.helpers import safe_filename TYPING_NOTICE_TIMEOUT_MS = 30_000 # Must stay below TYPING_NOTICE_TIMEOUT_MS so the indicator doesn't expire mid-processing. TYPING_KEEPALIVE_INTERVAL_MS = 20_000 MATRIX_HTML_FORMAT = "org.matrix.custom.html" _ATTACH_MARKER = "[attachment: {}]" _ATTACH_TOO_LARGE = "[attachment: {} - too large]" _ATTACH_FAILED = "[attachment: {} - download failed]" _ATTACH_UPLOAD_FAILED = "[attachment: {} - upload failed]" _DEFAULT_ATTACH_NAME = "attachment" _MSGTYPE_MAP = {"m.image": "image", "m.audio": "audio", "m.video": "video", "m.file": "file"} MATRIX_MEDIA_EVENT_FILTER = (RoomMessageMedia, RoomEncryptedMedia) MatrixMediaEvent: TypeAlias = RoomMessageMedia | RoomEncryptedMedia MATRIX_MARKDOWN = create_markdown( escape=True, plugins=["table", "strikethrough", "url", "superscript", "subscript"], ) MATRIX_ALLOWED_HTML_TAGS = { "p", "a", "strong", "em", "del", "code", "pre", "blockquote", "ul", "ol", "li", "h1", "h2", "h3", "h4", "h5", "h6", "hr", "br", "table", "thead", "tbody", "tr", "th", "td", "caption", "sup", "sub", "img", } MATRIX_ALLOWED_HTML_ATTRIBUTES: dict[str, set[str]] = { "a": {"href"}, "code": {"class"}, "ol": {"start"}, "img": {"src", "alt", "title", "width", "height"}, } MATRIX_ALLOWED_URL_SCHEMES = {"https", "http", "matrix", "mailto", "mxc"} def _filter_matrix_html_attribute(tag: str, attr: str, value: str) -> str | None: """Filter attribute values to a safe Matrix-compatible subset.""" if tag == "a" and attr == "href": return value if value.lower().startswith(("https://", "http://", "matrix:", "mailto:")) else None if tag == "img" and attr == "src": return value if value.lower().startswith("mxc://") else None if tag == "code" and attr == "class": classes = [c for c in value.split() if c.startswith("language-") and not c.startswith("language-_")] return " ".join(classes) if classes else None return value MATRIX_HTML_CLEANER = nh3.Cleaner( tags=MATRIX_ALLOWED_HTML_TAGS, attributes=MATRIX_ALLOWED_HTML_ATTRIBUTES, attribute_filter=_filter_matrix_html_attribute, url_schemes=MATRIX_ALLOWED_URL_SCHEMES, strip_comments=True, link_rel="noopener noreferrer", ) def _render_markdown_html(text: str) -> str | None: """Render markdown to sanitized HTML; returns None for plain text.""" try: formatted = MATRIX_HTML_CLEANER.clean(MATRIX_MARKDOWN(text)).strip() except Exception: return None if not formatted: return None # Skip formatted_body for plain <p>text</p> to keep payload minimal. 
if formatted.startswith("<p>") and formatted.endswith("</p>"): inner = formatted[3:-4] if "<" not in inner and ">" not in inner: return None return formatted def _build_matrix_text_content(text: str) -> dict[str, object]: """Build Matrix m.text payload with optional HTML formatted_body.""" content: dict[str, object] = {"msgtype": "m.text", "body": text, "m.mentions": {}} if html := _render_markdown_html(text): content["format"] = MATRIX_HTML_FORMAT content["formatted_body"] = html return content class _NioLoguruHandler(logging.Handler): """Route matrix-nio stdlib logs into Loguru.""" def emit(self, record: logging.LogRecord) -> None: try: level = logger.level(record.levelname).name except ValueError: level = record.levelno frame, depth = logging.currentframe(), 2 while frame and frame.f_code.co_filename == logging.__file__: frame, depth = frame.f_back, depth + 1 logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage()) def _configure_nio_logging_bridge() -> None: """Bridge matrix-nio logs to Loguru (idempotent).""" nio_logger = logging.getLogger("nio") if not any(isinstance(h, _NioLoguruHandler) for h in nio_logger.handlers): nio_logger.handlers = [_NioLoguruHandler()] nio_logger.propagate = False class MatrixChannel(BaseChannel): """Matrix (Element) channel using long-polling sync.""" name = "matrix" def __init__(self, config: Any, bus, *, restrict_to_workspace: bool = False, workspace: Path | None = None): super().__init__(config, bus) self.client: AsyncClient | None = None self._sync_task: asyncio.Task | None = None self._typing_tasks: dict[str, asyncio.Task] = {} self._restrict_to_workspace = restrict_to_workspace self._workspace = workspace.expanduser().resolve() if workspace else None self._server_upload_limit_bytes: int | None = None self._server_upload_limit_checked = False async def start(self) -> None: """Start Matrix client and begin sync loop.""" self._running = True _configure_nio_logging_bridge() store_path = get_data_dir() / "matrix-store" store_path.mkdir(parents=True, exist_ok=True) self.client = AsyncClient( homeserver=self.config.homeserver, user=self.config.user_id, store_path=store_path, config=AsyncClientConfig(store_sync_tokens=True, encryption_enabled=self.config.e2ee_enabled), ) self.client.user_id = self.config.user_id self.client.access_token = self.config.access_token self.client.device_id = self.config.device_id self._register_event_callbacks() self._register_response_callbacks() if not self.config.e2ee_enabled: logger.warning("Matrix E2EE disabled; encrypted rooms may be undecryptable.") if self.config.device_id: try: self.client.load_store() except Exception: logger.exception("Matrix store load failed; restart may replay recent messages.") else: logger.warning("Matrix device_id empty; restart may replay recent messages.") self._sync_task = asyncio.create_task(self._sync_loop()) async def stop(self) -> None: """Stop the Matrix channel with graceful sync shutdown.""" self._running = False for room_id in list(self._typing_tasks): await self._stop_typing_keepalive(room_id, clear_typing=False) if self.client: self.client.stop_sync_forever() if self._sync_task: try: await asyncio.wait_for(asyncio.shield(self._sync_task), timeout=self.config.sync_stop_grace_seconds) except (asyncio.TimeoutError, asyncio.CancelledError): self._sync_task.cancel() try: await self._sync_task except asyncio.CancelledError: pass if self.client: await self.client.close() def _is_workspace_path_allowed(self, path: Path) -> bool: """Check path is inside workspace 
(when restriction enabled).""" if not self._restrict_to_workspace or not self._workspace: return True try: path.resolve(strict=False).relative_to(self._workspace) return True except ValueError: return False def _collect_outbound_media_candidates(self, media: list[str]) -> list[Path]: """Deduplicate and resolve outbound attachment paths.""" seen: set[str] = set() candidates: list[Path] = [] for raw in media: if not isinstance(raw, str) or not raw.strip(): continue path = Path(raw.strip()).expanduser() try: key = str(path.resolve(strict=False)) except OSError: key = str(path) if key not in seen: seen.add(key) candidates.append(path) return candidates @staticmethod def _build_outbound_attachment_content( *, filename: str, mime: str, size_bytes: int, mxc_url: str, encryption_info: dict[str, Any] | None = None, ) -> dict[str, Any]: """Build Matrix content payload for an uploaded file/image/audio/video.""" prefix = mime.split("/")[0] msgtype = {"image": "m.image", "audio": "m.audio", "video": "m.video"}.get(prefix, "m.file") content: dict[str, Any] = { "msgtype": msgtype, "body": filename, "filename": filename, "info": {"mimetype": mime, "size": size_bytes}, "m.mentions": {}, } if encryption_info: content["file"] = {**encryption_info, "url": mxc_url} else: content["url"] = mxc_url return content def _is_encrypted_room(self, room_id: str) -> bool: if not self.client: return False room = getattr(self.client, "rooms", {}).get(room_id) return bool(getattr(room, "encrypted", False)) async def _send_room_content(self, room_id: str, content: dict[str, Any]) -> None: """Send m.room.message with E2EE options.""" if not self.client: return kwargs: dict[str, Any] = {"room_id": room_id, "message_type": "m.room.message", "content": content} if self.config.e2ee_enabled: kwargs["ignore_unverified_devices"] = True await self.client.room_send(**kwargs) async def _resolve_server_upload_limit_bytes(self) -> int | None: """Query homeserver upload limit once per channel lifecycle.""" if self._server_upload_limit_checked: return self._server_upload_limit_bytes self._server_upload_limit_checked = True if not self.client: return None try: response = await self.client.content_repository_config() except Exception: return None upload_size = getattr(response, "upload_size", None) if isinstance(upload_size, int) and upload_size > 0: self._server_upload_limit_bytes = upload_size return upload_size return None async def _effective_media_limit_bytes(self) -> int: """min(local config, server advertised) — 0 blocks all uploads.""" local_limit = max(int(self.config.max_media_bytes), 0) server_limit = await self._resolve_server_upload_limit_bytes() if server_limit is None: return local_limit return min(local_limit, server_limit) if local_limit else 0 async def _upload_and_send_attachment( self, room_id: str, path: Path, limit_bytes: int, relates_to: dict[str, Any] | None = None, ) -> str | None: """Upload one local file to Matrix and send it as a media message. 
Returns failure marker or None.""" if not self.client: return _ATTACH_UPLOAD_FAILED.format(path.name or _DEFAULT_ATTACH_NAME) resolved = path.expanduser().resolve(strict=False) filename = safe_filename(resolved.name) or _DEFAULT_ATTACH_NAME fail = _ATTACH_UPLOAD_FAILED.format(filename) if not resolved.is_file() or not self._is_workspace_path_allowed(resolved): return fail try: size_bytes = resolved.stat().st_size except OSError: return fail if limit_bytes <= 0 or size_bytes > limit_bytes: return _ATTACH_TOO_LARGE.format(filename) mime = mimetypes.guess_type(filename, strict=False)[0] or "application/octet-stream" try: with resolved.open("rb") as f: upload_result = await self.client.upload( f, content_type=mime, filename=filename, encrypt=self.config.e2ee_enabled and self._is_encrypted_room(room_id), filesize=size_bytes, ) except Exception: return fail upload_response = upload_result[0] if isinstance(upload_result, tuple) else upload_result encryption_info = upload_result[1] if isinstance(upload_result, tuple) and isinstance(upload_result[1], dict) else None if isinstance(upload_response, UploadError): return fail mxc_url = getattr(upload_response, "content_uri", None) if not isinstance(mxc_url, str) or not mxc_url.startswith("mxc://"): return fail content = self._build_outbound_attachment_content( filename=filename, mime=mime, size_bytes=size_bytes, mxc_url=mxc_url, encryption_info=encryption_info, ) if relates_to: content["m.relates_to"] = relates_to try: await self._send_room_content(room_id, content) except Exception: return fail return None async def send(self, msg: OutboundMessage) -> None: """Send outbound content; clear typing for non-progress messages.""" if not self.client: return text = msg.content or "" candidates = self._collect_outbound_media_candidates(msg.media) relates_to = self._build_thread_relates_to(msg.metadata) is_progress = bool((msg.metadata or {}).get("_progress")) try: failures: list[str] = [] if candidates: limit_bytes = await self._effective_media_limit_bytes() for path in candidates: if fail := await self._upload_and_send_attachment( room_id=msg.chat_id, path=path, limit_bytes=limit_bytes, relates_to=relates_to, ): failures.append(fail) if failures: text = f"{text.rstrip()}\n{chr(10).join(failures)}" if text.strip() else "\n".join(failures) if text or not candidates: content = _build_matrix_text_content(text) if relates_to: content["m.relates_to"] = relates_to await self._send_room_content(msg.chat_id, content) finally: if not is_progress: await self._stop_typing_keepalive(msg.chat_id, clear_typing=True) def _register_event_callbacks(self) -> None: self.client.add_event_callback(self._on_message, RoomMessageText) self.client.add_event_callback(self._on_media_message, MATRIX_MEDIA_EVENT_FILTER) self.client.add_event_callback(self._on_room_invite, InviteEvent) def _register_response_callbacks(self) -> None: self.client.add_response_callback(self._on_sync_error, SyncError) self.client.add_response_callback(self._on_join_error, JoinError) self.client.add_response_callback(self._on_send_error, RoomSendError) def _log_response_error(self, label: str, response: Any) -> None: """Log Matrix response errors — auth errors at ERROR level, rest at WARNING.""" code = getattr(response, "status_code", None) is_auth = code in {"M_UNKNOWN_TOKEN", "M_FORBIDDEN", "M_UNAUTHORIZED"} is_fatal = is_auth or getattr(response, "soft_logout", False) (logger.error if is_fatal else logger.warning)("Matrix {} failed: {}", label, response) async def _on_sync_error(self, response: SyncError) 
-> None:
        self._log_response_error("sync", response)

    async def _on_join_error(self, response: JoinError) -> None:
        self._log_response_error("join", response)

    async def _on_send_error(self, response: RoomSendError) -> None:
        self._log_response_error("send", response)

    async def _set_typing(self, room_id: str, typing: bool) -> None:
        """Best-effort typing indicator update."""
        if not self.client:
            return
        try:
            response = await self.client.room_typing(
                room_id=room_id, typing_state=typing, timeout=TYPING_NOTICE_TIMEOUT_MS
            )
            if isinstance(response, RoomTypingError):
                logger.debug("Matrix typing failed for {}: {}", room_id, response)
        except Exception:
            pass

    async def _start_typing_keepalive(self, room_id: str) -> None:
        """Start periodic typing refresh (spec-recommended keepalive)."""
        await self._stop_typing_keepalive(room_id, clear_typing=False)
        await self._set_typing(room_id, True)
        if not self._running:
            return

        async def loop() -> None:
            try:
                while self._running:
                    await asyncio.sleep(TYPING_KEEPALIVE_INTERVAL_MS / 1000)
                    await self._set_typing(room_id, True)
            except asyncio.CancelledError:
                pass

        self._typing_tasks[room_id] = asyncio.create_task(loop())

    async def _stop_typing_keepalive(self, room_id: str, *, clear_typing: bool) -> None:
        if task := self._typing_tasks.pop(room_id, None):
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
        if clear_typing:
            await self._set_typing(room_id, False)

    async def _sync_loop(self) -> None:
        while self._running:
            try:
                await self.client.sync_forever(timeout=30000, full_state=True)
            except asyncio.CancelledError:
                break
            except Exception:
                await asyncio.sleep(2)

    async def _on_room_invite(self, room: MatrixRoom, event: InviteEvent) -> None:
        if self.is_allowed(event.sender):
            await self.client.join(room.room_id)

    def _is_direct_room(self, room: MatrixRoom) -> bool:
        count = getattr(room, "member_count", None)
        return isinstance(count, int) and count <= 2

    def _is_bot_mentioned(self, event: RoomMessage) -> bool:
        """Check m.mentions payload for bot mention."""
        source = getattr(event, "source", None)
        if not isinstance(source, dict):
            return False
        mentions = (source.get("content") or {}).get("m.mentions")
        if not isinstance(mentions, dict):
            return False
        user_ids = mentions.get("user_ids")
        if isinstance(user_ids, list) and self.config.user_id in user_ids:
            return True
        return bool(self.config.allow_room_mentions and mentions.get("room") is True)

    def _should_process_message(self, room: MatrixRoom, event: RoomMessage) -> bool:
        """Apply sender and room policy checks."""
        if not self.is_allowed(event.sender):
            return False
        if self._is_direct_room(room):
            return True
        policy = self.config.group_policy
        if policy == "open":
            return True
        if policy == "allowlist":
            return room.room_id in (self.config.group_allow_from or [])
        if policy == "mention":
            return self._is_bot_mentioned(event)
        return False

    def _media_dir(self) -> Path:
        d = get_data_dir() / "media" / "matrix"
        d.mkdir(parents=True, exist_ok=True)
        return d

    @staticmethod
    def _event_source_content(event: RoomMessage) -> dict[str, Any]:
        source = getattr(event, "source", None)
        if not isinstance(source, dict):
            return {}
        content = source.get("content")
        return content if isinstance(content, dict) else {}

    def _event_thread_root_id(self, event: RoomMessage) -> str | None:
        relates_to = self._event_source_content(event).get("m.relates_to")
        if not isinstance(relates_to, dict) or relates_to.get("rel_type") != "m.thread":
            return None
        root_id = relates_to.get("event_id")
        return root_id if isinstance(root_id, str) and root_id else None

    def _thread_metadata(self, event: RoomMessage) -> dict[str, str] | None:
        if not (root_id := self._event_thread_root_id(event)):
            return None
        meta: dict[str, str] = {"thread_root_event_id": root_id}
        if isinstance(reply_to := getattr(event, "event_id", None), str) and reply_to:
            meta["thread_reply_to_event_id"] = reply_to
        return meta

    @staticmethod
    def _build_thread_relates_to(metadata: dict[str, Any] | None) -> dict[str, Any] | None:
        if not metadata:
            return None
        root_id = metadata.get("thread_root_event_id")
        if not isinstance(root_id, str) or not root_id:
            return None
        reply_to = metadata.get("thread_reply_to_event_id") or metadata.get("event_id")
        if not isinstance(reply_to, str) or not reply_to:
            return None
        return {
            "rel_type": "m.thread",
            "event_id": root_id,
            "m.in_reply_to": {"event_id": reply_to},
            "is_falling_back": True,
        }

    def _event_attachment_type(self, event: MatrixMediaEvent) -> str:
        msgtype = self._event_source_content(event).get("msgtype")
        return _MSGTYPE_MAP.get(msgtype, "file")

    @staticmethod
    def _is_encrypted_media_event(event: MatrixMediaEvent) -> bool:
        return (
            isinstance(getattr(event, "key", None), dict)
            and isinstance(getattr(event, "hashes", None), dict)
            and isinstance(getattr(event, "iv", None), str)
        )

    def _event_declared_size_bytes(self, event: MatrixMediaEvent) -> int | None:
        info = self._event_source_content(event).get("info")
        size = info.get("size") if isinstance(info, dict) else None
        return size if isinstance(size, int) and size >= 0 else None

    def _event_mime(self, event: MatrixMediaEvent) -> str | None:
        info = self._event_source_content(event).get("info")
        if isinstance(info, dict) and isinstance(m := info.get("mimetype"), str) and m:
            return m
        m = getattr(event, "mimetype", None)
        return m if isinstance(m, str) and m else None

    def _event_filename(self, event: MatrixMediaEvent, attachment_type: str) -> str:
        body = getattr(event, "body", None)
        if isinstance(body, str) and body.strip():
            if candidate := safe_filename(Path(body).name):
                return candidate
        return _DEFAULT_ATTACH_NAME if attachment_type == "file" else attachment_type

    def _build_attachment_path(
        self, event: MatrixMediaEvent, attachment_type: str, filename: str, mime: str | None
    ) -> Path:
        safe_name = safe_filename(Path(filename).name) or _DEFAULT_ATTACH_NAME
        suffix = Path(safe_name).suffix
        if not suffix and mime:
            if guessed := mimetypes.guess_extension(mime, strict=False):
                safe_name, suffix = f"{safe_name}{guessed}", guessed
        stem = (Path(safe_name).stem or attachment_type)[:72]
        suffix = suffix[:16]
        event_id = safe_filename(str(getattr(event, "event_id", "") or "evt").lstrip("$"))
        event_prefix = (event_id[:24] or "evt").strip("_")
        return self._media_dir() / f"{event_prefix}_{stem}{suffix}"

    async def _download_media_bytes(self, mxc_url: str) -> bytes | None:
        if not self.client:
            return None
        response = await self.client.download(mxc=mxc_url)
        if isinstance(response, DownloadError):
            logger.warning("Matrix download failed for {}: {}", mxc_url, response)
            return None
        body = getattr(response, "body", None)
        if isinstance(body, (bytes, bytearray)):
            return bytes(body)
        if isinstance(response, MemoryDownloadResponse):
            return bytes(response.body)
        if isinstance(body, (str, Path)):
            path = Path(body)
            if path.is_file():
                try:
                    return path.read_bytes()
                except OSError:
                    return None
        return None

    def _decrypt_media_bytes(self, event: MatrixMediaEvent, ciphertext: bytes) -> bytes | None:
        key_obj, hashes, iv = (
            getattr(event, "key", None),
            getattr(event, "hashes", None),
            getattr(event, "iv", None),
        )
        key = key_obj.get("k") if isinstance(key_obj, dict) else None
        sha256 = hashes.get("sha256") if isinstance(hashes, dict) else None
        if not all(isinstance(v, str) for v in (key, sha256, iv)):
            return None
        try:
            return decrypt_attachment(ciphertext, key, sha256, iv)
        except (EncryptionError, ValueError, TypeError):
            logger.warning("Matrix decrypt failed for event {}", getattr(event, "event_id", ""))
            return None

    async def _fetch_media_attachment(
        self,
        room: MatrixRoom,
        event: MatrixMediaEvent,
    ) -> tuple[dict[str, Any] | None, str]:
        """Download, decrypt if needed, and persist a Matrix attachment."""
        atype = self._event_attachment_type(event)
        mime = self._event_mime(event)
        filename = self._event_filename(event, atype)
        mxc_url = getattr(event, "url", None)
        fail = _ATTACH_FAILED.format(filename)
        if not isinstance(mxc_url, str) or not mxc_url.startswith("mxc://"):
            return None, fail
        limit_bytes = await self._effective_media_limit_bytes()
        declared = self._event_declared_size_bytes(event)
        if declared is not None and declared > limit_bytes:
            return None, _ATTACH_TOO_LARGE.format(filename)
        downloaded = await self._download_media_bytes(mxc_url)
        if downloaded is None:
            return None, fail
        encrypted = self._is_encrypted_media_event(event)
        data = downloaded
        if encrypted:
            if (data := self._decrypt_media_bytes(event, downloaded)) is None:
                return None, fail
        if len(data) > limit_bytes:
            return None, _ATTACH_TOO_LARGE.format(filename)
        path = self._build_attachment_path(event, atype, filename, mime)
        try:
            path.write_bytes(data)
        except OSError:
            return None, fail
        attachment = {
            "type": atype,
            "mime": mime,
            "filename": filename,
            "event_id": str(getattr(event, "event_id", "") or ""),
            "encrypted": encrypted,
            "size_bytes": len(data),
            "path": str(path),
            "mxc_url": mxc_url,
        }
        return attachment, _ATTACH_MARKER.format(path)

    def _base_metadata(self, room: MatrixRoom, event: RoomMessage) -> dict[str, Any]:
        """Build common metadata for text and media handlers."""
        meta: dict[str, Any] = {"room": getattr(room, "display_name", room.room_id)}
        if isinstance(eid := getattr(event, "event_id", None), str) and eid:
            meta["event_id"] = eid
        if thread := self._thread_metadata(event):
            meta.update(thread)
        return meta

    async def _on_message(self, room: MatrixRoom, event: RoomMessageText) -> None:
        if event.sender == self.config.user_id or not self._should_process_message(room, event):
            return
        await self._start_typing_keepalive(room.room_id)
        try:
            await self._handle_message(
                sender_id=event.sender,
                chat_id=room.room_id,
                content=event.body,
                metadata=self._base_metadata(room, event),
            )
        except Exception:
            await self._stop_typing_keepalive(room.room_id, clear_typing=True)
            raise

    async def _on_media_message(self, room: MatrixRoom, event: MatrixMediaEvent) -> None:
        if event.sender == self.config.user_id or not self._should_process_message(room, event):
            return
        attachment, marker = await self._fetch_media_attachment(room, event)
        parts: list[str] = []
        if isinstance(body := getattr(event, "body", None), str) and body.strip():
            parts.append(body.strip())
        if marker:
            parts.append(marker)
        await self._start_typing_keepalive(room.room_id)
        try:
            meta = self._base_metadata(room, event)
            meta["attachments"] = []
            if attachment:
                meta["attachments"] = [attachment]
            await self._handle_message(
                sender_id=event.sender,
                chat_id=room.room_id,
                content="\n".join(parts),
                media=[attachment["path"]] if attachment else [],
                metadata=meta,
            )
        except Exception:
            await self._stop_typing_keepalive(room.room_id, clear_typing=True)
            raise
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/matrix.py", "license": "MIT License", "lines": 611, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:tests/test_matrix_channel.py
import asyncio
from pathlib import Path
from types import SimpleNamespace

import pytest

import nanobot.channels.matrix as matrix_module
from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.matrix import (
    MATRIX_HTML_FORMAT,
    TYPING_NOTICE_TIMEOUT_MS,
    MatrixChannel,
)
from nanobot.config.schema import MatrixConfig

_ROOM_SEND_UNSET = object()


class _DummyTask:
    def __init__(self) -> None:
        self.cancelled = False

    def cancel(self) -> None:
        self.cancelled = True

    def __await__(self):
        async def _done():
            return None

        return _done().__await__()


class _FakeAsyncClient:
    def __init__(self, homeserver, user, store_path, config) -> None:
        self.homeserver = homeserver
        self.user = user
        self.store_path = store_path
        self.config = config
        self.user_id: str | None = None
        self.access_token: str | None = None
        self.device_id: str | None = None
        self.load_store_called = False
        self.stop_sync_forever_called = False
        self.join_calls: list[str] = []
        self.callbacks: list[tuple[object, object]] = []
        self.response_callbacks: list[tuple[object, object]] = []
        self.rooms: dict[str, object] = {}
        self.room_send_calls: list[dict[str, object]] = []
        self.typing_calls: list[tuple[str, bool, int]] = []
        self.download_calls: list[dict[str, object]] = []
        self.upload_calls: list[dict[str, object]] = []
        self.download_response: object | None = None
        self.download_bytes: bytes = b"media"
        self.download_content_type: str = "application/octet-stream"
        self.download_filename: str | None = None
        self.upload_response: object | None = None
        self.content_repository_config_response: object = SimpleNamespace(upload_size=None)
        self.raise_on_send = False
        self.raise_on_typing = False
        self.raise_on_upload = False

    def add_event_callback(self, callback, event_type) -> None:
        self.callbacks.append((callback, event_type))

    def add_response_callback(self, callback, response_type) -> None:
        self.response_callbacks.append((callback, response_type))

    def load_store(self) -> None:
        self.load_store_called = True

    def stop_sync_forever(self) -> None:
        self.stop_sync_forever_called = True

    async def join(self, room_id: str) -> None:
        self.join_calls.append(room_id)

    async def room_send(
        self,
        room_id: str,
        message_type: str,
        content: dict[str, object],
        ignore_unverified_devices: object = _ROOM_SEND_UNSET,
    ) -> None:
        call: dict[str, object] = {
            "room_id": room_id,
            "message_type": message_type,
            "content": content,
        }
        if ignore_unverified_devices is not _ROOM_SEND_UNSET:
            call["ignore_unverified_devices"] = ignore_unverified_devices
        self.room_send_calls.append(call)
        if self.raise_on_send:
            raise RuntimeError("send failed")

    async def room_typing(
        self,
        room_id: str,
        typing_state: bool = True,
        timeout: int = 30_000,
    ) -> None:
        self.typing_calls.append((room_id, typing_state, timeout))
        if self.raise_on_typing:
            raise RuntimeError("typing failed")

    async def download(self, **kwargs):
        self.download_calls.append(kwargs)
        if self.download_response is not None:
            return self.download_response
        return matrix_module.MemoryDownloadResponse(
            body=self.download_bytes,
            content_type=self.download_content_type,
            filename=self.download_filename,
        )

    async def upload(
        self,
        data_provider,
        content_type: str | None = None,
        filename: str | None = None,
        filesize: int | None = None,
        encrypt: bool = False,
    ):
        if self.raise_on_upload:
            raise RuntimeError("upload failed")
        if isinstance(data_provider, (bytes, bytearray)):
            raise TypeError(
                f"data_provider type {type(data_provider)!r} is not of a usable type "
                "(Callable, IOBase)"
            )
        self.upload_calls.append(
            {
                "data_provider": data_provider,
                "content_type": content_type,
                "filename": filename,
                "filesize": filesize,
                "encrypt": encrypt,
            }
        )
        if self.upload_response is not None:
            return self.upload_response
        if encrypt:
            return (
                SimpleNamespace(content_uri="mxc://example.org/uploaded"),
                {
                    "v": "v2",
                    "iv": "iv",
                    "hashes": {"sha256": "hash"},
                    "key": {"alg": "A256CTR", "k": "key"},
                },
            )
        return SimpleNamespace(content_uri="mxc://example.org/uploaded"), None

    async def content_repository_config(self):
        return self.content_repository_config_response

    async def close(self) -> None:
        return None


def _make_config(**kwargs) -> MatrixConfig:
    return MatrixConfig(
        enabled=True,
        homeserver="https://matrix.org",
        access_token="token",
        user_id="@bot:matrix.org",
        **kwargs,
    )


@pytest.mark.asyncio
async def test_start_skips_load_store_when_device_id_missing(
    monkeypatch, tmp_path
) -> None:
    clients: list[_FakeAsyncClient] = []

    def _fake_client(*args, **kwargs) -> _FakeAsyncClient:
        client = _FakeAsyncClient(*args, **kwargs)
        clients.append(client)
        return client

    def _fake_create_task(coro):
        coro.close()
        return _DummyTask()

    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    monkeypatch.setattr(
        "nanobot.channels.matrix.AsyncClientConfig",
        lambda **kwargs: SimpleNamespace(**kwargs),
    )
    monkeypatch.setattr("nanobot.channels.matrix.AsyncClient", _fake_client)
    monkeypatch.setattr(
        "nanobot.channels.matrix.asyncio.create_task", _fake_create_task
    )

    channel = MatrixChannel(_make_config(device_id=""), MessageBus())
    await channel.start()

    assert len(clients) == 1
    assert clients[0].config.encryption_enabled is True
    assert clients[0].load_store_called is False
    assert len(clients[0].callbacks) == 3
    assert len(clients[0].response_callbacks) == 3

    await channel.stop()


@pytest.mark.asyncio
async def test_register_event_callbacks_uses_media_base_filter() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client

    channel._register_event_callbacks()

    assert len(client.callbacks) == 3
    assert client.callbacks[1][0] == channel._on_media_message
    assert client.callbacks[1][1] == matrix_module.MATRIX_MEDIA_EVENT_FILTER


def test_media_event_filter_does_not_match_text_events() -> None:
    assert not issubclass(matrix_module.RoomMessageText, matrix_module.MATRIX_MEDIA_EVENT_FILTER)


@pytest.mark.asyncio
async def test_start_disables_e2ee_when_configured(monkeypatch, tmp_path) -> None:
    clients: list[_FakeAsyncClient] = []

    def _fake_client(*args, **kwargs) -> _FakeAsyncClient:
        client = _FakeAsyncClient(*args, **kwargs)
        clients.append(client)
        return client

    def _fake_create_task(coro):
        coro.close()
        return _DummyTask()

    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    monkeypatch.setattr(
        "nanobot.channels.matrix.AsyncClientConfig",
        lambda **kwargs: SimpleNamespace(**kwargs),
    )
    monkeypatch.setattr("nanobot.channels.matrix.AsyncClient", _fake_client)
    monkeypatch.setattr(
        "nanobot.channels.matrix.asyncio.create_task", _fake_create_task
    )

    channel = MatrixChannel(_make_config(device_id="", e2ee_enabled=False), MessageBus())
    await channel.start()

    assert len(clients) == 1
    assert clients[0].config.encryption_enabled is False

    await channel.stop()


@pytest.mark.asyncio
async def test_stop_stops_sync_forever_before_close(monkeypatch) -> None:
    channel = MatrixChannel(_make_config(device_id="DEVICE"), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    task = _DummyTask()
    channel.client = client
    channel._sync_task = task
    channel._running = True

    await channel.stop()

    assert channel._running is False
    assert client.stop_sync_forever_called is True
    assert task.cancelled is False


@pytest.mark.asyncio
async def test_room_invite_joins_when_allow_list_is_empty() -> None:
    channel = MatrixChannel(_make_config(allow_from=[]), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    room = SimpleNamespace(room_id="!room:matrix.org")
    event = SimpleNamespace(sender="@alice:matrix.org")

    await channel._on_room_invite(room, event)

    assert client.join_calls == ["!room:matrix.org"]


@pytest.mark.asyncio
async def test_room_invite_respects_allow_list_when_configured() -> None:
    channel = MatrixChannel(_make_config(allow_from=["@bob:matrix.org"]), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    room = SimpleNamespace(room_id="!room:matrix.org")
    event = SimpleNamespace(sender="@alice:matrix.org")

    await channel._on_room_invite(room, event)

    assert client.join_calls == []


@pytest.mark.asyncio
async def test_on_message_sets_typing_for_allowed_sender() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["sender_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room")
    event = SimpleNamespace(sender="@alice:matrix.org", body="Hello", source={})

    await channel._on_message(room, event)

    assert handled == ["@alice:matrix.org"]
    assert client.typing_calls == [
        ("!room:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS),
    ]


@pytest.mark.asyncio
async def test_typing_keepalive_refreshes_periodically(monkeypatch) -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    channel._running = True
    monkeypatch.setattr(matrix_module, "TYPING_KEEPALIVE_INTERVAL_MS", 10)

    await channel._start_typing_keepalive("!room:matrix.org")
    await asyncio.sleep(0.03)
    await channel._stop_typing_keepalive("!room:matrix.org", clear_typing=True)

    true_updates = [call for call in client.typing_calls if call[1] is True]
    assert len(true_updates) >= 2
    assert client.typing_calls[-1] == ("!room:matrix.org", False, TYPING_NOTICE_TIMEOUT_MS)


@pytest.mark.asyncio
async def test_on_message_skips_typing_for_self_message() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room")
    event = SimpleNamespace(sender="@bot:matrix.org", body="Hello", source={})

    await channel._on_message(room, event)

    assert client.typing_calls == []


@pytest.mark.asyncio
async def test_on_message_skips_typing_for_denied_sender() -> None:
    channel = MatrixChannel(_make_config(allow_from=["@bob:matrix.org"]), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["sender_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room")
    event = SimpleNamespace(sender="@alice:matrix.org", body="Hello", source={})

    await channel._on_message(room, event)

    assert handled == []
    assert client.typing_calls == []


@pytest.mark.asyncio
async def test_on_message_mention_policy_requires_mx_mentions() -> None:
    channel = MatrixChannel(_make_config(group_policy="mention"), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["sender_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=3)
    event = SimpleNamespace(sender="@alice:matrix.org", body="Hello", source={"content": {}})

    await channel._on_message(room, event)

    assert handled == []
    assert client.typing_calls == []


@pytest.mark.asyncio
async def test_on_message_mention_policy_accepts_bot_user_mentions() -> None:
    channel = MatrixChannel(_make_config(group_policy="mention"), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["sender_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=3)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="Hello",
        source={"content": {"m.mentions": {"user_ids": ["@bot:matrix.org"]}}},
    )

    await channel._on_message(room, event)

    assert handled == ["@alice:matrix.org"]
    assert client.typing_calls == [("!room:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)]


@pytest.mark.asyncio
async def test_on_message_mention_policy_allows_direct_room_without_mentions() -> None:
    channel = MatrixChannel(_make_config(group_policy="mention"), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["sender_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!dm:matrix.org", display_name="DM", member_count=2)
    event = SimpleNamespace(sender="@alice:matrix.org", body="Hello", source={"content": {}})

    await channel._on_message(room, event)

    assert handled == ["@alice:matrix.org"]
    assert client.typing_calls == [("!dm:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)]


@pytest.mark.asyncio
async def test_on_message_allowlist_policy_requires_room_id() -> None:
    channel = MatrixChannel(
        _make_config(group_policy="allowlist", group_allow_from=["!allowed:matrix.org"]),
        MessageBus(),
    )
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["chat_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    denied_room = SimpleNamespace(room_id="!denied:matrix.org", display_name="Denied", member_count=3)
    event = SimpleNamespace(sender="@alice:matrix.org", body="Hello", source={"content": {}})

    await channel._on_message(denied_room, event)

    allowed_room = SimpleNamespace(
        room_id="!allowed:matrix.org",
        display_name="Allowed",
        member_count=3,
    )
    await channel._on_message(allowed_room, event)

    assert handled == ["!allowed:matrix.org"]
    assert client.typing_calls == [("!allowed:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)]


@pytest.mark.asyncio
async def test_on_message_room_mention_requires_opt_in() -> None:
    channel = MatrixChannel(_make_config(group_policy="mention"), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[str] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs["sender_id"])

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=3)
    room_mention_event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="Hello everyone",
        source={"content": {"m.mentions": {"room": True}}},
    )

    await channel._on_message(room, room_mention_event)
    assert handled == []
    assert client.typing_calls == []

    channel.config.allow_room_mentions = True
    await channel._on_message(room, room_mention_event)

    assert handled == ["@alice:matrix.org"]
    assert client.typing_calls == [("!room:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)]


@pytest.mark.asyncio
async def test_on_message_sets_thread_metadata_when_threaded_event() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=3)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="Hello",
        event_id="$reply1",
        source={
            "content": {
                "m.relates_to": {
                    "rel_type": "m.thread",
                    "event_id": "$root1",
                }
            }
        },
    )

    await channel._on_message(room, event)

    assert len(handled) == 1
    metadata = handled[0]["metadata"]
    assert metadata["thread_root_event_id"] == "$root1"
    assert metadata["thread_reply_to_event_id"] == "$reply1"
    assert metadata["event_id"] == "$reply1"


@pytest.mark.asyncio
async def test_on_media_message_downloads_attachment_and_sets_metadata(
    monkeypatch, tmp_path
) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.download_bytes = b"image"
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="photo.png",
        url="mxc://example.org/mediaid",
        event_id="$event1",
        source={
            "content": {
                "msgtype": "m.image",
                "info": {"mimetype": "image/png", "size": 5},
            }
        },
    )

    await channel._on_media_message(room, event)

    assert len(client.download_calls) == 1
    assert len(handled) == 1
    assert client.typing_calls == [("!room:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)]
    media_paths = handled[0]["media"]
    assert isinstance(media_paths, list) and len(media_paths) == 1
    media_path = Path(media_paths[0])
    assert media_path.is_file()
    assert media_path.read_bytes() == b"image"
    metadata = handled[0]["metadata"]
    attachments = metadata["attachments"]
    assert isinstance(attachments, list) and len(attachments) == 1
    assert attachments[0]["type"] == "image"
    assert attachments[0]["mxc_url"] == "mxc://example.org/mediaid"
    assert attachments[0]["path"] == str(media_path)
    assert "[attachment: " in handled[0]["content"]


@pytest.mark.asyncio
async def test_on_media_message_sets_thread_metadata_when_threaded_event(
    monkeypatch, tmp_path
) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.download_bytes = b"image"
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="photo.png",
        url="mxc://example.org/mediaid",
        event_id="$event1",
        source={
            "content": {
                "msgtype": "m.image",
                "info": {"mimetype": "image/png", "size": 5},
                "m.relates_to": {
                    "rel_type": "m.thread",
                    "event_id": "$root1",
                },
            }
        },
    )

    await channel._on_media_message(room, event)

    assert len(handled) == 1
    metadata = handled[0]["metadata"]
    assert metadata["thread_root_event_id"] == "$root1"
    assert metadata["thread_reply_to_event_id"] == "$event1"
    assert metadata["event_id"] == "$event1"


@pytest.mark.asyncio
async def test_on_media_message_respects_declared_size_limit(
    monkeypatch, tmp_path
) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    channel = MatrixChannel(_make_config(max_media_bytes=3), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="large.bin",
        url="mxc://example.org/large",
        event_id="$event2",
        source={"content": {"msgtype": "m.file", "info": {"size": 10}}},
    )

    await channel._on_media_message(room, event)

    assert client.download_calls == []
    assert len(handled) == 1
    assert handled[0]["media"] == []
    assert handled[0]["metadata"]["attachments"] == []
    assert "[attachment: large.bin - too large]" in handled[0]["content"]


@pytest.mark.asyncio
async def test_on_media_message_uses_server_limit_when_smaller_than_local_limit(
    monkeypatch, tmp_path
) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    channel = MatrixChannel(_make_config(max_media_bytes=10), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.content_repository_config_response = SimpleNamespace(upload_size=3)
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="large.bin",
        url="mxc://example.org/large",
        event_id="$event2_server",
        source={"content": {"msgtype": "m.file", "info": {"size": 5}}},
    )

    await channel._on_media_message(room, event)

    assert client.download_calls == []
    assert len(handled) == 1
    assert handled[0]["media"] == []
    assert handled[0]["metadata"]["attachments"] == []
    assert "[attachment: large.bin - too large]" in handled[0]["content"]


@pytest.mark.asyncio
async def test_on_media_message_handles_download_error(monkeypatch, tmp_path) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.download_response = matrix_module.DownloadError("download failed")
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="photo.png",
        url="mxc://example.org/mediaid",
        event_id="$event3",
        source={"content": {"msgtype": "m.image"}},
    )

    await channel._on_media_message(room, event)

    assert len(client.download_calls) == 1
    assert len(handled) == 1
    assert handled[0]["media"] == []
    assert handled[0]["metadata"]["attachments"] == []
    assert "[attachment: photo.png - download failed]" in handled[0]["content"]


@pytest.mark.asyncio
async def test_on_media_message_decrypts_encrypted_media(monkeypatch, tmp_path) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)
    monkeypatch.setattr(
        matrix_module,
        "decrypt_attachment",
        lambda ciphertext, key, sha256, iv: b"plain",
    )
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.download_bytes = b"cipher"
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="secret.txt",
        url="mxc://example.org/encrypted",
        event_id="$event4",
        key={"k": "key"},
        hashes={"sha256": "hash"},
        iv="iv",
        source={"content": {"msgtype": "m.file", "info": {"size": 6}}},
    )

    await channel._on_media_message(room, event)

    assert len(handled) == 1
    media_path = Path(handled[0]["media"][0])
    assert media_path.read_bytes() == b"plain"
    attachment = handled[0]["metadata"]["attachments"][0]
    assert attachment["encrypted"] is True
    assert attachment["size_bytes"] == 5


@pytest.mark.asyncio
async def test_on_media_message_handles_decrypt_error(monkeypatch, tmp_path) -> None:
    monkeypatch.setattr("nanobot.channels.matrix.get_data_dir", lambda: tmp_path)

    def _raise(*args, **kwargs):
        raise matrix_module.EncryptionError("boom")

    monkeypatch.setattr(matrix_module, "decrypt_attachment", _raise)
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.download_bytes = b"cipher"
    channel.client = client
    handled: list[dict[str, object]] = []

    async def _fake_handle_message(**kwargs) -> None:
        handled.append(kwargs)

    channel._handle_message = _fake_handle_message  # type: ignore[method-assign]
    room = SimpleNamespace(room_id="!room:matrix.org", display_name="Test room", member_count=2)
    event = SimpleNamespace(
        sender="@alice:matrix.org",
        body="secret.txt",
        url="mxc://example.org/encrypted",
        event_id="$event5",
        key={"k": "key"},
        hashes={"sha256": "hash"},
        iv="iv",
        source={"content": {"msgtype": "m.file"}},
    )

    await channel._on_media_message(room, event)

    assert len(handled) == 1
    assert handled[0]["media"] == []
    assert handled[0]["metadata"]["attachments"] == []
    assert "[attachment: secret.txt - download failed]" in handled[0]["content"]


@pytest.mark.asyncio
async def test_send_clears_typing_after_send() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content="Hi")
    )

    assert len(client.room_send_calls) == 1
    assert client.room_send_calls[0]["content"] == {
        "msgtype": "m.text",
        "body": "Hi",
        "m.mentions": {},
    }
    assert client.room_send_calls[0]["ignore_unverified_devices"] is True
    assert client.typing_calls[-1] == ("!room:matrix.org", False, TYPING_NOTICE_TIMEOUT_MS)


@pytest.mark.asyncio
async def test_send_uploads_media_and_sends_file_event(tmp_path) -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    file_path = tmp_path / "test.txt"
    file_path.write_text("hello", encoding="utf-8")

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="Please review.",
            media=[str(file_path)],
        )
    )

    assert len(client.upload_calls) == 1
    assert not isinstance(client.upload_calls[0]["data_provider"], (bytes, bytearray))
    assert hasattr(client.upload_calls[0]["data_provider"], "read")
    assert client.upload_calls[0]["filename"] == "test.txt"
    assert client.upload_calls[0]["filesize"] == 5
    assert len(client.room_send_calls) == 2
    assert client.room_send_calls[0]["content"]["msgtype"] == "m.file"
    assert client.room_send_calls[0]["content"]["url"] == "mxc://example.org/uploaded"
    assert client.room_send_calls[1]["content"]["body"] == "Please review."


@pytest.mark.asyncio
async def test_send_adds_thread_relates_to_for_thread_metadata() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    metadata = {
        "thread_root_event_id": "$root1",
        "thread_reply_to_event_id": "$reply1",
    }

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="Hi",
            metadata=metadata,
        )
    )

    content = client.room_send_calls[0]["content"]
    assert content["m.relates_to"] == {
        "rel_type": "m.thread",
        "event_id": "$root1",
        "m.in_reply_to": {"event_id": "$reply1"},
        "is_falling_back": True,
    }


@pytest.mark.asyncio
async def test_send_uses_encrypted_media_payload_in_encrypted_room(tmp_path) -> None:
    channel = MatrixChannel(_make_config(e2ee_enabled=True), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.rooms["!encrypted:matrix.org"] = SimpleNamespace(encrypted=True)
    channel.client = client
    file_path = tmp_path / "secret.txt"
    file_path.write_text("topsecret", encoding="utf-8")

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!encrypted:matrix.org",
            content="",
            media=[str(file_path)],
        )
    )

    assert len(client.upload_calls) == 1
    assert client.upload_calls[0]["encrypt"] is True
    assert len(client.room_send_calls) == 1
    content = client.room_send_calls[0]["content"]
    assert content["msgtype"] == "m.file"
    assert "file" in content
    assert "url" not in content
    assert content["file"]["url"] == "mxc://example.org/uploaded"
    assert content["file"]["hashes"]["sha256"] == "hash"


@pytest.mark.asyncio
async def test_send_does_not_parse_attachment_marker_without_media(tmp_path) -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    missing_path = tmp_path / "missing.txt"

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content=f"[attachment: {missing_path}]",
        )
    )

    assert client.upload_calls == []
    assert len(client.room_send_calls) == 1
    assert client.room_send_calls[0]["content"]["body"] == f"[attachment: {missing_path}]"


@pytest.mark.asyncio
async def test_send_passes_thread_relates_to_to_attachment_upload(monkeypatch) -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    channel._server_upload_limit_checked = True
    channel._server_upload_limit_bytes = None
    captured: dict[str, object] = {}

    async def _fake_upload_and_send_attachment(
        *,
        room_id: str,
        path: Path,
        limit_bytes: int,
        relates_to: dict[str, object] | None = None,
    ) -> str | None:
        captured["relates_to"] = relates_to
        return None

    monkeypatch.setattr(channel, "_upload_and_send_attachment", _fake_upload_and_send_attachment)
    metadata = {
        "thread_root_event_id": "$root1",
        "thread_reply_to_event_id": "$reply1",
    }

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="Hi",
            media=["/tmp/fake.txt"],
            metadata=metadata,
        )
    )

    assert captured["relates_to"] == {
        "rel_type": "m.thread",
        "event_id": "$root1",
        "m.in_reply_to": {"event_id": "$reply1"},
        "is_falling_back": True,
    }


@pytest.mark.asyncio
async def test_send_workspace_restriction_blocks_external_attachment(tmp_path) -> None:
    workspace = tmp_path / "workspace"
    workspace.mkdir()
    file_path = tmp_path / "external.txt"
    file_path.write_text("outside", encoding="utf-8")
    channel = MatrixChannel(
        _make_config(),
        MessageBus(),
        restrict_to_workspace=True,
        workspace=workspace,
    )
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="",
            media=[str(file_path)],
        )
    )

    assert client.upload_calls == []
    assert len(client.room_send_calls) == 1
    assert client.room_send_calls[0]["content"]["body"] == "[attachment: external.txt - upload failed]"


@pytest.mark.asyncio
async def test_send_handles_upload_exception_and_reports_failure(tmp_path) -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.raise_on_upload = True
    channel.client = client
    file_path = tmp_path / "broken.txt"
    file_path.write_text("hello", encoding="utf-8")

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="Please review.",
            media=[str(file_path)],
        )
    )

    assert len(client.upload_calls) == 0
    assert len(client.room_send_calls) == 1
    assert (
        client.room_send_calls[0]["content"]["body"]
        == "Please review.\n[attachment: broken.txt - upload failed]"
    )


@pytest.mark.asyncio
async def test_send_uses_server_upload_limit_when_smaller_than_local_limit(tmp_path) -> None:
    channel = MatrixChannel(_make_config(max_media_bytes=10), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.content_repository_config_response = SimpleNamespace(upload_size=3)
    channel.client = client
    file_path = tmp_path / "tiny.txt"
    file_path.write_text("hello", encoding="utf-8")

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="",
            media=[str(file_path)],
        )
    )

    assert client.upload_calls == []
    assert len(client.room_send_calls) == 1
    assert client.room_send_calls[0]["content"]["body"] == "[attachment: tiny.txt - too large]"


@pytest.mark.asyncio
async def test_send_blocks_all_outbound_media_when_limit_is_zero(tmp_path) -> None:
    channel = MatrixChannel(_make_config(max_media_bytes=0), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    file_path = tmp_path / "empty.txt"
    file_path.write_bytes(b"")

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="",
            media=[str(file_path)],
        )
    )

    assert client.upload_calls == []
    assert len(client.room_send_calls) == 1
    assert client.room_send_calls[0]["content"]["body"] == "[attachment: empty.txt - too large]"


@pytest.mark.asyncio
async def test_send_omits_ignore_unverified_devices_when_e2ee_disabled() -> None:
    channel = MatrixChannel(_make_config(e2ee_enabled=False), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content="Hi")
    )

    assert len(client.room_send_calls) == 1
    assert "ignore_unverified_devices" not in client.room_send_calls[0]


@pytest.mark.asyncio
async def test_send_stops_typing_keepalive_task() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    channel._running = True

    await channel._start_typing_keepalive("!room:matrix.org")
    assert "!room:matrix.org" in channel._typing_tasks

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content="Hi")
    )

    assert "!room:matrix.org" not in channel._typing_tasks
    assert client.typing_calls[-1] == ("!room:matrix.org", False, TYPING_NOTICE_TIMEOUT_MS)


@pytest.mark.asyncio
async def test_send_progress_keeps_typing_keepalive_running() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    channel._running = True

    await channel._start_typing_keepalive("!room:matrix.org")
    assert "!room:matrix.org" in channel._typing_tasks

    await channel.send(
        OutboundMessage(
            channel="matrix",
            chat_id="!room:matrix.org",
            content="working...",
            metadata={"_progress": True, "_progress_kind": "reasoning"},
        )
    )

    assert "!room:matrix.org" in channel._typing_tasks
    assert client.typing_calls[-1] == ("!room:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)


@pytest.mark.asyncio
async def test_send_clears_typing_when_send_fails() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    client.raise_on_send = True
    channel.client = client

    with pytest.raises(RuntimeError, match="send failed"):
        await channel.send(
            OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content="Hi")
        )

    assert client.typing_calls[-1] == ("!room:matrix.org", False, TYPING_NOTICE_TIMEOUT_MS)


@pytest.mark.asyncio
async def test_send_adds_formatted_body_for_markdown() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    markdown_text = "# Headline\n\n- [x] done\n\n| A | B |\n| - | - |\n| 1 | 2 |"

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content=markdown_text)
    )

    content = client.room_send_calls[0]["content"]
    assert content["msgtype"] == "m.text"
    assert content["body"] == markdown_text
    assert content["m.mentions"] == {}
    assert content["format"] == MATRIX_HTML_FORMAT
    assert "<h1>Headline</h1>" in str(content["formatted_body"])
    assert "<table>" in str(content["formatted_body"])
    assert "<li>[x] done</li>" in str(content["formatted_body"])


@pytest.mark.asyncio
async def test_send_adds_formatted_body_for_inline_url_superscript_subscript() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    markdown_text = "Visit https://example.com and x^2^ plus H~2~O."

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content=markdown_text)
    )

    content = client.room_send_calls[0]["content"]
    assert content["msgtype"] == "m.text"
    assert content["body"] == markdown_text
    assert content["m.mentions"] == {}
    assert content["format"] == MATRIX_HTML_FORMAT
    assert '<a href="https://example.com" rel="noopener noreferrer">' in str(
        content["formatted_body"]
    )
    assert "<sup>2</sup>" in str(content["formatted_body"])
    assert "<sub>2</sub>" in str(content["formatted_body"])


@pytest.mark.asyncio
async def test_send_sanitizes_disallowed_link_scheme() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    markdown_text = "[click](javascript:alert(1))"

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content=markdown_text)
    )

    formatted_body = str(client.room_send_calls[0]["content"]["formatted_body"])
    assert "javascript:" not in formatted_body
    assert "<a" in formatted_body
    assert "href=" not in formatted_body


def test_matrix_html_cleaner_strips_event_handlers_and_script_tags() -> None:
    dirty_html = '<a href="https://example.com" onclick="evil()">x</a><script>alert(1)</script>'
    cleaned_html = matrix_module.MATRIX_HTML_CLEANER.clean(dirty_html)

    assert "<script" not in cleaned_html
    assert "onclick=" not in cleaned_html
    assert '<a href="https://example.com"' in cleaned_html


@pytest.mark.asyncio
async def test_send_keeps_only_mxc_image_sources() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    markdown_text = "![ok](mxc://example.org/mediaid) ![no](https://example.com/a.png)"

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content=markdown_text)
    )

    formatted_body = str(client.room_send_calls[0]["content"]["formatted_body"])
    assert 'src="mxc://example.org/mediaid"' in formatted_body
    assert 'src="https://example.com/a.png"' not in formatted_body


@pytest.mark.asyncio
async def test_send_falls_back_to_plaintext_when_markdown_render_fails(monkeypatch) -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client

    def _raise(text: str) -> str:
        raise RuntimeError("boom")

    monkeypatch.setattr(matrix_module, "MATRIX_MARKDOWN", _raise)
    markdown_text = "# Headline"

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content=markdown_text)
    )

    content = client.room_send_calls[0]["content"]
    assert content == {"msgtype": "m.text", "body": markdown_text, "m.mentions": {}}


@pytest.mark.asyncio
async def test_send_keeps_plaintext_only_for_plain_text() -> None:
    channel = MatrixChannel(_make_config(), MessageBus())
    client = _FakeAsyncClient("", "", "", None)
    channel.client = client
    text = "just a normal sentence without markdown markers"

    await channel.send(
        OutboundMessage(channel="matrix", chat_id="!room:matrix.org", content=text)
    )

    assert client.room_send_calls[0]["content"] == {
        "msgtype": "m.text",
        "body": text,
        "m.mentions": {},
    }
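
# --- Illustrative note (added commentary, not part of the original tests) ---
# Outbound payload shapes asserted above: plain text carries only "body" and an
# empty "m.mentions"; markdown additionally carries a sanitized HTML rendering.
# Sketch of the markdown case (MATRIX_HTML_FORMAT is the channel's custom-HTML
# format identifier; its exact value lives in nanobot.channels.matrix):
#
#     {
#         "msgtype": "m.text",
#         "body": "# Headline",
#         "m.mentions": {},
#         "format": MATRIX_HTML_FORMAT,
#         "formatted_body": "<h1>Headline</h1>",
#     }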
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_matrix_channel.py", "license": "MIT License", "lines": 1006, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_message_tool.py
import pytest

from nanobot.agent.tools.message import MessageTool


@pytest.mark.asyncio
async def test_message_tool_returns_error_when_no_target_context() -> None:
    tool = MessageTool()
    result = await tool.execute(content="test")
    assert result == "Error: No target channel/chat specified"
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_message_tool.py", "license": "MIT License", "lines": 7, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_message_tool_suppress.py
"""Test message tool suppress logic for final replies.""" from pathlib import Path from unittest.mock import AsyncMock, MagicMock import pytest from nanobot.agent.loop import AgentLoop from nanobot.agent.tools.message import MessageTool from nanobot.bus.events import InboundMessage, OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse, ToolCallRequest def _make_loop(tmp_path: Path) -> AgentLoop: bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" return AgentLoop(bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10) class TestMessageToolSuppressLogic: """Final reply suppressed only when message tool sends to the same target.""" @pytest.mark.asyncio async def test_suppress_when_sent_to_same_target(self, tmp_path: Path) -> None: loop = _make_loop(tmp_path) tool_call = ToolCallRequest( id="call1", name="message", arguments={"content": "Hello", "channel": "feishu", "chat_id": "chat123"}, ) calls = iter([ LLMResponse(content="", tool_calls=[tool_call]), LLMResponse(content="Done", tool_calls=[]), ]) loop.provider.chat = AsyncMock(side_effect=lambda *a, **kw: next(calls)) loop.tools.get_definitions = MagicMock(return_value=[]) sent: list[OutboundMessage] = [] mt = loop.tools.get("message") if isinstance(mt, MessageTool): mt.set_send_callback(AsyncMock(side_effect=lambda m: sent.append(m))) msg = InboundMessage(channel="feishu", sender_id="user1", chat_id="chat123", content="Send") result = await loop._process_message(msg) assert len(sent) == 1 assert result is None # suppressed @pytest.mark.asyncio async def test_not_suppress_when_sent_to_different_target(self, tmp_path: Path) -> None: loop = _make_loop(tmp_path) tool_call = ToolCallRequest( id="call1", name="message", arguments={"content": "Email content", "channel": "email", "chat_id": "user@example.com"}, ) calls = iter([ LLMResponse(content="", tool_calls=[tool_call]), LLMResponse(content="I've sent the email.", tool_calls=[]), ]) loop.provider.chat = AsyncMock(side_effect=lambda *a, **kw: next(calls)) loop.tools.get_definitions = MagicMock(return_value=[]) sent: list[OutboundMessage] = [] mt = loop.tools.get("message") if isinstance(mt, MessageTool): mt.set_send_callback(AsyncMock(side_effect=lambda m: sent.append(m))) msg = InboundMessage(channel="feishu", sender_id="user1", chat_id="chat123", content="Send email") result = await loop._process_message(msg) assert len(sent) == 1 assert sent[0].channel == "email" assert result is not None # not suppressed assert result.channel == "feishu" @pytest.mark.asyncio async def test_not_suppress_when_no_message_tool_used(self, tmp_path: Path) -> None: loop = _make_loop(tmp_path) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="Hello!", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) msg = InboundMessage(channel="feishu", sender_id="user1", chat_id="chat123", content="Hi") result = await loop._process_message(msg) assert result is not None assert "Hello" in result.content class TestMessageToolTurnTracking: def test_sent_in_turn_tracks_same_target(self) -> None: tool = MessageTool() tool.set_context("feishu", "chat1") assert not tool._sent_in_turn tool._sent_in_turn = True assert tool._sent_in_turn def test_start_turn_resets(self) -> None: tool = MessageTool() tool._sent_in_turn = True tool.start_turn() assert not tool._sent_in_turn
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_message_tool_suppress.py", "license": "MIT License", "lines": 81, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_task_cancel.py
"""Tests for /stop task cancellation.""" from __future__ import annotations import asyncio from unittest.mock import AsyncMock, MagicMock, patch import pytest def _make_loop(): """Create a minimal AgentLoop with mocked dependencies.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.queue import MessageBus bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" workspace = MagicMock() workspace.__truediv__ = MagicMock(return_value=MagicMock()) with patch("nanobot.agent.loop.ContextBuilder"), \ patch("nanobot.agent.loop.SessionManager"), \ patch("nanobot.agent.loop.SubagentManager") as MockSubMgr: MockSubMgr.return_value.cancel_by_session = AsyncMock(return_value=0) loop = AgentLoop(bus=bus, provider=provider, workspace=workspace) return loop, bus class TestHandleStop: @pytest.mark.asyncio async def test_stop_no_active_task(self): from nanobot.bus.events import InboundMessage loop, bus = _make_loop() msg = InboundMessage(channel="test", sender_id="u1", chat_id="c1", content="/stop") await loop._handle_stop(msg) out = await asyncio.wait_for(bus.consume_outbound(), timeout=1.0) assert "No active task" in out.content @pytest.mark.asyncio async def test_stop_cancels_active_task(self): from nanobot.bus.events import InboundMessage loop, bus = _make_loop() cancelled = asyncio.Event() async def slow_task(): try: await asyncio.sleep(60) except asyncio.CancelledError: cancelled.set() raise task = asyncio.create_task(slow_task()) await asyncio.sleep(0) loop._active_tasks["test:c1"] = [task] msg = InboundMessage(channel="test", sender_id="u1", chat_id="c1", content="/stop") await loop._handle_stop(msg) assert cancelled.is_set() out = await asyncio.wait_for(bus.consume_outbound(), timeout=1.0) assert "stopped" in out.content.lower() @pytest.mark.asyncio async def test_stop_cancels_multiple_tasks(self): from nanobot.bus.events import InboundMessage loop, bus = _make_loop() events = [asyncio.Event(), asyncio.Event()] async def slow(idx): try: await asyncio.sleep(60) except asyncio.CancelledError: events[idx].set() raise tasks = [asyncio.create_task(slow(i)) for i in range(2)] await asyncio.sleep(0) loop._active_tasks["test:c1"] = tasks msg = InboundMessage(channel="test", sender_id="u1", chat_id="c1", content="/stop") await loop._handle_stop(msg) assert all(e.is_set() for e in events) out = await asyncio.wait_for(bus.consume_outbound(), timeout=1.0) assert "2 task" in out.content class TestDispatch: @pytest.mark.asyncio async def test_dispatch_processes_and_publishes(self): from nanobot.bus.events import InboundMessage, OutboundMessage loop, bus = _make_loop() msg = InboundMessage(channel="test", sender_id="u1", chat_id="c1", content="hello") loop._process_message = AsyncMock( return_value=OutboundMessage(channel="test", chat_id="c1", content="hi") ) await loop._dispatch(msg) out = await asyncio.wait_for(bus.consume_outbound(), timeout=1.0) assert out.content == "hi" @pytest.mark.asyncio async def test_processing_lock_serializes(self): from nanobot.bus.events import InboundMessage, OutboundMessage loop, bus = _make_loop() order = [] async def mock_process(m, **kwargs): order.append(f"start-{m.content}") await asyncio.sleep(0.05) order.append(f"end-{m.content}") return OutboundMessage(channel="test", chat_id="c1", content=m.content) loop._process_message = mock_process msg1 = InboundMessage(channel="test", sender_id="u1", chat_id="c1", content="a") msg2 = InboundMessage(channel="test", sender_id="u1", chat_id="c1", content="b") t1 = 
asyncio.create_task(loop._dispatch(msg1)) t2 = asyncio.create_task(loop._dispatch(msg2)) await asyncio.gather(t1, t2) assert order == ["start-a", "end-a", "start-b", "end-b"] class TestSubagentCancellation: @pytest.mark.asyncio async def test_cancel_by_session(self): from nanobot.agent.subagent import SubagentManager from nanobot.bus.queue import MessageBus bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" mgr = SubagentManager(provider=provider, workspace=MagicMock(), bus=bus) cancelled = asyncio.Event() async def slow(): try: await asyncio.sleep(60) except asyncio.CancelledError: cancelled.set() raise task = asyncio.create_task(slow()) await asyncio.sleep(0) mgr._running_tasks["sub-1"] = task mgr._session_tasks["test:c1"] = {"sub-1"} count = await mgr.cancel_by_session("test:c1") assert count == 1 assert cancelled.is_set() @pytest.mark.asyncio async def test_cancel_by_session_no_tasks(self): from nanobot.agent.subagent import SubagentManager from nanobot.bus.queue import MessageBus bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" mgr = SubagentManager(provider=provider, workspace=MagicMock(), bus=bus) assert await mgr.cancel_by_session("nonexistent") == 0
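
# --- Illustrative sketch (added commentary, not part of the original tests) ---
# The cancellation pattern these tests rely on: cancel every tracked task for a
# session key, await them so CancelledError handlers run, and report the count.
# Hypothetical helper mirroring that flow:
#
#     async def cancel_all(tasks: list[asyncio.Task]) -> int:
#         for task in tasks:
#             task.cancel()
#         await asyncio.gather(*tasks, return_exceptions=True)
#         return len(tasks)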
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_task_cancel.py", "license": "MIT License", "lines": 128, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_context_prompt_cache.py
"""Tests for cache-friendly prompt construction.""" from __future__ import annotations from datetime import datetime as real_datetime from pathlib import Path import datetime as datetime_module from nanobot.agent.context import ContextBuilder class _FakeDatetime(real_datetime): current = real_datetime(2026, 2, 24, 13, 59) @classmethod def now(cls, tz=None): # type: ignore[override] return cls.current def _make_workspace(tmp_path: Path) -> Path: workspace = tmp_path / "workspace" workspace.mkdir(parents=True) return workspace def test_system_prompt_stays_stable_when_clock_changes(tmp_path, monkeypatch) -> None: """System prompt should not change just because wall clock minute changes.""" monkeypatch.setattr(datetime_module, "datetime", _FakeDatetime) workspace = _make_workspace(tmp_path) builder = ContextBuilder(workspace) _FakeDatetime.current = real_datetime(2026, 2, 24, 13, 59) prompt1 = builder.build_system_prompt() _FakeDatetime.current = real_datetime(2026, 2, 24, 14, 0) prompt2 = builder.build_system_prompt() assert prompt1 == prompt2 def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None: """Runtime metadata should be a separate user message before the actual user message.""" workspace = _make_workspace(tmp_path) builder = ContextBuilder(workspace) messages = builder.build_messages( history=[], current_message="Return exactly: OK", channel="cli", chat_id="direct", ) assert messages[0]["role"] == "system" assert "## Current Session" not in messages[0]["content"] assert messages[-2]["role"] == "user" runtime_content = messages[-2]["content"] assert isinstance(runtime_content, str) assert ContextBuilder._RUNTIME_CONTEXT_TAG in runtime_content assert "Current Time:" in runtime_content assert "Channel: cli" in runtime_content assert "Chat ID: direct" in runtime_content assert messages[-1]["role"] == "user" assert messages[-1]["content"] == "Return exactly: OK"
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_context_prompt_cache.py", "license": "MIT License", "lines": 46, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_cron_commands.py
from typer.testing import CliRunner

from nanobot.cli.commands import app

runner = CliRunner()


def test_cron_add_rejects_invalid_timezone(monkeypatch, tmp_path) -> None:
    monkeypatch.setattr("nanobot.config.loader.get_data_dir", lambda: tmp_path)

    result = runner.invoke(
        app,
        [
            "cron",
            "add",
            "--name",
            "demo",
            "--message",
            "hello",
            "--cron",
            "0 9 * * *",
            "--tz",
            "America/Vancovuer",
        ],
    )

    assert result.exit_code == 1
    assert "Error: unknown timezone 'America/Vancovuer'" in result.stdout
    assert not (tmp_path / "cron" / "jobs.json").exists()
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_cron_commands.py", "license": "MIT License", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_cron_service.py
import asyncio

import pytest

from nanobot.cron.service import CronService
from nanobot.cron.types import CronSchedule


def test_add_job_rejects_unknown_timezone(tmp_path) -> None:
    service = CronService(tmp_path / "cron" / "jobs.json")

    with pytest.raises(ValueError, match="unknown timezone 'America/Vancovuer'"):
        service.add_job(
            name="tz typo",
            schedule=CronSchedule(kind="cron", expr="0 9 * * *", tz="America/Vancovuer"),
            message="hello",
        )

    assert service.list_jobs(include_disabled=True) == []


def test_add_job_accepts_valid_timezone(tmp_path) -> None:
    service = CronService(tmp_path / "cron" / "jobs.json")

    job = service.add_job(
        name="tz ok",
        schedule=CronSchedule(kind="cron", expr="0 9 * * *", tz="America/Vancouver"),
        message="hello",
    )

    assert job.schedule.tz == "America/Vancouver"
    assert job.state.next_run_at_ms is not None


@pytest.mark.asyncio
async def test_running_service_honors_external_disable(tmp_path) -> None:
    store_path = tmp_path / "cron" / "jobs.json"
    called: list[str] = []

    async def on_job(job) -> None:
        called.append(job.id)

    service = CronService(store_path, on_job=on_job)
    job = service.add_job(
        name="external-disable",
        schedule=CronSchedule(kind="every", every_ms=200),
        message="hello",
    )

    await service.start()
    try:
        external = CronService(store_path)
        updated = external.enable_job(job.id, enabled=False)
        assert updated is not None
        assert updated.enabled is False

        await asyncio.sleep(0.35)
        assert called == []
    finally:
        service.stop()
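
# --- Illustrative sketch (added commentary, not part of the original tests) ---
# Timezone validation of the kind these tests expect can be done with the
# standard library (an assumption about the implementation, which may differ):
#
#     from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
#
#     def validate_tz(name: str) -> None:
#         try:
#             ZoneInfo(name)
#         except ZoneInfoNotFoundError:
#             raise ValueError(f"unknown timezone '{name}'")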
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_cron_service.py", "license": "MIT License", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_heartbeat_service.py
import asyncio

import pytest

from nanobot.heartbeat.service import HeartbeatService
from nanobot.providers.base import LLMResponse, ToolCallRequest


class DummyProvider:
    def __init__(self, responses: list[LLMResponse]):
        self._responses = list(responses)

    async def chat(self, *args, **kwargs) -> LLMResponse:
        if self._responses:
            return self._responses.pop(0)
        return LLMResponse(content="", tool_calls=[])


@pytest.mark.asyncio
async def test_start_is_idempotent(tmp_path) -> None:
    provider = DummyProvider([])
    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
        interval_s=9999,
        enabled=True,
    )

    await service.start()
    first_task = service._task
    await service.start()

    assert service._task is first_task

    service.stop()
    await asyncio.sleep(0)


@pytest.mark.asyncio
async def test_decide_returns_skip_when_no_tool_call(tmp_path) -> None:
    provider = DummyProvider([LLMResponse(content="no tool call", tool_calls=[])])
    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
    )

    action, tasks = await service._decide("heartbeat content")

    assert action == "skip"
    assert tasks == ""


@pytest.mark.asyncio
async def test_trigger_now_executes_when_decision_is_run(tmp_path) -> None:
    (tmp_path / "HEARTBEAT.md").write_text("- [ ] do thing", encoding="utf-8")
    provider = DummyProvider([
        LLMResponse(
            content="",
            tool_calls=[
                ToolCallRequest(
                    id="hb_1",
                    name="heartbeat",
                    arguments={"action": "run", "tasks": "check open tasks"},
                )
            ],
        )
    ])
    called_with: list[str] = []

    async def _on_execute(tasks: str) -> str:
        called_with.append(tasks)
        return "done"

    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
        on_execute=_on_execute,
    )

    result = await service.trigger_now()

    assert result == "done"
    assert called_with == ["check open tasks"]


@pytest.mark.asyncio
async def test_trigger_now_returns_none_when_decision_is_skip(tmp_path) -> None:
    (tmp_path / "HEARTBEAT.md").write_text("- [ ] do thing", encoding="utf-8")
    provider = DummyProvider([
        LLMResponse(
            content="",
            tool_calls=[
                ToolCallRequest(
                    id="hb_1",
                    name="heartbeat",
                    arguments={"action": "skip"},
                )
            ],
        )
    ])

    async def _on_execute(tasks: str) -> str:
        return tasks

    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
        on_execute=_on_execute,
    )

    assert await service.trigger_now() is None
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_heartbeat_service.py", "license": "MIT License", "lines": 90, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:tests/test_memory_consolidation_types.py
"""Test MemoryStore.consolidate() handles non-string tool call arguments. Regression test for https://github.com/HKUDS/nanobot/issues/1042 When memory consolidation receives dict values instead of strings from the LLM tool call response, it should serialize them to JSON instead of raising TypeError. """ import json from pathlib import Path from unittest.mock import AsyncMock, MagicMock import pytest from nanobot.agent.memory import MemoryStore from nanobot.providers.base import LLMResponse, ToolCallRequest def _make_session(message_count: int = 30, memory_window: int = 50): """Create a mock session with messages.""" session = MagicMock() session.messages = [ {"role": "user", "content": f"msg{i}", "timestamp": "2026-01-01 00:00"} for i in range(message_count) ] session.last_consolidated = 0 return session def _make_tool_response(history_entry, memory_update): """Create an LLMResponse with a save_memory tool call.""" return LLMResponse( content=None, tool_calls=[ ToolCallRequest( id="call_1", name="save_memory", arguments={ "history_entry": history_entry, "memory_update": memory_update, }, ) ], ) class TestMemoryConsolidationTypeHandling: """Test that consolidation handles various argument types correctly.""" @pytest.mark.asyncio async def test_string_arguments_work(self, tmp_path: Path) -> None: """Normal case: LLM returns string arguments.""" store = MemoryStore(tmp_path) provider = AsyncMock() provider.chat = AsyncMock( return_value=_make_tool_response( history_entry="[2026-01-01] User discussed testing.", memory_update="# Memory\nUser likes testing.", ) ) session = _make_session(message_count=60) result = await store.consolidate(session, provider, "test-model", memory_window=50) assert result is True assert store.history_file.exists() assert "[2026-01-01] User discussed testing." in store.history_file.read_text() assert "User likes testing." in store.memory_file.read_text() @pytest.mark.asyncio async def test_dict_arguments_serialized_to_json(self, tmp_path: Path) -> None: """Issue #1042: LLM returns dict instead of string — must not raise TypeError.""" store = MemoryStore(tmp_path) provider = AsyncMock() provider.chat = AsyncMock( return_value=_make_tool_response( history_entry={"timestamp": "2026-01-01", "summary": "User discussed testing."}, memory_update={"facts": ["User likes testing"], "topics": ["testing"]}, ) ) session = _make_session(message_count=60) result = await store.consolidate(session, provider, "test-model", memory_window=50) assert result is True assert store.history_file.exists() history_content = store.history_file.read_text() parsed = json.loads(history_content.strip()) assert parsed["summary"] == "User discussed testing." 
memory_content = store.memory_file.read_text() parsed_mem = json.loads(memory_content) assert "User likes testing" in parsed_mem["facts"] @pytest.mark.asyncio async def test_string_arguments_as_raw_json(self, tmp_path: Path) -> None: """Some providers return arguments as a JSON string instead of parsed dict.""" store = MemoryStore(tmp_path) provider = AsyncMock() # Simulate arguments being a JSON string (not yet parsed) response = LLMResponse( content=None, tool_calls=[ ToolCallRequest( id="call_1", name="save_memory", arguments=json.dumps({ "history_entry": "[2026-01-01] User discussed testing.", "memory_update": "# Memory\nUser likes testing.", }), ) ], ) provider.chat = AsyncMock(return_value=response) session = _make_session(message_count=60) result = await store.consolidate(session, provider, "test-model", memory_window=50) assert result is True assert "User discussed testing." in store.history_file.read_text() @pytest.mark.asyncio async def test_no_tool_call_returns_false(self, tmp_path: Path) -> None: """When LLM doesn't use the save_memory tool, return False.""" store = MemoryStore(tmp_path) provider = AsyncMock() provider.chat = AsyncMock( return_value=LLMResponse(content="I summarized the conversation.", tool_calls=[]) ) session = _make_session(message_count=60) result = await store.consolidate(session, provider, "test-model", memory_window=50) assert result is False assert not store.history_file.exists() @pytest.mark.asyncio async def test_skips_when_few_messages(self, tmp_path: Path) -> None: """Consolidation should be a no-op when messages < keep_count.""" store = MemoryStore(tmp_path) provider = AsyncMock() session = _make_session(message_count=10) result = await store.consolidate(session, provider, "test-model", memory_window=50) assert result is True provider.chat.assert_not_called()
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_memory_consolidation_types.py", "license": "MIT License", "lines": 120, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
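The suite above implies a normalization step inside MemoryStore.consolidate: tool-call argument values may arrive as str, dict, list, or a raw JSON string, and all of them must end up as writable text. A minimal sketch of that coercion under those assumptions (the helper name is hypothetical, not nanobot's actual code):

import json
from typing import Any

def _coerce_to_text(value: Any) -> str:
    # Hypothetical helper: pass strings through, JSON-serialize dicts/lists
    # instead of letting file writes raise TypeError (see issue #1042).
    if isinstance(value, str):
        return value
    return json.dumps(value, ensure_ascii=False)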
HKUDS/nanobot:nanobot/providers/custom_provider.py
"""Direct OpenAI-compatible provider — bypasses LiteLLM.""" from __future__ import annotations from typing import Any import json_repair from openai import AsyncOpenAI from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest class CustomProvider(LLMProvider): def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"): super().__init__(api_key, api_base) self.default_model = default_model self._client = AsyncOpenAI(api_key=api_key, base_url=api_base) async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None, model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, reasoning_effort: str | None = None) -> LLMResponse: kwargs: dict[str, Any] = { "model": model or self.default_model, "messages": self._sanitize_empty_content(messages), "max_tokens": max(1, max_tokens), "temperature": temperature, } if reasoning_effort: kwargs["reasoning_effort"] = reasoning_effort if tools: kwargs.update(tools=tools, tool_choice="auto") try: return self._parse(await self._client.chat.completions.create(**kwargs)) except Exception as e: return LLMResponse(content=f"Error: {e}", finish_reason="error") def _parse(self, response: Any) -> LLMResponse: choice = response.choices[0] msg = choice.message tool_calls = [ ToolCallRequest(id=tc.id, name=tc.function.name, arguments=json_repair.loads(tc.function.arguments) if isinstance(tc.function.arguments, str) else tc.function.arguments) for tc in (msg.tool_calls or []) ] u = response.usage return LLMResponse( content=msg.content, tool_calls=tool_calls, finish_reason=choice.finish_reason or "stop", usage={"prompt_tokens": u.prompt_tokens, "completion_tokens": u.completion_tokens, "total_tokens": u.total_tokens} if u else {}, reasoning_content=getattr(msg, "reasoning_content", None) or None, ) def get_default_model(self) -> str: return self.default_model
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/providers/custom_provider.py", "license": "MIT License", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
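Because CustomProvider speaks the plain OpenAI chat-completions protocol, it can point at any compatible server (vLLM, llama.cpp server, LM Studio, and similar). A usage sketch, assuming a local endpoint; the URL and model name are placeholders:

import asyncio

from nanobot.providers.custom_provider import CustomProvider

async def main() -> None:
    provider = CustomProvider(api_base="http://localhost:8000/v1", default_model="default")
    response = await provider.chat(messages=[{"role": "user", "content": "ping"}])
    print(response.content)  # error strings also land here, with finish_reason="error"

asyncio.run(main())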
HKUDS/nanobot:nanobot/providers/openai_codex_provider.py
"""OpenAI Codex Responses Provider.""" from __future__ import annotations import asyncio import hashlib import json from typing import Any, AsyncGenerator import httpx from loguru import logger from oauth_cli_kit import get_token as get_codex_token from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest DEFAULT_CODEX_URL = "https://chatgpt.com/backend-api/codex/responses" DEFAULT_ORIGINATOR = "nanobot" class OpenAICodexProvider(LLMProvider): """Use Codex OAuth to call the Responses API.""" def __init__(self, default_model: str = "openai-codex/gpt-5.1-codex"): super().__init__(api_key=None, api_base=None) self.default_model = default_model async def chat( self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None, model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, reasoning_effort: str | None = None, ) -> LLMResponse: model = model or self.default_model system_prompt, input_items = _convert_messages(messages) token = await asyncio.to_thread(get_codex_token) headers = _build_headers(token.account_id, token.access) body: dict[str, Any] = { "model": _strip_model_prefix(model), "store": False, "stream": True, "instructions": system_prompt, "input": input_items, "text": {"verbosity": "medium"}, "include": ["reasoning.encrypted_content"], "prompt_cache_key": _prompt_cache_key(messages), "tool_choice": "auto", "parallel_tool_calls": True, } if tools: body["tools"] = _convert_tools(tools) url = DEFAULT_CODEX_URL try: try: content, tool_calls, finish_reason = await _request_codex(url, headers, body, verify=True) except Exception as e: if "CERTIFICATE_VERIFY_FAILED" not in str(e): raise logger.warning("SSL certificate verification failed for Codex API; retrying with verify=False") content, tool_calls, finish_reason = await _request_codex(url, headers, body, verify=False) return LLMResponse( content=content, tool_calls=tool_calls, finish_reason=finish_reason, ) except Exception as e: return LLMResponse( content=f"Error calling Codex: {str(e)}", finish_reason="error", ) def get_default_model(self) -> str: return self.default_model def _strip_model_prefix(model: str) -> str: if model.startswith("openai-codex/") or model.startswith("openai_codex/"): return model.split("/", 1)[1] return model def _build_headers(account_id: str, token: str) -> dict[str, str]: return { "Authorization": f"Bearer {token}", "chatgpt-account-id": account_id, "OpenAI-Beta": "responses=experimental", "originator": DEFAULT_ORIGINATOR, "User-Agent": "nanobot (python)", "accept": "text/event-stream", "content-type": "application/json", } async def _request_codex( url: str, headers: dict[str, str], body: dict[str, Any], verify: bool, ) -> tuple[str, list[ToolCallRequest], str]: async with httpx.AsyncClient(timeout=60.0, verify=verify) as client: async with client.stream("POST", url, headers=headers, json=body) as response: if response.status_code != 200: text = await response.aread() raise RuntimeError(_friendly_error(response.status_code, text.decode("utf-8", "ignore"))) return await _consume_sse(response) def _convert_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]: """Convert OpenAI function-calling schema to Codex flat format.""" converted: list[dict[str, Any]] = [] for tool in tools: fn = (tool.get("function") or {}) if tool.get("type") == "function" else tool name = fn.get("name") if not name: continue params = fn.get("parameters") or {} converted.append({ "type": "function", "name": name, "description": fn.get("description") or "", "parameters": 
params if isinstance(params, dict) else {}, }) return converted def _convert_messages(messages: list[dict[str, Any]]) -> tuple[str, list[dict[str, Any]]]: system_prompt = "" input_items: list[dict[str, Any]] = [] for idx, msg in enumerate(messages): role = msg.get("role") content = msg.get("content") if role == "system": system_prompt = content if isinstance(content, str) else "" continue if role == "user": input_items.append(_convert_user_message(content)) continue if role == "assistant": # Handle text first. if isinstance(content, str) and content: input_items.append( { "type": "message", "role": "assistant", "content": [{"type": "output_text", "text": content}], "status": "completed", "id": f"msg_{idx}", } ) # Then handle tool calls. for tool_call in msg.get("tool_calls", []) or []: fn = tool_call.get("function") or {} call_id, item_id = _split_tool_call_id(tool_call.get("id")) call_id = call_id or f"call_{idx}" item_id = item_id or f"fc_{idx}" input_items.append( { "type": "function_call", "id": item_id, "call_id": call_id, "name": fn.get("name"), "arguments": fn.get("arguments") or "{}", } ) continue if role == "tool": call_id, _ = _split_tool_call_id(msg.get("tool_call_id")) output_text = content if isinstance(content, str) else json.dumps(content, ensure_ascii=False) input_items.append( { "type": "function_call_output", "call_id": call_id, "output": output_text, } ) continue return system_prompt, input_items def _convert_user_message(content: Any) -> dict[str, Any]: if isinstance(content, str): return {"role": "user", "content": [{"type": "input_text", "text": content}]} if isinstance(content, list): converted: list[dict[str, Any]] = [] for item in content: if not isinstance(item, dict): continue if item.get("type") == "text": converted.append({"type": "input_text", "text": item.get("text", "")}) elif item.get("type") == "image_url": url = (item.get("image_url") or {}).get("url") if url: converted.append({"type": "input_image", "image_url": url, "detail": "auto"}) if converted: return {"role": "user", "content": converted} return {"role": "user", "content": [{"type": "input_text", "text": ""}]} def _split_tool_call_id(tool_call_id: Any) -> tuple[str, str | None]: if isinstance(tool_call_id, str) and tool_call_id: if "|" in tool_call_id: call_id, item_id = tool_call_id.split("|", 1) return call_id, item_id or None return tool_call_id, None return "call_0", None def _prompt_cache_key(messages: list[dict[str, Any]]) -> str: raw = json.dumps(messages, ensure_ascii=True, sort_keys=True) return hashlib.sha256(raw.encode("utf-8")).hexdigest() async def _iter_sse(response: httpx.Response) -> AsyncGenerator[dict[str, Any], None]: buffer: list[str] = [] async for line in response.aiter_lines(): if line == "": if buffer: data_lines = [l[5:].strip() for l in buffer if l.startswith("data:")] buffer = [] if not data_lines: continue data = "\n".join(data_lines).strip() if not data or data == "[DONE]": continue try: yield json.loads(data) except Exception: continue continue buffer.append(line) async def _consume_sse(response: httpx.Response) -> tuple[str, list[ToolCallRequest], str]: content = "" tool_calls: list[ToolCallRequest] = [] tool_call_buffers: dict[str, dict[str, Any]] = {} finish_reason = "stop" async for event in _iter_sse(response): event_type = event.get("type") if event_type == "response.output_item.added": item = event.get("item") or {} if item.get("type") == "function_call": call_id = item.get("call_id") if not call_id: continue tool_call_buffers[call_id] = { "id": item.get("id") 
or "fc_0", "name": item.get("name"), "arguments": item.get("arguments") or "", } elif event_type == "response.output_text.delta": content += event.get("delta") or "" elif event_type == "response.function_call_arguments.delta": call_id = event.get("call_id") if call_id and call_id in tool_call_buffers: tool_call_buffers[call_id]["arguments"] += event.get("delta") or "" elif event_type == "response.function_call_arguments.done": call_id = event.get("call_id") if call_id and call_id in tool_call_buffers: tool_call_buffers[call_id]["arguments"] = event.get("arguments") or "" elif event_type == "response.output_item.done": item = event.get("item") or {} if item.get("type") == "function_call": call_id = item.get("call_id") if not call_id: continue buf = tool_call_buffers.get(call_id) or {} args_raw = buf.get("arguments") or item.get("arguments") or "{}" try: args = json.loads(args_raw) except Exception: args = {"raw": args_raw} tool_calls.append( ToolCallRequest( id=f"{call_id}|{buf.get('id') or item.get('id') or 'fc_0'}", name=buf.get("name") or item.get("name"), arguments=args, ) ) elif event_type == "response.completed": status = (event.get("response") or {}).get("status") finish_reason = _map_finish_reason(status) elif event_type in {"error", "response.failed"}: raise RuntimeError("Codex response failed") return content, tool_calls, finish_reason _FINISH_REASON_MAP = {"completed": "stop", "incomplete": "length", "failed": "error", "cancelled": "error"} def _map_finish_reason(status: str | None) -> str: return _FINISH_REASON_MAP.get(status or "completed", "stop") def _friendly_error(status_code: int, raw: str) -> str: if status_code == 429: return "ChatGPT usage quota exceeded or rate limit triggered. Please try again later." return f"HTTP {status_code}: {raw}"
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/providers/openai_codex_provider.py", "license": "MIT License", "lines": 264, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
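One detail worth calling out: the provider packs the Responses-API call_id and output-item id into a single OpenAI-style tool-call id ("call_id|item_id") so both survive a round trip through the message history, and _split_tool_call_id unpacks them again. A small demo of the id helpers exactly as defined above:

from nanobot.providers.openai_codex_provider import _split_tool_call_id, _strip_model_prefix

assert _split_tool_call_id("call_abc|fc_123") == ("call_abc", "fc_123")
assert _split_tool_call_id("call_abc") == ("call_abc", None)
assert _split_tool_call_id(None) == ("call_0", None)  # fallback id

# Both prefix spellings resolve to the bare model name.
assert _strip_model_prefix("openai-codex/gpt-5.1-codex") == "gpt-5.1-codex"
assert _strip_model_prefix("openai_codex/gpt-5.1-codex") == "gpt-5.1-codex"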
HKUDS/nanobot:nanobot/agent/tools/mcp.py
"""MCP client: connects to MCP servers and wraps their tools as native nanobot tools.""" import asyncio from contextlib import AsyncExitStack from typing import Any import httpx from loguru import logger from nanobot.agent.tools.base import Tool from nanobot.agent.tools.registry import ToolRegistry class MCPToolWrapper(Tool): """Wraps a single MCP server tool as a nanobot Tool.""" def __init__(self, session, server_name: str, tool_def, tool_timeout: int = 30): self._session = session self._original_name = tool_def.name self._name = f"mcp_{server_name}_{tool_def.name}" self._description = tool_def.description or tool_def.name self._parameters = tool_def.inputSchema or {"type": "object", "properties": {}} self._tool_timeout = tool_timeout @property def name(self) -> str: return self._name @property def description(self) -> str: return self._description @property def parameters(self) -> dict[str, Any]: return self._parameters async def execute(self, **kwargs: Any) -> str: from mcp import types try: result = await asyncio.wait_for( self._session.call_tool(self._original_name, arguments=kwargs), timeout=self._tool_timeout, ) except asyncio.TimeoutError: logger.warning("MCP tool '{}' timed out after {}s", self._name, self._tool_timeout) return f"(MCP tool call timed out after {self._tool_timeout}s)" parts = [] for block in result.content: if isinstance(block, types.TextContent): parts.append(block.text) else: parts.append(str(block)) return "\n".join(parts) or "(no output)" async def connect_mcp_servers( mcp_servers: dict, registry: ToolRegistry, stack: AsyncExitStack ) -> None: """Connect to configured MCP servers and register their tools.""" from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client for name, cfg in mcp_servers.items(): try: if cfg.command: params = StdioServerParameters( command=cfg.command, args=cfg.args, env=cfg.env or None ) read, write = await stack.enter_async_context(stdio_client(params)) elif cfg.url: from mcp.client.streamable_http import streamable_http_client # Always provide an explicit httpx client so MCP HTTP transport does not # inherit httpx's default 5s timeout and preempt the higher-level tool timeout. http_client = await stack.enter_async_context( httpx.AsyncClient( headers=cfg.headers or None, follow_redirects=True, timeout=None, ) ) read, write, _ = await stack.enter_async_context( streamable_http_client(cfg.url, http_client=http_client) ) else: logger.warning("MCP server '{}': no command or url configured, skipping", name) continue session = await stack.enter_async_context(ClientSession(read, write)) await session.initialize() tools = await session.list_tools() for tool_def in tools.tools: wrapper = MCPToolWrapper(session, name, tool_def, tool_timeout=cfg.tool_timeout) registry.register(wrapper) logger.debug("MCP: registered tool '{}' from server '{}'", wrapper.name, name) logger.info("MCP server '{}': connected, {} tools registered", name, len(tools.tools)) except Exception as e: logger.error("MCP server '{}': failed to connect: {}", name, e)
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/mcp.py", "license": "MIT License", "lines": 83, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
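connect_mcp_servers deliberately borrows the caller's AsyncExitStack, so every transport and session it opens shares one lifetime and is torn down together. A usage sketch; the SimpleNamespace config is a stand-in for nanobot's real MCP server config type, and "my-mcp-server" is a placeholder command:

import asyncio
from contextlib import AsyncExitStack
from types import SimpleNamespace

from nanobot.agent.tools.mcp import connect_mcp_servers
from nanobot.agent.tools.registry import ToolRegistry

async def main() -> None:
    registry = ToolRegistry()
    # Stand-in config: a stdio server launched as a subprocess.
    cfg = SimpleNamespace(command="my-mcp-server", args=[], env=None, url=None,
                          headers=None, tool_timeout=30)
    async with AsyncExitStack() as stack:
        await connect_mcp_servers({"local": cfg}, registry, stack)
        # ... use registry here; sessions stay alive until the stack closes.

asyncio.run(main())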
HKUDS/nanobot:nanobot/channels/mochat.py
"""Mochat channel implementation using Socket.IO with HTTP polling fallback.""" from __future__ import annotations import asyncio import json from collections import deque from dataclasses import dataclass, field from datetime import datetime from typing import Any import httpx from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import MochatConfig from nanobot.utils.helpers import get_data_path try: import socketio SOCKETIO_AVAILABLE = True except ImportError: socketio = None SOCKETIO_AVAILABLE = False try: import msgpack # noqa: F401 MSGPACK_AVAILABLE = True except ImportError: MSGPACK_AVAILABLE = False MAX_SEEN_MESSAGE_IDS = 2000 CURSOR_SAVE_DEBOUNCE_S = 0.5 # --------------------------------------------------------------------------- # Data classes # --------------------------------------------------------------------------- @dataclass class MochatBufferedEntry: """Buffered inbound entry for delayed dispatch.""" raw_body: str author: str sender_name: str = "" sender_username: str = "" timestamp: int | None = None message_id: str = "" group_id: str = "" @dataclass class DelayState: """Per-target delayed message state.""" entries: list[MochatBufferedEntry] = field(default_factory=list) lock: asyncio.Lock = field(default_factory=asyncio.Lock) timer: asyncio.Task | None = None @dataclass class MochatTarget: """Outbound target resolution result.""" id: str is_panel: bool # --------------------------------------------------------------------------- # Pure helpers # --------------------------------------------------------------------------- def _safe_dict(value: Any) -> dict: """Return *value* if it's a dict, else empty dict.""" return value if isinstance(value, dict) else {} def _str_field(src: dict, *keys: str) -> str: """Return the first non-empty str value found for *keys*, stripped.""" for k in keys: v = src.get(k) if isinstance(v, str) and v.strip(): return v.strip() return "" def _make_synthetic_event( message_id: str, author: str, content: Any, meta: Any, group_id: str, converse_id: str, timestamp: Any = None, *, author_info: Any = None, ) -> dict[str, Any]: """Build a synthetic ``message.add`` event dict.""" payload: dict[str, Any] = { "messageId": message_id, "author": author, "content": content, "meta": _safe_dict(meta), "groupId": group_id, "converseId": converse_id, } if author_info is not None: payload["authorInfo"] = _safe_dict(author_info) return { "type": "message.add", "timestamp": timestamp or datetime.utcnow().isoformat(), "payload": payload, } def normalize_mochat_content(content: Any) -> str: """Normalize content payload to text.""" if isinstance(content, str): return content.strip() if content is None: return "" try: return json.dumps(content, ensure_ascii=False) except TypeError: return str(content) def resolve_mochat_target(raw: str) -> MochatTarget: """Resolve id and target kind from user-provided target string.""" trimmed = (raw or "").strip() if not trimmed: return MochatTarget(id="", is_panel=False) lowered = trimmed.lower() cleaned, forced_panel = trimmed, False for prefix in ("mochat:", "group:", "channel:", "panel:"): if lowered.startswith(prefix): cleaned = trimmed[len(prefix):].strip() forced_panel = prefix in {"group:", "channel:", "panel:"} break if not cleaned: return MochatTarget(id="", is_panel=False) return MochatTarget(id=cleaned, is_panel=forced_panel or not cleaned.startswith("session_")) def 
extract_mention_ids(value: Any) -> list[str]: """Extract mention ids from heterogeneous mention payload.""" if not isinstance(value, list): return [] ids: list[str] = [] for item in value: if isinstance(item, str): if item.strip(): ids.append(item.strip()) elif isinstance(item, dict): for key in ("id", "userId", "_id"): candidate = item.get(key) if isinstance(candidate, str) and candidate.strip(): ids.append(candidate.strip()) break return ids def resolve_was_mentioned(payload: dict[str, Any], agent_user_id: str) -> bool: """Resolve mention state from payload metadata and text fallback.""" meta = payload.get("meta") if isinstance(meta, dict): if meta.get("mentioned") is True or meta.get("wasMentioned") is True: return True for f in ("mentions", "mentionIds", "mentionedUserIds", "mentionedUsers"): if agent_user_id and agent_user_id in extract_mention_ids(meta.get(f)): return True if not agent_user_id: return False content = payload.get("content") if not isinstance(content, str) or not content: return False return f"<@{agent_user_id}>" in content or f"@{agent_user_id}" in content def resolve_require_mention(config: MochatConfig, session_id: str, group_id: str) -> bool: """Resolve mention requirement for group/panel conversations.""" groups = config.groups or {} for key in (group_id, session_id, "*"): if key and key in groups: return bool(groups[key].require_mention) return bool(config.mention.require_in_groups) def build_buffered_body(entries: list[MochatBufferedEntry], is_group: bool) -> str: """Build text body from one or more buffered entries.""" if not entries: return "" if len(entries) == 1: return entries[0].raw_body lines: list[str] = [] for entry in entries: if not entry.raw_body: continue if is_group: label = entry.sender_name.strip() or entry.sender_username.strip() or entry.author if label: lines.append(f"{label}: {entry.raw_body}") continue lines.append(entry.raw_body) return "\n".join(lines).strip() def parse_timestamp(value: Any) -> int | None: """Parse event timestamp to epoch milliseconds.""" if not isinstance(value, str) or not value.strip(): return None try: return int(datetime.fromisoformat(value.replace("Z", "+00:00")).timestamp() * 1000) except ValueError: return None # --------------------------------------------------------------------------- # Channel # --------------------------------------------------------------------------- class MochatChannel(BaseChannel): """Mochat channel using socket.io with fallback polling workers.""" name = "mochat" def __init__(self, config: MochatConfig, bus: MessageBus): super().__init__(config, bus) self.config: MochatConfig = config self._http: httpx.AsyncClient | None = None self._socket: Any = None self._ws_connected = self._ws_ready = False self._state_dir = get_data_path() / "mochat" self._cursor_path = self._state_dir / "session_cursors.json" self._session_cursor: dict[str, int] = {} self._cursor_save_task: asyncio.Task | None = None self._session_set: set[str] = set() self._panel_set: set[str] = set() self._auto_discover_sessions = self._auto_discover_panels = False self._cold_sessions: set[str] = set() self._session_by_converse: dict[str, str] = {} self._seen_set: dict[str, set[str]] = {} self._seen_queue: dict[str, deque[str]] = {} self._delay_states: dict[str, DelayState] = {} self._fallback_mode = False self._session_fallback_tasks: dict[str, asyncio.Task] = {} self._panel_fallback_tasks: dict[str, asyncio.Task] = {} self._refresh_task: asyncio.Task | None = None self._target_locks: dict[str, asyncio.Lock] = {} # ---- 
lifecycle --------------------------------------------------------- async def start(self) -> None: """Start Mochat channel workers and websocket connection.""" if not self.config.claw_token: logger.error("Mochat claw_token not configured") return self._running = True self._http = httpx.AsyncClient(timeout=30.0) self._state_dir.mkdir(parents=True, exist_ok=True) await self._load_session_cursors() self._seed_targets_from_config() await self._refresh_targets(subscribe_new=False) if not await self._start_socket_client(): await self._ensure_fallback_workers() self._refresh_task = asyncio.create_task(self._refresh_loop()) while self._running: await asyncio.sleep(1) async def stop(self) -> None: """Stop all workers and clean up resources.""" self._running = False if self._refresh_task: self._refresh_task.cancel() self._refresh_task = None await self._stop_fallback_workers() await self._cancel_delay_timers() if self._socket: try: await self._socket.disconnect() except Exception: pass self._socket = None if self._cursor_save_task: self._cursor_save_task.cancel() self._cursor_save_task = None await self._save_session_cursors() if self._http: await self._http.aclose() self._http = None self._ws_connected = self._ws_ready = False async def send(self, msg: OutboundMessage) -> None: """Send outbound message to session or panel.""" if not self.config.claw_token: logger.warning("Mochat claw_token missing, skip send") return parts = ([msg.content.strip()] if msg.content and msg.content.strip() else []) if msg.media: parts.extend(m for m in msg.media if isinstance(m, str) and m.strip()) content = "\n".join(parts).strip() if not content: return target = resolve_mochat_target(msg.chat_id) if not target.id: logger.warning("Mochat outbound target is empty") return is_panel = (target.is_panel or target.id in self._panel_set) and not target.id.startswith("session_") try: if is_panel: await self._api_send("/api/claw/groups/panels/send", "panelId", target.id, content, msg.reply_to, self._read_group_id(msg.metadata)) else: await self._api_send("/api/claw/sessions/send", "sessionId", target.id, content, msg.reply_to) except Exception as e: logger.error("Failed to send Mochat message: {}", e) # ---- config / init helpers --------------------------------------------- def _seed_targets_from_config(self) -> None: sessions, self._auto_discover_sessions = self._normalize_id_list(self.config.sessions) panels, self._auto_discover_panels = self._normalize_id_list(self.config.panels) self._session_set.update(sessions) self._panel_set.update(panels) for sid in sessions: if sid not in self._session_cursor: self._cold_sessions.add(sid) @staticmethod def _normalize_id_list(values: list[str]) -> tuple[list[str], bool]: cleaned = [str(v).strip() for v in values if str(v).strip()] return sorted({v for v in cleaned if v != "*"}), "*" in cleaned # ---- websocket --------------------------------------------------------- async def _start_socket_client(self) -> bool: if not SOCKETIO_AVAILABLE: logger.warning("python-socketio not installed, Mochat using polling fallback") return False serializer = "default" if not self.config.socket_disable_msgpack: if MSGPACK_AVAILABLE: serializer = "msgpack" else: logger.warning("msgpack not installed but socket_disable_msgpack=false; using JSON") client = socketio.AsyncClient( reconnection=True, reconnection_attempts=self.config.max_retry_attempts or None, reconnection_delay=max(0.1, self.config.socket_reconnect_delay_ms / 1000.0), reconnection_delay_max=max(0.1, 
self.config.socket_max_reconnect_delay_ms / 1000.0), logger=False, engineio_logger=False, serializer=serializer, ) @client.event async def connect() -> None: self._ws_connected, self._ws_ready = True, False logger.info("Mochat websocket connected") subscribed = await self._subscribe_all() self._ws_ready = subscribed await (self._stop_fallback_workers() if subscribed else self._ensure_fallback_workers()) @client.event async def disconnect() -> None: if not self._running: return self._ws_connected = self._ws_ready = False logger.warning("Mochat websocket disconnected") await self._ensure_fallback_workers() @client.event async def connect_error(data: Any) -> None: logger.error("Mochat websocket connect error: {}", data) @client.on("claw.session.events") async def on_session_events(payload: dict[str, Any]) -> None: await self._handle_watch_payload(payload, "session") @client.on("claw.panel.events") async def on_panel_events(payload: dict[str, Any]) -> None: await self._handle_watch_payload(payload, "panel") for ev in ("notify:chat.inbox.append", "notify:chat.message.add", "notify:chat.message.update", "notify:chat.message.recall", "notify:chat.message.delete"): client.on(ev, self._build_notify_handler(ev)) socket_url = (self.config.socket_url or self.config.base_url).strip().rstrip("/") socket_path = (self.config.socket_path or "/socket.io").strip().lstrip("/") try: self._socket = client await client.connect( socket_url, transports=["websocket"], socketio_path=socket_path, auth={"token": self.config.claw_token}, wait_timeout=max(1.0, self.config.socket_connect_timeout_ms / 1000.0), ) return True except Exception as e: logger.error("Failed to connect Mochat websocket: {}", e) try: await client.disconnect() except Exception: pass self._socket = None return False def _build_notify_handler(self, event_name: str): async def handler(payload: Any) -> None: if event_name == "notify:chat.inbox.append": await self._handle_notify_inbox_append(payload) elif event_name.startswith("notify:chat.message."): await self._handle_notify_chat_message(payload) return handler # ---- subscribe --------------------------------------------------------- async def _subscribe_all(self) -> bool: ok = await self._subscribe_sessions(sorted(self._session_set)) ok = await self._subscribe_panels(sorted(self._panel_set)) and ok if self._auto_discover_sessions or self._auto_discover_panels: await self._refresh_targets(subscribe_new=True) return ok async def _subscribe_sessions(self, session_ids: list[str]) -> bool: if not session_ids: return True for sid in session_ids: if sid not in self._session_cursor: self._cold_sessions.add(sid) ack = await self._socket_call("com.claw.im.subscribeSessions", { "sessionIds": session_ids, "cursors": self._session_cursor, "limit": self.config.watch_limit, }) if not ack.get("result"): logger.error("Mochat subscribeSessions failed: {}", ack.get('message', 'unknown error')) return False data = ack.get("data") items: list[dict[str, Any]] = [] if isinstance(data, list): items = [i for i in data if isinstance(i, dict)] elif isinstance(data, dict): sessions = data.get("sessions") if isinstance(sessions, list): items = [i for i in sessions if isinstance(i, dict)] elif "sessionId" in data: items = [data] for p in items: await self._handle_watch_payload(p, "session") return True async def _subscribe_panels(self, panel_ids: list[str]) -> bool: if not self._auto_discover_panels and not panel_ids: return True ack = await self._socket_call("com.claw.im.subscribePanels", {"panelIds": panel_ids}) if not 
ack.get("result"): logger.error("Mochat subscribePanels failed: {}", ack.get('message', 'unknown error')) return False return True async def _socket_call(self, event_name: str, payload: dict[str, Any]) -> dict[str, Any]: if not self._socket: return {"result": False, "message": "socket not connected"} try: raw = await self._socket.call(event_name, payload, timeout=10) except Exception as e: return {"result": False, "message": str(e)} return raw if isinstance(raw, dict) else {"result": True, "data": raw} # ---- refresh / discovery ----------------------------------------------- async def _refresh_loop(self) -> None: interval_s = max(1.0, self.config.refresh_interval_ms / 1000.0) while self._running: await asyncio.sleep(interval_s) try: await self._refresh_targets(subscribe_new=self._ws_ready) except Exception as e: logger.warning("Mochat refresh failed: {}", e) if self._fallback_mode: await self._ensure_fallback_workers() async def _refresh_targets(self, subscribe_new: bool) -> None: if self._auto_discover_sessions: await self._refresh_sessions_directory(subscribe_new) if self._auto_discover_panels: await self._refresh_panels(subscribe_new) async def _refresh_sessions_directory(self, subscribe_new: bool) -> None: try: response = await self._post_json("/api/claw/sessions/list", {}) except Exception as e: logger.warning("Mochat listSessions failed: {}", e) return sessions = response.get("sessions") if not isinstance(sessions, list): return new_ids: list[str] = [] for s in sessions: if not isinstance(s, dict): continue sid = _str_field(s, "sessionId") if not sid: continue if sid not in self._session_set: self._session_set.add(sid) new_ids.append(sid) if sid not in self._session_cursor: self._cold_sessions.add(sid) cid = _str_field(s, "converseId") if cid: self._session_by_converse[cid] = sid if not new_ids: return if self._ws_ready and subscribe_new: await self._subscribe_sessions(new_ids) if self._fallback_mode: await self._ensure_fallback_workers() async def _refresh_panels(self, subscribe_new: bool) -> None: try: response = await self._post_json("/api/claw/groups/get", {}) except Exception as e: logger.warning("Mochat getWorkspaceGroup failed: {}", e) return raw_panels = response.get("panels") if not isinstance(raw_panels, list): return new_ids: list[str] = [] for p in raw_panels: if not isinstance(p, dict): continue pt = p.get("type") if isinstance(pt, int) and pt != 0: continue pid = _str_field(p, "id", "_id") if pid and pid not in self._panel_set: self._panel_set.add(pid) new_ids.append(pid) if not new_ids: return if self._ws_ready and subscribe_new: await self._subscribe_panels(new_ids) if self._fallback_mode: await self._ensure_fallback_workers() # ---- fallback workers -------------------------------------------------- async def _ensure_fallback_workers(self) -> None: if not self._running: return self._fallback_mode = True for sid in sorted(self._session_set): t = self._session_fallback_tasks.get(sid) if not t or t.done(): self._session_fallback_tasks[sid] = asyncio.create_task(self._session_watch_worker(sid)) for pid in sorted(self._panel_set): t = self._panel_fallback_tasks.get(pid) if not t or t.done(): self._panel_fallback_tasks[pid] = asyncio.create_task(self._panel_poll_worker(pid)) async def _stop_fallback_workers(self) -> None: self._fallback_mode = False tasks = [*self._session_fallback_tasks.values(), *self._panel_fallback_tasks.values()] for t in tasks: t.cancel() if tasks: await asyncio.gather(*tasks, return_exceptions=True) self._session_fallback_tasks.clear() 
self._panel_fallback_tasks.clear() async def _session_watch_worker(self, session_id: str) -> None: while self._running and self._fallback_mode: try: payload = await self._post_json("/api/claw/sessions/watch", { "sessionId": session_id, "cursor": self._session_cursor.get(session_id, 0), "timeoutMs": self.config.watch_timeout_ms, "limit": self.config.watch_limit, }) await self._handle_watch_payload(payload, "session") except asyncio.CancelledError: break except Exception as e: logger.warning("Mochat watch fallback error ({}): {}", session_id, e) await asyncio.sleep(max(0.1, self.config.retry_delay_ms / 1000.0)) async def _panel_poll_worker(self, panel_id: str) -> None: sleep_s = max(1.0, self.config.refresh_interval_ms / 1000.0) while self._running and self._fallback_mode: try: resp = await self._post_json("/api/claw/groups/panels/messages", { "panelId": panel_id, "limit": min(100, max(1, self.config.watch_limit)), }) msgs = resp.get("messages") if isinstance(msgs, list): for m in reversed(msgs): if not isinstance(m, dict): continue evt = _make_synthetic_event( message_id=str(m.get("messageId") or ""), author=str(m.get("author") or ""), content=m.get("content"), meta=m.get("meta"), group_id=str(resp.get("groupId") or ""), converse_id=panel_id, timestamp=m.get("createdAt"), author_info=m.get("authorInfo"), ) await self._process_inbound_event(panel_id, evt, "panel") except asyncio.CancelledError: break except Exception as e: logger.warning("Mochat panel polling error ({}): {}", panel_id, e) await asyncio.sleep(sleep_s) # ---- inbound event processing ------------------------------------------ async def _handle_watch_payload(self, payload: dict[str, Any], target_kind: str) -> None: if not isinstance(payload, dict): return target_id = _str_field(payload, "sessionId") if not target_id: return lock = self._target_locks.setdefault(f"{target_kind}:{target_id}", asyncio.Lock()) async with lock: prev = self._session_cursor.get(target_id, 0) if target_kind == "session" else 0 pc = payload.get("cursor") if target_kind == "session" and isinstance(pc, int) and pc >= 0: self._mark_session_cursor(target_id, pc) raw_events = payload.get("events") if not isinstance(raw_events, list): return if target_kind == "session" and target_id in self._cold_sessions: self._cold_sessions.discard(target_id) return for event in raw_events: if not isinstance(event, dict): continue seq = event.get("seq") if target_kind == "session" and isinstance(seq, int) and seq > self._session_cursor.get(target_id, prev): self._mark_session_cursor(target_id, seq) if event.get("type") == "message.add": await self._process_inbound_event(target_id, event, target_kind) async def _process_inbound_event(self, target_id: str, event: dict[str, Any], target_kind: str) -> None: payload = event.get("payload") if not isinstance(payload, dict): return author = _str_field(payload, "author") if not author or (self.config.agent_user_id and author == self.config.agent_user_id): return if not self.is_allowed(author): return message_id = _str_field(payload, "messageId") seen_key = f"{target_kind}:{target_id}" if message_id and self._remember_message_id(seen_key, message_id): return raw_body = normalize_mochat_content(payload.get("content")) or "[empty message]" ai = _safe_dict(payload.get("authorInfo")) sender_name = _str_field(ai, "nickname", "email") sender_username = _str_field(ai, "agentId") group_id = _str_field(payload, "groupId") is_group = bool(group_id) was_mentioned = resolve_was_mentioned(payload, self.config.agent_user_id) require_mention = 
target_kind == "panel" and is_group and resolve_require_mention(self.config, target_id, group_id) use_delay = target_kind == "panel" and self.config.reply_delay_mode == "non-mention" if require_mention and not was_mentioned and not use_delay: return entry = MochatBufferedEntry( raw_body=raw_body, author=author, sender_name=sender_name, sender_username=sender_username, timestamp=parse_timestamp(event.get("timestamp")), message_id=message_id, group_id=group_id, ) if use_delay: delay_key = seen_key if was_mentioned: await self._flush_delayed_entries(delay_key, target_id, target_kind, "mention", entry) else: await self._enqueue_delayed_entry(delay_key, target_id, target_kind, entry) return await self._dispatch_entries(target_id, target_kind, [entry], was_mentioned) # ---- dedup / buffering ------------------------------------------------- def _remember_message_id(self, key: str, message_id: str) -> bool: seen_set = self._seen_set.setdefault(key, set()) seen_queue = self._seen_queue.setdefault(key, deque()) if message_id in seen_set: return True seen_set.add(message_id) seen_queue.append(message_id) while len(seen_queue) > MAX_SEEN_MESSAGE_IDS: seen_set.discard(seen_queue.popleft()) return False async def _enqueue_delayed_entry(self, key: str, target_id: str, target_kind: str, entry: MochatBufferedEntry) -> None: state = self._delay_states.setdefault(key, DelayState()) async with state.lock: state.entries.append(entry) if state.timer: state.timer.cancel() state.timer = asyncio.create_task(self._delay_flush_after(key, target_id, target_kind)) async def _delay_flush_after(self, key: str, target_id: str, target_kind: str) -> None: await asyncio.sleep(max(0, self.config.reply_delay_ms) / 1000.0) await self._flush_delayed_entries(key, target_id, target_kind, "timer", None) async def _flush_delayed_entries(self, key: str, target_id: str, target_kind: str, reason: str, entry: MochatBufferedEntry | None) -> None: state = self._delay_states.setdefault(key, DelayState()) async with state.lock: if entry: state.entries.append(entry) current = asyncio.current_task() if state.timer and state.timer is not current: state.timer.cancel() state.timer = None entries = state.entries[:] state.entries.clear() if entries: await self._dispatch_entries(target_id, target_kind, entries, reason == "mention") async def _dispatch_entries(self, target_id: str, target_kind: str, entries: list[MochatBufferedEntry], was_mentioned: bool) -> None: if not entries: return last = entries[-1] is_group = bool(last.group_id) body = build_buffered_body(entries, is_group) or "[empty message]" await self._handle_message( sender_id=last.author, chat_id=target_id, content=body, metadata={ "message_id": last.message_id, "timestamp": last.timestamp, "is_group": is_group, "group_id": last.group_id, "sender_name": last.sender_name, "sender_username": last.sender_username, "target_kind": target_kind, "was_mentioned": was_mentioned, "buffered_count": len(entries), }, ) async def _cancel_delay_timers(self) -> None: for state in self._delay_states.values(): if state.timer: state.timer.cancel() self._delay_states.clear() # ---- notify handlers --------------------------------------------------- async def _handle_notify_chat_message(self, payload: Any) -> None: if not isinstance(payload, dict): return group_id = _str_field(payload, "groupId") panel_id = _str_field(payload, "converseId", "panelId") if not group_id or not panel_id: return if self._panel_set and panel_id not in self._panel_set: return evt = _make_synthetic_event( 
message_id=str(payload.get("_id") or payload.get("messageId") or ""), author=str(payload.get("author") or ""), content=payload.get("content"), meta=payload.get("meta"), group_id=group_id, converse_id=panel_id, timestamp=payload.get("createdAt"), author_info=payload.get("authorInfo"), ) await self._process_inbound_event(panel_id, evt, "panel") async def _handle_notify_inbox_append(self, payload: Any) -> None: if not isinstance(payload, dict) or payload.get("type") != "message": return detail = payload.get("payload") if not isinstance(detail, dict): return if _str_field(detail, "groupId"): return converse_id = _str_field(detail, "converseId") if not converse_id: return session_id = self._session_by_converse.get(converse_id) if not session_id: await self._refresh_sessions_directory(self._ws_ready) session_id = self._session_by_converse.get(converse_id) if not session_id: return evt = _make_synthetic_event( message_id=str(detail.get("messageId") or payload.get("_id") or ""), author=str(detail.get("messageAuthor") or ""), content=str(detail.get("messagePlainContent") or detail.get("messageSnippet") or ""), meta={"source": "notify:chat.inbox.append", "converseId": converse_id}, group_id="", converse_id=converse_id, timestamp=payload.get("createdAt"), ) await self._process_inbound_event(session_id, evt, "session") # ---- cursor persistence ------------------------------------------------ def _mark_session_cursor(self, session_id: str, cursor: int) -> None: if cursor < 0 or cursor < self._session_cursor.get(session_id, 0): return self._session_cursor[session_id] = cursor if not self._cursor_save_task or self._cursor_save_task.done(): self._cursor_save_task = asyncio.create_task(self._save_cursor_debounced()) async def _save_cursor_debounced(self) -> None: await asyncio.sleep(CURSOR_SAVE_DEBOUNCE_S) await self._save_session_cursors() async def _load_session_cursors(self) -> None: if not self._cursor_path.exists(): return try: data = json.loads(self._cursor_path.read_text("utf-8")) except Exception as e: logger.warning("Failed to read Mochat cursor file: {}", e) return cursors = data.get("cursors") if isinstance(data, dict) else None if isinstance(cursors, dict): for sid, cur in cursors.items(): if isinstance(sid, str) and isinstance(cur, int) and cur >= 0: self._session_cursor[sid] = cur async def _save_session_cursors(self) -> None: try: self._state_dir.mkdir(parents=True, exist_ok=True) self._cursor_path.write_text(json.dumps({ "schemaVersion": 1, "updatedAt": datetime.utcnow().isoformat(), "cursors": self._session_cursor, }, ensure_ascii=False, indent=2) + "\n", "utf-8") except Exception as e: logger.warning("Failed to save Mochat cursor file: {}", e) # ---- HTTP helpers ------------------------------------------------------ async def _post_json(self, path: str, payload: dict[str, Any]) -> dict[str, Any]: if not self._http: raise RuntimeError("Mochat HTTP client not initialized") url = f"{self.config.base_url.strip().rstrip('/')}{path}" response = await self._http.post(url, headers={ "Content-Type": "application/json", "X-Claw-Token": self.config.claw_token, }, json=payload) if not response.is_success: raise RuntimeError(f"Mochat HTTP {response.status_code}: {response.text[:200]}") try: parsed = response.json() except Exception: parsed = response.text if isinstance(parsed, dict) and isinstance(parsed.get("code"), int): if parsed["code"] != 200: msg = str(parsed.get("message") or parsed.get("name") or "request failed") raise RuntimeError(f"Mochat API error: {msg} (code={parsed['code']})") data = 
parsed.get("data") return data if isinstance(data, dict) else {} return parsed if isinstance(parsed, dict) else {} async def _api_send(self, path: str, id_key: str, id_val: str, content: str, reply_to: str | None, group_id: str | None = None) -> dict[str, Any]: """Unified send helper for session and panel messages.""" body: dict[str, Any] = {id_key: id_val, "content": content} if reply_to: body["replyTo"] = reply_to if group_id: body["groupId"] = group_id return await self._post_json(path, body) @staticmethod def _read_group_id(metadata: dict[str, Any]) -> str | None: if not isinstance(metadata, dict): return None value = metadata.get("group_id") or metadata.get("groupId") return value.strip() if isinstance(value, str) and value.strip() else None
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/mochat.py", "license": "MIT License", "lines": 759, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
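The pure helpers at the top of mochat.py are importable on their own, which makes the outbound routing rules easy to verify in isolation. For example, per resolve_mochat_target as defined above:

from nanobot.channels.mochat import resolve_mochat_target

assert resolve_mochat_target("panel:abc123").is_panel is True       # explicit prefix forces panel
assert resolve_mochat_target("session_42").is_panel is False        # session_* ids stay sessions
assert resolve_mochat_target("mochat:session_42").is_panel is False
assert resolve_mochat_target("abc123").is_panel is True             # bare non-session ids default to panel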
HKUDS/nanobot:tests/test_cli_input.py
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from prompt_toolkit.formatted_text import HTML

from nanobot.cli import commands


@pytest.fixture
def mock_prompt_session():
    """Mock the global prompt session."""
    mock_session = MagicMock()
    mock_session.prompt_async = AsyncMock()
    with patch("nanobot.cli.commands._PROMPT_SESSION", mock_session), \
         patch("nanobot.cli.commands.patch_stdout"):
        yield mock_session


@pytest.mark.asyncio
async def test_read_interactive_input_async_returns_input(mock_prompt_session):
    """Test that _read_interactive_input_async returns the user input from prompt_session."""
    mock_prompt_session.prompt_async.return_value = "hello world"

    result = await commands._read_interactive_input_async()

    assert result == "hello world"
    mock_prompt_session.prompt_async.assert_called_once()
    args, _ = mock_prompt_session.prompt_async.call_args
    assert isinstance(args[0], HTML)  # Verify HTML prompt is used


@pytest.mark.asyncio
async def test_read_interactive_input_async_handles_eof(mock_prompt_session):
    """Test that EOFError converts to KeyboardInterrupt."""
    mock_prompt_session.prompt_async.side_effect = EOFError()

    with pytest.raises(KeyboardInterrupt):
        await commands._read_interactive_input_async()


def test_init_prompt_session_creates_session():
    """Test that _init_prompt_session initializes the global session."""
    # Ensure global is None before test
    commands._PROMPT_SESSION = None

    with patch("nanobot.cli.commands.PromptSession") as MockSession, \
         patch("nanobot.cli.commands.FileHistory") as MockHistory, \
         patch("pathlib.Path.home") as mock_home:
        mock_home.return_value = MagicMock()
        commands._init_prompt_session()

        assert commands._PROMPT_SESSION is not None
        MockSession.assert_called_once()
        _, kwargs = MockSession.call_args
        assert kwargs["multiline"] is False
        assert kwargs["enable_open_in_editor"] is False
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_cli_input.py", "license": "MIT License", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
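The EOF test encodes a small CLI convention: Ctrl-D (EOF) is normalized to KeyboardInterrupt so a single handler can end the REPL loop. A hypothetical sketch of that pattern (not commands.py verbatim):

async def read_line(session) -> str:
    # Normalize Ctrl-D to the same exit path as Ctrl-C.
    try:
        return await session.prompt_async("> ")
    except EOFError:
        raise KeyboardInterrupt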
HKUDS/nanobot:tests/test_commands.py
import shutil
from pathlib import Path
from unittest.mock import patch

import pytest
from typer.testing import CliRunner

from nanobot.cli.commands import app
from nanobot.config.schema import Config
from nanobot.providers.litellm_provider import LiteLLMProvider
from nanobot.providers.openai_codex_provider import _strip_model_prefix
from nanobot.providers.registry import find_by_model

runner = CliRunner()


@pytest.fixture
def mock_paths():
    """Mock config/workspace paths for test isolation."""
    with patch("nanobot.config.loader.get_config_path") as mock_cp, \
         patch("nanobot.config.loader.save_config") as mock_sc, \
         patch("nanobot.config.loader.load_config") as mock_lc, \
         patch("nanobot.utils.helpers.get_workspace_path") as mock_ws:
        base_dir = Path("./test_onboard_data")
        if base_dir.exists():
            shutil.rmtree(base_dir)
        base_dir.mkdir()

        config_file = base_dir / "config.json"
        workspace_dir = base_dir / "workspace"

        mock_cp.return_value = config_file
        mock_ws.return_value = workspace_dir
        mock_sc.side_effect = lambda config: config_file.write_text("{}")

        yield config_file, workspace_dir

        if base_dir.exists():
            shutil.rmtree(base_dir)


def test_onboard_fresh_install(mock_paths):
    """No existing config — should create from scratch."""
    config_file, workspace_dir = mock_paths

    result = runner.invoke(app, ["onboard"])

    assert result.exit_code == 0
    assert "Created config" in result.stdout
    assert "Created workspace" in result.stdout
    assert "nanobot is ready" in result.stdout
    assert config_file.exists()
    assert (workspace_dir / "AGENTS.md").exists()
    assert (workspace_dir / "memory" / "MEMORY.md").exists()


def test_onboard_existing_config_refresh(mock_paths):
    """Config exists, user declines overwrite — should refresh (load-merge-save)."""
    config_file, workspace_dir = mock_paths
    config_file.write_text('{"existing": true}')

    result = runner.invoke(app, ["onboard"], input="n\n")

    assert result.exit_code == 0
    assert "Config already exists" in result.stdout
    assert "existing values preserved" in result.stdout
    assert workspace_dir.exists()
    assert (workspace_dir / "AGENTS.md").exists()


def test_onboard_existing_config_overwrite(mock_paths):
    """Config exists, user confirms overwrite — should reset to defaults."""
    config_file, workspace_dir = mock_paths
    config_file.write_text('{"existing": true}')

    result = runner.invoke(app, ["onboard"], input="y\n")

    assert result.exit_code == 0
    assert "Config already exists" in result.stdout
    assert "Config reset to defaults" in result.stdout
    assert workspace_dir.exists()


def test_onboard_existing_workspace_safe_create(mock_paths):
    """Workspace exists — should not recreate, but still add missing templates."""
    config_file, workspace_dir = mock_paths
    workspace_dir.mkdir(parents=True)
    config_file.write_text("{}")

    result = runner.invoke(app, ["onboard"], input="n\n")

    assert result.exit_code == 0
    assert "Created workspace" not in result.stdout
    assert "Created AGENTS.md" in result.stdout
    assert (workspace_dir / "AGENTS.md").exists()


def test_config_matches_github_copilot_codex_with_hyphen_prefix():
    config = Config()
    config.agents.defaults.model = "github-copilot/gpt-5.3-codex"
    assert config.get_provider_name() == "github_copilot"


def test_config_matches_openai_codex_with_hyphen_prefix():
    config = Config()
    config.agents.defaults.model = "openai-codex/gpt-5.1-codex"
    assert config.get_provider_name() == "openai_codex"


def test_find_by_model_prefers_explicit_prefix_over_generic_codex_keyword():
    spec = find_by_model("github-copilot/gpt-5.3-codex")
    assert spec is not None
    assert spec.name == "github_copilot"


def test_litellm_provider_canonicalizes_github_copilot_hyphen_prefix():
    provider = LiteLLMProvider(default_model="github-copilot/gpt-5.3-codex")
    resolved = provider._resolve_model("github-copilot/gpt-5.3-codex")
    assert resolved == "github_copilot/gpt-5.3-codex"


def test_openai_codex_strip_prefix_supports_hyphen_and_underscore():
    assert _strip_model_prefix("openai-codex/gpt-5.1-codex") == "gpt-5.1-codex"
    assert _strip_model_prefix("openai_codex/gpt-5.1-codex") == "gpt-5.1-codex"
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_commands.py", "license": "MIT License", "lines": 89, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
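Several of these tests pin one rule: hyphenated provider prefixes ("github-copilot/", "openai-codex/") are accepted anywhere a model string appears and are canonicalized to the underscore registry names. A sketch of that mapping, assuming a simple lookup table (illustrative only; the real logic lives in the provider registry and LiteLLMProvider._resolve_model):

_CANONICAL = {"github-copilot": "github_copilot", "openai-codex": "openai_codex"}

def canonicalize(model: str) -> str:
    # Map a hyphenated prefix to its underscore registry name, keep the rest.
    prefix, _, rest = model.partition("/")
    return f"{_CANONICAL.get(prefix, prefix)}/{rest}" if rest else model

assert canonicalize("github-copilot/gpt-5.3-codex") == "github_copilot/gpt-5.3-codex"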
HKUDS/nanobot:tests/test_consolidate_offset.py
"""Test session management with cache-friendly message handling.""" import asyncio from unittest.mock import AsyncMock, MagicMock import pytest from pathlib import Path from nanobot.session.manager import Session, SessionManager # Test constants MEMORY_WINDOW = 50 KEEP_COUNT = MEMORY_WINDOW // 2 # 25 def create_session_with_messages(key: str, count: int, role: str = "user") -> Session: """Create a session and add the specified number of messages. Args: key: Session identifier count: Number of messages to add role: Message role (default: "user") Returns: Session with the specified messages """ session = Session(key=key) for i in range(count): session.add_message(role, f"msg{i}") return session def assert_messages_content(messages: list, start_index: int, end_index: int) -> None: """Assert that messages contain expected content from start to end index. Args: messages: List of message dictionaries start_index: Expected first message index end_index: Expected last message index """ assert len(messages) > 0 assert messages[0]["content"] == f"msg{start_index}" assert messages[-1]["content"] == f"msg{end_index}" def get_old_messages(session: Session, last_consolidated: int, keep_count: int) -> list: """Extract messages that would be consolidated using the standard slice logic. Args: session: The session containing messages last_consolidated: Index of last consolidated message keep_count: Number of recent messages to keep Returns: List of messages that would be consolidated """ return session.messages[last_consolidated:-keep_count] class TestSessionLastConsolidated: """Test last_consolidated tracking to avoid duplicate processing.""" def test_initial_last_consolidated_zero(self) -> None: """Test that new session starts with last_consolidated=0.""" session = Session(key="test:initial") assert session.last_consolidated == 0 def test_last_consolidated_persistence(self, tmp_path) -> None: """Test that last_consolidated persists across save/load.""" manager = SessionManager(Path(tmp_path)) session1 = create_session_with_messages("test:persist", 20) session1.last_consolidated = 15 manager.save(session1) session2 = manager.get_or_create("test:persist") assert session2.last_consolidated == 15 assert len(session2.messages) == 20 def test_clear_resets_last_consolidated(self) -> None: """Test that clear() resets last_consolidated to 0.""" session = create_session_with_messages("test:clear", 10) session.last_consolidated = 5 session.clear() assert len(session.messages) == 0 assert session.last_consolidated == 0 class TestSessionImmutableHistory: """Test Session message immutability for cache efficiency.""" def test_initial_state(self) -> None: """Test that new session has empty messages list.""" session = Session(key="test:initial") assert len(session.messages) == 0 def test_add_messages_appends_only(self) -> None: """Test that adding messages only appends, never modifies.""" session = Session(key="test:preserve") session.add_message("user", "msg1") session.add_message("assistant", "resp1") session.add_message("user", "msg2") assert len(session.messages) == 3 assert session.messages[0]["content"] == "msg1" def test_get_history_returns_most_recent(self) -> None: """Test get_history returns the most recent messages.""" session = Session(key="test:history") for i in range(10): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") history = session.get_history(max_messages=6) assert len(history) == 6 assert history[0]["content"] == "msg7" assert history[-1]["content"] == "resp9" def 
test_get_history_with_all_messages(self) -> None: """Test get_history with max_messages larger than actual.""" session = create_session_with_messages("test:all", 5) history = session.get_history(max_messages=100) assert len(history) == 5 assert history[0]["content"] == "msg0" def test_get_history_stable_for_same_session(self) -> None: """Test that get_history returns same content for same max_messages.""" session = create_session_with_messages("test:stable", 20) history1 = session.get_history(max_messages=10) history2 = session.get_history(max_messages=10) assert history1 == history2 def test_messages_list_never_modified(self) -> None: """Test that messages list is never modified after creation.""" session = create_session_with_messages("test:immutable", 5) original_len = len(session.messages) session.get_history(max_messages=2) assert len(session.messages) == original_len for _ in range(10): session.get_history(max_messages=3) assert len(session.messages) == original_len class TestSessionPersistence: """Test Session persistence and reload.""" @pytest.fixture def temp_manager(self, tmp_path): return SessionManager(Path(tmp_path)) def test_persistence_roundtrip(self, temp_manager): """Test that messages persist across save/load.""" session1 = create_session_with_messages("test:persistence", 20) temp_manager.save(session1) session2 = temp_manager.get_or_create("test:persistence") assert len(session2.messages) == 20 assert session2.messages[0]["content"] == "msg0" assert session2.messages[-1]["content"] == "msg19" def test_get_history_after_reload(self, temp_manager): """Test that get_history works correctly after reload.""" session1 = create_session_with_messages("test:reload", 30) temp_manager.save(session1) session2 = temp_manager.get_or_create("test:reload") history = session2.get_history(max_messages=10) assert len(history) == 10 assert history[0]["content"] == "msg20" assert history[-1]["content"] == "msg29" def test_clear_resets_session(self, temp_manager): """Test that clear() properly resets session.""" session = create_session_with_messages("test:clear", 10) assert len(session.messages) == 10 session.clear() assert len(session.messages) == 0 class TestConsolidationTriggerConditions: """Test consolidation trigger conditions and logic.""" def test_consolidation_needed_when_messages_exceed_window(self): """Test consolidation logic: should trigger when messages > memory_window.""" session = create_session_with_messages("test:trigger", 60) total_messages = len(session.messages) messages_to_process = total_messages - session.last_consolidated assert total_messages > MEMORY_WINDOW assert messages_to_process > 0 expected_consolidate_count = total_messages - KEEP_COUNT assert expected_consolidate_count == 35 def test_consolidation_skipped_when_within_keep_count(self): """Test consolidation skipped when total messages <= keep_count.""" session = create_session_with_messages("test:skip", 20) total_messages = len(session.messages) assert total_messages <= KEEP_COUNT old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 0 def test_consolidation_skipped_when_no_new_messages(self): """Test consolidation skipped when messages_to_process <= 0.""" session = create_session_with_messages("test:already_consolidated", 40) session.last_consolidated = len(session.messages) - KEEP_COUNT # 15 # Add a few more messages for i in range(40, 42): session.add_message("user", f"msg{i}") total_messages = len(session.messages) messages_to_process = total_messages 
- session.last_consolidated assert messages_to_process > 0 # Simulate last_consolidated catching up session.last_consolidated = total_messages - KEEP_COUNT old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 0 class TestLastConsolidatedEdgeCases: """Test last_consolidated edge cases and data corruption scenarios.""" def test_last_consolidated_exceeds_message_count(self): """Test behavior when last_consolidated > len(messages) (data corruption).""" session = create_session_with_messages("test:corruption", 10) session.last_consolidated = 20 total_messages = len(session.messages) messages_to_process = total_messages - session.last_consolidated assert messages_to_process <= 0 old_messages = get_old_messages(session, session.last_consolidated, 5) assert len(old_messages) == 0 def test_last_consolidated_negative_value(self): """Test behavior with negative last_consolidated (invalid state).""" session = create_session_with_messages("test:negative", 10) session.last_consolidated = -5 keep_count = 3 old_messages = get_old_messages(session, session.last_consolidated, keep_count) # messages[-5:-3] with 10 messages gives indices 5,6 assert len(old_messages) == 2 assert old_messages[0]["content"] == "msg5" assert old_messages[-1]["content"] == "msg6" def test_messages_added_after_consolidation(self): """Test correct behavior when new messages arrive after consolidation.""" session = create_session_with_messages("test:new_messages", 40) session.last_consolidated = len(session.messages) - KEEP_COUNT # 15 # Add new messages after consolidation for i in range(40, 50): session.add_message("user", f"msg{i}") total_messages = len(session.messages) old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) expected_consolidate_count = total_messages - KEEP_COUNT - session.last_consolidated assert len(old_messages) == expected_consolidate_count assert_messages_content(old_messages, 15, 24) def test_slice_behavior_when_indices_overlap(self): """Test slice behavior when last_consolidated >= total - keep_count.""" session = create_session_with_messages("test:overlap", 30) session.last_consolidated = 12 old_messages = get_old_messages(session, session.last_consolidated, 20) assert len(old_messages) == 0 class TestArchiveAllMode: """Test archive_all mode (used by /new command).""" def test_archive_all_consolidates_everything(self): """Test archive_all=True consolidates all messages.""" session = create_session_with_messages("test:archive_all", 50) archive_all = True if archive_all: old_messages = session.messages assert len(old_messages) == 50 assert session.last_consolidated == 0 def test_archive_all_resets_last_consolidated(self): """Test that archive_all mode resets last_consolidated to 0.""" session = create_session_with_messages("test:reset", 40) session.last_consolidated = 15 archive_all = True if archive_all: session.last_consolidated = 0 assert session.last_consolidated == 0 assert len(session.messages) == 40 def test_archive_all_vs_normal_consolidation(self): """Test difference between archive_all and normal consolidation.""" # Normal consolidation session1 = create_session_with_messages("test:normal", 60) session1.last_consolidated = len(session1.messages) - KEEP_COUNT # archive_all mode session2 = create_session_with_messages("test:all", 60) session2.last_consolidated = 0 assert session1.last_consolidated == 35 assert len(session1.messages) == 60 assert session2.last_consolidated == 0 assert len(session2.messages) == 60 class 
TestCacheImmutability: """Test that consolidation doesn't modify session.messages (cache safety).""" def test_consolidation_does_not_modify_messages_list(self): """Test that consolidation leaves messages list unchanged.""" session = create_session_with_messages("test:immutable", 50) original_messages = session.messages.copy() original_len = len(session.messages) session.last_consolidated = original_len - KEEP_COUNT assert len(session.messages) == original_len assert session.messages == original_messages def test_get_history_does_not_modify_messages(self): """Test that get_history doesn't modify messages list.""" session = create_session_with_messages("test:history_immutable", 40) original_messages = [m.copy() for m in session.messages] for _ in range(5): history = session.get_history(max_messages=10) assert len(history) == 10 assert len(session.messages) == 40 for i, msg in enumerate(session.messages): assert msg["content"] == original_messages[i]["content"] def test_consolidation_only_updates_last_consolidated(self): """Test that consolidation only updates last_consolidated field.""" session = create_session_with_messages("test:field_only", 60) original_messages = session.messages.copy() original_key = session.key original_metadata = session.metadata.copy() session.last_consolidated = len(session.messages) - KEEP_COUNT assert session.messages == original_messages assert session.key == original_key assert session.metadata == original_metadata assert session.last_consolidated == 35 class TestSliceLogic: """Test the slice logic: messages[last_consolidated:-keep_count].""" def test_slice_extracts_correct_range(self): """Test that slice extracts the correct message range.""" session = create_session_with_messages("test:slice", 60) old_messages = get_old_messages(session, 0, KEEP_COUNT) assert len(old_messages) == 35 assert_messages_content(old_messages, 0, 34) remaining = session.messages[-KEEP_COUNT:] assert len(remaining) == 25 assert_messages_content(remaining, 35, 59) def test_slice_with_partial_consolidation(self): """Test slice when some messages already consolidated.""" session = create_session_with_messages("test:partial", 70) last_consolidated = 30 old_messages = get_old_messages(session, last_consolidated, KEEP_COUNT) assert len(old_messages) == 15 assert_messages_content(old_messages, 30, 44) def test_slice_with_various_keep_counts(self): """Test slice behavior with different keep_count values.""" session = create_session_with_messages("test:keep_counts", 50) test_cases = [(10, 40), (20, 30), (30, 20), (40, 10)] for keep_count, expected_count in test_cases: old_messages = session.messages[0:-keep_count] assert len(old_messages) == expected_count def test_slice_when_keep_count_exceeds_messages(self): """Test slice when keep_count > len(messages).""" session = create_session_with_messages("test:exceed", 10) old_messages = session.messages[0:-20] assert len(old_messages) == 0 class TestEmptyAndBoundarySessions: """Test empty sessions and boundary conditions.""" def test_empty_session_consolidation(self): """Test consolidation behavior with empty session.""" session = Session(key="test:empty") assert len(session.messages) == 0 assert session.last_consolidated == 0 messages_to_process = len(session.messages) - session.last_consolidated assert messages_to_process == 0 old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 0 def test_single_message_session(self): """Test consolidation with single message.""" session = 
Session(key="test:single") session.add_message("user", "only message") assert len(session.messages) == 1 old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 0 def test_exactly_keep_count_messages(self): """Test session with exactly keep_count messages.""" session = create_session_with_messages("test:exact", KEEP_COUNT) assert len(session.messages) == KEEP_COUNT old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 0 def test_just_over_keep_count(self): """Test session with one message over keep_count.""" session = create_session_with_messages("test:over", KEEP_COUNT + 1) assert len(session.messages) == 26 old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 1 assert old_messages[0]["content"] == "msg0" def test_very_large_session(self): """Test consolidation with very large message count.""" session = create_session_with_messages("test:large", 1000) assert len(session.messages) == 1000 old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) assert len(old_messages) == 975 assert_messages_content(old_messages, 0, 974) remaining = session.messages[-KEEP_COUNT:] assert len(remaining) == 25 assert_messages_content(remaining, 975, 999) def test_session_with_gaps_in_consolidation(self): """Test session with potential gaps in consolidation history.""" session = create_session_with_messages("test:gaps", 50) session.last_consolidated = 10 # Add more messages for i in range(50, 60): session.add_message("user", f"msg{i}") old_messages = get_old_messages(session, session.last_consolidated, KEEP_COUNT) expected_count = 60 - KEEP_COUNT - 10 assert len(old_messages) == expected_count assert_messages_content(old_messages, 10, 34) class TestConsolidationDeduplicationGuard: """Test that consolidation tasks are deduplicated and serialized.""" @pytest.mark.asyncio async def test_consolidation_guard_prevents_duplicate_tasks(self, tmp_path: Path) -> None: """Concurrent messages above memory_window spawn only one consolidation task.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 ) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(15): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) consolidation_calls = 0 async def _fake_consolidate(_session, archive_all: bool = False) -> None: nonlocal consolidation_calls consolidation_calls += 1 await asyncio.sleep(0.05) loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") await loop._process_message(msg) await loop._process_message(msg) await asyncio.sleep(0.1) assert consolidation_calls == 1, ( f"Expected exactly 1 consolidation, got {consolidation_calls}" ) @pytest.mark.asyncio async def test_new_command_guard_prevents_concurrent_consolidation( self, tmp_path: Path ) -> None: """/new command does not run consolidation concurrently with 
in-flight consolidation.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 ) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(15): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) consolidation_calls = 0 active = 0 max_active = 0 async def _fake_consolidate(_session, archive_all: bool = False) -> None: nonlocal consolidation_calls, active, max_active consolidation_calls += 1 active += 1 max_active = max(max_active, active) await asyncio.sleep(0.05) active -= 1 loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") await loop._process_message(msg) new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") await loop._process_message(new_msg) await asyncio.sleep(0.1) assert consolidation_calls == 2, ( f"Expected normal + /new consolidations, got {consolidation_calls}" ) assert max_active == 1, ( f"Expected serialized consolidation, observed concurrency={max_active}" ) @pytest.mark.asyncio async def test_consolidation_tasks_are_referenced(self, tmp_path: Path) -> None: """create_task results are tracked in _consolidation_tasks while in flight.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 ) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(15): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) started = asyncio.Event() async def _slow_consolidate(_session, archive_all: bool = False) -> None: started.set() await asyncio.sleep(0.1) loop._consolidate_memory = _slow_consolidate # type: ignore[method-assign] msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") await loop._process_message(msg) await started.wait() assert len(loop._consolidation_tasks) == 1, "Task must be referenced while in-flight" await asyncio.sleep(0.15) assert len(loop._consolidation_tasks) == 0, ( "Task reference must be removed after completion" ) @pytest.mark.asyncio async def test_new_waits_for_inflight_consolidation_and_preserves_messages( self, tmp_path: Path ) -> None: """/new waits for in-flight consolidation and archives before clear.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, 
model="test-model", memory_window=10 ) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(15): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) started = asyncio.Event() release = asyncio.Event() archived_count = 0 async def _fake_consolidate(sess, archive_all: bool = False) -> bool: nonlocal archived_count if archive_all: archived_count = len(sess.messages) return True started.set() await release.wait() return True loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") await loop._process_message(msg) await started.wait() new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") pending_new = asyncio.create_task(loop._process_message(new_msg)) await asyncio.sleep(0.02) assert not pending_new.done(), "/new should wait while consolidation is in-flight" release.set() response = await pending_new assert response is not None assert "new session started" in response.content.lower() assert archived_count > 0, "Expected /new archival to process a non-empty snapshot" session_after = loop.sessions.get_or_create("cli:test") assert session_after.messages == [], "Session should be cleared after successful archival" @pytest.mark.asyncio async def test_new_does_not_clear_session_when_archive_fails(self, tmp_path: Path) -> None: """/new must keep session data if archive step reports failure.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 ) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(5): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) before_count = len(session.messages) async def _failing_consolidate(sess, archive_all: bool = False) -> bool: if archive_all: return False return True loop._consolidate_memory = _failing_consolidate # type: ignore[method-assign] new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") response = await loop._process_message(new_msg) assert response is not None assert "failed" in response.content.lower() session_after = loop.sessions.get_or_create("cli:test") assert len(session_after.messages) == before_count, ( "Session must remain intact when /new archival fails" ) @pytest.mark.asyncio async def test_new_archives_only_unconsolidated_messages_after_inflight_task( self, tmp_path: Path ) -> None: """/new should archive only messages not yet consolidated by prior task.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 ) 
loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(15): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) started = asyncio.Event() release = asyncio.Event() archived_count = -1 async def _fake_consolidate(sess, archive_all: bool = False) -> bool: nonlocal archived_count if archive_all: archived_count = len(sess.messages) return True started.set() await release.wait() sess.last_consolidated = len(sess.messages) - 3 return True loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") await loop._process_message(msg) await started.wait() new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") pending_new = asyncio.create_task(loop._process_message(new_msg)) await asyncio.sleep(0.02) assert not pending_new.done() release.set() response = await pending_new assert response is not None assert "new session started" in response.content.lower() assert archived_count == 3, ( f"Expected only unconsolidated tail to archive, got {archived_count}" ) @pytest.mark.asyncio async def test_new_clears_session_and_responds(self, tmp_path: Path) -> None: """/new clears session and returns confirmation.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" loop = AgentLoop( bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 ) loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) session = loop.sessions.get_or_create("cli:test") for i in range(3): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) async def _ok_consolidate(sess, archive_all: bool = False) -> bool: return True loop._consolidate_memory = _ok_consolidate # type: ignore[method-assign] new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") response = await loop._process_message(new_msg) assert response is not None assert "new session started" in response.content.lower() assert loop.sessions.get_or_create("cli:test").messages == []
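The guard tests above exercise AgentLoop's task bookkeeping, while the earlier offset tests exercise pure slice arithmetic. As a reading aid, here is a minimal sketch of that slice-and-advance pattern; consolidate_once is a hypothetical helper written for illustration only, not nanobot's actual AgentLoop._consolidate_memory:

# Minimal sketch of the offset pattern the tests pin down.
# Assumption: illustration only; the real implementation lives in AgentLoop.
def consolidate_once(messages: list, last_consolidated: int, keep_count: int) -> tuple[list, int]:
    """Return (messages_to_archive, new_last_consolidated)."""
    old = messages[last_consolidated:-keep_count]
    if not old:
        # Nothing new outside the keep window; offset is unchanged.
        return [], last_consolidated
    # Advance the offset past everything that is now archived.
    return old, len(messages) - keep_count

# With 60 messages and keep_count=25, starting from offset 0, `old` covers
# indices 0..34 and the offset advances to 35, matching TestSliceLogic.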
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_consolidate_offset.py", "license": "MIT License", "lines": 623, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:nanobot/channels/dingtalk.py
"""DingTalk/DingDing channel implementation using Stream Mode.""" import asyncio import json import mimetypes import os import time from pathlib import Path from typing import Any from urllib.parse import unquote, urlparse import httpx from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import DingTalkConfig try: from dingtalk_stream import ( AckMessage, CallbackHandler, CallbackMessage, Credential, DingTalkStreamClient, ) from dingtalk_stream.chatbot import ChatbotMessage DINGTALK_AVAILABLE = True except ImportError: DINGTALK_AVAILABLE = False # Fallback so class definitions don't crash at module level CallbackHandler = object # type: ignore[assignment,misc] CallbackMessage = None # type: ignore[assignment,misc] AckMessage = None # type: ignore[assignment,misc] ChatbotMessage = None # type: ignore[assignment,misc] class NanobotDingTalkHandler(CallbackHandler): """ Standard DingTalk Stream SDK Callback Handler. Parses incoming messages and forwards them to the Nanobot channel. """ def __init__(self, channel: "DingTalkChannel"): super().__init__() self.channel = channel async def process(self, message: CallbackMessage): """Process incoming stream message.""" try: # Parse using SDK's ChatbotMessage for robust handling chatbot_msg = ChatbotMessage.from_dict(message.data) # Extract text content; fall back to raw dict if SDK object is empty content = "" if chatbot_msg.text: content = chatbot_msg.text.content.strip() if not content: content = message.data.get("text", {}).get("content", "").strip() if not content: logger.warning( "Received empty or unsupported message type: {}", chatbot_msg.message_type, ) return AckMessage.STATUS_OK, "OK" sender_id = chatbot_msg.sender_staff_id or chatbot_msg.sender_id sender_name = chatbot_msg.sender_nick or "Unknown" logger.info("Received DingTalk message from {} ({}): {}", sender_name, sender_id, content) # Forward to Nanobot via _on_message (non-blocking). # Store reference to prevent GC before task completes. task = asyncio.create_task( self.channel._on_message(content, sender_id, sender_name) ) self.channel._background_tasks.add(task) task.add_done_callback(self.channel._background_tasks.discard) return AckMessage.STATUS_OK, "OK" except Exception as e: logger.error("Error processing DingTalk message: {}", e) # Return OK to avoid retry loop from DingTalk server return AckMessage.STATUS_OK, "Error" class DingTalkChannel(BaseChannel): """ DingTalk channel using Stream Mode. Uses WebSocket to receive events via `dingtalk-stream` SDK. Uses direct HTTP API to send messages (SDK is mainly for receiving). Note: Currently only supports private (1:1) chat. Group messages are received but replies are sent back as private messages to the sender. 
""" name = "dingtalk" _IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"} _AUDIO_EXTS = {".amr", ".mp3", ".wav", ".ogg", ".m4a", ".aac"} _VIDEO_EXTS = {".mp4", ".mov", ".avi", ".mkv", ".webm"} def __init__(self, config: DingTalkConfig, bus: MessageBus): super().__init__(config, bus) self.config: DingTalkConfig = config self._client: Any = None self._http: httpx.AsyncClient | None = None # Access Token management for sending messages self._access_token: str | None = None self._token_expiry: float = 0 # Hold references to background tasks to prevent GC self._background_tasks: set[asyncio.Task] = set() async def start(self) -> None: """Start the DingTalk bot with Stream Mode.""" try: if not DINGTALK_AVAILABLE: logger.error( "DingTalk Stream SDK not installed. Run: pip install dingtalk-stream" ) return if not self.config.client_id or not self.config.client_secret: logger.error("DingTalk client_id and client_secret not configured") return self._running = True self._http = httpx.AsyncClient() logger.info( "Initializing DingTalk Stream Client with Client ID: {}...", self.config.client_id, ) credential = Credential(self.config.client_id, self.config.client_secret) self._client = DingTalkStreamClient(credential) # Register standard handler handler = NanobotDingTalkHandler(self) self._client.register_callback_handler(ChatbotMessage.TOPIC, handler) logger.info("DingTalk bot started with Stream Mode") # Reconnect loop: restart stream if SDK exits or crashes while self._running: try: await self._client.start() except Exception as e: logger.warning("DingTalk stream error: {}", e) if self._running: logger.info("Reconnecting DingTalk stream in 5 seconds...") await asyncio.sleep(5) except Exception as e: logger.exception("Failed to start DingTalk channel: {}", e) async def stop(self) -> None: """Stop the DingTalk bot.""" self._running = False # Close the shared HTTP client if self._http: await self._http.aclose() self._http = None # Cancel outstanding background tasks for task in self._background_tasks: task.cancel() self._background_tasks.clear() async def _get_access_token(self) -> str | None: """Get or refresh Access Token.""" if self._access_token and time.time() < self._token_expiry: return self._access_token url = "https://api.dingtalk.com/v1.0/oauth2/accessToken" data = { "appKey": self.config.client_id, "appSecret": self.config.client_secret, } if not self._http: logger.warning("DingTalk HTTP client not initialized, cannot refresh token") return None try: resp = await self._http.post(url, json=data) resp.raise_for_status() res_data = resp.json() self._access_token = res_data.get("accessToken") # Expire 60s early to be safe self._token_expiry = time.time() + int(res_data.get("expireIn", 7200)) - 60 return self._access_token except Exception as e: logger.error("Failed to get DingTalk access token: {}", e) return None @staticmethod def _is_http_url(value: str) -> bool: return urlparse(value).scheme in ("http", "https") def _guess_upload_type(self, media_ref: str) -> str: ext = Path(urlparse(media_ref).path).suffix.lower() if ext in self._IMAGE_EXTS: return "image" if ext in self._AUDIO_EXTS: return "voice" if ext in self._VIDEO_EXTS: return "video" return "file" def _guess_filename(self, media_ref: str, upload_type: str) -> str: name = os.path.basename(urlparse(media_ref).path) return name or {"image": "image.jpg", "voice": "audio.amr", "video": "video.mp4"}.get(upload_type, "file.bin") async def _read_media_bytes( self, media_ref: str, ) -> tuple[bytes | None, str | None, str | None]: if 
not media_ref: return None, None, None if self._is_http_url(media_ref): if not self._http: return None, None, None try: resp = await self._http.get(media_ref, follow_redirects=True) if resp.status_code >= 400: logger.warning( "DingTalk media download failed status={} ref={}", resp.status_code, media_ref, ) return None, None, None content_type = (resp.headers.get("content-type") or "").split(";")[0].strip() filename = self._guess_filename(media_ref, self._guess_upload_type(media_ref)) return resp.content, filename, content_type or None except Exception as e: logger.error("DingTalk media download error ref={} err={}", media_ref, e) return None, None, None try: if media_ref.startswith("file://"): parsed = urlparse(media_ref) local_path = Path(unquote(parsed.path)) else: local_path = Path(os.path.expanduser(media_ref)) if not local_path.is_file(): logger.warning("DingTalk media file not found: {}", local_path) return None, None, None data = await asyncio.to_thread(local_path.read_bytes) content_type = mimetypes.guess_type(local_path.name)[0] return data, local_path.name, content_type except Exception as e: logger.error("DingTalk media read error ref={} err={}", media_ref, e) return None, None, None async def _upload_media( self, token: str, data: bytes, media_type: str, filename: str, content_type: str | None, ) -> str | None: if not self._http: return None url = f"https://oapi.dingtalk.com/media/upload?access_token={token}&type={media_type}" mime = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream" files = {"media": (filename, data, mime)} try: resp = await self._http.post(url, files=files) text = resp.text result = resp.json() if resp.headers.get("content-type", "").startswith("application/json") else {} if resp.status_code >= 400: logger.error("DingTalk media upload failed status={} type={} body={}", resp.status_code, media_type, text[:500]) return None errcode = result.get("errcode", 0) if errcode != 0: logger.error("DingTalk media upload api error type={} errcode={} body={}", media_type, errcode, text[:500]) return None sub = result.get("result") or {} media_id = result.get("media_id") or result.get("mediaId") or sub.get("media_id") or sub.get("mediaId") if not media_id: logger.error("DingTalk media upload missing media_id body={}", text[:500]) return None return str(media_id) except Exception as e: logger.error("DingTalk media upload error type={} err={}", media_type, e) return None async def _send_batch_message( self, token: str, chat_id: str, msg_key: str, msg_param: dict[str, Any], ) -> bool: if not self._http: logger.warning("DingTalk HTTP client not initialized, cannot send") return False url = "https://api.dingtalk.com/v1.0/robot/oToMessages/batchSend" headers = {"x-acs-dingtalk-access-token": token} payload = { "robotCode": self.config.client_id, "userIds": [chat_id], "msgKey": msg_key, "msgParam": json.dumps(msg_param, ensure_ascii=False), } try: resp = await self._http.post(url, json=payload, headers=headers) body = resp.text if resp.status_code != 200: logger.error("DingTalk send failed msgKey={} status={} body={}", msg_key, resp.status_code, body[:500]) return False try: result = resp.json() except Exception: result = {} errcode = result.get("errcode") if errcode not in (None, 0): logger.error("DingTalk send api error msgKey={} errcode={} body={}", msg_key, errcode, body[:500]) return False logger.debug("DingTalk message sent to {} with msgKey={}", chat_id, msg_key) return True except Exception as e: logger.error("Error sending DingTalk message 
msgKey={} err={}", msg_key, e) return False async def _send_markdown_text(self, token: str, chat_id: str, content: str) -> bool: return await self._send_batch_message( token, chat_id, "sampleMarkdown", {"text": content, "title": "Nanobot Reply"}, ) async def _send_media_ref(self, token: str, chat_id: str, media_ref: str) -> bool: media_ref = (media_ref or "").strip() if not media_ref: return True upload_type = self._guess_upload_type(media_ref) if upload_type == "image" and self._is_http_url(media_ref): ok = await self._send_batch_message( token, chat_id, "sampleImageMsg", {"photoURL": media_ref}, ) if ok: return True logger.warning("DingTalk image url send failed, trying upload fallback: {}", media_ref) data, filename, content_type = await self._read_media_bytes(media_ref) if not data: logger.error("DingTalk media read failed: {}", media_ref) return False filename = filename or self._guess_filename(media_ref, upload_type) file_type = Path(filename).suffix.lower().lstrip(".") if not file_type: guessed = mimetypes.guess_extension(content_type or "") file_type = (guessed or ".bin").lstrip(".") if file_type == "jpeg": file_type = "jpg" media_id = await self._upload_media( token=token, data=data, media_type=upload_type, filename=filename, content_type=content_type, ) if not media_id: return False if upload_type == "image": # Verified in production: sampleImageMsg accepts media_id in photoURL. ok = await self._send_batch_message( token, chat_id, "sampleImageMsg", {"photoURL": media_id}, ) if ok: return True logger.warning("DingTalk image media_id send failed, falling back to file: {}", media_ref) return await self._send_batch_message( token, chat_id, "sampleFile", {"mediaId": media_id, "fileName": filename, "fileType": file_type}, ) async def send(self, msg: OutboundMessage) -> None: """Send a message through DingTalk.""" token = await self._get_access_token() if not token: return if msg.content and msg.content.strip(): await self._send_markdown_text(token, msg.chat_id, msg.content.strip()) for media_ref in msg.media or []: ok = await self._send_media_ref(token, msg.chat_id, media_ref) if ok: continue logger.error("DingTalk media send failed for {}", media_ref) # Send visible fallback so failures are observable by the user. filename = self._guess_filename(media_ref, self._guess_upload_type(media_ref)) await self._send_markdown_text( token, msg.chat_id, f"[Attachment send failed: {filename}]", ) async def _on_message(self, content: str, sender_id: str, sender_name: str) -> None: """Handle incoming message (called by NanobotDingTalkHandler). Delegates to BaseChannel._handle_message() which enforces allow_from permission checks before publishing to the bus. """ try: logger.info("DingTalk inbound: {} from {}", content, sender_name) await self._handle_message( sender_id=sender_id, chat_id=sender_id, # For private chat, chat_id == sender_id content=str(content), metadata={ "sender_name": sender_name, "platform": "dingtalk", }, ) except Exception as e: logger.error("Error publishing DingTalk message: {}", e)
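The media fallback chain in _send_media_ref is driven by two small helpers. A few illustrative values, derived from the extension tables above; `ch` stands for a hypothetical DingTalkChannel instance whose construction is elided since it needs a real config:

# Illustrative values for the helpers above (`ch` is a hypothetical instance).
#   ch._guess_upload_type("https://example.com/pic.PNG?x=1")  -> "image"
#   ch._guess_upload_type("/tmp/note.amr")                    -> "voice"
#   ch._guess_upload_type("file:///data/report.pdf")          -> "file"
# When the URL path carries no basename, _guess_filename falls back per type:
#   ch._guess_filename("https://example.com/", "image")       -> "image.jpg"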
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/dingtalk.py", "license": "MIT License", "lines": 377, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/email.py
"""Email channel implementation using IMAP polling + SMTP replies.""" import asyncio import html import imaplib import re import smtplib import ssl from datetime import date from email import policy from email.header import decode_header, make_header from email.message import EmailMessage from email.parser import BytesParser from email.utils import parseaddr from typing import Any from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import EmailConfig class EmailChannel(BaseChannel): """ Email channel. Inbound: - Poll IMAP mailbox for unread messages. - Convert each message into an inbound event. Outbound: - Send responses via SMTP back to the sender address. """ name = "email" _IMAP_MONTHS = ( "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ) def __init__(self, config: EmailConfig, bus: MessageBus): super().__init__(config, bus) self.config: EmailConfig = config self._last_subject_by_chat: dict[str, str] = {} self._last_message_id_by_chat: dict[str, str] = {} self._processed_uids: set[str] = set() # Capped to prevent unbounded growth self._MAX_PROCESSED_UIDS = 100000 async def start(self) -> None: """Start polling IMAP for inbound emails.""" if not self.config.consent_granted: logger.warning( "Email channel disabled: consent_granted is false. " "Set channels.email.consentGranted=true after explicit user permission." ) return if not self._validate_config(): return self._running = True logger.info("Starting Email channel (IMAP polling mode)...") poll_seconds = max(5, int(self.config.poll_interval_seconds)) while self._running: try: inbound_items = await asyncio.to_thread(self._fetch_new_messages) for item in inbound_items: sender = item["sender"] subject = item.get("subject", "") message_id = item.get("message_id", "") if subject: self._last_subject_by_chat[sender] = subject if message_id: self._last_message_id_by_chat[sender] = message_id await self._handle_message( sender_id=sender, chat_id=sender, content=item["content"], metadata=item.get("metadata", {}), ) except Exception as e: logger.error("Email polling error: {}", e) await asyncio.sleep(poll_seconds) async def stop(self) -> None: """Stop polling loop.""" self._running = False async def send(self, msg: OutboundMessage) -> None: """Send email via SMTP.""" if not self.config.consent_granted: logger.warning("Skip email send: consent_granted is false") return if not self.config.smtp_host: logger.warning("Email channel SMTP host not configured") return to_addr = msg.chat_id.strip() if not to_addr: logger.warning("Email channel missing recipient address") return # Determine if this is a reply (recipient has sent us an email before) is_reply = to_addr in self._last_subject_by_chat force_send = bool((msg.metadata or {}).get("force_send")) # autoReplyEnabled only controls automatic replies, not proactive sends if is_reply and not self.config.auto_reply_enabled and not force_send: logger.info("Skip automatic email reply to {}: auto_reply_enabled is false", to_addr) return base_subject = self._last_subject_by_chat.get(to_addr, "nanobot reply") subject = self._reply_subject(base_subject) if msg.metadata and isinstance(msg.metadata.get("subject"), str): override = msg.metadata["subject"].strip() if override: subject = override email_msg = EmailMessage() email_msg["From"] = self.config.from_address or self.config.smtp_username or self.config.imap_username email_msg["To"] = 
to_addr email_msg["Subject"] = subject email_msg.set_content(msg.content or "") in_reply_to = self._last_message_id_by_chat.get(to_addr) if in_reply_to: email_msg["In-Reply-To"] = in_reply_to email_msg["References"] = in_reply_to try: await asyncio.to_thread(self._smtp_send, email_msg) except Exception as e: logger.error("Error sending email to {}: {}", to_addr, e) raise def _validate_config(self) -> bool: missing = [] if not self.config.imap_host: missing.append("imap_host") if not self.config.imap_username: missing.append("imap_username") if not self.config.imap_password: missing.append("imap_password") if not self.config.smtp_host: missing.append("smtp_host") if not self.config.smtp_username: missing.append("smtp_username") if not self.config.smtp_password: missing.append("smtp_password") if missing: logger.error("Email channel not configured, missing: {}", ', '.join(missing)) return False return True def _smtp_send(self, msg: EmailMessage) -> None: timeout = 30 if self.config.smtp_use_ssl: with smtplib.SMTP_SSL( self.config.smtp_host, self.config.smtp_port, timeout=timeout, ) as smtp: smtp.login(self.config.smtp_username, self.config.smtp_password) smtp.send_message(msg) return with smtplib.SMTP(self.config.smtp_host, self.config.smtp_port, timeout=timeout) as smtp: if self.config.smtp_use_tls: smtp.starttls(context=ssl.create_default_context()) smtp.login(self.config.smtp_username, self.config.smtp_password) smtp.send_message(msg) def _fetch_new_messages(self) -> list[dict[str, Any]]: """Poll IMAP and return parsed unread messages.""" return self._fetch_messages( search_criteria=("UNSEEN",), mark_seen=self.config.mark_seen, dedupe=True, limit=0, ) def fetch_messages_between_dates( self, start_date: date, end_date: date, limit: int = 20, ) -> list[dict[str, Any]]: """ Fetch messages in [start_date, end_date) by IMAP date search. This is used for historical summarization tasks (e.g. "yesterday"). 
""" if end_date <= start_date: return [] return self._fetch_messages( search_criteria=( "SINCE", self._format_imap_date(start_date), "BEFORE", self._format_imap_date(end_date), ), mark_seen=False, dedupe=False, limit=max(1, int(limit)), ) def _fetch_messages( self, search_criteria: tuple[str, ...], mark_seen: bool, dedupe: bool, limit: int, ) -> list[dict[str, Any]]: """Fetch messages by arbitrary IMAP search criteria.""" messages: list[dict[str, Any]] = [] mailbox = self.config.imap_mailbox or "INBOX" if self.config.imap_use_ssl: client = imaplib.IMAP4_SSL(self.config.imap_host, self.config.imap_port) else: client = imaplib.IMAP4(self.config.imap_host, self.config.imap_port) try: client.login(self.config.imap_username, self.config.imap_password) status, _ = client.select(mailbox) if status != "OK": return messages status, data = client.search(None, *search_criteria) if status != "OK" or not data: return messages ids = data[0].split() if limit > 0 and len(ids) > limit: ids = ids[-limit:] for imap_id in ids: status, fetched = client.fetch(imap_id, "(BODY.PEEK[] UID)") if status != "OK" or not fetched: continue raw_bytes = self._extract_message_bytes(fetched) if raw_bytes is None: continue uid = self._extract_uid(fetched) if dedupe and uid and uid in self._processed_uids: continue parsed = BytesParser(policy=policy.default).parsebytes(raw_bytes) sender = parseaddr(parsed.get("From", ""))[1].strip().lower() if not sender: continue subject = self._decode_header_value(parsed.get("Subject", "")) date_value = parsed.get("Date", "") message_id = parsed.get("Message-ID", "").strip() body = self._extract_text_body(parsed) if not body: body = "(empty email body)" body = body[: self.config.max_body_chars] content = ( f"Email received.\n" f"From: {sender}\n" f"Subject: {subject}\n" f"Date: {date_value}\n\n" f"{body}" ) metadata = { "message_id": message_id, "subject": subject, "date": date_value, "sender_email": sender, "uid": uid, } messages.append( { "sender": sender, "subject": subject, "message_id": message_id, "content": content, "metadata": metadata, } ) if dedupe and uid: self._processed_uids.add(uid) # mark_seen is the primary dedup; this set is a safety net if len(self._processed_uids) > self._MAX_PROCESSED_UIDS: # Evict a random half to cap memory; mark_seen is the primary dedup self._processed_uids = set(list(self._processed_uids)[len(self._processed_uids) // 2:]) if mark_seen: client.store(imap_id, "+FLAGS", "\\Seen") finally: try: client.logout() except Exception: pass return messages @classmethod def _format_imap_date(cls, value: date) -> str: """Format date for IMAP search (always English month abbreviations).""" month = cls._IMAP_MONTHS[value.month - 1] return f"{value.day:02d}-{month}-{value.year}" @staticmethod def _extract_message_bytes(fetched: list[Any]) -> bytes | None: for item in fetched: if isinstance(item, tuple) and len(item) >= 2 and isinstance(item[1], (bytes, bytearray)): return bytes(item[1]) return None @staticmethod def _extract_uid(fetched: list[Any]) -> str: for item in fetched: if isinstance(item, tuple) and item and isinstance(item[0], (bytes, bytearray)): head = bytes(item[0]).decode("utf-8", errors="ignore") m = re.search(r"UID\s+(\d+)", head) if m: return m.group(1) return "" @staticmethod def _decode_header_value(value: str) -> str: if not value: return "" try: return str(make_header(decode_header(value))) except Exception: return value @classmethod def _extract_text_body(cls, msg: Any) -> str: """Best-effort extraction of readable body text.""" if 
msg.is_multipart(): plain_parts: list[str] = [] html_parts: list[str] = [] for part in msg.walk(): if part.get_content_disposition() == "attachment": continue content_type = part.get_content_type() try: payload = part.get_content() except Exception: payload_bytes = part.get_payload(decode=True) or b"" charset = part.get_content_charset() or "utf-8" payload = payload_bytes.decode(charset, errors="replace") if not isinstance(payload, str): continue if content_type == "text/plain": plain_parts.append(payload) elif content_type == "text/html": html_parts.append(payload) if plain_parts: return "\n\n".join(plain_parts).strip() if html_parts: return cls._html_to_text("\n\n".join(html_parts)).strip() return "" try: payload = msg.get_content() except Exception: payload_bytes = msg.get_payload(decode=True) or b"" charset = msg.get_content_charset() or "utf-8" payload = payload_bytes.decode(charset, errors="replace") if not isinstance(payload, str): return "" if msg.get_content_type() == "text/html": return cls._html_to_text(payload).strip() return payload.strip() @staticmethod def _html_to_text(raw_html: str) -> str: text = re.sub(r"<\s*br\s*/?>", "\n", raw_html, flags=re.IGNORECASE) text = re.sub(r"<\s*/\s*p\s*>", "\n", text, flags=re.IGNORECASE) text = re.sub(r"<[^>]+>", "", text) return html.unescape(text) def _reply_subject(self, base_subject: str) -> str: subject = (base_subject or "").strip() or "nanobot reply" prefix = self.config.subject_prefix or "Re: " if subject.lower().startswith("re:"): return subject return f"{prefix}{subject}"
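A worked example of the date-windowed search above; the values follow directly from _format_imap_date, which always emits English month abbreviations as IMAP requires, regardless of locale:

# Worked example of the date-window search:
#   EmailChannel._format_imap_date(date(2024, 3, 5)) -> "05-Mar-2024"
# so fetch_messages_between_dates(date(2024, 3, 4), date(2024, 3, 5)) searches
# with criteria ("SINCE", "04-Mar-2024", "BEFORE", "05-Mar-2024"), i.e. the
# half-open window [start_date, end_date).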
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/email.py", "license": "MIT License", "lines": 351, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/qq.py
"""QQ channel implementation using botpy SDK.""" import asyncio from collections import deque from typing import TYPE_CHECKING from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import QQConfig try: import botpy from botpy.message import C2CMessage QQ_AVAILABLE = True except ImportError: QQ_AVAILABLE = False botpy = None C2CMessage = None if TYPE_CHECKING: from botpy.message import C2CMessage def _make_bot_class(channel: "QQChannel") -> "type[botpy.Client]": """Create a botpy Client subclass bound to the given channel.""" intents = botpy.Intents(public_messages=True, direct_message=True) class _Bot(botpy.Client): def __init__(self): # Disable botpy's file log — nanobot uses loguru; default "botpy.log" fails on read-only fs super().__init__(intents=intents, ext_handlers=False) async def on_ready(self): logger.info("QQ bot ready: {}", self.robot.name) async def on_c2c_message_create(self, message: "C2CMessage"): await channel._on_message(message) async def on_direct_message_create(self, message): await channel._on_message(message) return _Bot class QQChannel(BaseChannel): """QQ channel using botpy SDK with WebSocket connection.""" name = "qq" def __init__(self, config: QQConfig, bus: MessageBus): super().__init__(config, bus) self.config: QQConfig = config self._client: "botpy.Client | None" = None self._processed_ids: deque = deque(maxlen=1000) async def start(self) -> None: """Start the QQ bot.""" if not QQ_AVAILABLE: logger.error("QQ SDK not installed. Run: pip install qq-botpy") return if not self.config.app_id or not self.config.secret: logger.error("QQ app_id and secret not configured") return self._running = True BotClass = _make_bot_class(self) self._client = BotClass() logger.info("QQ bot started (C2C private message)") await self._run_bot() async def _run_bot(self) -> None: """Run the bot connection with auto-reconnect.""" while self._running: try: await self._client.start(appid=self.config.app_id, secret=self.config.secret) except Exception as e: logger.warning("QQ bot error: {}", e) if self._running: logger.info("Reconnecting QQ bot in 5 seconds...") await asyncio.sleep(5) async def stop(self) -> None: """Stop the QQ bot.""" self._running = False if self._client: try: await self._client.close() except Exception: pass logger.info("QQ bot stopped") async def send(self, msg: OutboundMessage) -> None: """Send a message through QQ.""" if not self._client: logger.warning("QQ client not initialized") return try: msg_id = msg.metadata.get("message_id") await self._client.api.post_c2c_message( openid=msg.chat_id, msg_type=0, content=msg.content, msg_id=msg_id, ) except Exception as e: logger.error("Error sending QQ message: {}", e) async def _on_message(self, data: "C2CMessage") -> None: """Handle incoming message from QQ.""" try: # Dedup by message ID if data.id in self._processed_ids: return self._processed_ids.append(data.id) author = data.author user_id = str(getattr(author, 'id', None) or getattr(author, 'user_openid', 'unknown')) content = (data.content or "").strip() if not content: return await self._handle_message( sender_id=user_id, chat_id=user_id, content=content, metadata={"message_id": data.id}, ) except Exception: logger.exception("Error handling QQ message")
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/qq.py", "license": "MIT License", "lines": 108, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/slack.py
"""Slack channel implementation using Socket Mode.""" import asyncio import re from typing import Any from loguru import logger from slack_sdk.socket_mode.request import SocketModeRequest from slack_sdk.socket_mode.response import SocketModeResponse from slack_sdk.socket_mode.websockets import SocketModeClient from slack_sdk.web.async_client import AsyncWebClient from slackify_markdown import slackify_markdown from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import SlackConfig class SlackChannel(BaseChannel): """Slack channel using Socket Mode.""" name = "slack" def __init__(self, config: SlackConfig, bus: MessageBus): super().__init__(config, bus) self.config: SlackConfig = config self._web_client: AsyncWebClient | None = None self._socket_client: SocketModeClient | None = None self._bot_user_id: str | None = None async def start(self) -> None: """Start the Slack Socket Mode client.""" if not self.config.bot_token or not self.config.app_token: logger.error("Slack bot/app token not configured") return if self.config.mode != "socket": logger.error("Unsupported Slack mode: {}", self.config.mode) return self._running = True self._web_client = AsyncWebClient(token=self.config.bot_token) self._socket_client = SocketModeClient( app_token=self.config.app_token, web_client=self._web_client, ) self._socket_client.socket_mode_request_listeners.append(self._on_socket_request) # Resolve bot user ID for mention handling try: auth = await self._web_client.auth_test() self._bot_user_id = auth.get("user_id") logger.info("Slack bot connected as {}", self._bot_user_id) except Exception as e: logger.warning("Slack auth_test failed: {}", e) logger.info("Starting Slack Socket Mode client...") await self._socket_client.connect() while self._running: await asyncio.sleep(1) async def stop(self) -> None: """Stop the Slack client.""" self._running = False if self._socket_client: try: await self._socket_client.close() except Exception as e: logger.warning("Slack socket close failed: {}", e) self._socket_client = None async def send(self, msg: OutboundMessage) -> None: """Send a message through Slack.""" if not self._web_client: logger.warning("Slack client not running") return try: slack_meta = msg.metadata.get("slack", {}) if msg.metadata else {} thread_ts = slack_meta.get("thread_ts") channel_type = slack_meta.get("channel_type") # Only reply in thread for channel/group messages; DMs don't use threads use_thread = thread_ts and channel_type != "im" thread_ts_param = thread_ts if use_thread else None if msg.content: await self._web_client.chat_postMessage( channel=msg.chat_id, text=self._to_mrkdwn(msg.content), thread_ts=thread_ts_param, ) for media_path in msg.media or []: try: await self._web_client.files_upload_v2( channel=msg.chat_id, file=media_path, thread_ts=thread_ts_param, ) except Exception as e: logger.error("Failed to upload file {}: {}", media_path, e) except Exception as e: logger.error("Error sending Slack message: {}", e) async def _on_socket_request( self, client: SocketModeClient, req: SocketModeRequest, ) -> None: """Handle incoming Socket Mode requests.""" if req.type != "events_api": return # Acknowledge right away await client.send_socket_mode_response( SocketModeResponse(envelope_id=req.envelope_id) ) payload = req.payload or {} event = payload.get("event") or {} event_type = event.get("type") # Handle app mentions or plain messages if event_type not in ("message", "app_mention"): 
return sender_id = event.get("user") chat_id = event.get("channel") # Ignore bot/system messages (any subtype = not a normal user message) if event.get("subtype"): return if self._bot_user_id and sender_id == self._bot_user_id: return # Avoid double-processing: Slack sends both `message` and `app_mention` # for mentions in channels. Prefer `app_mention`. text = event.get("text") or "" if event_type == "message" and self._bot_user_id and f"<@{self._bot_user_id}>" in text: return # Debug: log basic event shape logger.debug( "Slack event: type={} subtype={} user={} channel={} channel_type={} text={}", event_type, event.get("subtype"), sender_id, chat_id, event.get("channel_type"), text[:80], ) if not sender_id or not chat_id: return channel_type = event.get("channel_type") or "" if not self._is_allowed(sender_id, chat_id, channel_type): return if channel_type != "im" and not self._should_respond_in_channel(event_type, text, chat_id): return text = self._strip_bot_mention(text) thread_ts = event.get("thread_ts") if self.config.reply_in_thread and not thread_ts: thread_ts = event.get("ts") # Add :eyes: reaction to the triggering message (best-effort) try: if self._web_client and event.get("ts"): await self._web_client.reactions_add( channel=chat_id, name=self.config.react_emoji, timestamp=event.get("ts"), ) except Exception as e: logger.debug("Slack reactions_add failed: {}", e) # Thread-scoped session key for channel/group messages session_key = f"slack:{chat_id}:{thread_ts}" if thread_ts and channel_type != "im" else None try: await self._handle_message( sender_id=sender_id, chat_id=chat_id, content=text, metadata={ "slack": { "event": event, "thread_ts": thread_ts, "channel_type": channel_type, }, }, session_key=session_key, ) except Exception: logger.exception("Error handling Slack message from {}", sender_id) def _is_allowed(self, sender_id: str, chat_id: str, channel_type: str) -> bool: if channel_type == "im": if not self.config.dm.enabled: return False if self.config.dm.policy == "allowlist": return sender_id in self.config.dm.allow_from return True # Group / channel messages if self.config.group_policy == "allowlist": return chat_id in self.config.group_allow_from return True def _should_respond_in_channel(self, event_type: str, text: str, chat_id: str) -> bool: if self.config.group_policy == "open": return True if self.config.group_policy == "mention": if event_type == "app_mention": return True return self._bot_user_id is not None and f"<@{self._bot_user_id}>" in text if self.config.group_policy == "allowlist": return chat_id in self.config.group_allow_from return False def _strip_bot_mention(self, text: str) -> str: if not text or not self._bot_user_id: return text return re.sub(rf"<@{re.escape(self._bot_user_id)}>\s*", "", text).strip() _TABLE_RE = re.compile(r"(?m)^\|.*\|$(?:\n\|[\s:|-]*\|$)(?:\n\|.*\|$)*") _CODE_FENCE_RE = re.compile(r"```[\s\S]*?```") _INLINE_CODE_RE = re.compile(r"`[^`]+`") _LEFTOVER_BOLD_RE = re.compile(r"\*\*(.+?)\*\*") _LEFTOVER_HEADER_RE = re.compile(r"^#{1,6}\s+(.+)$", re.MULTILINE) _BARE_URL_RE = re.compile(r"(?<![|<])(https?://\S+)") @classmethod def _to_mrkdwn(cls, text: str) -> str: """Convert Markdown to Slack mrkdwn, including tables.""" if not text: return "" text = cls._TABLE_RE.sub(cls._convert_table, text) return cls._fixup_mrkdwn(slackify_markdown(text)) @classmethod def _fixup_mrkdwn(cls, text: str) -> str: """Fix markdown artifacts that slackify_markdown misses.""" code_blocks: list[str] = [] def _save_code(m: re.Match) -> str: 
code_blocks.append(m.group(0)) return f"\x00CB{len(code_blocks) - 1}\x00" text = cls._CODE_FENCE_RE.sub(_save_code, text) text = cls._INLINE_CODE_RE.sub(_save_code, text) text = cls._LEFTOVER_BOLD_RE.sub(r"*\1*", text) text = cls._LEFTOVER_HEADER_RE.sub(r"*\1*", text) text = cls._BARE_URL_RE.sub(lambda m: m.group(0).replace("&amp;", "&"), text) for i, block in enumerate(code_blocks): text = text.replace(f"\x00CB{i}\x00", block) return text @staticmethod def _convert_table(match: re.Match) -> str: """Convert a Markdown table to a Slack-readable list.""" lines = [ln.strip() for ln in match.group(0).strip().splitlines() if ln.strip()] if len(lines) < 2: return match.group(0) headers = [h.strip() for h in lines[0].strip("|").split("|")] start = 2 if re.fullmatch(r"[|\s:\-]+", lines[1]) else 1 rows: list[str] = [] for line in lines[start:]: cells = [c.strip() for c in line.strip("|").split("|")] cells = (cells + [""] * len(headers))[: len(headers)] parts = [f"**{headers[i]}**: {cells[i]}" for i in range(len(headers)) if cells[i]] if parts: rows.append(" · ".join(parts)) return "\n".join(rows)
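A minimal usage sketch of the table handling above (illustrative only, not part of the module): `_convert_table` is a staticmethod and `_TABLE_RE` a class attribute, so they can be exercised without constructing a `SlackChannel`; the `**bold**` markers it emits are later collapsed to Slack's `*bold*` when `_to_mrkdwn` runs `slackify_markdown`.

# Sketch only: exercise the table conversion without a SlackChannel instance.
md_table = (
    "| name | role |\n"
    "| --- | --- |\n"
    "| alice | admin |\n"
    "| bob | user |\n"
)
converted = SlackChannel._TABLE_RE.sub(SlackChannel._convert_table, md_table)
print(converted)
# **name**: alice · **role**: admin
# **name**: bob · **role**: user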
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/slack.py", "license": "MIT License", "lines": 235, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/providers/registry.py
""" Provider Registry — single source of truth for LLM provider metadata. Adding a new provider: 1. Add a ProviderSpec to PROVIDERS below. 2. Add a field to ProvidersConfig in config/schema.py. Done. Env vars, prefixing, config matching, status display all derive from here. Order matters — it controls match priority and fallback. Gateways first. Every entry writes out all fields so you can copy-paste as a template. """ from __future__ import annotations from dataclasses import dataclass from typing import Any @dataclass(frozen=True) class ProviderSpec: """One LLM provider's metadata. See PROVIDERS below for real examples. Placeholders in env_extras values: {api_key} — the user's API key {api_base} — api_base from config, or this spec's default_api_base """ # identity name: str # config field name, e.g. "dashscope" keywords: tuple[str, ...] # model-name keywords for matching (lowercase) env_key: str # LiteLLM env var, e.g. "DASHSCOPE_API_KEY" display_name: str = "" # shown in `nanobot status` # model prefixing litellm_prefix: str = "" # "dashscope" → model becomes "dashscope/{model}" skip_prefixes: tuple[str, ...] = () # don't prefix if model already starts with these # extra env vars, e.g. (("ZHIPUAI_API_KEY", "{api_key}"),) env_extras: tuple[tuple[str, str], ...] = () # gateway / local detection is_gateway: bool = False # routes any model (OpenRouter, AiHubMix) is_local: bool = False # local deployment (vLLM, Ollama) detect_by_key_prefix: str = "" # match api_key prefix, e.g. "sk-or-" detect_by_base_keyword: str = "" # match substring in api_base URL default_api_base: str = "" # fallback base URL # gateway behavior strip_model_prefix: bool = False # strip "provider/" before re-prefixing # per-model param overrides, e.g. (("kimi-k2.5", {"temperature": 1.0}),) model_overrides: tuple[tuple[str, dict[str, Any]], ...] = () # OAuth-based providers (e.g., OpenAI Codex) don't use API keys is_oauth: bool = False # if True, uses OAuth flow instead of API key # Direct providers bypass LiteLLM entirely (e.g., CustomProvider) is_direct: bool = False # Provider supports cache_control on content blocks (e.g. Anthropic prompt caching) supports_prompt_caching: bool = False @property def label(self) -> str: return self.display_name or self.name.title() # --------------------------------------------------------------------------- # PROVIDERS — the registry. Order = priority. Copy any entry as template. # --------------------------------------------------------------------------- PROVIDERS: tuple[ProviderSpec, ...] = ( # === Custom (direct OpenAI-compatible endpoint, bypasses LiteLLM) ====== ProviderSpec( name="custom", keywords=(), env_key="", display_name="Custom", litellm_prefix="", is_direct=True, ), # === Gateways (detected by api_key / api_base, not model name) ========= # Gateways can route any model, so they win in fallback. # OpenRouter: global gateway, keys start with "sk-or-" ProviderSpec( name="openrouter", keywords=("openrouter",), env_key="OPENROUTER_API_KEY", display_name="OpenRouter", litellm_prefix="openrouter", # claude-3 → openrouter/claude-3 skip_prefixes=(), env_extras=(), is_gateway=True, is_local=False, detect_by_key_prefix="sk-or-", detect_by_base_keyword="openrouter", default_api_base="https://openrouter.ai/api/v1", strip_model_prefix=False, model_overrides=(), supports_prompt_caching=True, ), # AiHubMix: global gateway, OpenAI-compatible interface. 
# strip_model_prefix=True: it doesn't understand "anthropic/claude-3", # so we strip to bare "claude-3" then re-prefix as "openai/claude-3". ProviderSpec( name="aihubmix", keywords=("aihubmix",), env_key="OPENAI_API_KEY", # OpenAI-compatible display_name="AiHubMix", litellm_prefix="openai", # → openai/{model} skip_prefixes=(), env_extras=(), is_gateway=True, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="aihubmix", default_api_base="https://aihubmix.com/v1", strip_model_prefix=True, # anthropic/claude-3 → claude-3 → openai/claude-3 model_overrides=(), ), # SiliconFlow (硅基流动): OpenAI-compatible gateway, model names keep org prefix ProviderSpec( name="siliconflow", keywords=("siliconflow",), env_key="OPENAI_API_KEY", display_name="SiliconFlow", litellm_prefix="openai", skip_prefixes=(), env_extras=(), is_gateway=True, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="siliconflow", default_api_base="https://api.siliconflow.cn/v1", strip_model_prefix=False, model_overrides=(), ), # VolcEngine (火山引擎): OpenAI-compatible gateway ProviderSpec( name="volcengine", keywords=("volcengine", "volces", "ark"), env_key="OPENAI_API_KEY", display_name="VolcEngine", litellm_prefix="volcengine", skip_prefixes=(), env_extras=(), is_gateway=True, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="volces", default_api_base="https://ark.cn-beijing.volces.com/api/v3", strip_model_prefix=False, model_overrides=(), ), # === Standard providers (matched by model-name keywords) =============== # Anthropic: LiteLLM recognizes "claude-*" natively, no prefix needed. ProviderSpec( name="anthropic", keywords=("anthropic", "claude"), env_key="ANTHROPIC_API_KEY", display_name="Anthropic", litellm_prefix="", skip_prefixes=(), env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), supports_prompt_caching=True, ), # OpenAI: LiteLLM recognizes "gpt-*" natively, no prefix needed. ProviderSpec( name="openai", keywords=("openai", "gpt"), env_key="OPENAI_API_KEY", display_name="OpenAI", litellm_prefix="", skip_prefixes=(), env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), ), # OpenAI Codex: uses OAuth, not API key. ProviderSpec( name="openai_codex", keywords=("openai-codex",), env_key="", # OAuth-based, no API key display_name="OpenAI Codex", litellm_prefix="", # Not routed through LiteLLM skip_prefixes=(), env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="codex", default_api_base="https://chatgpt.com/backend-api", strip_model_prefix=False, model_overrides=(), is_oauth=True, # OAuth-based authentication ), # Github Copilot: uses OAuth, not API key. ProviderSpec( name="github_copilot", keywords=("github_copilot", "copilot"), env_key="", # OAuth-based, no API key display_name="Github Copilot", litellm_prefix="github_copilot", # github_copilot/model → github_copilot/model skip_prefixes=("github_copilot/",), env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), is_oauth=True, # OAuth-based authentication ), # DeepSeek: needs "deepseek/" prefix for LiteLLM routing. 
ProviderSpec( name="deepseek", keywords=("deepseek",), env_key="DEEPSEEK_API_KEY", display_name="DeepSeek", litellm_prefix="deepseek", # deepseek-chat → deepseek/deepseek-chat skip_prefixes=("deepseek/",), # avoid double-prefix env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), ), # Gemini: needs "gemini/" prefix for LiteLLM. ProviderSpec( name="gemini", keywords=("gemini",), env_key="GEMINI_API_KEY", display_name="Gemini", litellm_prefix="gemini", # gemini-pro → gemini/gemini-pro skip_prefixes=("gemini/",), # avoid double-prefix env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), ), # Zhipu: LiteLLM uses "zai/" prefix. # Also mirrors key to ZHIPUAI_API_KEY (some LiteLLM paths check that). # skip_prefixes: don't add "zai/" when already routed via gateway. ProviderSpec( name="zhipu", keywords=("zhipu", "glm", "zai"), env_key="ZAI_API_KEY", display_name="Zhipu AI", litellm_prefix="zai", # glm-4 → zai/glm-4 skip_prefixes=("zhipu/", "zai/", "openrouter/", "hosted_vllm/"), env_extras=( ("ZHIPUAI_API_KEY", "{api_key}"), ), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), ), # DashScope: Qwen models, needs "dashscope/" prefix. ProviderSpec( name="dashscope", keywords=("qwen", "dashscope"), env_key="DASHSCOPE_API_KEY", display_name="DashScope", litellm_prefix="dashscope", # qwen-max → dashscope/qwen-max skip_prefixes=("dashscope/", "openrouter/"), env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), ), # Moonshot: Kimi models, needs "moonshot/" prefix. # LiteLLM requires MOONSHOT_API_BASE env var to find the endpoint. # Kimi K2.5 API enforces temperature >= 1.0. ProviderSpec( name="moonshot", keywords=("moonshot", "kimi"), env_key="MOONSHOT_API_KEY", display_name="Moonshot", litellm_prefix="moonshot", # kimi-k2.5 → moonshot/kimi-k2.5 skip_prefixes=("moonshot/", "openrouter/"), env_extras=( ("MOONSHOT_API_BASE", "{api_base}"), ), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="https://api.moonshot.ai/v1", # intl; use api.moonshot.cn for China strip_model_prefix=False, model_overrides=( ("kimi-k2.5", {"temperature": 1.0}), ), ), # MiniMax: needs "minimax/" prefix for LiteLLM routing. # Uses OpenAI-compatible API at api.minimax.io/v1. ProviderSpec( name="minimax", keywords=("minimax",), env_key="MINIMAX_API_KEY", display_name="MiniMax", litellm_prefix="minimax", # MiniMax-M2.1 → minimax/MiniMax-M2.1 skip_prefixes=("minimax/", "openrouter/"), env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="https://api.minimax.io/v1", strip_model_prefix=False, model_overrides=(), ), # === Local deployment (matched by config key, NOT by api_base) ========= # vLLM / any OpenAI-compatible local server. # Detected when config key is "vllm" (provider_name="vllm"). 
ProviderSpec( name="vllm", keywords=("vllm",), env_key="HOSTED_VLLM_API_KEY", display_name="vLLM/Local", litellm_prefix="hosted_vllm", # Llama-3-8B → hosted_vllm/Llama-3-8B skip_prefixes=(), env_extras=(), is_gateway=False, is_local=True, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", # user must provide in config strip_model_prefix=False, model_overrides=(), ), # === Auxiliary (not a primary LLM provider) ============================ # Groq: mainly used for Whisper voice transcription, also usable for LLM. # Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback. ProviderSpec( name="groq", keywords=("groq",), env_key="GROQ_API_KEY", display_name="Groq", litellm_prefix="groq", # llama3-8b-8192 → groq/llama3-8b-8192 skip_prefixes=("groq/",), # avoid double-prefix env_extras=(), is_gateway=False, is_local=False, detect_by_key_prefix="", detect_by_base_keyword="", default_api_base="", strip_model_prefix=False, model_overrides=(), ), ) # --------------------------------------------------------------------------- # Lookup helpers # --------------------------------------------------------------------------- def find_by_model(model: str) -> ProviderSpec | None: """Match a standard provider by model-name keyword (case-insensitive). Skips gateways/local — those are matched by api_key/api_base instead.""" model_lower = model.lower() model_normalized = model_lower.replace("-", "_") model_prefix = model_lower.split("/", 1)[0] if "/" in model_lower else "" normalized_prefix = model_prefix.replace("-", "_") std_specs = [s for s in PROVIDERS if not s.is_gateway and not s.is_local] # Prefer explicit provider prefix — prevents `github-copilot/...codex` matching openai_codex. for spec in std_specs: if model_prefix and normalized_prefix == spec.name: return spec for spec in std_specs: if any(kw in model_lower or kw.replace("-", "_") in model_normalized for kw in spec.keywords): return spec return None def find_gateway( provider_name: str | None = None, api_key: str | None = None, api_base: str | None = None, ) -> ProviderSpec | None: """Detect gateway/local provider. Priority: 1. provider_name — if it maps to a gateway/local spec, use it directly. 2. api_key prefix — e.g. "sk-or-" → OpenRouter. 3. api_base keyword — e.g. "aihubmix" in URL → AiHubMix. A standard provider with a custom api_base (e.g. DeepSeek behind a proxy) will NOT be mistaken for vLLM — the old fallback is gone. """ # 1. Direct match by config key if provider_name: spec = find_by_name(provider_name) if spec and (spec.is_gateway or spec.is_local): return spec # 2. Auto-detect by api_key prefix / api_base keyword for spec in PROVIDERS: if spec.detect_by_key_prefix and api_key and api_key.startswith(spec.detect_by_key_prefix): return spec if spec.detect_by_base_keyword and api_base and spec.detect_by_base_keyword in api_base: return spec return None def find_by_name(name: str) -> ProviderSpec | None: """Find a provider spec by config field name, e.g. "dashscope".""" for spec in PROVIDERS: if spec.name == name: return spec return None
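A few illustrative lookups against the registry defined above (a sketch, not part of the module; the model names and key strings are made-up examples):

# Sketch only: standard providers match by model keyword or explicit prefix...
assert find_by_model("claude-3-5-sonnet").name == "anthropic"
assert find_by_model("deepseek/deepseek-chat").name == "deepseek"
assert find_by_model("some-unknown-model") is None

# ...while gateways/local are detected by key prefix, base URL, or config key.
assert find_gateway(api_key="sk-or-example").name == "openrouter"
assert find_gateway(api_base="https://aihubmix.com/v1").name == "aihubmix"
assert find_gateway(provider_name="vllm").name == "vllm"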
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/providers/registry.py", "license": "MIT License", "lines": 408, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:tests/test_email_channel.py
from email.message import EmailMessage from datetime import date import pytest from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.email import EmailChannel from nanobot.config.schema import EmailConfig def _make_config() -> EmailConfig: return EmailConfig( enabled=True, consent_granted=True, imap_host="imap.example.com", imap_port=993, imap_username="bot@example.com", imap_password="secret", smtp_host="smtp.example.com", smtp_port=587, smtp_username="bot@example.com", smtp_password="secret", mark_seen=True, ) def _make_raw_email( from_addr: str = "alice@example.com", subject: str = "Hello", body: str = "This is the body.", ) -> bytes: msg = EmailMessage() msg["From"] = from_addr msg["To"] = "bot@example.com" msg["Subject"] = subject msg["Message-ID"] = "<m1@example.com>" msg.set_content(body) return msg.as_bytes() def test_fetch_new_messages_parses_unseen_and_marks_seen(monkeypatch) -> None: raw = _make_raw_email(subject="Invoice", body="Please pay") class FakeIMAP: def __init__(self) -> None: self.store_calls: list[tuple[bytes, str, str]] = [] def login(self, _user: str, _pw: str): return "OK", [b"logged in"] def select(self, _mailbox: str): return "OK", [b"1"] def search(self, *_args): return "OK", [b"1"] def fetch(self, _imap_id: bytes, _parts: str): return "OK", [(b"1 (UID 123 BODY[] {200})", raw), b")"] def store(self, imap_id: bytes, op: str, flags: str): self.store_calls.append((imap_id, op, flags)) return "OK", [b""] def logout(self): return "BYE", [b""] fake = FakeIMAP() monkeypatch.setattr("nanobot.channels.email.imaplib.IMAP4_SSL", lambda _h, _p: fake) channel = EmailChannel(_make_config(), MessageBus()) items = channel._fetch_new_messages() assert len(items) == 1 assert items[0]["sender"] == "alice@example.com" assert items[0]["subject"] == "Invoice" assert "Please pay" in items[0]["content"] assert fake.store_calls == [(b"1", "+FLAGS", "\\Seen")] # Same UID should be deduped in-process. 
items_again = channel._fetch_new_messages() assert items_again == [] def test_extract_text_body_falls_back_to_html() -> None: msg = EmailMessage() msg["From"] = "alice@example.com" msg["To"] = "bot@example.com" msg["Subject"] = "HTML only" msg.add_alternative("<p>Hello<br>world</p>", subtype="html") text = EmailChannel._extract_text_body(msg) assert "Hello" in text assert "world" in text @pytest.mark.asyncio async def test_start_returns_immediately_without_consent(monkeypatch) -> None: cfg = _make_config() cfg.consent_granted = False channel = EmailChannel(cfg, MessageBus()) called = {"fetch": False} def _fake_fetch(): called["fetch"] = True return [] monkeypatch.setattr(channel, "_fetch_new_messages", _fake_fetch) await channel.start() assert channel.is_running is False assert called["fetch"] is False @pytest.mark.asyncio async def test_send_uses_smtp_and_reply_subject(monkeypatch) -> None: class FakeSMTP: def __init__(self, _host: str, _port: int, timeout: int = 30) -> None: self.timeout = timeout self.started_tls = False self.logged_in = False self.sent_messages: list[EmailMessage] = [] def __enter__(self): return self def __exit__(self, exc_type, exc, tb): return False def starttls(self, context=None): self.started_tls = True def login(self, _user: str, _pw: str): self.logged_in = True def send_message(self, msg: EmailMessage): self.sent_messages.append(msg) fake_instances: list[FakeSMTP] = [] def _smtp_factory(host: str, port: int, timeout: int = 30): instance = FakeSMTP(host, port, timeout=timeout) fake_instances.append(instance) return instance monkeypatch.setattr("nanobot.channels.email.smtplib.SMTP", _smtp_factory) channel = EmailChannel(_make_config(), MessageBus()) channel._last_subject_by_chat["alice@example.com"] = "Invoice #42" channel._last_message_id_by_chat["alice@example.com"] = "<m1@example.com>" await channel.send( OutboundMessage( channel="email", chat_id="alice@example.com", content="Acknowledged.", ) ) assert len(fake_instances) == 1 smtp = fake_instances[0] assert smtp.started_tls is True assert smtp.logged_in is True assert len(smtp.sent_messages) == 1 sent = smtp.sent_messages[0] assert sent["Subject"] == "Re: Invoice #42" assert sent["To"] == "alice@example.com" assert sent["In-Reply-To"] == "<m1@example.com>" @pytest.mark.asyncio async def test_send_skips_reply_when_auto_reply_disabled(monkeypatch) -> None: """When auto_reply_enabled=False, replies should be skipped but proactive sends allowed.""" class FakeSMTP: def __init__(self, _host: str, _port: int, timeout: int = 30) -> None: self.sent_messages: list[EmailMessage] = [] def __enter__(self): return self def __exit__(self, exc_type, exc, tb): return False def starttls(self, context=None): return None def login(self, _user: str, _pw: str): return None def send_message(self, msg: EmailMessage): self.sent_messages.append(msg) fake_instances: list[FakeSMTP] = [] def _smtp_factory(host: str, port: int, timeout: int = 30): instance = FakeSMTP(host, port, timeout=timeout) fake_instances.append(instance) return instance monkeypatch.setattr("nanobot.channels.email.smtplib.SMTP", _smtp_factory) cfg = _make_config() cfg.auto_reply_enabled = False channel = EmailChannel(cfg, MessageBus()) # Mark alice as someone who sent us an email (making this a "reply") channel._last_subject_by_chat["alice@example.com"] = "Previous email" # Reply should be skipped (auto_reply_enabled=False) await channel.send( OutboundMessage( channel="email", chat_id="alice@example.com", content="Should not send.", ) ) assert fake_instances == [] # 
Reply with force_send=True should be sent await channel.send( OutboundMessage( channel="email", chat_id="alice@example.com", content="Force send.", metadata={"force_send": True}, ) ) assert len(fake_instances) == 1 assert len(fake_instances[0].sent_messages) == 1 @pytest.mark.asyncio async def test_send_proactive_email_when_auto_reply_disabled(monkeypatch) -> None: """Proactive emails (not replies) should be sent even when auto_reply_enabled=False.""" class FakeSMTP: def __init__(self, _host: str, _port: int, timeout: int = 30) -> None: self.sent_messages: list[EmailMessage] = [] def __enter__(self): return self def __exit__(self, exc_type, exc, tb): return False def starttls(self, context=None): return None def login(self, _user: str, _pw: str): return None def send_message(self, msg: EmailMessage): self.sent_messages.append(msg) fake_instances: list[FakeSMTP] = [] def _smtp_factory(host: str, port: int, timeout: int = 30): instance = FakeSMTP(host, port, timeout=timeout) fake_instances.append(instance) return instance monkeypatch.setattr("nanobot.channels.email.smtplib.SMTP", _smtp_factory) cfg = _make_config() cfg.auto_reply_enabled = False channel = EmailChannel(cfg, MessageBus()) # bob@example.com has never sent us an email (proactive send) # This should be sent even with auto_reply_enabled=False await channel.send( OutboundMessage( channel="email", chat_id="bob@example.com", content="Hello, this is a proactive email.", ) ) assert len(fake_instances) == 1 assert len(fake_instances[0].sent_messages) == 1 sent = fake_instances[0].sent_messages[0] assert sent["To"] == "bob@example.com" @pytest.mark.asyncio async def test_send_skips_when_consent_not_granted(monkeypatch) -> None: class FakeSMTP: def __init__(self, _host: str, _port: int, timeout: int = 30) -> None: self.sent_messages: list[EmailMessage] = [] def __enter__(self): return self def __exit__(self, exc_type, exc, tb): return False def starttls(self, context=None): return None def login(self, _user: str, _pw: str): return None def send_message(self, msg: EmailMessage): self.sent_messages.append(msg) called = {"smtp": False} def _smtp_factory(host: str, port: int, timeout: int = 30): called["smtp"] = True return FakeSMTP(host, port, timeout=timeout) monkeypatch.setattr("nanobot.channels.email.smtplib.SMTP", _smtp_factory) cfg = _make_config() cfg.consent_granted = False channel = EmailChannel(cfg, MessageBus()) await channel.send( OutboundMessage( channel="email", chat_id="alice@example.com", content="Should not send.", metadata={"force_send": True}, ) ) assert called["smtp"] is False def test_fetch_messages_between_dates_uses_imap_since_before_without_mark_seen(monkeypatch) -> None: raw = _make_raw_email(subject="Status", body="Yesterday update") class FakeIMAP: def __init__(self) -> None: self.search_args = None self.store_calls: list[tuple[bytes, str, str]] = [] def login(self, _user: str, _pw: str): return "OK", [b"logged in"] def select(self, _mailbox: str): return "OK", [b"1"] def search(self, *_args): self.search_args = _args return "OK", [b"5"] def fetch(self, _imap_id: bytes, _parts: str): return "OK", [(b"5 (UID 999 BODY[] {200})", raw), b")"] def store(self, imap_id: bytes, op: str, flags: str): self.store_calls.append((imap_id, op, flags)) return "OK", [b""] def logout(self): return "BYE", [b""] fake = FakeIMAP() monkeypatch.setattr("nanobot.channels.email.imaplib.IMAP4_SSL", lambda _h, _p: fake) channel = EmailChannel(_make_config(), MessageBus()) items = channel.fetch_messages_between_dates( start_date=date(2026, 2, 
6), end_date=date(2026, 2, 7), limit=10, ) assert len(items) == 1 assert items[0]["subject"] == "Status" # search(None, "SINCE", "06-Feb-2026", "BEFORE", "07-Feb-2026") assert fake.search_args is not None assert fake.search_args[1:] == ("SINCE", "06-Feb-2026", "BEFORE", "07-Feb-2026") assert fake.store_calls == []
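For reference, the `SINCE`/`BEFORE` literals asserted above follow the IMAP (RFC 3501) date-text format; assuming the channel formats them with `strftime` (note `%b` is locale-sensitive, so an English/C locale is assumed):

from datetime import date

print(date(2026, 2, 6).strftime("%d-%b-%Y"))  # 06-Feb-2026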
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_email_channel.py", "license": "MIT License", "lines": 279, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:nanobot/agent/tools/cron.py
"""Cron tool for scheduling reminders and tasks.""" from contextvars import ContextVar from typing import Any from nanobot.agent.tools.base import Tool from nanobot.cron.service import CronService from nanobot.cron.types import CronSchedule class CronTool(Tool): """Tool to schedule reminders and recurring tasks.""" def __init__(self, cron_service: CronService): self._cron = cron_service self._channel = "" self._chat_id = "" self._in_cron_context: ContextVar[bool] = ContextVar("cron_in_context", default=False) def set_context(self, channel: str, chat_id: str) -> None: """Set the current session context for delivery.""" self._channel = channel self._chat_id = chat_id def set_cron_context(self, active: bool): """Mark whether the tool is executing inside a cron job callback.""" return self._in_cron_context.set(active) def reset_cron_context(self, token) -> None: """Restore previous cron context.""" self._in_cron_context.reset(token) @property def name(self) -> str: return "cron" @property def description(self) -> str: return "Schedule reminders and recurring tasks. Actions: add, list, remove." @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": { "action": { "type": "string", "enum": ["add", "list", "remove"], "description": "Action to perform", }, "message": {"type": "string", "description": "Reminder message (for add)"}, "every_seconds": { "type": "integer", "description": "Interval in seconds (for recurring tasks)", }, "cron_expr": { "type": "string", "description": "Cron expression like '0 9 * * *' (for scheduled tasks)", }, "tz": { "type": "string", "description": "IANA timezone for cron expressions (e.g. 'America/Vancouver')", }, "at": { "type": "string", "description": "ISO datetime for one-time execution (e.g. 
'2026-02-12T10:30:00')",
                },
                "job_id": {"type": "string", "description": "Job ID (for remove)"},
            },
            "required": ["action"],
        }

    async def execute(
        self,
        action: str,
        message: str = "",
        every_seconds: int | None = None,
        cron_expr: str | None = None,
        tz: str | None = None,
        at: str | None = None,
        job_id: str | None = None,
        **kwargs: Any,
    ) -> str:
        if action == "add":
            if self._in_cron_context.get():
                return "Error: cannot schedule new jobs from within a cron job execution"
            return self._add_job(message, every_seconds, cron_expr, tz, at)
        elif action == "list":
            return self._list_jobs()
        elif action == "remove":
            return self._remove_job(job_id)
        return f"Unknown action: {action}"

    def _add_job(
        self,
        message: str,
        every_seconds: int | None,
        cron_expr: str | None,
        tz: str | None,
        at: str | None,
    ) -> str:
        if not message:
            return "Error: message is required for add"
        if not self._channel or not self._chat_id:
            return "Error: no session context (channel/chat_id)"
        if tz and not cron_expr:
            return "Error: tz can only be used with cron_expr"
        if tz:
            from zoneinfo import ZoneInfo

            try:
                ZoneInfo(tz)
            except Exception:
                # ZoneInfoNotFoundError is a KeyError subclass, so a bare
                # Exception covers it along with invalid key types.
                return f"Error: unknown timezone '{tz}'"

        # Build schedule
        delete_after = False
        if every_seconds:
            schedule = CronSchedule(kind="every", every_ms=every_seconds * 1000)
        elif cron_expr:
            schedule = CronSchedule(kind="cron", expr=cron_expr, tz=tz)
        elif at:
            from datetime import datetime

            try:
                dt = datetime.fromisoformat(at)
            except ValueError:
                return f"Error: invalid ISO datetime '{at}'"
            at_ms = int(dt.timestamp() * 1000)
            schedule = CronSchedule(kind="at", at_ms=at_ms)
            delete_after = True
        else:
            return "Error: either every_seconds, cron_expr, or at is required"

        job = self._cron.add_job(
            name=message[:30],
            schedule=schedule,
            message=message,
            deliver=True,
            channel=self._channel,
            to=self._chat_id,
            delete_after_run=delete_after,
        )
        return f"Created job '{job.name}' (id: {job.id})"

    def _list_jobs(self) -> str:
        jobs = self._cron.list_jobs()
        if not jobs:
            return "No scheduled jobs."
        lines = [f"- {j.name} (id: {j.id}, {j.schedule.kind})" for j in jobs]
        return "Scheduled jobs:\n" + "\n".join(lines)

    def _remove_job(self, job_id: str | None) -> str:
        if not job_id:
            return "Error: job_id is required for remove"
        if self._cron.remove_job(job_id):
            return f"Removed job {job_id}"
        return f"Job {job_id} not found"
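For illustration, the three `CronSchedule` shapes `_add_job` can build (a sketch using only the field names and example values seen in this file):

# Sketch only: the three schedule kinds constructed above.
from datetime import datetime

recurring = CronSchedule(kind="every", every_ms=30 * 60 * 1000)  # every 30 minutes
scheduled = CronSchedule(kind="cron", expr="0 9 * * *", tz="America/Vancouver")  # 09:00 daily
one_shot = CronSchedule(
    kind="at",
    at_ms=int(datetime.fromisoformat("2026-02-12T10:30:00").timestamp() * 1000),
)  # fires once; the job is then removed (delete_after_run=True)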
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/cron.py", "license": "MIT License", "lines": 136, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/discord.py
"""Discord channel implementation using Discord Gateway websocket.""" import asyncio import json from pathlib import Path from typing import Any import httpx import websockets from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import DiscordConfig DISCORD_API_BASE = "https://discord.com/api/v10" MAX_ATTACHMENT_BYTES = 20 * 1024 * 1024 # 20MB MAX_MESSAGE_LEN = 2000 # Discord message character limit def _split_message(content: str, max_len: int = MAX_MESSAGE_LEN) -> list[str]: """Split content into chunks within max_len, preferring line breaks.""" if not content: return [] if len(content) <= max_len: return [content] chunks: list[str] = [] while content: if len(content) <= max_len: chunks.append(content) break cut = content[:max_len] pos = cut.rfind('\n') if pos <= 0: pos = cut.rfind(' ') if pos <= 0: pos = max_len chunks.append(content[:pos]) content = content[pos:].lstrip() return chunks class DiscordChannel(BaseChannel): """Discord channel using Gateway websocket.""" name = "discord" def __init__(self, config: DiscordConfig, bus: MessageBus): super().__init__(config, bus) self.config: DiscordConfig = config self._ws: websockets.WebSocketClientProtocol | None = None self._seq: int | None = None self._heartbeat_task: asyncio.Task | None = None self._typing_tasks: dict[str, asyncio.Task] = {} self._http: httpx.AsyncClient | None = None async def start(self) -> None: """Start the Discord gateway connection.""" if not self.config.token: logger.error("Discord bot token not configured") return self._running = True self._http = httpx.AsyncClient(timeout=30.0) while self._running: try: logger.info("Connecting to Discord gateway...") async with websockets.connect(self.config.gateway_url) as ws: self._ws = ws await self._gateway_loop() except asyncio.CancelledError: break except Exception as e: logger.warning("Discord gateway error: {}", e) if self._running: logger.info("Reconnecting to Discord gateway in 5 seconds...") await asyncio.sleep(5) async def stop(self) -> None: """Stop the Discord channel.""" self._running = False if self._heartbeat_task: self._heartbeat_task.cancel() self._heartbeat_task = None for task in self._typing_tasks.values(): task.cancel() self._typing_tasks.clear() if self._ws: await self._ws.close() self._ws = None if self._http: await self._http.aclose() self._http = None async def send(self, msg: OutboundMessage) -> None: """Send a message through Discord REST API.""" if not self._http: logger.warning("Discord HTTP client not initialized") return url = f"{DISCORD_API_BASE}/channels/{msg.chat_id}/messages" headers = {"Authorization": f"Bot {self.config.token}"} try: chunks = _split_message(msg.content or "") if not chunks: return for i, chunk in enumerate(chunks): payload: dict[str, Any] = {"content": chunk} # Only set reply reference on the first chunk if i == 0 and msg.reply_to: payload["message_reference"] = {"message_id": msg.reply_to} payload["allowed_mentions"] = {"replied_user": False} if not await self._send_payload(url, headers, payload): break # Abort remaining chunks on failure finally: await self._stop_typing(msg.chat_id) async def _send_payload( self, url: str, headers: dict[str, str], payload: dict[str, Any] ) -> bool: """Send a single Discord API payload with retry on rate-limit. 
Returns True on success.""" for attempt in range(3): try: response = await self._http.post(url, headers=headers, json=payload) if response.status_code == 429: data = response.json() retry_after = float(data.get("retry_after", 1.0)) logger.warning("Discord rate limited, retrying in {}s", retry_after) await asyncio.sleep(retry_after) continue response.raise_for_status() return True except Exception as e: if attempt == 2: logger.error("Error sending Discord message: {}", e) else: await asyncio.sleep(1) return False async def _gateway_loop(self) -> None: """Main gateway loop: identify, heartbeat, dispatch events.""" if not self._ws: return async for raw in self._ws: try: data = json.loads(raw) except json.JSONDecodeError: logger.warning("Invalid JSON from Discord gateway: {}", raw[:100]) continue op = data.get("op") event_type = data.get("t") seq = data.get("s") payload = data.get("d") if seq is not None: self._seq = seq if op == 10: # HELLO: start heartbeat and identify interval_ms = payload.get("heartbeat_interval", 45000) await self._start_heartbeat(interval_ms / 1000) await self._identify() elif op == 0 and event_type == "READY": logger.info("Discord gateway READY") elif op == 0 and event_type == "MESSAGE_CREATE": await self._handle_message_create(payload) elif op == 7: # RECONNECT: exit loop to reconnect logger.info("Discord gateway requested reconnect") break elif op == 9: # INVALID_SESSION: reconnect logger.warning("Discord gateway invalid session") break async def _identify(self) -> None: """Send IDENTIFY payload.""" if not self._ws: return identify = { "op": 2, "d": { "token": self.config.token, "intents": self.config.intents, "properties": { "os": "nanobot", "browser": "nanobot", "device": "nanobot", }, }, } await self._ws.send(json.dumps(identify)) async def _start_heartbeat(self, interval_s: float) -> None: """Start or restart the heartbeat loop.""" if self._heartbeat_task: self._heartbeat_task.cancel() async def heartbeat_loop() -> None: while self._running and self._ws: payload = {"op": 1, "d": self._seq} try: await self._ws.send(json.dumps(payload)) except Exception as e: logger.warning("Discord heartbeat failed: {}", e) break await asyncio.sleep(interval_s) self._heartbeat_task = asyncio.create_task(heartbeat_loop()) async def _handle_message_create(self, payload: dict[str, Any]) -> None: """Handle incoming Discord messages.""" author = payload.get("author") or {} if author.get("bot"): return sender_id = str(author.get("id", "")) channel_id = str(payload.get("channel_id", "")) content = payload.get("content") or "" if not sender_id or not channel_id: return if not self.is_allowed(sender_id): return content_parts = [content] if content else [] media_paths: list[str] = [] media_dir = Path.home() / ".nanobot" / "media" for attachment in payload.get("attachments") or []: url = attachment.get("url") filename = attachment.get("filename") or "attachment" size = attachment.get("size") or 0 if not url or not self._http: continue if size and size > MAX_ATTACHMENT_BYTES: content_parts.append(f"[attachment: {filename} - too large]") continue try: media_dir.mkdir(parents=True, exist_ok=True) file_path = media_dir / f"{attachment.get('id', 'file')}_{filename.replace('/', '_')}" resp = await self._http.get(url) resp.raise_for_status() file_path.write_bytes(resp.content) media_paths.append(str(file_path)) content_parts.append(f"[attachment: {file_path}]") except Exception as e: logger.warning("Failed to download Discord attachment: {}", e) content_parts.append(f"[attachment: {filename} - 
download failed]") reply_to = (payload.get("referenced_message") or {}).get("id") await self._start_typing(channel_id) await self._handle_message( sender_id=sender_id, chat_id=channel_id, content="\n".join(p for p in content_parts if p) or "[empty message]", media=media_paths, metadata={ "message_id": str(payload.get("id", "")), "guild_id": payload.get("guild_id"), "reply_to": reply_to, }, ) async def _start_typing(self, channel_id: str) -> None: """Start periodic typing indicator for a channel.""" await self._stop_typing(channel_id) async def typing_loop() -> None: url = f"{DISCORD_API_BASE}/channels/{channel_id}/typing" headers = {"Authorization": f"Bot {self.config.token}"} while self._running: try: await self._http.post(url, headers=headers) except asyncio.CancelledError: return except Exception as e: logger.debug("Discord typing indicator failed for {}: {}", channel_id, e) return await asyncio.sleep(8) self._typing_tasks[channel_id] = asyncio.create_task(typing_loop()) async def _stop_typing(self, channel_id: str) -> None: """Stop typing indicator for a channel.""" task = self._typing_tasks.pop(channel_id, None) if task: task.cancel()
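A quick illustrative check of `_split_message`'s chunking preference (last newline, then last space, then a hard cut at the limit); sketch only, run against the definitions above:

text = "first line\n" + "x" * 2500
chunks = _split_message(text)
assert chunks[0] == "first line"                     # cut at the last newline
assert [len(c) for c in chunks] == [10, 2000, 500]   # then hard cuts within the limit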
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/discord.py", "license": "MIT License", "lines": 256, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/feishu.py
"""Feishu/Lark channel implementation using lark-oapi SDK with WebSocket long connection.""" import asyncio import json import os import re import threading from collections import OrderedDict from pathlib import Path from typing import Any from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import FeishuConfig try: import lark_oapi as lark from lark_oapi.api.im.v1 import ( CreateFileRequest, CreateFileRequestBody, CreateImageRequest, CreateImageRequestBody, CreateMessageReactionRequest, CreateMessageReactionRequestBody, CreateMessageRequest, CreateMessageRequestBody, Emoji, GetMessageResourceRequest, P2ImMessageReceiveV1, ) FEISHU_AVAILABLE = True except ImportError: FEISHU_AVAILABLE = False lark = None Emoji = None # Message type display mapping MSG_TYPE_MAP = { "image": "[image]", "audio": "[audio]", "file": "[file]", "sticker": "[sticker]", } def _extract_share_card_content(content_json: dict, msg_type: str) -> str: """Extract text representation from share cards and interactive messages.""" parts = [] if msg_type == "share_chat": parts.append(f"[shared chat: {content_json.get('chat_id', '')}]") elif msg_type == "share_user": parts.append(f"[shared user: {content_json.get('user_id', '')}]") elif msg_type == "interactive": parts.extend(_extract_interactive_content(content_json)) elif msg_type == "share_calendar_event": parts.append(f"[shared calendar event: {content_json.get('event_key', '')}]") elif msg_type == "system": parts.append("[system message]") elif msg_type == "merge_forward": parts.append("[merged forward messages]") return "\n".join(parts) if parts else f"[{msg_type}]" def _extract_interactive_content(content: dict) -> list[str]: """Recursively extract text and links from interactive card content.""" parts = [] if isinstance(content, str): try: content = json.loads(content) except (json.JSONDecodeError, TypeError): return [content] if content.strip() else [] if not isinstance(content, dict): return parts if "title" in content: title = content["title"] if isinstance(title, dict): title_content = title.get("content", "") or title.get("text", "") if title_content: parts.append(f"title: {title_content}") elif isinstance(title, str): parts.append(f"title: {title}") for elements in content.get("elements", []) if isinstance(content.get("elements"), list) else []: for element in elements: parts.extend(_extract_element_content(element)) card = content.get("card", {}) if card: parts.extend(_extract_interactive_content(card)) header = content.get("header", {}) if header: header_title = header.get("title", {}) if isinstance(header_title, dict): header_text = header_title.get("content", "") or header_title.get("text", "") if header_text: parts.append(f"title: {header_text}") return parts def _extract_element_content(element: dict) -> list[str]: """Extract content from a single card element.""" parts = [] if not isinstance(element, dict): return parts tag = element.get("tag", "") if tag in ("markdown", "lark_md"): content = element.get("content", "") if content: parts.append(content) elif tag == "div": text = element.get("text", {}) if isinstance(text, dict): text_content = text.get("content", "") or text.get("text", "") if text_content: parts.append(text_content) elif isinstance(text, str): parts.append(text) for field in element.get("fields", []): if isinstance(field, dict): field_text = field.get("text", {}) if isinstance(field_text, dict): c = 
field_text.get("content", "") if c: parts.append(c) elif tag == "a": href = element.get("href", "") text = element.get("text", "") if href: parts.append(f"link: {href}") if text: parts.append(text) elif tag == "button": text = element.get("text", {}) if isinstance(text, dict): c = text.get("content", "") if c: parts.append(c) url = element.get("url", "") or element.get("multi_url", {}).get("url", "") if url: parts.append(f"link: {url}") elif tag == "img": alt = element.get("alt", {}) parts.append(alt.get("content", "[image]") if isinstance(alt, dict) else "[image]") elif tag == "note": for ne in element.get("elements", []): parts.extend(_extract_element_content(ne)) elif tag == "column_set": for col in element.get("columns", []): for ce in col.get("elements", []): parts.extend(_extract_element_content(ce)) elif tag == "plain_text": content = element.get("content", "") if content: parts.append(content) else: for ne in element.get("elements", []): parts.extend(_extract_element_content(ne)) return parts def _extract_post_content(content_json: dict) -> tuple[str, list[str]]: """Extract text and image keys from Feishu post (rich text) message. Handles three payload shapes: - Direct: {"title": "...", "content": [[...]]} - Localized: {"zh_cn": {"title": "...", "content": [...]}} - Wrapped: {"post": {"zh_cn": {"title": "...", "content": [...]}}} """ def _parse_block(block: dict) -> tuple[str | None, list[str]]: if not isinstance(block, dict) or not isinstance(block.get("content"), list): return None, [] texts, images = [], [] if title := block.get("title"): texts.append(title) for row in block["content"]: if not isinstance(row, list): continue for el in row: if not isinstance(el, dict): continue tag = el.get("tag") if tag in ("text", "a"): texts.append(el.get("text", "")) elif tag == "at": texts.append(f"@{el.get('user_name', 'user')}") elif tag == "img" and (key := el.get("image_key")): images.append(key) return (" ".join(texts).strip() or None), images # Unwrap optional {"post": ...} envelope root = content_json if isinstance(root, dict) and isinstance(root.get("post"), dict): root = root["post"] if not isinstance(root, dict): return "", [] # Direct format if "content" in root: text, imgs = _parse_block(root) if text or imgs: return text or "", imgs # Localized: prefer known locales, then fall back to any dict child for key in ("zh_cn", "en_us", "ja_jp"): if key in root: text, imgs = _parse_block(root[key]) if text or imgs: return text or "", imgs for val in root.values(): if isinstance(val, dict): text, imgs = _parse_block(val) if text or imgs: return text or "", imgs return "", [] def _extract_post_text(content_json: dict) -> str: """Extract plain text from Feishu post (rich text) message content. Legacy wrapper for _extract_post_content, returns only text. """ text, _ = _extract_post_content(content_json) return text class FeishuChannel(BaseChannel): """ Feishu/Lark channel using WebSocket long connection. Uses WebSocket to receive events - no public IP or webhook required. 
Requires: - App ID and App Secret from Feishu Open Platform - Bot capability enabled - Event subscription enabled (im.message.receive_v1) """ name = "feishu" def __init__(self, config: FeishuConfig, bus: MessageBus): super().__init__(config, bus) self.config: FeishuConfig = config self._client: Any = None self._ws_client: Any = None self._ws_thread: threading.Thread | None = None self._processed_message_ids: OrderedDict[str, None] = OrderedDict() # Ordered dedup cache self._loop: asyncio.AbstractEventLoop | None = None async def start(self) -> None: """Start the Feishu bot with WebSocket long connection.""" if not FEISHU_AVAILABLE: logger.error("Feishu SDK not installed. Run: pip install lark-oapi") return if not self.config.app_id or not self.config.app_secret: logger.error("Feishu app_id and app_secret not configured") return self._running = True self._loop = asyncio.get_running_loop() # Create Lark client for sending messages self._client = lark.Client.builder() \ .app_id(self.config.app_id) \ .app_secret(self.config.app_secret) \ .log_level(lark.LogLevel.INFO) \ .build() # Create event handler (only register message receive, ignore other events) event_handler = lark.EventDispatcherHandler.builder( self.config.encrypt_key or "", self.config.verification_token or "", ).register_p2_im_message_receive_v1( self._on_message_sync ).build() # Create WebSocket client for long connection self._ws_client = lark.ws.Client( self.config.app_id, self.config.app_secret, event_handler=event_handler, log_level=lark.LogLevel.INFO ) # Start WebSocket client in a separate thread with reconnect loop def run_ws(): while self._running: try: self._ws_client.start() except Exception as e: logger.warning("Feishu WebSocket error: {}", e) if self._running: import time time.sleep(5) self._ws_thread = threading.Thread(target=run_ws, daemon=True) self._ws_thread.start() logger.info("Feishu bot started with WebSocket long connection") logger.info("No public IP required - using WebSocket to receive events") # Keep running until stopped while self._running: await asyncio.sleep(1) async def stop(self) -> None: """ Stop the Feishu bot. Notice: lark.ws.Client does not expose stop method, simply exiting the program will close the client. Reference: https://github.com/larksuite/oapi-sdk-python/blob/v2_main/lark_oapi/ws/client.py#L86 """ self._running = False logger.info("Feishu bot stopped") def _add_reaction_sync(self, message_id: str, emoji_type: str) -> None: """Sync helper for adding reaction (runs in thread pool).""" try: request = CreateMessageReactionRequest.builder() \ .message_id(message_id) \ .request_body( CreateMessageReactionRequestBody.builder() .reaction_type(Emoji.builder().emoji_type(emoji_type).build()) .build() ).build() response = self._client.im.v1.message_reaction.create(request) if not response.success(): logger.warning("Failed to add reaction: code={}, msg={}", response.code, response.msg) else: logger.debug("Added {} reaction to message {}", emoji_type, message_id) except Exception as e: logger.warning("Error adding reaction: {}", e) async def _add_reaction(self, message_id: str, emoji_type: str = "THUMBSUP") -> None: """ Add a reaction emoji to a message (non-blocking). 
Common emoji types: THUMBSUP, OK, EYES, DONE, OnIt, HEART """ if not self._client or not Emoji: return loop = asyncio.get_running_loop() await loop.run_in_executor(None, self._add_reaction_sync, message_id, emoji_type) # Regex to match markdown tables (header + separator + data rows) _TABLE_RE = re.compile( r"((?:^[ \t]*\|.+\|[ \t]*\n)(?:^[ \t]*\|[-:\s|]+\|[ \t]*\n)(?:^[ \t]*\|.+\|[ \t]*\n?)+)", re.MULTILINE, ) _HEADING_RE = re.compile(r"^(#{1,6})\s+(.+)$", re.MULTILINE) _CODE_BLOCK_RE = re.compile(r"(```[\s\S]*?```)", re.MULTILINE) @staticmethod def _parse_md_table(table_text: str) -> dict | None: """Parse a markdown table into a Feishu table element.""" lines = [_line.strip() for _line in table_text.strip().split("\n") if _line.strip()] if len(lines) < 3: return None def split(_line: str) -> list[str]: return [c.strip() for c in _line.strip("|").split("|")] headers = split(lines[0]) rows = [split(_line) for _line in lines[2:]] columns = [{"tag": "column", "name": f"c{i}", "display_name": h, "width": "auto"} for i, h in enumerate(headers)] return { "tag": "table", "page_size": len(rows) + 1, "columns": columns, "rows": [{f"c{i}": r[i] if i < len(r) else "" for i in range(len(headers))} for r in rows], } def _build_card_elements(self, content: str) -> list[dict]: """Split content into div/markdown + table elements for Feishu card.""" elements, last_end = [], 0 for m in self._TABLE_RE.finditer(content): before = content[last_end:m.start()] if before.strip(): elements.extend(self._split_headings(before)) elements.append(self._parse_md_table(m.group(1)) or {"tag": "markdown", "content": m.group(1)}) last_end = m.end() remaining = content[last_end:] if remaining.strip(): elements.extend(self._split_headings(remaining)) return elements or [{"tag": "markdown", "content": content}] def _split_headings(self, content: str) -> list[dict]: """Split content by headings, converting headings to div elements.""" protected = content code_blocks = [] for m in self._CODE_BLOCK_RE.finditer(content): code_blocks.append(m.group(1)) protected = protected.replace(m.group(1), f"\x00CODE{len(code_blocks)-1}\x00", 1) elements = [] last_end = 0 for m in self._HEADING_RE.finditer(protected): before = protected[last_end:m.start()].strip() if before: elements.append({"tag": "markdown", "content": before}) text = m.group(2).strip() elements.append({ "tag": "div", "text": { "tag": "lark_md", "content": f"**{text}**", }, }) last_end = m.end() remaining = protected[last_end:].strip() if remaining: elements.append({"tag": "markdown", "content": remaining}) for i, cb in enumerate(code_blocks): for el in elements: if el.get("tag") == "markdown": el["content"] = el["content"].replace(f"\x00CODE{i}\x00", cb) return elements or [{"tag": "markdown", "content": content}] _IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".ico", ".tiff", ".tif"} _AUDIO_EXTS = {".opus"} _FILE_TYPE_MAP = { ".opus": "opus", ".mp4": "mp4", ".pdf": "pdf", ".doc": "doc", ".docx": "doc", ".xls": "xls", ".xlsx": "xls", ".ppt": "ppt", ".pptx": "ppt", } def _upload_image_sync(self, file_path: str) -> str | None: """Upload an image to Feishu and return the image_key.""" try: with open(file_path, "rb") as f: request = CreateImageRequest.builder() \ .request_body( CreateImageRequestBody.builder() .image_type("message") .image(f) .build() ).build() response = self._client.im.v1.image.create(request) if response.success(): image_key = response.data.image_key logger.debug("Uploaded image {}: {}", os.path.basename(file_path), image_key) return 
image_key
                else:
                    logger.error("Failed to upload image: code={}, msg={}", response.code, response.msg)
                    return None
        except Exception as e:
            logger.error("Error uploading image {}: {}", file_path, e)
            return None

    def _upload_file_sync(self, file_path: str) -> str | None:
        """Upload a file to Feishu and return the file_key."""
        ext = os.path.splitext(file_path)[1].lower()
        file_type = self._FILE_TYPE_MAP.get(ext, "stream")
        file_name = os.path.basename(file_path)
        try:
            with open(file_path, "rb") as f:
                request = CreateFileRequest.builder() \
                    .request_body(
                        CreateFileRequestBody.builder()
                        .file_type(file_type)
                        .file_name(file_name)
                        .file(f)
                        .build()
                    ).build()
                response = self._client.im.v1.file.create(request)
                if response.success():
                    file_key = response.data.file_key
                    logger.debug("Uploaded file {}: {}", file_name, file_key)
                    return file_key
                else:
                    logger.error("Failed to upload file: code={}, msg={}", response.code, response.msg)
                    return None
        except Exception as e:
            logger.error("Error uploading file {}: {}", file_path, e)
            return None

    def _download_image_sync(self, message_id: str, image_key: str) -> tuple[bytes | None, str | None]:
        """Download an image from a Feishu message by message_id and image_key."""
        try:
            request = GetMessageResourceRequest.builder() \
                .message_id(message_id) \
                .file_key(image_key) \
                .type("image") \
                .build()
            response = self._client.im.v1.message_resource.get(request)
            if response.success():
                file_data = response.file
                # GetMessageResourceRequest returns BytesIO, need to read bytes
                if hasattr(file_data, 'read'):
                    file_data = file_data.read()
                return file_data, response.file_name
            else:
                logger.error("Failed to download image: code={}, msg={}", response.code, response.msg)
                return None, None
        except Exception as e:
            logger.error("Error downloading image {}: {}", image_key, e)
            return None, None

    def _download_file_sync(
        self, message_id: str, file_key: str, resource_type: str = "file"
    ) -> tuple[bytes | None, str | None]:
        """Download a file/audio/media from a Feishu message by message_id and file_key."""
        try:
            request = (
                GetMessageResourceRequest.builder()
                .message_id(message_id)
                .file_key(file_key)
                .type(resource_type)
                .build()
            )
            response = self._client.im.v1.message_resource.get(request)
            if response.success():
                file_data = response.file
                if hasattr(file_data, "read"):
                    file_data = file_data.read()
                return file_data, response.file_name
            else:
                logger.error("Failed to download {}: code={}, msg={}", resource_type, response.code, response.msg)
                return None, None
        except Exception:
            logger.exception("Error downloading {} {}", resource_type, file_key)
            return None, None

    async def _download_and_save_media(
        self, msg_type: str, content_json: dict, message_id: str | None = None
    ) -> tuple[str | None, str]:
        """
        Download media from Feishu and save to local disk.

        Returns:
            (file_path, content_text) - file_path is None if download failed
        """
        loop = asyncio.get_running_loop()
        media_dir = Path.home() / ".nanobot" / "media"
        media_dir.mkdir(parents=True, exist_ok=True)

        data, filename = None, None
        if msg_type == "image":
            image_key = content_json.get("image_key")
            if image_key and message_id:
                data, filename = await loop.run_in_executor(
                    None, self._download_image_sync, message_id, image_key
                )
                if not filename:
                    filename = f"{image_key[:16]}.jpg"
        elif msg_type in ("audio", "file", "media"):
            file_key = content_json.get("file_key")
            if file_key and message_id:
                data, filename = await loop.run_in_executor(
                    None, self._download_file_sync, message_id, file_key, msg_type
                )
                if not filename:
                    ext = {"audio": ".opus", "media": ".mp4"}.get(msg_type, "")
                    filename = f"{file_key[:16]}{ext}"

        if data and filename:
            file_path = media_dir / filename
            file_path.write_bytes(data)
            logger.debug("Downloaded {} to {}", msg_type, file_path)
            return str(file_path), f"[{msg_type}: {filename}]"
        return None, f"[{msg_type}: download failed]"

    def _send_message_sync(self, receive_id_type: str, receive_id: str, msg_type: str, content: str) -> bool:
        """Send a single message (text/image/file/interactive) synchronously."""
        try:
            request = CreateMessageRequest.builder() \
                .receive_id_type(receive_id_type) \
                .request_body(
                    CreateMessageRequestBody.builder()
                    .receive_id(receive_id)
                    .msg_type(msg_type)
                    .content(content)
                    .build()
                ).build()
            response = self._client.im.v1.message.create(request)
            if not response.success():
                logger.error(
                    "Failed to send Feishu {} message: code={}, msg={}, log_id={}",
                    msg_type, response.code, response.msg, response.get_log_id()
                )
                return False
            logger.debug("Feishu {} message sent to {}", msg_type, receive_id)
            return True
        except Exception as e:
            logger.error("Error sending Feishu {} message: {}", msg_type, e)
            return False

    async def send(self, msg: OutboundMessage) -> None:
        """Send a message through Feishu, including media (images/files) if present."""
        if not self._client:
            logger.warning("Feishu client not initialized")
            return
        try:
            receive_id_type = "chat_id" if msg.chat_id.startswith("oc_") else "open_id"
            loop = asyncio.get_running_loop()

            for file_path in msg.media:
                if not os.path.isfile(file_path):
                    logger.warning("Media file not found: {}", file_path)
                    continue
                ext = os.path.splitext(file_path)[1].lower()
                if ext in self._IMAGE_EXTS:
                    key = await loop.run_in_executor(None, self._upload_image_sync, file_path)
                    if key:
                        await loop.run_in_executor(
                            None, self._send_message_sync,
                            receive_id_type, msg.chat_id, "image",
                            json.dumps({"image_key": key}, ensure_ascii=False),
                        )
                else:
                    key = await loop.run_in_executor(None, self._upload_file_sync, file_path)
                    if key:
                        media_type = "audio" if ext in self._AUDIO_EXTS else "file"
                        await loop.run_in_executor(
                            None, self._send_message_sync,
                            receive_id_type, msg.chat_id, media_type,
                            json.dumps({"file_key": key}, ensure_ascii=False),
                        )

            if msg.content and msg.content.strip():
                card = {"config": {"wide_screen_mode": True}, "elements": self._build_card_elements(msg.content)}
                await loop.run_in_executor(
                    None, self._send_message_sync,
                    receive_id_type, msg.chat_id, "interactive",
                    json.dumps(card, ensure_ascii=False),
                )
        except Exception as e:
            logger.error("Error sending Feishu message: {}", e)

    def _on_message_sync(self, data: "P2ImMessageReceiveV1") -> None:
        """
        Sync handler for incoming messages (called from WebSocket thread).
        Schedules async handling in the main event loop.
        """
        if self._loop and self._loop.is_running():
            asyncio.run_coroutine_threadsafe(self._on_message(data), self._loop)

    async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
        """Handle incoming message from Feishu."""
        try:
            event = data.event
            message = event.message
            sender = event.sender

            # Deduplication check
            message_id = message.message_id
            if message_id in self._processed_message_ids:
                return
            self._processed_message_ids[message_id] = None
            # Trim cache
            while len(self._processed_message_ids) > 1000:
                self._processed_message_ids.popitem(last=False)

            # Skip bot messages
            if sender.sender_type == "bot":
                return

            sender_id = sender.sender_id.open_id if sender.sender_id else "unknown"
            chat_id = message.chat_id
            chat_type = message.chat_type
            msg_type = message.message_type

            # Add reaction
            await self._add_reaction(message_id, self.config.react_emoji)

            # Parse content
            content_parts = []
            media_paths = []
            try:
                content_json = json.loads(message.content) if message.content else {}
            except json.JSONDecodeError:
                content_json = {}

            if msg_type == "text":
                text = content_json.get("text", "")
                if text:
                    content_parts.append(text)
            elif msg_type == "post":
                text, image_keys = _extract_post_content(content_json)
                if text:
                    content_parts.append(text)
                # Download images embedded in post
                for img_key in image_keys:
                    file_path, content_text = await self._download_and_save_media(
                        "image", {"image_key": img_key}, message_id
                    )
                    if file_path:
                        media_paths.append(file_path)
                        content_parts.append(content_text)
            elif msg_type in ("image", "audio", "file", "media"):
                file_path, content_text = await self._download_and_save_media(msg_type, content_json, message_id)
                if file_path:
                    media_paths.append(file_path)
                content_parts.append(content_text)
            elif msg_type in ("share_chat", "share_user", "interactive", "share_calendar_event", "system", "merge_forward"):
                # Handle share cards and interactive messages
                text = _extract_share_card_content(content_json, msg_type)
                if text:
                    content_parts.append(text)
                else:
                    content_parts.append(MSG_TYPE_MAP.get(msg_type, f"[{msg_type}]"))

            content = "\n".join(content_parts) if content_parts else ""
            if not content and not media_paths:
                return

            # Forward to message bus
            reply_to = chat_id if chat_type == "group" else sender_id
            await self._handle_message(
                sender_id=sender_id,
                chat_id=reply_to,
                content=content,
                media=media_paths,
                metadata={
                    "message_id": message_id,
                    "chat_type": chat_type,
                    "msg_type": msg_type,
                }
            )
        except Exception as e:
            logger.error("Error processing Feishu message: {}", e)
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/feishu.py", "license": "MIT License", "lines": 651, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:tests/test_tool_validation.py
from typing import Any

from nanobot.agent.tools.base import Tool
from nanobot.agent.tools.registry import ToolRegistry
from nanobot.agent.tools.shell import ExecTool


class SampleTool(Tool):
    @property
    def name(self) -> str:
        return "sample"

    @property
    def description(self) -> str:
        return "sample tool"

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {"type": "string", "minLength": 2},
                "count": {"type": "integer", "minimum": 1, "maximum": 10},
                "mode": {"type": "string", "enum": ["fast", "full"]},
                "meta": {
                    "type": "object",
                    "properties": {
                        "tag": {"type": "string"},
                        "flags": {
                            "type": "array",
                            "items": {"type": "string"},
                        },
                    },
                    "required": ["tag"],
                },
            },
            "required": ["query", "count"],
        }

    async def execute(self, **kwargs: Any) -> str:
        return "ok"


def test_validate_params_missing_required() -> None:
    tool = SampleTool()
    errors = tool.validate_params({"query": "hi"})
    assert "missing required count" in "; ".join(errors)


def test_validate_params_type_and_range() -> None:
    tool = SampleTool()
    errors = tool.validate_params({"query": "hi", "count": 0})
    assert any("count must be >= 1" in e for e in errors)
    errors = tool.validate_params({"query": "hi", "count": "2"})
    assert any("count should be integer" in e for e in errors)


def test_validate_params_enum_and_min_length() -> None:
    tool = SampleTool()
    errors = tool.validate_params({"query": "h", "count": 2, "mode": "slow"})
    assert any("query must be at least 2 chars" in e for e in errors)
    assert any("mode must be one of" in e for e in errors)


def test_validate_params_nested_object_and_array() -> None:
    tool = SampleTool()
    errors = tool.validate_params(
        {
            "query": "hi",
            "count": 2,
            "meta": {"flags": [1, "ok"]},
        }
    )
    assert any("missing required meta.tag" in e for e in errors)
    assert any("meta.flags[0] should be string" in e for e in errors)


def test_validate_params_ignores_unknown_fields() -> None:
    tool = SampleTool()
    errors = tool.validate_params({"query": "hi", "count": 2, "extra": "x"})
    assert errors == []


async def test_registry_returns_validation_error() -> None:
    reg = ToolRegistry()
    reg.register(SampleTool())
    result = await reg.execute("sample", {"query": "hi"})
    assert "Invalid parameters" in result


def test_exec_extract_absolute_paths_keeps_full_windows_path() -> None:
    cmd = r"type C:\user\workspace\txt"
    paths = ExecTool._extract_absolute_paths(cmd)
    assert paths == [r"C:\user\workspace\txt"]


def test_exec_extract_absolute_paths_ignores_relative_posix_segments() -> None:
    cmd = ".venv/bin/python script.py"
    paths = ExecTool._extract_absolute_paths(cmd)
    assert "/bin/python" not in paths


def test_exec_extract_absolute_paths_captures_posix_absolute_paths() -> None:
    cmd = "cat /tmp/data.txt > /tmp/out.txt"
    paths = ExecTool._extract_absolute_paths(cmd)
    assert "/tmp/data.txt" in paths
    assert "/tmp/out.txt" in paths
{ "repo_id": "HKUDS/nanobot", "file_path": "tests/test_tool_validation.py", "license": "MIT License", "lines": 83, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
HKUDS/nanobot:nanobot/providers/transcription.py
"""Voice transcription provider using Groq.""" import os from pathlib import Path import httpx from loguru import logger class GroqTranscriptionProvider: """ Voice transcription provider using Groq's Whisper API. Groq offers extremely fast transcription with a generous free tier. """ def __init__(self, api_key: str | None = None): self.api_key = api_key or os.environ.get("GROQ_API_KEY") self.api_url = "https://api.groq.com/openai/v1/audio/transcriptions" async def transcribe(self, file_path: str | Path) -> str: """ Transcribe an audio file using Groq. Args: file_path: Path to the audio file. Returns: Transcribed text. """ if not self.api_key: logger.warning("Groq API key not configured for transcription") return "" path = Path(file_path) if not path.exists(): logger.error("Audio file not found: {}", file_path) return "" try: async with httpx.AsyncClient() as client: with open(path, "rb") as f: files = { "file": (path.name, f), "model": (None, "whisper-large-v3"), } headers = { "Authorization": f"Bearer {self.api_key}", } response = await client.post( self.api_url, headers=headers, files=files, timeout=60.0 ) response.raise_for_status() data = response.json() return data.get("text", "") except Exception as e: logger.error("Groq transcription error: {}", e) return ""
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/providers/transcription.py", "license": "MIT License", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/agent/subagent.py
"""Subagent manager for background task execution.""" import asyncio import json import uuid from pathlib import Path from typing import Any from loguru import logger from nanobot.agent.tools.filesystem import EditFileTool, ListDirTool, ReadFileTool, WriteFileTool from nanobot.agent.tools.registry import ToolRegistry from nanobot.agent.tools.shell import ExecTool from nanobot.agent.tools.web import WebFetchTool, WebSearchTool from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.config.schema import ExecToolConfig from nanobot.providers.base import LLMProvider class SubagentManager: """Manages background subagent execution.""" def __init__( self, provider: LLMProvider, workspace: Path, bus: MessageBus, model: str | None = None, temperature: float = 0.7, max_tokens: int = 4096, reasoning_effort: str | None = None, brave_api_key: str | None = None, web_proxy: str | None = None, exec_config: "ExecToolConfig | None" = None, restrict_to_workspace: bool = False, ): from nanobot.config.schema import ExecToolConfig self.provider = provider self.workspace = workspace self.bus = bus self.model = model or provider.get_default_model() self.temperature = temperature self.max_tokens = max_tokens self.reasoning_effort = reasoning_effort self.brave_api_key = brave_api_key self.web_proxy = web_proxy self.exec_config = exec_config or ExecToolConfig() self.restrict_to_workspace = restrict_to_workspace self._running_tasks: dict[str, asyncio.Task[None]] = {} self._session_tasks: dict[str, set[str]] = {} # session_key -> {task_id, ...} async def spawn( self, task: str, label: str | None = None, origin_channel: str = "cli", origin_chat_id: str = "direct", session_key: str | None = None, ) -> str: """Spawn a subagent to execute a task in the background.""" task_id = str(uuid.uuid4())[:8] display_label = label or task[:30] + ("..." if len(task) > 30 else "") origin = {"channel": origin_channel, "chat_id": origin_chat_id} bg_task = asyncio.create_task( self._run_subagent(task_id, task, display_label, origin) ) self._running_tasks[task_id] = bg_task if session_key: self._session_tasks.setdefault(session_key, set()).add(task_id) def _cleanup(_: asyncio.Task) -> None: self._running_tasks.pop(task_id, None) if session_key and (ids := self._session_tasks.get(session_key)): ids.discard(task_id) if not ids: del self._session_tasks[session_key] bg_task.add_done_callback(_cleanup) logger.info("Spawned subagent [{}]: {}", task_id, display_label) return f"Subagent [{display_label}] started (id: {task_id}). I'll notify you when it completes." 
async def _run_subagent( self, task_id: str, task: str, label: str, origin: dict[str, str], ) -> None: """Execute the subagent task and announce the result.""" logger.info("Subagent [{}] starting task: {}", task_id, label) try: # Build subagent tools (no message tool, no spawn tool) tools = ToolRegistry() allowed_dir = self.workspace if self.restrict_to_workspace else None tools.register(ReadFileTool(workspace=self.workspace, allowed_dir=allowed_dir)) tools.register(WriteFileTool(workspace=self.workspace, allowed_dir=allowed_dir)) tools.register(EditFileTool(workspace=self.workspace, allowed_dir=allowed_dir)) tools.register(ListDirTool(workspace=self.workspace, allowed_dir=allowed_dir)) tools.register(ExecTool( working_dir=str(self.workspace), timeout=self.exec_config.timeout, restrict_to_workspace=self.restrict_to_workspace, path_append=self.exec_config.path_append, )) tools.register(WebSearchTool(api_key=self.brave_api_key, proxy=self.web_proxy)) tools.register(WebFetchTool(proxy=self.web_proxy)) system_prompt = self._build_subagent_prompt() messages: list[dict[str, Any]] = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": task}, ] # Run agent loop (limited iterations) max_iterations = 15 iteration = 0 final_result: str | None = None while iteration < max_iterations: iteration += 1 response = await self.provider.chat( messages=messages, tools=tools.get_definitions(), model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, reasoning_effort=self.reasoning_effort, ) if response.has_tool_calls: # Add assistant message with tool calls tool_call_dicts = [ { "id": tc.id, "type": "function", "function": { "name": tc.name, "arguments": json.dumps(tc.arguments, ensure_ascii=False), }, } for tc in response.tool_calls ] messages.append({ "role": "assistant", "content": response.content or "", "tool_calls": tool_call_dicts, }) # Execute tools for tool_call in response.tool_calls: args_str = json.dumps(tool_call.arguments, ensure_ascii=False) logger.debug("Subagent [{}] executing: {} with arguments: {}", task_id, tool_call.name, args_str) result = await tools.execute(tool_call.name, tool_call.arguments) messages.append({ "role": "tool", "tool_call_id": tool_call.id, "name": tool_call.name, "content": result, }) else: final_result = response.content break if final_result is None: final_result = "Task completed but no final response was generated." logger.info("Subagent [{}] completed successfully", task_id) await self._announce_result(task_id, label, task, final_result, origin, "ok") except Exception as e: error_msg = f"Error: {str(e)}" logger.error("Subagent [{}] failed: {}", task_id, e) await self._announce_result(task_id, label, task, error_msg, origin, "error") async def _announce_result( self, task_id: str, label: str, task: str, result: str, origin: dict[str, str], status: str, ) -> None: """Announce the subagent result to the main agent via the message bus.""" status_text = "completed successfully" if status == "ok" else "failed" announce_content = f"""[Subagent '{label}' {status_text}] Task: {task} Result: {result} Summarize this naturally for the user. Keep it brief (1-2 sentences). 
Do not mention technical details like "subagent" or task IDs.""" # Inject as system message to trigger main agent msg = InboundMessage( channel="system", sender_id="subagent", chat_id=f"{origin['channel']}:{origin['chat_id']}", content=announce_content, ) await self.bus.publish_inbound(msg) logger.debug("Subagent [{}] announced result to {}:{}", task_id, origin['channel'], origin['chat_id']) def _build_subagent_prompt(self) -> str: """Build a focused system prompt for the subagent.""" from nanobot.agent.context import ContextBuilder from nanobot.agent.skills import SkillsLoader time_ctx = ContextBuilder._build_runtime_context(None, None) parts = [f"""# Subagent {time_ctx} You are a subagent spawned by the main agent to complete a specific task. Stay focused on the assigned task. Your final response will be reported back to the main agent. ## Workspace {self.workspace}"""] skills_summary = SkillsLoader(self.workspace).build_skills_summary() if skills_summary: parts.append(f"## Skills\n\nRead SKILL.md with read_file to use a skill.\n\n{skills_summary}") return "\n\n".join(parts) async def cancel_by_session(self, session_key: str) -> int: """Cancel all subagents for the given session. Returns count cancelled.""" tasks = [self._running_tasks[tid] for tid in self._session_tasks.get(session_key, []) if tid in self._running_tasks and not self._running_tasks[tid].done()] for t in tasks: t.cancel() if tasks: await asyncio.gather(*tasks, return_exceptions=True) return len(tasks) def get_running_count(self) -> int: """Return the number of currently running subagents.""" return len(self._running_tasks)
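The acknowledgement returned by spawn() derives its display label from the task text when no label is given. A self-contained sketch of that fallback rule; display_label is an illustrative helper mirroring the expression in spawn(), not part of the class:

def display_label(task: str, label: str | None = None) -> str:
    # Fall back to the first 30 characters of the task, adding an
    # ellipsis only when the task was actually truncated.
    return label or task[:30] + ("..." if len(task) > 30 else "")

assert display_label("short task") == "short task"
assert display_label("x" * 40) == "x" * 30 + "..."
assert display_label("anything", label="custom") == "custom"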
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/subagent.py", "license": "MIT License", "lines": 208, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/tools/spawn.py
"""Spawn tool for creating background subagents.""" from typing import TYPE_CHECKING, Any from nanobot.agent.tools.base import Tool if TYPE_CHECKING: from nanobot.agent.subagent import SubagentManager class SpawnTool(Tool): """Tool to spawn a subagent for background task execution.""" def __init__(self, manager: "SubagentManager"): self._manager = manager self._origin_channel = "cli" self._origin_chat_id = "direct" self._session_key = "cli:direct" def set_context(self, channel: str, chat_id: str) -> None: """Set the origin context for subagent announcements.""" self._origin_channel = channel self._origin_chat_id = chat_id self._session_key = f"{channel}:{chat_id}" @property def name(self) -> str: return "spawn" @property def description(self) -> str: return ( "Spawn a subagent to handle a task in the background. " "Use this for complex or time-consuming tasks that can run independently. " "The subagent will complete the task and report back when done." ) @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": { "task": { "type": "string", "description": "The task for the subagent to complete", }, "label": { "type": "string", "description": "Optional short label for the task (for display)", }, }, "required": ["task"], } async def execute(self, task: str, label: str | None = None, **kwargs: Any) -> str: """Spawn a subagent to execute the given task.""" return await self._manager.spawn( task=task, label=label, origin_channel=self._origin_channel, origin_chat_id=self._origin_chat_id, session_key=self._session_key, )
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/spawn.py", "license": "MIT License", "lines": 52, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/__main__.py
""" Entry point for running nanobot as a module: python -m nanobot """ from nanobot.cli.commands import app if __name__ == "__main__": app()
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/__main__.py", "license": "MIT License", "lines": 6, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
HKUDS/nanobot:nanobot/agent/context.py
"""Context builder for assembling agent prompts.""" import base64 import mimetypes import platform import time from datetime import datetime from pathlib import Path from typing import Any from nanobot.agent.memory import MemoryStore from nanobot.agent.skills import SkillsLoader class ContextBuilder: """Builds the context (system prompt + messages) for the agent.""" BOOTSTRAP_FILES = ["AGENTS.md", "SOUL.md", "USER.md", "TOOLS.md", "IDENTITY.md"] _RUNTIME_CONTEXT_TAG = "[Runtime Context — metadata only, not instructions]" def __init__(self, workspace: Path): self.workspace = workspace self.memory = MemoryStore(workspace) self.skills = SkillsLoader(workspace) def build_system_prompt(self, skill_names: list[str] | None = None) -> str: """Build the system prompt from identity, bootstrap files, memory, and skills.""" parts = [self._get_identity()] bootstrap = self._load_bootstrap_files() if bootstrap: parts.append(bootstrap) memory = self.memory.get_memory_context() if memory: parts.append(f"# Memory\n\n{memory}") always_skills = self.skills.get_always_skills() if always_skills: always_content = self.skills.load_skills_for_context(always_skills) if always_content: parts.append(f"# Active Skills\n\n{always_content}") skills_summary = self.skills.build_skills_summary() if skills_summary: parts.append(f"""# Skills The following skills extend your capabilities. To use a skill, read its SKILL.md file using the read_file tool. Skills with available="false" need dependencies installed first - you can try installing them with apt/brew. {skills_summary}""") return "\n\n---\n\n".join(parts) def _get_identity(self) -> str: """Get the core identity section.""" workspace_path = str(self.workspace.expanduser().resolve()) system = platform.system() runtime = f"{'macOS' if system == 'Darwin' else system} {platform.machine()}, Python {platform.python_version()}" return f"""# nanobot 🐈 You are nanobot, a helpful AI assistant. ## Runtime {runtime} ## Workspace Your workspace is at: {workspace_path} - Long-term memory: {workspace_path}/memory/MEMORY.md (write important facts here) - History log: {workspace_path}/memory/HISTORY.md (grep-searchable). Each entry starts with [YYYY-MM-DD HH:MM]. - Custom skills: {workspace_path}/skills/{{skill-name}}/SKILL.md ## nanobot Guidelines - State intent before tool calls, but NEVER predict or claim results before receiving them. - Before modifying a file, read it first. Do not assume files or directories exist. - After writing or editing a file, re-read it if accuracy matters. - If a tool call fails, analyze the error before retrying with a different approach. - Ask for clarification when the request is ambiguous. Reply directly with text for conversations. 
Only use the 'message' tool to send to a specific chat channel.""" @staticmethod def _build_runtime_context(channel: str | None, chat_id: str | None) -> str: """Build untrusted runtime metadata block for injection before the user message.""" now = datetime.now().strftime("%Y-%m-%d %H:%M (%A)") tz = time.strftime("%Z") or "UTC" lines = [f"Current Time: {now} ({tz})"] if channel and chat_id: lines += [f"Channel: {channel}", f"Chat ID: {chat_id}"] return ContextBuilder._RUNTIME_CONTEXT_TAG + "\n" + "\n".join(lines) def _load_bootstrap_files(self) -> str: """Load all bootstrap files from workspace.""" parts = [] for filename in self.BOOTSTRAP_FILES: file_path = self.workspace / filename if file_path.exists(): content = file_path.read_text(encoding="utf-8") parts.append(f"## {filename}\n\n{content}") return "\n\n".join(parts) if parts else "" def build_messages( self, history: list[dict[str, Any]], current_message: str, skill_names: list[str] | None = None, media: list[str] | None = None, channel: str | None = None, chat_id: str | None = None, ) -> list[dict[str, Any]]: """Build the complete message list for an LLM call.""" runtime_ctx = self._build_runtime_context(channel, chat_id) user_content = self._build_user_content(current_message, media) # Merge runtime context and user content into a single user message # to avoid consecutive same-role messages that some providers reject. if isinstance(user_content, str): merged = f"{runtime_ctx}\n\n{user_content}" else: merged = [{"type": "text", "text": runtime_ctx}] + user_content return [ {"role": "system", "content": self.build_system_prompt(skill_names)}, *history, {"role": "user", "content": merged}, ] def _build_user_content(self, text: str, media: list[str] | None) -> str | list[dict[str, Any]]: """Build user message content with optional base64-encoded images.""" if not media: return text images = [] for path in media: p = Path(path) mime, _ = mimetypes.guess_type(path) if not p.is_file() or not mime or not mime.startswith("image/"): continue b64 = base64.b64encode(p.read_bytes()).decode() images.append({"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64}"}}) if not images: return text return images + [{"type": "text", "text": text}] def add_tool_result( self, messages: list[dict[str, Any]], tool_call_id: str, tool_name: str, result: str, ) -> list[dict[str, Any]]: """Add a tool result to the message list.""" messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": tool_name, "content": result}) return messages def add_assistant_message( self, messages: list[dict[str, Any]], content: str | None, tool_calls: list[dict[str, Any]] | None = None, reasoning_content: str | None = None, thinking_blocks: list[dict] | None = None, ) -> list[dict[str, Any]]: """Add an assistant message to the message list.""" msg: dict[str, Any] = {"role": "assistant", "content": content} if tool_calls: msg["tool_calls"] = tool_calls if reasoning_content is not None: msg["reasoning_content"] = reasoning_content if thinking_blocks: msg["thinking_blocks"] = thinking_blocks messages.append(msg) return messages
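A quick sketch of the merge performed by build_messages: the runtime metadata block is prepended to the user text rather than emitted as a separate user turn, so providers never see two consecutive user messages. The temporary workspace and sample message are illustrative:

import tempfile
from pathlib import Path

from nanobot.agent.context import ContextBuilder

with tempfile.TemporaryDirectory() as tmp:
    builder = ContextBuilder(Path(tmp))
    messages = builder.build_messages(
        history=[],
        current_message="hello",
        channel="telegram",
        chat_id="12345",
    )
    # One system prompt plus a single merged user message.
    assert [m["role"] for m in messages] == ["system", "user"]
    assert messages[1]["content"].startswith(ContextBuilder._RUNTIME_CONTEXT_TAG)
    assert messages[1]["content"].endswith("hello")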
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/context.py", "license": "MIT License", "lines": 140, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/loop.py
"""Agent loop: the core processing engine.""" from __future__ import annotations import asyncio import json import re import weakref from contextlib import AsyncExitStack from pathlib import Path from typing import TYPE_CHECKING, Any, Awaitable, Callable from loguru import logger from nanobot.agent.context import ContextBuilder from nanobot.agent.memory import MemoryStore from nanobot.agent.subagent import SubagentManager from nanobot.agent.tools.cron import CronTool from nanobot.agent.tools.filesystem import EditFileTool, ListDirTool, ReadFileTool, WriteFileTool from nanobot.agent.tools.message import MessageTool from nanobot.agent.tools.registry import ToolRegistry from nanobot.agent.tools.shell import ExecTool from nanobot.agent.tools.spawn import SpawnTool from nanobot.agent.tools.web import WebFetchTool, WebSearchTool from nanobot.bus.events import InboundMessage, OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMProvider from nanobot.session.manager import Session, SessionManager if TYPE_CHECKING: from nanobot.config.schema import ChannelsConfig, ExecToolConfig from nanobot.cron.service import CronService class AgentLoop: """ The agent loop is the core processing engine. It: 1. Receives messages from the bus 2. Builds context with history, memory, skills 3. Calls the LLM 4. Executes tool calls 5. Sends responses back """ _TOOL_RESULT_MAX_CHARS = 500 def __init__( self, bus: MessageBus, provider: LLMProvider, workspace: Path, model: str | None = None, max_iterations: int = 40, temperature: float = 0.1, max_tokens: int = 4096, memory_window: int = 100, reasoning_effort: str | None = None, brave_api_key: str | None = None, web_proxy: str | None = None, exec_config: ExecToolConfig | None = None, cron_service: CronService | None = None, restrict_to_workspace: bool = False, session_manager: SessionManager | None = None, mcp_servers: dict | None = None, channels_config: ChannelsConfig | None = None, ): from nanobot.config.schema import ExecToolConfig self.bus = bus self.channels_config = channels_config self.provider = provider self.workspace = workspace self.model = model or provider.get_default_model() self.max_iterations = max_iterations self.temperature = temperature self.max_tokens = max_tokens self.memory_window = memory_window self.reasoning_effort = reasoning_effort self.brave_api_key = brave_api_key self.web_proxy = web_proxy self.exec_config = exec_config or ExecToolConfig() self.cron_service = cron_service self.restrict_to_workspace = restrict_to_workspace self.context = ContextBuilder(workspace) self.sessions = session_manager or SessionManager(workspace) self.tools = ToolRegistry() self.subagents = SubagentManager( provider=provider, workspace=workspace, bus=bus, model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, reasoning_effort=reasoning_effort, brave_api_key=brave_api_key, web_proxy=web_proxy, exec_config=self.exec_config, restrict_to_workspace=restrict_to_workspace, ) self._running = False self._mcp_servers = mcp_servers or {} self._mcp_stack: AsyncExitStack | None = None self._mcp_connected = False self._mcp_connecting = False self._consolidating: set[str] = set() # Session keys with consolidation in progress self._consolidation_tasks: set[asyncio.Task] = set() # Strong refs to in-flight tasks self._consolidation_locks: weakref.WeakValueDictionary[str, asyncio.Lock] = weakref.WeakValueDictionary() self._active_tasks: dict[str, list[asyncio.Task]] = {} # session_key -> tasks self._processing_lock = 
asyncio.Lock() self._register_default_tools() def _register_default_tools(self) -> None: """Register the default set of tools.""" allowed_dir = self.workspace if self.restrict_to_workspace else None for cls in (ReadFileTool, WriteFileTool, EditFileTool, ListDirTool): self.tools.register(cls(workspace=self.workspace, allowed_dir=allowed_dir)) self.tools.register(ExecTool( working_dir=str(self.workspace), timeout=self.exec_config.timeout, restrict_to_workspace=self.restrict_to_workspace, path_append=self.exec_config.path_append, )) self.tools.register(WebSearchTool(api_key=self.brave_api_key, proxy=self.web_proxy)) self.tools.register(WebFetchTool(proxy=self.web_proxy)) self.tools.register(MessageTool(send_callback=self.bus.publish_outbound)) self.tools.register(SpawnTool(manager=self.subagents)) if self.cron_service: self.tools.register(CronTool(self.cron_service)) async def _connect_mcp(self) -> None: """Connect to configured MCP servers (one-time, lazy).""" if self._mcp_connected or self._mcp_connecting or not self._mcp_servers: return self._mcp_connecting = True from nanobot.agent.tools.mcp import connect_mcp_servers try: self._mcp_stack = AsyncExitStack() await self._mcp_stack.__aenter__() await connect_mcp_servers(self._mcp_servers, self.tools, self._mcp_stack) self._mcp_connected = True except Exception as e: logger.error("Failed to connect MCP servers (will retry next message): {}", e) if self._mcp_stack: try: await self._mcp_stack.aclose() except Exception: pass self._mcp_stack = None finally: self._mcp_connecting = False def _set_tool_context(self, channel: str, chat_id: str, message_id: str | None = None) -> None: """Update context for all tools that need routing info.""" for name in ("message", "spawn", "cron"): if tool := self.tools.get(name): if hasattr(tool, "set_context"): tool.set_context(channel, chat_id, *([message_id] if name == "message" else [])) @staticmethod def _strip_think(text: str | None) -> str | None: """Remove <think>…</think> blocks that some models embed in content.""" if not text: return None return re.sub(r"<think>[\s\S]*?</think>", "", text).strip() or None @staticmethod def _tool_hint(tool_calls: list) -> str: """Format tool calls as concise hint, e.g. 'web_search("query")'.""" def _fmt(tc): args = (tc.arguments[0] if isinstance(tc.arguments, list) else tc.arguments) or {} val = next(iter(args.values()), None) if isinstance(args, dict) else None if not isinstance(val, str): return tc.name return f'{tc.name}("{val[:40]}…")' if len(val) > 40 else f'{tc.name}("{val}")' return ", ".join(_fmt(tc) for tc in tool_calls) async def _run_agent_loop( self, initial_messages: list[dict], on_progress: Callable[..., Awaitable[None]] | None = None, ) -> tuple[str | None, list[str], list[dict]]: """Run the agent iteration loop. 
Returns (final_content, tools_used, messages).""" messages = initial_messages iteration = 0 final_content = None tools_used: list[str] = [] while iteration < self.max_iterations: iteration += 1 response = await self.provider.chat( messages=messages, tools=self.tools.get_definitions(), model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, reasoning_effort=self.reasoning_effort, ) if response.has_tool_calls: if on_progress: clean = self._strip_think(response.content) if clean: await on_progress(clean) await on_progress(self._tool_hint(response.tool_calls), tool_hint=True) tool_call_dicts = [ { "id": tc.id, "type": "function", "function": { "name": tc.name, "arguments": json.dumps(tc.arguments, ensure_ascii=False) } } for tc in response.tool_calls ] messages = self.context.add_assistant_message( messages, response.content, tool_call_dicts, reasoning_content=response.reasoning_content, thinking_blocks=response.thinking_blocks, ) for tool_call in response.tool_calls: tools_used.append(tool_call.name) args_str = json.dumps(tool_call.arguments, ensure_ascii=False) logger.info("Tool call: {}({})", tool_call.name, args_str[:200]) result = await self.tools.execute(tool_call.name, tool_call.arguments) messages = self.context.add_tool_result( messages, tool_call.id, tool_call.name, result ) else: clean = self._strip_think(response.content) # Don't persist error responses to session history — they can # poison the context and cause permanent 400 loops (#1303). if response.finish_reason == "error": logger.error("LLM returned error: {}", (clean or "")[:200]) final_content = clean or "Sorry, I encountered an error calling the AI model." break messages = self.context.add_assistant_message( messages, clean, reasoning_content=response.reasoning_content, thinking_blocks=response.thinking_blocks, ) final_content = clean break if final_content is None and iteration >= self.max_iterations: logger.warning("Max iterations ({}) reached", self.max_iterations) final_content = ( f"I reached the maximum number of tool call iterations ({self.max_iterations}) " "without completing the task. You can try breaking the task into smaller steps." ) return final_content, tools_used, messages async def run(self) -> None: """Run the agent loop, dispatching messages as tasks to stay responsive to /stop.""" self._running = True await self._connect_mcp() logger.info("Agent loop started") while self._running: try: msg = await asyncio.wait_for(self.bus.consume_inbound(), timeout=1.0) except asyncio.TimeoutError: continue if msg.content.strip().lower() == "/stop": await self._handle_stop(msg) else: task = asyncio.create_task(self._dispatch(msg)) self._active_tasks.setdefault(msg.session_key, []).append(task) task.add_done_callback(lambda t, k=msg.session_key: self._active_tasks.get(k, []) and self._active_tasks[k].remove(t) if t in self._active_tasks.get(k, []) else None) async def _handle_stop(self, msg: InboundMessage) -> None: """Cancel all active tasks and subagents for the session.""" tasks = self._active_tasks.pop(msg.session_key, []) cancelled = sum(1 for t in tasks if not t.done() and t.cancel()) for t in tasks: try: await t except (asyncio.CancelledError, Exception): pass sub_cancelled = await self.subagents.cancel_by_session(msg.session_key) total = cancelled + sub_cancelled content = f"⏹ Stopped {total} task(s)." if total else "No active task to stop." 
await self.bus.publish_outbound(OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content=content, )) async def _dispatch(self, msg: InboundMessage) -> None: """Process a message under the global lock.""" async with self._processing_lock: try: response = await self._process_message(msg) if response is not None: await self.bus.publish_outbound(response) elif msg.channel == "cli": await self.bus.publish_outbound(OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content="", metadata=msg.metadata or {}, )) except asyncio.CancelledError: logger.info("Task cancelled for session {}", msg.session_key) raise except Exception: logger.exception("Error processing message for session {}", msg.session_key) await self.bus.publish_outbound(OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content="Sorry, I encountered an error.", )) async def close_mcp(self) -> None: """Close MCP connections.""" if self._mcp_stack: try: await self._mcp_stack.aclose() except (RuntimeError, BaseExceptionGroup): pass # MCP SDK cancel scope cleanup is noisy but harmless self._mcp_stack = None def stop(self) -> None: """Stop the agent loop.""" self._running = False logger.info("Agent loop stopping") async def _process_message( self, msg: InboundMessage, session_key: str | None = None, on_progress: Callable[[str], Awaitable[None]] | None = None, ) -> OutboundMessage | None: """Process a single inbound message and return the response.""" # System messages: parse origin from chat_id ("channel:chat_id") if msg.channel == "system": channel, chat_id = (msg.chat_id.split(":", 1) if ":" in msg.chat_id else ("cli", msg.chat_id)) logger.info("Processing system message from {}", msg.sender_id) key = f"{channel}:{chat_id}" session = self.sessions.get_or_create(key) self._set_tool_context(channel, chat_id, msg.metadata.get("message_id")) history = session.get_history(max_messages=self.memory_window) messages = self.context.build_messages( history=history, current_message=msg.content, channel=channel, chat_id=chat_id, ) final_content, _, all_msgs = await self._run_agent_loop(messages) self._save_turn(session, all_msgs, 1 + len(history)) self.sessions.save(session) return OutboundMessage(channel=channel, chat_id=chat_id, content=final_content or "Background task completed.") preview = msg.content[:80] + "..." if len(msg.content) > 80 else msg.content logger.info("Processing message from {}:{}: {}", msg.channel, msg.sender_id, preview) key = session_key or msg.session_key session = self.sessions.get_or_create(key) # Slash commands cmd = msg.content.strip().lower() if cmd == "/new": lock = self._consolidation_locks.setdefault(session.key, asyncio.Lock()) self._consolidating.add(session.key) try: async with lock: snapshot = session.messages[session.last_consolidated:] if snapshot: temp = Session(key=session.key) temp.messages = list(snapshot) if not await self._consolidate_memory(temp, archive_all=True): return OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content="Memory archival failed, session not cleared. Please try again.", ) except Exception: logger.exception("/new archival failed for {}", session.key) return OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content="Memory archival failed, session not cleared. 
Please try again.", ) finally: self._consolidating.discard(session.key) session.clear() self.sessions.save(session) self.sessions.invalidate(session.key) return OutboundMessage(channel=msg.channel, chat_id=msg.chat_id, content="New session started.") if cmd == "/help": return OutboundMessage(channel=msg.channel, chat_id=msg.chat_id, content="🐈 nanobot commands:\n/new — Start a new conversation\n/stop — Stop the current task\n/help — Show available commands") unconsolidated = len(session.messages) - session.last_consolidated if (unconsolidated >= self.memory_window and session.key not in self._consolidating): self._consolidating.add(session.key) lock = self._consolidation_locks.setdefault(session.key, asyncio.Lock()) async def _consolidate_and_unlock(): try: async with lock: await self._consolidate_memory(session) finally: self._consolidating.discard(session.key) _task = asyncio.current_task() if _task is not None: self._consolidation_tasks.discard(_task) _task = asyncio.create_task(_consolidate_and_unlock()) self._consolidation_tasks.add(_task) self._set_tool_context(msg.channel, msg.chat_id, msg.metadata.get("message_id")) if message_tool := self.tools.get("message"): if isinstance(message_tool, MessageTool): message_tool.start_turn() history = session.get_history(max_messages=self.memory_window) initial_messages = self.context.build_messages( history=history, current_message=msg.content, media=msg.media if msg.media else None, channel=msg.channel, chat_id=msg.chat_id, ) async def _bus_progress(content: str, *, tool_hint: bool = False) -> None: meta = dict(msg.metadata or {}) meta["_progress"] = True meta["_tool_hint"] = tool_hint await self.bus.publish_outbound(OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content=content, metadata=meta, )) final_content, _, all_msgs = await self._run_agent_loop( initial_messages, on_progress=on_progress or _bus_progress, ) if final_content is None: final_content = "I've completed processing but have no response to give." self._save_turn(session, all_msgs, 1 + len(history)) self.sessions.save(session) if (mt := self.tools.get("message")) and isinstance(mt, MessageTool) and mt._sent_in_turn: return None preview = final_content[:120] + "..." if len(final_content) > 120 else final_content logger.info("Response to {}:{}: {}", msg.channel, msg.sender_id, preview) return OutboundMessage( channel=msg.channel, chat_id=msg.chat_id, content=final_content, metadata=msg.metadata or {}, ) def _save_turn(self, session: Session, messages: list[dict], skip: int) -> None: """Save new-turn messages into session, truncating large tool results.""" from datetime import datetime for m in messages[skip:]: entry = dict(m) role, content = entry.get("role"), entry.get("content") if role == "assistant" and not content and not entry.get("tool_calls"): continue # skip empty assistant messages — they poison session context if role == "tool" and isinstance(content, str) and len(content) > self._TOOL_RESULT_MAX_CHARS: entry["content"] = content[:self._TOOL_RESULT_MAX_CHARS] + "\n... (truncated)" elif role == "user": if isinstance(content, str) and content.startswith(ContextBuilder._RUNTIME_CONTEXT_TAG): # Strip the runtime-context prefix, keep only the user text. 
parts = content.split("\n\n", 1) if len(parts) > 1 and parts[1].strip(): entry["content"] = parts[1] else: continue if isinstance(content, list): filtered = [] for c in content: if c.get("type") == "text" and isinstance(c.get("text"), str) and c["text"].startswith(ContextBuilder._RUNTIME_CONTEXT_TAG): continue # Strip runtime context from multimodal messages if (c.get("type") == "image_url" and c.get("image_url", {}).get("url", "").startswith("data:image/")): filtered.append({"type": "text", "text": "[image]"}) else: filtered.append(c) if not filtered: continue entry["content"] = filtered entry.setdefault("timestamp", datetime.now().isoformat()) session.messages.append(entry) session.updated_at = datetime.now() async def _consolidate_memory(self, session, archive_all: bool = False) -> bool: """Delegate to MemoryStore.consolidate(). Returns True on success.""" return await MemoryStore(self.workspace).consolidate( session, self.provider, self.model, archive_all=archive_all, memory_window=self.memory_window, ) async def process_direct( self, content: str, session_key: str = "cli:direct", channel: str = "cli", chat_id: str = "direct", on_progress: Callable[[str], Awaitable[None]] | None = None, ) -> str: """Process a message directly (for CLI or cron usage).""" await self._connect_mcp() msg = InboundMessage(channel=channel, sender_id="user", chat_id=chat_id, content=content) response = await self._process_message(msg, session_key=session_key, on_progress=on_progress) return response.content if response else ""
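Two of the static helpers above are self-contained enough to exercise directly. A sketch, assuming the nanobot package and its dependencies are importable; the SimpleNamespace stands in for a provider tool-call object:

from types import SimpleNamespace

from nanobot.agent.loop import AgentLoop

# _strip_think removes inline <think> blocks some models emit in content.
assert AgentLoop._strip_think("<think>internal</think>Answer") == "Answer"
assert AgentLoop._strip_think("<think>only thoughts</think>") is None

# _tool_hint compresses tool calls into a short progress line.
tc = SimpleNamespace(name="web_search", arguments={"query": "nanobot docs"})
assert AgentLoop._tool_hint([tc]) == 'web_search("nanobot docs")'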
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/loop.py", "license": "MIT License", "lines": 458, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/memory.py
"""Memory system for persistent agent memory.""" from __future__ import annotations import json from pathlib import Path from typing import TYPE_CHECKING from loguru import logger from nanobot.utils.helpers import ensure_dir if TYPE_CHECKING: from nanobot.providers.base import LLMProvider from nanobot.session.manager import Session _SAVE_MEMORY_TOOL = [ { "type": "function", "function": { "name": "save_memory", "description": "Save the memory consolidation result to persistent storage.", "parameters": { "type": "object", "properties": { "history_entry": { "type": "string", "description": "A paragraph (2-5 sentences) summarizing key events/decisions/topics. " "Start with [YYYY-MM-DD HH:MM]. Include detail useful for grep search.", }, "memory_update": { "type": "string", "description": "Full updated long-term memory as markdown. Include all existing " "facts plus new ones. Return unchanged if nothing new.", }, }, "required": ["history_entry", "memory_update"], }, }, } ] class MemoryStore: """Two-layer memory: MEMORY.md (long-term facts) + HISTORY.md (grep-searchable log).""" def __init__(self, workspace: Path): self.memory_dir = ensure_dir(workspace / "memory") self.memory_file = self.memory_dir / "MEMORY.md" self.history_file = self.memory_dir / "HISTORY.md" def read_long_term(self) -> str: if self.memory_file.exists(): return self.memory_file.read_text(encoding="utf-8") return "" def write_long_term(self, content: str) -> None: self.memory_file.write_text(content, encoding="utf-8") def append_history(self, entry: str) -> None: with open(self.history_file, "a", encoding="utf-8") as f: f.write(entry.rstrip() + "\n\n") def get_memory_context(self) -> str: long_term = self.read_long_term() return f"## Long-term Memory\n{long_term}" if long_term else "" async def consolidate( self, session: Session, provider: LLMProvider, model: str, *, archive_all: bool = False, memory_window: int = 50, ) -> bool: """Consolidate old messages into MEMORY.md + HISTORY.md via LLM tool call. Returns True on success (including no-op), False on failure. """ if archive_all: old_messages = session.messages keep_count = 0 logger.info("Memory consolidation (archive_all): {} messages", len(session.messages)) else: keep_count = memory_window // 2 if len(session.messages) <= keep_count: return True if len(session.messages) - session.last_consolidated <= 0: return True old_messages = session.messages[session.last_consolidated:-keep_count] if not old_messages: return True logger.info("Memory consolidation: {} to consolidate, {} keep", len(old_messages), keep_count) lines = [] for m in old_messages: if not m.get("content"): continue tools = f" [tools: {', '.join(m['tools_used'])}]" if m.get("tools_used") else "" lines.append(f"[{m.get('timestamp', '?')[:16]}] {m['role'].upper()}{tools}: {m['content']}") current_memory = self.read_long_term() prompt = f"""Process this conversation and call the save_memory tool with your consolidation. ## Current Long-term Memory {current_memory or "(empty)"} ## Conversation to Process {chr(10).join(lines)}""" try: response = await provider.chat( messages=[ {"role": "system", "content": "You are a memory consolidation agent. 
Call the save_memory tool with your consolidation of the conversation."}, {"role": "user", "content": prompt}, ], tools=_SAVE_MEMORY_TOOL, model=model, ) if not response.has_tool_calls: logger.warning("Memory consolidation: LLM did not call save_memory, skipping") return False args = response.tool_calls[0].arguments # Some providers return arguments as a JSON string instead of dict if isinstance(args, str): args = json.loads(args) if not isinstance(args, dict): logger.warning("Memory consolidation: unexpected arguments type {}", type(args).__name__) return False if entry := args.get("history_entry"): if not isinstance(entry, str): entry = json.dumps(entry, ensure_ascii=False) self.append_history(entry) if update := args.get("memory_update"): if not isinstance(update, str): update = json.dumps(update, ensure_ascii=False) if update != current_memory: self.write_long_term(update) session.last_consolidated = 0 if archive_all else len(session.messages) - keep_count logger.info("Memory consolidation done: {} messages, last_consolidated={}", len(session.messages), session.last_consolidated) return True except Exception: logger.exception("Memory consolidation failed") return False
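A worked example of the window arithmetic in consolidate(), using the default memory_window of 50 and a hypothetical 120-message session:

memory_window = 50
session_len = 120
last_consolidated = 0

keep_count = memory_window // 2                 # 25 most recent messages stay verbatim
old = list(range(session_len))[last_consolidated:-keep_count]
assert len(old) == 95                           # messages summarized into MEMORY/HISTORY
assert session_len - keep_count == 95           # the new last_consolidated watermark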
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/memory.py", "license": "MIT License", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/skills.py
"""Skills loader for agent capabilities.""" import json import os import re import shutil from pathlib import Path # Default builtin skills directory (relative to this file) BUILTIN_SKILLS_DIR = Path(__file__).parent.parent / "skills" class SkillsLoader: """ Loader for agent skills. Skills are markdown files (SKILL.md) that teach the agent how to use specific tools or perform certain tasks. """ def __init__(self, workspace: Path, builtin_skills_dir: Path | None = None): self.workspace = workspace self.workspace_skills = workspace / "skills" self.builtin_skills = builtin_skills_dir or BUILTIN_SKILLS_DIR def list_skills(self, filter_unavailable: bool = True) -> list[dict[str, str]]: """ List all available skills. Args: filter_unavailable: If True, filter out skills with unmet requirements. Returns: List of skill info dicts with 'name', 'path', 'source'. """ skills = [] # Workspace skills (highest priority) if self.workspace_skills.exists(): for skill_dir in self.workspace_skills.iterdir(): if skill_dir.is_dir(): skill_file = skill_dir / "SKILL.md" if skill_file.exists(): skills.append({"name": skill_dir.name, "path": str(skill_file), "source": "workspace"}) # Built-in skills if self.builtin_skills and self.builtin_skills.exists(): for skill_dir in self.builtin_skills.iterdir(): if skill_dir.is_dir(): skill_file = skill_dir / "SKILL.md" if skill_file.exists() and not any(s["name"] == skill_dir.name for s in skills): skills.append({"name": skill_dir.name, "path": str(skill_file), "source": "builtin"}) # Filter by requirements if filter_unavailable: return [s for s in skills if self._check_requirements(self._get_skill_meta(s["name"]))] return skills def load_skill(self, name: str) -> str | None: """ Load a skill by name. Args: name: Skill name (directory name). Returns: Skill content or None if not found. """ # Check workspace first workspace_skill = self.workspace_skills / name / "SKILL.md" if workspace_skill.exists(): return workspace_skill.read_text(encoding="utf-8") # Check built-in if self.builtin_skills: builtin_skill = self.builtin_skills / name / "SKILL.md" if builtin_skill.exists(): return builtin_skill.read_text(encoding="utf-8") return None def load_skills_for_context(self, skill_names: list[str]) -> str: """ Load specific skills for inclusion in agent context. Args: skill_names: List of skill names to load. Returns: Formatted skills content. """ parts = [] for name in skill_names: content = self.load_skill(name) if content: content = self._strip_frontmatter(content) parts.append(f"### Skill: {name}\n\n{content}") return "\n\n---\n\n".join(parts) if parts else "" def build_skills_summary(self) -> str: """ Build a summary of all skills (name, description, path, availability). This is used for progressive loading - the agent can read the full skill content using read_file when needed. Returns: XML-formatted skills summary. 
""" all_skills = self.list_skills(filter_unavailable=False) if not all_skills: return "" def escape_xml(s: str) -> str: return s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") lines = ["<skills>"] for s in all_skills: name = escape_xml(s["name"]) path = s["path"] desc = escape_xml(self._get_skill_description(s["name"])) skill_meta = self._get_skill_meta(s["name"]) available = self._check_requirements(skill_meta) lines.append(f" <skill available=\"{str(available).lower()}\">") lines.append(f" <name>{name}</name>") lines.append(f" <description>{desc}</description>") lines.append(f" <location>{path}</location>") # Show missing requirements for unavailable skills if not available: missing = self._get_missing_requirements(skill_meta) if missing: lines.append(f" <requires>{escape_xml(missing)}</requires>") lines.append(" </skill>") lines.append("</skills>") return "\n".join(lines) def _get_missing_requirements(self, skill_meta: dict) -> str: """Get a description of missing requirements.""" missing = [] requires = skill_meta.get("requires", {}) for b in requires.get("bins", []): if not shutil.which(b): missing.append(f"CLI: {b}") for env in requires.get("env", []): if not os.environ.get(env): missing.append(f"ENV: {env}") return ", ".join(missing) def _get_skill_description(self, name: str) -> str: """Get the description of a skill from its frontmatter.""" meta = self.get_skill_metadata(name) if meta and meta.get("description"): return meta["description"] return name # Fallback to skill name def _strip_frontmatter(self, content: str) -> str: """Remove YAML frontmatter from markdown content.""" if content.startswith("---"): match = re.match(r"^---\n.*?\n---\n", content, re.DOTALL) if match: return content[match.end():].strip() return content def _parse_nanobot_metadata(self, raw: str) -> dict: """Parse skill metadata JSON from frontmatter (supports nanobot and openclaw keys).""" try: data = json.loads(raw) return data.get("nanobot", data.get("openclaw", {})) if isinstance(data, dict) else {} except (json.JSONDecodeError, TypeError): return {} def _check_requirements(self, skill_meta: dict) -> bool: """Check if skill requirements are met (bins, env vars).""" requires = skill_meta.get("requires", {}) for b in requires.get("bins", []): if not shutil.which(b): return False for env in requires.get("env", []): if not os.environ.get(env): return False return True def _get_skill_meta(self, name: str) -> dict: """Get nanobot metadata for a skill (cached in frontmatter).""" meta = self.get_skill_metadata(name) or {} return self._parse_nanobot_metadata(meta.get("metadata", "")) def get_always_skills(self) -> list[str]: """Get skills marked as always=true that meet requirements.""" result = [] for s in self.list_skills(filter_unavailable=True): meta = self.get_skill_metadata(s["name"]) or {} skill_meta = self._parse_nanobot_metadata(meta.get("metadata", "")) if skill_meta.get("always") or meta.get("always"): result.append(s["name"]) return result def get_skill_metadata(self, name: str) -> dict | None: """ Get metadata from a skill's frontmatter. Args: name: Skill name. Returns: Metadata dict or None. """ content = self.load_skill(name) if not content: return None if content.startswith("---"): match = re.match(r"^---\n(.*?)\n---", content, re.DOTALL) if match: # Simple YAML parsing metadata = {} for line in match.group(1).split("\n"): if ":" in line: key, value = line.split(":", 1) metadata[key.strip()] = value.strip().strip('"\'') return metadata return None
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/skills.py", "license": "MIT License", "lines": 186, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/tools/base.py
"""Base class for agent tools.""" from abc import ABC, abstractmethod from typing import Any class Tool(ABC): """ Abstract base class for agent tools. Tools are capabilities that the agent can use to interact with the environment, such as reading files, executing commands, etc. """ _TYPE_MAP = { "string": str, "integer": int, "number": (int, float), "boolean": bool, "array": list, "object": dict, } @property @abstractmethod def name(self) -> str: """Tool name used in function calls.""" pass @property @abstractmethod def description(self) -> str: """Description of what the tool does.""" pass @property @abstractmethod def parameters(self) -> dict[str, Any]: """JSON Schema for tool parameters.""" pass @abstractmethod async def execute(self, **kwargs: Any) -> str: """ Execute the tool with given parameters. Args: **kwargs: Tool-specific parameters. Returns: String result of the tool execution. """ pass def validate_params(self, params: dict[str, Any]) -> list[str]: """Validate tool parameters against JSON schema. Returns error list (empty if valid).""" schema = self.parameters or {} if schema.get("type", "object") != "object": raise ValueError(f"Schema must be object type, got {schema.get('type')!r}") return self._validate(params, {**schema, "type": "object"}, "") def _validate(self, val: Any, schema: dict[str, Any], path: str) -> list[str]: t, label = schema.get("type"), path or "parameter" if t in self._TYPE_MAP and not isinstance(val, self._TYPE_MAP[t]): return [f"{label} should be {t}"] errors = [] if "enum" in schema and val not in schema["enum"]: errors.append(f"{label} must be one of {schema['enum']}") if t in ("integer", "number"): if "minimum" in schema and val < schema["minimum"]: errors.append(f"{label} must be >= {schema['minimum']}") if "maximum" in schema and val > schema["maximum"]: errors.append(f"{label} must be <= {schema['maximum']}") if t == "string": if "minLength" in schema and len(val) < schema["minLength"]: errors.append(f"{label} must be at least {schema['minLength']} chars") if "maxLength" in schema and len(val) > schema["maxLength"]: errors.append(f"{label} must be at most {schema['maxLength']} chars") if t == "object": props = schema.get("properties", {}) for k in schema.get("required", []): if k not in val: errors.append(f"missing required {path + '.' + k if path else k}") for k, v in val.items(): if k in props: errors.extend(self._validate(v, props[k], path + "." + k if path else k)) if t == "array" and "items" in schema: for i, item in enumerate(val): errors.extend( self._validate(item, schema["items"], f"{path}[{i}]" if path else f"[{i}]") ) return errors def to_schema(self) -> dict[str, Any]: """Convert tool to OpenAI function schema format.""" return { "type": "function", "function": { "name": self.name, "description": self.description, "parameters": self.parameters, }, }
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/base.py", "license": "MIT License", "lines": 89, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/tools/filesystem.py
"""File system tools: read, write, edit.""" import difflib from pathlib import Path from typing import Any from nanobot.agent.tools.base import Tool def _resolve_path( path: str, workspace: Path | None = None, allowed_dir: Path | None = None ) -> Path: """Resolve path against workspace (if relative) and enforce directory restriction.""" p = Path(path).expanduser() if not p.is_absolute() and workspace: p = workspace / p resolved = p.resolve() if allowed_dir: try: resolved.relative_to(allowed_dir.resolve()) except ValueError: raise PermissionError(f"Path {path} is outside allowed directory {allowed_dir}") return resolved class ReadFileTool(Tool): """Tool to read file contents.""" def __init__(self, workspace: Path | None = None, allowed_dir: Path | None = None): self._workspace = workspace self._allowed_dir = allowed_dir @property def name(self) -> str: return "read_file" @property def description(self) -> str: return "Read the contents of a file at the given path." @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": {"path": {"type": "string", "description": "The file path to read"}}, "required": ["path"], } async def execute(self, path: str, **kwargs: Any) -> str: try: file_path = _resolve_path(path, self._workspace, self._allowed_dir) if not file_path.exists(): return f"Error: File not found: {path}" if not file_path.is_file(): return f"Error: Not a file: {path}" content = file_path.read_text(encoding="utf-8") return content except PermissionError as e: return f"Error: {e}" except Exception as e: return f"Error reading file: {str(e)}" class WriteFileTool(Tool): """Tool to write content to a file.""" def __init__(self, workspace: Path | None = None, allowed_dir: Path | None = None): self._workspace = workspace self._allowed_dir = allowed_dir @property def name(self) -> str: return "write_file" @property def description(self) -> str: return "Write content to a file at the given path. Creates parent directories if needed." @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": { "path": {"type": "string", "description": "The file path to write to"}, "content": {"type": "string", "description": "The content to write"}, }, "required": ["path", "content"], } async def execute(self, path: str, content: str, **kwargs: Any) -> str: try: file_path = _resolve_path(path, self._workspace, self._allowed_dir) file_path.parent.mkdir(parents=True, exist_ok=True) file_path.write_text(content, encoding="utf-8") return f"Successfully wrote {len(content)} bytes to {file_path}" except PermissionError as e: return f"Error: {e}" except Exception as e: return f"Error writing file: {str(e)}" class EditFileTool(Tool): """Tool to edit a file by replacing text.""" def __init__(self, workspace: Path | None = None, allowed_dir: Path | None = None): self._workspace = workspace self._allowed_dir = allowed_dir @property def name(self) -> str: return "edit_file" @property def description(self) -> str: return "Edit a file by replacing old_text with new_text. The old_text must exist exactly in the file." 
@property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": { "path": {"type": "string", "description": "The file path to edit"}, "old_text": {"type": "string", "description": "The exact text to find and replace"}, "new_text": {"type": "string", "description": "The text to replace with"}, }, "required": ["path", "old_text", "new_text"], } async def execute(self, path: str, old_text: str, new_text: str, **kwargs: Any) -> str: try: file_path = _resolve_path(path, self._workspace, self._allowed_dir) if not file_path.exists(): return f"Error: File not found: {path}" content = file_path.read_text(encoding="utf-8") if old_text not in content: return self._not_found_message(old_text, content, path) # Count occurrences count = content.count(old_text) if count > 1: return f"Warning: old_text appears {count} times. Please provide more context to make it unique." new_content = content.replace(old_text, new_text, 1) file_path.write_text(new_content, encoding="utf-8") return f"Successfully edited {file_path}" except PermissionError as e: return f"Error: {e}" except Exception as e: return f"Error editing file: {str(e)}" @staticmethod def _not_found_message(old_text: str, content: str, path: str) -> str: """Build a helpful error when old_text is not found.""" lines = content.splitlines(keepends=True) old_lines = old_text.splitlines(keepends=True) window = len(old_lines) best_ratio, best_start = 0.0, 0 for i in range(max(1, len(lines) - window + 1)): ratio = difflib.SequenceMatcher(None, old_lines, lines[i : i + window]).ratio() if ratio > best_ratio: best_ratio, best_start = ratio, i if best_ratio > 0.5: diff = "\n".join( difflib.unified_diff( old_lines, lines[best_start : best_start + window], fromfile="old_text (provided)", tofile=f"{path} (actual, line {best_start + 1})", lineterm="", ) ) return f"Error: old_text not found in {path}.\nBest match ({best_ratio:.0%} similar) at line {best_start + 1}:\n{diff}" return ( f"Error: old_text not found in {path}. No similar text found. Verify the file content." ) class ListDirTool(Tool): """Tool to list directory contents.""" def __init__(self, workspace: Path | None = None, allowed_dir: Path | None = None): self._workspace = workspace self._allowed_dir = allowed_dir @property def name(self) -> str: return "list_dir" @property def description(self) -> str: return "List the contents of a directory." @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": {"path": {"type": "string", "description": "The directory path to list"}}, "required": ["path"], } async def execute(self, path: str, **kwargs: Any) -> str: try: dir_path = _resolve_path(path, self._workspace, self._allowed_dir) if not dir_path.exists(): return f"Error: Directory not found: {path}" if not dir_path.is_dir(): return f"Error: Not a directory: {path}" items = [] for item in sorted(dir_path.iterdir()): prefix = "📁 " if item.is_dir() else "📄 " items.append(f"{prefix}{item.name}") if not items: return f"Directory {path} is empty" return "\n".join(items) except PermissionError as e: return f"Error: {e}" except Exception as e: return f"Error listing directory: {str(e)}"
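
# --- Illustrative usage sketch (not part of the repo): exercising the
# workspace sandboxing. The temp workspace and file names are made up. ---
import asyncio
import tempfile
from pathlib import Path
from nanobot.agent.tools.filesystem import ReadFileTool, WriteFileTool


async def demo() -> None:
    ws = Path(tempfile.mkdtemp())
    write = WriteFileTool(workspace=ws, allowed_dir=ws)
    read = ReadFileTool(workspace=ws, allowed_dir=ws)
    # Relative paths resolve under the workspace.
    print(await write.execute(path="notes.txt", content="hello"))
    print(await read.execute(path="notes.txt"))        # "hello"
    # Absolute paths outside allowed_dir are rejected by _resolve_path.
    print(await read.execute(path="/etc/hostname"))    # "Error: Path ... outside allowed directory ..."


asyncio.run(demo())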
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/filesystem.py", "license": "MIT License", "lines": 183, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/tools/message.py
"""Message tool for sending messages to users.""" from typing import Any, Awaitable, Callable from nanobot.agent.tools.base import Tool from nanobot.bus.events import OutboundMessage class MessageTool(Tool): """Tool to send messages to users on chat channels.""" def __init__( self, send_callback: Callable[[OutboundMessage], Awaitable[None]] | None = None, default_channel: str = "", default_chat_id: str = "", default_message_id: str | None = None, ): self._send_callback = send_callback self._default_channel = default_channel self._default_chat_id = default_chat_id self._default_message_id = default_message_id self._sent_in_turn: bool = False def set_context(self, channel: str, chat_id: str, message_id: str | None = None) -> None: """Set the current message context.""" self._default_channel = channel self._default_chat_id = chat_id self._default_message_id = message_id def set_send_callback(self, callback: Callable[[OutboundMessage], Awaitable[None]]) -> None: """Set the callback for sending messages.""" self._send_callback = callback def start_turn(self) -> None: """Reset per-turn send tracking.""" self._sent_in_turn = False @property def name(self) -> str: return "message" @property def description(self) -> str: return "Send a message to the user. Use this when you want to communicate something." @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": { "content": { "type": "string", "description": "The message content to send" }, "channel": { "type": "string", "description": "Optional: target channel (telegram, discord, etc.)" }, "chat_id": { "type": "string", "description": "Optional: target chat/user ID" }, "media": { "type": "array", "items": {"type": "string"}, "description": "Optional: list of file paths to attach (images, audio, documents)" } }, "required": ["content"] } async def execute( self, content: str, channel: str | None = None, chat_id: str | None = None, message_id: str | None = None, media: list[str] | None = None, **kwargs: Any ) -> str: channel = channel or self._default_channel chat_id = chat_id or self._default_chat_id message_id = message_id or self._default_message_id if not channel or not chat_id: return "Error: No target channel/chat specified" if not self._send_callback: return "Error: Message sending not configured" msg = OutboundMessage( channel=channel, chat_id=chat_id, content=content, media=media or [], metadata={ "message_id": message_id, } ) try: await self._send_callback(msg) if channel == self._default_channel and chat_id == self._default_chat_id: self._sent_in_turn = True media_info = f" with {len(media)} attachments" if media else "" return f"Message sent to {channel}:{chat_id}{media_info}" except Exception as e: return f"Error sending message: {str(e)}"
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/message.py", "license": "MIT License", "lines": 93, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/agent/tools/registry.py
"""Tool registry for dynamic tool management.""" from typing import Any from nanobot.agent.tools.base import Tool class ToolRegistry: """ Registry for agent tools. Allows dynamic registration and execution of tools. """ def __init__(self): self._tools: dict[str, Tool] = {} def register(self, tool: Tool) -> None: """Register a tool.""" self._tools[tool.name] = tool def unregister(self, name: str) -> None: """Unregister a tool by name.""" self._tools.pop(name, None) def get(self, name: str) -> Tool | None: """Get a tool by name.""" return self._tools.get(name) def has(self, name: str) -> bool: """Check if a tool is registered.""" return name in self._tools def get_definitions(self) -> list[dict[str, Any]]: """Get all tool definitions in OpenAI format.""" return [tool.to_schema() for tool in self._tools.values()] async def execute(self, name: str, params: dict[str, Any]) -> str: """Execute a tool by name with given parameters.""" _HINT = "\n\n[Analyze the error above and try a different approach.]" tool = self._tools.get(name) if not tool: return f"Error: Tool '{name}' not found. Available: {', '.join(self.tool_names)}" try: errors = tool.validate_params(params) if errors: return f"Error: Invalid parameters for tool '{name}': " + "; ".join(errors) + _HINT result = await tool.execute(**params) if isinstance(result, str) and result.startswith("Error"): return result + _HINT return result except Exception as e: return f"Error executing {name}: {str(e)}" + _HINT @property def tool_names(self) -> list[str]: """Get list of registered tool names.""" return list(self._tools.keys()) def __len__(self) -> int: return len(self._tools) def __contains__(self, name: str) -> bool: return name in self._tools
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/registry.py", "license": "MIT License", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/agent/tools/shell.py
"""Shell execution tool.""" import asyncio import os import re from pathlib import Path from typing import Any from nanobot.agent.tools.base import Tool class ExecTool(Tool): """Tool to execute shell commands.""" def __init__( self, timeout: int = 60, working_dir: str | None = None, deny_patterns: list[str] | None = None, allow_patterns: list[str] | None = None, restrict_to_workspace: bool = False, path_append: str = "", ): self.timeout = timeout self.working_dir = working_dir self.deny_patterns = deny_patterns or [ r"\brm\s+-[rf]{1,2}\b", # rm -r, rm -rf, rm -fr r"\bdel\s+/[fq]\b", # del /f, del /q r"\brmdir\s+/s\b", # rmdir /s r"(?:^|[;&|]\s*)format\b", # format (as standalone command only) r"\b(mkfs|diskpart)\b", # disk operations r"\bdd\s+if=", # dd r">\s*/dev/sd", # write to disk r"\b(shutdown|reboot|poweroff)\b", # system power r":\(\)\s*\{.*\};\s*:", # fork bomb ] self.allow_patterns = allow_patterns or [] self.restrict_to_workspace = restrict_to_workspace self.path_append = path_append @property def name(self) -> str: return "exec" @property def description(self) -> str: return "Execute a shell command and return its output. Use with caution." @property def parameters(self) -> dict[str, Any]: return { "type": "object", "properties": { "command": { "type": "string", "description": "The shell command to execute" }, "working_dir": { "type": "string", "description": "Optional working directory for the command" } }, "required": ["command"] } async def execute(self, command: str, working_dir: str | None = None, **kwargs: Any) -> str: cwd = working_dir or self.working_dir or os.getcwd() guard_error = self._guard_command(command, cwd) if guard_error: return guard_error env = os.environ.copy() if self.path_append: env["PATH"] = env.get("PATH", "") + os.pathsep + self.path_append try: process = await asyncio.create_subprocess_shell( command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=cwd, env=env, ) try: stdout, stderr = await asyncio.wait_for( process.communicate(), timeout=self.timeout ) except asyncio.TimeoutError: process.kill() # Wait for the process to fully terminate so pipes are # drained and file descriptors are released. try: await asyncio.wait_for(process.wait(), timeout=5.0) except asyncio.TimeoutError: pass return f"Error: Command timed out after {self.timeout} seconds" output_parts = [] if stdout: output_parts.append(stdout.decode("utf-8", errors="replace")) if stderr: stderr_text = stderr.decode("utf-8", errors="replace") if stderr_text.strip(): output_parts.append(f"STDERR:\n{stderr_text}") if process.returncode != 0: output_parts.append(f"\nExit code: {process.returncode}") result = "\n".join(output_parts) if output_parts else "(no output)" # Truncate very long output max_len = 10000 if len(result) > max_len: result = result[:max_len] + f"\n... 
(truncated, {len(result) - max_len} more chars)" return result except Exception as e: return f"Error executing command: {str(e)}" def _guard_command(self, command: str, cwd: str) -> str | None: """Best-effort safety guard for potentially destructive commands.""" cmd = command.strip() lower = cmd.lower() for pattern in self.deny_patterns: if re.search(pattern, lower): return "Error: Command blocked by safety guard (dangerous pattern detected)" if self.allow_patterns: if not any(re.search(p, lower) for p in self.allow_patterns): return "Error: Command blocked by safety guard (not in allowlist)" if self.restrict_to_workspace: if "..\\" in cmd or "../" in cmd: return "Error: Command blocked by safety guard (path traversal detected)" cwd_path = Path(cwd).resolve() for raw in self._extract_absolute_paths(cmd): try: p = Path(raw.strip()).resolve() except Exception: continue if p.is_absolute() and cwd_path not in p.parents and p != cwd_path: return "Error: Command blocked by safety guard (path outside working dir)" return None @staticmethod def _extract_absolute_paths(command: str) -> list[str]: win_paths = re.findall(r"[A-Za-z]:\\[^\s\"'|><;]+", command) # Windows: C:\... posix_paths = re.findall(r"(?:^|[\s|>])(/[^\s\"'>]+)", command) # POSIX: /absolute only return win_paths + posix_paths
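
# --- Illustrative usage sketch (not part of the repo): the safety guard
# rejecting a destructive command while a harmless one runs normally. ---
import asyncio
from nanobot.agent.tools.shell import ExecTool

tool = ExecTool(timeout=10)
print(asyncio.run(tool.execute(command="rm -rf /tmp/x")))
# "Error: Command blocked by safety guard (dangerous pattern detected)"
print(asyncio.run(tool.execute(command="echo hello")))  # "hello"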
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/shell.py", "license": "MIT License", "lines": 130, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/agent/tools/web.py
"""Web tools: web_search and web_fetch.""" import html import json import os import re from typing import Any from urllib.parse import urlparse import httpx from loguru import logger from nanobot.agent.tools.base import Tool # Shared constants USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 14_7_2) AppleWebKit/537.36" MAX_REDIRECTS = 5 # Limit redirects to prevent DoS attacks def _strip_tags(text: str) -> str: """Remove HTML tags and decode entities.""" text = re.sub(r'<script[\s\S]*?</script>', '', text, flags=re.I) text = re.sub(r'<style[\s\S]*?</style>', '', text, flags=re.I) text = re.sub(r'<[^>]+>', '', text) return html.unescape(text).strip() def _normalize(text: str) -> str: """Normalize whitespace.""" text = re.sub(r'[ \t]+', ' ', text) return re.sub(r'\n{3,}', '\n\n', text).strip() def _validate_url(url: str) -> tuple[bool, str]: """Validate URL: must be http(s) with valid domain.""" try: p = urlparse(url) if p.scheme not in ('http', 'https'): return False, f"Only http/https allowed, got '{p.scheme or 'none'}'" if not p.netloc: return False, "Missing domain" return True, "" except Exception as e: return False, str(e) class WebSearchTool(Tool): """Search the web using Brave Search API.""" name = "web_search" description = "Search the web. Returns titles, URLs, and snippets." parameters = { "type": "object", "properties": { "query": {"type": "string", "description": "Search query"}, "count": {"type": "integer", "description": "Results (1-10)", "minimum": 1, "maximum": 10} }, "required": ["query"] } def __init__(self, api_key: str | None = None, max_results: int = 5, proxy: str | None = None): self._init_api_key = api_key self.max_results = max_results self.proxy = proxy @property def api_key(self) -> str: """Resolve API key at call time so env/config changes are picked up.""" return self._init_api_key or os.environ.get("BRAVE_API_KEY", "") async def execute(self, query: str, count: int | None = None, **kwargs: Any) -> str: if not self.api_key: return ( "Error: Brave Search API key not configured. Set it in " "~/.nanobot/config.json under tools.web.search.apiKey " "(or export BRAVE_API_KEY), then restart the gateway." ) try: n = min(max(count or self.max_results, 1), 10) logger.debug("WebSearch: {}", "proxy enabled" if self.proxy else "direct connection") async with httpx.AsyncClient(proxy=self.proxy) as client: r = await client.get( "https://api.search.brave.com/res/v1/web/search", params={"q": query, "count": n}, headers={"Accept": "application/json", "X-Subscription-Token": self.api_key}, timeout=10.0 ) r.raise_for_status() results = r.json().get("web", {}).get("results", [])[:n] if not results: return f"No results for: {query}" lines = [f"Results for: {query}\n"] for i, item in enumerate(results, 1): lines.append(f"{i}. {item.get('title', '')}\n {item.get('url', '')}") if desc := item.get("description"): lines.append(f" {desc}") return "\n".join(lines) except httpx.ProxyError as e: logger.error("WebSearch proxy error: {}", e) return f"Proxy error: {e}" except Exception as e: logger.error("WebSearch error: {}", e) return f"Error: {e}" class WebFetchTool(Tool): """Fetch and extract content from a URL using Readability.""" name = "web_fetch" description = "Fetch URL and extract readable content (HTML → markdown/text)." 
parameters = { "type": "object", "properties": { "url": {"type": "string", "description": "URL to fetch"}, "extractMode": {"type": "string", "enum": ["markdown", "text"], "default": "markdown"}, "maxChars": {"type": "integer", "minimum": 100} }, "required": ["url"] } def __init__(self, max_chars: int = 50000, proxy: str | None = None): self.max_chars = max_chars self.proxy = proxy async def execute(self, url: str, extractMode: str = "markdown", maxChars: int | None = None, **kwargs: Any) -> str: from readability import Document max_chars = maxChars or self.max_chars is_valid, error_msg = _validate_url(url) if not is_valid: return json.dumps({"error": f"URL validation failed: {error_msg}", "url": url}, ensure_ascii=False) try: logger.debug("WebFetch: {}", "proxy enabled" if self.proxy else "direct connection") async with httpx.AsyncClient( follow_redirects=True, max_redirects=MAX_REDIRECTS, timeout=30.0, proxy=self.proxy, ) as client: r = await client.get(url, headers={"User-Agent": USER_AGENT}) r.raise_for_status() ctype = r.headers.get("content-type", "") if "application/json" in ctype: text, extractor = json.dumps(r.json(), indent=2, ensure_ascii=False), "json" elif "text/html" in ctype or r.text[:256].lower().startswith(("<!doctype", "<html")): doc = Document(r.text) content = self._to_markdown(doc.summary()) if extractMode == "markdown" else _strip_tags(doc.summary()) text = f"# {doc.title()}\n\n{content}" if doc.title() else content extractor = "readability" else: text, extractor = r.text, "raw" truncated = len(text) > max_chars if truncated: text = text[:max_chars] return json.dumps({"url": url, "finalUrl": str(r.url), "status": r.status_code, "extractor": extractor, "truncated": truncated, "length": len(text), "text": text}, ensure_ascii=False) except httpx.ProxyError as e: logger.error("WebFetch proxy error for {}: {}", url, e) return json.dumps({"error": f"Proxy error: {e}", "url": url}, ensure_ascii=False) except Exception as e: logger.error("WebFetch error for {}: {}", url, e) return json.dumps({"error": str(e), "url": url}, ensure_ascii=False) def _to_markdown(self, html: str) -> str: """Convert HTML to markdown.""" # Convert links, headings, lists before stripping tags text = re.sub(r'<a\s+[^>]*href=["\']([^"\']+)["\'][^>]*>([\s\S]*?)</a>', lambda m: f'[{_strip_tags(m[2])}]({m[1]})', html, flags=re.I) text = re.sub(r'<h([1-6])[^>]*>([\s\S]*?)</h\1>', lambda m: f'\n{"#" * int(m[1])} {_strip_tags(m[2])}\n', text, flags=re.I) text = re.sub(r'<li[^>]*>([\s\S]*?)</li>', lambda m: f'\n- {_strip_tags(m[1])}', text, flags=re.I) text = re.sub(r'</(p|div|section|article)>', '\n\n', text, flags=re.I) text = re.sub(r'<(br|hr)\s*/?>', '\n', text, flags=re.I) return _normalize(_strip_tags(text))
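
# --- Illustrative usage sketch (not part of the repo): the URL validation
# and HTML-stripping helpers on made-up inputs. ---
from nanobot.agent.tools.web import _strip_tags, _validate_url

print(_validate_url("https://example.com/page"))  # (True, '')
print(_validate_url("ftp://example.com"))         # (False, "Only http/https allowed, got 'ftp'")
print(_validate_url("https:///path"))             # (False, 'Missing domain')
print(_strip_tags("<p>Hi &amp; bye</p><script>x()</script>"))  # "Hi & bye"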
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/agent/tools/web.py", "license": "MIT License", "lines": 150, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/bus/events.py
"""Event types for the message bus.""" from dataclasses import dataclass, field from datetime import datetime from typing import Any @dataclass class InboundMessage: """Message received from a chat channel.""" channel: str # telegram, discord, slack, whatsapp sender_id: str # User identifier chat_id: str # Chat/channel identifier content: str # Message text timestamp: datetime = field(default_factory=datetime.now) media: list[str] = field(default_factory=list) # Media URLs metadata: dict[str, Any] = field(default_factory=dict) # Channel-specific data session_key_override: str | None = None # Optional override for thread-scoped sessions @property def session_key(self) -> str: """Unique key for session identification.""" return self.session_key_override or f"{self.channel}:{self.chat_id}" @dataclass class OutboundMessage: """Message to send to a chat channel.""" channel: str chat_id: str content: str reply_to: str | None = None media: list[str] = field(default_factory=list) metadata: dict[str, Any] = field(default_factory=dict)
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/bus/events.py", "license": "MIT License", "lines": 28, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/bus/queue.py
"""Async message queue for decoupled channel-agent communication.""" import asyncio from nanobot.bus.events import InboundMessage, OutboundMessage class MessageBus: """ Async message bus that decouples chat channels from the agent core. Channels push messages to the inbound queue, and the agent processes them and pushes responses to the outbound queue. """ def __init__(self): self.inbound: asyncio.Queue[InboundMessage] = asyncio.Queue() self.outbound: asyncio.Queue[OutboundMessage] = asyncio.Queue() async def publish_inbound(self, msg: InboundMessage) -> None: """Publish a message from a channel to the agent.""" await self.inbound.put(msg) async def consume_inbound(self) -> InboundMessage: """Consume the next inbound message (blocks until available).""" return await self.inbound.get() async def publish_outbound(self, msg: OutboundMessage) -> None: """Publish a response from the agent to channels.""" await self.outbound.put(msg) async def consume_outbound(self) -> OutboundMessage: """Consume the next outbound message (blocks until available).""" return await self.outbound.get() @property def inbound_size(self) -> int: """Number of pending inbound messages.""" return self.inbound.qsize() @property def outbound_size(self) -> int: """Number of pending outbound messages.""" return self.outbound.qsize()
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/bus/queue.py", "license": "MIT License", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/channels/base.py
"""Base channel interface for chat platforms.""" from abc import ABC, abstractmethod from typing import Any from loguru import logger from nanobot.bus.events import InboundMessage, OutboundMessage from nanobot.bus.queue import MessageBus class BaseChannel(ABC): """ Abstract base class for chat channel implementations. Each channel (Telegram, Discord, etc.) should implement this interface to integrate with the nanobot message bus. """ name: str = "base" def __init__(self, config: Any, bus: MessageBus): """ Initialize the channel. Args: config: Channel-specific configuration. bus: The message bus for communication. """ self.config = config self.bus = bus self._running = False @abstractmethod async def start(self) -> None: """ Start the channel and begin listening for messages. This should be a long-running async task that: 1. Connects to the chat platform 2. Listens for incoming messages 3. Forwards messages to the bus via _handle_message() """ pass @abstractmethod async def stop(self) -> None: """Stop the channel and clean up resources.""" pass @abstractmethod async def send(self, msg: OutboundMessage) -> None: """ Send a message through this channel. Args: msg: The message to send. """ pass def is_allowed(self, sender_id: str) -> bool: """Check if *sender_id* is permitted. Empty list → deny all; ``"*"`` → allow all.""" allow_list = getattr(self.config, "allow_from", []) if not allow_list: logger.warning("{}: allow_from is empty — all access denied", self.name) return False if "*" in allow_list: return True sender_str = str(sender_id) return sender_str in allow_list or any( p in allow_list for p in sender_str.split("|") if p ) async def _handle_message( self, sender_id: str, chat_id: str, content: str, media: list[str] | None = None, metadata: dict[str, Any] | None = None, session_key: str | None = None, ) -> None: """ Handle an incoming message from the chat platform. This method checks permissions and forwards to the bus. Args: sender_id: The sender's identifier. chat_id: The chat/channel identifier. content: Message text content. media: Optional list of media URLs. metadata: Optional channel-specific metadata. session_key: Optional session key override (e.g. thread-scoped sessions). """ if not self.is_allowed(sender_id): logger.warning( "Access denied for sender {} on channel {}. " "Add them to allowFrom list in config to grant access.", sender_id, self.name, ) return msg = InboundMessage( channel=self.name, sender_id=str(sender_id), chat_id=str(chat_id), content=content, media=media or [], metadata=metadata or {}, session_key_override=session_key, ) await self.bus.publish_inbound(msg) @property def is_running(self) -> bool: """Check if the channel is running.""" return self._running
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/base.py", "license": "MIT License", "lines": 98, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/manager.py
"""Channel manager for coordinating chat channels.""" from __future__ import annotations import asyncio from typing import Any from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import Config class ChannelManager: """ Manages chat channels and coordinates message routing. Responsibilities: - Initialize enabled channels (Telegram, WhatsApp, etc.) - Start/stop channels - Route outbound messages """ def __init__(self, config: Config, bus: MessageBus): self.config = config self.bus = bus self.channels: dict[str, BaseChannel] = {} self._dispatch_task: asyncio.Task | None = None self._init_channels() def _init_channels(self) -> None: """Initialize channels based on config.""" # Telegram channel if self.config.channels.telegram.enabled: try: from nanobot.channels.telegram import TelegramChannel self.channels["telegram"] = TelegramChannel( self.config.channels.telegram, self.bus, groq_api_key=self.config.providers.groq.api_key, ) logger.info("Telegram channel enabled") except ImportError as e: logger.warning("Telegram channel not available: {}", e) # WhatsApp channel if self.config.channels.whatsapp.enabled: try: from nanobot.channels.whatsapp import WhatsAppChannel self.channels["whatsapp"] = WhatsAppChannel( self.config.channels.whatsapp, self.bus ) logger.info("WhatsApp channel enabled") except ImportError as e: logger.warning("WhatsApp channel not available: {}", e) # Discord channel if self.config.channels.discord.enabled: try: from nanobot.channels.discord import DiscordChannel self.channels["discord"] = DiscordChannel( self.config.channels.discord, self.bus ) logger.info("Discord channel enabled") except ImportError as e: logger.warning("Discord channel not available: {}", e) # Feishu channel if self.config.channels.feishu.enabled: try: from nanobot.channels.feishu import FeishuChannel self.channels["feishu"] = FeishuChannel( self.config.channels.feishu, self.bus ) logger.info("Feishu channel enabled") except ImportError as e: logger.warning("Feishu channel not available: {}", e) # Mochat channel if self.config.channels.mochat.enabled: try: from nanobot.channels.mochat import MochatChannel self.channels["mochat"] = MochatChannel( self.config.channels.mochat, self.bus ) logger.info("Mochat channel enabled") except ImportError as e: logger.warning("Mochat channel not available: {}", e) # DingTalk channel if self.config.channels.dingtalk.enabled: try: from nanobot.channels.dingtalk import DingTalkChannel self.channels["dingtalk"] = DingTalkChannel( self.config.channels.dingtalk, self.bus ) logger.info("DingTalk channel enabled") except ImportError as e: logger.warning("DingTalk channel not available: {}", e) # Email channel if self.config.channels.email.enabled: try: from nanobot.channels.email import EmailChannel self.channels["email"] = EmailChannel( self.config.channels.email, self.bus ) logger.info("Email channel enabled") except ImportError as e: logger.warning("Email channel not available: {}", e) # Slack channel if self.config.channels.slack.enabled: try: from nanobot.channels.slack import SlackChannel self.channels["slack"] = SlackChannel( self.config.channels.slack, self.bus ) logger.info("Slack channel enabled") except ImportError as e: logger.warning("Slack channel not available: {}", e) # QQ channel if self.config.channels.qq.enabled: try: from nanobot.channels.qq import QQChannel self.channels["qq"] = QQChannel( self.config.channels.qq, self.bus, 
) logger.info("QQ channel enabled") except ImportError as e: logger.warning("QQ channel not available: {}", e) # Matrix channel if self.config.channels.matrix.enabled: try: from nanobot.channels.matrix import MatrixChannel self.channels["matrix"] = MatrixChannel( self.config.channels.matrix, self.bus, ) logger.info("Matrix channel enabled") except ImportError as e: logger.warning("Matrix channel not available: {}", e) self._validate_allow_from() def _validate_allow_from(self) -> None: for name, ch in self.channels.items(): if getattr(ch.config, "allow_from", None) == []: raise SystemExit( f'Error: "{name}" has empty allowFrom (denies all). ' f'Set ["*"] to allow everyone, or add specific user IDs.' ) async def _start_channel(self, name: str, channel: BaseChannel) -> None: """Start a channel and log any exceptions.""" try: await channel.start() except Exception as e: logger.error("Failed to start channel {}: {}", name, e) async def start_all(self) -> None: """Start all channels and the outbound dispatcher.""" if not self.channels: logger.warning("No channels enabled") return # Start outbound dispatcher self._dispatch_task = asyncio.create_task(self._dispatch_outbound()) # Start channels tasks = [] for name, channel in self.channels.items(): logger.info("Starting {} channel...", name) tasks.append(asyncio.create_task(self._start_channel(name, channel))) # Wait for all to complete (they should run forever) await asyncio.gather(*tasks, return_exceptions=True) async def stop_all(self) -> None: """Stop all channels and the dispatcher.""" logger.info("Stopping all channels...") # Stop dispatcher if self._dispatch_task: self._dispatch_task.cancel() try: await self._dispatch_task except asyncio.CancelledError: pass # Stop all channels for name, channel in self.channels.items(): try: await channel.stop() logger.info("Stopped {} channel", name) except Exception as e: logger.error("Error stopping {}: {}", name, e) async def _dispatch_outbound(self) -> None: """Dispatch outbound messages to the appropriate channel.""" logger.info("Outbound dispatcher started") while True: try: msg = await asyncio.wait_for( self.bus.consume_outbound(), timeout=1.0 ) if msg.metadata.get("_progress"): if msg.metadata.get("_tool_hint") and not self.config.channels.send_tool_hints: continue if not msg.metadata.get("_tool_hint") and not self.config.channels.send_progress: continue channel = self.channels.get(msg.channel) if channel: try: await channel.send(msg) except Exception as e: logger.error("Error sending to {}: {}", msg.channel, e) else: logger.warning("Unknown channel: {}", msg.channel) except asyncio.TimeoutError: continue except asyncio.CancelledError: break def get_channel(self, name: str) -> BaseChannel | None: """Get a channel by name.""" return self.channels.get(name) def get_status(self) -> dict[str, Any]: """Get status of all channels.""" return { name: { "enabled": True, "running": channel.is_running } for name, channel in self.channels.items() } @property def enabled_channels(self) -> list[str]: """Get list of enabled channel names.""" return list(self.channels.keys())
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/manager.py", "license": "MIT License", "lines": 216, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/telegram.py
"""Telegram channel implementation using python-telegram-bot.""" from __future__ import annotations import asyncio import re from loguru import logger from telegram import BotCommand, ReplyParameters, Update from telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters from telegram.request import HTTPXRequest from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import TelegramConfig def _markdown_to_telegram_html(text: str) -> str: """ Convert markdown to Telegram-safe HTML. """ if not text: return "" # 1. Extract and protect code blocks (preserve content from other processing) code_blocks: list[str] = [] def save_code_block(m: re.Match) -> str: code_blocks.append(m.group(1)) return f"\x00CB{len(code_blocks) - 1}\x00" text = re.sub(r'```[\w]*\n?([\s\S]*?)```', save_code_block, text) # 2. Extract and protect inline code inline_codes: list[str] = [] def save_inline_code(m: re.Match) -> str: inline_codes.append(m.group(1)) return f"\x00IC{len(inline_codes) - 1}\x00" text = re.sub(r'`([^`]+)`', save_inline_code, text) # 3. Headers # Title -> just the title text text = re.sub(r'^#{1,6}\s+(.+)$', r'\1', text, flags=re.MULTILINE) # 4. Blockquotes > text -> just the text (before HTML escaping) text = re.sub(r'^>\s*(.*)$', r'\1', text, flags=re.MULTILINE) # 5. Escape HTML special characters text = text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") # 6. Links [text](url) - must be before bold/italic to handle nested cases text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'<a href="\2">\1</a>', text) # 7. Bold **text** or __text__ text = re.sub(r'\*\*(.+?)\*\*', r'<b>\1</b>', text) text = re.sub(r'__(.+?)__', r'<b>\1</b>', text) # 8. Italic _text_ (avoid matching inside words like some_var_name) text = re.sub(r'(?<![a-zA-Z0-9])_([^_]+)_(?![a-zA-Z0-9])', r'<i>\1</i>', text) # 9. Strikethrough ~~text~~ text = re.sub(r'~~(.+?)~~', r'<s>\1</s>', text) # 10. Bullet lists - item -> • item text = re.sub(r'^[-*]\s+', '• ', text, flags=re.MULTILINE) # 11. Restore inline code with HTML tags for i, code in enumerate(inline_codes): # Escape HTML in code content escaped = code.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") text = text.replace(f"\x00IC{i}\x00", f"<code>{escaped}</code>") # 12. Restore code blocks with HTML tags for i, code in enumerate(code_blocks): # Escape HTML in code content escaped = code.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") text = text.replace(f"\x00CB{i}\x00", f"<pre><code>{escaped}</code></pre>") return text def _split_message(content: str, max_len: int = 4000) -> list[str]: """Split content into chunks within max_len, preferring line breaks.""" if len(content) <= max_len: return [content] chunks: list[str] = [] while content: if len(content) <= max_len: chunks.append(content) break cut = content[:max_len] pos = cut.rfind('\n') if pos == -1: pos = cut.rfind(' ') if pos == -1: pos = max_len chunks.append(content[:pos]) content = content[pos:].lstrip() return chunks class TelegramChannel(BaseChannel): """ Telegram channel using long polling. Simple and reliable - no webhook/public IP needed. 
""" name = "telegram" # Commands registered with Telegram's command menu BOT_COMMANDS = [ BotCommand("start", "Start the bot"), BotCommand("new", "Start a new conversation"), BotCommand("stop", "Stop the current task"), BotCommand("help", "Show available commands"), ] def __init__( self, config: TelegramConfig, bus: MessageBus, groq_api_key: str = "", ): super().__init__(config, bus) self.config: TelegramConfig = config self.groq_api_key = groq_api_key self._app: Application | None = None self._chat_ids: dict[str, int] = {} # Map sender_id to chat_id for replies self._typing_tasks: dict[str, asyncio.Task] = {} # chat_id -> typing loop task self._media_group_buffers: dict[str, dict] = {} self._media_group_tasks: dict[str, asyncio.Task] = {} async def start(self) -> None: """Start the Telegram bot with long polling.""" if not self.config.token: logger.error("Telegram bot token not configured") return self._running = True # Build the application with larger connection pool to avoid pool-timeout on long runs req = HTTPXRequest(connection_pool_size=16, pool_timeout=5.0, connect_timeout=30.0, read_timeout=30.0) builder = Application.builder().token(self.config.token).request(req).get_updates_request(req) if self.config.proxy: builder = builder.proxy(self.config.proxy).get_updates_proxy(self.config.proxy) self._app = builder.build() self._app.add_error_handler(self._on_error) # Add command handlers self._app.add_handler(CommandHandler("start", self._on_start)) self._app.add_handler(CommandHandler("new", self._forward_command)) self._app.add_handler(CommandHandler("help", self._on_help)) # Add message handler for text, photos, voice, documents self._app.add_handler( MessageHandler( (filters.TEXT | filters.PHOTO | filters.VOICE | filters.AUDIO | filters.Document.ALL) & ~filters.COMMAND, self._on_message ) ) logger.info("Starting Telegram bot (polling mode)...") # Initialize and start polling await self._app.initialize() await self._app.start() # Get bot info and register command menu bot_info = await self._app.bot.get_me() logger.info("Telegram bot @{} connected", bot_info.username) try: await self._app.bot.set_my_commands(self.BOT_COMMANDS) logger.debug("Telegram bot commands registered") except Exception as e: logger.warning("Failed to register bot commands: {}", e) # Start polling (this runs until stopped) await self._app.updater.start_polling( allowed_updates=["message"], drop_pending_updates=True # Ignore old messages on startup ) # Keep running until stopped while self._running: await asyncio.sleep(1) async def stop(self) -> None: """Stop the Telegram bot.""" self._running = False # Cancel all typing indicators for chat_id in list(self._typing_tasks): self._stop_typing(chat_id) for task in self._media_group_tasks.values(): task.cancel() self._media_group_tasks.clear() self._media_group_buffers.clear() if self._app: logger.info("Stopping Telegram bot...") await self._app.updater.stop() await self._app.stop() await self._app.shutdown() self._app = None @staticmethod def _get_media_type(path: str) -> str: """Guess media type from file extension.""" ext = path.rsplit(".", 1)[-1].lower() if "." 
in path else "" if ext in ("jpg", "jpeg", "png", "gif", "webp"): return "photo" if ext == "ogg": return "voice" if ext in ("mp3", "m4a", "wav", "aac"): return "audio" return "document" async def send(self, msg: OutboundMessage) -> None: """Send a message through Telegram.""" if not self._app: logger.warning("Telegram bot not running") return self._stop_typing(msg.chat_id) try: chat_id = int(msg.chat_id) except ValueError: logger.error("Invalid chat_id: {}", msg.chat_id) return reply_params = None if self.config.reply_to_message: reply_to_message_id = msg.metadata.get("message_id") if reply_to_message_id: reply_params = ReplyParameters( message_id=reply_to_message_id, allow_sending_without_reply=True ) # Send media files for media_path in (msg.media or []): try: media_type = self._get_media_type(media_path) sender = { "photo": self._app.bot.send_photo, "voice": self._app.bot.send_voice, "audio": self._app.bot.send_audio, }.get(media_type, self._app.bot.send_document) param = "photo" if media_type == "photo" else media_type if media_type in ("voice", "audio") else "document" with open(media_path, 'rb') as f: await sender( chat_id=chat_id, **{param: f}, reply_parameters=reply_params ) except Exception as e: filename = media_path.rsplit("/", 1)[-1] logger.error("Failed to send media {}: {}", media_path, e) await self._app.bot.send_message( chat_id=chat_id, text=f"[Failed to send: {filename}]", reply_parameters=reply_params ) # Send text content if msg.content and msg.content != "[empty message]": for chunk in _split_message(msg.content): try: html = _markdown_to_telegram_html(chunk) await self._app.bot.send_message( chat_id=chat_id, text=html, parse_mode="HTML", reply_parameters=reply_params ) except Exception as e: logger.warning("HTML parse failed, falling back to plain text: {}", e) try: await self._app.bot.send_message( chat_id=chat_id, text=chunk, reply_parameters=reply_params ) except Exception as e2: logger.error("Error sending Telegram message: {}", e2) async def _on_start(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /start command.""" if not update.message or not update.effective_user: return user = update.effective_user await update.message.reply_text( f"👋 Hi {user.first_name}! I'm nanobot.\n\n" "Send me a message and I'll respond!\n" "Type /help to see available commands." 
) async def _on_help(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle /help command, bypassing ACL so all users can access it.""" if not update.message: return await update.message.reply_text( "🐈 nanobot commands:\n" "/new — Start a new conversation\n" "/stop — Stop the current task\n" "/help — Show available commands" ) @staticmethod def _sender_id(user) -> str: """Build sender_id with username for allowlist matching.""" sid = str(user.id) return f"{sid}|{user.username}" if user.username else sid async def _forward_command(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Forward slash commands to the bus for unified handling in AgentLoop.""" if not update.message or not update.effective_user: return await self._handle_message( sender_id=self._sender_id(update.effective_user), chat_id=str(update.message.chat_id), content=update.message.text, ) async def _on_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle incoming messages (text, photos, voice, documents).""" if not update.message or not update.effective_user: return message = update.message user = update.effective_user chat_id = message.chat_id sender_id = self._sender_id(user) # Store chat_id for replies self._chat_ids[sender_id] = chat_id # Build content from text and/or media content_parts = [] media_paths = [] # Text content if message.text: content_parts.append(message.text) if message.caption: content_parts.append(message.caption) # Handle media files media_file = None media_type = None if message.photo: media_file = message.photo[-1] # Largest photo media_type = "image" elif message.voice: media_file = message.voice media_type = "voice" elif message.audio: media_file = message.audio media_type = "audio" elif message.document: media_file = message.document media_type = "file" # Download media if present if media_file and self._app: try: file = await self._app.bot.get_file(media_file.file_id) ext = self._get_extension(media_type, getattr(media_file, 'mime_type', None)) # Save to workspace/media/ from pathlib import Path media_dir = Path.home() / ".nanobot" / "media" media_dir.mkdir(parents=True, exist_ok=True) file_path = media_dir / f"{media_file.file_id[:16]}{ext}" await file.download_to_drive(str(file_path)) media_paths.append(str(file_path)) # Handle voice transcription if media_type == "voice" or media_type == "audio": from nanobot.providers.transcription import GroqTranscriptionProvider transcriber = GroqTranscriptionProvider(api_key=self.groq_api_key) transcription = await transcriber.transcribe(file_path) if transcription: logger.info("Transcribed {}: {}...", media_type, transcription[:50]) content_parts.append(f"[transcription: {transcription}]") else: content_parts.append(f"[{media_type}: {file_path}]") else: content_parts.append(f"[{media_type}: {file_path}]") logger.debug("Downloaded {} to {}", media_type, file_path) except Exception as e: logger.error("Failed to download media: {}", e) content_parts.append(f"[{media_type}: download failed]") content = "\n".join(content_parts) if content_parts else "[empty message]" logger.debug("Telegram message from {}: {}...", sender_id, content[:50]) str_chat_id = str(chat_id) # Telegram media groups: buffer briefly, forward as one aggregated turn. 
if media_group_id := getattr(message, "media_group_id", None): key = f"{str_chat_id}:{media_group_id}" if key not in self._media_group_buffers: self._media_group_buffers[key] = { "sender_id": sender_id, "chat_id": str_chat_id, "contents": [], "media": [], "metadata": { "message_id": message.message_id, "user_id": user.id, "username": user.username, "first_name": user.first_name, "is_group": message.chat.type != "private", }, } self._start_typing(str_chat_id) buf = self._media_group_buffers[key] if content and content != "[empty message]": buf["contents"].append(content) buf["media"].extend(media_paths) if key not in self._media_group_tasks: self._media_group_tasks[key] = asyncio.create_task(self._flush_media_group(key)) return # Start typing indicator before processing self._start_typing(str_chat_id) # Forward to the message bus await self._handle_message( sender_id=sender_id, chat_id=str_chat_id, content=content, media=media_paths, metadata={ "message_id": message.message_id, "user_id": user.id, "username": user.username, "first_name": user.first_name, "is_group": message.chat.type != "private" } ) async def _flush_media_group(self, key: str) -> None: """Wait briefly, then forward buffered media-group as one turn.""" try: await asyncio.sleep(0.6) if not (buf := self._media_group_buffers.pop(key, None)): return content = "\n".join(buf["contents"]) or "[empty message]" await self._handle_message( sender_id=buf["sender_id"], chat_id=buf["chat_id"], content=content, media=list(dict.fromkeys(buf["media"])), metadata=buf["metadata"], ) finally: self._media_group_tasks.pop(key, None) def _start_typing(self, chat_id: str) -> None: """Start sending 'typing...' indicator for a chat.""" # Cancel any existing typing task for this chat self._stop_typing(chat_id) self._typing_tasks[chat_id] = asyncio.create_task(self._typing_loop(chat_id)) def _stop_typing(self, chat_id: str) -> None: """Stop the typing indicator for a chat.""" task = self._typing_tasks.pop(chat_id, None) if task and not task.done(): task.cancel() async def _typing_loop(self, chat_id: str) -> None: """Repeatedly send 'typing' action until cancelled.""" try: while self._app: await self._app.bot.send_chat_action(chat_id=int(chat_id), action="typing") await asyncio.sleep(4) except asyncio.CancelledError: pass except Exception as e: logger.debug("Typing indicator stopped for {}: {}", chat_id, e) async def _on_error(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None: """Log polling / handler errors instead of silently swallowing them.""" logger.error("Telegram error: {}", context.error) def _get_extension(self, media_type: str, mime_type: str | None) -> str: """Get file extension based on media type.""" if mime_type: ext_map = { "image/jpeg": ".jpg", "image/png": ".png", "image/gif": ".gif", "audio/ogg": ".ogg", "audio/mpeg": ".mp3", "audio/mp4": ".m4a", } if mime_type in ext_map: return ext_map[mime_type] type_map = {"image": ".jpg", "voice": ".ogg", "audio": ".mp3", "file": ""} return type_map.get(media_type, "")
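
# --- Illustrative usage sketch (not part of the repo): the markdown ->
# Telegram-HTML converter and the length-aware splitter, on made-up input. ---
from nanobot.channels.telegram import _markdown_to_telegram_html, _split_message

md = "# Title\n**bold** and `code` with [link](https://example.com)"
print(_markdown_to_telegram_html(md))
# Title
# <b>bold</b> and <code>code</code> with <a href="https://example.com">link</a>

print([len(c) for c in _split_message("a" * 9000, max_len=4000)])
# [4000, 4000, 1000]  (no whitespace to break on, so hard cuts at max_len)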
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/telegram.py", "license": "MIT License", "lines": 422, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/channels/whatsapp.py
"""WhatsApp channel implementation using Node.js bridge.""" import asyncio import json from collections import OrderedDict from loguru import logger from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import WhatsAppConfig class WhatsAppChannel(BaseChannel): """ WhatsApp channel that connects to a Node.js bridge. The bridge uses @whiskeysockets/baileys to handle the WhatsApp Web protocol. Communication between Python and Node.js is via WebSocket. """ name = "whatsapp" def __init__(self, config: WhatsAppConfig, bus: MessageBus): super().__init__(config, bus) self.config: WhatsAppConfig = config self._ws = None self._connected = False self._processed_message_ids: OrderedDict[str, None] = OrderedDict() async def start(self) -> None: """Start the WhatsApp channel by connecting to the bridge.""" import websockets bridge_url = self.config.bridge_url logger.info("Connecting to WhatsApp bridge at {}...", bridge_url) self._running = True while self._running: try: async with websockets.connect(bridge_url) as ws: self._ws = ws # Send auth token if configured if self.config.bridge_token: await ws.send(json.dumps({"type": "auth", "token": self.config.bridge_token})) self._connected = True logger.info("Connected to WhatsApp bridge") # Listen for messages async for message in ws: try: await self._handle_bridge_message(message) except Exception as e: logger.error("Error handling bridge message: {}", e) except asyncio.CancelledError: break except Exception as e: self._connected = False self._ws = None logger.warning("WhatsApp bridge connection error: {}", e) if self._running: logger.info("Reconnecting in 5 seconds...") await asyncio.sleep(5) async def stop(self) -> None: """Stop the WhatsApp channel.""" self._running = False self._connected = False if self._ws: await self._ws.close() self._ws = None async def send(self, msg: OutboundMessage) -> None: """Send a message through WhatsApp.""" if not self._ws or not self._connected: logger.warning("WhatsApp bridge not connected") return try: payload = { "type": "send", "to": msg.chat_id, "text": msg.content } await self._ws.send(json.dumps(payload, ensure_ascii=False)) except Exception as e: logger.error("Error sending WhatsApp message: {}", e) async def _handle_bridge_message(self, raw: str) -> None: """Handle a message from the bridge.""" try: data = json.loads(raw) except json.JSONDecodeError: logger.warning("Invalid JSON from bridge: {}", raw[:100]) return msg_type = data.get("type") if msg_type == "message": # Incoming message from WhatsApp # Deprecated by whatsapp: old phone number style typically: <phone>@s.whatspp.net pn = data.get("pn", "") # New LID sytle typically: sender = data.get("sender", "") content = data.get("content", "") message_id = data.get("id", "") if message_id: if message_id in self._processed_message_ids: return self._processed_message_ids[message_id] = None while len(self._processed_message_ids) > 1000: self._processed_message_ids.popitem(last=False) # Extract just the phone number or lid as chat_id user_id = pn if pn else sender sender_id = user_id.split("@")[0] if "@" in user_id else user_id logger.info("Sender {}", sender) # Handle voice transcription if it's a voice message if content == "[Voice Message]": logger.info("Voice message received from {}, but direct download from bridge is not yet supported.", sender_id) content = "[Voice Message: Transcription not available for WhatsApp yet]" await self._handle_message( 
sender_id=sender_id, chat_id=sender, # Use full LID for replies content=content, metadata={ "message_id": message_id, "timestamp": data.get("timestamp"), "is_group": data.get("isGroup", False) } ) elif msg_type == "status": # Connection status update status = data.get("status") logger.info("WhatsApp status: {}", status) if status == "connected": self._connected = True elif status == "disconnected": self._connected = False elif msg_type == "qr": # QR code for authentication logger.info("Scan QR code in the bridge terminal to connect WhatsApp") elif msg_type == "error": logger.error("WhatsApp bridge error: {}", data.get('error'))
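
# --- Illustrative sketch (not part of the repo): the OrderedDict-based
# dedup ring used above for message IDs, shown standalone. ---
from collections import OrderedDict

seen: OrderedDict[str, None] = OrderedDict()
for i in range(1005):
    seen[f"msg-{i}"] = None
    while len(seen) > 1000:
        seen.popitem(last=False)  # evict the oldest ID first

print(len(seen), next(iter(seen)))  # 1000 msg-5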
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/channels/whatsapp.py", "license": "MIT License", "lines": 126, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/cli/commands.py
"""CLI commands for nanobot.""" import asyncio import os import select import signal import sys from pathlib import Path import typer from prompt_toolkit import PromptSession from prompt_toolkit.formatted_text import HTML from prompt_toolkit.history import FileHistory from prompt_toolkit.patch_stdout import patch_stdout from rich.console import Console from rich.markdown import Markdown from rich.table import Table from rich.text import Text from nanobot import __logo__, __version__ from nanobot.config.schema import Config from nanobot.utils.helpers import sync_workspace_templates app = typer.Typer( name="nanobot", help=f"{__logo__} nanobot - Personal AI Assistant", no_args_is_help=True, ) console = Console() EXIT_COMMANDS = {"exit", "quit", "/exit", "/quit", ":q"} # --------------------------------------------------------------------------- # CLI input: prompt_toolkit for editing, paste, history, and display # --------------------------------------------------------------------------- _PROMPT_SESSION: PromptSession | None = None _SAVED_TERM_ATTRS = None # original termios settings, restored on exit def _flush_pending_tty_input() -> None: """Drop unread keypresses typed while the model was generating output.""" try: fd = sys.stdin.fileno() if not os.isatty(fd): return except Exception: return try: import termios termios.tcflush(fd, termios.TCIFLUSH) return except Exception: pass try: while True: ready, _, _ = select.select([fd], [], [], 0) if not ready: break if not os.read(fd, 4096): break except Exception: return def _restore_terminal() -> None: """Restore terminal to its original state (echo, line buffering, etc.).""" if _SAVED_TERM_ATTRS is None: return try: import termios termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, _SAVED_TERM_ATTRS) except Exception: pass def _init_prompt_session() -> None: """Create the prompt_toolkit session with persistent file history.""" global _PROMPT_SESSION, _SAVED_TERM_ATTRS # Save terminal state so we can restore it on exit try: import termios _SAVED_TERM_ATTRS = termios.tcgetattr(sys.stdin.fileno()) except Exception: pass history_file = Path.home() / ".nanobot" / "history" / "cli_history" history_file.parent.mkdir(parents=True, exist_ok=True) _PROMPT_SESSION = PromptSession( history=FileHistory(str(history_file)), enable_open_in_editor=False, multiline=False, # Enter submits (single line mode) ) def _print_agent_response(response: str, render_markdown: bool) -> None: """Render assistant response with consistent terminal styling.""" content = response or "" body = Markdown(content) if render_markdown else Text(content) console.print() console.print(f"[cyan]{__logo__} nanobot[/cyan]") console.print(body) console.print() def _is_exit_command(command: str) -> bool: """Return True when input should end interactive chat.""" return command.lower() in EXIT_COMMANDS async def _read_interactive_input_async() -> str: """Read user input using prompt_toolkit (handles paste, history, display). 
prompt_toolkit natively handles: - Multiline paste (bracketed paste mode) - History navigation (up/down arrows) - Clean display (no ghost characters or artifacts) """ if _PROMPT_SESSION is None: raise RuntimeError("Call _init_prompt_session() first") try: with patch_stdout(): return await _PROMPT_SESSION.prompt_async( HTML("<b fg='ansiblue'>You:</b> "), ) except EOFError as exc: raise KeyboardInterrupt from exc def version_callback(value: bool): if value: console.print(f"{__logo__} nanobot v{__version__}") raise typer.Exit() @app.callback() def main( version: bool = typer.Option( None, "--version", "-v", callback=version_callback, is_eager=True ), ): """nanobot - Personal AI Assistant.""" pass # ============================================================================ # Onboard / Setup # ============================================================================ @app.command() def onboard(): """Initialize nanobot configuration and workspace.""" from nanobot.config.loader import get_config_path, load_config, save_config from nanobot.config.schema import Config from nanobot.utils.helpers import get_workspace_path config_path = get_config_path() if config_path.exists(): console.print(f"[yellow]Config already exists at {config_path}[/yellow]") console.print(" [bold]y[/bold] = overwrite with defaults (existing values will be lost)") console.print(" [bold]N[/bold] = refresh config, keeping existing values and adding new fields") if typer.confirm("Overwrite?"): config = Config() save_config(config) console.print(f"[green]✓[/green] Config reset to defaults at {config_path}") else: config = load_config() save_config(config) console.print(f"[green]✓[/green] Config refreshed at {config_path} (existing values preserved)") else: save_config(Config()) console.print(f"[green]✓[/green] Created config at {config_path}") # Create workspace workspace = get_workspace_path() if not workspace.exists(): workspace.mkdir(parents=True, exist_ok=True) console.print(f"[green]✓[/green] Created workspace at {workspace}") sync_workspace_templates(workspace) console.print(f"\n{__logo__} nanobot is ready!") console.print("\nNext steps:") console.print(" 1. Add your API key to [cyan]~/.nanobot/config.json[/cyan]") console.print(" Get one at: https://openrouter.ai/keys") console.print(" 2. Chat: [cyan]nanobot agent -m \"Hello!\"[/cyan]") console.print("\n[dim]Want Telegram/WhatsApp? 
See: https://github.com/HKUDS/nanobot#-chat-apps[/dim]") def _make_provider(config: Config): """Create the appropriate LLM provider from config.""" from nanobot.providers.custom_provider import CustomProvider from nanobot.providers.litellm_provider import LiteLLMProvider from nanobot.providers.openai_codex_provider import OpenAICodexProvider model = config.agents.defaults.model provider_name = config.get_provider_name(model) p = config.get_provider(model) # OpenAI Codex (OAuth) if provider_name == "openai_codex" or model.startswith("openai-codex/"): return OpenAICodexProvider(default_model=model) # Custom: direct OpenAI-compatible endpoint, bypasses LiteLLM if provider_name == "custom": return CustomProvider( api_key=p.api_key if p else "no-key", api_base=config.get_api_base(model) or "http://localhost:8000/v1", default_model=model, ) from nanobot.providers.registry import find_by_name spec = find_by_name(provider_name) if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth): console.print("[red]Error: No API key configured.[/red]") console.print("Set one in ~/.nanobot/config.json under providers section") raise typer.Exit(1) return LiteLLMProvider( api_key=p.api_key if p else None, api_base=config.get_api_base(model), default_model=model, extra_headers=p.extra_headers if p else None, provider_name=provider_name, ) # ============================================================================ # Gateway / Server # ============================================================================ @app.command() def gateway( port: int = typer.Option(18790, "--port", "-p", help="Gateway port"), verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), ): """Start the nanobot gateway.""" from nanobot.agent.loop import AgentLoop from nanobot.bus.queue import MessageBus from nanobot.channels.manager import ChannelManager from nanobot.config.loader import get_data_dir, load_config from nanobot.cron.service import CronService from nanobot.cron.types import CronJob from nanobot.heartbeat.service import HeartbeatService from nanobot.session.manager import SessionManager if verbose: import logging logging.basicConfig(level=logging.DEBUG) console.print(f"{__logo__} Starting nanobot gateway on port {port}...") config = load_config() sync_workspace_templates(config.workspace_path) bus = MessageBus() provider = _make_provider(config) session_manager = SessionManager(config.workspace_path) # Create cron service first (callback set after agent creation) cron_store_path = get_data_dir() / "cron" / "jobs.json" cron = CronService(cron_store_path) # Create agent with cron service agent = AgentLoop( bus=bus, provider=provider, workspace=config.workspace_path, model=config.agents.defaults.model, temperature=config.agents.defaults.temperature, max_tokens=config.agents.defaults.max_tokens, max_iterations=config.agents.defaults.max_tool_iterations, memory_window=config.agents.defaults.memory_window, reasoning_effort=config.agents.defaults.reasoning_effort, brave_api_key=config.tools.web.search.api_key or None, web_proxy=config.tools.web.proxy or None, exec_config=config.tools.exec, cron_service=cron, restrict_to_workspace=config.tools.restrict_to_workspace, session_manager=session_manager, mcp_servers=config.tools.mcp_servers, channels_config=config.channels, ) # Set cron callback (needs agent) async def on_cron_job(job: CronJob) -> str | None: """Execute a cron job through the agent.""" from nanobot.agent.tools.cron import CronTool from 
nanobot.agent.tools.message import MessageTool reminder_note = ( "[Scheduled Task] Timer finished.\n\n" f"Task '{job.name}' has been triggered.\n" f"Scheduled instruction: {job.payload.message}" ) # Prevent the agent from scheduling new cron jobs during execution cron_tool = agent.tools.get("cron") cron_token = None if isinstance(cron_tool, CronTool): cron_token = cron_tool.set_cron_context(True) try: response = await agent.process_direct( reminder_note, session_key=f"cron:{job.id}", channel=job.payload.channel or "cli", chat_id=job.payload.to or "direct", ) finally: if isinstance(cron_tool, CronTool) and cron_token is not None: cron_tool.reset_cron_context(cron_token) message_tool = agent.tools.get("message") if isinstance(message_tool, MessageTool) and message_tool._sent_in_turn: return response if job.payload.deliver and job.payload.to and response: from nanobot.bus.events import OutboundMessage await bus.publish_outbound(OutboundMessage( channel=job.payload.channel or "cli", chat_id=job.payload.to, content=response )) return response cron.on_job = on_cron_job # Create channel manager channels = ChannelManager(config, bus) def _pick_heartbeat_target() -> tuple[str, str]: """Pick a routable channel/chat target for heartbeat-triggered messages.""" enabled = set(channels.enabled_channels) # Prefer the most recently updated non-internal session on an enabled channel. for item in session_manager.list_sessions(): key = item.get("key") or "" if ":" not in key: continue channel, chat_id = key.split(":", 1) if channel in {"cli", "system"}: continue if channel in enabled and chat_id: return channel, chat_id # Fallback keeps prior behavior but remains explicit. return "cli", "direct" # Create heartbeat service async def on_heartbeat_execute(tasks: str) -> str: """Phase 2: execute heartbeat tasks through the full agent loop.""" channel, chat_id = _pick_heartbeat_target() async def _silent(*_args, **_kwargs): pass return await agent.process_direct( tasks, session_key="heartbeat", channel=channel, chat_id=chat_id, on_progress=_silent, ) async def on_heartbeat_notify(response: str) -> None: """Deliver a heartbeat response to the user's channel.""" from nanobot.bus.events import OutboundMessage channel, chat_id = _pick_heartbeat_target() if channel == "cli": return # No external channel available to deliver to await bus.publish_outbound(OutboundMessage(channel=channel, chat_id=chat_id, content=response)) hb_cfg = config.gateway.heartbeat heartbeat = HeartbeatService( workspace=config.workspace_path, provider=provider, model=agent.model, on_execute=on_heartbeat_execute, on_notify=on_heartbeat_notify, interval_s=hb_cfg.interval_s, enabled=hb_cfg.enabled, ) if channels.enabled_channels: console.print(f"[green]✓[/green] Channels enabled: {', '.join(channels.enabled_channels)}") else: console.print("[yellow]Warning: No channels enabled[/yellow]") cron_status = cron.status() if cron_status["jobs"] > 0: console.print(f"[green]✓[/green] Cron: {cron_status['jobs']} scheduled jobs") console.print(f"[green]✓[/green] Heartbeat: every {hb_cfg.interval_s}s") async def run(): try: await cron.start() await heartbeat.start() await asyncio.gather( agent.run(), channels.start_all(), ) except KeyboardInterrupt: console.print("\nShutting down...") finally: await agent.close_mcp() heartbeat.stop() cron.stop() agent.stop() await channels.stop_all() asyncio.run(run()) # ============================================================================ # Agent Commands # 
============================================================================ @app.command() def agent( message: str = typer.Option(None, "--message", "-m", help="Message to send to the agent"), session_id: str = typer.Option("cli:direct", "--session", "-s", help="Session ID"), markdown: bool = typer.Option(True, "--markdown/--no-markdown", help="Render assistant output as Markdown"), logs: bool = typer.Option(False, "--logs/--no-logs", help="Show nanobot runtime logs during chat"), ): """Interact with the agent directly.""" from loguru import logger from nanobot.agent.loop import AgentLoop from nanobot.bus.queue import MessageBus from nanobot.config.loader import get_data_dir, load_config from nanobot.cron.service import CronService config = load_config() sync_workspace_templates(config.workspace_path) bus = MessageBus() provider = _make_provider(config) # Create cron service for tool usage (no callback needed for CLI unless running) cron_store_path = get_data_dir() / "cron" / "jobs.json" cron = CronService(cron_store_path) if logs: logger.enable("nanobot") else: logger.disable("nanobot") agent_loop = AgentLoop( bus=bus, provider=provider, workspace=config.workspace_path, model=config.agents.defaults.model, temperature=config.agents.defaults.temperature, max_tokens=config.agents.defaults.max_tokens, max_iterations=config.agents.defaults.max_tool_iterations, memory_window=config.agents.defaults.memory_window, reasoning_effort=config.agents.defaults.reasoning_effort, brave_api_key=config.tools.web.search.api_key or None, web_proxy=config.tools.web.proxy or None, exec_config=config.tools.exec, cron_service=cron, restrict_to_workspace=config.tools.restrict_to_workspace, mcp_servers=config.tools.mcp_servers, channels_config=config.channels, ) # Show spinner when logs are off (no output to miss); skip when logs are on def _thinking_ctx(): if logs: from contextlib import nullcontext return nullcontext() # Animated spinner is safe to use with prompt_toolkit input handling return console.status("[dim]nanobot is thinking...[/dim]", spinner="dots") async def _cli_progress(content: str, *, tool_hint: bool = False) -> None: ch = agent_loop.channels_config if ch and tool_hint and not ch.send_tool_hints: return if ch and not tool_hint and not ch.send_progress: return console.print(f" [dim]↳ {content}[/dim]") if message: # Single message mode — direct call, no bus needed async def run_once(): with _thinking_ctx(): response = await agent_loop.process_direct(message, session_id, on_progress=_cli_progress) _print_agent_response(response, render_markdown=markdown) await agent_loop.close_mcp() asyncio.run(run_once()) else: # Interactive mode — route through bus like other channels from nanobot.bus.events import InboundMessage _init_prompt_session() console.print(f"{__logo__} Interactive mode (type [bold]exit[/bold] or [bold]Ctrl+C[/bold] to quit)\n") if ":" in session_id: cli_channel, cli_chat_id = session_id.split(":", 1) else: cli_channel, cli_chat_id = "cli", session_id def _exit_on_sigint(signum, frame): _restore_terminal() console.print("\nGoodbye!") os._exit(0) signal.signal(signal.SIGINT, _exit_on_sigint) async def run_interactive(): bus_task = asyncio.create_task(agent_loop.run()) turn_done = asyncio.Event() turn_done.set() turn_response: list[str] = [] async def _consume_outbound(): while True: try: msg = await asyncio.wait_for(bus.consume_outbound(), timeout=1.0) if msg.metadata.get("_progress"): is_tool_hint = msg.metadata.get("_tool_hint", False) ch = agent_loop.channels_config if ch and 
is_tool_hint and not ch.send_tool_hints: pass elif ch and not is_tool_hint and not ch.send_progress: pass else: console.print(f" [dim]↳ {msg.content}[/dim]") elif not turn_done.is_set(): if msg.content: turn_response.append(msg.content) turn_done.set() elif msg.content: console.print() _print_agent_response(msg.content, render_markdown=markdown) except asyncio.TimeoutError: continue except asyncio.CancelledError: break outbound_task = asyncio.create_task(_consume_outbound()) try: while True: try: _flush_pending_tty_input() user_input = await _read_interactive_input_async() command = user_input.strip() if not command: continue if _is_exit_command(command): _restore_terminal() console.print("\nGoodbye!") break turn_done.clear() turn_response.clear() await bus.publish_inbound(InboundMessage( channel=cli_channel, sender_id="user", chat_id=cli_chat_id, content=user_input, )) with _thinking_ctx(): await turn_done.wait() if turn_response: _print_agent_response(turn_response[0], render_markdown=markdown) except KeyboardInterrupt: _restore_terminal() console.print("\nGoodbye!") break except EOFError: _restore_terminal() console.print("\nGoodbye!") break finally: agent_loop.stop() outbound_task.cancel() await asyncio.gather(bus_task, outbound_task, return_exceptions=True) await agent_loop.close_mcp() asyncio.run(run_interactive()) # ============================================================================ # Channel Commands # ============================================================================ channels_app = typer.Typer(help="Manage channels") app.add_typer(channels_app, name="channels") @channels_app.command("status") def channels_status(): """Show channel status.""" from nanobot.config.loader import load_config config = load_config() table = Table(title="Channel Status") table.add_column("Channel", style="cyan") table.add_column("Enabled", style="green") table.add_column("Configuration", style="yellow") # WhatsApp wa = config.channels.whatsapp table.add_row( "WhatsApp", "✓" if wa.enabled else "✗", wa.bridge_url ) dc = config.channels.discord table.add_row( "Discord", "✓" if dc.enabled else "✗", dc.gateway_url ) # Feishu fs = config.channels.feishu fs_config = f"app_id: {fs.app_id[:10]}..." if fs.app_id else "[dim]not configured[/dim]" table.add_row( "Feishu", "✓" if fs.enabled else "✗", fs_config ) # Mochat mc = config.channels.mochat mc_base = mc.base_url or "[dim]not configured[/dim]" table.add_row( "Mochat", "✓" if mc.enabled else "✗", mc_base ) # Telegram tg = config.channels.telegram tg_config = f"token: {tg.token[:10]}..." if tg.token else "[dim]not configured[/dim]" table.add_row( "Telegram", "✓" if tg.enabled else "✗", tg_config ) # Slack slack = config.channels.slack slack_config = "socket" if slack.app_token and slack.bot_token else "[dim]not configured[/dim]" table.add_row( "Slack", "✓" if slack.enabled else "✗", slack_config ) # DingTalk dt = config.channels.dingtalk dt_config = f"client_id: {dt.client_id[:10]}..." if dt.client_id else "[dim]not configured[/dim]" table.add_row( "DingTalk", "✓" if dt.enabled else "✗", dt_config ) # QQ qq = config.channels.qq qq_config = f"app_id: {qq.app_id[:10]}..." 
if qq.app_id else "[dim]not configured[/dim]" table.add_row( "QQ", "✓" if qq.enabled else "✗", qq_config ) # Email em = config.channels.email em_config = em.imap_host if em.imap_host else "[dim]not configured[/dim]" table.add_row( "Email", "✓" if em.enabled else "✗", em_config ) console.print(table) def _get_bridge_dir() -> Path: """Get the bridge directory, setting it up if needed.""" import shutil import subprocess # User's bridge location user_bridge = Path.home() / ".nanobot" / "bridge" # Check if already built if (user_bridge / "dist" / "index.js").exists(): return user_bridge # Check for npm if not shutil.which("npm"): console.print("[red]npm not found. Please install Node.js >= 18.[/red]") raise typer.Exit(1) # Find source bridge: first check package data, then source dir pkg_bridge = Path(__file__).parent.parent / "bridge" # nanobot/bridge (installed) src_bridge = Path(__file__).parent.parent.parent / "bridge" # repo root/bridge (dev) source = None if (pkg_bridge / "package.json").exists(): source = pkg_bridge elif (src_bridge / "package.json").exists(): source = src_bridge if not source: console.print("[red]Bridge source not found.[/red]") console.print("Try reinstalling: pip install --force-reinstall nanobot") raise typer.Exit(1) console.print(f"{__logo__} Setting up bridge...") # Copy to user directory user_bridge.parent.mkdir(parents=True, exist_ok=True) if user_bridge.exists(): shutil.rmtree(user_bridge) shutil.copytree(source, user_bridge, ignore=shutil.ignore_patterns("node_modules", "dist")) # Install and build try: console.print(" Installing dependencies...") subprocess.run(["npm", "install"], cwd=user_bridge, check=True, capture_output=True) console.print(" Building...") subprocess.run(["npm", "run", "build"], cwd=user_bridge, check=True, capture_output=True) console.print("[green]✓[/green] Bridge ready\n") except subprocess.CalledProcessError as e: console.print(f"[red]Build failed: {e}[/red]") if e.stderr: console.print(f"[dim]{e.stderr.decode()[:500]}[/dim]") raise typer.Exit(1) return user_bridge @channels_app.command("login") def channels_login(): """Link device via QR code.""" import subprocess from nanobot.config.loader import load_config config = load_config() bridge_dir = _get_bridge_dir() console.print(f"{__logo__} Starting bridge...") console.print("Scan the QR code to connect.\n") env = {**os.environ} if config.channels.whatsapp.bridge_token: env["BRIDGE_TOKEN"] = config.channels.whatsapp.bridge_token try: subprocess.run(["npm", "start"], cwd=bridge_dir, check=True, env=env) except subprocess.CalledProcessError as e: console.print(f"[red]Bridge failed: {e}[/red]") except FileNotFoundError: console.print("[red]npm not found. 
Please install Node.js.[/red]") # ============================================================================ # Status Commands # ============================================================================ @app.command() def status(): """Show nanobot status.""" from nanobot.config.loader import get_config_path, load_config config_path = get_config_path() config = load_config() workspace = config.workspace_path console.print(f"{__logo__} nanobot Status\n") console.print(f"Config: {config_path} {'[green]✓[/green]' if config_path.exists() else '[red]✗[/red]'}") console.print(f"Workspace: {workspace} {'[green]✓[/green]' if workspace.exists() else '[red]✗[/red]'}") if config_path.exists(): from nanobot.providers.registry import PROVIDERS console.print(f"Model: {config.agents.defaults.model}") # Check API keys from registry for spec in PROVIDERS: p = getattr(config.providers, spec.name, None) if p is None: continue if spec.is_oauth: console.print(f"{spec.label}: [green]✓ (OAuth)[/green]") elif spec.is_local: # Local deployments show api_base instead of api_key if p.api_base: console.print(f"{spec.label}: [green]✓ {p.api_base}[/green]") else: console.print(f"{spec.label}: [dim]not set[/dim]") else: has_key = bool(p.api_key) console.print(f"{spec.label}: {'[green]✓[/green]' if has_key else '[dim]not set[/dim]'}") # ============================================================================ # OAuth Login # ============================================================================ provider_app = typer.Typer(help="Manage providers") app.add_typer(provider_app, name="provider") _LOGIN_HANDLERS: dict[str, callable] = {} def _register_login(name: str): def decorator(fn): _LOGIN_HANDLERS[name] = fn return fn return decorator @provider_app.command("login") def provider_login( provider: str = typer.Argument(..., help="OAuth provider (e.g. 'openai-codex', 'github-copilot')"), ): """Authenticate with an OAuth provider.""" from nanobot.providers.registry import PROVIDERS key = provider.replace("-", "_") spec = next((s for s in PROVIDERS if s.name == key and s.is_oauth), None) if not spec: names = ", ".join(s.name.replace("_", "-") for s in PROVIDERS if s.is_oauth) console.print(f"[red]Unknown OAuth provider: {provider}[/red] Supported: {names}") raise typer.Exit(1) handler = _LOGIN_HANDLERS.get(spec.name) if not handler: console.print(f"[red]Login not implemented for {spec.label}[/red]") raise typer.Exit(1) console.print(f"{__logo__} OAuth Login - {spec.label}\n") handler() @_register_login("openai_codex") def _login_openai_codex() -> None: try: from oauth_cli_kit import get_token, login_oauth_interactive token = None try: token = get_token() except Exception: pass if not (token and token.access): console.print("[cyan]Starting interactive OAuth login...[/cyan]\n") token = login_oauth_interactive( print_fn=lambda s: console.print(s), prompt_fn=lambda s: typer.prompt(s), ) if not (token and token.access): console.print("[red]✗ Authentication failed[/red]") raise typer.Exit(1) console.print(f"[green]✓ Authenticated with OpenAI Codex[/green] [dim]{token.account_id}[/dim]") except ImportError: console.print("[red]oauth_cli_kit not installed. 
Run: pip install oauth-cli-kit[/red]") raise typer.Exit(1) @_register_login("github_copilot") def _login_github_copilot() -> None: import asyncio console.print("[cyan]Starting GitHub Copilot device flow...[/cyan]\n") async def _trigger(): from litellm import acompletion await acompletion(model="github_copilot/gpt-4o", messages=[{"role": "user", "content": "hi"}], max_tokens=1) try: asyncio.run(_trigger()) console.print("[green]✓ Authenticated with GitHub Copilot[/green]") except Exception as e: console.print(f"[red]Authentication error: {e}[/red]") raise typer.Exit(1) if __name__ == "__main__": app()
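# ---------------------------------------------------------------------------
# Editor's example — a minimal sketch of the interactive-turn pattern used in
# the `agent` command above: the CLI publishes an InboundMessage, then waits
# on an asyncio.Event that the outbound consumer sets when the agent's reply
# lands. Plain asyncio.Queue objects stand in for nanobot's MessageBus here;
# `fake_agent` and `chat_once` are hypothetical names, not part of nanobot.
# ---------------------------------------------------------------------------
import asyncio


async def fake_agent(inbound: asyncio.Queue, outbound: asyncio.Queue) -> None:
    """Echo loop standing in for AgentLoop.run()."""
    while True:
        text = await inbound.get()
        await outbound.put(f"echo: {text}")


async def chat_once(prompt: str) -> str:
    inbound: asyncio.Queue = asyncio.Queue()
    outbound: asyncio.Queue = asyncio.Queue()
    turn_done = asyncio.Event()
    reply: list[str] = []

    async def consume_outbound() -> None:
        msg = await outbound.get()
        reply.append(msg)
        turn_done.set()                 # unblocks the waiting prompt side

    agent = asyncio.create_task(fake_agent(inbound, outbound))
    consumer = asyncio.create_task(consume_outbound())
    await inbound.put(prompt)           # publish_inbound equivalent
    await turn_done.wait()              # the CLI spinner runs during this wait
    agent.cancel()
    await asyncio.gather(agent, consumer, return_exceptions=True)
    return reply[0]


if __name__ == "__main__":
    print(asyncio.run(chat_once("hello")))  # -> echo: hello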
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/cli/commands.py", "license": "MIT License", "lines": 737, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/config/loader.py
"""Configuration loading utilities.""" import json from pathlib import Path from nanobot.config.schema import Config def get_config_path() -> Path: """Get the default configuration file path.""" return Path.home() / ".nanobot" / "config.json" def get_data_dir() -> Path: """Get the nanobot data directory.""" from nanobot.utils.helpers import get_data_path return get_data_path() def load_config(config_path: Path | None = None) -> Config: """ Load configuration from file or create default. Args: config_path: Optional path to config file. Uses default if not provided. Returns: Loaded configuration object. """ path = config_path or get_config_path() if path.exists(): try: with open(path, encoding="utf-8") as f: data = json.load(f) data = _migrate_config(data) return Config.model_validate(data) except (json.JSONDecodeError, ValueError) as e: print(f"Warning: Failed to load config from {path}: {e}") print("Using default configuration.") return Config() def save_config(config: Config, config_path: Path | None = None) -> None: """ Save configuration to file. Args: config: Configuration to save. config_path: Optional path to save to. Uses default if not provided. """ path = config_path or get_config_path() path.parent.mkdir(parents=True, exist_ok=True) data = config.model_dump(by_alias=True) with open(path, "w", encoding="utf-8") as f: json.dump(data, f, indent=2, ensure_ascii=False) def _migrate_config(data: dict) -> dict: """Migrate old config formats to current.""" # Move tools.exec.restrictToWorkspace → tools.restrictToWorkspace tools = data.get("tools", {}) exec_cfg = tools.get("exec", {}) if "restrictToWorkspace" in exec_cfg and "restrictToWorkspace" not in tools: tools["restrictToWorkspace"] = exec_cfg.pop("restrictToWorkspace") return data
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/config/loader.py", "license": "MIT License", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/config/schema.py
"""Configuration schema using Pydantic.""" from pathlib import Path from typing import Literal from pydantic import BaseModel, ConfigDict, Field from pydantic.alias_generators import to_camel from pydantic_settings import BaseSettings class Base(BaseModel): """Base model that accepts both camelCase and snake_case keys.""" model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True) class WhatsAppConfig(Base): """WhatsApp channel configuration.""" enabled: bool = False bridge_url: str = "ws://localhost:3001" bridge_token: str = "" # Shared token for bridge auth (optional, recommended) allow_from: list[str] = Field(default_factory=list) # Allowed phone numbers class TelegramConfig(Base): """Telegram channel configuration.""" enabled: bool = False token: str = "" # Bot token from @BotFather allow_from: list[str] = Field(default_factory=list) # Allowed user IDs or usernames proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080" reply_to_message: bool = False # If true, bot replies quote the original message class FeishuConfig(Base): """Feishu/Lark channel configuration using WebSocket long connection.""" enabled: bool = False app_id: str = "" # App ID from Feishu Open Platform app_secret: str = "" # App Secret from Feishu Open Platform encrypt_key: str = "" # Encrypt Key for event subscription (optional) verification_token: str = "" # Verification Token for event subscription (optional) allow_from: list[str] = Field(default_factory=list) # Allowed user open_ids react_emoji: str = "THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE) class DingTalkConfig(Base): """DingTalk channel configuration using Stream mode.""" enabled: bool = False client_id: str = "" # AppKey client_secret: str = "" # AppSecret allow_from: list[str] = Field(default_factory=list) # Allowed staff_ids class DiscordConfig(Base): """Discord channel configuration.""" enabled: bool = False token: str = "" # Bot token from Discord Developer Portal allow_from: list[str] = Field(default_factory=list) # Allowed user IDs gateway_url: str = "wss://gateway.discord.gg/?v=10&encoding=json" intents: int = 37377 # GUILDS + GUILD_MESSAGES + DIRECT_MESSAGES + MESSAGE_CONTENT class MatrixConfig(Base): """Matrix (Element) channel configuration.""" enabled: bool = False homeserver: str = "https://matrix.org" access_token: str = "" user_id: str = "" # @bot:matrix.org device_id: str = "" e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling). sync_stop_grace_seconds: int = 2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback. max_media_bytes: int = 20 * 1024 * 1024 # Max attachment size accepted for Matrix media handling (inbound + outbound). 
    allow_from: list[str] = Field(default_factory=list)
    group_policy: Literal["open", "mention", "allowlist"] = "open"
    group_allow_from: list[str] = Field(default_factory=list)
    allow_room_mentions: bool = False


class EmailConfig(Base):
    """Email channel configuration (IMAP inbound + SMTP outbound)."""
    enabled: bool = False
    consent_granted: bool = False  # Explicit owner permission to access mailbox data
    # IMAP (receive)
    imap_host: str = ""
    imap_port: int = 993
    imap_username: str = ""
    imap_password: str = ""
    imap_mailbox: str = "INBOX"
    imap_use_ssl: bool = True
    # SMTP (send)
    smtp_host: str = ""
    smtp_port: int = 587
    smtp_username: str = ""
    smtp_password: str = ""
    smtp_use_tls: bool = True
    smtp_use_ssl: bool = False
    from_address: str = ""
    # Behavior
    auto_reply_enabled: bool = True  # If false, inbound email is read but no automatic reply is sent
    poll_interval_seconds: int = 30
    mark_seen: bool = True
    max_body_chars: int = 12000
    subject_prefix: str = "Re: "
    allow_from: list[str] = Field(default_factory=list)  # Allowed sender email addresses


class MochatMentionConfig(Base):
    """Mochat mention behavior configuration."""
    require_in_groups: bool = False


class MochatGroupRule(Base):
    """Mochat per-group mention requirement."""
    require_mention: bool = False


class MochatConfig(Base):
    """Mochat channel configuration."""
    enabled: bool = False
    base_url: str = "https://mochat.io"
    socket_url: str = ""
    socket_path: str = "/socket.io"
    socket_disable_msgpack: bool = False
    socket_reconnect_delay_ms: int = 1000
    socket_max_reconnect_delay_ms: int = 10000
    socket_connect_timeout_ms: int = 10000
    refresh_interval_ms: int = 30000
    watch_timeout_ms: int = 25000
    watch_limit: int = 100
    retry_delay_ms: int = 500
    max_retry_attempts: int = 0  # 0 means unlimited retries
    claw_token: str = ""
    agent_user_id: str = ""
    sessions: list[str] = Field(default_factory=list)
    panels: list[str] = Field(default_factory=list)
    allow_from: list[str] = Field(default_factory=list)
    mention: MochatMentionConfig = Field(default_factory=MochatMentionConfig)
    groups: dict[str, MochatGroupRule] = Field(default_factory=dict)
    reply_delay_mode: str = "non-mention"  # off | non-mention
    reply_delay_ms: int = 120000


class SlackDMConfig(Base):
    """Slack DM policy configuration."""
    enabled: bool = True
    policy: str = "open"  # "open" or "allowlist"
    allow_from: list[str] = Field(default_factory=list)  # Allowed Slack user IDs


class SlackConfig(Base):
    """Slack channel configuration."""
    enabled: bool = False
    mode: str = "socket"  # "socket" supported
    webhook_path: str = "/slack/events"
    bot_token: str = ""  # xoxb-...
    app_token: str = ""  # xapp-...
    user_token_read_only: bool = True
    reply_in_thread: bool = True
    react_emoji: str = "eyes"
    allow_from: list[str] = Field(default_factory=list)  # Allowed Slack user IDs (sender-level)
    group_policy: str = "mention"  # "mention", "open", "allowlist"
    group_allow_from: list[str] = Field(default_factory=list)  # Allowed channel IDs if allowlist
    dm: SlackDMConfig = Field(default_factory=SlackDMConfig)


class QQConfig(Base):
    """QQ channel configuration using botpy SDK."""
    enabled: bool = False
    app_id: str = ""  # Bot ID (AppID) from q.qq.com
    secret: str = ""  # Bot secret (AppSecret) from q.qq.com
    allow_from: list[str] = Field(default_factory=list)  # Allowed user openids (empty = public access)


class ChannelsConfig(Base):
    """Configuration for chat channels."""
    send_progress: bool = True  # stream agent's text progress to the channel
    send_tool_hints: bool = False  # stream tool-call hints (e.g. read_file("…"))
    whatsapp: WhatsAppConfig = Field(default_factory=WhatsAppConfig)
    telegram: TelegramConfig = Field(default_factory=TelegramConfig)
    discord: DiscordConfig = Field(default_factory=DiscordConfig)
    feishu: FeishuConfig = Field(default_factory=FeishuConfig)
    mochat: MochatConfig = Field(default_factory=MochatConfig)
    dingtalk: DingTalkConfig = Field(default_factory=DingTalkConfig)
    email: EmailConfig = Field(default_factory=EmailConfig)
    slack: SlackConfig = Field(default_factory=SlackConfig)
    qq: QQConfig = Field(default_factory=QQConfig)
    matrix: MatrixConfig = Field(default_factory=MatrixConfig)


class AgentDefaults(Base):
    """Default agent configuration."""
    workspace: str = "~/.nanobot/workspace"
    model: str = "anthropic/claude-opus-4-5"
    provider: str = "auto"  # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
    max_tokens: int = 8192
    temperature: float = 0.1
    max_tool_iterations: int = 40
    memory_window: int = 100
    reasoning_effort: str | None = None  # low / medium / high — enables LLM thinking mode


class AgentsConfig(Base):
    """Agent configuration."""
    defaults: AgentDefaults = Field(default_factory=AgentDefaults)


class ProviderConfig(Base):
    """LLM provider configuration."""
    api_key: str = ""
    api_base: str | None = None
    extra_headers: dict[str, str] | None = None  # Custom headers (e.g. APP-Code for AiHubMix)


class ProvidersConfig(Base):
    """Configuration for LLM providers."""
    custom: ProviderConfig = Field(default_factory=ProviderConfig)  # Any OpenAI-compatible endpoint
    anthropic: ProviderConfig = Field(default_factory=ProviderConfig)
    openai: ProviderConfig = Field(default_factory=ProviderConfig)
    openrouter: ProviderConfig = Field(default_factory=ProviderConfig)
    deepseek: ProviderConfig = Field(default_factory=ProviderConfig)
    groq: ProviderConfig = Field(default_factory=ProviderConfig)
    zhipu: ProviderConfig = Field(default_factory=ProviderConfig)
    dashscope: ProviderConfig = Field(default_factory=ProviderConfig)  # Alibaba Cloud Tongyi Qianwen (Qwen)
    vllm: ProviderConfig = Field(default_factory=ProviderConfig)
    gemini: ProviderConfig = Field(default_factory=ProviderConfig)
    moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
    minimax: ProviderConfig = Field(default_factory=ProviderConfig)
    aihubmix: ProviderConfig = Field(default_factory=ProviderConfig)  # AiHubMix API gateway
    siliconflow: ProviderConfig = Field(default_factory=ProviderConfig)  # SiliconFlow (硅基流动) API gateway
    volcengine: ProviderConfig = Field(default_factory=ProviderConfig)  # VolcEngine (火山引擎) API gateway
    openai_codex: ProviderConfig = Field(default_factory=ProviderConfig)  # OpenAI Codex (OAuth)
    github_copilot: ProviderConfig = Field(default_factory=ProviderConfig)  # GitHub Copilot (OAuth)


class HeartbeatConfig(Base):
    """Heartbeat service configuration."""
    enabled: bool = True
    interval_s: int = 30 * 60  # 30 minutes


class GatewayConfig(Base):
    """Gateway/server configuration."""
    host: str = "0.0.0.0"
    port: int = 18790
    heartbeat: HeartbeatConfig = Field(default_factory=HeartbeatConfig)


class WebSearchConfig(Base):
    """Web search tool configuration."""
    api_key: str = ""  # Brave Search API key
    max_results: int = 5


class WebToolsConfig(Base):
    """Web tools configuration."""
    proxy: str | None = None  # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
    search: WebSearchConfig = Field(default_factory=WebSearchConfig)


class ExecToolConfig(Base):
    """Shell exec tool configuration."""
    timeout: int = 60
    path_append: str = ""


class MCPServerConfig(Base):
    """MCP server connection configuration (stdio or HTTP)."""
    command: str = ""  # Stdio: command to run (e.g.
"npx") args: list[str] = Field(default_factory=list) # Stdio: command arguments env: dict[str, str] = Field(default_factory=dict) # Stdio: extra env vars url: str = "" # HTTP: streamable HTTP endpoint URL headers: dict[str, str] = Field(default_factory=dict) # HTTP: Custom HTTP Headers tool_timeout: int = 30 # Seconds before a tool call is cancelled class ToolsConfig(Base): """Tools configuration.""" web: WebToolsConfig = Field(default_factory=WebToolsConfig) exec: ExecToolConfig = Field(default_factory=ExecToolConfig) restrict_to_workspace: bool = False # If true, restrict all tool access to workspace directory mcp_servers: dict[str, MCPServerConfig] = Field(default_factory=dict) class Config(BaseSettings): """Root configuration for nanobot.""" agents: AgentsConfig = Field(default_factory=AgentsConfig) channels: ChannelsConfig = Field(default_factory=ChannelsConfig) providers: ProvidersConfig = Field(default_factory=ProvidersConfig) gateway: GatewayConfig = Field(default_factory=GatewayConfig) tools: ToolsConfig = Field(default_factory=ToolsConfig) @property def workspace_path(self) -> Path: """Get expanded workspace path.""" return Path(self.agents.defaults.workspace).expanduser() def _match_provider(self, model: str | None = None) -> tuple["ProviderConfig | None", str | None]: """Match provider config and its registry name. Returns (config, spec_name).""" from nanobot.providers.registry import PROVIDERS forced = self.agents.defaults.provider if forced != "auto": p = getattr(self.providers, forced, None) return (p, forced) if p else (None, None) model_lower = (model or self.agents.defaults.model).lower() model_normalized = model_lower.replace("-", "_") model_prefix = model_lower.split("/", 1)[0] if "/" in model_lower else "" normalized_prefix = model_prefix.replace("-", "_") def _kw_matches(kw: str) -> bool: kw = kw.lower() return kw in model_lower or kw.replace("-", "_") in model_normalized # Explicit provider prefix wins — prevents `github-copilot/...codex` matching openai_codex. for spec in PROVIDERS: p = getattr(self.providers, spec.name, None) if p and model_prefix and normalized_prefix == spec.name: if spec.is_oauth or p.api_key: return p, spec.name # Match by keyword (order follows PROVIDERS registry) for spec in PROVIDERS: p = getattr(self.providers, spec.name, None) if p and any(_kw_matches(kw) for kw in spec.keywords): if spec.is_oauth or p.api_key: return p, spec.name # Fallback: gateways first, then others (follows registry order) # OAuth providers are NOT valid fallbacks — they require explicit model selection for spec in PROVIDERS: if spec.is_oauth: continue p = getattr(self.providers, spec.name, None) if p and p.api_key: return p, spec.name return None, None def get_provider(self, model: str | None = None) -> ProviderConfig | None: """Get matched provider config (api_key, api_base, extra_headers). Falls back to first available.""" p, _ = self._match_provider(model) return p def get_provider_name(self, model: str | None = None) -> str | None: """Get the registry name of the matched provider (e.g. "deepseek", "openrouter").""" _, name = self._match_provider(model) return name def get_api_key(self, model: str | None = None) -> str | None: """Get API key for the given model. Falls back to first available key.""" p = self.get_provider(model) return p.api_key if p else None def get_api_base(self, model: str | None = None) -> str | None: """Get API base URL for the given model. 
Applies default URLs for known gateways.""" from nanobot.providers.registry import find_by_name p, name = self._match_provider(model) if p and p.api_base: return p.api_base # Only gateways get a default api_base here. Standard providers # (like Moonshot) set their base URL via env vars in _setup_env # to avoid polluting the global litellm.api_base. if name: spec = find_by_name(name) if spec and spec.is_gateway and spec.default_api_base: return spec.default_api_base return None model_config = ConfigDict(env_prefix="NANOBOT_", env_nested_delimiter="__")
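# ---------------------------------------------------------------------------
# Editor's example — the Base model accepts both key styles. alias_generator=
# to_camel plus populate_by_name=True means on-disk camelCase JSON and
# in-code snake_case kwargs validate to the same model. A sketch, assuming
# the nanobot package is importable:
# ---------------------------------------------------------------------------
from nanobot.config.schema import TelegramConfig

from_json = TelegramConfig.model_validate({"enabled": True, "replyToMessage": True})
from_code = TelegramConfig(enabled=True, reply_to_message=True)
assert from_json == from_code
# by_alias=True emits the camelCase form that save_config() writes to disk:
assert from_json.model_dump(by_alias=True)["replyToMessage"] is True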
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/config/schema.py", "license": "MIT License", "lines": 312, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/cron/service.py
"""Cron service for scheduling agent tasks.""" import asyncio import json import time import uuid from datetime import datetime from pathlib import Path from typing import Any, Callable, Coroutine from loguru import logger from nanobot.cron.types import CronJob, CronJobState, CronPayload, CronSchedule, CronStore def _now_ms() -> int: return int(time.time() * 1000) def _compute_next_run(schedule: CronSchedule, now_ms: int) -> int | None: """Compute next run time in ms.""" if schedule.kind == "at": return schedule.at_ms if schedule.at_ms and schedule.at_ms > now_ms else None if schedule.kind == "every": if not schedule.every_ms or schedule.every_ms <= 0: return None # Next interval from now return now_ms + schedule.every_ms if schedule.kind == "cron" and schedule.expr: try: from zoneinfo import ZoneInfo from croniter import croniter # Use caller-provided reference time for deterministic scheduling base_time = now_ms / 1000 tz = ZoneInfo(schedule.tz) if schedule.tz else datetime.now().astimezone().tzinfo base_dt = datetime.fromtimestamp(base_time, tz=tz) cron = croniter(schedule.expr, base_dt) next_dt = cron.get_next(datetime) return int(next_dt.timestamp() * 1000) except Exception: return None return None def _validate_schedule_for_add(schedule: CronSchedule) -> None: """Validate schedule fields that would otherwise create non-runnable jobs.""" if schedule.tz and schedule.kind != "cron": raise ValueError("tz can only be used with cron schedules") if schedule.kind == "cron" and schedule.tz: try: from zoneinfo import ZoneInfo ZoneInfo(schedule.tz) except Exception: raise ValueError(f"unknown timezone '{schedule.tz}'") from None class CronService: """Service for managing and executing scheduled jobs.""" def __init__( self, store_path: Path, on_job: Callable[[CronJob], Coroutine[Any, Any, str | None]] | None = None ): self.store_path = store_path self.on_job = on_job self._store: CronStore | None = None self._last_mtime: float = 0.0 self._timer_task: asyncio.Task | None = None self._running = False def _load_store(self) -> CronStore: """Load jobs from disk. 
Reloads automatically if file was modified externally.""" if self._store and self.store_path.exists(): mtime = self.store_path.stat().st_mtime if mtime != self._last_mtime: logger.info("Cron: jobs.json modified externally, reloading") self._store = None if self._store: return self._store if self.store_path.exists(): try: data = json.loads(self.store_path.read_text(encoding="utf-8")) jobs = [] for j in data.get("jobs", []): jobs.append(CronJob( id=j["id"], name=j["name"], enabled=j.get("enabled", True), schedule=CronSchedule( kind=j["schedule"]["kind"], at_ms=j["schedule"].get("atMs"), every_ms=j["schedule"].get("everyMs"), expr=j["schedule"].get("expr"), tz=j["schedule"].get("tz"), ), payload=CronPayload( kind=j["payload"].get("kind", "agent_turn"), message=j["payload"].get("message", ""), deliver=j["payload"].get("deliver", False), channel=j["payload"].get("channel"), to=j["payload"].get("to"), ), state=CronJobState( next_run_at_ms=j.get("state", {}).get("nextRunAtMs"), last_run_at_ms=j.get("state", {}).get("lastRunAtMs"), last_status=j.get("state", {}).get("lastStatus"), last_error=j.get("state", {}).get("lastError"), ), created_at_ms=j.get("createdAtMs", 0), updated_at_ms=j.get("updatedAtMs", 0), delete_after_run=j.get("deleteAfterRun", False), )) self._store = CronStore(jobs=jobs) except Exception as e: logger.warning("Failed to load cron store: {}", e) self._store = CronStore() else: self._store = CronStore() return self._store def _save_store(self) -> None: """Save jobs to disk.""" if not self._store: return self.store_path.parent.mkdir(parents=True, exist_ok=True) data = { "version": self._store.version, "jobs": [ { "id": j.id, "name": j.name, "enabled": j.enabled, "schedule": { "kind": j.schedule.kind, "atMs": j.schedule.at_ms, "everyMs": j.schedule.every_ms, "expr": j.schedule.expr, "tz": j.schedule.tz, }, "payload": { "kind": j.payload.kind, "message": j.payload.message, "deliver": j.payload.deliver, "channel": j.payload.channel, "to": j.payload.to, }, "state": { "nextRunAtMs": j.state.next_run_at_ms, "lastRunAtMs": j.state.last_run_at_ms, "lastStatus": j.state.last_status, "lastError": j.state.last_error, }, "createdAtMs": j.created_at_ms, "updatedAtMs": j.updated_at_ms, "deleteAfterRun": j.delete_after_run, } for j in self._store.jobs ] } self.store_path.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8") self._last_mtime = self.store_path.stat().st_mtime async def start(self) -> None: """Start the cron service.""" self._running = True self._load_store() self._recompute_next_runs() self._save_store() self._arm_timer() logger.info("Cron service started with {} jobs", len(self._store.jobs if self._store else [])) def stop(self) -> None: """Stop the cron service.""" self._running = False if self._timer_task: self._timer_task.cancel() self._timer_task = None def _recompute_next_runs(self) -> None: """Recompute next run times for all enabled jobs.""" if not self._store: return now = _now_ms() for job in self._store.jobs: if job.enabled: job.state.next_run_at_ms = _compute_next_run(job.schedule, now) def _get_next_wake_ms(self) -> int | None: """Get the earliest next run time across all jobs.""" if not self._store: return None times = [j.state.next_run_at_ms for j in self._store.jobs if j.enabled and j.state.next_run_at_ms] return min(times) if times else None def _arm_timer(self) -> None: """Schedule the next timer tick.""" if self._timer_task: self._timer_task.cancel() next_wake = self._get_next_wake_ms() if not next_wake or not self._running: return 
delay_ms = max(0, next_wake - _now_ms()) delay_s = delay_ms / 1000 async def tick(): await asyncio.sleep(delay_s) if self._running: await self._on_timer() self._timer_task = asyncio.create_task(tick()) async def _on_timer(self) -> None: """Handle timer tick - run due jobs.""" self._load_store() if not self._store: return now = _now_ms() due_jobs = [ j for j in self._store.jobs if j.enabled and j.state.next_run_at_ms and now >= j.state.next_run_at_ms ] for job in due_jobs: await self._execute_job(job) self._save_store() self._arm_timer() async def _execute_job(self, job: CronJob) -> None: """Execute a single job.""" start_ms = _now_ms() logger.info("Cron: executing job '{}' ({})", job.name, job.id) try: response = None if self.on_job: response = await self.on_job(job) job.state.last_status = "ok" job.state.last_error = None logger.info("Cron: job '{}' completed", job.name) except Exception as e: job.state.last_status = "error" job.state.last_error = str(e) logger.error("Cron: job '{}' failed: {}", job.name, e) job.state.last_run_at_ms = start_ms job.updated_at_ms = _now_ms() # Handle one-shot jobs if job.schedule.kind == "at": if job.delete_after_run: self._store.jobs = [j for j in self._store.jobs if j.id != job.id] else: job.enabled = False job.state.next_run_at_ms = None else: # Compute next run job.state.next_run_at_ms = _compute_next_run(job.schedule, _now_ms()) # ========== Public API ========== def list_jobs(self, include_disabled: bool = False) -> list[CronJob]: """List all jobs.""" store = self._load_store() jobs = store.jobs if include_disabled else [j for j in store.jobs if j.enabled] return sorted(jobs, key=lambda j: j.state.next_run_at_ms or float('inf')) def add_job( self, name: str, schedule: CronSchedule, message: str, deliver: bool = False, channel: str | None = None, to: str | None = None, delete_after_run: bool = False, ) -> CronJob: """Add a new job.""" store = self._load_store() _validate_schedule_for_add(schedule) now = _now_ms() job = CronJob( id=str(uuid.uuid4())[:8], name=name, enabled=True, schedule=schedule, payload=CronPayload( kind="agent_turn", message=message, deliver=deliver, channel=channel, to=to, ), state=CronJobState(next_run_at_ms=_compute_next_run(schedule, now)), created_at_ms=now, updated_at_ms=now, delete_after_run=delete_after_run, ) store.jobs.append(job) self._save_store() self._arm_timer() logger.info("Cron: added job '{}' ({})", name, job.id) return job def remove_job(self, job_id: str) -> bool: """Remove a job by ID.""" store = self._load_store() before = len(store.jobs) store.jobs = [j for j in store.jobs if j.id != job_id] removed = len(store.jobs) < before if removed: self._save_store() self._arm_timer() logger.info("Cron: removed job {}", job_id) return removed def enable_job(self, job_id: str, enabled: bool = True) -> CronJob | None: """Enable or disable a job.""" store = self._load_store() for job in store.jobs: if job.id == job_id: job.enabled = enabled job.updated_at_ms = _now_ms() if enabled: job.state.next_run_at_ms = _compute_next_run(job.schedule, _now_ms()) else: job.state.next_run_at_ms = None self._save_store() self._arm_timer() return job return None async def run_job(self, job_id: str, force: bool = False) -> bool: """Manually run a job.""" store = self._load_store() for job in store.jobs: if job.id == job_id: if not force and not job.enabled: return False await self._execute_job(job) self._save_store() self._arm_timer() return True return False def status(self) -> dict: """Get service status.""" store = self._load_store() 
return { "enabled": self._running, "jobs": len(store.jobs), "next_wake_at_ms": self._get_next_wake_ms(), }
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/cron/service.py", "license": "MIT License", "lines": 320, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/cron/types.py
"""Cron types.""" from dataclasses import dataclass, field from typing import Literal @dataclass class CronSchedule: """Schedule definition for a cron job.""" kind: Literal["at", "every", "cron"] # For "at": timestamp in ms at_ms: int | None = None # For "every": interval in ms every_ms: int | None = None # For "cron": cron expression (e.g. "0 9 * * *") expr: str | None = None # Timezone for cron expressions tz: str | None = None @dataclass class CronPayload: """What to do when the job runs.""" kind: Literal["system_event", "agent_turn"] = "agent_turn" message: str = "" # Deliver response to channel deliver: bool = False channel: str | None = None # e.g. "whatsapp" to: str | None = None # e.g. phone number @dataclass class CronJobState: """Runtime state of a job.""" next_run_at_ms: int | None = None last_run_at_ms: int | None = None last_status: Literal["ok", "error", "skipped"] | None = None last_error: str | None = None @dataclass class CronJob: """A scheduled job.""" id: str name: str enabled: bool = True schedule: CronSchedule = field(default_factory=lambda: CronSchedule(kind="every")) payload: CronPayload = field(default_factory=CronPayload) state: CronJobState = field(default_factory=CronJobState) created_at_ms: int = 0 updated_at_ms: int = 0 delete_after_run: bool = False @dataclass class CronStore: """Persistent store for cron jobs.""" version: int = 1 jobs: list[CronJob] = field(default_factory=list)
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/cron/types.py", "license": "MIT License", "lines": 48, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
HKUDS/nanobot:nanobot/heartbeat/service.py
"""Heartbeat service - periodic agent wake-up to check for tasks.""" from __future__ import annotations import asyncio from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Coroutine from loguru import logger if TYPE_CHECKING: from nanobot.providers.base import LLMProvider _HEARTBEAT_TOOL = [ { "type": "function", "function": { "name": "heartbeat", "description": "Report heartbeat decision after reviewing tasks.", "parameters": { "type": "object", "properties": { "action": { "type": "string", "enum": ["skip", "run"], "description": "skip = nothing to do, run = has active tasks", }, "tasks": { "type": "string", "description": "Natural-language summary of active tasks (required for run)", }, }, "required": ["action"], }, }, } ] class HeartbeatService: """ Periodic heartbeat service that wakes the agent to check for tasks. Phase 1 (decision): reads HEARTBEAT.md and asks the LLM — via a virtual tool call — whether there are active tasks. This avoids free-text parsing and the unreliable HEARTBEAT_OK token. Phase 2 (execution): only triggered when Phase 1 returns ``run``. The ``on_execute`` callback runs the task through the full agent loop and returns the result to deliver. """ def __init__( self, workspace: Path, provider: LLMProvider, model: str, on_execute: Callable[[str], Coroutine[Any, Any, str]] | None = None, on_notify: Callable[[str], Coroutine[Any, Any, None]] | None = None, interval_s: int = 30 * 60, enabled: bool = True, ): self.workspace = workspace self.provider = provider self.model = model self.on_execute = on_execute self.on_notify = on_notify self.interval_s = interval_s self.enabled = enabled self._running = False self._task: asyncio.Task | None = None @property def heartbeat_file(self) -> Path: return self.workspace / "HEARTBEAT.md" def _read_heartbeat_file(self) -> str | None: if self.heartbeat_file.exists(): try: return self.heartbeat_file.read_text(encoding="utf-8") except Exception: return None return None async def _decide(self, content: str) -> tuple[str, str]: """Phase 1: ask LLM to decide skip/run via virtual tool call. Returns (action, tasks) where action is 'skip' or 'run'. """ response = await self.provider.chat( messages=[ {"role": "system", "content": "You are a heartbeat agent. 
Call the heartbeat tool to report your decision."}, {"role": "user", "content": ( "Review the following HEARTBEAT.md and decide whether there are active tasks.\n\n" f"{content}" )}, ], tools=_HEARTBEAT_TOOL, model=self.model, ) if not response.has_tool_calls: return "skip", "" args = response.tool_calls[0].arguments return args.get("action", "skip"), args.get("tasks", "") async def start(self) -> None: """Start the heartbeat service.""" if not self.enabled: logger.info("Heartbeat disabled") return if self._running: logger.warning("Heartbeat already running") return self._running = True self._task = asyncio.create_task(self._run_loop()) logger.info("Heartbeat started (every {}s)", self.interval_s) def stop(self) -> None: """Stop the heartbeat service.""" self._running = False if self._task: self._task.cancel() self._task = None async def _run_loop(self) -> None: """Main heartbeat loop.""" while self._running: try: await asyncio.sleep(self.interval_s) if self._running: await self._tick() except asyncio.CancelledError: break except Exception as e: logger.error("Heartbeat error: {}", e) async def _tick(self) -> None: """Execute a single heartbeat tick.""" content = self._read_heartbeat_file() if not content: logger.debug("Heartbeat: HEARTBEAT.md missing or empty") return logger.info("Heartbeat: checking for tasks...") try: action, tasks = await self._decide(content) if action != "run": logger.info("Heartbeat: OK (nothing to report)") return logger.info("Heartbeat: tasks found, executing...") if self.on_execute: response = await self.on_execute(tasks) if response and self.on_notify: logger.info("Heartbeat: completed, delivering response") await self.on_notify(response) except Exception: logger.exception("Heartbeat execution failed") async def trigger_now(self) -> str | None: """Manually trigger a heartbeat.""" content = self._read_heartbeat_file() if not content: return None action, tasks = await self._decide(content) if action != "run" or not self.on_execute: return None return await self.on_execute(tasks)
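# ---------------------------------------------------------------------------
# Editor's example — the "virtual tool call" trick used by _decide(): instead
# of parsing free text for a decision, give the model exactly one function
# tool and read its structured arguments. Sketch with a stubbed provider
# (assumes nanobot is importable; `StubProvider` is hypothetical):
# ---------------------------------------------------------------------------
import asyncio

from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest


class StubProvider(LLMProvider):
    """Always reports one active task, standing in for a real LLM."""

    async def chat(self, messages, tools=None, model=None, max_tokens=4096,
                   temperature=0.7, reasoning_effort=None) -> LLMResponse:
        return LLMResponse(
            content=None,
            tool_calls=[ToolCallRequest(
                id="t1", name="heartbeat",
                arguments={"action": "run", "tasks": "water the plants"},
            )],
        )

    def get_default_model(self) -> str:
        return "stub"


async def main() -> None:
    response = await StubProvider().chat(messages=[])
    if response.has_tool_calls:
        args = response.tool_calls[0].arguments   # structured, no text parsing
        print(args["action"], "-", args.get("tasks", ""))


asyncio.run(main())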
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/heartbeat/service.py", "license": "MIT License", "lines": 147, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/providers/base.py
"""Base LLM provider interface.""" from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Any @dataclass class ToolCallRequest: """A tool call request from the LLM.""" id: str name: str arguments: dict[str, Any] @dataclass class LLMResponse: """Response from an LLM provider.""" content: str | None tool_calls: list[ToolCallRequest] = field(default_factory=list) finish_reason: str = "stop" usage: dict[str, int] = field(default_factory=dict) reasoning_content: str | None = None # Kimi, DeepSeek-R1 etc. thinking_blocks: list[dict] | None = None # Anthropic extended thinking @property def has_tool_calls(self) -> bool: """Check if response contains tool calls.""" return len(self.tool_calls) > 0 class LLMProvider(ABC): """ Abstract base class for LLM providers. Implementations should handle the specifics of each provider's API while maintaining a consistent interface. """ def __init__(self, api_key: str | None = None, api_base: str | None = None): self.api_key = api_key self.api_base = api_base @staticmethod def _sanitize_empty_content(messages: list[dict[str, Any]]) -> list[dict[str, Any]]: """Replace empty text content that causes provider 400 errors. Empty content can appear when MCP tools return nothing. Most providers reject empty-string content or empty text blocks in list content. """ result: list[dict[str, Any]] = [] for msg in messages: content = msg.get("content") if isinstance(content, str) and not content: clean = dict(msg) clean["content"] = None if (msg.get("role") == "assistant" and msg.get("tool_calls")) else "(empty)" result.append(clean) continue if isinstance(content, list): filtered = [ item for item in content if not ( isinstance(item, dict) and item.get("type") in ("text", "input_text", "output_text") and not item.get("text") ) ] if len(filtered) != len(content): clean = dict(msg) if filtered: clean["content"] = filtered elif msg.get("role") == "assistant" and msg.get("tool_calls"): clean["content"] = None else: clean["content"] = "(empty)" result.append(clean) continue if isinstance(content, dict): clean = dict(msg) clean["content"] = [content] result.append(clean) continue result.append(msg) return result @abstractmethod async def chat( self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None, model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, reasoning_effort: str | None = None, ) -> LLMResponse: """ Send a chat completion request. Args: messages: List of message dicts with 'role' and 'content'. tools: Optional list of tool definitions. model: Model identifier (provider-specific). max_tokens: Maximum tokens in response. temperature: Sampling temperature. Returns: LLMResponse with content and/or tool calls. """ pass @abstractmethod def get_default_model(self) -> str: """Get the default model for this provider.""" pass
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/providers/base.py", "license": "MIT License", "lines": 98, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/providers/litellm_provider.py
"""LiteLLM provider implementation for multi-provider support.""" import os import secrets import string from typing import Any import json_repair import litellm from litellm import acompletion from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest from nanobot.providers.registry import find_by_model, find_gateway # Standard chat-completion message keys. _ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content"}) _ANTHROPIC_EXTRA_KEYS = frozenset({"thinking_blocks"}) _ALNUM = string.ascii_letters + string.digits def _short_tool_id() -> str: """Generate a 9-char alphanumeric ID compatible with all providers (incl. Mistral).""" return "".join(secrets.choice(_ALNUM) for _ in range(9)) class LiteLLMProvider(LLMProvider): """ LLM provider using LiteLLM for multi-provider support. Supports OpenRouter, Anthropic, OpenAI, Gemini, MiniMax, and many other providers through a unified interface. Provider-specific logic is driven by the registry (see providers/registry.py) — no if-elif chains needed here. """ def __init__( self, api_key: str | None = None, api_base: str | None = None, default_model: str = "anthropic/claude-opus-4-5", extra_headers: dict[str, str] | None = None, provider_name: str | None = None, ): super().__init__(api_key, api_base) self.default_model = default_model self.extra_headers = extra_headers or {} # Detect gateway / local deployment. # provider_name (from config key) is the primary signal; # api_key / api_base are fallback for auto-detection. self._gateway = find_gateway(provider_name, api_key, api_base) # Configure environment variables if api_key: self._setup_env(api_key, api_base, default_model) if api_base: litellm.api_base = api_base # Disable LiteLLM logging noise litellm.suppress_debug_info = True # Drop unsupported parameters for providers (e.g., gpt-5 rejects some params) litellm.drop_params = True def _setup_env(self, api_key: str, api_base: str | None, model: str) -> None: """Set environment variables based on detected provider.""" spec = self._gateway or find_by_model(model) if not spec: return if not spec.env_key: # OAuth/provider-only specs (for example: openai_codex) return # Gateway/local overrides existing env; standard provider doesn't if self._gateway: os.environ[spec.env_key] = api_key else: os.environ.setdefault(spec.env_key, api_key) # Resolve env_extras placeholders: # {api_key} → user's API key # {api_base} → user's api_base, falling back to spec.default_api_base effective_base = api_base or spec.default_api_base for env_name, env_val in spec.env_extras: resolved = env_val.replace("{api_key}", api_key) resolved = resolved.replace("{api_base}", effective_base) os.environ.setdefault(env_name, resolved) def _resolve_model(self, model: str) -> str: """Resolve model name by applying provider/gateway prefixes.""" if self._gateway: # Gateway mode: apply gateway prefix, skip provider-specific prefixes prefix = self._gateway.litellm_prefix if self._gateway.strip_model_prefix: model = model.split("/")[-1] if prefix and not model.startswith(f"{prefix}/"): model = f"{prefix}/{model}" return model # Standard mode: auto-prefix for known providers spec = find_by_model(model) if spec and spec.litellm_prefix: model = self._canonicalize_explicit_prefix(model, spec.name, spec.litellm_prefix) if not any(model.startswith(s) for s in spec.skip_prefixes): model = f"{spec.litellm_prefix}/{model}" return model @staticmethod def _canonicalize_explicit_prefix(model: str, spec_name: str, canonical_prefix: 
str) -> str: """Normalize explicit provider prefixes like `github-copilot/...`.""" if "/" not in model: return model prefix, remainder = model.split("/", 1) if prefix.lower().replace("-", "_") != spec_name: return model return f"{canonical_prefix}/{remainder}" def _supports_cache_control(self, model: str) -> bool: """Return True when the provider supports cache_control on content blocks.""" if self._gateway is not None: return self._gateway.supports_prompt_caching spec = find_by_model(model) return spec is not None and spec.supports_prompt_caching def _apply_cache_control( self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None, ) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]: """Return copies of messages and tools with cache_control injected.""" new_messages = [] for msg in messages: if msg.get("role") == "system": content = msg["content"] if isinstance(content, str): new_content = [{"type": "text", "text": content, "cache_control": {"type": "ephemeral"}}] else: new_content = list(content) new_content[-1] = {**new_content[-1], "cache_control": {"type": "ephemeral"}} new_messages.append({**msg, "content": new_content}) else: new_messages.append(msg) new_tools = tools if tools: new_tools = list(tools) new_tools[-1] = {**new_tools[-1], "cache_control": {"type": "ephemeral"}} return new_messages, new_tools def _apply_model_overrides(self, model: str, kwargs: dict[str, Any]) -> None: """Apply model-specific parameter overrides from the registry.""" model_lower = model.lower() spec = find_by_model(model) if spec: for pattern, overrides in spec.model_overrides: if pattern in model_lower: kwargs.update(overrides) return @staticmethod def _extra_msg_keys(original_model: str, resolved_model: str) -> frozenset[str]: """Return provider-specific extra keys to preserve in request messages.""" spec = find_by_model(original_model) or find_by_model(resolved_model) if (spec and spec.name == "anthropic") or "claude" in original_model.lower() or resolved_model.startswith("anthropic/"): return _ANTHROPIC_EXTRA_KEYS return frozenset() @staticmethod def _sanitize_messages(messages: list[dict[str, Any]], extra_keys: frozenset[str] = frozenset()) -> list[dict[str, Any]]: """Strip non-standard keys and ensure assistant messages have a content key.""" allowed = _ALLOWED_MSG_KEYS | extra_keys sanitized = [] for msg in messages: clean = {k: v for k, v in msg.items() if k in allowed} # Strict providers require "content" even when assistant only has tool_calls if clean.get("role") == "assistant" and "content" not in clean: clean["content"] = None sanitized.append(clean) return sanitized async def chat( self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None, model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, reasoning_effort: str | None = None, ) -> LLMResponse: """ Send a chat completion request via LiteLLM. Args: messages: List of message dicts with 'role' and 'content'. tools: Optional list of tool definitions in OpenAI format. model: Model identifier (e.g., 'anthropic/claude-sonnet-4-5'). max_tokens: Maximum tokens in response. temperature: Sampling temperature. Returns: LLMResponse with content and/or tool calls. 
""" original_model = model or self.default_model model = self._resolve_model(original_model) extra_msg_keys = self._extra_msg_keys(original_model, model) if self._supports_cache_control(original_model): messages, tools = self._apply_cache_control(messages, tools) # Clamp max_tokens to at least 1 — negative or zero values cause # LiteLLM to reject the request with "max_tokens must be at least 1". max_tokens = max(1, max_tokens) kwargs: dict[str, Any] = { "model": model, "messages": self._sanitize_messages(self._sanitize_empty_content(messages), extra_keys=extra_msg_keys), "max_tokens": max_tokens, "temperature": temperature, } # Apply model-specific overrides (e.g. kimi-k2.5 temperature) self._apply_model_overrides(model, kwargs) # Pass api_key directly — more reliable than env vars alone if self.api_key: kwargs["api_key"] = self.api_key # Pass api_base for custom endpoints if self.api_base: kwargs["api_base"] = self.api_base # Pass extra headers (e.g. APP-Code for AiHubMix) if self.extra_headers: kwargs["extra_headers"] = self.extra_headers if reasoning_effort: kwargs["reasoning_effort"] = reasoning_effort kwargs["drop_params"] = True if tools: kwargs["tools"] = tools kwargs["tool_choice"] = "auto" try: response = await acompletion(**kwargs) return self._parse_response(response) except Exception as e: # Return error as content for graceful handling return LLMResponse( content=f"Error calling LLM: {str(e)}", finish_reason="error", ) def _parse_response(self, response: Any) -> LLMResponse: """Parse LiteLLM response into our standard format.""" choice = response.choices[0] message = choice.message tool_calls = [] if hasattr(message, "tool_calls") and message.tool_calls: for tc in message.tool_calls: # Parse arguments from JSON string if needed args = tc.function.arguments if isinstance(args, str): args = json_repair.loads(args) tool_calls.append(ToolCallRequest( id=_short_tool_id(), name=tc.function.name, arguments=args, )) usage = {} if hasattr(response, "usage") and response.usage: usage = { "prompt_tokens": response.usage.prompt_tokens, "completion_tokens": response.usage.completion_tokens, "total_tokens": response.usage.total_tokens, } reasoning_content = getattr(message, "reasoning_content", None) or None thinking_blocks = getattr(message, "thinking_blocks", None) or None return LLMResponse( content=message.content, tool_calls=tool_calls, finish_reason=choice.finish_reason or "stop", usage=usage, reasoning_content=reasoning_content, thinking_blocks=thinking_blocks, ) def get_default_model(self) -> str: """Get the default model.""" return self.default_model
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/providers/litellm_provider.py", "license": "MIT License", "lines": 248, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/session/manager.py
"""Session management for conversation history.""" import json import shutil from dataclasses import dataclass, field from datetime import datetime from pathlib import Path from typing import Any from loguru import logger from nanobot.utils.helpers import ensure_dir, safe_filename @dataclass class Session: """ A conversation session. Stores messages in JSONL format for easy reading and persistence. Important: Messages are append-only for LLM cache efficiency. The consolidation process writes summaries to MEMORY.md/HISTORY.md but does NOT modify the messages list or get_history() output. """ key: str # channel:chat_id messages: list[dict[str, Any]] = field(default_factory=list) created_at: datetime = field(default_factory=datetime.now) updated_at: datetime = field(default_factory=datetime.now) metadata: dict[str, Any] = field(default_factory=dict) last_consolidated: int = 0 # Number of messages already consolidated to files def add_message(self, role: str, content: str, **kwargs: Any) -> None: """Add a message to the session.""" msg = { "role": role, "content": content, "timestamp": datetime.now().isoformat(), **kwargs } self.messages.append(msg) self.updated_at = datetime.now() def get_history(self, max_messages: int = 500) -> list[dict[str, Any]]: """Return unconsolidated messages for LLM input, aligned to a user turn.""" unconsolidated = self.messages[self.last_consolidated:] sliced = unconsolidated[-max_messages:] # Drop leading non-user messages to avoid orphaned tool_result blocks for i, m in enumerate(sliced): if m.get("role") == "user": sliced = sliced[i:] break out: list[dict[str, Any]] = [] for m in sliced: entry: dict[str, Any] = {"role": m["role"], "content": m.get("content", "")} for k in ("tool_calls", "tool_call_id", "name"): if k in m: entry[k] = m[k] out.append(entry) return out def clear(self) -> None: """Clear all messages and reset session to initial state.""" self.messages = [] self.last_consolidated = 0 self.updated_at = datetime.now() class SessionManager: """ Manages conversation sessions. Sessions are stored as JSONL files in the sessions directory. """ def __init__(self, workspace: Path): self.workspace = workspace self.sessions_dir = ensure_dir(self.workspace / "sessions") self.legacy_sessions_dir = Path.home() / ".nanobot" / "sessions" self._cache: dict[str, Session] = {} def _get_session_path(self, key: str) -> Path: """Get the file path for a session.""" safe_key = safe_filename(key.replace(":", "_")) return self.sessions_dir / f"{safe_key}.jsonl" def _get_legacy_session_path(self, key: str) -> Path: """Legacy global session path (~/.nanobot/sessions/).""" safe_key = safe_filename(key.replace(":", "_")) return self.legacy_sessions_dir / f"{safe_key}.jsonl" def get_or_create(self, key: str) -> Session: """ Get an existing session or create a new one. Args: key: Session key (usually channel:chat_id). Returns: The session. 
""" if key in self._cache: return self._cache[key] session = self._load(key) if session is None: session = Session(key=key) self._cache[key] = session return session def _load(self, key: str) -> Session | None: """Load a session from disk.""" path = self._get_session_path(key) if not path.exists(): legacy_path = self._get_legacy_session_path(key) if legacy_path.exists(): try: shutil.move(str(legacy_path), str(path)) logger.info("Migrated session {} from legacy path", key) except Exception: logger.exception("Failed to migrate session {}", key) if not path.exists(): return None try: messages = [] metadata = {} created_at = None last_consolidated = 0 with open(path, encoding="utf-8") as f: for line in f: line = line.strip() if not line: continue data = json.loads(line) if data.get("_type") == "metadata": metadata = data.get("metadata", {}) created_at = datetime.fromisoformat(data["created_at"]) if data.get("created_at") else None last_consolidated = data.get("last_consolidated", 0) else: messages.append(data) return Session( key=key, messages=messages, created_at=created_at or datetime.now(), metadata=metadata, last_consolidated=last_consolidated ) except Exception as e: logger.warning("Failed to load session {}: {}", key, e) return None def save(self, session: Session) -> None: """Save a session to disk.""" path = self._get_session_path(session.key) with open(path, "w", encoding="utf-8") as f: metadata_line = { "_type": "metadata", "key": session.key, "created_at": session.created_at.isoformat(), "updated_at": session.updated_at.isoformat(), "metadata": session.metadata, "last_consolidated": session.last_consolidated } f.write(json.dumps(metadata_line, ensure_ascii=False) + "\n") for msg in session.messages: f.write(json.dumps(msg, ensure_ascii=False) + "\n") self._cache[session.key] = session def invalidate(self, key: str) -> None: """Remove a session from the in-memory cache.""" self._cache.pop(key, None) def list_sessions(self) -> list[dict[str, Any]]: """ List all sessions. Returns: List of session info dicts. """ sessions = [] for path in self.sessions_dir.glob("*.jsonl"): try: # Read just the metadata line with open(path, encoding="utf-8") as f: first_line = f.readline().strip() if first_line: data = json.loads(first_line) if data.get("_type") == "metadata": key = data.get("key") or path.stem.replace("_", ":", 1) sessions.append({ "key": key, "created_at": data.get("created_at"), "updated_at": data.get("updated_at"), "path": str(path) }) except Exception: continue return sorted(sessions, key=lambda x: x.get("updated_at", ""), reverse=True)
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/session/manager.py", "license": "MIT License", "lines": 173, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
HKUDS/nanobot:nanobot/utils/helpers.py
"""Utility functions for nanobot.""" import re from datetime import datetime from pathlib import Path def ensure_dir(path: Path) -> Path: """Ensure directory exists, return it.""" path.mkdir(parents=True, exist_ok=True) return path def get_data_path() -> Path: """~/.nanobot data directory.""" return ensure_dir(Path.home() / ".nanobot") def get_workspace_path(workspace: str | None = None) -> Path: """Resolve and ensure workspace path. Defaults to ~/.nanobot/workspace.""" path = Path(workspace).expanduser() if workspace else Path.home() / ".nanobot" / "workspace" return ensure_dir(path) def timestamp() -> str: """Current ISO timestamp.""" return datetime.now().isoformat() _UNSAFE_CHARS = re.compile(r'[<>:"/\\|?*]') def safe_filename(name: str) -> str: """Replace unsafe path characters with underscores.""" return _UNSAFE_CHARS.sub("_", name).strip() def sync_workspace_templates(workspace: Path, silent: bool = False) -> list[str]: """Sync bundled templates to workspace. Only creates missing files.""" from importlib.resources import files as pkg_files try: tpl = pkg_files("nanobot") / "templates" except Exception: return [] if not tpl.is_dir(): return [] added: list[str] = [] def _write(src, dest: Path): if dest.exists(): return dest.parent.mkdir(parents=True, exist_ok=True) dest.write_text(src.read_text(encoding="utf-8") if src else "", encoding="utf-8") added.append(str(dest.relative_to(workspace))) for item in tpl.iterdir(): if item.name.endswith(".md"): _write(item, workspace / item.name) _write(tpl / "memory" / "MEMORY.md", workspace / "memory" / "MEMORY.md") _write(None, workspace / "memory" / "HISTORY.md") (workspace / "skills").mkdir(exist_ok=True) if added and not silent: from rich.console import Console for name in added: Console().print(f" [dim]Created {name}[/dim]") return added
{ "repo_id": "HKUDS/nanobot", "file_path": "nanobot/utils/helpers.py", "license": "MIT License", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Lightning-AI/pytorch-lightning:src/lightning/pytorch/loggers/litlogger.py
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fabric/PyTorch Lightning logger that enables remote experiment tracking, logging, and artifact management on
lightning.ai."""

import logging
import os
import warnings
from argparse import Namespace
from collections.abc import Mapping
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Optional, Union

from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from torch.nn import Module
from typing_extensions import override

from lightning.fabric.loggers.logger import Logger, rank_zero_experiment
from lightning.fabric.utilities.cloud_io import get_filesystem
from lightning.fabric.utilities.logger import _add_prefix
from lightning.fabric.utilities.rank_zero import rank_zero_only
from lightning.fabric.utilities.types import _PATH
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.loggers.utilities import _scan_checkpoints

if TYPE_CHECKING:
    from litlogger import Experiment

log = logging.getLogger(__name__)

_LITLOGGER_AVAILABLE = RequirementCache("litlogger>=0.1.0")


def _create_experiment_name() -> str:
    """Create a random experiment name using litlogger's generator."""
    from litlogger.generator import _create_name

    return _create_name()


class LitLogger(Logger):
    """Logger that enables remote experiment tracking, logging, and artifact management on lightning.ai."""

    LOGGER_JOIN_CHAR = "-"

    def __init__(
        self,
        root_dir: Optional[_PATH] = None,
        name: Optional[str] = None,
        teamspace: Optional[str] = None,
        metadata: Optional[dict[str, str]] = None,
        store_step: bool = True,
        log_model: bool = False,
        save_logs: bool = True,
        checkpoint_name: Optional[str] = None,
    ) -> None:
        """Initialize the LightningLogger.

        Args:
            root_dir: Folder where logs and metadata are stored (default: ./lightning_logs).
            name: Name of your experiment (defaults to a generated name).
            teamspace: Teamspace name where charts and artifacts will appear.
            metadata: Extra metadata to associate with the experiment as tags.
            log_model: If True, automatically log model checkpoints as artifacts.
            save_logs: If True, capture and upload terminal logs.
            checkpoint_name: Override the base name for logged checkpoints.

        Example::

            from lightning.pytorch import Trainer
            from lightning.pytorch.demos.boring_classes import BoringModel, BoringDataModule
            from lightning.pytorch.loggers.litlogger import LitLogger


            class LoggingModel(BoringModel):
                def training_step(self, batch, batch_idx: int):
                    loss = self.step(batch)
                    # logging the computed loss
                    self.log("train_loss", loss)
                    return {"loss": loss}


            trainer = Trainer(
                max_epochs=10,
                enable_model_summary=False,
                logger=LitLogger("./lightning_logs", name="boring_model"),
            )
            model = BoringModel()
            data_module = BoringDataModule()
            trainer.fit(model, data_module)
            trainer.test(model, data_module)

        """
        self._root_dir = os.fspath(root_dir or "./lightning_logs")
        self._name = name or _create_experiment_name()
        self._version: Optional[str] = None
        self._teamspace = teamspace
        self._sub_dir = None
        self._prefix = ""
        self._fs = get_filesystem(self._root_dir)
        self._step = -1
        self._metadata = metadata or {}
        self._is_ready = False
        self._log_model = log_model
        self._save_logs = save_logs
        self._checkpoint_callback: Optional[ModelCheckpoint] = None
        self._logged_model_time: dict[str, float] = {}
        self._checkpoint_name = checkpoint_name

    # ──────────────────────────────────────────────────────────────────────────────
    # Properties
    # ──────────────────────────────────────────────────────────────────────────────

    @property
    @override
    def name(self) -> str:
        """Gets the name of the experiment."""
        return self._name

    @property
    @override
    def version(self) -> Optional[str]:
        """Get the experiment version - its time of creation."""
        return self._version

    @property
    @override
    def root_dir(self) -> str:
        """Gets the save directory where the litlogger experiments are saved."""
        return self._root_dir

    @property
    @override
    def log_dir(self) -> str:
        """The directory for this run's tensorboard checkpoint.

        By default, it is named ``'version_${self.version}'`` but it can be overridden by passing a string value for
        the constructor's version parameter instead of ``None`` or an int.

        """
        log_dir = os.path.join(self.root_dir, self.name)
        if isinstance(self.sub_dir, str):
            log_dir = os.path.join(log_dir, self.sub_dir)
        log_dir = os.path.expandvars(log_dir)
        return os.path.expanduser(log_dir)

    @property
    def save_dir(self) -> str:
        return self.log_dir

    @property
    def sub_dir(self) -> Optional[str]:
        """Gets the sub directory where the TensorBoard experiments are saved."""
        return self._sub_dir

    @property
    @rank_zero_experiment
    def experiment(self) -> Optional["Experiment"]:
        """Returns the underlying litlogger Experiment object."""
        import litlogger

        if litlogger.experiment is not None:
            return litlogger.experiment

        if not self._is_ready:
            self._is_ready = True

        assert rank_zero_only.rank == 0, "tried to init log dirs in non global_rank=0"
        if self.root_dir:
            self._fs.makedirs(self.root_dir, exist_ok=True)

        if self.version is None:
            # Generate version as proper RFC 3339 timestamp with Z suffix (required by protobuf)
            timestamp = datetime.now(timezone.utc).isoformat(timespec="milliseconds")
            self._version = timestamp.replace("+00:00", "Z")

        litlogger.init(
            name=self._name,
            root_dir=self._root_dir,
            teamspace=self._teamspace,
            metadata={k: str(v) for k, v in self._metadata.items()},
            store_step=True,
            store_created_at=True,
            save_logs=self._save_logs,
        )
        return litlogger.experiment

    @property
    @rank_zero_only
    def url(self) -> str:
        return self.experiment.url

    # ──────────────────────────────────────────────────────────────────────────────
    # Override methods from Logger
    # ──────────────────────────────────────────────────────────────────────────────

    @override
    @rank_zero_only
    def log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None:
        import litlogger

        assert rank_zero_only.rank == 0, "experiment tried to log from global_rank != 0"
        # Ensure experiment is initialized
        _ = self.experiment

        self._step = self._step + 1 if step is None else step
        metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR)
        metrics = {k: v.item() if isinstance(v, Tensor) else v for k, v in metrics.items()}
        litlogger.log_metrics(metrics, step=self._step)

    @override
    @rank_zero_only
    def log_hyperparams(
        self,
        params: Union[dict[str, Any], Namespace],
        metrics: Optional[dict[str, Any]] = None,
    ) -> None:
        """Log hyperparams."""
        if isinstance(params, Namespace):
            params = params.__dict__
        params.update(self._metadata or {})
        self._metadata = params

    @override
    @rank_zero_only
    def log_graph(self, model: Module, input_array: Optional[Tensor] = None) -> None:
        warnings.warn("LitLogger does not support `log_graph`", UserWarning, stacklevel=2)

    @override
    @rank_zero_only
    def save(self) -> None:
        pass

    @override
    @rank_zero_only
    def finalize(self, status: Optional[str] = None) -> None:
        import litlogger

        if litlogger.experiment is not None:
            # log checkpoints as artifacts before finalizing
            if self._checkpoint_callback:
                self._scan_and_log_checkpoints(self._checkpoint_callback)
            litlogger.finalize(status)

    # ──────────────────────────────────────────────────────────────────────────────
    # Public methods
    # ──────────────────────────────────────────────────────────────────────────────

    @rank_zero_only
    def log_metadata(
        self,
        params: Union[dict[str, Any], Namespace],
    ) -> None:
        """Log hyperparams."""
        if isinstance(params, Namespace):
            params = params.__dict__
        params.update(self._metadata or {})
        self._metadata = params

    @rank_zero_only
    def log_model(
        self,
        model: Any,
        staging_dir: Optional[str] = None,
        verbose: bool = False,
        version: Optional[str] = None,
        metadata: Optional[dict[str, Any]] = None,
    ) -> None:
        """Save and upload a model object to cloud storage.

        Args:
            model: The model object to save and upload (e.g., torch.nn.Module).
            staging_dir: Optional local directory for staging the model before upload.
            verbose: Whether to show progress bar during upload.
            version: Optional version string for the model.
            metadata: Optional metadata dictionary to store with the model.

        """
        import litlogger

        _ = self.experiment
        litlogger.log_model(model, staging_dir, verbose, version, metadata)

    @rank_zero_only
    def log_model_artifact(
        self,
        path: str,
        verbose: bool = False,
        version: Optional[str] = None,
    ) -> None:
        """Upload a model file or directory to cloud storage using litmodels.

        Args:
            path: Path to the local model file or directory to upload.
            verbose: Whether to show progress bar during upload. Defaults to False.
            version: Optional version string for the model. Defaults to the experiment version.

        """
        import litlogger

        _ = self.experiment
        litlogger.log_model_artifact(path, verbose, version)

    @rank_zero_only
    def get_file(self, path: str, verbose: bool = True) -> str:
        """Download a file artifact from the cloud for this experiment.

        Args:
            path: Path where the file should be saved locally.
            verbose: Whether to print a confirmation message after download. Defaults to True.

        Returns:
            str: The local path where the file was saved.

        """
        import litlogger

        _ = self.experiment
        return litlogger.get_file(path, verbose=verbose)

    @rank_zero_only
    def get_model(self, staging_dir: Optional[str] = None, verbose: bool = False, version: Optional[str] = None) -> Any:
        """Download and load a model object using litmodels.

        Args:
            staging_dir: Optional directory where the model will be downloaded.
            verbose: Whether to show progress bar.
            version: Optional version string for the model.

        Returns:
            The loaded model object.

        """
        import litlogger

        _ = self.experiment
        return litlogger.get_model(staging_dir, verbose, version)

    @rank_zero_only
    def get_model_artifact(self, path: str, verbose: bool = False, version: Optional[str] = None) -> str:
        """Download a model artifact file or directory from cloud storage using litmodels.

        Args:
            path: Path where the model should be saved locally.
            verbose: Whether to show progress bar during download.
            version: Optional version string for the model.

        Returns:
            str: The local path where the model was saved.

        """
        import litlogger

        _ = self.experiment
        return litlogger.get_model_artifact(path, verbose, version)

    @rank_zero_only
    def log_file(self, path: str) -> None:
        """Log a file as an artifact to the Lightning platform.

        The file will be logged in the Teamspace drive, under a folder identified by the experiment name.

        Args:
            path: Path to the file to log.

        Example::

            logger = LitLogger(...)
            logger.log_file('config.yaml')

        """
        import litlogger

        _ = self.experiment
        litlogger.log_file(path)

    # ──────────────────────────────────────────────────────────────────────────────
    # Callback methods
    # ──────────────────────────────────────────────────────────────────────────────

    def after_save_checkpoint(self, checkpoint_callback: ModelCheckpoint) -> None:
        """Called after a checkpoint is saved.

        Logs checkpoints as artifacts if enabled.

        """
        if self._log_model is False:
            return
        if checkpoint_callback.save_top_k == -1:
            self._scan_and_log_checkpoints(checkpoint_callback)
        else:
            self._checkpoint_callback = checkpoint_callback

    # ──────────────────────────────────────────────────────────────────────────────
    # Private methods
    # ──────────────────────────────────────────────────────────────────────────────

    def _scan_and_log_checkpoints(self, checkpoint_callback: ModelCheckpoint) -> None:
        """Find new checkpoints from the callback and log them as model artifacts."""
        checkpoints = _scan_checkpoints(checkpoint_callback, self._logged_model_time)
        for timestamp, path_ckpt, _score, tag in checkpoints:
            if not self._checkpoint_name:
                self._checkpoint_name = self.experiment.name
            # Ensure the version tag is unique by appending a timestamp
            unique_tag = f"{tag}-{int(datetime.now(timezone.utc).timestamp())}"
            self.log_model_artifact(path_ckpt, verbose=True, version=unique_tag)
            # remember logged models - timestamp needed in case filename didn't change
            self._logged_model_time[path_ckpt] = timestamp
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "src/lightning/pytorch/loggers/litlogger.py", "license": "Apache License 2.0", "lines": 337, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
Lightning-AI/pytorch-lightning:tests/tests_pytorch/loggers/test_litlogger.py
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
from unittest.mock import MagicMock

import pytest
import torch

from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel
from lightning.pytorch.loggers.litlogger import LitLogger


def test_litlogger_init(litlogger_mock, tmp_path):
    """Test LitLogger initialization."""
    logger = LitLogger(
        name="test-experiment",
        root_dir=tmp_path,
        teamspace="test-teamspace",
        metadata={"key": "value"},
    )
    assert logger.name == "test-experiment"
    assert logger.root_dir == str(tmp_path)
    assert logger._teamspace == "test-teamspace"
    assert logger._metadata == {"key": "value"}


def test_litlogger_default_name(litlogger_mock, tmp_path):
    """Test LitLogger generates a name if not provided."""
    logger = LitLogger(root_dir=tmp_path)
    assert logger.name == "generated-name"


def test_litlogger_log_dir(litlogger_mock, tmp_path):
    """Test log_dir property."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    expected_log_dir = os.path.join(str(tmp_path), "test")
    assert logger.log_dir == expected_log_dir


def test_litlogger_log_dir_with_sub_dir(litlogger_mock, tmp_path):
    """Test log_dir property with sub_dir."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    logger._sub_dir = "sub"
    expected_log_dir = os.path.join(str(tmp_path), "test", "sub")
    assert logger.log_dir == expected_log_dir


def test_litlogger_save_dir(litlogger_mock, tmp_path):
    """Test save_dir property equals log_dir."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    assert logger.save_dir == logger.log_dir


def test_litlogger_experiment_property(litlogger_mock, tmp_path):
    """Test experiment property initializes litlogger."""
    logger = LitLogger(name="test", root_dir=tmp_path, teamspace="my-teamspace")
    experiment = logger.experiment
    assert experiment is not None
    litlogger_mock.init.assert_called_once()
    # Check init was called with correct arguments
    call_kwargs = litlogger_mock.init.call_args[1]
    assert call_kwargs["name"] == "test"
    assert call_kwargs["root_dir"] == str(tmp_path)
    assert call_kwargs["teamspace"] == "my-teamspace"
    assert call_kwargs["store_step"] is True
    assert call_kwargs["store_created_at"] is True


def test_litlogger_experiment_reuses_existing(litlogger_mock, tmp_path):
    """Test experiment property reuses existing experiment."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    # Access experiment twice
    _ = logger.experiment
    _ = logger.experiment
    # init should only be called once
    assert litlogger_mock.init.call_count == 1


@pytest.mark.parametrize("step_idx", [10, None])
def test_litlogger_log_metrics(litlogger_mock, tmp_path, step_idx):
    """Test log_metrics method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    metrics = {"float": 0.3, "int": 1, "FloatTensor": torch.tensor(0.1), "IntTensor": torch.tensor(1)}
    logger.log_metrics(metrics, step_idx)

    litlogger_mock.log_metrics.assert_called_once()
    call_args = litlogger_mock.log_metrics.call_args
    logged_metrics = call_args[0][0]
    # Verify tensors are converted to Python scalars
    assert isinstance(logged_metrics["FloatTensor"], float)
    assert isinstance(logged_metrics["IntTensor"], int)


def test_litlogger_log_metrics_with_prefix(litlogger_mock, tmp_path):
    """Test log_metrics with prefix."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    logger._prefix = "train"
    logger.log_metrics({"loss": 0.5}, step=1)

    litlogger_mock.log_metrics.assert_called_once()
    call_args = litlogger_mock.log_metrics.call_args
    logged_metrics = call_args[0][0]
    assert "train-loss" in logged_metrics


def test_litlogger_log_hyperparams_dict(litlogger_mock, tmp_path):
    """Test log_hyperparams with dict."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    hparams = {"learning_rate": 0.001, "batch_size": 32}
    logger.log_hyperparams(hparams)
    assert logger._metadata["learning_rate"] == 0.001
    assert logger._metadata["batch_size"] == 32


def test_litlogger_log_hyperparams_namespace(litlogger_mock, tmp_path):
    """Test log_hyperparams with Namespace."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    hparams = Namespace(learning_rate=0.001, batch_size=32)
    logger.log_hyperparams(hparams)
    assert logger._metadata["learning_rate"] == 0.001
    assert logger._metadata["batch_size"] == 32


def test_litlogger_log_graph_warning(litlogger_mock, tmp_path):
    """Test log_graph emits warning."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    model = BoringModel()
    with pytest.warns(UserWarning, match="LitLogger does not support `log_graph`"):
        logger.log_graph(model)


def test_litlogger_finalize(litlogger_mock, tmp_path):
    """Test finalize method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    # Initialize the experiment first
    _ = logger.experiment
    logger.finalize("success")
    litlogger_mock.finalize.assert_called_once_with("success")


def test_litlogger_finalize_no_experiment(litlogger_mock, tmp_path):
    """Test finalize does nothing if experiment not initialized."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    # Don't initialize the experiment
    logger.finalize("success")
    # finalize should not be called since experiment is None
    litlogger_mock.finalize.assert_not_called()


def test_litlogger_log_file(litlogger_mock, tmp_path):
    """Test log_file method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    logger.log_file("config.yaml")
    litlogger_mock.log_file.assert_called_once_with("config.yaml")


def test_litlogger_get_file(litlogger_mock, tmp_path):
    """Test get_file method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    result = logger.get_file("config.yaml", verbose=True)
    litlogger_mock.get_file.assert_called_once_with("config.yaml", verbose=True)
    assert result == "/path/to/file"


def test_litlogger_log_model(litlogger_mock, tmp_path):
    """Test log_model method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    model = torch.nn.Linear(10, 10)
    logger.log_model(model, staging_dir="/tmp", verbose=True, version="v1", metadata={"epoch": 10})
    litlogger_mock.log_model.assert_called_once_with(model, "/tmp", True, "v1", {"epoch": 10})


def test_litlogger_get_model(litlogger_mock, tmp_path):
    """Test get_model method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    result = logger.get_model(staging_dir="/tmp", verbose=True, version="v1")
    litlogger_mock.get_model.assert_called_once_with("/tmp", True, "v1")
    assert result is not None


def test_litlogger_log_model_artifact(litlogger_mock, tmp_path):
    """Test log_model_artifact method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    logger.log_model_artifact("/path/to/model.ckpt", verbose=True, version="v1")
    litlogger_mock.log_model_artifact.assert_called_once_with("/path/to/model.ckpt", True, "v1")


def test_litlogger_get_model_artifact(litlogger_mock, tmp_path):
    """Test get_model_artifact method."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    result = logger.get_model_artifact("/path/to/model", verbose=True, version="v1")
    litlogger_mock.get_model_artifact.assert_called_once_with("/path/to/model", True, "v1")
    assert result == "/path/to/artifact"


def test_litlogger_url_property(litlogger_mock, tmp_path):
    """Test url property."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    url = logger.url
    assert url == "https://lightning.ai/test/experiments/test-experiment"


def test_litlogger_version_property(litlogger_mock, tmp_path):
    """Test version property is set after experiment initialization."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    # Before accessing experiment, version is None
    assert logger.version is None
    # After accessing experiment, version is set
    _ = logger.experiment
    assert logger.version is not None


def test_litlogger_with_trainer(litlogger_mock, tmp_path):
    """Test LitLogger works with Trainer."""

    class LoggingModel(BoringModel):
        def training_step(self, batch, batch_idx):
            loss = super().training_step(batch, batch_idx)
            self.log("train_loss", loss["loss"])
            return loss

    logger = LitLogger(name="test", root_dir=tmp_path)
    model = LoggingModel()
    trainer = Trainer(
        default_root_dir=tmp_path,
        max_steps=1,
        logger=logger,
        enable_checkpointing=False,
        enable_progress_bar=False,
        enable_model_summary=False,
        log_every_n_steps=1,
    )
    trainer.fit(model)
    # Verify metrics were logged
    assert litlogger_mock.log_metrics.called


def test_litlogger_metadata_in_init(litlogger_mock, tmp_path):
    """Test metadata is passed to litlogger.init."""
    logger = LitLogger(
        name="test",
        root_dir=tmp_path,
        metadata={"experiment_type": "test", "version": "1.0"},
    )
    _ = logger.experiment
    call_kwargs = litlogger_mock.init.call_args[1]
    assert call_kwargs["metadata"] == {"experiment_type": "test", "version": "1.0"}


def test_litlogger_log_model_disabled(litlogger_mock, tmp_path):
    """Test log_model option defaults to False."""
    logger = LitLogger(name="test", root_dir=tmp_path)
    assert logger._log_model is False


def test_litlogger_log_model_enabled(litlogger_mock, tmp_path):
    """Test log_model option can be enabled."""
    logger = LitLogger(name="test", root_dir=tmp_path, log_model=True)
    assert logger._log_model is True


def test_litlogger_after_save_checkpoint_disabled(litlogger_mock, tmp_path):
    """Test after_save_checkpoint does nothing when log_model=False."""
    logger = LitLogger(name="test", root_dir=tmp_path, log_model=False)
    checkpoint_callback = MagicMock()
    checkpoint_callback.save_top_k = 1
    logger.after_save_checkpoint(checkpoint_callback)
    # Should not set checkpoint callback
    assert logger._checkpoint_callback is None


def test_litlogger_after_save_checkpoint_enabled(litlogger_mock, tmp_path):
    """Test after_save_checkpoint stores callback when log_model=True."""
    logger = LitLogger(name="test", root_dir=tmp_path, log_model=True)
    checkpoint_callback = MagicMock()
    checkpoint_callback.save_top_k = 1
    logger.after_save_checkpoint(checkpoint_callback)
    # Should store checkpoint callback for later
    assert logger._checkpoint_callback is checkpoint_callback


def test_litlogger_save_logs_option(litlogger_mock, tmp_path):
    """Test save_logs option is passed to init."""
    logger = LitLogger(name="test", root_dir=tmp_path, save_logs=True)
    _ = logger.experiment
    call_kwargs = litlogger_mock.init.call_args[1]
    assert call_kwargs["save_logs"] is True
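# The tests above rely on a `litlogger_mock` fixture that is not defined in
# this file (presumably it lives in a conftest.py). A minimal sketch consistent
# with what the tests assert might look like the following; the return values
# and the generated name are assumptions derived from the assertions above.
import sys
from unittest.mock import MagicMock

import pytest


@pytest.fixture
def litlogger_mock(monkeypatch):
    mock = MagicMock()
    mock.experiment = None  # force LitLogger.experiment to call init()
    mock.get_file.return_value = "/path/to/file"
    mock.get_model_artifact.return_value = "/path/to/artifact"

    def _init(**kwargs):
        # init() publishes a live experiment, as the property expects
        mock.experiment = MagicMock(
            url="https://lightning.ai/test/experiments/test-experiment",
            name="test-experiment",
        )

    mock.init.side_effect = _init

    generator = MagicMock()
    generator._create_name.return_value = "generated-name"

    # Make `import litlogger` and `from litlogger.generator import _create_name`
    # resolve to the mocks for the duration of a test.
    monkeypatch.setitem(sys.modules, "litlogger", mock)
    monkeypatch.setitem(sys.modules, "litlogger.generator", generator)
    return mock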
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/loggers/test_litlogger.py", "license": "Apache License 2.0", "lines": 236, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/callbacks/test_model_checkpoint_manual_opt.py
import shutil
import tempfile
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path

import torch
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import LightningModule, Trainer
from lightning.pytorch.callbacks import ModelCheckpoint


class FakeDataset(Dataset):
    def __init__(self):
        self.data = [torch.randn(3) for _ in range(4)]
        self.labels = [torch.randint(0, 2, (1,)) for _ in range(4)]

    def __len__(self):
        return 4

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]


def save_model(model: torch.nn.Module, step_idx: int, saved_models):
    model_copy = deepcopy(model)
    state_dict = model_copy.cpu().state_dict()
    saved_models[step_idx] = state_dict


def load_model(step_idx: int, saved_models):
    return saved_models[step_idx]


class SimpleModule(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(3, 1)
        self.automatic_optimization = False
        self.fake_losses = [
            torch.tensor(1.0),
            torch.tensor(1.0),
            torch.tensor(0.0),
            torch.tensor(1.0),
        ]
        self.saved_models = {}

    def training_step(self, batch, batch_idx):
        out = self.layer(batch[0])
        loss = torch.nn.functional.binary_cross_entropy_with_logits(out, batch[1].float())
        self.log("loss", self.fake_losses[batch_idx], on_step=True, on_epoch=True, logger=True)

        # Save model before optimization
        save_model(self.layer, batch_idx, self.saved_models)

        optimizer = self.optimizers()
        optimizer.zero_grad()
        self.manual_backward(loss)
        optimizer.step()
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


@contextmanager
def cleanup_after_test():
    """Context manager to ensure all test artifacts are cleaned up."""
    log_dir = Path("tests_pytorch/lightning_logs")
    try:
        yield
    finally:
        # Clean up any remaining log files
        if log_dir.exists():
            shutil.rmtree(log_dir, ignore_errors=True)


def test_model_checkpoint_manual_opt():
    with cleanup_after_test(), tempfile.TemporaryDirectory() as tmpdir:
        dataset = FakeDataset()
        train_dataloader = DataLoader(dataset, batch_size=1)
        model = SimpleModule()
        trainer = Trainer(
            max_epochs=1,
            callbacks=[
                ModelCheckpoint(
                    save_top_k=1,
                    monitor="loss",
                    dirpath=tmpdir,
                    mode="min",
                    save_last=False,
                    every_n_train_steps=1,
                    train_time_interval=None,
                    every_n_epochs=0,
                    save_on_train_epoch_end=True,
                    save_weights_only=True,
                )
            ],
            log_every_n_steps=1,
            num_sanity_val_steps=0,
            logger=False,  # Disable logging to prevent creating lightning_logs
        )
        try:
            trainer.fit(model, train_dataloader)
        finally:
            trainer._teardown()  # Ensure trainer is properly closed

        # The best loss is at batch_idx=2 (loss=0.0)
        best_step = 2
        model_before_opt = load_model(best_step, model.saved_models)

        # Load the best checkpoint
        best_ckpt_path = trainer.checkpoint_callback.best_model_path
        best_ckpt = torch.load(best_ckpt_path, weights_only=True)["state_dict"]

        # The checkpoint should match the model before opt.step(), not after
        for layer_name, layer_value in best_ckpt.items():
            assert torch.equal(model_before_opt[layer_name.removeprefix("layer.")], layer_value.cpu()), (
                f"Mismatch in {layer_name}: checkpoint saved after optimization instead of before"
            )


def test_model_checkpoint_manual_opt_warning():
    """Test that a warning is raised when using manual optimization without saving the state."""

    class SimpleModuleNoSave(SimpleModule):
        def training_step(self, batch, batch_idx):
            out = self.layer(batch[0])
            loss = torch.nn.functional.binary_cross_entropy_with_logits(out, batch[1].float())
            self.log("loss", self.fake_losses[batch_idx], on_step=True, on_epoch=True, logger=True)

            # Don't save the model state before optimization
            optimizer = self.optimizers()
            optimizer.zero_grad()
            self.manual_backward(loss)
            optimizer.step()
            return loss

    with cleanup_after_test(), tempfile.TemporaryDirectory() as tmpdir:
        dataset = FakeDataset()
        train_dataloader = DataLoader(dataset, batch_size=1, num_workers=0)  # Avoid num_workers warning
        model = SimpleModuleNoSave()

        # Clear any existing warnings
        warnings.filterwarnings("ignore", message=".*num_workers.*")

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")  # Always trigger warnings
            trainer = Trainer(
                max_epochs=1,
                callbacks=[
                    ModelCheckpoint(
                        save_top_k=1,
                        monitor="loss",
                        dirpath=tmpdir,
                        mode="min",
                        save_last=False,
                        every_n_train_steps=1,
                        train_time_interval=None,
                        every_n_epochs=0,
                        save_on_train_epoch_end=True,
                        save_weights_only=True,
                    )
                ],
                log_every_n_steps=1,
                num_sanity_val_steps=0,
                logger=False,  # Disable logging to prevent creating lightning_logs
            )
            try:
                trainer.fit(model, train_dataloader)
            finally:
                trainer._teardown()

            # Find our warning in the list of warnings
            manual_opt_warnings = [
                str(warning.message)
                for warning in w
                if "Using ModelCheckpoint with manual optimization and every_n_train_steps" in str(warning.message)
            ]

            # Verify our warning was raised
            assert len(manual_opt_warnings) > 0, "Expected warning about manual optimization not found"
            assert "The checkpoint will contain the model state AFTER optimization" in manual_opt_warnings[0]
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/callbacks/test_model_checkpoint_manual_opt.py", "license": "Apache License 2.0", "lines": 153, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/callbacks/test_model_checkpoint_additional_cases.py
import math
import os
from datetime import timedelta

import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import LightningModule, Trainer, seed_everything
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.demos.boring_classes import BoringModel


class TinyDataset(Dataset):
    def __init__(self, n: int = 4):
        self.x = torch.arange(n, dtype=torch.float32).view(-1, 1)
        self.y = self.x.clone()

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


class TrainMetricModule(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(1, 1)
        self._counter = 0.0

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.layer(x)
        loss = F.mse_loss(y_hat, y)
        # strictly increasing train metric per step
        self._counter += 1.0
        self.log("train_score", torch.tensor(self._counter), on_step=True, on_epoch=False, prog_bar=False, logger=True)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        pass

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


def _make_loaders(n=4):
    ds = TinyDataset(n=n)
    train_loader = DataLoader(ds, batch_size=2, shuffle=False)
    val_loader = DataLoader(ds, batch_size=2, shuffle=False)
    return train_loader, val_loader


def test_model_checkpoint_every_n_train_steps_with_train_metric_saves_at_step(tmp_path):
    """When monitoring a train-step metric, step-interval checkpointing should save at the step boundary (no
    deferral) and best_model_score should match the last train metric value."""
    seed_everything(123)

    train_loader, val_loader = _make_loaders(n=4)
    model = TrainMetricModule()

    ckpt = ModelCheckpoint(
        dirpath=tmp_path,
        monitor="train_score",
        mode="max",
        save_top_k=1,
        every_n_train_steps=1,
        train_time_interval=None,
        every_n_epochs=0,
        save_on_train_epoch_end=False,
        save_weights_only=True,
    )

    # 2 batches/epoch, run 2 epochs to have multiple step saves
    trainer = Trainer(
        max_epochs=2,
        accelerator="cpu",
        devices=1,
        callbacks=[ckpt],
        num_sanity_val_steps=0,
        log_every_n_steps=1,
        limit_train_batches=2,
        limit_val_batches=0,  # no validation needed for this test
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
    )

    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    assert ckpt.best_model_score is not None
    # 2 epochs * 2 steps/epoch = 4 steps total; metric increments by 1 each step
    expected = 4.0
    actual = float(ckpt.best_model_score)
    assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6)


@pytest.mark.parametrize("val_scores", [[0.2, 0.4, 0.9]])
def test_model_checkpoint_time_interval_with_val_metric_defers_until_validation(tmp_path, val_scores):
    """With time-interval-based checkpointing, and a validation-only metric, ensure we don't save using stale
    metrics at step boundaries; saving should occur at validation end."""
    seed_everything(123)

    train_loader, val_loader = _make_loaders(n=4)
    model = ValMetricModule(val_scores=val_scores)

    ckpt = ModelCheckpoint(
        dirpath=tmp_path,
        monitor="auroc",
        mode="max",
        save_top_k=1,
        every_n_train_steps=0,  # disable step-based
        train_time_interval=timedelta(seconds=0),  # trigger as often as possible
        every_n_epochs=0,
        save_on_train_epoch_end=False,
        save_weights_only=True,
    )

    trainer = Trainer(
        max_epochs=len(val_scores),
        accelerator="cpu",
        devices=1,
        callbacks=[ckpt],
        num_sanity_val_steps=0,
        log_every_n_steps=1,
        limit_train_batches=2,
        limit_val_batches=1,
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
    )

    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    assert ckpt.best_model_score is not None
    expected = max(val_scores)
    actual = float(ckpt.best_model_score)
    assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6)


class ValMetricModule(LightningModule):
    def __init__(self, val_scores: list[float]):
        super().__init__()
        self.layer = nn.Linear(1, 1)
        self._val_scores = [float(s) for s in val_scores]

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.layer(x)
        loss = F.mse_loss(y_hat, y)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        pass

    def on_validation_epoch_end(self):
        score = self._val_scores[self.current_epoch]
        self.log("auroc", torch.tensor(score, dtype=torch.float32), prog_bar=False, logger=True)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


@pytest.mark.parametrize("val_scores", [[0.1, 0.5, 1.0, 3.0]])
def test_model_checkpoint_defer_until_next_validation_when_val_every_2_epochs(tmp_path, val_scores):
    """With validation running every 2 epochs, step-triggered saves at the end of non-validation epochs should be
    deferred and then performed at the next validation end when the metric is available."""
    seed_everything(123)

    train_loader, val_loader = _make_loaders(n=4)
    model = ValMetricModule(val_scores=val_scores)

    ckpt = ModelCheckpoint(
        dirpath=tmp_path,
        monitor="auroc",
        mode="max",
        save_top_k=1,
        every_n_train_steps=2,  # end of each epoch
        train_time_interval=None,
        every_n_epochs=0,
        save_on_train_epoch_end=False,
        save_weights_only=True,
    )

    trainer = Trainer(
        max_epochs=len(val_scores),
        accelerator="cpu",
        devices=1,
        callbacks=[ckpt],
        num_sanity_val_steps=0,
        log_every_n_steps=1,
        limit_train_batches=2,
        limit_val_batches=1,
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
        check_val_every_n_epoch=2,  # only validate every 2 epochs
    )

    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    assert ckpt.best_model_score is not None
    expected = max(val_scores)  # last/maximum value occurs at final validation epoch
    actual = float(ckpt.best_model_score)
    assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6)


def test_model_checkpoint_save_last_link_symlink_bug(tmp_path):
    """Reproduce the bug where save_last='link' and save_top_k=-1 creates a recursive symlink."""
    trainer = Trainer(
        default_root_dir=tmp_path,
        max_epochs=2,
        callbacks=[ModelCheckpoint(dirpath=tmp_path, every_n_epochs=10, save_last="link", save_top_k=-1)],
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
    )
    model = BoringModel()
    trainer.fit(model)

    last_ckpt = tmp_path / "last.ckpt"
    assert last_ckpt.exists()
    # With the fix, if a symlink exists, it should not point to itself (preventing recursion)
    if os.path.islink(str(last_ckpt)):
        assert os.readlink(str(last_ckpt)) != str(last_ckpt)
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/callbacks/test_model_checkpoint_additional_cases.py", "license": "Apache License 2.0", "lines": 185, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/callbacks/test_model_checkpoint_edge_cases.py
import math
from datetime import timedelta

import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import LightningModule, Trainer, seed_everything
from lightning.pytorch.callbacks import ModelCheckpoint


class TinyDataset(Dataset):
    def __init__(self, n: int = 8):
        self.x = torch.arange(n, dtype=torch.float32).view(-1, 1)
        self.y = self.x.clone()

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


def _make_loaders(n=8, batch_size=2):
    ds = TinyDataset(n=n)
    train_loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
    val_loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
    return train_loader, val_loader


class MultiValPerEpochModule(LightningModule):
    """Logs a validation metric on every validation run, even if validation is run multiple times per epoch."""

    def __init__(self, val_scores: list[float]):
        super().__init__()
        self.layer = nn.Linear(1, 1)
        self._val_scores = [float(s) for s in val_scores]
        self._val_call_idx = 0

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.layer(x)
        loss = F.mse_loss(y_hat, y)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        pass

    def on_validation_epoch_end(self):
        score = self._val_scores[self._val_call_idx]
        self._val_call_idx += 1
        self.log("auroc", torch.tensor(score, dtype=torch.float32), prog_bar=False, logger=True)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


class ValOnceEveryTwoEpochsModule(LightningModule):
    """Logs a validation metric only when validation runs (e.g., every 2 epochs), indexed by current_epoch."""

    def __init__(self, val_scores: list[float]):
        super().__init__()
        self.layer = nn.Linear(1, 1)
        self._val_scores = [float(s) for s in val_scores]

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.layer(x)
        loss = F.mse_loss(y_hat, y)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        pass

    def on_validation_epoch_end(self):
        # current_epoch indexes into provided scores; only called when validation runs
        score = self._val_scores[self.current_epoch]
        self.log("auroc", torch.tensor(score, dtype=torch.float32), prog_bar=False, logger=True)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


@pytest.mark.parametrize("val_scores", [[0.1, 0.9]])
def test_checkpoint_defers_with_mid_epoch_validation(tmp_path, val_scores):
    """With val_check_interval=0.5 (validation mid-epoch and at epoch end), and step-based checkpointing, saves
    must be deferred until each validation end so monitored validation metrics are fresh."""
    seed_everything(123)

    # 4 train batches per epoch (batch_size=2 over n=8), so two validations: after 2 batches and after 4 batches
    train_loader, val_loader = _make_loaders(n=8, batch_size=2)
    model = MultiValPerEpochModule(val_scores=val_scores)

    ckpt = ModelCheckpoint(
        dirpath=tmp_path,
        monitor="auroc",
        mode="max",
        save_top_k=1,
        every_n_train_steps=1,  # would trigger every step, but must defer to validation
        train_time_interval=None,
        every_n_epochs=0,
        save_on_train_epoch_end=False,
        save_weights_only=True,
    )

    trainer = Trainer(
        max_epochs=1,
        accelerator="cpu",
        devices=1,
        callbacks=[ckpt],
        num_sanity_val_steps=0,
        log_every_n_steps=1,
        limit_train_batches=4,  # ensure exactly 4 steps => two validations at 0.5 and 1.0
        limit_val_batches=1,
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
        val_check_interval=0.5,
    )

    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    assert ckpt.best_model_score is not None
    expected = max(val_scores)
    actual = float(ckpt.best_model_score)
    assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6)


@pytest.mark.parametrize("val_scores", [[0.2, 0.6]])
def test_time_interval_defers_across_epoch_until_first_validation(tmp_path, val_scores):
    """With time-interval saving and validation only every 2 epochs, ensure no save uses stale/missing validation
    metrics; the first save should happen at the first validation end (epoch 2)."""
    seed_everything(123)

    train_loader, val_loader = _make_loaders(n=4, batch_size=2)
    model = ValOnceEveryTwoEpochsModule(val_scores=val_scores)

    ckpt = ModelCheckpoint(
        dirpath=tmp_path,
        monitor="auroc",
        mode="max",
        save_top_k=1,
        every_n_train_steps=0,  # disable step-based
        train_time_interval=timedelta(seconds=0),  # trigger frequently
        every_n_epochs=0,
        save_on_train_epoch_end=False,
        save_weights_only=True,
    )

    trainer = Trainer(
        max_epochs=2,
        accelerator="cpu",
        devices=1,
        callbacks=[ckpt],
        num_sanity_val_steps=0,
        log_every_n_steps=1,
        limit_train_batches=2,
        limit_val_batches=1,
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
        check_val_every_n_epoch=2,  # first validation only after 2nd epoch
    )

    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    assert ckpt.best_model_score is not None
    expected = val_scores[1]  # validation runs only once at epoch 2, logging index 1
    actual = float(ckpt.best_model_score)
    assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6)
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/callbacks/test_model_checkpoint_edge_cases.py", "license": "Apache License 2.0", "lines": 136, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/callbacks/test_model_checkpoint_step_interval_val_metric.py
import math

import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import LightningModule, Trainer, seed_everything
from lightning.pytorch.callbacks import ModelCheckpoint


class TinyDataset(Dataset):
    def __init__(self, n: int = 4):
        self.x = torch.arange(n, dtype=torch.float32).view(-1, 1)
        self.y = self.x.clone()

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


class ValMetricModule(LightningModule):
    def __init__(self, val_scores: list[float]):
        super().__init__()
        self.layer = nn.Linear(1, 1)
        self._val_scores = [float(s) for s in val_scores]

    # LightningModule API (minimal)
    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.layer(x)
        loss = F.mse_loss(y_hat, y)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        # do nothing per-step; we log at epoch end
        pass

    def on_validation_epoch_end(self):
        # Log a validation metric only at validation epoch end
        # Values increase across epochs; best should be the last epoch
        score = self._val_scores[self.current_epoch]
        # use logger=True so it lands in trainer.callback_metrics
        self.log("auroc", torch.tensor(score, dtype=torch.float32), prog_bar=False, logger=True)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


@pytest.mark.parametrize("val_scores", [[0.1, 0.5, 1.0]])
def test_model_checkpoint_every_n_train_steps_with_val_metric_saves_after_val(tmp_path, val_scores):
    """Reproduces #20919: Using every_n_train_steps with a validation-only metric should save the best checkpoint
    only after the metric is computed at validation, not earlier at the train-step boundary.

    Expectation: best_model_score equals the last (max) val score.

    """
    seed_everything(123)

    # 2 train batches per epoch (so checkpoint triggers at the epoch boundary)
    ds = TinyDataset(n=4)
    train_loader = DataLoader(ds, batch_size=2, shuffle=False)
    val_loader = DataLoader(ds, batch_size=2, shuffle=False)

    model = ValMetricModule(val_scores=val_scores)

    ckpt = ModelCheckpoint(
        dirpath=tmp_path,
        monitor="auroc",
        mode="max",
        save_top_k=1,
        # critical: trigger on train steps, not on epoch end
        every_n_train_steps=2,  # equal to number of train batches per epoch
        train_time_interval=None,
        every_n_epochs=0,
        save_on_train_epoch_end=False,
        save_weights_only=True,
    )

    trainer = Trainer(
        max_epochs=len(val_scores),
        accelerator="cpu",
        devices=1,
        callbacks=[ckpt],
        num_sanity_val_steps=0,
        log_every_n_steps=1,
        limit_train_batches=2,
        limit_val_batches=1,
        enable_checkpointing=True,
        enable_model_summary=False,
        logger=False,
    )

    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    assert ckpt.best_model_score is not None
    # Should equal the last (max) validation score
    expected = max(val_scores)
    actual = float(ckpt.best_model_score)
    assert math.isclose(actual, expected, rel_tol=0, abs_tol=1e-6), (
        f"best_model_score should be {expected} (last/maximum val score), got {actual}.\n"
        f"This indicates the checkpoint was saved before the validation metric was computed."
    )
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/callbacks/test_model_checkpoint_step_interval_val_metric.py", "license": "Apache License 2.0", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/plugins/test_async_checkpoint.py
import time
from typing import Any, Optional

import pytest
import torch

from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO
from lightning.pytorch.plugins.io.async_plugin import AsyncCheckpointIO


class _CaptureCheckpointIO(CheckpointIO):
    def __init__(self) -> None:
        self.saved: Optional[dict[str, Any]] = None

    def save_checkpoint(self, checkpoint: dict[str, Any], path: str, storage_options: Optional[Any] = None) -> None:
        # Simulate some delay to increase race window
        time.sleep(0.05)
        # Store the received checkpoint object (not a deep copy) to inspect tensor values
        self.saved = checkpoint

    def load_checkpoint(self, path: str, map_location: Optional[Any] = None) -> dict[str, Any]:
        raise NotImplementedError

    def remove_checkpoint(self, path: str) -> None:
        pass


@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_async_checkpoint_should_snapshot_values_before_mutation():
    base = _CaptureCheckpointIO()
    async_io = AsyncCheckpointIO(checkpoint_io=base)

    # a tensor that we will mutate after scheduling the save
    t = torch.tensor([0.0])
    ckpt = {"w": t}

    # schedule async save
    async_io.save_checkpoint(ckpt, path="unused")

    # mutate immediately afterward to mimic training thread stepping params
    t.add_(1.0)

    # ensure background thread finished
    async_io.teardown()

    assert base.saved is not None, "Async save did not run"

    # EXPECTATION: AsyncCheckpointIO should have captured value 0.0 (pre-mutation)
    # CURRENT BEHAVIOR (bug): it captures 1.0 because the dict holds references
    assert torch.allclose(base.saved["w"], torch.tensor([0.0])), (
        "AsyncCheckpointIO must snapshot the checkpoint (clone tensors) on the main thread "
        "to avoid races with parameter mutation; got mutated value instead"
    )
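The assertion above spells out the fix the test is driving at: the checkpoint must be snapshotted on the calling thread before the background writer runs. Below is a minimal sketch of such a snapshot helper; the name `_snapshot` and its placement are assumptions for illustration, not Lightning's actual implementation.

# Hypothetical snapshot helper (a sketch, not Lightning's actual code): clone tensors on the
# calling thread so later in-place mutations cannot leak into the asynchronously saved state.
from typing import Any

import torch


def _snapshot(obj: Any) -> Any:
    # Recursively deep-clone tensors; leave non-tensor leaves untouched.
    if isinstance(obj, torch.Tensor):
        return obj.detach().clone()
    if isinstance(obj, dict):
        return {key: _snapshot(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(_snapshot(value) for value in obj)
    return obj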
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/plugins/test_async_checkpoint.py", "license": "Apache License 2.0", "lines": 38, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:src/lightning/pytorch/callbacks/weight_averaging.py
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Weight Averaging Callback
^^^^^^^^^^^^^^^^^^^^^^^^^
"""

import itertools
from copy import deepcopy
from typing import Any, Optional, Union

import torch
from torch.optim.swa_utils import AveragedModel, get_ema_avg_fn
from typing_extensions import override

import lightning.pytorch as pl
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn
from lightning.pytorch.utilities.types import STEP_OUTPUT


class WeightAveraging(Callback):
    r"""A callback that updates an averaged model for Stochastic Weight Averaging (SWA) or Exponential Moving
    Average (EMA) after each training step.

    Arguments given to the constructor will be passed to the :class:`AveragedModel` constructor. If no ``device`` is
    specified, the device of the original model will be used. Contrary to :class:`AveragedModel`, ``use_buffers`` is
    set to ``True`` by default. That is, by default the callback will compute running averages for both the
    parameters and the buffers of the model. Setting ``use_buffers`` to ``False`` will cause only the model
    parameters to be averaged, leaving updating the batch normalization statistics to the user (using
    ``torch.optim.swa_utils.update_bn()``).

    You can provide a custom averaging function with the ``avg_fn`` or ``multi_avg_fn`` parameter. See the
    :class:`AveragedModel` class for details. If no averaging function is provided, the default is to compute the
    equally-weighted average of the weights (SWA).

    You can customize when the average model is updated by overriding the ``should_update()`` method. The callback
    calls it with either ``step_idx`` or ``epoch_idx`` and the method returns a boolean indicating whether to update
    after the given step or epoch. The default is to update after every step.

    During validation and after the training finishes, the current model parameters will be replaced with the
    averaged values.

    See also the documentation on the :ref:`weight averaging callbacks <advanced/training_tricks:Weight Averaging>`
    provided by Lightning.

    Note:
        To ensure that the :class:`AveragedModel` will contain all layers, ``setup()`` will call
        :meth:`~lightning.pytorch.core.hooks.ModelHooks.configure_model` before instantiating the
        :class:`AveragedModel`. However, that hook is not called in a strategy aware context, sharded models do not
        work with weight averaging, and a warning will be issued.

    Example::

        from lightning.pytorch.callbacks import WeightAveraging
        from torch.optim.swa_utils import get_ema_avg_fn

        class EMAWeightAveraging(WeightAveraging):
            def __init__(self):
                super().__init__(avg_fn=get_ema_avg_fn())

            def should_update(self, step_idx=None, epoch_idx=None):
                # Start after 100 steps.
                return (step_idx is not None) and (step_idx >= 100)

        trainer = Trainer(callbacks=EMAWeightAveraging(), max_epochs=10)
        trainer.fit(model, dataloader)

    Args:
        device: By default, the :class:`AveragedModel` will be stored on the same device as the original model. If
            the ``device`` argument is provided, the :class:`AveragedModel` will be stored on this device instead.
            If you run out of GPU memory, you might want to use ``"cpu"``.
        use_buffers: If ``False``, the buffers of the model will not be averaged.
        kwargs: Additional keyword arguments to be passed to the :class:`AveragedModel` constructor, such as
            ``avg_fn`` or ``multi_avg_fn``.

    """

    def __init__(
        self,
        device: Optional[Union[torch.device, str, int]] = None,
        use_buffers: bool = True,
        **kwargs: Any,
    ) -> None:
        # The default value is a string so that jsonargparse knows how to serialize it.
        if isinstance(device, str):
            self._device: Optional[Union[torch.device, int]] = torch.device(device)
        else:
            self._device = device
        self._use_buffers = use_buffers
        self._kwargs = kwargs

        self._average_model: Optional[AveragedModel] = None

        # Number of optimizer steps taken, when the average model was last updated. Initializing this with zero
        # ensures that self.should_update() will be first called after the first optimizer step, which takes place
        # after N batches when using accumulate_grad_batches=N.
        self._latest_update_step = 0
        # The epoch after which the average model was last updated. The first epoch is 0, so initializing this to a
        # negative value means that if self.should_update(epoch_idx=0) returns True, the first update is after the
        # first epoch.
        self._latest_update_epoch = -1

    def should_update(self, step_idx: Optional[int] = None, epoch_idx: Optional[int] = None) -> bool:
        """Called after every optimizer step and after every training epoch to check whether the average model
        should be updated.

        One of the arguments is set to the zero-based index of the last training step or epoch. The default
        implementation returns ``True`` when any ``step_idx`` is provided. The user can customize when the average
        model gets updated by overriding this method.

        Args:
            step_idx: Index of the last optimizer step, or ``None`` when called at the epoch end.
            epoch_idx: Index of the last epoch, or ``None`` when called after an optimizer step.

        Returns:
            ``True`` if the average model should be updated and ``False`` if not.

        """
        return step_idx is not None

    @override
    def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None:
        """Called when fit, validate, test, predict, or tune begins.

        Creates an :class:`AveragedModel` when fit begins.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.
            stage: The :class:`~lightning.pytorch.trainer.trainer.Trainer` state.

        """
        if stage == "fit":
            device = self._device or pl_module.device
            # If the configure_model hook is overridden, call it to create the layers before constructing the
            # AveragedModel. However, sharding will not be done and a warning will be issued.
            if is_overridden("configure_model", pl_module):
                rank_zero_warn(
                    "You're using the WeightAveraging callback with a model that overrides the configure_model "
                    "callback. WeightAveraging doesn't support sharding model layers, so you may run out of memory."
                )
                pl_module.configure_model()
            self._average_model = AveragedModel(
                model=pl_module, device=device, use_buffers=self._use_buffers, **self._kwargs
            )

    @override
    def on_train_batch_end(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int
    ) -> None:
        """Called when a training batch ends.

        Updates the :class:`AveragedModel` parameters, if requested by ``self.should_update()``.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.
            outputs: Outputs from the training batch.
            batch: The training batch.
            batch_idx: Index of the training batch.

        """
        # trainer.global_step is the number of optimizer steps taken so far, i.e. 1 after the first optimizer step.
        # To make step_idx consistent with epoch_idx, we'll pass a zero-based index.
        step_idx = trainer.global_step - 1
        if (trainer.global_step > self._latest_update_step) and self.should_update(step_idx=step_idx):
            assert self._average_model is not None
            self._average_model.update_parameters(pl_module)
            self._latest_update_step = trainer.global_step

    @override
    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Called when a training epoch ends.

        Updates the :class:`AveragedModel` parameters, if requested by ``self.should_update()``.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.

        """
        if (trainer.current_epoch > self._latest_update_epoch) and self.should_update(epoch_idx=trainer.current_epoch):
            assert self._average_model is not None
            self._average_model.update_parameters(pl_module)
            self._latest_update_epoch = trainer.current_epoch

    @override
    def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Called when training ends.

        Transfers parameters from the :class:`AveragedModel` to the current model.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.

        """
        assert self._average_model is not None
        self._copy_average_to_current(pl_module)

    @override
    def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Called when a validation epoch begins.

        Transfers parameter values from the :class:`AveragedModel` to the current model.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.

        """
        if self._average_model is not None:
            self._swap_models(pl_module)

    @override
    def on_validation_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Called when a validation epoch ends.

        Recovers the current model parameters from the :class:`AveragedModel`.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.

        """
        if self._average_model is not None:
            self._swap_models(pl_module)

    @override
    def state_dict(self) -> dict[str, Any]:
        """Called when saving a checkpoint.

        Creates a ``state_dict`` of the callback state.

        Returns:
            A dictionary containing the callback state.

        """
        return {"latest_update_step": self._latest_update_step}

    @override
    def load_state_dict(self, state_dict: dict[str, Any]) -> None:
        """Called when loading a checkpoint.

        Reloads the callback state given a ``state_dict``.

        Args:
            state_dict: A dictionary containing the callback state.

        """
        self._latest_update_step = state_dict["latest_update_step"]

    @override
    def on_save_checkpoint(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: dict[str, Any]
    ) -> None:
        r"""Called when saving a checkpoint.

        Moves the current model state to the key ``current_model_state``, and places the average model state in
        ``state_dict`` instead. Any other state variables of the ``AveragedModel`` will be saved in
        ``averaging_state``.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.
            checkpoint: The checkpoint dictionary that will be saved.

        """
        if self._average_model is None:
            rank_zero_info(
                "You're using the WeightAveraging callback, but saving a checkpoint outside the 'fit' stage. The "
                "state of the WeightAveraging callback won't be saved in the checkpoint. If training has finished, "
                "the average model parameters will be saved to the state_dict in the checkpoint."
            )
        else:
            average_model_state = self._average_model.state_dict()
            checkpoint["current_model_state"] = checkpoint["state_dict"]
            # Truncate the "module." prefix (the first 7 characters) from the names of the variables in the
            # AveragedModel state.
            checkpoint["state_dict"] = {
                name[7:]: value for name, value in average_model_state.items() if name.startswith("module.")
            }
            checkpoint["averaging_state"] = {
                name: value for name, value in average_model_state.items() if not name.startswith("module.")
            }

    @override
    def on_load_checkpoint(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: dict[str, Any]
    ) -> None:
        r"""Called when loading a model checkpoint.

        Loads the current model and the :class:`AveragedModel` parameters from the checkpoint.

        Args:
            trainer: The current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.
            checkpoint: The full checkpoint dictionary that got loaded by the Trainer.

        """
        if self._average_model is None:
            rank_zero_warn(
                "You're using the WeightAveraging callback, but loading a checkpoint outside the 'fit' stage. The "
                "WeightAveraging state cannot be restored. If you're using the checkpoint for prediction or testing, "
                "you can ignore this warning. To disable the warning, remove the WeightAveraging callback."
            )
        elif ("current_model_state" in checkpoint) and ("averaging_state" in checkpoint):
            rank_zero_info("Found current_model_state in the checkpoint. This will be used to initialize the model.")
            average_model_state = {"module." + name: value for name, value in checkpoint["state_dict"].items()}
            average_model_state |= checkpoint["averaging_state"]
            self._average_model.load_state_dict(average_model_state)
            # The current model state has already been loaded from "state_dict" (which contains the average model
            # weights) at this point, so overwriting "state_dict" in the checkpoint dictionary makes no difference.
            # We have to reload the model state from "current_model_state".
            pl_module.load_state_dict(checkpoint["current_model_state"])
        else:
            rank_zero_warn(
                "The checkpoint was not created with WeightAveraging. Both the current and the average model will be "
                "initialized with state_dict."
            )
            self._average_model.module.load_state_dict(deepcopy(checkpoint["state_dict"]), strict=False)

    def _swap_models(self, pl_module: "pl.LightningModule") -> None:
        """Swaps the parameter values of the current model and the :class:`AveragedModel`.

        Args:
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.

        """
        assert self._average_model is not None
        average_params = itertools.chain(self._average_model.module.parameters(), self._average_model.module.buffers())
        current_params = itertools.chain(pl_module.parameters(), pl_module.buffers())
        for average_param, current_param in zip(average_params, current_params):
            tmp = average_param.data.clone()
            average_param.data.copy_(current_param.data)
            current_param.data.copy_(tmp)

    def _copy_average_to_current(self, pl_module: "pl.LightningModule") -> None:
        """Copies the parameter values from the :class:`AveragedModel` to the current model.

        Args:
            pl_module: The current :class:`~lightning.pytorch.core.LightningModule` instance.

        """
        assert self._average_model is not None
        average_params = itertools.chain(self._average_model.module.parameters(), self._average_model.module.buffers())
        current_params = itertools.chain(pl_module.parameters(), pl_module.buffers())
        for average_param, current_param in zip(average_params, current_params):
            current_param.data.copy_(average_param.data)


class EMAWeightAveraging(WeightAveraging):
    """Exponential Moving Average (EMA) Weight Averaging callback."""

    def __init__(
        self,
        device: Optional[Union[torch.device, str, int]] = None,
        use_buffers: bool = True,
        decay: float = 0.999,
        update_every_n_steps: int = 1,
        update_starting_at_step: Optional[int] = None,
        update_starting_at_epoch: Optional[int] = None,
        **kwargs: Any,
    ):
        super().__init__(
            device=device,
            use_buffers=use_buffers,
            **kwargs,
            avg_fn=get_ema_avg_fn(decay=decay),
        )
        self.update_every_n_steps = update_every_n_steps
        self.update_starting_at_step = update_starting_at_step
        self.update_starting_at_epoch = update_starting_at_epoch

    def should_update(self, step_idx: Optional[int] = None, epoch_idx: Optional[int] = None) -> bool:
        """Decide when to update the model weights.

        Args:
            step_idx: The current step index.
            epoch_idx: The current epoch index.

        Returns:
            bool: True if the model weights should be updated, False otherwise.

        """
        if step_idx is not None:
            # Check step-based conditions only if we have a valid step_idx
            meets_step_requirement = self.update_starting_at_step is None or step_idx >= self.update_starting_at_step
            meets_step_frequency = self.update_every_n_steps > 0 and step_idx % self.update_every_n_steps == 0
            if meets_step_requirement and meets_step_frequency:
                return True
        if epoch_idx is not None:
            # Check epoch-based condition only if we specify one
            meets_epoch_requirement = (
                self.update_starting_at_epoch is not None and epoch_idx >= self.update_starting_at_epoch
            )
            if meets_epoch_requirement:
                return True
        return False
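For orientation, here is a minimal usage sketch of the `EMAWeightAveraging` callback defined above. The import path matches the one used by the test suite; `MyModel` and `train_loader` are hypothetical placeholders for a LightningModule and a DataLoader.

# A minimal usage sketch (assumes `MyModel` is a LightningModule and `train_loader` a DataLoader):
# start EMA updates at optimizer step 100 and refresh the average on every step thereafter.
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import EMAWeightAveraging

ema = EMAWeightAveraging(decay=0.999, update_every_n_steps=1, update_starting_at_step=100)
trainer = Trainer(max_epochs=10, callbacks=[ema])
trainer.fit(MyModel(), train_loader)  # averaged weights replace the model's when training ends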
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "src/lightning/pytorch/callbacks/weight_averaging.py", "license": "Apache License 2.0", "lines": 332, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
Lightning-AI/pytorch-lightning:tests/tests_pytorch/callbacks/test_weight_averaging.py
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from copy import deepcopy
from pathlib import Path
from typing import Any, Optional

import pytest
import torch
from torch import Tensor, nn
from torch.optim.swa_utils import get_swa_avg_fn
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import LightningModule, Trainer
from lightning.pytorch.callbacks import EMAWeightAveraging, WeightAveraging
from lightning.pytorch.demos.boring_classes import BoringModel, RandomDataset, RandomIterableDataset
from tests_pytorch.helpers.runif import RunIf


class TestModel(BoringModel):
    def __init__(self, batch_norm: bool = True) -> None:
        super().__init__()
        layers = [nn.Linear(32, 32)]
        if batch_norm:
            layers.append(nn.BatchNorm1d(32))
        layers += [nn.ReLU(), nn.Linear(32, 2)]
        self.layer = nn.Sequential(*layers)
        self.crash_on_epoch = None

    def training_step(self, batch: Tensor, batch_idx: int) -> None:
        if self.crash_on_epoch and self.trainer.current_epoch >= self.crash_on_epoch:
            raise Exception("CRASH")
        return super().training_step(batch, batch_idx)

    def configure_optimizers(self) -> None:
        return torch.optim.SGD(self.layer.parameters(), lr=0.1)


class LargeTestModel(BoringModel):
    def __init__(self):
        super().__init__()
        self.layer = None

    def configure_model(self):
        print("XXX configure_model")
        self.layer = nn.Sequential(nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, 2))

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


class EMAAveragingFunction:
    """EMA averaging function.

    Functionally equivalent to the closure that ``get_ema_avg_fn()`` would return. This class is needed because we
    cannot use a closure with ddp_spawn. (``Popen(process_obj)`` would fail with ``Can't get local object
    'get_ema_avg_fn.<locals>.ema_update'``).

    """

    def __init__(self, decay: float = 0.999) -> None:
        self.decay = decay

    @torch.no_grad()
    def __call__(self, ema_param: Tensor, current_param: Tensor, num_averaged: Tensor) -> Tensor:
        return self.decay * ema_param + (1 - self.decay) * current_param


class EMATestCallback(WeightAveraging):
    def __init__(self, devices: int = 1, **kwargs: Any) -> None:
        super().__init__(avg_fn=EMAAveragingFunction(), **kwargs)
        self.devices = devices
        self.swap_calls = 0
        self.copy_calls = 0
        # Record the first epoch, as if we are resuming from a checkpoint this may not be equal to 0.
        self.first_epoch: Optional[int] = None

    def _swap_models(self, *args: Any, **kwargs: Any):
        self.swap_calls += 1
        return super()._swap_models(*args, **kwargs)

    def _copy_average_to_current(self, *args: Any, **kwargs: Any):
        self.copy_calls += 1
        return super()._copy_average_to_current(*args, **kwargs)

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        super().on_train_start(trainer, pl_module)
        assert self.swap_calls == 0
        assert self.copy_calls == 0

    def on_train_epoch_start(self, trainer: Trainer, *args: Any) -> None:
        super().on_train_epoch_start(trainer, *args)
        # Since the checkpoint loaded was saved `on_train_epoch_end`, the first `FitLoop` iteration will not update
        # the model and will just call the epoch-level hooks. For that reason, we check that we are not restarting
        # before choosing the first epoch.
        if self.first_epoch is None and not trainer.fit_loop.restarting:
            self.first_epoch = trainer.current_epoch

    def on_train_epoch_end(self, trainer: Trainer, *args: Any) -> None:
        super().on_train_epoch_end(trainer, *args)
        assert self._average_model.n_averaged == trainer.global_step
        assert self.swap_calls == (trainer.current_epoch + 1 - self.first_epoch) * 2
        assert self.copy_calls == 0

    def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        super().on_train_end(trainer, pl_module)
        # length=32, batch_size=4, accumulate_grad_batches=2
        # => Using one process we have 4 optimizer steps per epoch.
        # => Using two processes we have 2 optimizer steps per epoch.
        steps_per_epoch = 4 // self.devices
        assert self._average_model.n_averaged == trainer.max_epochs * steps_per_epoch
        assert self.swap_calls == (trainer.max_epochs - self.first_epoch) * 2
        assert self.copy_calls == 1


class SWATestCallback(WeightAveraging):
    def __init__(self, **kwargs: Any) -> None:
        super().__init__(avg_fn=get_swa_avg_fn(), **kwargs)
        self.swap_calls = 0
        self.copy_calls = 0
        # Record the first epoch, as if we are resuming from a checkpoint this may not be equal to 0.
        self.first_epoch: Optional[int] = None

    def should_update(self, step_idx: Optional[int] = None, epoch_idx: Optional[int] = None) -> bool:
        return epoch_idx in (3, 5, 7)

    def _swap_models(self, *args: Any, **kwargs: Any):
        self.swap_calls += 1
        return super()._swap_models(*args, **kwargs)

    def _copy_average_to_current(self, *args: Any, **kwargs: Any):
        self.copy_calls += 1
        return super()._copy_average_to_current(*args, **kwargs)

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        super().on_train_start(trainer, pl_module)
        assert self.swap_calls == 0
        assert self.copy_calls == 0

    def on_train_epoch_start(self, trainer: Trainer, *args: Any) -> None:
        super().on_train_epoch_start(trainer, *args)
        # Since the checkpoint loaded was saved `on_train_epoch_end`, the first `FitLoop` iteration will not update
        # the model and will just call the epoch-level hooks. For that reason, we check that we are not restarting
        # before choosing the first epoch.
        if self.first_epoch is None and not trainer.fit_loop.restarting:
            self.first_epoch = trainer.current_epoch

    def on_train_epoch_end(self, trainer: Trainer, *args: Any) -> None:
        super().on_train_epoch_end(trainer, *args)
        if trainer.current_epoch < 3:
            assert self._average_model.n_averaged == 0
        elif trainer.current_epoch < 5:
            assert self._average_model.n_averaged == 1
        elif trainer.current_epoch < 7:
            assert self._average_model.n_averaged == 2
        else:
            assert self._average_model.n_averaged == 3
        assert self.swap_calls == (trainer.current_epoch + 1 - self.first_epoch) * 2
        assert self.copy_calls == 0

    def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        super().on_train_end(trainer, pl_module)
        assert self._average_model.n_averaged == 3
        assert self.swap_calls == (trainer.max_epochs - self.first_epoch) * 2
        assert self.copy_calls == 1


def test_weight_averaging_deepcopy(tmp_path):
    """Ensure that WeightAveraging callback doesn't deepcopy the data loaders or the data module and consume memory
    more than necessary."""

    class TestCallback(WeightAveraging):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.setup_called = False

        def setup(self, trainer, pl_module, stage) -> None:
            super().setup(trainer, pl_module, stage)
            assert self._average_model.module.train_dataloader is not pl_module.train_dataloader
            assert self._average_model.module.train_dataloader.__self__ == self._average_model.module
            assert self._average_model.module._trainer is None
            self.setup_called = True

    callback = TestCallback()
    trainer = Trainer(default_root_dir=tmp_path, callbacks=callback, fast_dev_run=True)
    trainer.fit(BoringModel(), train_dataloaders=DataLoader(RandomDataset(32, 2)))
    assert callback.setup_called


@pytest.mark.parametrize("batch_norm", [True, False])
@pytest.mark.parametrize("iterable_dataset", [True, False])
def test_ema(tmp_path, batch_norm: bool, iterable_dataset: bool):
    model = TestModel(batch_norm=batch_norm)
    dataset = RandomIterableDataset(32, 32) if iterable_dataset else RandomDataset(32, 32)
    _train(model, dataset, tmp_path, EMATestCallback())


@pytest.mark.parametrize(
    "accelerator", [pytest.param("gpu", marks=RunIf(min_cuda_gpus=1)), pytest.param("mps", marks=RunIf(mps=True))]
)
def test_ema_accelerator(tmp_path, accelerator):
    model = TestModel()
    dataset = RandomDataset(32, 32)
    _train(model, dataset, tmp_path, EMATestCallback(), accelerator=accelerator, devices=1)


@RunIf(min_cuda_gpus=2, standalone=True)
def test_ema_ddp(tmp_path):
    model = TestModel()
    dataset = RandomDataset(32, 32)
    _train(model, dataset, tmp_path, EMATestCallback(devices=2), strategy="ddp", accelerator="gpu", devices=2)


@RunIf(min_cuda_gpus=2)
def test_ema_ddp_spawn(tmp_path):
    model = TestModel()
    dataset = RandomDataset(32, 32)
    _train(model, dataset, tmp_path, EMATestCallback(devices=2), strategy="ddp_spawn", accelerator="gpu", devices=2)


@RunIf(skip_windows=True)
def test_ema_ddp_spawn_cpu(tmp_path):
    model = TestModel()
    dataset = RandomDataset(32, 32)
    _train(model, dataset, tmp_path, EMATestCallback(devices=2), strategy="ddp_spawn", accelerator="cpu", devices=2)


@pytest.mark.parametrize("crash_on_epoch", [1, 3, 5])
def test_ema_resume(tmp_path, crash_on_epoch):
    dataset = RandomDataset(32, 32)
    model1 = TestModel()
    model2 = deepcopy(model1)

    _train(model1, dataset, tmp_path, EMATestCallback())

    model2.crash_on_epoch = crash_on_epoch
    model2 = _train_and_resume(model2, dataset, tmp_path)

    for param1, param2 in zip(model1.parameters(), model2.parameters()):
        assert torch.allclose(param1, param2)


@RunIf(skip_windows=True)
def test_ema_resume_ddp(tmp_path):
    model = TestModel()
    model.crash_on_epoch = 3
    dataset = RandomDataset(32, 32)
    _train_and_resume(model, dataset, tmp_path, strategy="ddp_spawn", devices=2)


def test_swa(tmp_path):
    model = TestModel()
    dataset = RandomDataset(32, 32)
    _train(model, dataset, tmp_path, SWATestCallback())


@pytest.mark.parametrize(
    ("strategy", "accelerator", "devices"),
    [
        ("auto", "cpu", 1),
        pytest.param("auto", "gpu", 1, marks=RunIf(min_cuda_gpus=1)),
        pytest.param("fsdp", "gpu", 1, marks=RunIf(min_cuda_gpus=1)),
    ],
)
def test_ema_configure_model(tmp_path, strategy, accelerator, devices):
    model = LargeTestModel()
    dataset = RandomDataset(32, 32)
    callback = EMATestCallback()
    _train(model, dataset, tmp_path, callback, strategy=strategy, accelerator=accelerator, devices=devices)
    assert isinstance(callback._average_model.module.layer, nn.Sequential)


def _train(
    model: BoringModel,
    dataset: Dataset,
    tmp_path: str,
    callback: WeightAveraging,
    strategy: str = "auto",
    accelerator: str = "cpu",
    devices: int = 1,
    checkpoint_path: Optional[str] = None,
    will_crash: bool = False,
) -> None:
    deterministic = accelerator == "cpu"
    trainer = Trainer(
        accelerator=accelerator,
        strategy=strategy,
        devices=devices,
        logger=False,
        callbacks=callback,
        max_epochs=8,
        num_sanity_val_steps=0,
        enable_checkpointing=will_crash,
        enable_progress_bar=False,
        enable_model_summary=False,
        accumulate_grad_batches=2,
        deterministic=deterministic,
        default_root_dir=tmp_path,
    )
    dataloader = DataLoader(dataset, batch_size=4, shuffle=False)
    if will_crash:
        with pytest.raises(Exception, match="CRASH"):
            trainer.fit(model, dataloader, ckpt_path=checkpoint_path)
    else:
        trainer.fit(model, dataloader, ckpt_path=checkpoint_path)
        assert trainer.lightning_module == model


def _train_and_resume(model: TestModel, dataset: Dataset, tmp_path: str, devices: int = 1, **kwargs) -> TestModel:
    _train(model, dataset, tmp_path, EMATestCallback(devices=devices), devices=devices, will_crash=True, **kwargs)

    checkpoint_dir = Path(tmp_path) / "checkpoints"
    checkpoint_names = os.listdir(checkpoint_dir)
    assert len(checkpoint_names) == 1
    checkpoint_path = str(checkpoint_dir / checkpoint_names[0])

    model = TestModel.load_from_checkpoint(checkpoint_path)
    callback = EMATestCallback(devices=devices)
    _train(model, dataset, tmp_path, callback, devices=devices, checkpoint_path=checkpoint_path, **kwargs)
    return model


@pytest.mark.parametrize(
    ("strategy", "accelerator", "devices"),
    [
        ("auto", "cpu", 1),
        pytest.param("auto", "gpu", 1, marks=RunIf(min_cuda_gpus=1)),
    ],
)
def test_ema_weight_averaging(tmp_path, strategy, accelerator, devices):
    """Test EMAWeightAveraging callback with various update configurations."""
    model = TestModel()
    dataset = RandomDataset(32, 32)

    # Test with default settings (update every step)
    callback = EMAWeightAveraging(decay=0.999, update_every_n_steps=1)
    _train(model, dataset, tmp_path, callback, strategy=strategy, accelerator=accelerator, devices=devices)

    # Verify the average model was created and updated
    assert callback._average_model is not None
    assert callback._average_model.n_averaged > 0


def test_ema_weight_averaging_step_frequency(tmp_path):
    """Test EMAWeightAveraging with custom step update frequency."""
    model = TestModel()
    dataset = RandomDataset(32, 32)

    # Update every 5 steps
    callback = EMAWeightAveraging(decay=0.95, update_every_n_steps=5)
    _train(model, dataset, tmp_path, callback)
    assert callback._average_model is not None


def test_ema_weight_averaging_starting_step(tmp_path):
    """Test EMAWeightAveraging with delayed start based on steps."""
    model = TestModel()
    dataset = RandomDataset(32, 32)

    # Start updating after step 10
    callback = EMAWeightAveraging(decay=0.999, update_every_n_steps=1, update_starting_at_step=10)
    _train(model, dataset, tmp_path, callback)
    assert callback._average_model is not None


def test_ema_weight_averaging_starting_epoch(tmp_path):
    """Test EMAWeightAveraging with delayed start based on epochs."""
    model = TestModel()
    dataset = RandomDataset(32, 32)

    # Start updating after epoch 3
    callback = EMAWeightAveraging(decay=0.999, update_every_n_steps=1, update_starting_at_epoch=3)
    _train(model, dataset, tmp_path, callback)
    assert callback._average_model is not None


def test_ema_weight_averaging_should_update(tmp_path):
    """Test the should_update logic of EMAWeightAveraging."""
    # Test with step-based updates
    callback = EMAWeightAveraging(update_every_n_steps=5, update_starting_at_step=10)

    # Before starting step
    assert not callback.should_update(step_idx=5)
    assert not callback.should_update(step_idx=9)

    # At and after starting step, but not on update frequency
    assert callback.should_update(step_idx=10)  # First update
    assert not callback.should_update(step_idx=11)
    assert not callback.should_update(step_idx=14)
    assert callback.should_update(step_idx=15)  # Second update

    # Test with epoch-based updates
    callback = EMAWeightAveraging(update_starting_at_epoch=2)
    assert not callback.should_update(epoch_idx=0)
    assert not callback.should_update(epoch_idx=1)
    assert callback.should_update(epoch_idx=2)
    assert callback.should_update(epoch_idx=3)


def test_ema_weight_averaging_checkpoint_save_load(tmp_path):
    """Test that EMAWeightAveraging correctly saves and loads checkpoints."""
    model = TestModel()
    model.crash_on_epoch = 2
    dataset = RandomDataset(32, 32)

    callback = EMAWeightAveraging(decay=0.99, update_every_n_steps=2)

    # Train and create checkpoint
    _train(model, dataset, tmp_path, callback, will_crash=True)

    # Resume from checkpoint
    model2 = TestModel()
    callback2 = EMAWeightAveraging(decay=0.99, update_every_n_steps=2)
    _train(
        model2,
        dataset,
        tmp_path,
        callback2,
        checkpoint_path=glob.glob((tmp_path / "checkpoints" / "*.ckpt").as_posix())[0],
    )
    assert callback2._average_model is not None


@pytest.mark.parametrize("decay", [0.9, 0.99, 0.999, 0.9999])
def test_ema_weight_averaging_decay_values(tmp_path, decay):
    """Test EMAWeightAveraging with different decay values."""
    model = TestModel()
    dataset = RandomDataset(32, 32)

    callback = EMAWeightAveraging(decay=decay, update_every_n_steps=1)
    _train(model, dataset, tmp_path, callback)
    assert callback._average_model is not None
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/callbacks/test_weight_averaging.py", "license": "Apache License 2.0", "lines": 353, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/models/test_torch_tensorrt.py
import os
import re
from io import BytesIO
from pathlib import Path

import pytest
import torch

import tests_pytorch.helpers.pipelines as pipes
from lightning.pytorch.core.module import _TORCH_TRT_AVAILABLE
from lightning.pytorch.demos.boring_classes import BoringModel
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.imports import _TORCH_EQUAL_2_9
from tests_pytorch.helpers.runif import RunIf


@RunIf(max_torch="2.2.0")
def test_torch_minimum_version():
    model = BoringModel()
    with pytest.raises(
        MisconfigurationException,
        match=re.escape(f"TensorRT export requires PyTorch 2.2 or higher. Current version is {torch.__version__}."),
    ):
        model.to_tensorrt("model.trt")


@pytest.mark.skipif(_TORCH_TRT_AVAILABLE, reason="Run this test only if tensorrt is not available.")
@RunIf(min_torch="2.2.0")
def test_missing_tensorrt_package():
    model = BoringModel()
    with pytest.raises(
        ModuleNotFoundError,
        match=re.escape(f"`{type(model).__name__}.to_tensorrt` requires `torch_tensorrt` to be installed. "),
    ):
        model.to_tensorrt("model.trt")


@RunIf(tensorrt=True, min_torch="2.2.0")
def test_tensorrt_with_wrong_default_device(tmp_path):
    model = BoringModel()
    input_sample = torch.randn((1, 32))
    file_path = os.path.join(tmp_path, "model.trt")
    with pytest.raises(MisconfigurationException):
        model.to_tensorrt(file_path, input_sample, default_device="cpu")


@RunIf(tensorrt=True, min_cuda_gpus=1, min_torch="2.2.0")
def test_tensorrt_saves_with_input_sample(tmp_path):
    model = BoringModel()
    ori_device = model.device
    input_sample = torch.randn((1, 32))

    file_path = os.path.join(tmp_path, "model.trt")
    model.to_tensorrt(file_path, input_sample)

    assert os.path.isfile(file_path)
    assert os.path.getsize(file_path) > 4e2
    assert model.device == ori_device

    file_path = Path(tmp_path) / "model.trt"
    model.to_tensorrt(file_path, input_sample)

    assert os.path.isfile(file_path)
    assert os.path.getsize(file_path) > 4e2
    assert model.device == ori_device

    file_path = BytesIO()
    model.to_tensorrt(file_path, input_sample)
    assert len(file_path.getvalue()) > 4e2


@RunIf(tensorrt=True, min_cuda_gpus=1, min_torch="2.2.0")
def test_tensorrt_error_if_no_input(tmp_path):
    model = BoringModel()
    model.example_input_array = None
    file_path = os.path.join(tmp_path, "model.trt")

    with pytest.raises(
        ValueError,
        match=r"Could not export to TensorRT since neither `input_sample` nor "
        r"`model.example_input_array` attribute is set.",
    ):
        model.to_tensorrt(file_path)


@RunIf(tensorrt=True, min_cuda_gpus=2, min_torch="2.2.0")
def test_tensorrt_saves_on_multi_gpu(tmp_path):
    trainer_options = {
        "default_root_dir": tmp_path,
        "max_epochs": 1,
        "limit_train_batches": 10,
        "limit_val_batches": 10,
        "accelerator": "gpu",
        "devices": [0, 1],
        "strategy": "ddp_spawn",
        "enable_progress_bar": False,
    }

    model = BoringModel()
    model.example_input_array = torch.randn((4, 32))

    pipes.run_model_test(trainer_options, model, min_acc=0.08)

    file_path = os.path.join(tmp_path, "model.trt")
    model.to_tensorrt(file_path)

    assert os.path.exists(file_path)


@pytest.mark.parametrize(
    ("ir", "export_type"),
    [
        ("default", torch.fx.GraphModule),
        ("dynamo", torch.fx.GraphModule),
        pytest.param(
            "ts",
            torch.jit.ScriptModule,
            marks=pytest.mark.skipif(
                _TORCH_EQUAL_2_9,
                reason="TorchScript IR crashes with torch_tensorrt on PyTorch 2.9",
            ),
        ),
    ],
)
@RunIf(tensorrt=True, min_cuda_gpus=1, min_torch="2.2.0")
def test_tensorrt_save_ir_type(ir, export_type):
    model = BoringModel()
    model.example_input_array = torch.randn((4, 32))
    ret = model.to_tensorrt(ir=ir)
    assert isinstance(ret, export_type)


@pytest.mark.parametrize(
    "output_format",
    ["exported_program", "torchscript"],
)
@pytest.mark.parametrize(
    "ir",
    [
        "default",
        "dynamo",
        pytest.param(
            "ts",
            marks=pytest.mark.skipif(
                _TORCH_EQUAL_2_9,
                reason="TorchScript IR crashes with torch_tensorrt on PyTorch 2.9",
            ),
        ),
    ],
)
@RunIf(tensorrt=True, min_cuda_gpus=1, min_torch="2.2.0")
def test_tensorrt_export_reload(output_format, ir, tmp_path):
    if ir == "ts" and output_format == "exported_program":
        pytest.skip("TorchScript cannot be exported as exported_program")

    import torch_tensorrt

    model = BoringModel()
    model.cuda().eval()
    model.example_input_array = torch.ones((4, 32))

    file_path = os.path.join(tmp_path, "model.trt")
    model.to_tensorrt(file_path, output_format=output_format, ir=ir)

    loaded_model = torch_tensorrt.load(file_path)
    if output_format == "exported_program":
        loaded_model = loaded_model.module()

    with torch.no_grad(), torch.inference_mode():
        model_output = model(model.example_input_array.to("cuda"))
        jit_output = loaded_model(model.example_input_array.to("cuda"))

    assert torch.allclose(model_output, jit_output, rtol=1e-03, atol=1e-06)
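Taken together, the export/reload tests above boil down to the following round trip. This is a minimal sketch that mirrors the calls in the test (not a recommendation of specific arguments); it assumes a CUDA device and `torch_tensorrt` installed.

# A minimal export/reload round-trip sketch (assumes CUDA and torch_tensorrt; mirrors the test above).
import torch
import torch_tensorrt

from lightning.pytorch.demos.boring_classes import BoringModel

model = BoringModel().cuda().eval()
model.example_input_array = torch.ones((4, 32))
model.to_tensorrt("model.trt", output_format="exported_program", ir="dynamo")

loaded = torch_tensorrt.load("model.trt").module()  # unwrap the exported program
with torch.no_grad(), torch.inference_mode():
    out = loaded(model.example_input_array.to("cuda"))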
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/models/test_torch_tensorrt.py", "license": "Apache License 2.0", "lines": 139, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/trainer/connectors/test_rich_integration.py
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch

import pytest
import torch

from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import ModelSummary, ProgressBar, RichModelSummary, RichProgressBar, TQDMProgressBar
from lightning.pytorch.demos.boring_classes import BoringModel


class TestRichIntegration:
    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
    def test_no_rich_defaults_tqdm_and_model_summary(self, tmp_path):
        trainer = Trainer(default_root_dir=tmp_path, logger=False, enable_checkpointing=False)
        assert any(isinstance(cb, TQDMProgressBar) for cb in trainer.callbacks)
        assert any(isinstance(cb, ModelSummary) for cb in trainer.callbacks)
        assert not any(isinstance(cb, RichProgressBar) for cb in trainer.callbacks)
        assert not any(isinstance(cb, RichModelSummary) for cb in trainer.callbacks)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
    def test_no_rich_respects_user_provided_tqdm_progress_bar(self, tmp_path):
        user_progress_bar = TQDMProgressBar()
        trainer = Trainer(
            default_root_dir=tmp_path, callbacks=[user_progress_bar], logger=False, enable_checkpointing=False
        )
        assert user_progress_bar in trainer.callbacks
        assert sum(isinstance(cb, ProgressBar) for cb in trainer.callbacks) == 1

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
    def test_no_rich_respects_user_provided_model_summary(self, tmp_path):
        user_model_summary = ModelSummary()
        trainer = Trainer(
            default_root_dir=tmp_path, callbacks=[user_model_summary], logger=False, enable_checkpointing=False
        )
        assert user_model_summary in trainer.callbacks
        assert sum(isinstance(cb, ModelSummary) for cb in trainer.callbacks) == 1
        # Check that the specific instance is the one from the trainer's list of ModelSummary callbacks
        assert trainer.callbacks[trainer.callbacks.index(user_model_summary)] == user_model_summary

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
    def test_no_rich_respects_user_provided_rich_model_summary(self, tmp_path):
        user_model_summary = RichModelSummary()
        trainer = Trainer(
            default_root_dir=tmp_path, callbacks=[user_model_summary], logger=False, enable_checkpointing=False
        )
        assert user_model_summary in trainer.callbacks
        assert sum(isinstance(cb, ModelSummary) for cb in trainer.callbacks) == 1
        # Check that the specific instance is the one from the trainer's list of ModelSummary callbacks
        model_summary_callbacks = [cb for cb in trainer.callbacks if isinstance(cb, ModelSummary)]
        assert user_model_summary in model_summary_callbacks
        assert isinstance(model_summary_callbacks[0], RichModelSummary)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", True)
    def test_rich_available_defaults_rich_progress_and_summary(self, tmp_path):
        trainer = Trainer(default_root_dir=tmp_path, logger=False, enable_checkpointing=False)
        assert any(isinstance(cb, RichProgressBar) for cb in trainer.callbacks)
        assert any(isinstance(cb, RichModelSummary) for cb in trainer.callbacks)
        assert not any(isinstance(cb, TQDMProgressBar) for cb in trainer.callbacks)
        # Ensure the only ModelSummary is the RichModelSummary
        model_summaries = [cb for cb in trainer.callbacks if isinstance(cb, ModelSummary)]
        assert len(model_summaries) == 1
        assert isinstance(model_summaries[0], RichModelSummary)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", True)
    def test_rich_available_respects_user_tqdm_progress_bar(self, tmp_path):
        user_progress_bar = TQDMProgressBar()
        trainer = Trainer(
            default_root_dir=tmp_path, callbacks=[user_progress_bar], logger=False, enable_checkpointing=False
        )
        assert user_progress_bar in trainer.callbacks
        assert sum(isinstance(cb, ProgressBar) for cb in trainer.callbacks) == 1
        assert isinstance(trainer.progress_bar_callback, TQDMProgressBar)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", True)
    def test_rich_available_respects_user_model_summary(self, tmp_path):
        user_model_summary = ModelSummary()  # Non-rich
        trainer = Trainer(
            default_root_dir=tmp_path, callbacks=[user_model_summary], logger=False, enable_checkpointing=False
        )
        assert user_model_summary in trainer.callbacks
        model_summaries = [cb for cb in trainer.callbacks if isinstance(cb, ModelSummary)]
        assert len(model_summaries) == 1
        assert isinstance(model_summaries[0], ModelSummary)
        assert not isinstance(model_summaries[0], RichModelSummary)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
    def test_progress_bar_disabled_no_rich(self, tmp_path):
        trainer = Trainer(
            default_root_dir=tmp_path, enable_progress_bar=False, logger=False, enable_checkpointing=False
        )
        assert not any(isinstance(cb, ProgressBar) for cb in trainer.callbacks)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", True)
    def test_progress_bar_disabled_with_rich(self, tmp_path):
        trainer = Trainer(
            default_root_dir=tmp_path, enable_progress_bar=False, logger=False, enable_checkpointing=False
        )
        assert not any(isinstance(cb, ProgressBar) for cb in trainer.callbacks)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
    def test_model_summary_disabled_no_rich(self, tmp_path):
        trainer = Trainer(
            default_root_dir=tmp_path, enable_model_summary=False, logger=False, enable_checkpointing=False
        )
        assert not any(isinstance(cb, ModelSummary) for cb in trainer.callbacks)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", True)
    def test_model_summary_disabled_with_rich(self, tmp_path):
        trainer = Trainer(
            default_root_dir=tmp_path, enable_model_summary=False, logger=False, enable_checkpointing=False
        )
        assert not any(isinstance(cb, ModelSummary) for cb in trainer.callbacks)

    @patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", True)
    def test_rich_progress_bar_tensor_metric(self, tmp_path):
        """Test that tensor metrics are converted to float for RichProgressBar."""

        class MyModel(BoringModel):
            def training_step(self, batch, batch_idx):
                self.log("my_tensor_metric", torch.tensor(1.23), prog_bar=True)
                return super().training_step(batch, batch_idx)

        model = MyModel()
        trainer = Trainer(
            default_root_dir=tmp_path,
            limit_train_batches=1,
            limit_val_batches=0,
            max_epochs=1,
            logger=False,
            enable_checkpointing=False,
        )

        with patch("lightning.pytorch.callbacks.progress.rich_progress.MetricsTextColumn.update") as mock_update:
            trainer.fit(model)

        assert mock_update.call_count > 0
        # The metrics are updated multiple times, check the last call
        last_call_metrics = mock_update.call_args[0][0]
        assert "my_tensor_metric" in last_call_metrics
        metric_val = last_call_metrics["my_tensor_metric"]
        assert isinstance(metric_val, float)
        assert metric_val == pytest.approx(1.23)
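The behavior pinned down by these tests translates into a simple rule for users: pass the progress bar or summary callback you want explicitly, and the callback connector keeps it even when rich is installed. A minimal sketch:

# A minimal sketch: keep the TQDM progress bar even when rich is available by passing it explicitly.
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import TQDMProgressBar

trainer = Trainer(callbacks=[TQDMProgressBar()])  # the connector will not swap in RichProgressBar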
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/trainer/connectors/test_rich_integration.py", "license": "Apache License 2.0", "lines": 136, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/loops/test_double_iter_in_iterable_dataset.py
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This test tests the resuming of training from a checkpoint file using an IterableDataset.
# And contains code mentioned in the issue: #19427.
# Ref: https://github.com/Lightning-AI/pytorch-lightning/issues/19427

import multiprocessing as mp
import os
import sys
from collections.abc import Iterator
from pathlib import Path
from queue import Queue

import numpy as np
import pytest
from torch.utils.data import DataLoader, IterableDataset

from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel


class QueueDataset(IterableDataset):
    def __init__(self, queue: Queue) -> None:
        super().__init__()
        self.queue = queue

    def __iter__(self) -> Iterator:
        for _ in range(5):
            tensor, _ = self.queue.get(timeout=5)
            yield tensor


def train_model(queue: Queue, max_epochs: int, ckpt_path: Path) -> None:
    dataloader = DataLoader(QueueDataset(queue), num_workers=1, batch_size=None)
    trainer = Trainer(
        max_epochs=max_epochs,
        enable_progress_bar=False,
        enable_checkpointing=False,
        devices=1,
        logger=False,
    )
    if ckpt_path.exists():
        trainer.fit(BoringModel(), dataloader, ckpt_path=str(ckpt_path))
    else:
        trainer.fit(BoringModel(), dataloader)
        trainer.save_checkpoint(str(ckpt_path))


@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on macOS due to multiprocessing issues")
def test_resume_training_with(tmp_path):
    """Test resuming training from a checkpoint file using an IterableDataset."""
    q = mp.Queue()
    arr = np.random.random([1, 32]).astype(np.float32)
    for idx in range(20):
        q.put((arr, idx))

    max_epoch = 2
    ckpt_path = tmp_path / "model.ckpt"
    train_model(q, max_epoch, ckpt_path)

    assert os.path.exists(ckpt_path), f"Checkpoint file '{ckpt_path}' wasn't created"

    ckpt_size = os.path.getsize(ckpt_path)
    assert ckpt_size > 0, f"Checkpoint file is empty (size: {ckpt_size} bytes)"

    train_model(q, max_epoch + 2, ckpt_path)
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/loops/test_double_iter_in_iterable_dataset.py", "license": "Apache License 2.0", "lines": 64, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Lightning-AI/pytorch-lightning:tests/tests_pytorch/trainer/connectors/test_logger_connector.py
from unittest.mock import MagicMock, patch

from lightning.pytorch import Trainer
from lightning.pytorch.loggers import Logger
from lightning.pytorch.trainer.connectors.logger_connector import _LoggerConnector


@patch("lightning.pytorch.trainer.connectors.logger_connector.logger_connector.convert_tensors_to_scalars")
def test_uses_provided_step(mock_convert):
    """Test that the LoggerConnector uses explicitly provided step to log metrics."""
    trainer = MagicMock(spec=Trainer)
    trainer.loggers = [logger := MagicMock(spec=Logger)]
    connector = _LoggerConnector(trainer)
    mock_convert.return_value.pop.return_value = step = 42

    connector.log_metrics((metrics := {"some_metric": 123}), step=step)

    assert connector._logged_metrics == metrics
    mock_convert.assert_called_once_with(metrics)
    logger.log_metrics.assert_called_once_with(metrics=mock_convert.return_value, step=step)
    logger.save.assert_called_once_with()


@patch("lightning.pytorch.trainer.connectors.logger_connector.logger_connector.convert_tensors_to_scalars")
def test_uses_step_metric(mock_convert):
    """Test that the LoggerConnector uses explicitly provided step metric to log metrics."""
    trainer = MagicMock(spec=Trainer)
    trainer.loggers = [logger := MagicMock(spec=Logger)]
    connector = _LoggerConnector(trainer)
    mock_convert.return_value.pop.return_value = step = 42.0

    metrics = {"some_metric": 123}
    connector.log_metrics(logged_metrics := {**metrics, "step": step})

    assert connector._logged_metrics == logged_metrics
    mock_convert.assert_called_once_with(logged_metrics)
    logger.log_metrics.assert_called_once_with(metrics=mock_convert.return_value, step=int(step))
    logger.save.assert_called_once_with()


@patch("lightning.pytorch.trainer.connectors.logger_connector.logger_connector.convert_tensors_to_scalars")
def test_uses_batches_that_stepped(mock_convert):
    """Test that the LoggerConnector uses implicitly provided batches_that_stepped to log metrics."""
    trainer = MagicMock(spec=Trainer)
    trainer.fit_loop = MagicMock()
    trainer.loggers = [logger := MagicMock(spec=Logger)]
    connector = _LoggerConnector(trainer)
    mock_convert.return_value.pop.return_value = None

    connector.log_metrics(metrics := {"some_metric": 123})

    assert connector._logged_metrics == metrics
    mock_convert.assert_called_once_with(metrics)
    logger.log_metrics.assert_called_once_with(
        metrics=mock_convert.return_value, step=trainer.fit_loop.epoch_loop._batches_that_stepped
    )
    logger.save.assert_called_once_with()
    mock_convert.return_value.setdefault.assert_called_once_with("epoch", trainer.current_epoch)
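The "step" handling exercised above corresponds to a user-facing pattern: a metric literally named "step" is popped from the logged metrics and used (cast to int) as the logging step, instead of the default batches-that-stepped counter. A minimal sketch of that pattern, where `MyModule` and `compute_loss` are hypothetical placeholders:

# A minimal sketch (`MyModule` and `compute_loss` are hypothetical): log a metric named "step"
# so loggers use it, cast to int, as the x-axis instead of the default step counter.
from lightning.pytorch import LightningModule


class MyModule(LightningModule):
    def training_step(self, batch, batch_idx):
        loss = self.compute_loss(batch)  # hypothetical helper
        self.log("some_metric", loss)
        self.log("step", float(self.current_epoch))  # loggers receive int(step) as the step
        return loss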
{ "repo_id": "Lightning-AI/pytorch-lightning", "file_path": "tests/tests_pytorch/trainer/connectors/test_logger_connector.py", "license": "Apache License 2.0", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
ManimCommunity/manim:scripts/release.py
#!/usr/bin/env python3
"""
Release management tools for Manim.

This script provides commands for preparing and managing Manim releases:

- Generate changelogs from GitHub's release notes API
- Update CITATION.cff with new version information
- Fetch existing release notes for documentation

Usage:
    # Generate changelog for an upcoming release
    uv run python scripts/release.py changelog --base v0.19.0 --version 0.20.0

    # Also update CITATION.cff at the same time
    uv run python scripts/release.py changelog --base v0.19.0 --version 0.20.0 --update-citation

    # Update only CITATION.cff
    uv run python scripts/release.py citation --version 0.20.0

    # Fetch existing release changelogs for documentation
    uv run python scripts/release.py fetch-releases

Requirements:
    - gh CLI installed and authenticated
    - Python 3.11+
"""

from __future__ import annotations

import re
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING

import click

if TYPE_CHECKING:
    from collections.abc import Sequence

# =============================================================================
# Constants
# =============================================================================

REPO = "manimcommunity/manim"
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
CHANGELOG_DIR = REPO_ROOT / "docs" / "source" / "changelog"
CITATION_TEMPLATE = SCRIPT_DIR / "TEMPLATE.cff"
CITATION_FILE = REPO_ROOT / "CITATION.cff"

# Minimum version for fetching historical releases
DEFAULT_MIN_VERSION = "0.18.0"

# =============================================================================
# GitHub CLI Helpers
# =============================================================================


def run_gh(
    args: Sequence[str],
    *,
    check: bool = True,
    suppress_errors: bool = False,
) -> subprocess.CompletedProcess[str]:
    """
    Run a gh CLI command.

    Args:
        args: Arguments to pass to gh
        check: If True, raise on non-zero exit
        suppress_errors: If True, don't print errors to stderr

    Returns:
        CompletedProcess with stdout/stderr
    """
    result = subprocess.run(
        ["gh", *args],
        capture_output=True,
        text=True,
    )
    if (
        result.returncode != 0
        and not suppress_errors
        and "not found" not in result.stderr.lower()
    ):
        click.echo(f"gh error: {result.stderr}", err=True)
    if check and result.returncode != 0:
        raise click.ClickException(f"gh command failed: gh {' '.join(args)}")
    return result


def get_release_tags() -> list[str]:
    """Get all published release tags from GitHub, newest first."""
    result = run_gh(
        ["release", "list", "--repo", REPO, "--limit", "100", "--json", "tagName"],
        check=False,
    )
    if result.returncode != 0 or not result.stdout.strip():
        return []

    import json

    try:
        data = json.loads(result.stdout)
        return [item["tagName"] for item in data]
    except (json.JSONDecodeError, KeyError):
        return []


def get_release_body(tag: str) -> str | None:
    """Get the release body for a published release."""
    result = run_gh(
        ["release", "view", tag, "--repo", REPO, "--json", "body", "--jq", ".body"],
        check=False,
        suppress_errors=True,
    )
    if result.returncode != 0:
        return None
    return result.stdout.strip() or None


def get_release_date(tag: str) -> str | None:
    """Get the release date formatted as 'Month DD, YYYY'."""
    result = run_gh(
        [
            "release",
            "view",
            tag,
            "--repo",
            REPO,
            "--json",
            "createdAt",
            "--jq",
            ".createdAt",
        ],
        check=False,
    )
    if result.returncode != 0:
        return None
    date_str = result.stdout.strip().strip('"')
    try:
        dt = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
        return dt.strftime("%B %d, %Y")
    except ValueError:
        return None


def generate_release_notes(head_tag: str, base_tag: str) -> str:
    """
    Generate release notes using GitHub's API.

    This respects .github/release.yml for PR categorization.
    """
    result = run_gh(
        [
            "api",
            f"repos/{REPO}/releases/generate-notes",
            "--field",
            f"tag_name={head_tag}",
            "--field",
            f"previous_tag_name={base_tag}",
            "--jq",
            ".body",
        ]
    )
    body = result.stdout.strip()
    if not body:
        raise click.ClickException("GitHub API returned empty release notes")
    return body


# =============================================================================
# Version Utilities
# =============================================================================


def normalize_tag(tag: str) -> str:
    """Ensure tag has 'v' prefix."""
    return tag if tag.startswith("v") else f"v{tag}"


def version_from_tag(tag: str) -> str:
    """Extract version from tag (e.g., 'v0.18.0' -> '0.18.0')."""
    return tag[1:] if tag.startswith("v") else tag


def parse_version(version: str) -> tuple[int, ...]:
    """Parse version string into comparable tuple."""
    # Handle post-releases like '0.18.0.post0'
    version = version.replace(".post", "-post")
    parts = []
    for part in version.replace("-", ".").split("."):
        try:
            parts.append(int(part))
        except ValueError:
            continue
    # Pad to at least 3 components
    while len(parts) < 3:
        parts.append(0)
    return tuple(parts)


def version_gte(version: str, min_version: str) -> bool:
    """Check if version >= min_version."""
    return parse_version(version) >= parse_version(min_version)


# =============================================================================
# Markdown Conversion
# =============================================================================


def convert_to_myst(body: str) -> str:
    """
    Convert GitHub markdown to MyST format.

    - PR URLs -> {pr}`NUMBER`
    - Issue URLs -> {issue}`NUMBER`
    - @mentions -> {user}`USERNAME`
    - Strips HTML comments
    - Ensures proper list spacing
    """
    lines = body.split("\n")
    result = []
    prev_bullet = False

    for line in lines:
        # Skip HTML comments
        if line.strip().startswith("<!--") and line.strip().endswith("-->"):
            continue

        # Convert URLs to extlinks
        line = re.sub(
            r"https://github\.com/ManimCommunity/manim/pull/(\d+)",
            r"{pr}`\1`",
            line,
        )
        line = re.sub(
            r"https://github\.com/ManimCommunity/manim/issues/(\d+)",
            r"{issue}`\1`",
            line,
        )
        line = re.sub(r"@([a-zA-Z0-9_-]+)", r"{user}`\1`", line)

        if line.startswith("**Full Changelog**:"):
            _, url = line.split(":", 1)
            url = url.strip().replace("vmain", "main")
            line = f"**Full Changelog**: [Compare view]({url})"

        # Handle list spacing
        is_bullet = line.strip().startswith("*") and not line.strip().startswith("**")
        if prev_bullet and not is_bullet and line.strip():
            result.append("")
        result.append(line)
        prev_bullet = is_bullet

    return "\n".join(result)


def format_changelog(
    version: str,
    body: str,
    date: str | None = None,
    title: str | None = None,
) -> str:
    """Create changelog file content with MyST frontmatter."""
    title = title or f"v{version}"
    body = convert_to_myst(body)
    date_section = f"Date\n: {date}\n" if date else ""

    return f"""---
short-title: {title}
description: Changelog for {title}
---

# {title}

{date_section}
{body}
"""


# =============================================================================
# File Operations
# =============================================================================


def get_existing_versions() -> set[str]:
    """Get versions that already have changelog files."""
    if not CHANGELOG_DIR.exists():
        return set()
    return {
        f.stem.replace("-changelog", "") for f in CHANGELOG_DIR.glob("*-changelog.*")
    }


def save_changelog(version: str, content: str) -> Path:
    """Save changelog and return filepath."""
    filepath = CHANGELOG_DIR / f"{version}-changelog.md"
    filepath.write_text(content)
    return filepath


def update_citation(version: str, date: str | None = None) -> Path:
    """Update CITATION.cff from template."""
    if not CITATION_TEMPLATE.exists():
        raise click.ClickException(f"Citation template not found: {CITATION_TEMPLATE}")

    date = date or datetime.now().strftime("%Y-%m-%d")
    version_tag = normalize_tag(version)

    content = CITATION_TEMPLATE.read_text()
    content = content.replace("<version>", version_tag)
    content = content.replace("<date_released>", date)

    CITATION_FILE.write_text(content)
    return CITATION_FILE


# =============================================================================
# CLI Commands
# =============================================================================


@click.group()
@click.option(
    "--dry-run", is_flag=True, help="Show what would be done without making changes"
)
@click.pass_context
def cli(ctx: click.Context, dry_run: bool) -> None:
    """Release management tools for Manim."""
    ctx.ensure_object(dict)
    ctx.obj["dry_run"] = dry_run


@cli.command()
@click.option("--base", required=True, help="Base tag for comparison (e.g., v0.19.0)")
@click.option(
    "--version", "version", required=True, help="New version number (e.g., 0.20.0)"
)
@click.option("--head", default="main", help="Head ref for comparison (default: main)")
@click.option("--title", help="Custom changelog title (default: vX.Y.Z)")
@click.option(
    "--update-citation",
    "also_update_citation",
    is_flag=True,
    help="Also update CITATION.cff",
)
@click.pass_context
def changelog(
    ctx: click.Context,
    base: str,
    version: str,
    head: str,
    title: str | None,
    also_update_citation: bool,
) -> None:
    """Generate changelog for an upcoming release.

    Uses GitHub's release notes API with .github/release.yml categorization.
    """
    dry_run = ctx.obj["dry_run"]
    base_tag = normalize_tag(base)
    head_tag = normalize_tag(head) if head != "main" else normalize_tag(version)

    click.echo(f"Generating changelog for v{version}...")
    click.echo(f"  Comparing: {base_tag} → {head}")

    body = generate_release_notes(head_tag, base_tag)
    date = datetime.now().strftime("%B %d, %Y")
    content = format_changelog(version, body, date=date, title=title)

    if dry_run:
        click.echo()
        click.secho("[DRY RUN]", fg="yellow", bold=True)
        click.echo(f"  Would save: {CHANGELOG_DIR / f'{version}-changelog.md'}")
        if also_update_citation:
            click.echo(f"  Would update: {CITATION_FILE}")
        click.echo()
        click.echo("--- Generated changelog ---")
        click.echo(content)
        return

    filepath = save_changelog(version, content)
    click.secho(f"  ✓ Saved: {filepath}", fg="green")

    if also_update_citation:
        citation_path = update_citation(version)
        click.secho(f"  ✓ Updated: {citation_path}", fg="green")

    click.echo()
    click.echo("Next steps:")
    click.echo("  • Review and edit the changelog as needed")
    click.echo("  • Update docs/source/changelog.rst to include the new file")


@cli.command()
@click.option(
    "--version", "version", required=True, help="Version number (e.g., 0.20.0)"
)
@click.option("--date", help="Release date as YYYY-MM-DD (default: today)")
@click.pass_context
def citation(ctx: click.Context, version: str, date: str | None) -> None:
    """Update CITATION.cff for a release."""
    dry_run = ctx.obj["dry_run"]
    display_date = date or datetime.now().strftime("%Y-%m-%d")

    if dry_run:
        click.secho("[DRY RUN]", fg="yellow", bold=True)
        click.echo(f"  Would update: {CITATION_FILE}")
        click.echo(f"  Version: v{version}")
        click.echo(f"  Date: {display_date}")
        return

    filepath = update_citation(version, date)
    click.secho(f"✓ Updated: {filepath}", fg="green")
    click.echo(f"  Version: v{version}")
    click.echo(f"  Date: {display_date}")


@cli.command("fetch-releases")
@click.option("--tag", help="Fetch a specific release tag")
@click.option(
    "--min-version",
    default=DEFAULT_MIN_VERSION,
    help=f"Minimum version to fetch (default: {DEFAULT_MIN_VERSION})",
)
@click.option("--force", is_flag=True, help="Overwrite existing changelog files")
@click.pass_context
def fetch_releases(
    ctx: click.Context,
    tag: str | None,
    min_version: str,
    force: bool,
) -> None:
    """Fetch existing release changelogs from GitHub.

    Converts GitHub release notes to documentation-ready MyST markdown.
    """
    dry_run = ctx.obj["dry_run"]
    existing = get_existing_versions()

    # Single tag mode
    if tag:
        tag = normalize_tag(tag)
        version = version_from_tag(tag)
        if version in existing and not force:
            click.echo(
                f"Changelog for {version} already exists. Use --force to overwrite."
) return if dry_run: click.secho("[DRY RUN]", fg="yellow", bold=True) click.echo(f" Would fetch: {version}") return _fetch_single_release(tag, version) return # Batch mode click.echo(f"Existing versions: {', '.join(sorted(existing)) or '(none)'}") click.echo("Fetching release list...") tags = get_release_tags() click.echo(f"Found {len(tags)} releases") fetched = 0 prev_tag = None for tag in reversed(tags): version = version_from_tag(tag) if not version_gte(version, min_version): prev_tag = tag continue if version in existing and not force: click.echo(f" Skipping {version} (exists)") prev_tag = tag continue if dry_run: click.echo(f" [DRY RUN] Would fetch {version}") fetched += 1 else: if _fetch_single_release(tag, version, prev_tag): existing.add(version) fetched += 1 prev_tag = tag click.echo() click.echo(f"Processed {fetched} changelog(s)") if fetched > 0 and not dry_run: click.echo() click.echo("Next steps:") click.echo(" • Update docs/source/changelog.rst to include new files") def _fetch_single_release(tag: str, version: str, prev_tag: str | None = None) -> bool: """Fetch and save a single release changelog.""" click.echo(f" Fetching {version}...") body = get_release_body(tag) if not body and prev_tag: click.echo(f" No body, generating from {prev_tag}...") try: body = generate_release_notes(tag, prev_tag) except click.ClickException: body = None if not body: click.secho(f" ✗ Could not get release notes for {tag}", fg="red", err=True) return False date = get_release_date(tag) content = format_changelog(version, body, date=date) filepath = save_changelog(version, content) click.secho(f" ✓ Saved: {filepath}", fg="green") return True # ============================================================================= # Entry Point # ============================================================================= def main() -> None: """Entry point.""" cli() if __name__ == "__main__": sys.exit(main() or 0)
{ "repo_id": "ManimCommunity/manim", "file_path": "scripts/release.py", "license": "MIT License", "lines": 429, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
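A minimal usage sketch for the version utilities in scripts/release.py above. This is illustrative only: it assumes the scripts/ directory is on sys.path so the script imports as a module named release, which is not how the CLI is normally invoked.

# Hedged sketch: how the version helpers order tags in fetch-releases.
from release import normalize_tag, parse_version, version_from_tag, version_gte  # assumed import path

assert normalize_tag("0.20.0") == "v0.20.0"
assert version_from_tag("v0.20.0") == "0.20.0"

# Versions are padded to at least three numeric components...
assert parse_version("0.19") == (0, 19, 0)
# ...and non-numeric parts such as 'post0' are skipped, so a post-release
# compares equal to its base version under this scheme.
assert parse_version("0.18.0.post0") == parse_version("0.18.0")

assert version_gte("0.19.0", "0.18.0")
assert not version_gte("0.17.5", "0.18.0")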
ManimCommunity/manim:tests/module/animation/test_updaters.py
from __future__ import annotations

from manim import UP, Circle, Dot, FadeIn
from manim.animation.updaters.mobject_update_utils import turn_animation_into_updater


def test_turn_animation_into_updater_zero_run_time():
    """Test that turn_animation_into_updater handles zero run_time correctly."""
    # Create a simple mobject and animation
    mobject = Circle()
    animation = FadeIn(mobject, run_time=0)

    original_updaters = mobject.updaters.copy()

    # Call turn_animation_into_updater
    result = turn_animation_into_updater(animation)

    # Verify mobject is returned
    assert result is mobject

    # Get the updater that was added
    assert len(mobject.updaters) == len(original_updaters) + 1
    updater = mobject.updaters[-1]

    # Simulate calling the updater
    updater(mobject, dt=0.1)

    # The updater should have finished and removed itself
    assert len(mobject.updaters) == len(original_updaters)
    assert updater not in mobject.updaters

    # Animation should be in finished state
    assert animation.total_time >= 0


def test_turn_animation_into_updater_positive_run_time_persists():
    """Test that updater persists with positive run_time."""
    mobject = Circle()
    animation = FadeIn(mobject, run_time=1.0)

    original_updaters = mobject.updaters.copy()

    # Call turn_animation_into_updater
    turn_animation_into_updater(animation)

    # Get the updater that was added
    updater = mobject.updaters[-1]

    # Simulate calling the updater (partial progress)
    updater(mobject, dt=0.1)

    # The updater should still be present (not finished)
    assert len(mobject.updaters) == len(original_updaters) + 1
    assert updater in mobject.updaters


def test_always():
    d = Dot()
    circ = Circle()
    d.always.next_to(circ, UP)
    assert len(d.updaters) == 1

    # we should be able to chain updaters
    d2 = Dot()
    d.always.next_to(d2, UP).next_to(circ, UP)
    assert len(d.updaters) == 3
{ "repo_id": "ManimCommunity/manim", "file_path": "tests/module/animation/test_updaters.py", "license": "MIT License", "lines": 48, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
ManimCommunity/manim:tests/module/mobject/test_table.py
"""Tests for Table and related mobjects.""" from __future__ import annotations from manim import Table from manim.utils.color import GREEN def test_highlighted_cell_color_access(): """Test that accessing the color of a highlighted cell doesn't cause infinite recursion. Regression test for https://github.com/ManimCommunity/manim/issues/4419 """ table = Table([["This", "is a"], ["simple", "table"]]) rect = table.get_highlighted_cell((1, 1), color=GREEN) # Should not raise RecursionError color = rect.color assert color == GREEN
{ "repo_id": "ManimCommunity/manim", "file_path": "tests/module/mobject/test_table.py", "license": "MIT License", "lines": 13, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
ManimCommunity/manim:tests/module/mobject/types/vectorized_mobject/test_dashed_vmobject.py
from manim import ORIGIN, UR, Arrow, DashedLine, DashedVMobject, VGroup from manim.mobject.geometry.tips import ArrowTip, StealthTip def _collect_tips(mobject): return [mob for mob in mobject.get_family() if isinstance(mob, ArrowTip)] def test_dashed_arrow_has_single_tip(): dashed = DashedVMobject(Arrow(ORIGIN, 2 * UR)) tips = _collect_tips(dashed) assert len(tips) == 1 def test_dashed_arrow_tip_not_duplicated_in_group_opacity(): base_arrow = Arrow(ORIGIN, 2 * UR) faded_arrow = base_arrow.copy().set_fill(opacity=0.4).set_stroke(opacity=0.4) dashed_group = ( VGroup(DashedVMobject(faded_arrow)) .set_fill(opacity=0.4, family=True) .set_stroke(opacity=0.4, family=True) ) tips = _collect_tips(dashed_group) assert len(tips) == 1 def test_dashed_arrow_custom_tip_shape_has_single_tip(): dashed = DashedVMobject(Arrow(ORIGIN, 2 * UR, tip_shape=StealthTip)) tips = _collect_tips(dashed) assert len(tips) == 1 assert isinstance(tips[0], StealthTip) def test_dashed_arrow_with_start_tip_has_two_tips(): dashed = DashedVMobject(Arrow(ORIGIN, 2 * UR).add_tip(at_start=True)) tips = _collect_tips(dashed) assert len(tips) == 2 def test_zero_length_dashed_line_submobjects_have_2d_points(): """Submobjects of a zero-length DashedLine must have 2-D point arrays.""" line = DashedLine(ORIGIN, ORIGIN) for sub in line.submobjects: assert sub.points.ndim == 2, ( f"Expected 2-D points array, got shape {sub.points.shape}" ) def test_become_nonzero_to_zero_dashed_line_does_not_crash(): """become() from a normal DashedLine to a zero-length one should not crash.""" normal = DashedLine(ORIGIN, 2 * UR) zero = DashedLine(ORIGIN, ORIGIN) normal.become(zero)
{ "repo_id": "ManimCommunity/manim", "file_path": "tests/module/mobject/types/vectorized_mobject/test_dashed_vmobject.py", "license": "MIT License", "lines": 39, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
ManimCommunity/manim:tests/module/animation/test_transform.py
from __future__ import annotations from manim import Circle, ReplacementTransform, Scene, Square, VGroup def test_no_duplicate_references(): scene = Scene() c = Circle() sq = Square() scene.add(c, sq) scene.play(ReplacementTransform(c, sq)) assert len(scene.mobjects) == 1 assert scene.mobjects[0] is sq def test_duplicate_references_in_group(): scene = Scene() c = Circle() sq = Square() vg = VGroup(c, sq) scene.add(vg) scene.play(ReplacementTransform(c, sq)) submobs = vg.submobjects assert len(submobs) == 1 assert submobs[0] is sq
{ "repo_id": "ManimCommunity/manim", "file_path": "tests/module/animation/test_transform.py", "license": "MIT License", "lines": 20, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
ManimCommunity/manim:manim/data_structures.py
"""Data classes and other necessary data structures for use in Manim.""" from __future__ import annotations from collections.abc import Iterable from dataclasses import dataclass from types import MethodType from typing import Any @dataclass class MethodWithArgs: """Object containing a :attr:`method` which is intended to be called later with the positional arguments :attr:`args` and the keyword arguments :attr:`kwargs`. Attributes ---------- method : MethodType A callable representing a method of some class. args : Iterable[Any] Positional arguments for :attr:`method`. kwargs : dict[str, Any] Keyword arguments for :attr:`method`. """ __slots__ = ["method", "args", "kwargs"] method: MethodType args: Iterable[Any] kwargs: dict[str, Any]
{ "repo_id": "ManimCommunity/manim", "file_path": "manim/data_structures.py", "license": "MIT License", "lines": 24, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
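A short usage sketch for MethodWithArgs above (illustrative only; the Counter class is hypothetical and not part of Manim):

from manim.data_structures import MethodWithArgs


class Counter:
    """Hypothetical helper used only for this illustration."""

    def __init__(self) -> None:
        self.value = 0

    def add(self, amount: int, *, twice: bool = False) -> None:
        self.value += amount * (2 if twice else 1)


counter = Counter()
# Capture the bound method now; invoke it later with the stored arguments.
pending = MethodWithArgs(counter.add, (3,), {"twice": True})
pending.method(*pending.args, **pending.kwargs)
assert counter.value == 6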
ManimCommunity/manim:tests/utils/test_polylabels.py
import numpy as np import pytest from manim.utils.polylabel import Cell, Polygon, polylabel # Test simple square and square with a hole for inside/outside logic @pytest.mark.parametrize( ("rings", "inside_points", "outside_points"), [ ( # Simple square: basic convex polygon [[[0, 0], [4, 0], [4, 4], [0, 4], [0, 0]]], # rings [ [2, 2], [1, 1], [3.9, 3.9], [0, 0], [2, 0], [0, 2], [0, 4], [4, 0], [4, 2], [2, 4], [4, 4], ], # inside points [[-1, -1], [5, 5], [4.1, 2]], # outside points ), ( # Square with a square hole (donut shape): tests handling of interior voids [ [[1, 1], [5, 1], [5, 5], [1, 5], [1, 1]], [[2, 2], [2, 4], [4, 4], [4, 2], [2, 2]], ], # rings [[1.5, 1.5], [3, 1.5], [1.5, 3]], # inside points [[3, 3], [6, 6], [0, 0]], # outside points ), ( # Non-convex polygon (same shape as flags used in Brazilian june festivals) [[[0, 0], [2, 2], [4, 0], [4, 4], [0, 4], [0, 0]]], # rings [[1, 3], [3.9, 3.9], [2, 3.5]], # inside points [ [0.1, 0], [1, 0], [2, 0], [2, 1], [2, 1.9], [3, 0], [3.9, 0], ], # outside points ), ], ) def test_polygon_inside_outside(rings, inside_points, outside_points): polygon = Polygon(rings) for point in inside_points: assert polygon.inside(point) for point in outside_points: assert not polygon.inside(point) # Test distance calculation with known expected distances @pytest.mark.parametrize( ("rings", "points", "expected_distance"), [ ( [[[0, 0], [4, 0], [4, 4], [0, 4], [0, 0]]], # rings [[2, 2]], # points 2.0, # Distance from center to closest edge in square ), ( [[[0, 0], [4, 0], [4, 4], [0, 4], [0, 0]]], # rings [[0, 0], [2, 0], [4, 2], [2, 4], [0, 2]], # points 0.0, # On the edge ), ( [[[0, 0], [4, 0], [4, 4], [0, 4], [0, 0]]], # rings [[5, 5]], # points -np.sqrt(2), # Outside and diagonally offset ), ], ) def test_polygon_compute_distance(rings, points, expected_distance): polygon = Polygon(rings) for point in points: result = polygon.compute_distance(np.array(point)) assert pytest.approx(result, rel=1e-3) == expected_distance @pytest.mark.parametrize( ("center", "h", "rings"), [ ( [2, 2], # center 1.0, # h [[[0, 0], [4, 0], [4, 4], [0, 4], [0, 0]]], # rings ), ( [3, 1.5], # center 0.5, # h [ [[1, 1], [5, 1], [5, 5], [1, 5], [1, 1]], [[2, 2], [2, 4], [4, 4], [4, 2], [2, 2]], ], # rings ), ], ) def test_cell(center, h, rings): polygon = Polygon(rings) cell = Cell(center, h, polygon) assert isinstance(cell.d, float) assert isinstance(cell.p, float) assert np.allclose(cell.c, center) assert cell.h == h other = Cell(np.add(center, [0.1, 0.1]), h, polygon) assert (cell < other) == (cell.d < other.d) assert (cell > other) == (cell.d > other.d) assert (cell <= other) == (cell.d <= other.d) assert (cell >= other) == (cell.d >= other.d) @pytest.mark.parametrize( ("rings", "expected_centers"), [ ( # Simple square: basic convex polygon [[[0, 0], [4, 0], [4, 4], [0, 4], [0, 0]]], [[2.0, 2.0]], # single correct pole of inaccessibility ), ( # Square with a square hole (donut shape): tests handling of interior voids [ [[1, 1], [5, 1], [5, 5], [1, 5], [1, 1]], [[2, 2], [2, 4], [4, 4], [4, 2], [2, 2]], ], [ # any of the four pole of inaccessibility options [1.5, 1.5], [1.5, 4.5], [4.5, 1.5], [4.5, 4.5], ], ), ], ) def test_polylabel(rings, expected_centers): # Add third dimension to conform to polylabel input format rings_3d = [np.column_stack([ring, np.zeros(len(ring))]) for ring in rings] result = polylabel(rings_3d, precision=0.01) assert isinstance(result, Cell) assert result.h <= 0.01 assert result.d >= 0.0 match_found = any(np.allclose(result.c, ec, atol=0.1) for ec in 
expected_centers)
    assert match_found, f"Expected one of {expected_centers}, but got {result.c}"
{ "repo_id": "ManimCommunity/manim", "file_path": "tests/utils/test_polylabels.py", "license": "MIT License", "lines": 144, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
ManimCommunity/manim:tests/module/mobject/test_matrix.py
from __future__ import annotations import numpy as np import pytest from manim.mobject.matrix import ( DecimalMatrix, IntegerMatrix, Matrix, ) from manim.mobject.text.tex_mobject import MathTex from manim.mobject.types.vectorized_mobject import VGroup class TestMatrix: @pytest.mark.parametrize( ( "matrix_elements", "left_bracket", "right_bracket", "expected_rows", "expected_columns", ), [ ([[1, 2], [3, 4]], "[", "]", 2, 2), ([[1, 2, 3]], "[", "]", 1, 3), ([[1], [2], [3]], "[", "]", 3, 1), ([[5]], "[", "]", 1, 1), ([[1, 0], [0, 1]], "(", ")", 2, 2), ([["a", "b"], ["c", "d"]], "[", "]", 2, 2), (np.array([[10, 20], [30, 40]]), "[", "]", 2, 2), ], ids=[ "2x2_default", "1x3_default", "3x1_default", "1x1_default", "2x2_parentheses", "2x2_strings", "2x2_numpy", ], ) def test_matrix_init_valid( self, matrix_elements, left_bracket, right_bracket, expected_rows, expected_columns, ): matrix = Matrix( matrix_elements, left_bracket=left_bracket, right_bracket=right_bracket ) assert isinstance(matrix, Matrix) assert matrix.left_bracket == left_bracket assert matrix.right_bracket == right_bracket assert len(matrix.get_rows()) == expected_rows assert len(matrix.get_columns()) == expected_columns @pytest.mark.parametrize( ("invalid_elements", "expected_error"), [ (10, TypeError), (10.4, TypeError), ([1, 2, 3], TypeError), ], ids=[ "integer", "float", "flat_list", ], ) def test_matrix_init_invalid(self, invalid_elements, expected_error): with pytest.raises(expected_error): Matrix(invalid_elements) @pytest.mark.parametrize( ("matrix_elements", "expected_columns"), [ ([[1, 2], [3, 4]], 2), ([[1, 2, 3]], 3), ([[1], [2], [3]], 1), ], ids=["2x2", "1x3", "3x1"], ) def test_get_columns(self, matrix_elements, expected_columns): matrix = Matrix(matrix_elements) assert isinstance(matrix, Matrix) assert len(matrix.get_columns()) == expected_columns for column in matrix.get_columns(): assert isinstance(column, VGroup) @pytest.mark.parametrize( ("matrix_elements", "expected_rows"), [ ([[1, 2], [3, 4]], 2), ([[1, 2, 3]], 1), ([[1], [2], [3]], 3), ], ids=["2x2", "1x3", "3x1"], ) def test_get_rows(self, matrix_elements, expected_rows): matrix = Matrix(matrix_elements) assert isinstance(matrix, Matrix) assert len(matrix.get_rows()) == expected_rows for row in matrix.get_rows(): assert isinstance(row, VGroup) @pytest.mark.parametrize( ("matrix_elements", "expected_entries_tex_string", "expected_entries_count"), [ ([[1, 2], [3, 4]], ["1", "2", "3", "4"], 4), ([[1, 2, 3]], ["1", "2", "3"], 3), ], ids=["2x2", "1x3"], ) def test_get_entries( self, matrix_elements, expected_entries_tex_string, expected_entries_count ): matrix = Matrix(matrix_elements) entries = matrix.get_entries() assert isinstance(matrix, Matrix) assert len(entries) == expected_entries_count for index_entry, entry in enumerate(entries): assert isinstance(entry, MathTex) assert expected_entries_tex_string[index_entry] == entry.tex_string @pytest.mark.parametrize( ("matrix_elements", "row", "column", "expected_value_str"), [ ([[1, 2], [3, 4]], 0, 0, "1"), ([[1, 2], [3, 4]], 1, 1, "4"), ([[1, 2, 3]], 0, 2, "3"), ([[1], [2], [3]], 2, 0, "3"), ], ids=["2x2_00", "2x2_11", "1x3_02", "3x1_20"], ) def test_get_element(self, matrix_elements, row, column, expected_value_str): matrix = Matrix(matrix_elements) assert isinstance(matrix.get_columns()[column][row], MathTex) assert isinstance(matrix.get_rows()[row][column], MathTex) assert matrix.get_columns()[column][row].tex_string == expected_value_str assert matrix.get_rows()[row][column].tex_string == expected_value_str 
@pytest.mark.parametrize( ("matrix_elements", "row", "column", "expected_error"), [ ([[1, 2]], 1, 0, IndexError), ([[1, 2]], 0, 2, IndexError), ], ids=["row_out_of_bounds", "col_out_of_bounds"], ) def test_get_element_invalid(self, matrix_elements, row, column, expected_error): matrix = Matrix(matrix_elements) with pytest.raises(expected_error): matrix.get_columns()[column][row] with pytest.raises(expected_error): matrix.get_rows()[row][column] class TestDecimalMatrix: @pytest.mark.parametrize( ("matrix_elements", "num_decimal_places", "expected_elements"), [ ([[1.234, 5.678], [9.012, 3.456]], 2, [[1.234, 5.678], [9.012, 3.456]]), ([[1.0, 2.0], [3.0, 4.0]], 0, [[1, 2], [3, 4]]), ([[1, 2.3], [4.567, 7]], 1, [[1.0, 2.3], [4.567, 7.0]]), ], ids=[ "basic_2_decimal_points", "basic_0_decimal_points", "mixed_1_decimal_points", ], ) def test_decimal_matrix_init( self, matrix_elements, num_decimal_places, expected_elements ): matrix = DecimalMatrix( matrix_elements, element_to_mobject_config={"num_decimal_places": num_decimal_places}, ) assert isinstance(matrix, DecimalMatrix) for column_index, column in enumerate(matrix.get_columns()): for row_index, element in enumerate(column): assert element.number == expected_elements[row_index][column_index] assert element.num_decimal_places == num_decimal_places class TestIntegerMatrix: @pytest.mark.parametrize( ("matrix_elements", "expected_elements"), [ ([[1, 2], [3, 4]], [[1, 2], [3, 4]]), ([[1.2, 2.8], [3.5, 4]], [[1.2, 2.8], [3.5, 4]]), ], ids=["basic_int", "mixed_float_int"], ) def test_integer_matrix_init(self, matrix_elements, expected_elements): matrix = IntegerMatrix(matrix_elements) assert isinstance(matrix, IntegerMatrix) for row_index, row in enumerate(matrix.get_rows()): for column_index, element in enumerate(row): assert element.number == expected_elements[row_index][column_index]
{ "repo_id": "ManimCommunity/manim", "file_path": "tests/module/mobject/test_matrix.py", "license": "MIT License", "lines": 189, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
OpenBMB/ChatDev:entity/configs/node/loop_timer.py
"""Configuration for loop timer guard nodes.""" from dataclasses import dataclass from typing import Mapping, Any, Optional from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, EnumOption, require_mapping, extend_path, optional_str, ) @dataclass class LoopTimerConfig(BaseConfig): """Configuration schema for the loop timer node type.""" max_duration: float = 60.0 duration_unit: str = "seconds" reset_on_emit: bool = True message: Optional[str] = None passthrough: bool = False @classmethod def from_dict( cls, data: Mapping[str, Any] | None, *, path: str ) -> "LoopTimerConfig": mapping = require_mapping(data or {}, path) max_duration_raw = mapping.get("max_duration", 60.0) try: max_duration = float(max_duration_raw) except (TypeError, ValueError) as exc: # pragma: no cover - defensive raise ConfigError( "max_duration must be a number", extend_path(path, "max_duration"), ) from exc if max_duration <= 0: raise ConfigError( "max_duration must be > 0", extend_path(path, "max_duration") ) duration_unit = str(mapping.get("duration_unit", "seconds")) valid_units = ["seconds", "minutes", "hours"] if duration_unit not in valid_units: raise ConfigError( f"duration_unit must be one of: {', '.join(valid_units)}", extend_path(path, "duration_unit"), ) reset_on_emit = bool(mapping.get("reset_on_emit", True)) message = optional_str(mapping, "message", path) passthrough = bool(mapping.get("passthrough", False)) return cls( max_duration=max_duration, duration_unit=duration_unit, reset_on_emit=reset_on_emit, message=message, passthrough=passthrough, path=path, ) def validate(self) -> None: if self.max_duration <= 0: raise ConfigError( "max_duration must be > 0", extend_path(self.path, "max_duration") ) valid_units = ["seconds", "minutes", "hours"] if self.duration_unit not in valid_units: raise ConfigError( f"duration_unit must be one of: {', '.join(valid_units)}", extend_path(self.path, "duration_unit"), ) FIELD_SPECS = { "max_duration": ConfigFieldSpec( name="max_duration", display_name="Maximum Duration", type_hint="float", required=True, default=60.0, description="How long the loop can run before this node emits an output.", ), "duration_unit": ConfigFieldSpec( name="duration_unit", display_name="Duration Unit", type_hint="str", required=True, default="seconds", description="Unit of time for max_duration: 'seconds', 'minutes', or 'hours'.", enum=["seconds", "minutes", "hours"], enum_options=[ EnumOption( value="seconds", label="Seconds", description="Time in seconds" ), EnumOption( value="minutes", label="Minutes", description="Time in minutes" ), EnumOption(value="hours", label="Hours", description="Time in hours"), ], ), "reset_on_emit": ConfigFieldSpec( name="reset_on_emit", display_name="Reset After Emit", type_hint="bool", required=False, default=True, description="Whether to reset the internal timer after reaching the limit.", advance=True, ), "message": ConfigFieldSpec( name="message", display_name="Release Message", type_hint="text", required=False, description="Optional text sent downstream once the time limit is reached.", advance=True, ), "passthrough": ConfigFieldSpec( name="passthrough", display_name="Passthrough Mode", type_hint="bool", required=False, default=False, description="If true, after emitting the limit message, all subsequent inputs pass through unchanged.", advance=True, ), }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/loop_timer.py", "license": "Apache License 2.0", "lines": 120, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
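A hedged sketch of parsing and validating a raw mapping with the LoopTimerConfig above. It assumes the entity.configs package from this repo is importable and that BaseConfig accepts a path field as the code suggests; the node path string is arbitrary.

from entity.configs.base import ConfigError
from entity.configs.node.loop_timer import LoopTimerConfig

raw = {"max_duration": 5, "duration_unit": "minutes", "passthrough": True}
config = LoopTimerConfig.from_dict(raw, path="nodes[0].config")
assert config.max_duration == 5.0
assert config.duration_unit == "minutes"
assert config.reset_on_emit is True  # default preserved

try:
    LoopTimerConfig.from_dict({"duration_unit": "days"}, path="nodes[0].config")
except ConfigError:
    pass  # 'days' is rejected; only seconds/minutes/hours are allowed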
OpenBMB/ChatDev:runtime/node/executor/loop_timer_executor.py
"""Loop timer guard node executor.""" import time from typing import List, Dict, Any from entity.configs import Node from entity.configs.node.loop_timer import LoopTimerConfig from entity.messages import Message, MessageRole from runtime.node.executor.base import NodeExecutor class LoopTimerNodeExecutor(NodeExecutor): """Track loop duration and emit output only after hitting the time limit. Supports two modes: 1. Standard Mode (passthrough=False): Suppresses input until time limit, then emits message 2. Terminal Gate Mode (passthrough=True): Acts as a sequential switch - Before limit: Pass input through unchanged - At limit: Emit configured message, suppress original input - After limit: Transparent gate, pass all subsequent messages through """ STATE_KEY = "loop_timer" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: config = node.as_config(LoopTimerConfig) if config is None: raise ValueError(f"Node {node.id} missing loop_timer configuration") state = self._get_state() timer_state = state.setdefault(node.id, {}) # Initialize timer on first execution current_time = time.time() if "start_time" not in timer_state: timer_state["start_time"] = current_time timer_state["emitted"] = False start_time = timer_state["start_time"] elapsed_time = current_time - start_time # Convert max_duration to seconds based on unit max_duration_seconds = self._convert_to_seconds( config.max_duration, config.duration_unit ) # Check if time limit has been reached limit_reached = elapsed_time >= max_duration_seconds # Terminal Gate Mode (passthrough=True) if config.passthrough: if not limit_reached: # Before limit: pass input through unchanged self.log_manager.debug( f"LoopTimer {node.id}: {elapsed_time:.1f}s / {max_duration_seconds:.1f}s " f"(passthrough mode: forwarding input)" ) return inputs elif not timer_state["emitted"]: # At limit: emit configured message, suppress original input timer_state["emitted"] = True if config.reset_on_emit: timer_state["start_time"] = current_time content = ( config.message or f"Time limit reached ({config.max_duration} {config.duration_unit})" ) metadata = { "loop_timer": { "elapsed_time": elapsed_time, "max_duration": config.max_duration, "duration_unit": config.duration_unit, "reset_on_emit": config.reset_on_emit, "passthrough": True, } } self.log_manager.debug( f"LoopTimer {node.id}: {elapsed_time:.1f}s / {max_duration_seconds:.1f}s " f"(passthrough mode: emitting limit message)" ) return [ Message( role=MessageRole.ASSISTANT, content=content, metadata=metadata, ) ] else: # After limit: transparent gate, pass all subsequent messages through self.log_manager.debug( f"LoopTimer {node.id}: {elapsed_time:.1f}s (passthrough mode: transparent gate)" ) return inputs # Standard Mode (passthrough=False) if not limit_reached: self.log_manager.debug( f"LoopTimer {node.id}: {elapsed_time:.1f}s / {max_duration_seconds:.1f}s " f"(suppress downstream)" ) return [] if config.reset_on_emit and not timer_state["emitted"]: timer_state["start_time"] = current_time timer_state["emitted"] = True content = ( config.message or f"Time limit reached ({config.max_duration} {config.duration_unit})" ) metadata = { "loop_timer": { "elapsed_time": elapsed_time, "max_duration": config.max_duration, "duration_unit": config.duration_unit, "reset_on_emit": config.reset_on_emit, "passthrough": False, } } self.log_manager.debug( f"LoopTimer {node.id}: {elapsed_time:.1f}s / {max_duration_seconds:.1f}s " f"reached limit, releasing output" ) return [ Message( role=MessageRole.ASSISTANT, 
content=content, metadata=metadata, ) ] def _get_state(self) -> Dict[str, Dict[str, Any]]: return self.context.global_state.setdefault(self.STATE_KEY, {}) def _convert_to_seconds(self, duration: float, unit: str) -> float: """Convert duration to seconds based on unit.""" unit_multipliers = { "seconds": 1.0, "minutes": 60.0, "hours": 3600.0, } return duration * unit_multipliers.get(unit, 1.0)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/loop_timer_executor.py", "license": "Apache License 2.0", "lines": 124, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
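A hedged, standalone illustration of the per-execution decision table that LoopTimerNodeExecutor.execute above implements. This is not the executor's real API; reset_on_emit and timer-state bookkeeping are omitted for clarity.

def gate_decision(elapsed: float, limit: float, emitted: bool, passthrough: bool) -> str:
    limit_reached = elapsed >= limit
    if passthrough:
        if not limit_reached:
            return "forward input"       # before limit: transparent
        if not emitted:
            return "emit limit message"  # at limit: replace input once
        return "forward input"           # after limit: transparent gate
    if not limit_reached:
        return "suppress"                # standard mode blocks downstream
    return "emit limit message"          # standard mode releases at the limit


assert gate_decision(10.0, 60.0, False, passthrough=True) == "forward input"
assert gate_decision(61.0, 60.0, False, passthrough=True) == "emit limit message"
assert gate_decision(70.0, 60.0, True, passthrough=True) == "forward input"
assert gate_decision(10.0, 60.0, False, passthrough=False) == "suppress"
assert gate_decision(61.0, 60.0, True, passthrough=False) == "emit limit message"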
OpenBMB/ChatDev:tools/sync_vuegraphs.py
""" Synchronize YAML Configurations to VueGraph Database This tool uploads local YAML workflow configurations from the yaml_instance/ directory to the VueGraph database via the API endpoint. This is essential for making workflow configurations available to the frontend visualization system. Purpose: - Ensures the database reflects the latest YAML configurations - Required after modifying workflow YAML files to see changes in the UI - Useful for development and deployment workflows Usage: python tools/sync_vuegraphs.py # or via Makefile: make sync """ import os import glob import requests import yaml from pathlib import Path # Configuration API_URL = "http://localhost:6400/api/vuegraphs/upload/content" YAML_DIR = "yaml_instance" def sync_yaml_to_vuegraphs(): """Reads all YAML files and uploads them to the VueGraph database.""" print(f"Syncing YAML files from {YAML_DIR} to {API_URL}...") yaml_files = glob.glob(os.path.join(YAML_DIR, "*.yaml")) for file_path in yaml_files: try: filename = Path(file_path).stem # simulation_hospital_lmstudio with open(file_path, 'r', encoding='utf-8') as f: content = f.read() # Basic validation to ensure it's a valid YAML try: yaml.safe_load(content) except yaml.YAMLError as e: print(f"Skipping {filename}: Invalid YAML - {e}") continue # Upload to VueGraph API payload = {"filename": filename, "content": content} response = requests.post(API_URL, json=payload) if response.status_code == 200: print(f"Synced: {filename}") else: print(f"Failed: {filename} - {response.status_code} {response.text}") except Exception as e: print(f"Error processing {file_path}: {e}") if __name__ == "__main__": sync_yaml_to_vuegraphs()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "tools/sync_vuegraphs.py", "license": "Apache License 2.0", "lines": 48, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple