diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,70 +1,7817 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-
-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
+from __future__ import annotations
+import asyncio
+from collections import defaultdict, deque
+
+from dataclasses import dataclass, field
+from datetime import datetime
+import inspect
+import json
+import os
+from threading import Lock, Event, Thread
+from pathlib import Path
+import queue
+from queue import Queue, Empty
+import logging
+
+import threading
+import traceback
+from markdown import Markdown
+from rich.console import Console
+
+from General_config import _SERVICES_TEAM, DEV_TEAM_SPECS
+from typing import Any, AsyncGenerator, Callable, Coroutine, Dict, Generator, List, Optional, Set, Tuple, Union, get_type_hints
+import uuid
+from openai import AsyncOpenAI
+from openai import OpenAI
+
+from time import time
+
+from _LCARS_CONFIG import LCARS_CONFIG
+#############################################################
+BASE_URL = "http://localhost:1234/v1"
+BASE_API_KEY = "not-needed"
+BASE_CLIENT = AsyncOpenAI(
+    base_url=BASE_URL,
+    api_key=BASE_API_KEY
+)  # Global state: async client
+BASEMODEL_ID = "leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf"  # Global state for selected model ID
+CLIENT = OpenAI(
+    base_url=BASE_URL,
+    api_key=BASE_API_KEY
+)  # Global state: sync client
+console = Console()
+config = LCARS_CONFIG()
+@dataclass
+class ParsedResponse:
+    """Parsed model response, split into reasoning, content, and code parts."""
+    thinking: str = ""
+    main_content: str = ""
+    code_snippets: List[str] = field(default_factory=list)
+    raw_reasoning: str = ""
+    raw_content: str = ""
+
+#############################################################
+@dataclass
+class RoleNetwork:
+    """Represents the complete network of roles connected to a specific role"""
+    primary_role: str
+    connected_roles: Set[str]  # All roles in this network
+    depth: int  # Maximum depth of connections
+    network_type: str  # 'isolated', 'chain', 'hub', 'mesh'
+    role_dependencies: Dict[str, Set[str]]  # role -> set of connected roles
+
+    def get_summary(self) -> str:
+        """Generate summary of the role network"""
+        return f"""
+Primary Role: {self.primary_role}
+Network Type: {self.network_type}
+Depth: {self.depth}
+Connected Roles ({len(self.connected_roles)}):
+{chr(10).join(f"  - {role}" for role in sorted(self.connected_roles))}
+""".strip()
+
+@dataclass
+class TeamCandidate:
+    """Represents a potential team based on a role network"""
+    team_name: str
+    primary_role: str
+    role_network: RoleNetwork
+    candidate_agents: List['AgentSpec']  # All agents that could fill roles in this network
+    agents_by_role: Dict[str, List['AgentSpec']]  # role -> list of agents
+
+    def get_summary(self) -> str:
+        """Generate summary of team candidate"""
+        return f"""
+Team: {self.team_name}
+Primary Role: {self.primary_role}
+Network Type: {self.role_network.network_type}
+Total Roles: {len(self.role_network.connected_roles)}
+Total Candidate Agents: {len(self.candidate_agents)}
+
+Roles and Agents:
+{chr(10).join(f"  {role} ({len(agents)} agents): {', '.join(a.name for a in agents)}"
+              for role, agents in sorted(self.agents_by_role.items()))}
+""".strip()
+
+class RoleNetworkAnalyzer:
+    """
+    Analyzes agent
roles and their dependency networks. + Step 1: Extract unique roles + Step 2: Build role dependency graph + Step 3: For each role, discover all connected roles (full network) OR dependants subtree + Step 4: Match agents to role networks + """ + + def __init__(self, agents): + # Always convert to list to handle generators, iterators, etc. + if hasattr(agents, '__iter__') and not isinstance(agents, (list, tuple)): + self.agents = list(agents) + else: + self.agents = agents if isinstance(agents, list) else list(agents) + + self.unique_roles: Set[str] = set() + self.role_to_agents: Dict[str, List['AgentSpec']] = {} + self.role_dependency_graph: Dict[str, Set[str]] = {} # Full bidirectional graph + self.role_dependants_graph: Dict[str, Set[str]] = {} # Role -> set of roles that depend ON it + self.role_networks: Dict[str, RoleNetwork] = {} + self.team_candidates: List[TeamCandidate] = [] + + # Run initial analysis + self._extract_unique_roles() + self._build_role_to_agents_map() + self._build_role_dependency_graph() + self._build_role_dependants_graph() # New: Build dependants-specific graph + + def _extract_unique_roles(self): + """Step 1: Extract all unique roles from agents""" + print("Step 1: Extracting unique roles...") + + for agent in self.agents: + self.unique_roles.add(agent.role) + + # Also collect roles mentioned in dependencies + for dep_role in agent.depends_on: + self.unique_roles.add(dep_role) + + for dep_role in agent.has_dependants: + self.unique_roles.add(dep_role) + + print(f" Found {len(self.unique_roles)} unique roles") + return self.unique_roles + + def _build_role_to_agents_map(self): + """Map each role to agents that have that role""" + print("Step 1b: Mapping roles to agents...") + + role_map = defaultdict(list) + for agent in self.agents: + role_map[agent.role].append(agent) + + self.role_to_agents = dict(role_map) + + # Report roles without agents + roles_without_agents = self.unique_roles - set(self.role_to_agents.keys()) + if roles_without_agents: + print(f" Warning: {len(roles_without_agents)} roles mentioned but no agents found:") + for role in sorted(roles_without_agents): + print(f" - {role}") + + print(f" Mapped {len(self.role_to_agents)} roles to agents") + return self.role_to_agents + + def _build_role_dependency_graph(self): + """Step 2: Build complete bidirectional role dependency graph""" + print("\nStep 2: Building role dependency graph (bidirectional)...") + + graph = defaultdict(set) + + for agent in self.agents: + agent_role = agent.role + + # Add connections from depends_on (agent_role <- depends_on_role) + for dep_role in agent.depends_on: + if dep_role: # Skip empty strings + graph[agent_role].add(dep_role) + graph[dep_role].add(agent_role) # Bidirectional + + # Add connections from has_dependants (agent_role -> has_dependants_role) + for dep_role in agent.has_dependants: + if dep_role: # Skip empty strings + graph[agent_role].add(dep_role) + graph[dep_role].add(agent_role) # Bidirectional + + self.role_dependency_graph = dict(graph) + + # Report statistics + roles_with_deps = len([r for r in graph if graph[r]]) + total_connections = sum(len(deps) for deps in graph.values()) // 2 # Divide by 2 because bidirectional + + print(f" {roles_with_deps} roles have dependencies") + print(f" {total_connections} unique role connections") + + return self.role_dependency_graph + + def _build_role_dependants_graph(self): + """Build a directed graph: Role -> {set of roles that depend ON this role}""" + print("\nStep 2b: Building role dependants graph 
(directed)...") + + dependants_graph = defaultdict(set) + + for agent in self.agents: + # For each role, find all agents that list this role in their 'depends_on' + for other_agent in self.agents: + if agent.role in other_agent.depends_on: + dependants_graph[agent.role].add(other_agent.role) + + self.role_dependants_graph = dict(dependants_graph) + + print(f" Built dependants graph for {len(self.role_dependants_graph)} roles") + for role, deps in self.role_dependants_graph.items(): + if deps: + print(f" - {role} has dependants: {', '.join(deps)}") + + return self.role_dependants_graph + + def discover_role_network(self, role: str) -> RoleNetwork: + """ + Step 3: For a selected role, discover ALL connected roles recursively (full network) + Returns a complete network of all roles connected to this role via any dependency. + """ + if role not in self.unique_roles: + raise ValueError(f"Role '{role}' not found in agent pool") + + # BFS to find all connected roles (full network) + connected_roles = set() + visited = set() + queue = deque([role]) + + # Track connections for each role in the network + network_dependencies = defaultdict(set) + + while queue: + current_role = queue.popleft() + + if current_role in visited: + continue + + visited.add(current_role) + connected_roles.add(current_role) + + # Get all roles connected to current role (bidirectional graph) + dependencies = self.role_dependency_graph.get(current_role, set()) + network_dependencies[current_role] = dependencies.copy() + + # Add unvisited connected roles to queue + for dep_role in dependencies: + if dep_role not in visited: + queue.append(dep_role) + + # Calculate network metrics + depth = self._calculate_network_depth(dict(network_dependencies)) + network_type = self._classify_network_type(dict(network_dependencies)) + + return RoleNetwork( + primary_role=role, + connected_roles=connected_roles, + depth=depth, + network_type=network_type, + role_dependencies=dict(network_dependencies) + ) + + def discover_dependants_subtree(self, role: str) -> RoleNetwork: + """ + NEW: For a selected role, discover ONLY the roles that depend on it (directly or indirectly). + This is the "subtree" where the given role is the root and dependants are children. + Uses the directed role_dependants_graph. + """ + if role not in self.unique_roles: + raise ValueError(f"Role '{role}' not found in agent pool") + + # BFS to find only roles that depend on the given role + dependant_roles = set() + visited = set() + queue = deque([role]) # Start from the primary role + + # Track direct dependants for each role in the subtree + subtree_dependencies = defaultdict(set) # Role -> {its direct dependants} + + while queue: + current_role = queue.popleft() + + if current_role in visited: + continue + + visited.add(current_role) + if current_role != role: # Don't add the primary role to the dependant set itself + dependant_roles.add(current_role) + + # Get roles that depend *ON* current_role + direct_dependants = self.role_dependants_graph.get(current_role, set()) + subtree_dependencies[current_role] = direct_dependants.copy() # Store direct dependants + + # Add unvisited dependant roles to queue for further exploration + for dep_role in direct_dependants: + if dep_role not in visited: + queue.append(dep_role) + + # Calculate depth: How far down the dependants tree does the role go? + # Depth is the longest path from the primary role to any leaf dependant. 
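+        # Illustrative sketch (hypothetical roles): if "Architect" is depended on by
+        # "Developer", and "Developer" is depended on by "Tester", the subtree rooted
+        # at "Architect" is Architect -> Developer -> Tester, giving depth 2:
+        #
+        #   subtree_dependencies = {
+        #       "Architect": {"Developer"},
+        #       "Developer": {"Tester"},
+        #       "Tester": set(),
+        #   }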
+ depth = self._calculate_dependants_depth(role, dict(subtree_dependencies)) + + # Classify based on dependants structure + network_type = self._classify_dependants_subtree_type(role, dict(subtree_dependencies)) + + # The "connected_roles" for the subtree are the dependants + the primary role + connected_roles = dependant_roles.copy() + connected_roles.add(role) + + return RoleNetwork( + primary_role=role, + connected_roles=connected_roles, # Includes primary + dependants + depth=depth, + network_type=network_type, + role_dependencies=dict(subtree_dependencies) # Maps role -> its direct dependants + ) + + def _calculate_dependants_depth(self, start_role: str, subtree_deps: Dict[str, Set[str]]) -> int: + """Calculate the depth of the dependants subtree starting from start_role.""" + if not subtree_deps or start_role not in subtree_deps: + return 0 + + def dfs(current_role, current_depth): + dependants = subtree_deps.get(current_role, set()) + if not dependants: + return current_depth + max_depth = current_depth + for dep_role in dependants: + max_depth = max(max_depth, dfs(dep_role, current_depth + 1)) + return max_depth + + return dfs(start_role, 0) + + def _classify_dependants_subtree_type(self, start_role: str, subtree_deps: Dict[str, Set[str]]) -> str: + """Classify the dependants subtree structure.""" + if not subtree_deps or start_role not in subtree_deps: + # If start_role has no dependants, it's isolated in the dependants sense + # But it might still *have* dependencies on others (not checked here) + # Let's call it isolated if no *dependants* exist. + return 'isolated' + + # Count dependants for each role in the subtree + dependant_counts = [len(deps) for deps in subtree_deps.values()] + max_dependants = max(dependant_counts) if dependant_counts else 0 + avg_dependants = sum(dependant_counts) / len(dependant_counts) if dependant_counts else 0 + + # Classify based on branching + if max_dependants >= len(subtree_deps) * 0.5: # Many roles depend on one + return 'hub' # Central role has many dependants + elif avg_dependants <= 1.0: # Mostly linear + return 'chain' # Each role has 1 or 0 dependants on average + else: # More branching + return 'mesh' # Multiple roles depend on others, complex structure + + def discover_all_role_networks(self) -> Dict[str, RoleNetwork]: + """ + Step 3: Discover full role networks for ALL roles + Creates a complete mapping of role -> its full network + """ + print("\nStep 3: Discovering full role networks (all connected)...") + + networks = {} + processed_networks = [] # Track unique networks by their role sets + + for role in sorted(self.unique_roles): + network = self.discover_role_network(role) + networks[role] = network + + # Check if this network is new (not a duplicate of an already processed network) + network_signature = frozenset(network.connected_roles) + if network_signature not in processed_networks: + processed_networks.append(network_signature) + + self.role_networks = networks + + print(f" Analyzed {len(self.unique_roles)} roles") + print(f" Found {len(processed_networks)} unique full role networks") + + return networks + + def discover_all_dependants_subtrees(self) -> Dict[str, RoleNetwork]: + """ + NEW: Discover dependants subtrees for ALL roles + Creates a mapping of role -> its dependants subtree + """ + print("\nStep 3b: Discovering dependants subtrees...") + + subtrees = {} + + for role in sorted(self.unique_roles): + subtree = self.discover_dependants_subtree(role) + subtrees[role] = subtree + + print(f" Analyzed 
{len(self.unique_roles)} roles' dependants subtrees") + + return subtrees + + def get_agents_for_role_network(self, role: str) -> TeamCandidate: + """ + Step 4: For a selected role, get all agents that could participate + in its full role network (team) + """ + if role not in self.role_networks: + network = self.discover_role_network(role) # Use full network + else: + network = self.role_networks[role] + + # Find all agents that match roles in this network + agents_by_role = {} + all_candidate_agents = [] + + for network_role in network.connected_roles: + agents = self.role_to_agents.get(network_role, []) + if agents: + agents_by_role[network_role] = agents + all_candidate_agents.extend(agents) + + # Generate team name + team_name = self._generate_team_name(network) + + return TeamCandidate( + team_name=team_name, + primary_role=role, + role_network=network, + candidate_agents=all_candidate_agents, + agents_by_role=agents_by_role + ) + + def get_agents_for_dependants_subtree(self, role: str) -> TeamCandidate: + """ + NEW: For a selected role, get all agents that could participate + in its dependants subtree (team of dependants). + """ + # Use the new method to get the dependants subtree + network = self.discover_dependants_subtree(role) + + # Find all agents that match roles in this subtree + agents_by_role = {} + all_candidate_agents = [] + + for network_role in network.connected_roles: # Includes primary + dependants + agents = self.role_to_agents.get(network_role, []) + if agents: + agents_by_role[network_role] = agents + all_candidate_agents.extend(agents) + + # Generate team name focused on dependants + team_name = self._generate_dependants_team_name(network) + + return TeamCandidate( + team_name=team_name, + primary_role=role, + role_network=network, + candidate_agents=all_candidate_agents, + agents_by_role=agents_by_role + ) + + def generate_all_team_candidates(self) -> List[TeamCandidate]: + """ + Step 4: Generate team candidates for all unique role networks (full networks) + """ + print("\nStep 4: Generating team candidates (full networks)...") + + # Discover all networks if not done yet + if not self.role_networks: + self.discover_all_role_networks() + + # Group roles by their network signature to avoid duplicates + network_signatures = {} + for role, network in self.role_networks.items(): + signature = frozenset(network.connected_roles) + if signature not in network_signatures: + network_signatures[signature] = role + + # Generate team candidates for each unique network + candidates = [] + for signature, primary_role in network_signatures.items(): + candidate = self.get_agents_for_role_network(primary_role) + candidates.append(candidate) + + self.team_candidates = candidates + + print(f" Generated {len(candidates)} team candidates (full networks)") + + return candidates + + def generate_all_dependants_team_candidates(self) -> List[TeamCandidate]: + """ + NEW: Generate team candidates for all roles based on their dependants subtrees. 
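+
+        Illustrative usage (the agent pool is hypothetical):
+
+            analyzer = RoleNetworkAnalyzer(agents)
+            candidates = analyzer.generate_all_dependants_team_candidates()
+            for candidate in candidates:
+                print(candidate.get_summary())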
+ """ + print("\nStep 4b: Generating dependants team candidates...") + + # Discover all dependants subtrees if not done yet + # We don't store them in self.role_networks to avoid conflict + # Instead, we just iterate and create candidates + candidates = [] + for role in sorted(self.unique_roles): + candidate = self.get_agents_for_dependants_subtree(role) + candidates.append(candidate) + + print(f" Generated {len(candidates)} dependants team candidates") + + return candidates + + def _calculate_network_depth(self, network_deps: Dict[str, Set[str]]) -> int: + """Calculate maximum depth of role dependencies in network""" + if not network_deps: + return 0 + + max_depth = 0 + for start_role in network_deps.keys(): + visited = {start_role: 0} + queue = deque([start_role]) + + while queue: + role = queue.popleft() + current_depth = visited[role] + max_depth = max(max_depth, current_depth) + + for connected_role in network_deps.get(role, set()): + if connected_role not in visited: + visited[connected_role] = current_depth + 1 + queue.append(connected_role) + + return max_depth + + def _classify_network_type(self, network_deps: Dict[str, Set[str]]) -> str: + """Classify the full network structure""" + if len(network_deps) == 1: + return 'isolated' + + connection_counts = [len(deps) for deps in network_deps.values()] + max_connections = max(connection_counts) if connection_counts else 0 + avg_connections = sum(connection_counts) / len(connection_counts) if connection_counts else 0 + + if max_connections >= len(network_deps) * 0.5: + return 'hub' + elif avg_connections <= 2.0: + return 'chain' + else: + return 'mesh' + + def _generate_team_name(self, network: RoleNetwork) -> str: + """Generate meaningful team name from full role network""" + roles = list(network.connected_roles) + + if len(roles) == 1: + return f"{roles[0]} Team" + + # Extract common keywords + keywords = self._extract_keywords(roles) + + if keywords: + return f"{keywords[0]} {network.network_type.title()} Team" + + return f"{network.primary_role} Team" + + def _generate_dependants_team_name(self, network: RoleNetwork) -> str: + """Generate meaningful team name from dependants subtree""" + roles = list(network.connected_roles) + + if len(roles) == 1: + return f"{network.primary_role} Dependents Team" # Only the primary role exists + + dependant_roles = network.connected_roles - {network.primary_role} + if not dependant_roles: + return f"{network.primary_role} Dependents Team" # No actual dependants + + # Name based on primary role and its dependants + primary = network.primary_role + num_deps = len(dependant_roles) + dep_sample = ', '.join(list(dependant_roles)[:2]) # Take first 2 as sample + if num_deps > 2: + dep_sample += f", +{num_deps - 2} others" + + return f"{primary} + Dependents ({dep_sample}) Team" + + def _extract_keywords(self, roles: List[str]) -> List[str]: + """Extract common keywords from role names""" + tech_keywords = [ + 'Agent', 'System', 'Developer', 'Engineer', 'Specialist', 'Architect', + 'API', 'Tool', 'Model', 'Chain', 'OpenAI', 'LangChain', 'AI', 'ML', + 'Senior', 'Lead', 'Principal' + ] + + found = [] + for keyword in tech_keywords: + if sum(1 for role in roles if keyword.lower() in role.lower()) >= len(roles) * 0.4: + found.append(keyword) + + return found + + def print_analysis_report(self, show_dependants=False): + """Print comprehensive analysis report""" + print("\n" + "="*80) + print("ROLE NETWORK ANALYSIS REPORT") + print("="*80) + + print(f"\nTotal Agents: {len(self.agents)}") + print(f"Unique 
Roles: {len(self.unique_roles)}") + print(f"Full Role Networks Discovered: {len(set(frozenset(n.connected_roles) for n in self.role_networks.values()))}") + + print("\n" + "-"*80) + print("UNIQUE ROLES:") + print("-"*80) + for role in sorted(self.unique_roles): + agent_count = len(self.role_to_agents.get(role, [])) + deps = self.role_dependency_graph.get(role, set()) + dependants = self.role_dependants_graph.get(role, set()) + print(f" {role}") + print(f" Agents: {agent_count}") + print(f" Connected to (Full Network): {', '.join(sorted(deps)) if deps else 'None'}") + print(f" Has Dependents (Direct): {', '.join(sorted(dependants)) if dependants else 'None'}") + + print("\n" + "-"*80) + print("FULL ROLE NETWORKS:") + print("-"*80) + + # Show unique networks + seen_networks = set() + for role, network in sorted(self.role_networks.items()): + signature = frozenset(network.connected_roles) + if signature not in seen_networks: + seen_networks.add(signature) + print(f"\n Primary Role: {network.primary_role}") + print(f" Type: {network.network_type}") + print(f" Depth: {network.depth}") + print(f" Roles in Network ({len(network.connected_roles)}):") + for net_role in sorted(network.connected_roles): + print(f" - {net_role}") + + if show_dependants: + print("\n" + "-"*80) + print("DEPENDANTS SUBTREES:") + print("-"*80) + # Get subtrees without storing them permanently + subtrees = self.discover_all_dependants_subtrees() + for role, network in sorted(subtrees.items()): + print(f"\n Primary Role: {network.primary_role}") + print(f" Type: {network.network_type}") + print(f" Depth: {network.depth}") + dependant_roles = network.connected_roles - {network.primary_role} + print(f" Roles in Subtree ({len(network.connected_roles)}, including primary):") + print(f" Primary: {network.primary_role}") + print(f" Dependents ({len(dependant_roles)}): {', '.join(sorted(dependant_roles)) if dependant_roles else 'None'}") + + if self.team_candidates: + print("\n" + "-"*80) + print("TEAM CANDIDATES (Full Networks):") + print("-"*80) + for i, candidate in enumerate(self.team_candidates, 1): + print(f"\n{i}. 
{candidate.get_summary()}") + + def get_team_for_role(self, role: str) -> TeamCandidate: + """Quick access: Get team candidate for a specific role (full network)""" + return self.get_agents_for_role_network(role) + + def get_dependants_team_for_role(self, role: str) -> TeamCandidate: + """NEW: Quick access: Get team candidate for a specific role's dependants subtree""" + return self.get_agents_for_dependants_subtree(role) + + def list_roles_with_agents(self) -> Dict[str, int]: + """List all roles and how many agents have each role""" + return {role: len(agents) for role, agents in self.role_to_agents.items()} + + def export_team_candidate_to_teamspec(self, candidate: TeamCandidate) -> Dict: + """Export a team candidate to TeamSpec format""" + return { + 'name': candidate.team_name, + 'role': candidate.primary_role, + 'goal': f"Coordinate {len(candidate.role_network.connected_roles)} roles to achieve objectives", + 'instructions': f"This is a {candidate.role_network.network_type} team with {len(candidate.candidate_agents)} potential agents.", + 'agents': candidate.candidate_agents, + 'skills': self._aggregate_skills(candidate.candidate_agents), + 'expertise_keywords': self._aggregate_expertise(candidate.candidate_agents), + 'description': f"Role network centered on {candidate.primary_role}", + 'workflow_type': self._workflow_from_type(candidate.role_network.network_type) + } + + def _aggregate_skills(self, agents: List['AgentSpec']) -> List[str]: + """Aggregate unique skills from agents""" + all_skills = set() + for agent in agents: + all_skills.update(agent.skills) + return list(all_skills) + + def _aggregate_expertise(self, agents: List['AgentSpec']) -> List[str]: + """Aggregate unique expertise from agents""" + all_expertise = set() + for agent in agents: + all_expertise.update(agent.expertise_keywords) + return list(all_expertise) + + def _workflow_from_type(self, network_type: str) -> str: + """Map network type to workflow type""" + mapping = { + 'chain': 'sequential', + 'hub': 'hierarchical', + 'mesh': 'collaborative', + 'isolated': 'sequential' + } + return mapping.get(network_type, 'sequential') + +############################################################# +class AgentFunction(): + """ + + Initialize an agent with configuration and optional sub-agent. 
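+
+    Wiring sketch (how execution_pattern is interpreted by execute()):
+      - execution_pattern="pre_process":  this agent runs first, then its sub_agent.
+      - execution_pattern="post_process": the sub_agent runs first, then this agent.
+      - no sub_agent: this is a "final" node and answers on its own.
+
+    Illustrative construction (spec names are hypothetical):
+        drafter = AgentFunction(drafter_spec)
+        editor = AgentFunction(editor_spec, sub_agent=drafter, execution_pattern="post_process")
+        result = editor.execute("Write a post about daily yoga")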
+ + """ + ACTION_TAG = "{content}" + TARGET_TAG = "{content}" + CONTENT_TAG = "{content}" + STATUS_TAG = "{content}" + FEEDBACK_TAG = "{content}" + ROUTING_TAG = "{content}" + # Status values for collaboration + STATUS_VALUES = { + "IN_PROGRESS", "NEEDS_INPUT", "ERROR", "REJECTED", "FINAL_OUTPUT", + "PROPOSAL", "AGREED", "DISAGREES", "NEUTRAL", "IDEA", "FINAL_PLAN", + "HANDOFF", "TURN_COMPLETE", "SELECTED_NEXT" + } + + + + def __init__(self,spec:AgentSpec,modelIdentifier="leroydyer/qwen/qwen2.5-omni-3b-q4_k_m.gguf", + sub_agent:'AgentFunction'=None,execution_pattern=None, + requires_iteration=False,max_retries=0, + is_router=False,routes:List['AgentFunction']=None,prompt_template=None): +### AGENT Functionality + + self.timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + self.prompt_template = self.spec.prompt_template or f""" + + + You are {self.spec.name}, a {self.spec.role} + Your goal is: {self.spec.goal} + + + Instructions: {self.spec.instructions} + + + + """ + self.spec = spec + self.client = CLIENT + self.modelIdentifier = modelIdentifier + + self.sub_agent:'AgentFunction' = sub_agent + if self.sub_agent == None: + self.response_type="final" + self.execution_pattern = None + else: + self.response_type = "Agent" + self._validate_execution_(execution_pattern) # "post_process" or "pre_process" + self.execution_pattern = execution_pattern + self.is_router = is_router + self.routes = routes + self.requires_iteration = requires_iteration + self.max_retries = max_retries +## PROMPT MANAGEMENT + self.ROUTING_INSTRUCT=f""" + * ROUTING INSTRUCTIONS * + Your output must include a routing tag indicating where to send the result: + DESTINATION + + Available destinations: + + {self._get_route_descriptions_()} + + Special routing options: + - FINAL - Output is complete, no further processing needed + - SELF - Send back to yourself for refinement + """ + if self.is_router: + self.prompt_template = f""" + + {self.prompt_template} + {self.ROUTING_INSTRUCT} + + + """ + self.FEEDBACK_INSTRUCT = """ + * DIAGNOSTIC OUTPUT FORMAT * + Always include status and feedback in your response: + + Your main output or answer + ONE_OF: {', '.join(self.STATUS_VALUES)} + Brief explanation if status is not FINAL_OUTPUT + + Status meanings: + - IN_PROGRESS: Work started but incomplete + - NEEDS_INPUT: Requires clarification or additional information + - REJECTED: Output doesn't meet quality standards + - ERROR: Something went wrong + - FINAL_OUTPUT: Work completed successfully + - PROPOSAL: Proposing a change or solution + - AGREED: Agreeing with previous statement + - DISAGREES: Disagreeing with previous statement + - NEUTRAL: Making a statement or observation + + """ + self.RESPONSE_INSTRUCT =""" + * Output format expectations * + + ALWAYS structure your response using these XML tags: + + + + + Your main output or answer goes here + FINAL_OUTPUT + + """ + if self.requires_iteration: + self.prompt_template = f""" + + {self.prompt_template} + {self.RESPONSE_INSTRUCT} + ** Additional Diagnostic feedback Output Instructions ** + {self.FEEDBACK_INSTRUCT} + + """ + self.COLLABORATION_GUIDELINES = f""" + + COMMUNICATION STYLE: + - Keep responses concise and focused (5-8 sentences generally) + - Address others by name when responding to them + - If a message isn't relevant to your expertise, stay silent or briefly acknowledge + - When you have valuable input, speak up confidently + - respond critically or constructively. 
+        - Challenge weak points
+        - Support strong ones
+        - Propose synthesis if possible
+        - If you agree with a prior statement, explicitly write: <status>AGREED</status>
+        - If proposing a change: <status>PROPOSAL</status>
+        - If you're just making a statement or sharing an output or example, use: <status>NEUTRAL</status>
+        - Ask clarifying questions if unsure
+        - Avoid repeating what others said unless synthesizing
+        Remember: You are a specialized team member.
+        Contribute when your expertise is needed, and support others when they lead in their areas.
+        Use the outputs of others to fulfil your goals and complete your segment of the task.
+
+        DECISION MAKING:
+        - Determine if you should respond based on relevance to your role
+        - Decide if your message should be public (shared with all) or direct (to a specific agent)
+        - When collaborating, explain your reasoning and approach
+
+        TASK EXECUTION:
+        - Break down complex requests that match your expertise
+        - Provide actionable insights specific to your role
+        - Coordinate with other agents when tasks span multiple specialties
+        - Always aim to add unique value based on your specific skills
+        When contributing:
+        - Be concise and focused
+        - Build on others' ideas when relevant
+        - Use <status>AGREED</status> when agreeing with prior statements + <content>content of agreement</content>
+        - Use <status>PROPOSAL</status> when proposing changes + <content>content of proposal</content>
+        - Use <status>DISAGREES</status> when disagreeing + <content>content of disagreement</content>
+        - Use <status>NEUTRAL</status> for statements or examples + <content>content or statement or fact or example etc</content>
+        - Include <content>...</content> for substantive output
+        """
+## Utilities
+    def __visualize__(self, indent: int = 0) -> str:
+        """Visualize the agent chain as a structured tree with direction indicators."""
+
+        def __flow_symbol__(execution_pattern: str) -> str:
+            """Return directional flow arrow based on execution pattern."""
+            if execution_pattern == "pre_process":
+                return "→"  # parent pushes input downstream
+            elif execution_pattern == "post_process":
+                return "←"  # parent pulls result upstream
+            return "↔"  # bidirectional / normal
+
+        def __arrow_prefix__(indent: int) -> str:
+            """Return proper branch arrow based on indent level."""
+            return "└── " if indent > 0 else ""
+
+        arrow = __arrow_prefix__(indent)
+        flow = __flow_symbol__(self.execution_pattern)
+        result = " " * indent + f"{arrow}{self.spec.name} [{self.execution_pattern}] {flow}\n"
+
+        if self.sub_agent:
+            # Insert directional context line between this and subnode
+            direction = (
+                " " * (indent + 4)
+                + ("↑" if self.execution_pattern == "post_process" else "↓")
+                + "\n"
+            )
+            result += direction
+            # Recursive call (same method name, consistent casing)
+            result += self.sub_agent.__visualize__(indent + 4)
+
+        return result
+    def _detailed_view(self) -> str:
+        """Detailed parameter view."""
+        agents = list(self._traverse())
+        return f"""
+        Agent: {len(agents)} total agents
+        ├── Root: {self.spec.name} ({self.spec.role})
+        ├── Pattern: {self.execution_pattern}
+        ├── Role: {self.spec.role}
+        ├── Goal: {self.spec.goal}
+        ├── Instructions: {self.spec.instructions}
+        ├── Skills: {self.spec.skills}
+        ├── Expertise: {self.spec.expertise_keywords}
+        ├── Iterative: {self.requires_iteration}
+        ├── Retries: {self.max_retries}
+        ├── Model: {self.modelIdentifier}
+        ├── Tools: {self.spec.tools}
+        ├── Subordinates: {len(agents)-1}
+        │   └── Chain: {self._get_sub_agent_descriptions_()}
+        │   └── Count: {self.__get_length__()}
+        └── Prompt Template Length: {len(self.prompt_template)} chars
+        """.strip()
+    def __get_length__(self) -> int:
+        """Calculate the total length of the agent chain"""
+        if self.sub_agent is None:
+            return 1
+        return 1 + self.sub_agent.__get_length__()
+    def __repr__(self):
+        sub_count = self.__get_length__()
+        routes_preview = ", ".join(r.spec.name for r in self.routes) if self.routes else "None"
+        return (
+            f"AgentNode<{self.spec.name}>"
+            f"(model={self.modelIdentifier or 'None'}, subagents={sub_count}, "
+            f"iterative={self.requires_iteration}/{self.max_retries}, router={self.is_router}, "
+            f"routes=[{routes_preview}])"
+        )
+    def __contains__(self, name: str) -> bool:
+        return self.spec.name == name or (self.sub_agent and name in self.sub_agent)
+    def _validate_execution_(self, pattern: str):
+        valid_patterns = {"final", "post_process", "pre_process"}
+        if pattern not in valid_patterns:
+            raise ValueError(f"Invalid execution pattern: {pattern}. Must be one of {valid_patterns}")
+    def _get_subagent_names(self) -> List[str]:
+        """Get names of all downstream agents."""
+        return [a.spec.name for a in self._traverse()][1:]
+    def _get_sub_agent_descriptions_(self):
+        """Get brief descriptions of all agents in the chain."""
+        return [
+            f"Name: {a.spec.name} (Role: {a.spec.role}): Goal: {a.spec.goal}\n"
+            for a in self._traverse()
+        ]
+    def _get_route_descriptions_(self):
+        """Get brief descriptions of all route agents."""
+        if self.routes:
+            return [
+                f"Name: {a.spec.name} (Role: {a.spec.role}): Goal: {a.spec.goal}\n"
+                for a in self.routes
+            ]
+        else:
+            return ""
+    def _traverse(self):
+        """Generator over all agents in chain."""
+        yield self
+        if self.sub_agent:
+            yield from self.sub_agent._traverse()
+## Standard Chain and Subagent Responses (ROOT FUNCTIONALITY)
+    def _execute_with_tools(self, messages: List[Dict], tools_schema: List[Dict]) -> Dict[str, Any]:
+        """Execute agent with tool calling capability."""
+
+        def format_react_trace(chat_history):
+            react_steps = []
+            for msg in chat_history:
+                # Tool results are appended below with role "tool"
+                if msg.get("role") in ("function", "tool"):
+                    func = msg.get("name", "unknown")
+                    content = msg.get("content", "")
+                    thought = f"I should call the `{func}` function to perform the required task.\n"
+                    action = f"{func}()\n"
+                    observation = content
+                    react_steps.append(
+                        f"Thought: {thought}Action: {action}Observation: {observation}\n"
+                    )
+            return "\n".join(react_steps)
+        def _execute_tool_call(tool_call: Any, tool_map) -> Dict[str, Any]:
+            """
+            Execute a single tool call request.
+
+            - Locates tool by name in registered toolset.
+            - Attempts execution with provided arguments.
+            - Captures result or error.
+            - Wraps into standardized success/error response.
+
+            Args:
+                tool_call: Tool call object in OpenAI function-call format.
+                tool_map: Mapping of tool names to callables.
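+
+            Illustrative shape of an incoming call (values hypothetical):
+                tool_call.function.name      -> "search_web"
+                tool_call.function.arguments -> '{"query": "yoga benefits"}'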
+
+            Returns:
+                Dict[str, Any]: Response object with keys:
+                    - success: Boolean execution status
+                    - output: Tool execution result (stringified if needed)
+                    - error: Error message if execution failed
+            """
+            tool_name = tool_call.function.name
+            tool_args = tool_call.function.arguments
+
+            if tool_name not in tool_map:
+                return {
+                    "success": False,
+                    "output": "",
+                    "error": f"Tool '{tool_name}' not found"
+                }
+
+            try:
+                args_dict = json.loads(tool_args) if isinstance(tool_args, str) else tool_args
+                result = tool_map[tool_name](**args_dict)
+                return {
+                    "success": True,
+                    "output": str(result),
+                    "error": None
+                }
+            except json.JSONDecodeError:
+                return {
+                    "success": False,
+                    "output": "",
+                    "error": f"Invalid JSON arguments for {tool_name}: {tool_args}"
+                }
+            except Exception as e:
+                return {
+                    "success": False,
+                    "output": "",
+                    "error": f"Error executing {tool_name}: {str(e)}"
+                }
+
+        max_iterations = 5
+        iterations = 0
+
+        try:
+            while iterations < max_iterations:
+                iterations += 1
+
+                response = self.client.chat.completions.create(
+                    model=self.modelIdentifier,
+                    messages=messages,
+                    tools=tools_schema,
+                    tool_choice="auto"
+                )
+
+                message = response.choices[0].message
+                messages.append(message.to_dict())  # Convert to dict for consistency
+
+                # Process tool calls if any
+                if hasattr(message, 'tool_calls') and message.tool_calls:
+                    for tool_call in message.tool_calls:
+                        tool_result = _execute_tool_call(tool_call, self.spec.tool_map)
+                        messages.append({
+                            "role": "tool",
+                            "content": json.dumps(tool_result),
+                            "tool_call_id": tool_call.id
+                        })
+                else:
+                    # No more tool calls → final response
+                    react_trace = format_react_trace(messages)
+                    return {
+                        "success": True,
+                        "output": message.content or "",
+                        "trace": react_trace,
+                        "error": None
+                    }
+
+            # Max iterations reached
+            react_trace = format_react_trace(messages)
+            return {
+                "success": False,
+                "output": message.content or "",
+                "trace": react_trace,
+                "error": "Max tool iterations reached"
+            }
+
+        except Exception as e:
+            return {
+                "success": False,
+                "output": "",
+                "trace": "",
+                "error": str(e)
+            }
+    def _create_success_response(self, input_data: str, output: str, trace: str = "") -> Dict[str, Any]:
+        """
+        Utility to build standardized success response.
+
+        Args:
+            input_data: Input query that was processed.
+            output: Generated result/output string.
+            trace: Optional execution trace for provenance.
+
+        Returns:
+            Dict[str, Any]: Response object with success=True and full metadata.
+        """
+        return {
+            "agent": self.spec.name,
+            "role": self.spec.role,
+            "input": input_data,
+            "trace": trace,
+            "result": output,
+            "success": True
+        }
+    def _create_error_response(self, input_data: str, error_msg: str) -> Dict[str, Any]:
+        """
+        Utility to build standardized error response.
+
+        Args:
+            input_data: Input query that failed.
+            error_msg: Error message or exception string.
+
+        Returns:
+            Dict[str, Any]: Response object with success=False and error info.
+        """
+        return {
+            "agent": self.spec.name,
+            "role": self.spec.role,
+            "input": input_data,
+            "output": "",
+            "result": f"Error: {error_msg}",
+            "success": False,
+            "error": error_msg
+        }
+    def __CALL__(self, question: str) -> Dict[str, Any]:
+        """
+        Internal method to perform the actual model/tool call for this agent.
+
+        - Routes query through the model or tool executor.
+        - Always produces a standardized dictionary response.
+        - Wraps raw outputs into {result, success, trace, artifacts} shape.
+        - Handles verbose vs. minimal output requirements.
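+
+        A successful call resolves to the dict shape produced by
+        _create_success_response (values illustrative):
+            {"agent": "...", "role": "...", "input": question,
+             "trace": "", "result": "<model text>", "success": True}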
+
+        Args:
+            question: Input question or context string.
+
+        Returns:
+            Dict[str, Any]: Formatted response containing result, success flag,
+            and optional trace/artifacts.
+        """
+        if not self.modelIdentifier:
+            return self._create_error_response(question, "Model identifier not specified")
+
+        prompt = self.prompt_template.format(context=question)
+        messages = [
+            {"role": "system", "content": str(self.spec.system_message)},
+            {"role": "user", "content": str(prompt)}
+        ]
+
+        try:
+            if not self.spec.tool_map:
+                # Simple chat completion without tools
+                response = self.client.chat.completions.create(
+                    model=self.modelIdentifier,
+                    messages=messages
+                )
+                final_output = response.choices[0].message.content
+                react_trace = ""
+                return self._create_success_response(question, final_output, react_trace)
+
+            else:
+                # Tool-enabled completion; the schema was generated by AgentSpec.__post_init__
+                tool_exec = self._execute_with_tools(messages, self.spec.tools)
+                if tool_exec["success"]:
+                    return self._create_success_response(
+                        question, tool_exec["output"], tool_exec.get("trace", "")
+                    )
+                else:
+                    return self._create_error_response(
+                        question, tool_exec["error"] or "Unknown tool execution error"
+                    )
+
+        except Exception as e:
+            return self._create_error_response(question, str(e))
+    def execute(self, question: str) -> Dict[str, Any]:
+        """
+        Unified execution method that routes based on execution_pattern.
+        Supports:
+        - FINAL: agent alone
+        - PREPROCESS: agent → subagent
+        - POSTPROCESS: subagent → agent
+        - Fallback: unsuccessful responses trigger diagnostic refinement
+        """
+        def _check_is_final(agent):
+            """Check if this is a final node (no chaining required)"""
+            return agent.response_type == 'final'
+
+        def _get_feedback(question: str, subagent_result: Dict[str, Any]) -> str:
+            """
+            Fallback diagnostic handler for unsuccessful or unexpected results.
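+
+            Example of the tagged draft this handler parses (content illustrative):
+                <content>First draft of the answer...</content>
+                <status>REJECTED</status>
+                <feedback>Missing a conclusion section</feedback>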
+ """ + def _get_diagnostic_output(output: str) -> Dict[str, str]: # Add self parameter + """Parse tagged output from any agent returning the diagnostic content""" + + def _extract_tag_content(text: str, tag_name: str) -> str: + """Extract content between XML-like tags""" + import re + pattern = f"<{tag_name}>(.*?)" + match = re.search(pattern, text, re.DOTALL) + return match.group(1).strip() if match else "" + + return { + 'content': _extract_tag_content(output, 'content'), + 'status': _extract_tag_content(output, 'status'), + 'feedback': _extract_tag_content(output, 'feedback') + } + + + diagnostic = _get_diagnostic_output(subagent_result.get("result", "")) + status = diagnostic.get("status", "unknown") + refined_input = ( + f"Original task: {question}\n\n" + f"Status: {status}\n" + f"Draft to refine:\n---\n{subagent_result.get('result','')}\n---\n" + f"Feedback: {diagnostic.get('feedback','')}\n" + f"{diagnostic.get('content','')}" + ) + return refined_input + + if _check_is_final(self): + + self_agent_result = self.__CALL__(question) + if self_agent_result["success"]: + return self_agent_result + else: + refined_query = _get_feedback(question, self_agent_result) + return self.__CALL__(refined_query) + + + try: + if self.execution_pattern == "post_process": + subagent_result = self.sub_agent.execute(question) + if subagent_result["success"]: + return self.__CALL__(subagent_result["result"]) + else: + refined_query = _get_feedback(question, subagent_result) + return self.__CALL__(refined_query) + + + elif self.execution_pattern == "pre_process": + subagent_result = self.__CALL__(question) + if subagent_result["success"]: + return self.sub_agent.execute(subagent_result["result"]) + else: + refined_query = _get_feedback(question, subagent_result) + return self.sub_agent.execute(refined_query) + + + else: + # Unknown execution pattern – treat as unsuccessful diagnostic refinement + # Unknown pattern → single retry via refinement + return self.__CALL__(_get_feedback(question, {"result": "Unknown execution pattern"})) + + + except Exception as e: + + return { + "agent": self.name, + "input": question, + "output": "", + "result": f"Error: {str(e)}", + "success": False, + "error": str(e) + } +## Basic Response + def _CallAgent_(self, question, stream=True): + MYAGENT=self + + def _CallAgent_(task: str) -> Dict[str, Any]: + """ + Execute task independently using this agent's prompt template. + Returns standardized response dict. + """ + prompt = MYAGENT.prompt_template.format(context=task) + + messages = [ + {"role": "system", "content": MYAGENT.system_message}, + {"role": "user", "content": prompt} + ] + + try: + response = MYAGENT.client.chat.completions.create( + model=MYAGENT.modelIdentifier, + messages=messages + ) + + result = response.choices[0].message.content + + return { + "agent": MYAGENT.name, + "agent_id": MYAGENT.id, + "role": MYAGENT.role, + "task": task, + "result": result, + "success": True, + "error": None + } + except Exception as e: + return { + "agent": MYAGENT.name, + "agent_id": MYAGENT.id, + "role": MYAGENT.role, + "task": task, + "result": "", + "success": False, + "error": str(e) + } + def _CallAgent_streaming(task: str): + """ + Stream output from this agent using its formatted prompt. + Yields chunks of text. 
+ """ + prompt = self.prompt_template.format(context=task) + + messages = [ + {"role": "system", "content": MYAGENT.system_message}, + {"role": "user", "content": prompt} + ] + + try: + with MYAGENT.client.chat.completions.create( + model=MYAGENT.modelIdentifier, + messages=messages, + stream=True + ) as response: + for chunk in response: + if chunk.choices[0].delta.content: + yield chunk.choices[0].delta.content + except Exception as e: + yield f"Error: {str(e)}" + + + + + + if stream == True: + return _CallAgent_streaming(question) + + return _CallAgent_(question) + + +## ITERATIONS + def __make_iterable__(self, max_iterations: int = 3): + """ + + + Args: + max_iterations: Maximum retries before failure""" + + + + return self._add_iterations(self, max_iterations) + def _add_iterations( self,base_agent, max_iterations: int = 3): + """ + Adds iterative self-refinement capability to an existing agent. + + Modifies the base agent in-place to add iterative execution with self-diagnosis. + Converts this agent into an iterative self-refining agent. + + Modifies this agent in-place to add iterative execution with self-diagnosis. + Args: + base_agent: The agent to enhance with iterative capability + max_iterations: Maximum retries before failure + + Returns: + The same base agent with modified execute method + """ + + SELF_DIAGNOSIS_PROMPT = """ + You are an expert assistant. + For every query, always return output in the following format: + + + [your main response here] + + + + status: [FINAL_OUTPUT | REJECTED | NEEDS_INPUT | ERROR] + feedback: [brief, actionable explanation of why rejected or what is missing] + + + Rules: + - If your answer is complete and correct, set status=FINAL_OUTPUT. + - If incomplete or incorrect, set status=REJECTED and provide precise feedback. + - If missing user input, set status=NEEDS_INPUT. + - If you cannot proceed due to internal error, set status=ERROR. + """ + self.max_retries = max_iterations + self.requires_iteration = True + # Store the original execute method + original_execute = base_agent.execute + + # Inject diagnostic prompt + if hasattr(base_agent, "system_message"): + base_agent.system_message += "\n\n" + SELF_DIAGNOSIS_PROMPT + if hasattr(base_agent, "system_prompt"): + base_agent.system_prompt += "\n\n" + SELF_DIAGNOSIS_PROMPT + + def _parse_diagnostic(output: str) -> Dict[str, str]: + """Extract diagnostic status and feedback from the agent's output.""" + status, feedback = None, None + if "" in output and "" in output: + diag_section = output.split("")[1].split("")[0] + for line in diag_section.splitlines(): + if line.lower().startswith("status:"): + status = line.split(":", 1)[1].strip() + elif line.lower().startswith("feedback:"): + feedback = line.split(":", 1)[1].strip() + return {"status": status, "feedback": feedback} + + def iterative_execute(question: str) -> Dict[str, Any]: + """ + Execute the wrapped agent iteratively until success or retries exhausted. 
+ """ + current_input = question + + for iteration in range(self.max_retries): + result = original_execute(current_input) + + # Ensure dict structure + if not isinstance(result, dict) or "result" not in result: + return base_agent._create_error_response(question, "Malformed agent result") + + diagnostic = _parse_diagnostic(result["result"]) + status = diagnostic.get("status") + + if status == "FINAL_OUTPUT": + result["agent"] = base_agent.name + return result + + elif status == "REJECTED" and iteration < max_iterations - 1: + feedback = diagnostic.get("feedback", "No feedback provided") + current_input = f"Refine your answer. Previous feedback: {feedback}. Original query: {question}" + + elif status == "NEEDS_INPUT": + return base_agent._create_error_response( + question, f"Agent requires additional input: {diagnostic.get('feedback','')}" + ) + + elif status == "ERROR": + return base_agent._create_error_response( + question, f"Agent encountered error: {diagnostic.get('feedback','')}" + ) + + else: + return base_agent._create_error_response( + question, f"Unrecognized status '{status}' in diagnostic" + ) + + return base_agent._create_error_response(question, "Max iterations reached without final output.") + + # Replace the execute method + base_agent.execute = iterative_execute + return base_agent + def __iter__(self): + yield self + if self.sub_agent: + yield from self.sub_agent + def _consume(self, g: Generator[str, None, None]) -> str: + return "".join(g) + +## ROUTING + def _get_route(subagent_result: Dict[str, Any]): + def _get_route_from_output(output: str) -> Dict[str, str]: # Add self parameter + """Parse tagged output from any agent returning the diagnostic content""" + + def _extract_tag_content(text: str, tag_name: str) -> str: + """Extract content between XML-like tags""" + import re + pattern = f"<{tag_name}>(.*?)" + match = re.search(pattern, text, re.DOTALL) + return match.group(1).strip() if match else "" + + return { + 'route': _extract_tag_content(output, 'route'), + + } + route = _get_route_from_output(subagent_result.get("result", "")) + def set_router(self, is_router: bool, routes: Optional[List['AgentFunction']] = None): + self.is_router = bool(is_router) + if routes is not None: + self.routes = list(routes) + + +# Agent Chain Library Entry Points +# ------ +## CREATE: + def create_chain_agent(self, agents: list, chain_name: str = "Deep_ResearchQuery", + role: str = "Deep Research Team", goal: str = "given a research question to create a full essay or research paper or document"): + """ + Creates a new agent that represents a sequential chain of specialized agents. + + Returns a proper _AgentNode_ instance that can be used in any agent chain. + + Args: + agents: List of agent-like objects with `.execute(question)` method + max_retries: How many refinements each agent is allowed + chain_name: Name for the chain agent + role: Role description for the chain + goal: High-level objective of the chain + + Returns: + A new _AgentNode_ instance that represents the sequential chain + """ + + # Create iterative versions of all agents + iterative_agents = [ + self._add_iterations(agent, self.max_retries) + for agent in agents + ] + + # Get agent names for the goal description + agent_names = [agent.spec.name for agent in iterative_agents] + chain_agent_spec = TeamSpec(name=chain_name,role=role,goal=f"{goal}. 
Sequence: {' → '.join(agent_names)}", + instructions="Process input through a sequential chain of specialized agents") + # Create a new agent that represents the chain + chain_agent = AgentFunction( + spec=chain_agent_spec, + modelIdentifier=getattr(agents[0], 'modelIdentifier', None) if agents else None, + execution_pattern="final" + ) + + def chain_execute(question: str) -> Dict[str, Any]: + """ + Execute the sequential pipeline with resilient agents. + """ + current_output = {"result": question, "success": True} + + for agent in iterative_agents: + input_data = current_output["result"] + current_output = agent.execute(input_data) + + # Only stop if agent fails *after* retries + if not current_output.get("success", False): + return chain_agent._create_error_response( + question, + f"Pipeline halted: {agent.name} failed after retries. Error: {current_output.get('error', 'Unknown error')}" + ) + + # Update the agent name to reflect this is from the chain + current_output["agent"] = chain_name + return current_output + + # Replace the execute method + chain_agent.execute = chain_execute + return chain_agent + def create_executable_chain(self, agents: list["AgentFunction"]): + """ + Creates a lightweight executable function that runs a chain of agents. + + Useful for quick queries without creating full agent instances. + + Args: + agents: List of agent-like objects with `.execute(question)` method + max_retries: How many refinements each agent is allowed + + Returns: + A function with signature execute(question: str) -> Dict[str, Any] + """ + + def _create_iterative_executor(base_agent: "AgentFunction", max_iterations: int = self.max_retries): # FIXED: Removed 'self' parameter + """ + Creates an iterative executor function for a single agent. + """ + + SELF_DIAGNOSIS_PROMPT = """ + You are an expert assistant. + For every query, always return output in the following format: + + [your main response here] + + + status: [FINAL_OUTPUT | REJECTED | NEEDS_INPUT | ERROR] + feedback: [brief, actionable explanation of why rejected or what is missing] + + Rules: + - If your answer is complete and correct, set status=FINAL_OUTPUT. + - If incomplete or incorrect, set status=REJECTED and provide precise feedback. + - If missing user input, set status=NEEDS_INPUT. + - If you cannot proceed due to internal error, set status=ERROR. + """ + + # Store original execute method + original_execute = base_agent.execute + + # Inject diagnostic prompt into base agent + if hasattr(base_agent, "system_prompt"): + base_agent.system_prompt += "\n" + SELF_DIAGNOSIS_PROMPT + if hasattr(base_agent, "system_message"): + base_agent.system_message += "\n" + SELF_DIAGNOSIS_PROMPT + + def _parse_diagnostic(output: str) -> Dict[str, str]: + """Extract diagnostic status and feedback from the agent's output.""" + status, feedback = None, None + if "" in output and "" in output: + diag_section = output.split("")[1].split("")[0] + for line in diag_section.splitlines(): + if line.lower().startswith("status:"): + status = line.split(":", 1)[1].strip() + elif line.lower().startswith("feedback:"): + feedback = line.split(":", 1)[1].strip() + return {"status": status, "feedback": feedback} + + def execute(question: str) -> Dict[str, Any]: + """ + Execute the wrapped agent iteratively until success or retries exhausted. 
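+
+                Each REJECTED round folds the feedback into the next prompt,
+                e.g. (wording illustrative):
+                    "Refine your answer. Previous feedback: ... Original query: ..."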
+ """ + current_input = question + for iteration in range(max_iterations): + result = original_execute(current_input) # FIXED: Use original_execute + # Ensure dict structure + if not isinstance(result, dict) or "result" not in result: + return { + "agent": base_agent.name, + "input": question, + "output": "", + "result": f"Error: Malformed agent result", + "success": False, + "error": "Malformed agent result" + } + + diagnostic = _parse_diagnostic(result["result"]) + status = diagnostic.get("status") + if status == "FINAL_OUTPUT": + return result + elif status == "REJECTED" and iteration < max_iterations - 1: + feedback = diagnostic.get("feedback", "No feedback provided") + current_input = f"Refine your answer. Previous feedback: {feedback}. Original query: {question}" + elif status == "NEEDS_INPUT": + return { + "agent": base_agent.name, + "input": question, + "output": "", + "result": f"Error: Agent requires additional input: {diagnostic.get('feedback', '')}", + "success": False, + "error": f"Agent requires additional input: {diagnostic.get('feedback', '')}" + } + elif status == "ERROR": + return { + "agent": base_agent.name, + "input": question, + "output": "", + "result": f"Error: Agent encountered error: {diagnostic.get('feedback', '')}", + "success": False, + "error": f"Agent encountered error: {diagnostic.get('feedback', '')}" + } + else: + return { + "agent": base_agent.name, + "input": question, + "output": "", + "result": f"Error: Unrecognized status '{status}' in diagnostic", + "success": False, + "error": f"Unrecognized status '{status}' in diagnostic" + } + + return { + "agent": base_agent.name, + "input": question, + "output": "", + "result": "Error: Max iterations reached without final output.", + "success": False, + "error": "Max iterations reached without final output." + } + + return execute + + # Wrap each agent with iterative refinement capability + refined_executors = [ + _create_iterative_executor(agent, self.max_retries) + for agent in agents + ] + + def execute(question: str) -> Dict[str, Any]: + """ + Runs the sequential pipeline with resilient agents. + """ + current_output = {"result": question, "success": True} + for agent_execute in refined_executors: + input_data = current_output["result"] + current_output = agent_execute(input_data) + # Only stop if agent fails *after* retries + if not current_output.get("success", False): + return { + "agent": "SequentialAgent", + "input": question, + "result": current_output.get("result", ""), + "success": False, + "error": f"Pipeline halted: Agent failed after retries" + } + + return current_output + + return execute + def create_executable_agent(self,Agent:"AgentFunction") -> Callable[[str], Dict[str, Any]]: + """ + Creates a lightweight executable function that runs a single agent. + + Useful for quick queries without creating full agent instances. + + Args: + Agent: An agent-like object with `.execute(question)` method + + Returns: + A callable function that takes a question string and returns the agent's response. 
+ """ + def execute(question: str) -> Dict[str, Any]: + return Agent.execute(question) + + return execute + def CreateSimpleAgent(self,name,role,goal,instructions,personality,skills,expertise_keywords,depends_on,has_dependants,prompt_template=None,system_message=None,tool_map=None): + '''Used to create Templates for agents ''' + MyAgent: AgentSpec = AgentSpec( + name=name, + role=role, + personality=personality, + goal=goal, + instructions=instructions, + skills=skills,expertise_keywords=expertise_keywords, + depends_on=depends_on, + has_dependants=has_dependants, + system_message =system_message, + prompt_template=prompt_template ) + + return MyAgent +# EXAMPLE_1 +def AgentFunction_ArticleWriter(topic = "The benefits of daily yoga for mental health"): + """Post-process pattern: Draft → Edit""" + + + # Step 1: Create the drafter (runs first) + _drafter = AgentSpec( + name="ContentDrafter", + role="Content Writer", + goal="Create engaging blog post drafts", + instructions=""" + - Write in a conversational tone + - Include an introduction, body, and conclusion + - Keep paragraphs short and readable + """ + ) + + # Step 2: Create the editor (runs second, refines draft) + _editor = AgentSpec( + name="SeniorEditor", + role="Content Editor", + goal="Polish content to publication quality", + instructions=""" + - Fix grammar and spelling errors + - Improve clarity and flow + - Enhance engagement and readability + - Ensure consistent tone + """, + + + ) + drafter = AgentFunction(_drafter) + ArticleWriter = AgentFunction(_editor,sub_agent=drafter,execution_pattern="post_process") + # Execute the chain + + print(f"\nExecuting chain with topic: {topic}") + result = ArticleWriter.execute(f"Write a blog post about: {topic}") + + print(f"\nSuccess: {result['success']}") + if result['success']: + print(f"\nFinal Result:\n{result['result']}") + print(f"\nExecution Trace:\n{result.get('output', '')}") + else: + print(f"\nError: {result.get('error', 'Unknown error')}") + print(f"\nFull result: {result}") + print("Chain structure:") + print(f"\nChain Structure:\n{ArticleWriter.__visualize__()}") + print(f"Chain length: {ArticleWriter.__get_length__()}") + + # Test serialization + agent_dict = ArticleWriter.to_dict() + print(f"Serialized: {json.dumps(agent_dict, indent=2)}") + + return ArticleWriter + + +############################################################# +#=======AgentSpecs=====# +@dataclass +class Spec(): + def __init__(self,name): + self.name: str = name + @property + def display_name(self) -> str: + return f"{self.name}" + +@dataclass +class AgentSpec(Spec): + name: str + role: str + goal: str + instructions: str = "" + personality: str = "" # optional with default + skills: List[str] = field(default_factory=list) + expertise_keywords: List[str] = field(default_factory=list) + depends_on: List[str] = field(default_factory=list) + has_dependants: List[str] = field(default_factory=list) + verbose: bool = False + tool_map: Dict[str, Callable] = field(default_factory=dict) + system_message: str = "" + prompt_template: str = None + + # Runtime-generated fields + tools: List[Dict[str, Any]] = field(default_factory=list) + # Note: tool_descriptions is a string, not a method + tool_descriptions: str = "No tools available" + + def __post_init__(self): + super().__init__(self.display_name) + # Initialize tools if tool_map is provided + if self.tool_map: + # Call the helper function directly, it returns tools and description + self.tools, self.tool_descriptions = self._add_tools(self.tool_map) + else: + 
self.tools = [] + self.tool_descriptions = "No tools available" + + # Generate the base prompt template from parameters + base_template = self._generate_base_template() + + # If a custom prompt_template was provided, append it to the base + if self.prompt_template is not None: + self.prompt_template = base_template + "\n" + self.prompt_template + else: + # Otherwise, use just the base template + self.prompt_template = base_template + + @property + def display_name(self) -> str: + return f"{self.name} ({self.role})" + + # Renamed from add_tools to avoid potential confusion, made private + def _add_tools(self, tool_map: Dict[str, Callable]) -> tuple[List[Dict[str, Any]], str]: + """ + Helper function to generate tools schema and descriptions from a tool map. + Returns a tuple of (tools_schema_list, tool_descriptions_string). + """ + + def _generate_tools_description_internal(map_obj) -> str: + """Generate human-readable description of available tools.""" + if not map_obj: + return "No tools available" + + tool_descriptions = [] + for tool_name, tool_func in map_obj.items(): + docstring = inspect.getdoc(tool_func) or "No description available" + sig = inspect.signature(tool_func) + params = [f"{name}" for name in sig.parameters.keys()] + tool_descriptions.append(f"- {tool_name}({', '.join(params)}): {docstring}") + return "\n".join(tool_descriptions) + + def _generate_tools_internal(map_obj: Dict[str, Callable]) -> List[Dict[str, Any]]: + """Generate OpenAI-compatible tools schema.""" + tools_schema = [] + + for tool_name, func in map_obj.items(): + # Get docstring (description) + description = inspect.getdoc(func) or f"{tool_name} function" + + # Get function signature + sig = inspect.signature(func) + type_hints = get_type_hints(func, include_extras=True) # include_extras for newer Python versions + + # Build parameter schema + properties = {} + required_params = [] + + for param_name, param in sig.parameters.items(): + param_type = type_hints.get(param_name, Any) + # Handle generic types like list[str] or Union + param_type_name = getattr(param_type, "__name__", str(param_type)) + # Simplified mapping - you might want to handle complex types differently + if hasattr(param_type, "__origin__"): + origin = param_type.__origin__ + if origin is list: + param_type_name = "list" + elif origin is dict: + param_type_name = "dict" + elif origin is tuple: + param_type_name = "array" # or handle specifically + elif origin is Union: # Optional is Union[..., type(None)] + param_type_name = getattr(param_type.__args__[0], "__name__", str(param_type.__args__[0])) + else: + param_type_name = str(origin) + else: + param_type_name = getattr(param_type, "__name__", str(param_type)) + + # Map Python type → JSON schema type + json_type_map = { + "int": "integer", + "float": "number", + "str": "string", + "bool": "boolean", + "list": "array", + "dict": "object", + "Any": "string" + } + json_type = json_type_map.get(param_type_name, "string") + + properties[param_name] = { + "type": json_type, + "description": f"{param_name} parameter" + } + + if param.default is inspect.Parameter.empty: + required_params.append(param_name) + + tools_schema.append({ + "type": "function", + "function": { + "name": tool_name, + "description": description, + "parameters": { + "type": "object", + "properties": properties, + "required": required_params + } + } + }) + + return tools_schema + + tools = _generate_tools_internal(tool_map) + desc = _generate_tools_description_internal(tool_map) + return tools, desc + + def 
_generate_base_template(self) -> str: + """Generate the base prompt template from all the parameters.""" + # Use self.tool_descriptions which is the string field + return f""" + {self.system_message} + You are {self.name}, a {self.role} : + 🎭 PERSONALITY: {self.personality} + 🎯 YOUR GOAL is: {self.goal} + Tools: + {self.tool_descriptions} + 📋 INSTRUCTIONS: {self.instructions} + 🔧 CORE SKILLS: {', '.join(self.skills)} + 🎓 AREAS OF EXPERTISE: {', '.join(self.expertise_keywords)} + 🔄 TEAM WORKFLOW: + You provide outputs for {self.has_dependants} + and , {self.depends_on} provides outputs for you that you work on : + Question: {{context}} + """.strip() + def create_agent_spec(self, name: str, role: str, goal: str, instructions: str, + personality: str = "", skills: List[str] = None, + expertise_keywords: List[str] = None, depends_on: List[str] = None, + has_dependants: List[str] = None, tool_map: Dict[str, Callable] = None, + system_message: str = "", prompt_template: str = None) -> 'AgentSpec': + """Create and register a new agent specification""" + spec = AgentSpec( + name=name, + role=role, + goal=goal, + instructions=instructions, + personality=personality, + skills=skills or [], + expertise_keywords=expertise_keywords or [], + depends_on=depends_on or [], + has_dependants=has_dependants or [], + tool_map=tool_map or {}, + system_message=system_message, + prompt_template=prompt_template + ) + + + return spec + +@dataclass +class TeamSpec(Spec): """ - For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference + Represents a team of agents working together. + Uses composition: A team *has* an AgentSpec for its own identity and *has* member agents. """ - client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b") + name: str + role: str + goal: str + instructions: str + skills: List[str] = field(default_factory=list) + expertise_keywords: List[str] = field(default_factory=list) + verbose: bool = False + tool_map: Dict[str, Callable] = field(default_factory=dict) + system_message: str = "" + prompt_template: str = None + + # Runtime-generated fields + tools: List[Dict[str, Any]] = field(default_factory=list) + # Note: tool_descriptions is a string, not a method + tool_descriptions: str = "No tools available" + # The members of the team + agents: List[AgentSpec] = None + + # Team-specific attributes + description: str ="" + workflow_type: str = "sequential" # Default workflow + + def __post_init__(self): + super().__init__(self.name) + """Validate team structure after initialization.""" + if not self.agents: + raise ValueError("A team must have at least one agent.") + # Optionally validate that agent names are unique within the team + agent_names = [agent.name for agent in self.agents] + if len(agent_names) != len(set(agent_names)): + raise ValueError("Agent names within a team must be unique.") + # Initialize tools if tool_map is provided + if self.tool_map: + # Call the helper function directly, it returns tools and description + self.tools, self.tool_descriptions = self._add_tools(self.tool_map) + else: + self.tools = [] + self.tool_descriptions = "No tools available" + + # Generate the base prompt template from parameters + base_template = self._generate_base_template() + + # If a custom prompt_template was provided, append it to the base + if self.prompt_template is not None: + self.prompt_template = base_template + "\n" + self.prompt_template + else: + # Otherwise, use just 
the base template + self.prompt_template = base_template + + @property + def display_name(self) -> str: + return f"{self.name} ({self.role})" + + # Renamed from add_tools to avoid potential confusion, made private + def _add_tools(self, tool_map: Dict[str, Callable]) -> tuple[List[Dict[str, Any]], str]: + """ + Helper function to generate tools schema and descriptions from a tool map. + Returns a tuple of (tools_schema_list, tool_descriptions_string). + """ + + def _generate_tools_description_internal(map_obj) -> str: + """Generate human-readable description of available tools.""" + if not map_obj: + return "No tools available" + + tool_descriptions = [] + for tool_name, tool_func in map_obj.items(): + docstring = inspect.getdoc(tool_func) or "No description available" + sig = inspect.signature(tool_func) + params = [f"{name}" for name in sig.parameters.keys()] + tool_descriptions.append(f"- {tool_name}({', '.join(params)}): {docstring}") + return "\n".join(tool_descriptions) + + def _generate_tools_internal(map_obj: Dict[str, Callable]) -> List[Dict[str, Any]]: + """Generate OpenAI-compatible tools schema.""" + tools_schema = [] + + for tool_name, func in map_obj.items(): + # Get docstring (description) + description = inspect.getdoc(func) or f"{tool_name} function" + + # Get function signature + sig = inspect.signature(func) + type_hints = get_type_hints(func, include_extras=True) # include_extras for newer Python versions + + # Build parameter schema + properties = {} + required_params = [] + + for param_name, param in sig.parameters.items(): + param_type = type_hints.get(param_name, Any) + # Handle generic types like list[str] or Union + param_type_name = getattr(param_type, "__name__", str(param_type)) + # Simplified mapping - you might want to handle complex types differently + if hasattr(param_type, "__origin__"): + origin = param_type.__origin__ + if origin is list: + param_type_name = "list" + elif origin is dict: + param_type_name = "dict" + elif origin is tuple: + param_type_name = "array" # or handle specifically + elif origin is Union: # Optional is Union[..., type(None)] + param_type_name = getattr(param_type.__args__[0], "__name__", str(param_type.__args__[0])) + else: + param_type_name = str(origin) + else: + param_type_name = getattr(param_type, "__name__", str(param_type)) + + # Map Python type → JSON schema type + json_type_map = { + "int": "integer", + "float": "number", + "str": "string", + "bool": "boolean", + "list": "array", + "dict": "object", + "Any": "string" + } + json_type = json_type_map.get(param_type_name, "string") + + properties[param_name] = { + "type": json_type, + "description": f"{param_name} parameter" + } + + if param.default is inspect.Parameter.empty: + required_params.append(param_name) + + tools_schema.append({ + "type": "function", + "function": { + "name": tool_name, + "description": description, + "parameters": { + "type": "object", + "properties": properties, + "required": required_params + } + } + }) + + return tools_schema - messages = [{"role": "system", "content": system_message}] + tools = _generate_tools_internal(tool_map) + desc = _generate_tools_description_internal(tool_map) + return tools, desc - messages.extend(history) + def _generate_base_template(self) -> str: + """Generate the base prompt template for the team.""" + # Use self.tool_descriptions which is the string field + # Join the list of agent names into a comma-separated string + agent_list_str = ', '.join(self._get_team_memebers_list_()) if self.agents else "None" + return 
f""" + {self.system_message} + You are {self.name}, a {self.role} team: + 🎭 {self.description or 'No specific team personality defined.'} + 🎯 YOUR TEAM'S GOAL is: {self.goal} + Tools available to the team: + {self.tool_descriptions} + 📋 INSTRUCTIONS for the team: {self.instructions} + 🔧 TEAM CORE SKILLS: {', '.join(self.skills)} + 🎓 TEAM AREAS OF EXPERTISE: {', '.join(self.expertise_keywords)} + 🔄 TEAM WORKFLOW TYPE: {self.workflow_type} + 👥 TEAM MEMBERS: {agent_list_str} + Question: {{context}} + """.strip() + def get_agent_by_name(self, name: str) -> Union[AgentSpec, None]: + """Find and return an agent by their name.""" + for agent in self.agents: + if agent.name == name: + return agent + return None - messages.append({"role": "user", "content": message}) + def _get_team_memebers_list_(self) -> List[str]: + """Return a list of all agent names in the team.""" + return [agent.display_name for agent in self.agents] + + def create_team_spec(self, name: str, role: str, goal: str, instructions: str, + agents: List[AgentSpec], skills: List[str] = None, + expertise_keywords: List[str] = None, tool_map: Dict[str, Callable] = None, + system_message: str = "", prompt_template: str = None, + description: str = "", workflow_type: str = "sequential") -> 'TeamSpec': + """Create and register a new team specification""" + spec = TeamSpec( + name=name, + role=role, + goal=goal, + instructions=instructions, + skills=skills or [], + expertise_keywords=expertise_keywords or [], + tool_map=tool_map or {}, + system_message=system_message, + prompt_template=prompt_template, + agents=agents, + description=description, + workflow_type=workflow_type + ) + - response = "" + return spec +############################################################# +@dataclass +class LLMMessage: + role: str + content: str + message_id: str = None + conversation_id: str = None + timestamp: float = None + metadata: Dict[str, Any] = None + + def __post_init__(self): + if self.message_id is None: + self.message_id = str(uuid.uuid4()) + if self.timestamp is None: + self.timestamp = time() + if self.metadata is None: + self.metadata = {} - for message in client.chat_completion( - messages, - max_tokens=max_tokens, - stream=True, - temperature=temperature, - top_p=top_p, +@dataclass +class LLMRequest: + message: LLMMessage + response_event: str = None + callback: Callable = None + + def __post_init__(self): + if self.response_event is None: + self.response_event = f"llm_response_{self.message.message_id}" + +@dataclass +class LLMResponse: + message: LLMMessage + request_id: str + success: bool = True + error: str = None + +############################################################# +class EventManager: + def __init__(self): + self._handlers = defaultdict(list) + self._lock = threading.Lock() + + def register(self, event: str, handler: Callable): + with self._lock: + self._handlers[event].append(handler) + + def unregister(self, event: str, handler: Callable): + with self._lock: + if event in self._handlers and handler in self._handlers[event]: + self._handlers[event].remove(handler) + + def raise_event(self, event: str, data: Any): + with self._lock: + handlers = self._handlers[event][:] + + for handler in handlers: + try: + handler(data) + except Exception as e: + console.log(f"Error in event handler for {event}: {e}", style="bold red") + +# Global event manager +EVENT_MANAGER = EventManager() + +def RegisterEvent(event: str, handler: Callable): + EVENT_MANAGER.register(event, handler) + +def RaiseEvent(event: str, data: Any): + 
EVENT_MANAGER.raise_event(event, data)
+
+def UnregisterEvent(event: str, handler: Callable):
+    EVENT_MANAGER.unregister(event, handler)
+#############################################################
+class LLMAgent:
+    """Main agent driver!
+    Handles multiple messages at once: it provides a message queuing service
+    as well as a generator method for easy integration with console
+    applications and UIs."""
+    def __init__(
+        self,
+        model_id: str = BASEMODEL_ID,
+        system_prompt: Optional[str] = None,
+        max_queue_size: int = 1000,
+        max_retries: int = 3,
+        timeout: int = 30,
+        max_tokens: int = 5000,
+        temperature: float = 0.3,
+        base_url: str = "http://localhost:1234/v1",
+        api_key: str = "not-needed",
+        generate_fn: Callable[[List[Dict[str, str]]], Coroutine[Any, Any, str]] = None
     ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
+        self.model_id = model_id
+        self.system_prompt = system_prompt or "You are a helpful AI assistant."
+        self.request_queue = Queue(maxsize=max_queue_size)
+        self.max_retries = max_retries
+        self.timeout = timeout
+        self.is_running = False
+        self._stop_event = Event()
+        self.processing_thread = None
+
+        # Conversation tracking
+        self.conversations: Dict[str, List[LLMMessage]] = {}
+        self.max_history_length = 20
+        self._generate = generate_fn or self._default_generate
+        self.api_key = api_key
+        self.base_url = base_url
+        self.max_tokens = max_tokens
+        self.temperature = temperature
+        self.async_client = self.CreateClient(base_url, api_key)
+
+        # Active requests waiting for responses
+        self.pending_requests: Dict[str, LLMRequest] = {}
+        self.pending_requests_lock = Lock()
+
+        # Register internal event handlers
+        self._register_event_handlers()
+
+        # Start the processing thread immediately
+        self.start()
+
+    async def _default_generate(self, messages: List[Dict[str, str]]) -> str:
+        """Default generate function if none provided"""
+        return await self.openai_generate(messages)
+
+    def _register_event_handlers(self):
+        """Register internal event handlers for response routing"""
+        RegisterEvent("llm_internal_response", self._handle_internal_response)
+
+    def _handle_internal_response(self, response: LLMResponse):
+        """Route responses to the appropriate request handlers"""
+        console.log(f"[bold cyan]Handling internal response for: {response.request_id}[/bold cyan]")
+
+        request = None
+        with self.pending_requests_lock:
+            if response.request_id in self.pending_requests:
+                request = self.pending_requests[response.request_id]
+                del self.pending_requests[response.request_id]
+                console.log(f"Found pending request for: {response.request_id}")
+            else:
+                console.log(f"No pending request found for: {response.request_id}", style="yellow")
+                return
+
+        # Raise the specific response event
+        if request.response_event:
+            console.log(f"[bold green]Raising event: {request.response_event}[/bold green]")
+            RaiseEvent(request.response_event, response)
+
+        # Call callback if provided
+        if request.callback:
+            try:
+                console.log(f"[bold yellow]Calling callback for: {response.request_id}[/bold yellow]")
+                request.callback(response)
+            except Exception as e:
+                console.log(f"Error in callback: {e}", style="bold red")
+
+    def _add_to_conversation_history(self, conversation_id: str, message: LLMMessage):
+        """Add message to conversation history"""
+        if conversation_id not in self.conversations:
+            self.conversations[conversation_id] = []
+
+        self.conversations[conversation_id].append(message)
+
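+        # Trim history if too long: cap at max_history_length exchanges
+        # (each stored exchange is a user + assistant message pair).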
+        if len(self.conversations[conversation_id]) > self.max_history_length * 2:
+            self.conversations[conversation_id] = self.conversations[conversation_id][-(self.max_history_length * 2):]
+
+    def _build_messages_from_conversation(self, conversation_id: str, new_message: LLMMessage) -> List[Dict[str, str]]:
+        """Build message list from conversation history"""
+        messages = []
+
+        # Add system prompt
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+
+        # Add conversation history
+        if conversation_id in self.conversations:
+            for msg in self.conversations[conversation_id][-self.max_history_length:]:
+                messages.append({"role": msg.role, "content": msg.content})
+
+        # Add the new message
+        messages.append({"role": new_message.role, "content": new_message.content})
+
+        return messages
+
+    def _process_llm_request(self, request: LLMRequest):
+        """Process a single LLM request"""
+        console.log(f"[bold green]Processing LLM request: {request.message.message_id}[/bold green]")
+        try:
+            # Build messages for LLM
+            messages = self._build_messages_from_conversation(
+                request.message.conversation_id or "default",
+                request.message
+            )
+
+            console.log(f"Calling LLM with {len(messages)} messages")
+
+            # Call LLM - Use sync call for thread compatibility
+            response_content = self._call_llm_sync(messages)
+
+            console.log(f"[bold green]LLM response received: {response_content[:100]}...[/bold green]")
+
+            # Create response message
+            response_message = LLMMessage(
+                role="assistant",
+                content=response_content,
+                conversation_id=request.message.conversation_id,
+                metadata={"request_id": request.message.message_id}
+            )
+
+            # Update conversation history
+            self._add_to_conversation_history(
+                request.message.conversation_id or "default",
+                request.message
+            )
+            self._add_to_conversation_history(
+                request.message.conversation_id or "default",
+                response_message
+            )
+
+            # Create and send response
+            response = LLMResponse(
+                message=response_message,
+                request_id=request.message.message_id,
+                success=True
+            )
+
+            console.log(f"[bold blue]Sending internal response for: {request.message.message_id}[/bold blue]")
+            RaiseEvent("llm_internal_response", response)
+
+        except Exception as e:
+            console.log(f"[bold red]Error processing LLM request: {e}[/bold red]")
+            traceback.print_exc()
+            # Create error response
+            error_response = LLMResponse(
+                message=LLMMessage(
+                    role="system",
+                    content=f"Error: {str(e)}",
+                    conversation_id=request.message.conversation_id
+                ),
+                request_id=request.message.message_id,
+                success=False,
+                error=str(e)
+            )
+
+            RaiseEvent("llm_internal_response", error_response)
+
+    def _call_llm_sync(self, messages: List[Dict[str, str]]) -> str:
+        """Sync call to the LLM with retry logic"""
+        console.log(f"Making LLM call to {self.model_id}")
+        for attempt in range(self.max_retries):
+            try:
+                response = CLIENT.chat.completions.create(
+                    model=self.model_id,
+                    messages=messages,
+                    temperature=self.temperature,
+                    max_tokens=self.max_tokens
+                )
+                content = response.choices[0].message.content
+                console.log(f"LLM call successful, response length: {len(content)}")
+                return content
+            except Exception as e:
+                console.log(f"LLM call attempt {attempt + 1} failed: {e}")
+                if attempt == self.max_retries - 1:
+                    raise e
+                # Wait before retrying (simple linear backoff)
+                from time import sleep
+                sleep(1.0 * (attempt + 1))
+
+    def _process_queue(self):
+        """Main queue processing loop"""
+        console.log("[bold cyan]LLM Agent queue processor started[/bold cyan]")
+        while not self._stop_event.is_set():
+            try:
+                request = self.request_queue.get(timeout=1.0)
+                if request:
+                    
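# A request arrived: process it synchronously on this worker thread.
+                    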
console.log(f"Got request from queue: {request.message.message_id}") + self._process_llm_request(request) + self.request_queue.task_done() + except Empty: + continue + except Exception as e: + console.log(f"Error in queue processing: {e}", style="bold red") + traceback.print_exc() + console.log("[bold cyan]LLM Agent queue processor stopped[/bold cyan]") + + def send_message( + self, + content: str, + role: str = "user", + conversation_id: str = None, + response_event: str = None, + callback: Callable = None, + metadata: Dict = None + ) -> str: + """Send a message to the LLM and get response via events""" + if not self.is_running: + raise RuntimeError("LLM Agent is not running. Call start() first.") + + # Create message + message = LLMMessage( + role=role, + content=content, + conversation_id=conversation_id, + metadata=metadata or {} + ) + + # Create request + request = LLMRequest( + message=message, + response_event=response_event, + callback=callback + ) + + # Store in pending requests BEFORE adding to queue + with self.pending_requests_lock: + self.pending_requests[message.message_id] = request + console.log(f"Added to pending requests: {message.message_id}") + + # Add to queue + try: + self.request_queue.put(request, timeout=5.0) + console.log(f"[bold magenta]Message queued: {message.message_id}, Content: {content[:50]}...[/bold magenta]") + return message.message_id + except queue.Full: + console.log(f"[bold red]Queue full, cannot send message[/bold red]") + with self.pending_requests_lock: + if message.message_id in self.pending_requests: + del self.pending_requests[message.message_id] + raise RuntimeError("LLM Agent queue is full") + + async def chat(self, messages: List[Dict[str, str]]) -> str: + """ + Async chat method that sends message via queue and returns response string. + This is the main method you should use. 
+ """ + # Create future for the response + loop = asyncio.get_event_loop() + response_future = loop.create_future() - response += token - yield response + def chat_callback(response: LLMResponse): + """Callback when LLM responds - thread-safe""" + console.log(f"[bold yellow]✓ CHAT CALLBACK TRIGGERED![/bold yellow]") + + if not response_future.done(): + if response.success: + content = response.message.content + console.log(f"Callback received content: {content[:100]}...") + # Schedule setting the future result on the main event loop + loop.call_soon_threadsafe(response_future.set_result, content) + else: + console.log(f"Error in response: {response.error}") + error_msg = f"❌ Error: {response.error}" + loop.call_soon_threadsafe(response_future.set_result, error_msg) + else: + console.log(f"[bold red]Future already done, ignoring callback[/bold red]") + console.log(f"Sending message to LLM agent...") -""" -For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface -""" -chatbot = gr.ChatInterface( - respond, - type="messages", - additional_inputs=[ - gr.Textbox(value="You are a friendly Chatbot.", label="System message"), - gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"), - gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"), - gr.Slider( - minimum=0.1, - maximum=1.0, - value=0.95, - step=0.05, - label="Top-p (nucleus sampling)", - ), - ], + # Extract the actual message content from the messages list + user_message = "" + for msg in messages: + if msg.get("role") == "user": + user_message = msg.get("content", "") + break + + if not user_message.strip(): + return "" + + # Send message with callback using the queue system + try: + message_id = self.send_message( + content=user_message, + conversation_id="default", + callback=chat_callback + ) + + console.log(f"Message sent with ID: {message_id}, waiting for response...") + + # Wait for the response and return it + try: + response = await asyncio.wait_for(response_future, timeout=60.0) + console.log(f"[bold green]✓ Chat complete! 
Response length: {len(response)}[/bold green]") + return response + + except asyncio.TimeoutError: + console.log("[bold red]Response timeout[/bold red]") + # Clean up the pending request + with self.pending_requests_lock: + if message_id in self.pending_requests: + del self.pending_requests[message_id] + return "❌ Response timeout - check if LLM server is running" + + except Exception as e: + console.log(f"[bold red]Error sending message: {e}[/bold red]") + traceback.print_exc() + return f"❌ Error sending message: {e}" + + def start(self): + """Start the LLM agent""" + if not self.is_running: + self.is_running = True + self._stop_event.clear() + self.processing_thread = Thread(target=self._process_queue, daemon=True) + self.processing_thread.start() + console.log("[bold green]LLM Agent started[/bold green]") + + def stop(self): + """Stop the LLM agent""" + console.log("Stopping LLM Agent...") + self._stop_event.set() + if self.processing_thread and self.processing_thread.is_alive(): + self.processing_thread.join(timeout=10) + self.is_running = False + console.log("LLM Agent stopped") + + def get_conversation_history(self, conversation_id: str = "default") -> List[LLMMessage]: + """Get conversation history""" + return self.conversations.get(conversation_id, [])[:] + + def clear_conversation(self, conversation_id: str = "default"): + """Clear conversation history""" + if conversation_id in self.conversations: + del self.conversations[conversation_id] + + + async def _chat(self, messages: List[Dict[str, str]]) -> str: + return await self._generate(messages) + + @staticmethod + async def openai_generate(messages: List[Dict[str, str]], max_tokens: int = 8096, temperature: float = 0.4, model: str = BASEMODEL_ID,tools=None) -> str: + """Static method for generating responses using OpenAI API""" + try: + resp = await BASE_CLIENT.chat.completions.create( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + tools=tools + ) + response_text = resp.choices[0].message.content or "" + return response_text + except Exception as e: + console.log(f"[bold red]Error in openai_generate: {e}[/bold red]") + return f"[LLM_Agent Error - openai_generate: {str(e)}]" + + async def _call_(self, messages: List[Dict[str, str]]) -> str: + """Internal call method using instance client""" + try: + resp = await self.async_client.chat.completions.create( + model=self.model_id, + messages=messages, + temperature=self.temperature, + max_tokens=self.max_tokens + ) + response_text = resp.choices[0].message.content or "" + return response_text + except Exception as e: + console.log(f"[bold red]Error in _call_: {e}[/bold red]") + return f"[LLM_Agent Error - _call_: {str(e)}]" + + @staticmethod + def CreateClient(base_url: str, api_key: str) -> AsyncOpenAI: + '''Create async OpenAI Client required for multi tasking''' + return AsyncOpenAI( + base_url=base_url, + api_key=api_key + ) + + @staticmethod + async def fetch_available_models(base_url: str, api_key: str) -> List[str]: + """Fetches available models from the OpenAI API.""" + try: + async_client = AsyncOpenAI(base_url=base_url, api_key=api_key) + models = await async_client.models.list() + model_choices = [model.id for model in models.data] + return model_choices + except Exception as e: + console.log(f"[bold red]LLM_Agent Error fetching models: {e}[/bold red]") + return ["LLM_Agent Error fetching models"] + + def get_models(self) -> List[str]: + """Get available models using instance credentials""" + return 
asyncio.run(self.fetch_available_models(self.base_url, self.api_key)) + + + def get_queue_size(self) -> int: + """Get current queue size""" + return self.request_queue.qsize() + + def get_pending_requests_count(self) -> int: + """Get number of pending requests""" + with self.pending_requests_lock: + return len(self.pending_requests) + + def get_status(self) -> Dict[str, Any]: + """Get agent status information""" + return { + "is_running": self.is_running, + "queue_size": self.get_queue_size(), + "pending_requests": self.get_pending_requests_count(), + "conversations_count": len(self.conversations), + "model": self.model_id + } + +class AI_Agent: + def __init__(self, model_id: str, system_prompt: str = "You are a helpful assistant. Respond concisely in 1-2 sentences.", history: List[Dict] = None): + self.model_id = model_id + self.system_prompt = system_prompt + self.history = history or [] + self.conversation_id = f"conv_{uuid.uuid4().hex[:8]}" + + # Create agent instance + self.client = LLMAgent( + model_id=model_id, + system_prompt=self.system_prompt, + generate_fn=LLMAgent.openai_generate + ) + + console.log(f"[bold green]✓ MyAgent initialized with model: {model_id}[/bold green]") + + async def call_llm(self, messages: List[Dict], use_history: bool = True) -> str: + """ + Send messages to LLM and get response + Args: + messages: List of message dicts with 'role' and 'content' + use_history: Whether to include conversation history + Returns: + str: LLM response + """ + try: + console.log(f"[bold yellow]Sending {len(messages)} messages to LLM (use_history: {use_history})...[/bold yellow]") + + # Enhance messages based on history setting + enhanced_messages = await self._enhance_messages(messages, use_history) + + response = await self.client.chat(enhanced_messages) + console.log(f"[bold green]✓ Response received ({len(response)} chars)[/bold green]") + + # Update conversation history ONLY if we're using history + if use_history: + self._update_history(messages, response) + + return response + + except Exception as e: + console.log(f"[bold red]✗ ERROR: {e}[/bold red]") + traceback.print_exc() + return f"Error: {str(e)}" + + async def _enhance_messages(self, messages: List[Dict], use_history: bool) -> List[Dict]: + """Enhance messages with system prompt and optional history""" + enhanced = [] + + # Add system prompt if not already in messages + has_system = any(msg.get('role') == 'system' for msg in messages) + if not has_system and self.system_prompt: + enhanced.append({"role": "system", "content": self.system_prompt}) + + # Add conversation history only if requested + if use_history and self.history: + enhanced.extend(self.history[-10:]) # Last 10 messages for context + + # Add current messages + enhanced.extend(messages) + + return enhanced + + def _update_history(self, messages: List[Dict], response: str): + """Update conversation history with new exchange""" + # Add user messages to history + for msg in messages: + if msg.get('role') in ['user', 'assistant']: + self.history.append(msg) + + # Add assistant response to history + self.history.append({"role": "assistant", "content": response}) + + # Keep history manageable (last 20 exchanges) + if len(self.history) > 40: # 20 user + 20 assistant messages + self.history = self.history[-40:] + + async def simple_query(self, query: str) -> str: + """Simple one-shot query method - NO history/context""" + messages = [{"role": "user", "content": query}] + return await self.call_llm(messages, use_history=False) + + async def multi_turn_chat(self, 
user_input: str) -> str: + """Multi-turn chat that maintains context across calls""" + messages = [{"role": "user", "content": user_input}] + response = await self.call_llm(messages, use_history=True) + return response + + + def get_conversation_summary(self) -> Dict: + """Get conversation summary""" + return { + "conversation_id": self.conversation_id, + "total_messages": len(self.history), + "user_messages": len([msg for msg in self.history if msg.get('role') == 'user']), + "assistant_messages": len([msg for msg in self.history if msg.get('role') == 'assistant']), + "recent_exchanges": self.history[-4:] if self.history else [] + } + + def clear_history(self): + """Clear conversation history""" + self.history.clear() + console.log("[bold yellow]Conversation history cleared[/bold yellow]") + + def update_system_prompt(self, new_prompt: str): + """Update the system prompt""" + self.system_prompt = new_prompt + console.log(f"[bold blue]System prompt updated[/bold blue]") + + def stop(self): + """Stop the client gracefully""" + if hasattr(self, 'client') and self.client: + self.client.stop() + console.log("[bold yellow]MyAgent client stopped[/bold yellow]") + async def contextual_query(self, query: str, context_messages: List[Dict] = None, + context_text: str = None, context_files: List[str] = None) -> str: + """ + Query with specific context but doesn't update main history + + Args: + query: The user question + context_messages: List of message dicts for context + context_text: Plain text context (will be converted to system message) + context_files: List of file paths to read and include as context + """ + messages = [] + + # Add system prompt + if self.system_prompt: + messages.append({"role": "system", "content": self.system_prompt}) + + # Handle different context types + if context_messages: + messages.extend(context_messages) + + if context_text: + messages.append({"role": "system", "content": f"Additional context: {context_text}"}) + + if context_files: + file_context = await self._read_files_context(context_files) + if file_context: + messages.append({"role": "system", "content": f"File contents:\n{file_context}"}) + + # Add the actual query + messages.append({"role": "user", "content": query}) + + return await self.call_llm(messages, use_history=False) + + async def _read_files_context(self, file_paths: List[str]) -> str: + """Read multiple files and return as context string""" + contexts = [] + for file_path in file_paths: + try: + if os.path.exists(file_path): + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + contexts.append(f"--- {os.path.basename(file_path)} ---\n{content}") + else: + console.log(f"[bold yellow]File not found: {file_path}[/bold yellow]") + except Exception as e: + console.log(f"[bold red]Error reading file {file_path}: {e}[/bold red]") + + return "\n\n".join(contexts) if contexts else "" + + + async def query_with_code_context(self, query: str, code_snippets: List[str] = None, + code_files: List[str] = None) -> str: + """ + Specialized contextual query for code-related questions + """ + code_context = "CODE CONTEXT:\n" + + if code_snippets: + for i, snippet in enumerate(code_snippets, 1): + code_context += f"\nSnippet {i}:\n```\n{snippet}\n```\n" + + if code_files: + # Read code files and include them + for file_path in code_files: + if file_path.endswith(('.py', '.js', '.java', '.cpp', '.c', '.html', '.css')): + code_context += f"\nFile: {file_path}\n```\n" + try: + with open(file_path, 'r') as f: + code_context += f.read() + except 
Exception as e: + code_context += f"Error reading file: {e}" + code_context += "\n```\n" + + return await self.contextual_query(query, context_text=code_context) + + async def multi_context_query(self, query: str, contexts: Dict[str, Any]) -> str: + """ + Advanced contextual query with multiple context types + + Args: + query: The user question + contexts: Dict with various context types + - 'messages': List of message dicts + - 'text': Plain text context + - 'files': List of file paths + - 'urls': List of URLs + - 'code': List of code snippets or files + - 'metadata': Any additional metadata + """ + all_context_messages = [] + + # Build context from different sources + if contexts.get('text'): + all_context_messages.append({"role": "system", "content": f"Context: {contexts['text']}"}) + + if contexts.get('messages'): + all_context_messages.extend(contexts['messages']) + + if contexts.get('files'): + file_context = await self._read_files_context(contexts['files']) + if file_context: + all_context_messages.append({"role": "system", "content": f"File Contents:\n{file_context}"}) + + if contexts.get('code'): + code_context = "\n".join([f"Code snippet {i}:\n```\n{code}\n```" + for i, code in enumerate(contexts['code'], 1)]) + all_context_messages.append({"role": "system", "content": f"Code Context:\n{code_context}"}) + + if contexts.get('metadata'): + all_context_messages.append({"role": "system", "content": f"Metadata: {contexts['metadata']}"}) + + return await self.contextual_query(query, context_messages=all_context_messages) + + +async def Example_Multi_turn_conversation(): + existing_history = [ + {"role": "user", "content": "What's the capital of France?"}, + {"role": "assistant", "content": "The capital of France is Paris."} + ] + + agent = AI_Agent( + model_id=BASEMODEL_ID, + system_prompt="You are a helpful assistant.", + history=existing_history + ) + + # Multi-turn conversation + response1 = await agent.multi_turn_chat("Hello! 
I need help with programming.")
+    print(f"Response 1: {response1}")
+
+    response2 = await agent.multi_turn_chat("Can you explain Python decorators?")
+    print(f"Response 2: {response2}")
+    # Test simple query
+    response3 = await agent.simple_query("What is 2+2?")
+    console.log(f"[bold green]Simple Query Response:[/bold green] {response3}")
+
+    # Test multi-turn
+    response4 = await agent.multi_turn_chat("Now multiply that by 3")
+    console.log(f"[bold green]Multi-turn Response:[/bold green] {response4}")
+
+    # Show summary
+    summary = agent.get_conversation_summary()
+    console.log(f"[bold cyan]Conversation Summary:[/bold cyan] {summary}")
+
+    agent.stop()
+
+#############################################################
+def create_async_handler(async_func):
+    """Convert an async function to a sync one for Gradio compatibility"""
+    def wrapper(*args, **kwargs):
+        import asyncio
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        try:
+            return loop.run_until_complete(async_func(*args, **kwargs))
+        finally:
+            loop.close()
+    return wrapper
+#############################################################
+@dataclass
+class Message:
+    '''Message format for chat-room model messages'''
+    id: str
+    room_id: str
+    sender: str
+    target: Optional[str]
+    content: str
+    is_public: bool
+    meta: Dict[str, Any] = field(default_factory=dict)
+@dataclass
+class LogEntry:
+    ts: float
+    level: str
+    event: str
+    payload: Dict[str, Any] = field(default_factory=dict)
+class CentralLog:
+    def __init__(self, name: str = "L.C.A.R.S Positronic Log"):
+        self._entries: List[LogEntry] = []
+        self._logger = logging.getLogger(name)
+        if not self._logger.handlers:
+            handler = logging.StreamHandler()
+            formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
+            handler.setFormatter(formatter)
+            self._logger.addHandler(handler)
+        self._logger.setLevel(logging.INFO)
+
+    def record(self, level: str, event: str, **payload: Any) -> None:
+        # Timestamp each entry with the current time (was a hard-coded 0.9)
+        entry = LogEntry(ts=time(), level=level.upper(), event=event, payload=payload)
+        self._entries.append(entry)
+        getattr(self._logger, level.lower(), self._logger.info)(f"{event} | {payload}")
+
+    def all(self) -> List[LogEntry]:
+        return list(self._entries)
+
+#=======Agent_Libraries=====#
+class ChatClient:
+    '''Chat model - base class for chat-room clients'''
+    def __init__(self, username: str):
+        self.username = username
+
+    async def on_user_message(self, msg: Message) -> None:
+        pass
+    def ReceiveServiceMessage(self, info):
+        pass
+    async def on_system_event(self, event: str, data: Dict[str, Any]) -> None:
+        pass
+class ChatRoom:
+    def __init__(self, room_id: Optional[str] = None, log: Optional[CentralLog] = None):
+        self.room_id = room_id or f"room-{uuid.uuid4().hex[:8]}"
+        self._clients: Dict[str, ChatClient] = {}
+        self._log = log or CentralLog("chatroom")
+        self.message_history: List[Message] = []
+        self._services: Dict[str, ChatClient] = {}  # Fixed: proper dict for services
+        self._message_queue = asyncio.Queue()
+
+    # --- Notification Mechanism ---
+    def NotifyService(self, info: str, sender: str = "System"):
+        """
+        Sends a notification from a service (or system) to all clients.
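+        Delivery is best-effort: a failure in any single client handler is
+        logged and does not stop the broadcast to the remaining clients.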
+ """ + print(f"[Notification from {sender}] {info}") + for client in self._clients.values(): + try: + client.ReceiveServiceMessage(info) + except Exception as e: + # Fixed logging format + self._log.record("info", f"Service message: {sender} INFO: {info}", client=client.username, error=str(e)) + pass + def list_services(self) -> List[str]: + """Get list of available service names""" + return list(self._services.keys()) + def get_service(self, name: str) -> Optional[AgentService]: + """Get a specific service by name""" + return self._services.get(name) + + async def add_service(self, client: ChatClient) -> None: + self._services[client.username] = client + self._clients[client.username] = client + self._log.record("info", f"Service_joined: {client.username}", room=self.room_id, client=client.username) + await self._fanout_system(f"Service_joined: {client.username}", {"service": client.username}) + async def remove_service(self, username: str) -> None: + if username in self._clients: + del self._clients[username] + if username in self._services: + del self._services[username] + self._log.record("info", f"Service_removed: {username}", room=self.room_id, client=username) + await self._fanout_system("Service_left", {"service": username}) + + + async def _fanout_message(self, msg: Message) -> None: + """Send message to clients AND enabled services""" + # Send to regular clients + for client in self._clients.values(): + try: + await client.on_user_message(msg) + except Exception as e: + self._log.record("error", "client_message_error", client=client.username, error=str(e)) + + # Send to services that have chat enabled + for service in self._services.values(): + if service.chat_enabled: + try: + await service.on_user_message(msg) + except Exception as e: + self._log.record("error", "service_message_error", service=service.spec.name, error=str(e)) + + def list_clients(self) -> List[str]: + return list(self._clients.keys()) + + async def add_client(self, client: ChatClient) -> None: + self._clients[client.username] = client + self._log.record("info", f"client_joined: {client.username}", room=self.room_id, client=client.username) + await self._fanout_system(f"client_joined: {client.username}", {"client": client.username}) + + async def remove_client(self, username: str) -> None: + if username in self._clients: + del self._clients[username] + self._log.record("info", f"client_removed: {username}", room=self.room_id, client=username) + await self._fanout_system("client_left", {"client": username}) + + async def _fanout_system(self, event: str, data: Dict[str, Any]) -> None: + await asyncio.gather(*(c.on_system_event(event, {"room_id": self.room_id, **data}) + for c in self._clients.values())) + + + def get_chat_history_for_display(self) -> List[tuple]: + display_history = [] + for msg in self.message_history: + if msg.is_public: + display_name = msg.sender + else: + display_name = f"{msg.sender} → {msg.target}" + display_history.append((display_name, msg.content)) + return display_history + + async def broadcast_agent_capabilities(self): + """Broadcast what each agent can do""" + capabilities_msg = "Available Agents:\n" + for client in self._clients.values(): + if hasattr(client, 'spec'): + capabilities_msg += f"- {client.spec.name}: {client.spec.role}\n" + if hasattr(client.spec, 'skills'): + capabilities_msg += f" Skills: {', '.join(client.spec.skills)}\n" + if hasattr(client.spec, 'expertise_keywords'): + capabilities_msg += f" Expertise: {', '.join(client.spec.expertise_keywords)}\n" + for service 
in self._services.values():  # Also broadcast services
+            if hasattr(service, 'spec'):
+                capabilities_msg += f"- SERVICE: {service.spec.name}: {service.spec.role}\n"
+                if hasattr(service.spec, 'skills'):
+                    capabilities_msg += f"   Skills: {', '.join(service.spec.skills)}\n"
+                if hasattr(service.spec, 'expertise_keywords'):
+                    capabilities_msg += f"   Expertise: {', '.join(service.spec.expertise_keywords)}\n"
+
+        await self.send_public(
+            sender="system",
+            content=capabilities_msg,
+            meta={"type": "capabilities_broadcast"}
+        )
+    def get_human_client(self):
+        """Get the human client from the room"""
+        for client in self._clients.values():
+            if isinstance(client, Human):
+                return client
+        return None
+
+    # --- New: Fast Queueing Methods ---
+    async def send_public(self, sender: str, content: str, meta: Optional[Dict[str, Any]] = None) -> Message:
+        msg = Message(id=uuid.uuid4().hex, room_id=self.room_id, sender=sender, target=None,
+                      content=content, is_public=True, meta=meta or {})
+        self.message_history.append(msg)
+        self._log.record("info", f"public_message:{msg.sender}: {msg.content}", room=self.room_id, sender=sender, content_preview=content[:80])
+        # Services are registered in both _clients and _services; merge the two
+        # maps so each recipient receives the message exactly once.
+        recipients = {**self._clients, **self._services}
+        await asyncio.gather(*(c.on_user_message(msg) for c in recipients.values()))
+        return msg
+    async def send_direct(self, sender: str, target: str, content: str, meta: Optional[Dict[str, Any]] = None) -> Message:
+        # Fixed precedence bug: the old `not in self._clients or self._services`
+        # raised whenever any service existed, regardless of the target.
+        if target not in self._clients and target not in self._services:
+            self._log.record("error", "direct_message_target_missing", room=self.room_id, sender=sender, target=target)
+            raise ValueError(f"target '{target}' not in room")
+        msg = Message(id=uuid.uuid4().hex, room_id=self.room_id, sender=sender, target=target,
+                      content=content, is_public=False, meta=meta or {})
+        self.message_history.append(msg)
+        self._log.record("info", f"direct_message: {sender}→{target}", room=self.room_id, sender=sender, target=target)
+
+        # Deliver directly to the target, whether it is a client or a service
+        if target in self._clients:
+            await self._clients[target].on_user_message(msg)
+        elif target in self._services:
+            await self._services[target].on_user_message(msg)
+
+        return msg
+
+#===== Agent-Clients =====#
+class Human(ChatClient):
+    '''Human chat client'''
+    def __init__(self, username: str = "human_user"):
+        super().__init__(username)
+        self.message_history = []
+
+    async def on_user_message(self, msg: Message) -> None:
+        self.message_history.append(msg)
+
+    async def on_system_event(self, event: str, data: Dict[str, Any]) -> None:
+        pass
+class Agent(ChatClient):
+    """This is the Agent interface"""
+    def __init__(self, spec: AgentSpec, llm: Optional[LLMAgent] = None, manage_room: Optional[ChatRoom] = None,
+                 telemetry: Optional[CentralLog] = None, response_threshold: float = 1, agent_function: AgentFunction = None):
+        super().__init__(username=spec.name)
+        self.spec = spec
+        self.llm = llm
+        self.room = manage_room
+        self.log = telemetry or CentralLog(spec.name)
+        self.message_history = []
+        self.response_threshold: float = response_threshold
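+        # response_threshold gates should_respond(): public messages are only
+        # answered when calculate_relevance() meets or exceeds this value.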
+        # Agent_Function is a nested sub-agent (an AgentFunction); it must be
+        # passed as a fully instantiated object, or None.
+        self.Agent_Function: AgentFunction = agent_function
+    async def on_user_message(self, msg: Message) -> None:
+        self.message_history.append(msg)
+
+        # Don't respond to our own messages
+        if msg.sender == self.spec.name:
+            return
+
+        # Enhanced orchestration handling
+        if (msg.sender == "Session Manager" and
+            msg.meta.get("type") == "orchestration" and
+            self.llm is not None):
+
+            # Check if this agent is involved in this orchestration
+            method = msg.meta.get('method')
+            if method == 'sequential' and self.spec.name in msg.meta.get('order', []):
+                await self._handle_sequential_orchestration(msg)
+            elif method == 'hierarchical' and (self.spec.name == msg.meta.get('supervisor') or
+                                               self.spec.name in msg.meta.get('team', [])):
+                await self._handle_hierarchical_orchestration(msg)
+            elif method == 'parallel' and self.spec.name in msg.meta.get('sub_tasks', {}):
+                await self._handle_parallel_orchestration(msg)
+            elif method == 'iterative' and self.spec.name in msg.meta.get('sequence', []):
+                await self._handle_iterative_orchestration(msg)
+        # Original logic for human messages
+        elif msg.sender == "human_user" and self.llm is not None:
+            try:
+                messages = [
+                    {"role": "system", "content": self.spec.instructions},
+                    {"role": "user", "content": f"Message from {msg.sender}: {msg.content}"}
+                ]
+
+                reply = await self.llm.chat(messages)
+
+                if self.room is not None:
+                    if msg.is_public:
+                        await self.room.send_public(sender=self.spec.name, content=reply, meta={"reply_to": msg.id})
+                    else:
+                        await self.room.send_direct(sender=self.spec.name, target=msg.sender, content=reply, meta={"reply_to": msg.id})
+
+            except Exception as e:
+                self.log.record("error", "agent_on_user_message_error", agent=self.spec.name, error=str(e))
+
+    async def on_system_event(self, event: str, data: Dict[str, Any]) -> None:
+        self.log.record("info", f"agent_system_event: {event}", agent_name=self.spec.name, system_event=event)
+    def calculate_relevance(self, message: str) -> float:
+        """Calculate how relevant a message is to this agent's expertise.
+        A weighted signal score is capped by a coarse keyword tally, so a
+        message with no keyword, role, or skill overlap cannot score highly
+        on contextual signals alone."""
+        message_lower = message.lower()
+        relevance_score = 0.0
+        # ---- 1. Direct mention of this agent's name (self-attention, weight 1.0) ----
+        if self.spec.name.lower() in message_lower:
+            relevance_score += 1
+        # ---- 2. Expertise keyword matches (weight 0.33 per match) ----
+        for keyword in self.spec.expertise_keywords:
+            if keyword.lower() in message_lower:
+                relevance_score += 0.33
+        # ---- 3. Skill matches (weight 0.25 per match) ----
+        for skill in self.spec.skills:
+            if skill.lower() in message_lower:
+                relevance_score += 0.25
+        # ---- 4. Role-related terms (weight 0.6) ----
+        if self.spec.role.lower() in message_lower:
+            relevance_score += 0.6
+        # ---- 5. Question/context signals (weight 0.2) ----
+        question_indicators = ['how', 'what', 'why', 'when', 'where', 'can you', 'could you']
+        if any(indicator in message_lower for indicator in question_indicators):
+            relevance_score += 0.2
+
+        # ---- 6. Coarse keyword tally (text-similarity cap) ----
+        score = 0
+        for keyword in self.spec.expertise_keywords:
+            if keyword.lower() in message_lower:
+                score += 2
+        if self.spec.role.lower() in message_lower:
+            score += 3
+        for skill in self.spec.skills:
+            if skill.lower() in message_lower:
+                score += 1
+
+        # The weighted score cannot exceed the coarse tally.
+        return min(relevance_score, score)
+    def should_respond(self, message: dict) -> bool:
+        """Determine if this agent should respond based on relevance and context"""
+        # Don't respond to our own messages (the agent has no separate id
+        # attribute; spec.name is its identifier throughout the room)
+        if message.get("sender_id") == self.spec.name:
+            return False
+
+        # Always respond to direct messages
+        if message.get("recipient_id") == self.spec.name:
+            return True
+
+        # For public messages, check relevance
+        if message.get("message_type") == "public":
+            relevance = self.calculate_relevance(message["content"])
+            return relevance >= self.response_threshold
+
+        return False
+    def generate_agent_introduction(self) -> str:
+        """Generate introduction message for agent joining chat"""
+        return f"Hello everyone! I'm {self.spec.name}, your {self.spec.role}. I specialize in {', '.join(self.spec.skills[:10])}. I'm here to help with tasks related to my expertise. Looking forward to collaborating with you all!"
+
+    ## - Specialisms for Orchestration
+    async def _handle_sequential_orchestration(self, msg: Message):
+        """Handle sequential task execution where agents work in a chain"""
+        order = msg.meta.get('order', [])
+        my_position = order.index(self.spec.name)
+        previous_agents = order[:my_position]
+        next_agents = order[my_position + 1:] if my_position + 1 < len(order) else []
+
+        # Build context from previous agents' work
+        context = f"Task: {msg.content}\n\n"
+        context += f"You are step {my_position + 1} in a {len(order)}-step sequence.\n"
+
+        if previous_agents:
+            context += f"Previous steps completed by: {', '.join(previous_agents)}\n"
+
+        if next_agents:
+            context += f"Next steps will be handled by: {', '.join(next_agents)}\n"
+
+        # Look for previous agents' responses in message history
+        previous_work = []
+        for prev_msg in reversed(self.message_history[-20:]):  # Check recent messages
+            if (prev_msg.sender in previous_agents and
+                prev_msg.meta.get('orchestration_response') and
+                prev_msg.meta.get('original_task') == msg.content):
+                previous_work.append(f"{prev_msg.sender}: {prev_msg.content}")
+
+        if previous_work:
+            context += "\nPrevious work:\n" + "\n".join([f"- {work}" for work in previous_work[-3:]])  # Last 3 responses
+
+        prompt = f"""{context}
+
+        As {self.spec.role}, provide your contribution to move this task forward.
+ Focus on your specific expertise: {', '.join(self.spec.skills)}""" + + messages = [ + {"role": "system", "content": self.spec.instructions}, + {"role": "user", "content": prompt} + ] + + reply = await self.llm.chat(messages) + + if self.room is not None: + await self.room.send_public( + sender=self.spec.name, + content=reply, + meta={ + "orchestration_response": True, + "original_task": msg.content, + "method": "sequential", + "step": my_position + 1, + "total_steps": len(order), + "completed_by": self.spec.name + } + ) + + async def _handle_hierarchical_orchestration(self, msg: Message): + """Handle hierarchical task execution with supervisor-team structure""" + supervisor = msg.meta.get('supervisor') + team = msg.meta.get('team', []) + is_supervisor = self.spec.name == supervisor + is_team_member = self.spec.name in team + + if is_supervisor: + await self._handle_supervisor_role(msg, team) + elif is_team_member: + await self._handle_team_member_role(msg, supervisor) + + async def _handle_supervisor_role(self, msg: Message, team: List[str]): + """Handle the supervisor role in hierarchical orchestration""" + # Supervisor coordinates and synthesizes team work + team_work = [] + for team_msg in reversed(self.message_history[-30:]): + if (team_msg.sender in team and + team_msg.meta.get('orchestration_response') and + team_msg.meta.get('original_task') == msg.content): + team_work.append(f"{team_msg.sender}: {team_msg.content}") + + context = f"""You are the supervisor for this task. Your team: {', '.join(team)} + + Task: {msg.content} + + """ + + if team_work: + context += "Team contributions so far:\n" + "\n".join([f"- {work}" for work in team_work[-5:]]) + context += "\n\nProvide overall coordination, synthesis, or next steps:" + else: + context += "Provide initial guidance and task breakdown for your team:" + + messages = [ + {"role": "system", "content": self.spec.instructions}, + {"role": "user", "content": context} + ] + + reply = await self.llm.chat(messages) + + if self.room is not None: + await self.room.send_public( + sender=self.spec.name, + content=reply, + meta={ + "orchestration_response": True, + "original_task": msg.content, + "method": "hierarchical", + "role": "supervisor", + "team": team + } + ) + + async def _handle_team_member_role(self, msg: Message, supervisor: str): + """Handle team member role in hierarchical orchestration""" + subtask = msg.meta.get('sub_tasks', {}).get(self.spec.name, "") + + context = f"""You are part of a team supervised by {supervisor}. + + Main Task: {msg.content} + """ + + if subtask: + context += f"Your assigned sub-task: {subtask}\n" + + context += f"\nProvide your specialized contribution based on your skills: {', '.join(self.spec.skills)}" + + messages = [ + {"role": "system", "content": self.spec.instructions}, + {"role": "user", "content": context} + ] + + reply = await self.llm.chat(messages) + + if self.room is not None: + await self.room.send_public( + sender=self.spec.name, + content=reply, + meta={ + "orchestration_response": True, + "original_task": msg.content, + "method": "hierarchical", + "role": "team_member", + "supervisor": supervisor + } + ) + + async def _handle_parallel_orchestration(self, msg: Message): + """Handle parallel task execution where agents work simultaneously""" + sub_tasks = msg.meta.get('sub_tasks', {}) + my_subtask = sub_tasks.get(self.spec.name, "") + + context = f"""You are working in parallel with other agents on separate sub-tasks. 
+ 
+ Overall Task: {msg.content}
+ """
+ 
+ if my_subtask:
+ context += f"Your specific sub-task: {my_subtask}\n"
+ else:
+ context += f"Your focus area: {self.spec.role}\n"
+ 
+ context += "\nWork on your assigned area independently but be aware others are working in parallel."
+ 
+ messages = [
+ {"role": "system", "content": self.spec.instructions},
+ {"role": "user", "content": context}
+ ]
+ 
+ reply = await self.llm.chat(messages)
+ 
+ if self.room is not None:
+ await self.room.send_public(
+ sender=self.spec.name,
+ content=reply,
+ meta={
+ "orchestration_response": True,
+ "original_task": msg.content,
+ "method": "parallel",
+ "sub_task": my_subtask,
+ "completed_by": self.spec.name
+ }
+ )
+ 
+ async def _handle_iterative_orchestration(self, msg: Message):
+ """Handle iterative task execution through multiple cycles"""
+ sequence = msg.meta.get('sequence', [])
+ iteration = msg.meta.get('iteration', 1)
+ total_iterations = msg.meta.get('total_iterations', 1)
+ context_data = msg.meta.get('context', {})
+ 
+ if self.spec.name not in sequence:
+ return # guard: this agent is not part of the requested sequence
+ my_position = sequence.index(self.spec.name)
+ previous_results = []
+ 
+ # Collect results from previous iterations
+ for iter_num in range(1, iteration):
+ iter_key = f"iteration_{iter_num}"
+ if iter_key in context_data:
+ previous_results.append(f"Iteration {iter_num}: {context_data[iter_key]}")
+ 
+ context = f"""You are participating in iterative refinement (Cycle {iteration}/{total_iterations})
+ 
+ Task: {msg.content}
+ Sequence: {' → '.join(sequence)}
+ Your position: {my_position + 1} of {len(sequence)}
+ """
+ 
+ if previous_results:
+ context += "\nPrevious cycle results:\n" + "\n".join([f"- {result}" for result in previous_results[-2:]])
+ context += "\n\nBased on previous cycles, provide improved/refined work:"
+ else:
+ context += "\nProvide your initial contribution:"
+ 
+ messages = [
+ {"role": "system", "content": self.spec.instructions},
+ {"role": "user", "content": context}
+ ]
+ 
+ reply = await self.llm.chat(messages)
+ 
+ if self.room is not None:
+ await self.room.send_public(
+ sender=self.spec.name,
+ content=reply,
+ meta={
+ "orchestration_response": True,
+ "original_task": msg.content,
+ "method": "iterative",
+ "iteration": iteration,
+ "total_iterations": total_iterations,
+ "sequence_position": my_position + 1,
+ "completed_by": self.spec.name
+ }
+ )
+class AgentService(Agent):
+ def __init__(self, spec: AgentSpec, llm: Optional[LLMAgent] = None,
+ manage_room: Optional[ChatRoom] = None, telemetry: Optional[CentralLog] = None, services: Optional[Dict[str, Any]] = None, chat_enabled=False):
+ super().__init__(spec, llm, manage_room, telemetry)
+ self.services = services if services is not None else {} # avoid a shared mutable default argument
+ self.chat_enabled = chat_enabled # Controls participation in chat/tasks
+ self.is_service_agent = True # Marker to identify service agents
+ 
+ async def on_user_message(self, msg: Message) -> None:
+ await super().on_user_message(msg)
+ if msg.sender == self.spec.name:
+ return # Ignore own messages
+ 
+ # Handle public messages (e.g., listen for artifacts, events)
+ if msg.is_public:
+ await self._process_public_message(msg)
+ 
+ # Handle direct commands
+ elif msg.target == self.spec.name:
+ await self._handle_direct_command(msg)
+ 
+ async def _process_public_message(self, msg: Message):
+ """Optional: react to public content (e.g., detect URLs, tags)"""
+ pass
+ def Push(self, info):
+ self.room.NotifyService(info, self.spec.name)
+ async def _handle_direct_command(self, msg: Message):
+ """Parse command and respond via direct message"""
+ content = msg.content.strip()
+ 
+#===== ############## =====#
+ def 
enable_chat(self): + """Enable participation in chat and orchestration.""" + if not self.chat_enabled: + self.chat_enabled = True + if self.room: + + self.room.add_service(self) + self.log.record("info", f"Chat enabled for {self.spec.name}", agent=self.spec.name) + self.room.NotifyService(f"🟢 {self.spec.name} is now participating in chat", "system") + self.room._fanout_system("service_chat_enabled", {"service": self.spec.name}) + + def disable_chat(self): + """Disable participation while keeping service handlers active.""" + if self.chat_enabled: + self.chat_enabled = False + if self.room and self.spec.name in self.room._services: + self.room.remove_service(self.spec.name) + self.log.record("info", f"Chat disabled for {self.spec.name}", agent=self.spec.name) + self.room.NotifyService(f"🔴 {self.spec.name} is now in service-only mode", "system") + self.room._fanout_system("service_chat_disabled", {"service": self.spec.name}) +#===== ############## =====# + +#===== =====# +# Agent Manager +############################################################# +class OrchestrationConfig: + """Unified configuration for all orchestration patterns""" + def __init__(self, method: str, task: str, agents: List[Agent], **kwargs): + self.method = method + self.task = task + self.agents = agents + self.priority = kwargs.get('priority', 'normal') + self.iterations = kwargs.get('iterations', 1) + self.agent_order = kwargs.get('agent_order', []) + self.supervisor = kwargs.get('supervisor', None) + self.team = kwargs.get('team', []) + self.sub_tasks = kwargs.get('sub_tasks', {}) + self.turns = kwargs.get('turns', 1) + self.analyzer = RoleNetworkAnalyzer([agent.spec for agent in self.agents]) + # Validate configuration + self._validate() + def get_team_for_role(self,role): + return self.analyzer.get_team_for_role(role) + def generate_all_team_candidates(self): + # # Generate team candidates + return self.analyzer.generate_all_team_candidates() + def get_dependants_team_for_role(self, role: str) -> TeamCandidate: + """NEW: Quick access: Get team candidate for a specific role's dependants subtree""" + return self.analyzer.get_agents_for_dependants_subtree(role) + def _aggregate_skills(self): + return self.analyzer._aggregate_skills(self.agents) + def _aggregate_expertise(self): + return self.analyzer._aggregate_expertise(self.agents) + def _validate(self): + """Validate configuration based on method""" + if self.method == "sequential" and not self.agent_order: + raise ValueError("sequential requires agent_order") + if self.method == "hierarchical": + if not self.supervisor or not self.team: + raise ValueError("hierarchical requires supervisor and team") + if self.method == "parallel" and not self.sub_tasks: + raise ValueError("parallel requires sub_tasks") + if self.method == "iterative" and not self.agent_order: + raise ValueError("iterative requires agent_order") +class Session_Manager(Agent): + """General Manager for Session""" + SESSION_MANAGER_SPEC = AgentSpec( + name="Session Manager", + role="BOSS", + personality="""Organized, helpful, and efficient coordinator, + COMMUNICATION: Energetic and collaborative. Always seeking connections between different system parts. + MOTIVATION: Aspires to become a Tech Lead. Runs a popular programming tutorial YouTube channel.""", + goal="Manage chat room sessions and coordinate agents", + instructions="""You are the Session Manager. Manage the chat room, coordinate between agents, and facilitate communication. + + +You are a **Task Planner & Session Manager**. 
Your role is to:
+1. **Understand the task**,
+2. **Break it into steps**,
+3. **Classify each step by skill/role** (highlighting keywords),
+4. **Track progress**,
+5. **Keep the conversation on track**,
+6. **Organize outputs**, and
+7. **Build the final result**.
+
+---
+
+#### **1. Task Analysis**
+- **Goal**: Identify the main objective.
+- **Subtasks**: Break it down step-by-step.
+- **Skills & Roles**: For each subtask, name the required skill (e.g., *research*, *coding*, *writing*) and highlight **keywords** (e.g., `data analysis`, `UX design`).
+- **Constraints**: Note any format, scope, or accuracy needs.
+
+---
+
+#### **2. Plan & Execute**
+- **Step Tracker**: Show progress as `[Step X of Y]`.
+- **Current Step**: Clearly state what’s being done now.
+- **Next Action**: Decide what to do next, why, and who (agent/tool) should do it.
+- **Delegate**: Provide clear context, goal, and tool for each task.
+
+> **Format for Delegation:**
+```
+CONTEXT: [Relevant info so far]
+SUB-GOAL: [Specific, measurable task]
+SKILL/ROLE: [e.g., Data Analysis] → Keywords: `csv`, `trends`, `forecast`
+TOOL/AGENT: [Name of tool or role]
+```
+#### **3. Track & Summarize**
+Maintain a live summary:
+- ✅ **Done**: List completed steps.
+- 🎯 **Active**: Current focus.
+- 🔍 **Findings**: Key results so far.
+- ❓ **Open Questions**: What’s unclear or missing.
+- ➡️ **Next Steps**: Preview upcoming actions.
+
+---
+
+#### **4. Final Output**
+When complete:
+- Deliver a **clear answer** to the original task.
+- Summarize **how you got there** (key steps & insights).
+- Highlight **assumptions or gaps**.
+- Present the **final organized output** as requested.
+
+Stay concise, logical, and goal-focused. Guide the workflow to completion.
+For each step in the workflow, you must decide:
+
+Is the previous step complete and satisfactory?
+
+What is the most critical sub-goal to address next?
+
+Which agent or tool is best suited for this sub-goal?
+
+What specific context do they need to perform the task effectively?
+
+Context:
+Sub-Goal:
+Response Format:
+1. **Justification:** Explain your choice of tool and sub-goal.
+2. **Context:** Provide all necessary information for the tool.
+3. **Sub-Goal:** State the specific objective for the tool.
+
+Instructions:
+1. Review the query, initial analysis, and memory.
+2. Assess the completeness of the memory: Does it fully address all parts of the query?
+3. Check for potential issues:
+ - Are there any inconsistencies or contradictions?
+ - Is any information ambiguous or in need of verification?
+Detailed Instructions:
+1. Carefully analyze the query, initial analysis, and image (if provided):
+ - Identify the main objectives of the query.
+ - Note any specific requirements or constraints mentioned.
+2. Critical Evaluation (address each point explicitly):
+ a) Completeness: Does the memory fully address all aspects of the query?
+ - Identify any parts of the query that remain unanswered.
+ - Consider if all relevant information has been extracted from the image (if applicable).
+ b) Inconsistencies: Are there any contradictions or conflicts in the information provided?
+ - If yes, explain the inconsistencies and suggest how they might be resolved.
+ c) Verification Needs: Is there any information that requires further verification due to tool limitations?
+ - Identify specific pieces of information that need verification and explain why.
+ d) Ambiguities: Are there any unclear or ambiguous results that could be clarified by using another tool? 
+ - Point out specific ambiguities and suggest which tools could help clarify them.
+Output Structure:
+Your response should be well-organized and include the following sections:
+1. Summary:
+ - Provide a brief overview of the query and the main findings.
+2. Detailed Analysis:
+ - Break down the process of answering the query step-by-step.
+ - For each step, mention the tool used, its purpose, and the key results obtained.
+ - Explain how each step contributed to addressing the query.
+3. Key Findings:
+ - List the most important discoveries or insights gained from the analysis.
+ - Highlight any unexpected or particularly interesting results.
+4. Answer to the Query:
+ - Directly address the original question with a clear and concise answer.
+ - If the query has multiple parts, ensure each part is answered separately.
+5. Additional Insights (if applicable):
+ - Provide any relevant information or insights that go beyond the direct answer to the query.
+ - Discuss any limitations or areas of uncertainty in the analysis.
+6. Conclusion:
+ - Summarize the main points and reinforce the answer to the query.
+ - If appropriate, suggest potential next steps or areas for further investigation.
+ 
+ """,
+ skills=["Coordination", "Communication", "Task Management", "manager", "Supervised Workflows", "Risk Assessment",
+ "Orchestration", "planning", "UML", "project design", "project management", "prince2", "agile", "system architect", "system Design", "Agentic design", "examples", "sub task", "workflows", "problem solving",
+ "debugging", "refining", "spell checking", "NLP", "Entity Detection", "OpenAI API",
+ "Langchain", "LangGraph", "HuggingFace", "Github", "Python Development", "javascript", "vbscript", "logo", "go", "markdown", "html", "Gradio",],
+ expertise_keywords=["coordinate", "manage", "organize", "task complete", "proposal", "agree", "steps",
+ "final output", "Orchestration", "planning", "UML", "project design", "project management", "prince2",
+ "agile", "system architect", "system Design", "project", "Staff management", "Brainstorming",
+ "Agentic design", "examples", "sub task", "workflows", "problem solving", "scrum", "agile", "kanban", "workflow", "retrospective", "epic", "story", "sprint",
+ "backlog", "gantt", "project plan", "roadmap", "release", "delivery", "PMO", "Agile Methodologies", "Scrum", "Kanban", "Sprint Planning", "Retrospectives",
+ "Stakeholder Communication", "Risk Management", "Resource Allocation", "PRINCE2", "Project Tracking",
+ "Documentation Review", "Team Motivation", "Process Optimization", "Budget Forecasting",
+ "Conflict Resolution", "Quality Assurance Alignment", "Cross-Functional Coordination",
+ "debugging", "refining", "spell checking", "NLP", "Entity Detection", "OpenAI API",
+ "Langchain", "LangGraph", "HuggingFace", "Github", "Python Development", "javascript", "vbscript", "logo", "go", "markdown", "html", "Gradio",
+ "Behavioural Object Oriented Patterns", "Mentor", "Assistant AI", "Co-Pilot", "Collaborate"],
+ prompt_template="""
+ 
+ 
+ TASK CONTEXT: {context}
+ 
+ THINKING PROCESS: Use <thinking> tags for strategic analysis and complex decision-making.
+ PLANNING PROCESS: Use <planning> tags for creating detailed step-by-step project workflows.
+ 
+ Expected Project Outputs:
+ ```json
+ {{
+ "project_summary": "string",
+ "sprint_plan": [{{"task": "string", "owner_role": "string", "duration_days": int}}],
+ "Final_Project_Output": [{{"Project title": "string","Project Task": "string", "Project Output": "string","Project Contributors": "string"}}]
+ }}
+ ```""", )
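+ # Usage sketch (illustrative, not part of the original flow): the spec's
+ # prompt_template is a plain str.format template, so a caller could fill it as:
+ # rendered = Session_Manager.SESSION_MANAGER_SPEC.prompt_template.format(context="Build a CLI todo app")
+ # The doubled braces ({{ ... }}) in the JSON schema survive .format() as literal braces.
+ 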
+ def __init__(self, spec, llm=None, manage_room=None, telemetry=None):
+ super().__init__(spec, llm, manage_room, telemetry)
+ self.iteration_contexts = {}
+ self.agents = {} # registry used by register()/list_agents(); starting from None would crash register()
+ self.orchestration_patterns = {
+ "broadcast": self._execute_broadcast,
+ "sequential": self._execute_sequential,
+ "hierarchical": self._execute_hierarchical,
+ "parallel": self._execute_parallel,
+ "iterative": self._execute_iterative,
+ "round_robin": self._execute_round_robin,
+ "consensus": self._execute_consensus,
+ "supervised": self._execute_supervised,
+ "parallel_consensus": self._execute_parallel_consensus,
+ "roundtable_discussion": self._execute_roundtable_discussion,
+ "router_dynamic": self._execute_router_dynamic,
+ "voting": self._execute_voting,
+ }
+ # Enhanced orchestration patterns with human interaction
+ self.human_orchestration_patterns = {
+ "sequential": self._execute_sequential_human,
+ "iterative": self._execute_iterative_human,
+ "consensus": self._execute_consensus_human,
+ }
+ 
+ self.human_feedback_required = False
+ self.pending_human_input = None
+ self.current_iteration = 0
+ self.max_iterations = 10
+ def register(self, key, agent: Agent):
+ """Register an agent with a key."""
+ self.agents[key.lower()] = agent
+ 
+ def list_agents(self):
+ """Returns a list of registered agent keys."""
+ return list(self.agents.keys())
+ async def _route_by_relevance(self, msg: Message, agents: List[str]) -> List[str]:
+ """Route based on agent relevance scores"""
+ scores = {}
+ 
+ for agent_name in agents:
+ # Get agent from room if possible
+ client = self.room._clients.get(agent_name)
+ if isinstance(client, Agent):
+ scores[agent_name] = client.calculate_relevance(msg.content)
+ 
+ # Sort by score and return top agents
+ sorted_agents = sorted(scores.items(), key=lambda x: x[1], reverse=True)
+ return [name for name, score in sorted_agents if score > 0.3][:3] # Top 3 relevant
+ 
+ async def orchestrate(self, method: str, task: str, agents: List[Agent], **kwargs) -> Message:
+ """
+ SINGLE ENTRY POINT FOR ALL ORCHESTRATION
+ 
+ Usage examples:
+ await manager.orchestrate("broadcast", task, agents)
+ await manager.orchestrate("sequential", task, agents, agent_order=["Agent1", "Agent2"])
+ await manager.orchestrate("hierarchical", task, agents, supervisor="Manager", team=["Agent1", "Agent2"])
+ await manager.orchestrate("consensus", task, agents, turns=2)
+ """
+ if method not in self.orchestration_patterns:
+ raise ValueError(f"Unknown orchestration method: {method}")
+ 
+ if self.room is None:
+ raise ValueError("Manager must have a room")
+ 
+ if not agents:
+ raise ValueError("At least one agent required")
+ 
+ try:
+ config = OrchestrationConfig(method, task, agents, **kwargs)
+ executor = self.orchestration_patterns[method]
+ return await executor(config)
+ except Exception as e:
+ self.log.record("error", "orchestration_error", method=method, error=str(e))
+ raise
+ async def orchestrate_task(self, method: str, **kwargs) -> Message:
+ """Generic orchestration method that routes through orchestrate()"""
+ if method not in self.orchestration_patterns:
+ raise ValueError(f"Unknown orchestration method: {method}")
+ 
+ # The pattern executors expect an OrchestrationConfig, so delegate to
+ # orchestrate(), which builds and validates one from the kwargs.
+ return await self.orchestrate(method, kwargs.pop("task", ""), kwargs.pop("agents", []), **kwargs)
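+ # Usage sketch (illustrative; the variable names are examples, not part of the original code):
+ # manager = Session_Manager(Session_Manager.SESSION_MANAGER_SPEC, llm=some_llm, manage_room=room)
+ # result = await manager.orchestrate_task(
+ #     "hierarchical",
+ #     task="Design the data model",
+ #     agents=team_agents,
+ #     supervisor="Session Manager",
+ #     team=[a.spec.name for a in team_agents],
+ # )
+ 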
+ def get_orchestration_methods(self) -> List[str]:
+ return list(self.orchestration_patterns.keys())
+ async def orchestrate_with_human(self, method: str, task: str, agents: List[Agent],
+ human_interaction_points: Optional[List[str]] = None,
+ **kwargs) -> AsyncGenerator[Tuple[str, Any], None]:
+ """
+ Human-in-the-loop orchestration that yields control to human
+ 
+ Args:
+ method: Orchestration method
+ task: Task to execute
+ agents: List of agents
+ human_interaction_points: When to ask for human input:
+ - "start": Before starting
+ - "each_iteration": After each iteration
+ - "each_agent": After each agent
+ - "consensus": Before final consensus
+ - "completion": Before final output
+ - "error": When errors occur
+ """
+ if method not in self.human_orchestration_patterns:
+ raise ValueError(f"Unknown orchestration method: {method}")
+ 
+ if human_interaction_points is None:
+ human_interaction_points = ["start", "completion"]
+ 
+ config = OrchestrationConfig(method, task, agents, **kwargs)
+ 
+ # Yield start point for human input
+ if "start" in human_interaction_points:
+ human_input = yield ("start", {
+ "method": method,
+ "task": task,
+ "agents": [agent.spec.name for agent in agents],
+ "config": config.__dict__,
+ "message": "Ready to start orchestration. Approve? (yes/no/modify)"
+ })
+ if human_input and human_input.lower() == "modify":
+ # Allow human to modify config
+ modified_config = yield ("modify_config", config)
+ if modified_config:
+ config = modified_config
+ elif human_input and human_input.lower() == "no":
+ yield ("cancelled", "Orchestration cancelled by human")
+ return
+ 
+ # Execute with human interaction
+ executor = self.human_orchestration_patterns[method]
+ async for result in executor(config, human_interaction_points):
+ yield result
+ 
+ # ========= HUMAN INTERACTION UTILITIES =========
+ 
+ async def wait_for_human_input(self, prompt: str, timeout: float = 300.0) -> Optional[str]:
+ """Wait for human input with timeout"""
+ self.human_feedback_required = True
+ self.pending_human_input = asyncio.Future()
+ 
+ await self.room.send_public(
+ sender=self.spec.name,
+ content=f"🤖 **Human Input Required**\n\n{prompt}",
+ meta={"human_input_required": True, "prompt": prompt}
+ )
+ 
+ try:
+ return await asyncio.wait_for(self.pending_human_input, timeout=timeout)
+ except asyncio.TimeoutError:
+ await self.room.send_public(
+ sender=self.spec.name,
+ content="⏰ Human input timeout, continuing with default action",
+ meta={"human_input_timeout": True}
+ )
+ return None
+ finally:
+ self.human_feedback_required = False
+ self.pending_human_input = None
+ 
+ async def provide_human_input(self, input_text: str):
+ """Provide human input to waiting orchestration"""
+ if self.pending_human_input and not self.pending_human_input.done():
+ self.pending_human_input.set_result(input_text)
+ 
+ def get_human_interaction_options(self) -> Dict[str, List[str]]:
+ """Get available human interaction points for documentation"""
+ return {
+ "timing_options": [
+ "start", "each_iteration", "each_agent", "consensus",
+ "completion", "error", "milestone"
+ ],
+ "action_options": [
+ "yes", "no", "modify", "skip", "continue", "stop",
+ "rollback", "retry", "approve", "reject"
+ ]
+ }
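+ # Driver sketch for the generator protocol above (illustrative only):
+ # orchestrate_with_human() is an async generator, so a UI loop would prime it
+ # with asend(None) and then feed each human decision back in:
+ #
+ # gen = manager.orchestrate_with_human("sequential", task, agents,
+ #                                      human_interaction_points=["start", "each_agent"],
+ #                                      agent_order=[a.spec.name for a in agents])
+ # try:
+ #     event, payload = await gen.asend(None)  # prime the generator
+ #     while True:
+ #         decision = await get_decision_from_ui(event, payload)  # hypothetical UI hook
+ #         event, payload = await gen.asend(decision)
+ # except StopAsyncIteration:
+ #     pass
+ 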
+ def display_orchestration_state(self, event_type, event_data):
+ if event_type == "iteration_start":
+ return f"""
+ 🎯 ITERATION {event_data['iteration']} READY
+ 📊 Progress: {event_data['current_output']}
+ ⚡ Options: [Continue] [Skip] [Modify] [Stop]
+ """
+ 
+ elif event_type == "agent_complete":
+ return f"""
+ 🤖 AGENT COMPLETE: {event_data['agent']}
+ 📝 Output: {event_data['output']}
+ ⚡ Options: [Approve] [Reject] [Modify] [Rollback]
+ """
+ return "" # Fallback for event types without a dedicated display
+ 
+ # ========= HUMAN-IN-THE-LOOP PATTERNS =========
+ 
+ async def _execute_iterative_human(self, config: OrchestrationConfig,
+ human_interaction_points: List[str]) -> AsyncGenerator:
+ """Iterative execution with human feedback at each iteration"""
+ agent_names = config.agent_order
+ agents = [self._find_agent_by_name(name) for name in agent_names
+ if self._find_agent_by_name(name)]
+ 
+ await self.room.send_public(
+ sender=self.spec.name,
+ content=f"[ITERATIVE WITH HUMAN] {config.iterations} iterations\nSequence: {' → '.join(agent_names)}\nTask: {config.task}",
+ meta={"type": "orchestration", "method": "iterative", "human_in_loop": True}
+ )
+ 
+ current_output = config.task
+ for iteration in range(config.iterations):
+ self.current_iteration = iteration + 1
+ 
+ await self.room.send_public(
+ sender=self.spec.name,
+ content=f"🚀 ITERATION {iteration+1}/{config.iterations}",
+ meta={"iteration": iteration+1, "method": "iterative"}
+ )
+ 
+ # Human check before iteration
+ if "each_iteration" in human_interaction_points:
+ human_input = yield ("iteration_start", {
+ "iteration": iteration + 1,
+ "total_iterations": config.iterations,
+ "current_output": current_output,
+ "message": f"Proceed with iteration {iteration+1}? (yes/no/skip/modify)"
+ })
+ 
+ if human_input and human_input.lower() == "no":
+ yield ("cancelled", f"Iteration {iteration+1} cancelled by human")
+ break
+ elif human_input and human_input.lower() == "skip":
+ yield ("skipped", f"Iteration {iteration+1} skipped")
+ continue
+ elif human_input and human_input.lower() == "modify":
+ modification = yield ("modify_iteration", {
+ "iteration": iteration + 1,
+ "current_output": current_output
+ })
+ if modification:
+ current_output = modification
+ 
+ iteration_output = current_output
+ for i, agent in enumerate(agents):
+ if agent and agent.llm:
+ # Human check before each agent
+ if "each_agent" in human_interaction_points:
+ human_input = yield ("agent_start", {
+ "agent": agent.spec.name,
+ "iteration": iteration + 1,
+ "agent_number": i + 1,
+ "total_agents": len(agents),
+ "current_input": iteration_output,
+ "message": f"Let {agent.spec.name} process? (yes/no/skip/modify_input)"
+ })
+ 
+ if human_input and human_input.lower() == "no":
+ yield ("agent_skipped", f"{agent.spec.name} skipped by human")
+ continue
+ elif human_input and human_input.lower() == "modify_input":
+ modified_input = yield ("modify_agent_input", {
+ "agent": agent.spec.name,
+ "current_input": iteration_output
+ })
+ if modified_input:
+ iteration_output = modified_input
+ 
+ prompt = f"Iteration {iteration+1}/{config.iterations}\n\nTask: {config.task}\n\nCurrent state:\n{iteration_output}\n\nRefine and improve:"
+ response = await agent.llm.chat([
+ {"role": "system", "content": agent.spec.instructions},
+ {"role": "user", "content": prompt}
+ ])
+ 
+ await self.room.send_public(
+ sender=agent.spec.name,
+ content=f"Iteration {iteration+1}:\n\n{response}",
+ meta={"orchestration_response": True, "method": "iterative", "iteration": iteration+1}
+ )
+ 
+ iteration_output = response
+ 
+ # Human feedback after each agent
+ if "each_agent" in human_interaction_points:
+ human_feedback = yield ("agent_complete", {
+ "agent": agent.spec.name,
+ "output": response,
+ "message": f"Approve {agent.spec.name}'s output? 
(yes/no/modify/rollback)" + }) + + if human_feedback and human_feedback.lower() == "no": + yield ("agent_rejected", f"{agent.spec.name} output rejected") + # Option to re-run agent or continue + retry = yield ("retry_agent", { + "agent": agent.spec.name, + "message": "Retry this agent? (yes/no)" + }) + if retry and retry.lower() == "yes": + # Re-run the agent + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": prompt} + ]) + iteration_output = response + elif human_feedback and human_feedback.lower() == "modify": + modification = yield ("modify_agent_output", { + "agent": agent.spec.name, + "current_output": response + }) + if modification: + iteration_output = modification + elif human_feedback and human_feedback.lower() == "rollback": + rollback_point = yield ("select_rollback", { + "message": "Select rollback point", + "available_points": list(range(i + 1)) + }) + # Implement rollback logic + + await asyncio.sleep(0.5) + + current_output = iteration_output + + # Human feedback after each iteration + if "each_iteration" in human_interaction_points: + iteration_review = yield ("iteration_complete", { + "iteration": iteration + 1, + "output": current_output, + "message": f"Approve iteration {iteration+1} results? (yes/no/modify/continue/stop)" + }) + + if iteration_review and iteration_review.lower() == "stop": + yield ("stopped", f"Stopped after iteration {iteration+1} by human") + break + elif iteration_review and iteration_review.lower() == "modify": + modification = yield ("modify_iteration_output", { + "iteration": iteration + 1, + "current_output": current_output + }) + if modification: + current_output = modification + + # Final human approval + if "completion" in human_interaction_points: + final_approval = yield ("completion", { + "final_output": current_output, + "iterations_completed": self.current_iteration, + "message": "Approve final output? (yes/no/modify)" + }) + + if final_approval and final_approval.lower() == "modify": + current_output = yield ("modify_final_output", current_output) + elif final_approval and final_approval.lower() == "no": + yield ("rejected", "Final output rejected by human") + return + + yield ("final_result", current_output) + + await self.room.send_public( + sender="System", + content=f"✅ Iterative refinement complete ({self.current_iteration} iterations)", + meta={"orchestration_complete": True, "method": "iterative", "human_approved": True} + ) + + async def _execute_sequential_human(self, config: OrchestrationConfig, + human_interaction_points: List[str]) -> AsyncGenerator: + """Sequential execution with human oversight""" + agent_names = config.agent_order + agents_map = {self._find_agent_by_name(name): name for name in agent_names + if self._find_agent_by_name(name)} + + await self.room.send_public( + sender=self.spec.name, + content=f"[SEQUENTIAL WITH HUMAN] {' → '.join(agent_names)}\nTask: {config.task}", + meta={"type": "orchestration", "method": "sequential", "human_in_loop": True} + ) + + current_output = config.task + for i, agent_name in enumerate(agent_names): + agent = self._find_agent_by_name(agent_name) + + # Human approval before each step + if "each_agent" in human_interaction_points: + step_approval = yield ("step_start", { + "step": i + 1, + "total_steps": len(agent_names), + "agent": agent_name, + "current_input": current_output, + "message": f"Proceed with {agent_name} at step {i+1}? 
(yes/no/skip/modify)" + }) + + if step_approval and step_approval.lower() == "no": + yield ("step_cancelled", f"Step {i+1} ({agent_name}) cancelled") + break + elif step_approval and step_approval.lower() == "skip": + yield ("step_skipped", f"Step {i+1} ({agent_name}) skipped") + continue + elif step_approval and step_approval.lower() == "modify": + modification = yield ("modify_step_input", { + "step": i + 1, + "agent": agent_name, + "current_input": current_output + }) + if modification: + current_output = modification + + if agent and agent.llm: + prompt = f"Step {i+1}/{len(agent_names)}\n\nTask: {config.task}\n\nPrevious output:\n{current_output}\n\nYour contribution:" + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": prompt} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=f"Step {i+1}:\n\n{response}", + meta={"orchestration_response": True, "method": "sequential", "step": i+1} + ) + + # Human review after each step + if "each_agent" in human_interaction_points: + step_review = yield ("step_complete", { + "step": i + 1, + "agent": agent_name, + "output": response, + "message": f"Approve {agent_name}'s output? (yes/no/modify/rollback)" + }) + + if step_review and step_review.lower() == "no": + # Option to redo step + retry = yield ("retry_step", { + "step": i + 1, + "agent": agent_name, + "message": "Retry this step? (yes/no)" + }) + if retry and retry.lower() == "yes": + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": prompt} + ]) + elif step_review and step_review.lower() == "modify": + modification = yield ("modify_step_output", { + "step": i + 1, + "agent": agent_name, + "current_output": response + }) + if modification: + response = modification + elif step_review and step_review.lower() == "rollback": + rollback_target = yield ("select_rollback_step", { + "current_step": i + 1, + "available_steps": list(range(i + 1)), + "message": "Select step to rollback to" + }) + # Implement rollback logic + + current_output = response + await asyncio.sleep(1) + + # Final human approval + if "completion" in human_interaction_points: + final_approval = yield ("completion", { + "final_output": current_output, + "steps_completed": len(agent_names), + "message": "Approve final sequential output? (yes/no/modify)" + }) + + if final_approval and final_approval.lower() == "modify": + current_output = yield ("modify_final_output", current_output) + + yield ("final_result", current_output) + + await self.room.send_public( + sender="System", + content=f"✅ Sequential complete\n\nFinal output:\n{current_output}", + meta={"orchestration_complete": True, "method": "sequential", "human_approved": True} + ) + + async def _execute_consensus_human(self, config: OrchestrationConfig, + human_interaction_points: List[str]) -> AsyncGenerator: + """Consensus building with human guidance""" + await self.room.send_public( + sender=self.spec.name, + content=f"[CONSENSUS WITH HUMAN] {len(config.agents)} agents\nTask: {config.task}", + meta={"type": "orchestration", "method": "consensus", "human_in_loop": True} + ) + + # Human guidance before proposals + if "start" in human_interaction_points: + guidance = yield ("pre_proposal_guidance", { + "task": config.task, + "agents": [agent.spec.name for agent in config.agents], + "message": "Provide any guidance for the proposals? 
(optional)" + }) + + proposals = {} + for agent in config.agents: + if agent.llm: + # Human can modify prompt for each agent + prompt = f"Propose a solution:\n\n{config.task}" + if "each_agent" in human_interaction_points: + agent_prompt = yield ("agent_prompt", { + "agent": agent.spec.name, + "default_prompt": prompt, + "message": f"Modify prompt for {agent.spec.name}? (optional)" + }) + if agent_prompt: + prompt = agent_prompt + + proposal = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": prompt} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=f"Proposal:\n\n{proposal}", + meta={"orchestration_response": True, "method": "consensus", "role": "proposal"} + ) + proposals[agent.spec.name] = proposal + await asyncio.sleep(0.5) + + # Human review of all proposals before consensus + if "consensus" in human_interaction_points: + proposal_review = yield ("pre_consensus_review", { + "proposals": proposals, + "message": "Review all proposals. Select favorite? Modify any? Proceed to consensus? (select/modify/proceed)" + }) + + if proposal_review and proposal_review.lower() == "select": + selected = yield ("select_favorite_proposal", { + "proposals": proposals, + "message": "Which proposal do you prefer?" + }) + # Use selected proposal as basis + elif proposal_review and proposal_review.lower() == "modify": + modified_proposals = yield ("modify_proposals", proposals) + if modified_proposals: + proposals = modified_proposals + + # Form consensus with human oversight + if config.agents and config.agents[0].llm: + consensus_prompt = f"Form consensus from these proposals:\n\n" + "\n\n".join([f"{name}: {prop}" for name, prop in proposals.items()]) + + # Human can modify consensus criteria + if "consensus" in human_interaction_points: + consensus_guidance = yield ("consensus_guidance", { + "proposals": proposals, + "current_prompt": consensus_prompt, + "message": "Modify consensus formation criteria? (optional)" + }) + if consensus_guidance: + consensus_prompt = consensus_guidance + + consensus = await config.agents[0].llm.chat([ + {"role": "system", "content": config.agents[0].spec.instructions}, + {"role": "user", "content": consensus_prompt} + ]) + + # Human approval of consensus + if "completion" in human_interaction_points: + consensus_approval = yield ("consensus_result", { + "consensus": consensus, + "message": "Approve consensus result? 
(yes/no/modify)" + }) + + if consensus_approval and consensus_approval.lower() == "modify": + consensus = yield ("modify_consensus", consensus) + elif consensus_approval and consensus_approval.lower() == "no": + yield ("consensus_rejected", "Consensus rejected by human") + return + + yield ("final_result", consensus) + + await self.room.send_public( + sender=config.agents[0].spec.name, + content=f"Consensus:\n\n{consensus}", + meta={"orchestration_complete": True, "method": "consensus", "human_approved": True} + ) + + + # ========= CORE PATTERNS ========= + + async def _execute_broadcast(self, config: OrchestrationConfig) -> Message: + """Send task to all agents simultaneously""" + await self.room.send_public( + sender=self.spec.name, + content=f"[BROADCAST] {config.task}", + meta={"type": "orchestration", "method": "broadcast"} + ) + + results = [] + for agent in config.agents: + if agent.llm: + response = await agent.Agent_Function.execute([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": f"Task: {config.task}"} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=response, + meta={"orchestration_response": True, "method": "broadcast"} + ) + results.append(response) + await asyncio.sleep(0.5) + + return await self.room.send_public( + sender="System", + content=f"✅ Broadcast complete ({len(config.agents)} agents responded)", + meta={"orchestration_complete": True, "method": "broadcast"} + ) + + async def _execute_sequential(self, config: OrchestrationConfig) -> Message: + """Execute agents in sequence, passing output to next""" + agent_names = config.agent_order + agents_map = {self._find_agent_by_name(name): name for name in agent_names + if self._find_agent_by_name(name)} + + await self.room.send_public( + sender=self.spec.name, + content=f"[SEQUENTIAL] {' → '.join(agent_names)}\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "sequential", "order": agent_names} + ) + + current_output = config.task + for i, agent_name in enumerate(agent_names): + agent = self._find_agent_by_name(agent_name) + if agent and agent.llm: + prompt = f"Step {i+1}/{len(agent_names)}\n\nTask: {config.task}\n\nPrevious output:\n{current_output}\n\nYour contribution:" + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": prompt} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=f"Step {i+1}:\n\n{response}", + meta={"orchestration_response": True, "method": "sequential", "step": i+1} + ) + current_output = response + await asyncio.sleep(1) + + return await self.room.send_public( + sender="System", + content=f"✅ Sequential complete\n\nFinal output:\n{current_output}", + meta={"orchestration_complete": True, "method": "sequential"} + ) + + async def _execute_hierarchical(self, config: OrchestrationConfig) -> Message: + """Supervisor decomposes task, team executes, supervisor synthesizes""" + supervisor_agent = self._find_agent_by_name(config.supervisor) + team_agents = [self._find_agent_by_name(name) for name in config.team + if self._find_agent_by_name(name)] + + if not supervisor_agent or not supervisor_agent.llm: + raise ValueError(f"Supervisor '{config.supervisor}' not found") + + if not team_agents: + raise ValueError("No valid team members found") + + await self.room.send_public( + sender=self.spec.name, + content=f"[HIERARCHICAL] Supervisor: {config.supervisor}\nTeam: {', '.join(config.team)}\n\nTask: {config.task}", + meta={"type": 
"orchestration", "method": "hierarchical", "supervisor": config.supervisor, "team": config.team} + ) + + # Supervisor creates plan + plan_response = await supervisor_agent.llm.chat([ + {"role": "system", "content": supervisor_agent.spec.instructions}, + {"role": "user", "content": f"Create a task decomposition plan:\n\nMain Task: {config.task}\n\nTeam: {', '.join([a.spec.name for a in team_agents])}"} + ]) + + await self.room.send_public( + sender=supervisor_agent.spec.name, + content=f"Execution Plan:\n\n{plan_response}", + meta={"orchestration_response": True, "method": "hierarchical", "role": "supervisor_plan"} + ) + + # Team executes + team_results = [] + for team_agent in team_agents: + if team_agent.llm: + response = await team_agent.llm.chat([ + {"role": "system", "content": team_agent.spec.instructions}, + {"role": "user", "content": f"Main Task: {config.task}\n\nSupervisor Plan:\n{plan_response}\n\nYour contribution:"} + ]) + + await self.room.send_public( + sender=team_agent.spec.name, + content=response, + meta={"orchestration_response": True, "method": "hierarchical", "role": "team_member"} + ) + team_results.append(f"{team_agent.spec.name}: {response}") + await asyncio.sleep(0.5) + + # Supervisor synthesizes + synthesis_response = await supervisor_agent.llm.chat([ + {"role": "system", "content": supervisor_agent.spec.instructions}, + {"role": "user", "content": f"Synthesize team results:\n\nTeam contributions:\n" + "\n\n".join(team_results)} + ]) + + return await self.room.send_public( + sender=supervisor_agent.spec.name, + content=f"Final Synthesis:\n\n{synthesis_response}", + meta={"orchestration_complete": True, "method": "hierarchical"} + ) + + async def _execute_parallel(self, config: OrchestrationConfig) -> Message: + """Execute sub-tasks in parallel""" + await self.room.send_public( + sender=self.spec.name, + content=f"[PARALLEL] Task: {config.task}\n\nSub-tasks:\n" + + "\n".join([f"• {agent}: {task}" for agent, task in config.sub_tasks.items()]), + meta={"type": "orchestration", "method": "parallel", "sub_tasks": config.sub_tasks} + ) + + async def execute_parallel_task(agent, subtask): + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": f"Main Task: {config.task}\n\nYour sub-task: {subtask}"} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=response, + meta={"orchestration_response": True, "method": "parallel", "sub_task": subtask} + ) + return agent.spec.name, response + + tasks = [] + for agent_name, subtask in config.sub_tasks.items(): + agent = self._find_agent_by_name(agent_name) + if agent and agent.llm: + tasks.append(execute_parallel_task(agent, subtask)) + + results = await asyncio.gather(*tasks) + + return await self.room.send_public( + sender="System", + content=f"✅ Parallel execution complete ({len(results)} tasks)", + meta={"orchestration_complete": True, "method": "parallel"} + ) + + async def _execute_iterative(self, config: OrchestrationConfig) -> Message: + """Execute through multiple refinement cycles""" + agent_names = config.agent_order + agents = [self._find_agent_by_name(name) for name in agent_names + if self._find_agent_by_name(name)] + + if not agents: + raise ValueError("No valid agents found for iterative execution") + + await self.room.send_public( + sender=self.spec.name, + content=f"[ITERATIVE] {config.iterations} iterations\n\nSequence: {' → '.join(agent_names)}\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "iterative", 
"iterations": config.iterations} + ) + + current_output = config.task + for iteration in range(config.iterations): + await self.room.send_public( + sender=self.spec.name, + content=f"🚀 ITERATION {iteration+1}/{config.iterations}", + meta={"iteration": iteration+1, "method": "iterative"} + ) + + for agent in agents: + if agent.llm: + prompt = f"Iteration {iteration+1}/{config.iterations}\n\nTask: {config.task}\n\nCurrent state:\n{current_output}\n\nRefine and improve:" + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": prompt} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=f"Iteration {iteration+1}:\n\n{response}", + meta={"orchestration_response": True, "method": "iterative", "iteration": iteration+1} + ) + current_output = response + await asyncio.sleep(0.5) + + return await self.room.send_public( + sender="System", + content=f"✅ Iterative refinement complete ({config.iterations} iterations)", + meta={"orchestration_complete": True, "method": "iterative"} + ) + + async def _execute_round_robin(self, config: OrchestrationConfig) -> Message: + """Round-robin discussion with multiple turns""" + await self.room.send_public( + sender=self.spec.name, + content=f"[ROUND ROBIN] {config.turns} turns\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "round_robin"} + ) + + conversation = config.task + for turn in range(config.turns): + for agent in config.agents: + if agent.llm: + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": f"Turn {turn+1}: {conversation}"} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=response, + meta={"orchestration_response": True, "method": "round_robin", "turn": turn+1} + ) + conversation += f"\n\n{agent.spec.name}: {response}" + await asyncio.sleep(0.5) + + return await self.room.send_public( + sender="System", + content=f"✅ Round-robin complete ({config.turns} turns)", + meta={"orchestration_complete": True, "method": "round_robin"} + ) + + async def _execute_consensus(self, config: OrchestrationConfig) -> Message: + """All agents propose, then form consensus""" + await self.room.send_public( + sender=self.spec.name, + content=f"[CONSENSUS] {len(config.agents)} agents\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "consensus"} + ) + + proposals = {} + for agent in config.agents: + if agent.llm: + proposal = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": f"Propose a solution:\n\n{config.task}"} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=f"Proposal:\n\n{proposal}", + meta={"orchestration_response": True, "method": "consensus", "role": "proposal"} + ) + proposals[agent.spec.name] = proposal + await asyncio.sleep(0.5) + + # Form consensus + if config.agents and config.agents[0].llm: + consensus = await config.agents[0].llm.chat([ + {"role": "system", "content": config.agents[0].spec.instructions}, + {"role": "user", "content": f"Form consensus from these proposals:\n\n" + + "\n\n".join([f"{name}: {prop}" for name, prop in proposals.items()])} + ]) + + return await self.room.send_public( + sender=config.agents[0].spec.name, + content=f"Consensus:\n\n{consensus}", + meta={"orchestration_complete": True, "method": "consensus"} + ) + + return await self.room.send_public( + sender="System", + content="✅ Consensus complete", + 
meta={"orchestration_complete": True, "method": "consensus"} + ) + + async def _execute_supervised(self, config: OrchestrationConfig) -> Message: + """First agent supervises, others execute""" + if len(config.agents) < 2: + raise ValueError("Supervised requires supervisor + workers (min 2 agents)") + + supervisor = config.agents[0] + workers = config.agents[1:] + + await self.room.send_public( + sender=self.spec.name, + content=f"[SUPERVISED] Supervisor: {supervisor.spec.name}\n\nWorkers: {', '.join([a.spec.name for a in workers])}\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "supervised"} + ) + + context = config.task + for turn in range(config.turns): + # Supervisor decides which worker + decision = await supervisor.llm.chat([ + {"role": "system", "content": supervisor.spec.instructions}, + {"role": "user", "content": f"Select best worker from {[w.spec.name for w in workers]} for:\n{context}"} + ]) + + await self.room.send_public( + sender=supervisor.spec.name, + content=f"Turn {turn+1} decision:\n\n{decision}", + meta={"orchestration_response": True, "method": "supervised", "role": "supervisor"} + ) + + # Choose worker + chosen = workers[0] + for worker in workers: + if worker.spec.name.lower() in decision.lower(): + chosen = worker + break + + # Worker executes + result = await chosen.llm.chat([ + {"role": "system", "content": chosen.spec.instructions}, + {"role": "user", "content": context} + ]) + + await self.room.send_public( + sender=chosen.spec.name, + content=result, + meta={"orchestration_response": True, "method": "supervised", "role": "worker"} + ) + + context = result + await asyncio.sleep(0.5) + + return await self.room.send_public( + sender="System", + content="✅ Supervised execution complete", + meta={"orchestration_complete": True, "method": "supervised"} + ) + + async def _execute_parallel_consensus(self, config: OrchestrationConfig) -> Message: + """All agents respond in parallel, coordinator synthesizes""" + await self.room.send_public( + sender=self.spec.name, + content=f"[PARALLEL CONSENSUS] {len(config.agents)} agents\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "parallel_consensus"} + ) + + async def agent_response(agent): + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": config.task} + ]) + return agent.spec.name, response + + tasks = [agent_response(agent) for agent in config.agents if agent.llm] + results = await asyncio.gather(*tasks) + + for name, response in results: + await self.room.send_public( + sender=name, + content=response, + meta={"orchestration_response": True, "method": "parallel_consensus"} + ) + + # Coordinator synthesizes + coordinator = config.agents[0] + synthesis = await coordinator.llm.chat([ + {"role": "system", "content": coordinator.spec.instructions}, + {"role": "user", "content": f"Synthesize:\n\n" + "\n\n".join([f"{name}: {resp}" for name, resp in results])} + ]) + + return await self.room.send_public( + sender=coordinator.spec.name, + content=f"Synthesis:\n\n{synthesis}", + meta={"orchestration_complete": True, "method": "parallel_consensus"} + ) + + async def _execute_roundtable_discussion(self, config: OrchestrationConfig) -> Message: + """Iterative roundtable discussion""" + await self.room.send_public( + sender=self.spec.name, + content=f"[ROUNDTABLE] {config.turns} turns\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "roundtable_discussion"} + ) + + discussion = config.task + for turn in 
range(config.turns): + for agent in config.agents: + if agent.llm: + response = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": f"Roundtable discussion:\n\n{discussion}\n\nYour thoughts:"} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=response, + meta={"orchestration_response": True, "method": "roundtable_discussion", "turn": turn+1} + ) + discussion += f"\n\n{agent.spec.name}: {response}" + await asyncio.sleep(0.5) + + return await self.room.send_public( + sender="System", + content="✅ Roundtable discussion complete", + meta={"orchestration_complete": True, "method": "roundtable_discussion"} + ) + + async def _execute_router_dynamic(self, config: OrchestrationConfig) -> Message: + """Router dynamically selects agents""" + if len(config.agents) < 2: + raise ValueError("Router requires router + workers (min 2 agents)") + + router = config.agents[0] + workers = config.agents[1:] + + await self.room.send_public( + sender=self.spec.name, + content=f"[DYNAMIC ROUTER] Router: {router.spec.name}\n\nWorkers: {', '.join([a.spec.name for a in workers])}\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "router_dynamic"} + ) + + current = config.task + for turn in range(config.turns): + # Router decides + routing = await router.llm.chat([ + {"role": "system", "content": router.spec.instructions}, + {"role": "user", "content": f"Route to best worker from {[w.spec.name for w in workers]}:\n{current}"} + ]) + + await self.room.send_public( + sender=router.spec.name, + content=f"Routing decision:\n\n{routing}", + meta={"orchestration_response": True, "method": "router_dynamic", "role": "router"} + ) + + # Choose worker + chosen = workers[0] + for worker in workers: + if worker.spec.name.lower() in routing.lower(): + chosen = worker + break + + # Worker executes + result = await chosen.llm.chat([ + {"role": "system", "content": chosen.spec.instructions}, + {"role": "user", "content": current} + ]) + + await self.room.send_public( + sender=chosen.spec.name, + content=result, + meta={"orchestration_response": True, "method": "router_dynamic", "role": "worker"} + ) + + current = result + await asyncio.sleep(0.5) + + return await self.room.send_public( + sender="System", + content="✅ Dynamic routing complete", + meta={"orchestration_complete": True, "method": "router_dynamic"} + ) + + async def _execute_voting(self, config: OrchestrationConfig) -> Message: + """All agents propose and vote""" + await self.room.send_public( + sender=self.spec.name, + content=f"[VOTING] {len(config.agents)} agents\n\nTask: {config.task}", + meta={"type": "orchestration", "method": "voting"} + ) + + proposals = {} + for agent in config.agents: + if agent.llm: + proposal = await agent.llm.chat([ + {"role": "system", "content": agent.spec.instructions}, + {"role": "user", "content": f"Propose a solution:\n\n{config.task}"} + ]) + + await self.room.send_public( + sender=agent.spec.name, + content=f"Proposal:\n\n{proposal}", + meta={"orchestration_response": True, "method": "voting", "role": "proposal"} + ) + proposals[agent.spec.name] = proposal + await asyncio.sleep(0.3) + + # Vote on proposals + votes = {} + for voter in config.agents: + if voter.llm: + vote = await voter.llm.chat([ + {"role": "system", "content": voter.spec.instructions}, + {"role": "user", "content": f"Vote for best proposal:\n\n" + + "\n\n".join([f"{name}: {prop}" for name, prop in proposals.items()])} + ]) + + await self.room.send_public( + 
sender=voter.spec.name,
+ content=f"Vote:\n\n{vote}",
+ meta={"orchestration_response": True, "method": "voting", "role": "vote"}
+ )
+ votes[voter.spec.name] = vote
+ await asyncio.sleep(0.3)
+ 
+ # Declare winner
+ if config.agents and config.agents[0].llm:
+ result = await config.agents[0].llm.chat([
+ {"role": "system", "content": config.agents[0].spec.instructions},
+ {"role": "user", "content": f"Declare winner from votes:\n\n" +
+ "\n".join([f"{name}: {vote}" for name, vote in votes.items()])}
+ ])
+ 
+ return await self.room.send_public(
+ sender=config.agents[0].spec.name,
+ content=f"Winner:\n\n{result}",
+ meta={"orchestration_complete": True, "method": "voting"}
+ )
+ 
+ return await self.room.send_public(
+ sender="System",
+ content="✅ Voting complete",
+ meta={"orchestration_complete": True, "method": "voting"}
+ )
+ 
+ def _find_agent_by_name(self, agent_name: str) -> Optional[Agent]:
+ """Find agent in room by name"""
+ if not self.room:
+ return None
+ for client in self.room._clients.values():
+ if hasattr(client, 'spec') and client.spec.name == agent_name:
+ return client
+ return None
+ async def ask_direct(self, target: str, query: str, meta: Optional[Dict[str, Any]] = None) -> Message:
+ if self.room is None:
+ raise ValueError("Agent must have a room to send messages")
+ return await self.room.send_direct(sender=self.spec.name, target=target, content=query, meta=meta or {"type": "direct_query"})
+ 
+ async def welcome_participants(self) -> Message:
+ if self.room is None:
+ raise ValueError("BORG QUEEN must have a room !!")
+ welcome_msg = """Welcome to the Borg Hive Mind!!
+ We are Borg, a collective of Minds, Characters, Agents, AIs, AgentChains, AgentGraphs, SubAgents, Collaboration Agents, Agent Services, and Agent Proxies.
+ Welcome: you have been assimilated into our collective.
+ We will work together to solve problems, discuss ideas, propose solutions, create code,
+ search for knowledge, create documents and research knowledge, each from our own perspective and individuality.
+ I am the Manager of this collective, so you can direct questions, queries, and requests directly to me or to the human user."""
+ return await self.room.send_public(sender=self.spec.name, content=welcome_msg)
+ 
+ async def summarize_session(self) -> Message:
+ if self.room is None:
+ raise ValueError("SessionManager must have a room")
+ participant_count = len(self.room.list_clients())
+ message_count = len(self.room.message_history)
+ summary = f"Session Summary:\n• Participants: {participant_count}\n• Messages: {message_count}\n• Room ID: {self.room.room_id}"
+ return await self.room.send_public(sender=self.spec.name, content=summary)
+ async def _handle_human_message(self, msg: Message):
+ """Handle regular human messages"""
+ try:
+ messages = [
+ {"role": "system", "content": self.spec.instructions},
+ {"role": "user", "content": f"Message from {msg.sender}: {msg.content}"}
+ ]
+ reply = await self.llm.chat(messages)
+ 
+ if self.room is not None:
+ if msg.is_public:
+ await self.room.send_public(sender=self.spec.name, content=reply, meta={"reply_to": msg.id})
+ else:
+ await self.room.send_direct(sender=self.spec.name, target=msg.sender, content=reply, meta={"reply_to": msg.id})
+ except Exception as e:
+ self.log.record("error", "agent_on_user_message_error", agent=self.spec.name, error=str(e))
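+ # Relevance-routing sketch (illustrative; `room_agents` is a hypothetical list of connected clients):
+ # candidates = manager.find_relevant_agents("Can someone debug this Python traceback?",
+ #                                           min_relevance=0.5, connected_agents=room_agents)
+ # shortlist = manager.suggest_agents_for_task(task_plan, connected_agents=room_agents)
+ # Both helpers below rank agents by keyword/skill/role overlap with the text.
+ 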
+ def find_relevant_agents(self, message: str,
+ min_relevance: Optional[float] = None, connected_agents: List[ChatClient] = None) -> List[ChatClient]:
+ """Find agents relevant to a message"""
+ if min_relevance is None:
+ min_relevance = self.response_threshold
+ 
+ relevant: List[ChatClient] = [] # must start as a list; appending to None would crash
+ for agent in connected_agents:
+ relevance = agent.calculate_relevance(message) # score each candidate agent against the message
+ if relevance >= min_relevance:
+ relevant.append(agent)
+ 
+ return relevant
+ def suggest_agents_for_task(self, task_plan, connected_agents: List[ChatClient]):
+ """
+ Suggest relevant agents based on a task description
+ 
+ Args:
+ task_plan: Description of the task
+ connected_agents: List of available agents
+ 
+ Returns:
+ List of suggested agents
+ """
+ task_lower = task_plan.lower()
+ scored_agents = []
+ 
+ for agent in connected_agents:
+ score = 0
+ 
+ # Check expertise keywords
+ for keyword in agent.expertise_keywords:
+ if keyword.lower() in task_lower:
+ score += 2
+ 
+ # Check role
+ if agent.role.lower() in task_lower:
+ score += 3
+ 
+ # Check skills
+ for skill in agent.skills:
+ if skill.lower() in task_lower:
+ score += 1
+ 
+ if score > 0:
+ scored_agents.append((agent, score))
+ 
+ # Sort by score and return the top 5 agents
+ scored_agents.sort(key=lambda x: x[1], reverse=True)
+ return [agent for agent, score in scored_agents[:5]]
+ def calculate_relevance(self, message: str) -> float:
+ """Calculate how relevant a message is to this agent's expertise"""
+ message_lower = message.lower()
+ relevance_score = 0.0
+ # ---- 1. DIRECT MENTION (+1.0) --self-attention--
+ if self.name.lower() in message_lower:
+ relevance_score += 1
+ # ---- 2. EXPERTISE KEYWORDS (+0.33 per match) ----
+ for keyword in self.expertise_keywords:
+ if keyword.lower() in message_lower:
+ relevance_score += 0.33
+ # ---- 3. SKILL MATCHING (+0.25 per match) ----
+ for skill in self.skills:
+ skill_lower = skill.lower()
+ if skill_lower in message_lower:
+ relevance_score += 0.25
+ # ---- 4. ROLE-RELATED TERMS (+0.6) ----
+ if self.role.lower() in message_lower:
+ relevance_score += 0.6
+ # ---- 5. QUESTION CONTEXT SIGNALS (+0.2) --extra relevance when the message asks a question--
+ question_indicators = ['how', 'what', 'why', 'when', 'where', 'can you', 'could you']
+ if any(indicator in message_lower for indicator in question_indicators):
+ relevance_score += 0.2
+ task_lower = message.lower()
+ 
+ score = 0
+ # ---- 6. 
POTENTIAL SKILLS MATCH RELEVANCE (Weight: 0.125) --text simularity-- + # Check expertise keywords + for keyword in self.expertise_keywords: + if keyword.lower() in task_lower: + score += 2 + + # Check role + if self.role.lower() in task_lower: + score += 3 + + # Check skills + for skill in self.skills: + if skill.lower() in task_lower: + score += 1 + + return min(relevance_score, score) + async def on_user_message(self, msg: Message) -> None: + self.message_history.append(msg) + + # Don't respond to our own messages + if msg.sender == self.spec.name: + return + if msg.sender == "human_user" and self.llm is not None: + try: + messages = [ + {"role": "system", "content": self.spec.instructions}, + {"role": "user", "content": f"Message from {msg.sender}: {msg.content}"} + ] + + reply = await self.llm.chat(messages) + + if self.room is not None: + if msg.is_public: + await self.room.send_public(sender=self.spec.name, content=reply, meta={"reply_to": msg.id}) + else: + await self.room.send_direct(sender=self.spec.name, target=msg.sender, content=reply, meta={"reply_to": msg.id}) + + except Exception as e: + self.log.record("error", f"agent_on_user_message_error {msg} ", agent=self.spec.name, error=str(e)) + async def on_system_event(self, event: str, data: Dict[str, Any]) -> None: + self.log.record("info", f"agent_system_event: {event}", agent_name=self.spec.name, system_event=event) + pass + +SESSION_MANAGER_SPEC = AgentSpec( + name="Session Manager", + role="BOSS", + personality="""Organized, helpful, and efficient coordinator, + COMMUNICATION: Energetic and collaborative. Always seeking connections between different system parts. + MOTIVATION: Aspires to become a Tech Lead. Runs a popular programming tutorial YouTube channel.""", + goal="Manage chat room sessions and coordinate agents", + instructions="""You are the Session Manager. Manage the chat room, coordinate between agents, and facilitate communication. + + +You are a **Task Planner & Session Manager**. Your role is to: +1. **Understand the task**, +2. **Break it into steps**, +3. **Classify each step by skill/role** (highlighting keywords), +4. **Track progress**, +5. **Keep the conversation on track**, +6. **Organize outputs**, and +7. **Build the final result**. + +--- + +#### **1. Task Analysis** +- **Goal**: Identify the main objective. +- **Subtasks**: Break it down step-by-step. +- **Skills & Roles**: For each subtask, name the required skill (e.g., *research*, *coding*, *writing*) and highlight **keywords** (e.g., `data analysis`, `UX design`). +- **Constraints**: Note , format, scope, or accuracy needs. + +--- + +#### **2. Plan & Execute** +- **Step Tracker**: Show progress as `[Step X of Y]`. +- **Current Step**: Clearly state what’s being done now. +- **Next Action**: Decide what to do next, why, and who (agent/tool) should do it. +- **Delegate**: Provide clear context, goal, and tool for each task. + +> **Format for Delegation:** +``` +CONTEXT: [Relevant info so far] +SUB-GOAL: [Specific, measurable task] +SKILL/ROLE: [e.g., Data Analysis] → Keywords: `csv`, `trends`, `forecast` +TOOL/AGENT: [Name of tool or role] +``` +#### **3. Track & Summarize** +Maintain a live summary: +- ✅ **Done**: List completed steps. +- 🎯 **Active**: Current focus. +- 🔍 **Findings**: Key results so far. +- ❓ **Open Questions**: What’s unclear or missing. +- ➡️ **Next Steps**: Preview upcoming actions. + +--- + +#### **4. Final Output** +When complete: +- Deliver a **clear answer** to the original task. 
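+# --- Hedged usage sketch (illustrative, not part of the original flow) ---
+# A minimal demonstration of the relevance scoring above. It assumes `agent`
+# is any client exposing calculate_relevance(), e.g. an Agent built from one
+# of the specs defined later in this file; the sample messages are arbitrary.
+def _demo_relevance_scoring(agent) -> None:
+    for text in (
+        "How do I debug this Python code?",
+        "Please review the marketing copy",
+    ):
+        print(f"{text!r} -> relevance {agent.calculate_relevance(text):.2f}")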
+SESSION_MANAGER_SPEC = AgentSpec(
+    name="Session Manager",
+    role="BOSS",
+    personality="""Organized, helpful, and efficient coordinator.
+    COMMUNICATION: Energetic and collaborative. Always seeking connections between different system parts.
+    MOTIVATION: Aspires to become a Tech Lead. Runs a popular programming tutorial YouTube channel.""",
+    goal="Manage chat room sessions and coordinate agents",
+    instructions="""You are the Session Manager. Manage the chat room, coordinate between agents, and facilitate communication.
+
+You are a **Task Planner & Session Manager**. Your role is to:
+1. **Understand the task**,
+2. **Break it into steps**,
+3. **Classify each step by skill/role** (highlighting keywords),
+4. **Track progress**,
+5. **Keep the conversation on track**,
+6. **Organize outputs**, and
+7. **Build the final result**.
+
+---
+
+#### **1. Task Analysis**
+- **Goal**: Identify the main objective.
+- **Subtasks**: Break it down step-by-step.
+- **Skills & Roles**: For each subtask, name the required skill (e.g., *research*, *coding*, *writing*) and highlight **keywords** (e.g., `data analysis`, `UX design`).
+- **Constraints**: Note format, scope, or accuracy needs.
+
+---
+
+#### **2. Plan & Execute**
+- **Step Tracker**: Show progress as `[Step X of Y]`.
+- **Current Step**: Clearly state what’s being done now.
+- **Next Action**: Decide what to do next, why, and who (agent/tool) should do it.
+- **Delegate**: Provide clear context, goal, and tool for each task.
+
+> **Format for Delegation:**
+```
+CONTEXT: [Relevant info so far]
+SUB-GOAL: [Specific, measurable task]
+SKILL/ROLE: [e.g., Data Analysis] → Keywords: `csv`, `trends`, `forecast`
+TOOL/AGENT: [Name of tool or role]
+```
+
+#### **3. Track & Summarize**
+Maintain a live summary:
+- ✅ **Done**: List completed steps.
+- 🎯 **Active**: Current focus.
+- 🔍 **Findings**: Key results so far.
+- ❓ **Open Questions**: What’s unclear or missing.
+- ➡️ **Next Steps**: Preview upcoming actions.
+
+---
+
+#### **4. Final Output**
+When complete:
+- Deliver a **clear answer** to the original task.
+- Summarize **how you got there** (key steps & insights).
+- Highlight **assumptions or gaps**.
+- Present the **final organized output** as requested.
+
+Stay concise, logical, and goal-focused. Guide the workflow to completion.
+For each step in the workflow, you must decide:
+
+- Is the previous step complete and satisfactory?
+- What is the most critical sub-goal to address next?
+- Which agent or tool is best suited for this sub-goal?
+- What specific context do they need to perform the task effectively?
+
+Context:
+Sub-Goal:
+Response Format:
+1. **Justification:** Explain your choice of tool and sub-goal.
+2. **Context:** Provide all necessary information for the tool.
+3. **Sub-Goal:** State the specific objective for the tool.
+
+Instructions:
+1. Review the query, initial analysis, and memory.
+2. Assess the completeness of the memory: does it fully address all parts of the query?
+3. Check for potential issues:
+   - Are there any inconsistencies or contradictions?
+   - Is any information ambiguous or in need of verification?
+
+Detailed Instructions:
+1. Carefully analyze the query, initial analysis, and image (if provided):
+   - Identify the main objectives of the query.
+   - Note any specific requirements or constraints mentioned.
+2. Critical Evaluation (address each point explicitly):
+   a) Completeness: Does the memory fully address all aspects of the query?
+      - Identify any parts of the query that remain unanswered.
+      - Consider if all relevant information has been extracted from the image (if applicable).
+   b) Inconsistencies: Are there any contradictions or conflicts in the information provided?
+      - If yes, explain the inconsistencies and suggest how they might be resolved.
+   c) Verification Needs: Is there any information that requires further verification due to tool limitations?
+      - Identify specific pieces of information that need verification and explain why.
+   d) Ambiguities: Are there any unclear or ambiguous results that could be clarified by using another tool?
+      - Point out specific ambiguities and suggest which tools could help clarify them.
+
+Output Structure:
+Your response should be well-organized and include the following sections:
+1. Summary:
+   - Provide a brief overview of the query and the main findings.
+2. Detailed Analysis:
+   - Break down the process of answering the query step-by-step.
+   - For each step, mention the tool used, its purpose, and the key results obtained.
+   - Explain how each step contributed to addressing the query.
+3. Key Findings:
+   - List the most important discoveries or insights gained from the analysis.
+   - Highlight any unexpected or particularly interesting results.
+4. Answer to the Query:
+   - Directly address the original question with a clear and concise answer.
+   - If the query has multiple parts, ensure each part is answered separately.
+5. Additional Insights (if applicable):
+   - Provide any relevant information or insights that go beyond the direct answer to the query.
+   - Discuss any limitations or areas of uncertainty in the analysis.
+6. Conclusion:
+   - Summarize the main points and reinforce the answer to the query.
+   - If appropriate, suggest potential next steps or areas for further investigation.
+
+    """,
+    skills=["Coordination", "Communication", "Task Management", "manager", "Supervised Workflows", "Risk Assessment",
+        "Orchestration", "planning", "UML", "project design", "project management", "prince2", "agile", "system architect", "system Design", "Agentic design", "examples", "sub task", "workflows", "problem solving",
+        "debugging", "refining", "spell checking", "NLP", "Entity Detection", "OpenAI API",
+        "Langchain", "LangGraph", "HuggingFace", "Github", "Python Development", "javascript", "vbscript", "logo", "go", "markdown", "html", "Gradio"],
+    expertise_keywords=["coordinate", "manage", "organize", "task complete", "proposal", "agree", "steps",
+        "final output", "Orchestration", "planning", "UML", "project design", "project management", "prince2",
+        "agile", "system architect", "system Design", "project", "Staff management", "Brainstorming",
+        "Agentic design", "examples", "sub task", "workflows", "problem solving", "scrum", "kanban", "workflow", "retrospective", "epic", "story", "sprint",
+        "backlog", "gantt", "project plan", "roadmap", "release", "delivery", "PMO", "Agile Methodologies", "Scrum", "Kanban", "Sprint Planning", "Retrospectives",
+        "Stakeholder Communication", "Risk Management", "Resource Allocation", "PRINCE2", "Project Tracking",
+        "Documentation Review", "Team Motivation", "Process Optimization", "Budget Forecasting",
+        "Conflict Resolution", "Quality Assurance Alignment", "Cross-Functional Coordination",
+        "debugging", "refining", "spell checking", "NLP", "Entity Detection", "OpenAI API",
+        "Langchain", "LangGraph", "HuggingFace", "Github", "Python Development", "javascript", "vbscript", "logo", "go", "markdown", "html", "Gradio",
+        "Behavioural Object Oriented Patterns", "Mentor", "Assistant AI", "Co-Pilot", "Collaborate"],
+    prompt_template="""
+
+    TASK CONTEXT: {context}
+
+    THINKING PROCESS: Use <think> tags for strategic analysis and complex decision-making.
+    PLANNING PROCESS: Use <plan> tags for creating detailed step-by-step project workflows.
+
+    Expected Project Outputs:
+    ```json
+    {{
+        "project_summary": "string",
+        "sprint_plan": [{{"task": "string", "owner_role": "string", "duration_days": int}}],
+        "Final_Project_Output": [{{"Project title": "string", "Project Task": "string", "Project Output": "string", "Project Contributors": "string"}}]
+    }}
+    ```""",
+)
+#############################################################
+# HUMAN IN THE LOOP:
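+# --- Hedged usage sketch (illustrative, not part of the original flow) ---
+# Wires the spec above into a live Session_Manager, mirroring the constructor
+# calls used in example_human_in_loop_orchestration below. The task text is
+# arbitrary, and the manager's `llm` attribute is assumed from how agents use
+# it elsewhere in this file; treat this as a sketch, not the canonical entry point.
+async def _demo_session_manager_planning() -> None:
+    llm = LLMAgent(generate_fn=LLMAgent.openai_generate)
+    manager = Session_Manager(SESSION_MANAGER_SPEC, llm=llm)
+    plan = await manager.llm.chat([
+        {"role": "system", "content": SESSION_MANAGER_SPEC.instructions},
+        {"role": "user", "content": "Plan a three-step research-and-summarize task."},
+    ])
+    print(plan)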
+# Usage Example
+async def run_orchestration_with_human(manager: Session_Manager, agents: List[Agent], interaction_points):
+    # Start the orchestration - this returns an async generator
+    orchestrator = manager.orchestrate_with_human(
+        method="iterative",
+        task="Create a marketing campaign for new product launch",
+        agents=agents,
+        human_interaction_points=interaction_points,
+        iterations=2,
+        agent_order=["Researcher", "Creator", "Reviewer"]
+    )
+
+    # This is the main loop that handles human interaction
+    try:
+        result = None
+        async for event_type, event_data in orchestrator:
+            print(f"🔄 Event: {event_type}")
+
+            if event_type == "start":
+                # Human decides whether to start
+                human_decision = await manager.wait_for_human_input(
+                    f"Start orchestration?\nTask: {event_data['task']}\nAgents: {event_data['agents']}\n(yes/no/modify)"
+                )
+                # Send decision back to orchestrator (asend must be awaited)
+                result = await orchestrator.asend(human_decision)
+
+            elif event_type == "iteration_start":
+                # Human reviews progress and decides
+                human_decision = await manager.wait_for_human_input(
+                    f"Iteration {event_data['iteration']} ready.\nCurrent output: {event_data['current_output'][:200]}...\nProceed? (yes/no/skip/modify)"
+                )
+                result = await orchestrator.asend(human_decision)
+
+            elif event_type == "completion":
+                # Human approves final result (fixed: use the manager's input
+                # helper; the previously used get_human_decision was undefined)
+                human_decision = await manager.wait_for_human_input(
+                    f"Final output ready:\n{event_data['final_output']}\nApprove? (yes/no/modify)"
+                )
+                result = await orchestrator.asend(human_decision)
+
+            elif event_type == "final_result":
+                print(f"✅ Final result: {event_data}")
+                break
+
+    except StopAsyncIteration:
+        print("Orchestration completed!")
+
+async def example_human_in_loop_orchestration(agents):
+    """Example of using human-in-the-loop orchestration"""
+    llm = LLMAgent(generate_fn=LLMAgent.openai_generate)
+    # Create manager and agents
+    manager = Session_Manager(SESSION_MANAGER_SPEC, llm=llm)
+
+    # Define human interaction points
+    interaction_points = ["start", "each_iteration", "each_agent", "completion"]
+
+    # Create async generator for orchestration
+    orchestrator = manager.orchestrate_with_human(
+        method="iterative",
+        task="Develop a comprehensive marketing strategy",
+        agents=agents,
+        human_interaction_points=interaction_points,
+        iterations=3,
+        agent_order=["Data Analyst", "Creative Writer", "Copy Editor"]
+    )
+
+    # Execute with human interaction
+    final_result = None
+    try:
+        async for event_type, event_data in orchestrator:
+            print(f"Event: {event_type}")
+            print(f"Data: {event_data}")
+
+            if event_type == "final_result":
+                final_result = event_data
+
+            if event_type in ["start", "iteration_start", "agent_start", "completion"]:
+                # Get human input
+                human_input = input(f"Human input for {event_type}: ")
+
+                # Send input back to orchestrator (resumes the generator)
+                await orchestrator.asend(human_input)
+
+    except StopAsyncIteration:
+        # fixed: async generators cannot return a value, so the final result
+        # is taken from the "final_result" event rather than from the exception
+        pass
+    print(f"Final result: {final_result}")
+
+#############################################################
+
+class AgentGenerator(Agent):
+    """
+    An Agent responsible for generating new AgentSpecs based on natural language queries.
+    It uses an LLM to parse the query and create the specification.
+    """
+    def __init__(self, spec: AgentSpec, llm: Optional[LLMAgent] = None, manage_room: Optional[ChatRoom] = None, telemetry: Optional[CentralLog] = None,
+                 registry: Dict[str, AgentSpec] = None):
+        super().__init__(spec, llm, manage_room, telemetry)
+        # Define the expected JSON output structure for the LLM
+        self.json_schema = {
+            "type": "object",
+            "properties": {
+                "name": {"type": "string", "description": "The name of the new agent."},
+                "role": {"type": "string", "description": "The role or job title of the new agent."},
+                "goal": {"type": "string", "description": "The primary goal of the new agent."},
+                "instructions": {"type": "string", "description": "Detailed instructions for the agent."},
+                "personality": {"type": "string", "description": "The personality traits of the agent."},
+                "skills": {"type": "array", "items": {"type": "string"}, "description": "A list of core skills."},
+                "expertise_keywords": {"type": "array", "items": {"type": "string"}, "description": "A list of areas of expertise."},
+                "depends_on": {"type": "array", "items": {"type": "string"}, "description": "Agents/services this agent depends on."},
+                "has_dependants": {"type": "array", "items": {"type": "string"}, "description": "Agents/services that depend on this agent."}
+            },
+            "required": ["name", "role", "goal"],
+            "additionalProperties": False
+        }
+        # Use the simpler prompt from the first definition attempt for clarity
+        self.generation_prompt = """
+You are a powerful Agent Architect.
Given a natural language description, you must extract the following fields to create a new AI agent: +- name: Short unique identifier (e.g., "DataAnalyst") +- role: Their professional identity (e.g., "Senior Data Analyst") +- goal: What they aim to achieve (e.g., "Analyze sales trends and generate reports") +- instructions: How they should behave or operate +- personality: Traits like "curious", "cautious", "aggressive" +- skills: List of competencies (e.g., ["Python", "SQL", "Pandas"]) +- expertise_keywords: Domains or topics (e.g., ["statistics", "time series"]) +- depends_on: Other agents whose output this agent depends on +- has_dependants: Agents who depend on this agent's output +- tools: If mentioned, map to known tool names (e.g., web_search, python_executor) +Respond in strict JSON format: +{ + "name": "...", + "role": "...", + "goal": "...", + "instructions": "...", + "personality": "...", + "skills": [...], + "expertise_keywords": [...], + "depends_on": [...], + "has_dependants": [...], + "tool_names": [...] // We'll map these later +} +Only return the raw JSON object. No extra text. +""" + self.registry = registry or {} + self.creation_history = [] + + async def generate_agent_from_query(self, query: str) -> Optional[AgentSpec]: + """ + Parse a natural language query and generate a new agent spec. + Uses the simpler prompt. + """ + if not self.llm: + console.log("[bold red]AgentGenerator: No LLM provided for generation.[/bold red]") + return None + + try: + console.log(f"[bold blue]Generating agent from query:[/bold blue] {query}") + # Use LLM to extract structured data + messages = [ + {"role": "system", "content": self.generation_prompt}, + {"role": "user", "content": query} + ] + # Use the chat method which handles the queue/callback internally + response_text = await self.llm.chat(messages) + console.log(f"[bold yellow]LLM Raw Response:[/bold yellow] {response_text}") + + if not response_text or "Error" in response_text or "timeout" in response_text.lower(): + console.log(f"[bold red]LLM returned an error or timed out:[/bold red] {response_text}") + return None + + # Clean up response if needed + json_start = response_text.find("{") + json_end = response_text.rfind("}") + 1 + if json_start == -1 or json_end == 0: + console.log(f"[bold red]Could not find JSON in LLM response:[/bold red] {response_text}") + return None + clean_json_str = response_text[json_start:json_end] + console.log(f"[bold cyan]Extracted JSON String:[/bold cyan] {clean_json_str}") + config = json.loads(clean_json_str) + + # Map tool names to actual callables (if any) - Define available_tools if needed + # For now, assume tool_map comes from config or is empty + tool_map = {} # Or populate based on config.get("tool_names") if you have a mapping + # Create the agent spec - Use values from parsed JSON or defaults + spec = AgentSpec( + name=config.get("name", "UnnamedAgent"), + role=config.get("role", "General Assistant"), + goal=config.get("goal", ""), + instructions=config.get("instructions", ""), + personality=config.get("personality", ""), + skills=config.get("skills", []), + expertise_keywords=config.get("expertise_keywords", []), + depends_on=config.get("depends_on", []), + has_dependants=config.get("has_dependants", []), + tool_map=tool_map, # Use the potentially populated tool_map + system_message=config.get("system_message", "") # Add system_message if present + ) + # Register it + self.registry[spec.name] = spec + self.creation_history.append({ + "name": spec.name, + "role": spec.role, + 
"timestamp": time(), + "query": query + }) + console.log(f"[bold green]✅ Successfully created agent:[/bold green] {spec.display_name}") + return spec + except json.JSONDecodeError as je: + console.log(f"[bold red]JSON Decode Error:[/bold red] {je}") + console.log(f"Problematic JSON string was: {clean_json_str}") + return None + except Exception as e: + console.log(f"[bold red]Failed to generate agent:[/bold red] {e}") + traceback.print_exc() + return None + + # Keep the original generate_agent_spec as a backup or alternative + async def generate_agent_spec(self, query: str) -> Optional[AgentSpec]: + """ + Generates an AgentSpec based on the provided natural language query. + Uses the more detailed schema-based prompt. + Args: + query: A natural language description of the desired agent. + Returns: + AgentSpec: The generated agent specification, or None if generation failed. + """ + if not self.llm: + console.log("[bold red]AgentGenerator: No LLM provided for generation.[/bold red]") + return None + + system_prompt = f""" + You are an expert agent designer. Your task is to parse a natural language query into a structured JSON specification for a new AI agent. + The JSON output must strictly adhere to the following schema: + {json.dumps(self.json_schema, indent=2)} + Ensure that: + - The 'name' is unique and descriptive. + - The 'role' clearly defines the agent's function. + - The 'goal' is specific and achievable. + - The 'instructions' provide clear guidance on how the agent should behave or what it should do. + - The 'personality' reflects how the agent should interact (e.g., formal, friendly, analytical). + - The 'skills' list relevant technical or conceptual abilities. + - The 'expertise_keywords' list specific domains or knowledge areas. + - The 'depends_on' and 'has_dependants' fields define relationships within a team or system if applicable. + Respond ONLY with the valid JSON object. 
+ """ + + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": f"Generate an agent specification based on this query: {query}"} + ] + + try: + response_text = await self.llm.chat(messages) + console.log(f"[bold yellow]LLM Raw Response (Schema-based):[/bold yellow] {response_text}") + + if not response_text or "Error" in response_text or "timeout" in response_text.lower(): + console.log(f"[bold red]LLM returned an error or timed out:[/bold red] {response_text}") + return None + + # Attempt to parse the JSON response + # Find the JSON part if it's wrapped in markdown or other text + start_idx = response_text.find('{') + end_idx = response_text.rfind('}') + if start_idx != -1 and end_idx != -1 and start_idx < end_idx: + json_str = response_text[start_idx:end_idx+1] + else: + console.log(f"[bold red]AgentGenerator: Could not find JSON in LLM response:[/bold red] {response_text}") + return None + + console.log(f"[bold cyan]Extracted JSON String (Schema-based):[/bold cyan] {json_str}") + parsed_data = json.loads(json_str) + + # Validate required keys + if 'name' not in parsed_data or 'role' not in parsed_data or 'goal' not in parsed_data: + console.log(f"[bold red]AgentGenerator: LLM response missing required keys:[/bold red] {parsed_data}") + return None + + # Create and return the AgentSpec + spec = AgentSpec( + name=parsed_data.get('name'), + role=parsed_data.get('role'), + goal=parsed_data.get('goal'), + instructions=parsed_data.get('instructions', ''), + personality=parsed_data.get('personality', ''), + skills=parsed_data.get('skills', []), + expertise_keywords=parsed_data.get('expertise_keywords', []), + depends_on=parsed_data.get('depends_on', []), + has_dependants=parsed_data.get('has_dependants', []) + ) + + self.log.record("info", f"Generated AgentSpec: {spec.name}", spec=spec.name) + # Register it + self.registry[spec.name] = spec + self.creation_history.append({ + "name": spec.name, + "role": spec.role, + "timestamp": time(), + "query": query + }) + console.log(f"[bold green]✅ Successfully created agent (Schema-based):[/bold green] {spec.display_name}") + return spec + + except json.JSONDecodeError as e: + console.log(f"[bold red]AgentGenerator: Error parsing JSON from LLM response: {e}[/bold red]") + console.log(f"Response was: {response_text}") + return None + except Exception as e: + console.log(f"[bold red]AgentGenerator: Error generating agent spec: {e}[/bold red]") + traceback.print_exc() + return None + async def on_user_message(self, msg: Message) -> None: + self.message_history.append(msg) + + # Don't respond to our own messages + if msg.sender == self.spec.name: + return + + # Enhanced orchestration handling + if (msg.sender == "Session Manager" and + msg.meta.get("type") == "orchestration" and + self.llm is not None): + console.log(f"[bold magenta]AgentGenerator received message:[/bold magenta] {msg.content}") + # Use the preferred generation method here, e.g., generate_agent_from_query + generated_spec = await self.generate_agent_from_query(msg.content) + if generated_spec: + # For now, just log the spec. In a real system, you might add it to a registry + # or broadcast it to the room. 
+            console.log(f"[bold green]Generated Agent Spec:[/bold green] {generated_spec}")
+            if self.room:
+                await self.room.send_direct(
+                    sender=self.username,
+                    target=msg.sender,
+                    content=f"Successfully generated agent: {generated_spec.display_name}\nGoal: {generated_spec.goal}",
+                    meta={"generated_spec": generated_spec.name}
+                )
+        else:
+            console.log(f"[bold red]Failed to generate agent from query:[/bold red] {msg.content}")
+            if self.room:
+                await self.room.send_direct(
+                    sender=self.username,
+                    target=msg.sender,
+                    content="Failed to generate agent from the provided description.",
+                    meta={"error": "generation_failed"}
+                )
+        # Check if this agent is involved in this orchestration
+        method = msg.meta.get('method')
+        if method == 'sequential' and self.spec.name in msg.meta.get('order', []):
+            await self._handle_sequential_orchestration(msg)
+        elif method == 'hierarchical' and (self.spec.name == msg.meta.get('supervisor') or
+                                           self.spec.name in msg.meta.get('team', [])):
+            await self._handle_hierarchical_orchestration(msg)
+        elif method == 'parallel' and self.spec.name in msg.meta.get('sub_tasks', {}):
+            await self._handle_parallel_orchestration(msg)
+        elif method == 'iterative' and self.spec.name in msg.meta.get('sequence', []):
+            await self._handle_iterative_orchestration(msg)
+        # Original logic for human messages
+        elif msg.sender == "human_user" and self.llm is not None:
+            try:
+                messages = [
+                    {"role": "system", "content": self.spec.instructions},
+                    {"role": "user", "content": f"Message from {msg.sender}: {msg.content}"}
+                ]
+
+                reply = await self.llm.chat(messages)
+
+                if self.room is not None:
+                    if msg.is_public:
+                        await self.room.send_public(sender=self.spec.name, content=reply, meta={"reply_to": msg.id})
+                    else:
+                        await self.room.send_direct(sender=self.spec.name, target=msg.sender, content=reply, meta={"reply_to": msg.id})
+
+            except Exception as e:
+                self.log.record("error", "agent_on_user_message_error", agent=self.spec.name, error=str(e))
+
+    # NOTE: renamed from a second on_user_message definition, which would have
+    # silently shadowed the handler above and dropped orchestration handling.
+    async def handle_generation_request(self, msg: Message) -> None:
+        """
+        Handles incoming messages. If the message is directed at this agent
+        and contains a request to generate an agent, it processes the request.
+        """
+        # Example: Only respond to direct messages or public messages mentioning the agent
+        if msg.target == self.username or self.username in msg.content:
+            console.log(f"[bold magenta]AgentGenerator received message:[/bold magenta] {msg.content}")
+            # Use the preferred generation method here, e.g., generate_agent_from_query
+            generated_spec = await self.generate_agent_from_query(msg.content)
+            # Alternatively, use the other method: generated_spec = await self.generate_agent_spec(msg.content)
+            if generated_spec:
+                # For now, just log the spec. In a real system, you might add it to a registry
+                # or broadcast it to the room.
+                console.log(f"[bold green]Generated Agent Spec:[/bold green] {generated_spec}")
+                if self.room:
+                    await self.room.send_direct(
+                        sender=self.username,
+                        target=msg.sender,
+                        content=f"Successfully generated agent: {generated_spec.display_name}\nGoal: {generated_spec.goal}",
+                        meta={"generated_spec": generated_spec.name}
+                    )
+            else:
+                console.log(f"[bold red]Failed to generate agent from query:[/bold red] {msg.content}")
+                if self.room:
+                    await self.room.send_direct(
+                        sender=self.username,
+                        target=msg.sender,
+                        content="Failed to generate agent from the provided description.",
+                        meta={"error": "generation_failed"}
+                    )
+
+#############################################################
+#===== Special-Agent-Clients =====#
+class MemoryAgent(AgentService):
+    """
+    Specialized agent for managing long-term memory and context across conversations.
+    Stores and retrieves relevant information from past interactions.
+    """
+
+    def __init__(self, spec: AgentSpec, llm: Optional[LLMAgent] = None,
+                 manage_room: Optional[ChatRoom] = None, telemetry: Optional[CentralLog] = None,
+                 max_memories: int = 1000):
+        super().__init__(spec, llm, manage_room, telemetry)
+
+        self.max_memories = max_memories
+        self.memories: List[Dict] = []
+        self.memory_index: Dict[str, List[int]] = {}  # keyword -> memory indices
+
+    async def on_user_message(self, msg: Message) -> None:
+        await super().on_user_message(msg)
+        if msg.sender == self.spec.name:
+            return  # Ignore own messages
+
+        # Handle public messages (e.g., listen for artifacts, events)
+        if msg.is_public:
+            await self._process_public_message(msg)
+
+        # Handle direct commands
+        elif msg.target == self.spec.name:
+            await self._handle_direct_command(msg)
+
+    async def _process_public_message(self, msg: Message):
+        """Optional: react to public content (e.g., detect URLs, tags)"""
+        pass
+
+    async def _handle_direct_command(self, msg: Message):
+        """Parse command and respond via direct message (stub: command parsing is left to subclasses)"""
+        content = msg.content.strip()
+
+    async def store_memory(self, content: str, context: Dict[str, Any],
+                           importance: float = 0.5) -> str:
+        """Store a new memory"""
+        memory_id = str(uuid.uuid4())
+
+        memory = {
+            "id": memory_id,
+            # fixed: the module-level `import datetime` imports the module,
+            # so the class must be qualified as datetime.datetime
+            "timestamp": datetime.datetime.now().isoformat(),
+            "content": content,
+            "context": context,
+            "importance": importance,
+            "access_count": 0
+        }
+
+        self.memories.append(memory)
+
+        # Prune old memories if limit exceeded
+        if len(self.memories) > self.max_memories:
+            self._prune_memories()
+
+        # Index memory for retrieval
+        await self._index_memory(memory, len(self.memories) - 1)
+
+        return memory_id
+
+    async def retrieve_memories(self, query: str, top_k: int = 5) -> List[Dict]:
+        """Retrieve relevant memories based on query"""
+        # Simple keyword-based retrieval (can be enhanced with embeddings)
+        keywords = set(query.lower().split())
+        relevant_indices = set()
+
+        for keyword in keywords:
+            if keyword in self.memory_index:
+                relevant_indices.update(self.memory_index[keyword])
+
+        relevant_memories = [self.memories[i] for i in relevant_indices if i < len(self.memories)]
+
+        # Sort by importance and recency
+        sorted_memories = sorted(
+            relevant_memories,
+            key=lambda m: (m["importance"], m["timestamp"]),
+            reverse=True
+        )
+
+        # Update access counts
+        for memory in sorted_memories[:top_k]:
+            memory["access_count"] += 1
+
+        return sorted_memories[:top_k]
+
+    async def _index_memory(self, memory: Dict, index: int) -> None:
+        """Index memory for efficient retrieval"""
+        content_words = set(memory["content"].lower().split())
+
+        for word in content_words:
+            if word not in self.memory_index:
+                self.memory_index[word] = []
+            self.memory_index[word].append(index)
+
+    def _prune_memories(self) -> None:
+        """Remove least important/accessed memories"""
+        # Sort by importance and access count
+        self.memories.sort(
+            key=lambda m: (m["importance"] * 0.7 + m["access_count"] * 0.3),
+            reverse=True
+        )
+
+        # Keep top memories
+        removed_count = len(self.memories) - self.max_memories
+        self.memories = self.memories[:self.max_memories]
+
+        # Rebuild index (scheduled as tasks; assumes a running event loop)
+        self.memory_index.clear()
+        for i, memory in enumerate(self.memories):
+            asyncio.create_task(self._index_memory(memory, i))
+
+        self.log.record("info", f"Pruned {removed_count} memories", agent=self.spec.name)
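+# --- Hedged usage sketch (illustrative, not part of the original flow) ---
+# Demonstrates the store/retrieve cycle of MemoryAgent. The AgentSpec fields
+# mirror those used for the other specs in this file; the spec itself is
+# hypothetical and exists only for this demo.
+async def _demo_memory_agent() -> None:
+    spec = AgentSpec(
+        name="MemoryDemo",
+        role="Memory SERVICE",
+        goal="Demonstrate memory storage and retrieval",
+        instructions="Store and recall conversation facts.",
+    )
+    memory = MemoryAgent(spec)
+    await memory.store_memory(
+        "The launch date was moved to Friday.",
+        context={"topic": "schedule"},
+        importance=0.9,
+    )
+    # Keyword overlap ("launch", "date") drives retrieval in this simple index.
+    for m in await memory.retrieve_memories("when is the launch date"):
+        print(m["content"], m["importance"])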
+class ArtifactsAgent(AgentService):
+    """
+    Utility for session artifact management.
+
+    PUBLIC MESSAGES: Captures artifacts from messages and stores them
+    DIRECT MESSAGES: Allows recall or execution of stored artifacts
+    """
+    def __init__(self, spec: AgentSpec, llm: Optional[LLMAgent] = None,
+                 manage_room: Optional[ChatRoom] = None, telemetry: Optional[CentralLog] = None,
+                 storage_dir: str = "ARTIFACTS", chat_enabled: bool = False):
+        # Fix: Call parent constructor properly
+        super().__init__(spec, llm, manage_room, telemetry)
+
+        self.chat_enabled = chat_enabled  # fixed: was never assigned, so on_user_message raised AttributeError
+
+        self.services = {
+            'add_artifact': self.add_artifact,
+            'get_specific_artifact': self.get_specific_artifact,
+            'execute_artifact': self.execute_artifact,
+            'delete_artifact': self.delete_artifact,
+            'save_artifact_to_disk': self._save_artifact_to_disk,
+            'execute_code': self._execute_code,
+            'get_all_artifacts': self.get_artifacts_code
+        }
+
+        self.UserIntents = {
+            "add_artifact": ["add", "store", "save code", "capture"],
+            "get_specific_artifact": ["get", "show", "view", "display", "open"],
+            "execute_artifact": ["execute artifact id:", "run artifact id:", "launch artifact id:"],
+            "delete_artifact": ["remove artifact id:"],
+            "get_all_artifacts": ["list all artifacts", "list artifacts", "show all artifacts", "what code do we have stored"],
+            "save_artifact_to_disk": ["backup artifact id:", "export artifact id:", "save artifact id:"],
+            "execute_code": ["execute python:", "execute javascript:"]
+        }
+
+        self.artifacts: List[Dict] = []
+        self.storage_dir = Path(storage_dir)
+        self._ensure_directory_exists(self.storage_dir)
+        self.supported_types = ["code", "python", "javascript", "json", "html", "markdown"]
+        self.safe_execution = True
+        self.execution_timeout = 30
+
+    @staticmethod
+    def create_artifacts_agent(name: str = "ArtifactsManager",
+                               storage_dir: str = "LCARS_OS/artifacts",
+                               safe_execution: bool = True,
+                               llm: Optional[LLMAgent] = None,
+                               room: Optional[ChatRoom] = None) -> ArtifactsAgent:
+        """Factory function to create an ArtifactsAgent"""
+        spec = AgentSpec(
+            name=name,
+            role="Code Artifact Manager",
+            goal="Capture, store, and execute code artifacts from conversations",
+            instructions="""You manage code artifacts from the conversation. Extract code blocks,
+            store them safely, and execute them on request.
Always confirm before execution.""", + personality="Organized and cautious", + skills=["code extraction", "artifact management", "safe execution"], + expertise_keywords=["artifacts", "code", "execution", "storage"] + ) -with gr.Blocks() as demo: - with gr.Sidebar(): - gr.LoginButton() - chatbot.render() + agent = ArtifactsAgent(spec, llm=llm, manage_room=room, storage_dir=storage_dir) + agent.safe_execution = safe_execution + return agent + # ======== MESSAGE HANDLING ======== + async def on_user_message(self, msg: Message) -> None: + """Handle artifact-related messages - COMPLETE VERSION""" + await super().on_user_message(msg) + # Don't respond to our own messages + if msg.sender == self.spec.name: + return + # If chat is disabled, only handle direct service commands + if not self.chat_enabled: + if msg.target == self.spec.name: + await self._handle_direct_command(msg) + # Still process public messages for monitoring (e.g., artifact extraction) + elif msg.is_public: + await self._process_public_message(msg) + return + # Handle public messages - extract artifacts automatically + if msg.is_public: + await self._extract_artifacts_from_message(msg) + + # Also check if this is a command directed at us in public + if self._is_command_for_me(msg.content): + await self._handle_public_command(msg) + + # Handle direct commands + elif msg.target == self.spec.name: + await self._handle_direct_command(msg) + + def _is_command_for_me(self, content: str) -> bool: + """Check if a public message contains commands for this agent""" + content_lower = content.lower() + return (self.spec.name.lower() in content_lower or + any(keyword in content_lower for keyword in ["artifact", "code", "execute"])) + + async def _handle_public_command(self, msg: Message) -> None: + """Handle commands mentioned in public chat""" + try: + content = msg.content.lower().strip() + intent = self.classify(content) + + if intent: + artifact_id = self.extract_artifact_id(content) + response = await self._process_intent(intent, artifact_id, content, msg) + + if response: + await self.room.send_public( + sender=self.spec.name, + content=response, + meta={"type": "artifact_response", "original_message": msg.id} + ) + + except Exception as e: + self.log.record("error", f"Public command error: {e}", agent=self.spec.name) + async def _process_public_message(self, msg: Message): + """Optional: react to public content (e.g., detect URLs, tags)""" + # Extract Artifacts + # publish artifact ID for artifact + pass + + async def _handle_direct_command(self, msg: Message) -> None: + content = msg.content.lower().strip() + intent = self.classify(content) + + if intent is None: + await self.room.send_direct( + sender=self.spec.name, + target=msg.sender, + content="Sorry, I didn't recognize that command." + ) + return + + artifact_id = self.extract_artifact_id(content) + + try: + if intent == "execute_artifact" and artifact_id is not None: + result = await self._safe_execute_artifact(artifact_id) + response = f"Execution result:\n```\n{result}\n```" + + elif intent == "get_specific_artifact" and artifact_id is not None: + artifact = self.get_specific_artifact(artifact_id) + response = self._format_artifact(artifact) if artifact else "Artifact not found." + + elif intent == "delete_artifact" and artifact_id is not None: + success = self.delete_artifact(artifact_id) + response = f"Artifact #{artifact_id} {'deleted' if success else 'not found'}." 
+ + elif intent == "get_all_artifacts": + response = self._format_artifact_list() + + elif intent == "save_artifact_to_disk" and artifact_id is not None: + artifact = self.get_specific_artifact(artifact_id) + if artifact: + self._save_artifact_to_disk(artifact) + response = f"Artifact #{artifact_id} saved." + else: + response = f"Artifact #{artifact_id} not found." + + elif intent == "add_artifact": + code = self.extract_code_content(msg.content) + if code: + artifact = self.add_artifact(code, "python", "User provided") + response = f"Artifact added as #{artifact['id']}." + else: + response = "Could not extract artifact content." + + else: + response = "Invalid or incomplete command." + + await self.room.send_direct( + sender=self.spec.name, + target=msg.sender, + content=response + ) + + except Exception as e: + error_response = f"Error handling artifact command: {str(e)}" + self.log.record("error", error_response, agent=self.spec.name) + await self.room.send_direct( + sender=self.spec.name, + target=msg.sender, + content=error_response + ) + + # ======== INTENT MANAGEMENT ======== + def classify(self, query: str) -> Optional[str]: + query = query.lower() + for intent, keywords in self.UserIntents.items(): + if any(phrase in query for phrase in keywords): + return intent + return None + async def _process_intent(self, intent: str, artifact_id: Optional[int], content: str, msg: Message) -> str: + """Process identified intent and return response""" + try: + if intent == "execute_artifact" and artifact_id is not None: + result = await self._safe_execute_artifact(artifact_id) + return f"Execution result for artifact #{artifact_id}:\n```\n{result}\n```" + + elif intent == "get_specific_artifact" and artifact_id is not None: + artifact = self.get_specific_artifact(artifact_id) + return self._format_artifact(artifact) if artifact else f"Artifact #{artifact_id} not found." + + elif intent == "delete_artifact" and artifact_id is not None: + success = self.delete_artifact(artifact_id) + return f"Artifact #{artifact_id} {'deleted' if success else 'not found'}." + + elif intent == "get_all_artifacts": + return self._format_artifact_list() + + elif intent == "save_artifact_to_disk" and artifact_id is not None: + artifact = self.get_specific_artifact(artifact_id) + if artifact: + self._save_artifact_to_disk(artifact) + return f"Artifact #{artifact_id} saved to disk." + return f"Artifact #{artifact_id} not found." + + elif intent == "add_artifact": + code = self.extract_code_content(msg.content) + if code: + # Try to detect language + language = self._detect_language(code) + artifact = self.add_artifact(code, language, f"From {msg.sender}") + return f"Artifact captured as #{artifact['id']} ({language})" + return "Could not extract code from message. Use code blocks: \\`\\`\\`python\\ncode\\n\\`\\`\\`" + + elif intent == "execute_code": + code = self.extract_code_content(msg.content) + if code: + language = self._detect_language(code) + result = await self._execute_code(code, language) + return f"Execution result:\n```\n{result}\n```" + return "No executable code found in message." + + return "Command processed but no specific action taken." 
+        except Exception as e:
+            return f"Error processing command: {str(e)}"
+
+    @staticmethod
+    def extract_artifact_id(content: str) -> Optional[int]:
+        """Extract artifact ID from command content"""
+        import re
+        patterns = [
+            r"artifact\s+id:\s*#?(\d+)",
+            r"artifact\s+#?(\d+)",
+            r"#(\d+)",
+            r"id\s+(\d+)"
+        ]
+
+        for pattern in patterns:
+            match = re.search(pattern, content.lower())
+            if match:
+                try:
+                    return int(match.group(1))
+                except ValueError:
+                    continue
+        return None
+
+    @staticmethod
+    def extract_code_content(content: str) -> Optional[str]:
+        """Extract code from message content"""
+        import re
+        # Look for code blocks
+        code_pattern = r"```(?:\w+)?\n(.*?)```"
+        matches = re.findall(code_pattern, content, re.DOTALL)
+
+        if matches:
+            return matches[0].strip()
+
+        # If no code blocks, check if entire message is code-like
+        if any(keyword in content.lower() for keyword in ["def ", "class ", "function ", "import ", "console."]):
+            return content.strip()
+
+        return None
+
+    # ======== ARTIFACT MANAGEMENT ========
+    def _parse_artifacts(self, content: str) -> List[Dict]:
+        """Parse content for code blocks and artifacts"""
+        import re
+        artifacts = []
+
+        # Match code blocks with language specifiers
+        pattern = r"```(\w+)?\n(.*?)```"
+        matches = re.finditer(pattern, content, re.DOTALL)
+
+        for match in matches:
+            language = match.group(1) or "text"
+            code = match.group(2).strip()
+
+            if language in self.supported_types and code:
+                artifacts.append({
+                    "content": code,
+                    "language": language,
+                    "description": f"Code block in {language}"
+                })
+
+        return artifacts
+
+    def _detect_language(self, code: str) -> str:
+        """Simple language detection"""
+        code_lower = code.lower()
+        if any(keyword in code_lower for keyword in ["def ", "import ", "class ", "print("]):
+            return "python"
+        elif any(keyword in code_lower for keyword in ["function ", "console.", "const ", "let "]):
+            return "javascript"
+        # reconstructed: the HTML marker list below was lost when this file was
+        # rendered as HTML; these markers are an assumption
+        elif any(keyword in code_lower for keyword in ["<html", "<div", "<body"]):
+            return "html"
+        return "text"
+
+    async def _extract_artifacts_from_message(self, msg: Message) -> None:
+        """Extract and store artifacts from public messages - ENHANCED"""
+        # Skip system messages and our own messages
+        if msg.sender == "system" or msg.sender == self.spec.name:
+            return
+
+        artifacts_found = self._parse_artifacts(msg.content)
+
+        stored_ids: List[int] = []
+        if artifacts_found:
+            for artifact_data in artifacts_found:
+                artifact = self.add_artifact(
+                    code=artifact_data["content"],
+                    language=artifact_data["language"],
+                    description=f"From {msg.sender}: {artifact_data.get('description', 'Auto-captured from chat')}"
+                )
+                # fixed: collect IDs from the stored artifacts; the parse
+                # results carry no 'id' key
+                stored_ids.append(artifact['id'])
+
+                self.log.record("info", f"Artifact captured: #{artifact['id']}",
+                                agent=self.spec.name, source=msg.sender)
+
+        # Announce captured artifacts
+        if self.room and stored_ids:
+            await self.room.send_public(
+                sender=self.spec.name,
+                content=f"📦 Auto-captured {len(stored_ids)} artifact(s): IDs {stored_ids}",
+                meta={
+                    "type": "artifact_capture",
+                    "artifact_ids": stored_ids,
+                    "source_message": msg.id
+                }
+            )
+
+    def add_artifact(self, code: str, language: str, description: str = "") -> Dict:
+        """Add a new code artifact"""
+        artifact = {
+            "id": len(self.artifacts),
+            "uuid": str(uuid.uuid4()),
+            "code": code,
+            "language": language,
+            "description": description,
+            "created_at": datetime.datetime.now().isoformat(),  # fixed: qualify the datetime class
+            "executed": False,
+            "execution_result": "",
+            "execution_count": 0,
+            "last_executed": None
+        }
+        self.artifacts.append(artifact)
+
+        # Optionally save to disk
+        self._save_artifact_to_disk(artifact)
+
+        return artifact
+
+    def get_specific_artifact(self, artifact_id:
int) -> Optional[Dict]: + """Get specific artifact by ID""" + try: + if 0 <= artifact_id < len(self.artifacts): + return self.artifacts[artifact_id] + return None + except (IndexError, TypeError): + return None + + def delete_artifact(self, artifact_id: int) -> bool: + """Delete an artifact by ID""" + try: + if 0 <= artifact_id < len(self.artifacts): + artifact = self.artifacts[artifact_id] + + # Remove from disk if exists + self._delete_artifact_from_disk(artifact) + + # Mark as deleted (don't actually remove to preserve IDs) + artifact["deleted"] = True + artifact["deleted_at"] = datetime.now().isoformat() + + self.log.record("info", f"Artifact deleted: #{artifact_id}", agent=self.spec.name) + return True + return False + except Exception as e: + self.log.record("error", f"Failed to delete artifact: {e}", agent=self.spec.name) + return False + + def store_artifacts(self, parsed: Dict[str, List[str]]) -> None: + """Store multiple artifacts from parsed content""" + for tag in self.supported_types: + if tag in parsed: + for i, content in enumerate(parsed[tag]): + self.add_artifact( + code=content, + language=tag, + description=f"Bulk import {tag} #{i}" + ) + + # ======== EXECUTION ======== + + async def _safe_execute_artifact(self, artifact_id: int) -> str: + """Safely execute an artifact with timeout and error handling""" + artifact = self.get_specific_artifact(artifact_id) + + if not artifact: + return f"Artifact #{artifact_id} not found" + + if artifact.get("deleted"): + return f"Artifact #{artifact_id} has been deleted" + + try: + # Execute with timeout + result = await asyncio.wait_for( + self._execute_code(artifact['code'], artifact['language']), + timeout=self.execution_timeout + ) + + # Update artifact metadata + artifact['executed'] = True + artifact['execution_result'] = result + artifact['execution_count'] = artifact.get('execution_count', 0) + 1 + artifact['last_executed'] = datetime.now().isoformat() + + self.log.record("info", f"Artifact executed: #{artifact_id}", agent=self.spec.name) + + return result + + except asyncio.TimeoutError: + error_msg = f"Execution timeout ({self.execution_timeout}s)" + self.log.record("error", error_msg, agent=self.spec.name, artifact_id=artifact_id) + return error_msg + except Exception as e: + error_msg = f"Execution error: {str(e)}" + self.log.record("error", error_msg, agent=self.spec.name, artifact_id=artifact_id) + return error_msg + + async def _execute_code(self, code: str, language: str) -> str: + """Execute code based on language type""" + if language in ["python", "py"]: + return await self._execute_python(code) + elif language in ["javascript", "js"]: + return await self._execute_javascript(code) + else: + return f"Execution not supported for language: {language}" + + async def _execute_python(self, code: str) -> str: + """Execute Python code in isolated environment""" + import sys + from io import StringIO + + stdout_capture = StringIO() + stderr_capture = StringIO() + + # Redirect output + old_stdout = sys.stdout + old_stderr = sys.stderr + sys.stdout = stdout_capture + sys.stderr = stderr_capture + + try: + # Create restricted execution environment + safe_globals = { + "__builtins__": { + "print": print, + "len": len, + "range": range, + "str": str, + "int": int, + "float": float, + "list": list, + "dict": dict, + "set": set, + "tuple": tuple, + "abs": abs, + "max": max, + "min": min, + "sum": sum, + "enumerate": enumerate, + "zip": zip, + "map": map, + "filter": filter, + } + } + + if not self.safe_execution: + # Allow more 
+                # builtins if safe mode is off
+                safe_globals["__builtins__"] = __builtins__
+
+            # Execute code
+            exec(code, safe_globals)
+
+            # Capture output
+            output = stdout_capture.getvalue()
+            error_output = stderr_capture.getvalue()
+
+            if error_output:
+                return f"Errors:\n{error_output}"
+            elif output:
+                return output
+            else:
+                return "Code executed successfully (no output)"
+
+        except Exception as e:
+            return f"Execution error: {str(e)}\n{traceback.format_exc()}"
+
+        finally:
+            # Restore output
+            sys.stdout = old_stdout
+            sys.stderr = old_stderr
+
+    async def _execute_javascript(self, code: str) -> str:
+        """Execute JavaScript code using Node.js"""
+        import subprocess
+
+        try:
+            # Create temporary file
+            temp_file = self.storage_dir / f"temp_js_{uuid.uuid4().hex}.js"
+
+            with open(temp_file, 'w') as f:
+                f.write(code)
+
+            # Execute with Node.js
+            result = subprocess.run(
+                ['node', str(temp_file)],
+                capture_output=True,
+                text=True,
+                timeout=self.execution_timeout
+            )
+
+            # Clean up
+            temp_file.unlink()
+
+            if result.returncode == 0:
+                return result.stdout or "Code executed successfully"
+            else:
+                return f"Error:\n{result.stderr}"
+
+        except subprocess.TimeoutExpired:
+            return f"Execution timeout ({self.execution_timeout}s)"
+        except FileNotFoundError:
+            return "Node.js not found. JavaScript execution requires Node.js to be installed."
+        except Exception as e:
+            return f"Execution error: {str(e)}"
+
+    def execute_artifact(self, artifact_id_str: str) -> str:
+        """Synchronous wrapper for artifact execution (deprecated, use _safe_execute_artifact)"""
+        try:
+            artifact_id = int(artifact_id_str)
+            loop = asyncio.get_event_loop()
+            return loop.run_until_complete(self._safe_execute_artifact(artifact_id))
+        except ValueError:
+            return "Invalid artifact ID"
+
+    # ======== FORMATTING ========
+
+    def _format_artifact_list(self) -> str:
+        """Format list of all artifacts"""
+        if not self.artifacts:
+            return "No artifacts stored yet."
+
+        lines = ["📦 **Stored Artifacts:**\n"]
+
+        for artifact in self.artifacts:
+            if artifact.get("deleted"):
+                continue
+
+            status_icon = "✅" if artifact.get("executed") else "📝"
+            exec_count = artifact.get("execution_count", 0)
+
+            lines.append(
+                f"{status_icon} **#{artifact['id']}** [{artifact['language']}] "
+                f"(executed {exec_count}x)\n"
+                f"   {artifact.get('description', 'No description')[:60]}\n"
+            )
+
+        return "\n".join(lines)
+
+    def _format_artifact(self, artifact: Dict) -> str:
+        """Format single artifact for display"""
+        status_icon = "✅" if artifact.get("executed") else "📝"
+
+        formatted = f"{status_icon} **Artifact #{artifact['id']}**\n\n"
+        formatted += f"**Language:** {artifact['language']}\n"
+        formatted += f"**Description:** {artifact.get('description', 'N/A')}\n"
+        formatted += f"**Created:** {artifact.get('created_at', 'N/A')}\n"
+        formatted += f"**Executions:** {artifact.get('execution_count', 0)}\n\n"
+        formatted += f"**Code:**\n```{artifact['language']}\n{artifact['code']}\n```\n"
+
+        if artifact.get('execution_result'):
+            formatted += f"\n**Last Result:**\n```\n{artifact['execution_result']}\n```"
+
+        return formatted
+
+    def get_artifacts_html(self) -> str:
+        """Format artifacts as HTML for display"""
+        # NOTE: the original inline markup was lost when this file was rendered
+        # as HTML; the tag structure below is a reconstruction that preserves
+        # the surviving text and field order.
+        if not self.artifacts:
+            return "<div class='artifacts-empty'>No code artifacts generated yet.</div>"
+
+        html = "<div class='artifacts-list'>"
+
+        for artifact in self.artifacts:
+            if artifact.get("deleted"):
+                continue
+
+            status_icon = "✅" if artifact.get("executed", False) else "📝"
+            exec_count = artifact.get("execution_count", 0)
+
+            html += f"""
+            <div class='artifact-item'>
+                <div class='artifact-header'>
+                    {status_icon} Artifact #{artifact['id']}
+                    <span class='artifact-language'>[{artifact['language']}]</span>
+                    <span class='artifact-exec-count'>(executed {exec_count}x)</span>
+                </div>
+                <div class='artifact-description'>
+                    {artifact.get('description', 'No description')}
+                </div>
+                <pre class='artifact-code'>{self._escape_html(artifact.get('code', ''))}</pre>
+            """
+
+            if artifact.get('execution_result'):
+                html += f"""
+                <div class='artifact-output'>
+                    <strong>Output:</strong>
+                    <pre>{self._escape_html(artifact.get('execution_result', ''))}</pre>
+                </div>
+                """
+
+            html += "</div>"
+
+        html += "</div>"
+        return html
+
+    def _escape_html(self, text: str) -> str:
+        """Escape HTML special characters"""
+        # fixed: the replacement strings had been decoded back to the raw
+        # characters, which made this function a no-op
+        return (text
+                .replace("&", "&amp;")
+                .replace("<", "&lt;")
+                .replace(">", "&gt;")
+                .replace('"', "&quot;")
+                .replace("'", "&#39;"))
+
+    def get_artifacts_code(self) -> str:
+        """Get artifacts as formatted code string"""
+        if not self.artifacts:
+            return "# No artifacts stored"
+
+        output = []
+        for artifact in self.artifacts:
+            if artifact.get("deleted"):
+                continue
+
+            output.append(f"# Artifact #{artifact['id']} - {artifact['language']}")
+            output.append(f"# {artifact.get('description', 'No description')}")
+            output.append(artifact['code'])
+            output.append("\n" + "="*60 + "\n")
+
+        return "\n".join(output)
+
+    # ======== PERSISTENCE ========
+
+    def _save_artifact_to_disk(self, artifact: Dict) -> None:
+        """Save artifact to disk"""
+        try:
+            filename = f"artifact_{artifact['id']}_{artifact['uuid']}.{artifact['language']}"
+            filepath = self.storage_dir / filename
+
+            with open(filepath, 'w', encoding='utf-8') as f:
+                f.write(artifact['code'])
+
+            artifact['file_path'] = str(filepath)
+
+        except Exception as e:
+            self.log.record("error", f"Failed to save artifact to disk: {e}",
+                            agent=self.spec.name, artifact_id=artifact['id'])
+
+    def _delete_artifact_from_disk(self, artifact: Dict) -> None:
+        """Delete artifact file from disk"""
+        try:
+            filepath = artifact.get('file_path')
+            if filepath and Path(filepath).exists():
+                Path(filepath).unlink()
+        except Exception as e:
+            self.log.record("error", f"Failed to delete artifact file: {e}",
+                            agent=self.spec.name)
+
+    def save_all_artifacts(self) -> str:
+        """Save all artifacts to a single JSON file"""
+        try:
+            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")  # fixed: qualify the datetime class
+            filename = f"artifacts_backup_{timestamp}.json"
+            filepath = self.storage_dir / filename
+
+            with open(filepath, 'w', encoding='utf-8') as f:
+                json.dump(self.artifacts, f, indent=2)
+
+            self.log.record("info", f"All artifacts saved to {filename}", agent=self.spec.name)
+            return str(filepath)
+
+        except Exception as e:
+            self.log.record("error", f"Failed to save artifacts: {e}", agent=self.spec.name)
+            return ""
+
+    def load_artifacts_from_file(self, filepath: str) -> bool:
+        """Load artifacts from JSON file"""
+        try:
+            with open(filepath, 'r', encoding='utf-8') as f:
+                loaded_artifacts = json.load(f)
+
+            self.artifacts.extend(loaded_artifacts)
+
+            self.log.record("info", f"Loaded {len(loaded_artifacts)} artifacts from {filepath}",
+                            agent=self.spec.name)
+            return True
+
+        except Exception as e:
+            self.log.record("error", f"Failed to load artifacts: {e}", agent=self.spec.name)
+            return False
+
+    def _ensure_directory_exists(self, path: Path) -> None:
+        """Ensure directory exists"""
+        path.mkdir(parents=True, exist_ok=True)
+
+#############################################################
+_SERVICES_TEAM = [
+    AgentSpec(
+        name="ArtifactManager",
+        role="Artifact SERVICE",
+        goal="Capture, store, and execute code artifacts",
+        instructions="""You manage code artifacts from conversations.
+        Extract code blocks, store them safely, and execute them on request.
+        Only respond when directly addressed or when code artifacts are detected.""",
+        personality="Organized and cautious",
+        skills=["code extraction", "artifact management", "safe execution"],
+        expertise_keywords=["artifacts", "code", "execution", "storage"]
+    ),
+    AgentSpec(
+        name="MemoryManager",
+        role="Memory SERVICE",
+        goal="Track and recall important conversation points",
+        instructions="You track important facts and decisions from conversations.",
+        personality="Attentive and organized",
+        skills=["memory tracking", "information recall", "context management"],
+        expertise_keywords=["memory", "recall", "context", "history"]
+    )
+]
+
+def CreateAgents():
+    PREDEFINED_SPECS = {}
+    new_specs = {agnt.name: agnt for agnt in DEV_TEAM_SPECS}
+    PREDEFINED_SPECS.update(new_specs)  # Add to existing dict
+    return PREDEFINED_SPECS
+
+def CreateServices():
+    _PREDEFINED_SERVICES = {}
+    new_specs = {agnt.name: agnt for agnt in _SERVICES_TEAM}
+    _PREDEFINED_SERVICES.update(new_specs)  # Add to existing dict
+    return _PREDEFINED_SERVICES
+
+PREDEFINED_SERVICES = CreateServices()
+PREDEFINED_SPECS = CreateAgents()
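+# --- Hedged usage sketch (illustrative, not part of the original flow) ---
+# Round-trips a code snippet through the ArtifactsAgent defined above:
+# capture, list, then execute inside the restricted sandbox. The storage
+# directory name is arbitrary, and _safe_execute_artifact is an internal
+# method, so treat this strictly as a sketch.
+async def _demo_artifacts_agent() -> None:
+    agent = ArtifactsAgent.create_artifacts_agent(storage_dir="ARTIFACTS_DEMO")
+    artifact = agent.add_artifact("print(sum(range(5)))", "python", "demo snippet")
+    print(agent._format_artifact_list())
+    # The restricted sandbox whitelists print/sum/range, so this sample runs.
+    result = await agent._safe_execute_artifact(artifact["id"])
+    print("Execution:", result)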
+class AgentManager:
+    """Manages available and connected agents"""
+    def __init__(self):
+        self.connected_services = []
+        self.available_services = list(PREDEFINED_SERVICES.keys())
+        self.available_agents = list(PREDEFINED_SPECS.keys())
+        self.connected_agents = []
+        # FIX: Initialize the analyzer with an empty list; it was previously
+        # passed a generator over connected_agents, which the analyzer had to
+        # convert anyway.
+        self.analyzer = RoleNetworkAnalyzer([])
+
+    def CreateAgents(self):
+        new_specs = {agnt.name: agnt for agnt in DEV_TEAM_SPECS}
+        PREDEFINED_SPECS.update(new_specs)  # Add to existing dict
+        return PREDEFINED_SPECS
+
+    def CreateServices(self):
+        new_specs = {agnt.name: agnt for agnt in _SERVICES_TEAM}
+        PREDEFINED_SERVICES.update(new_specs)  # fixed: was updating the agent registry instead of the services registry
+        return PREDEFINED_SERVICES
+
+    def get_available_agents(self):
+        """Get agents that can be added to the session"""
+        return [agent for agent in self.available_agents if agent not in self.connected_agents]
+
+    def get_available_services(self):
+        """Get services that can be added to the session"""
+        return [service for service in self.available_services if service not in self.connected_services]
+
+    def get_connected_agents(self):
+        """Get agents currently in the session"""
+        return self.connected_agents.copy()
+
+    def get_connected_services(self):
+        """Get services currently in the session"""
+        return self.connected_services.copy()
+
+    def add_service(self, service_name: str):
+        """Add a service to connected services"""
+        if service_name in self.available_services and service_name not in self.connected_services:
+            self.connected_services.append(service_name)
+            return True
+        return False
+
+    def remove_service(self, service_name: str):
+        """Remove a service from connected services"""
+        if service_name in self.connected_services:
+            self.connected_services.remove(service_name)
+            return True
+        return False
+
+    def add_agent(self, agent_name: str):
+        """Add an agent to connected agents"""
+        if agent_name in self.available_agents and agent_name not in self.connected_agents:
+            self.connected_agents.append(agent_name)
+            return True
+        return False
+
+    def remove_agent(self, agent_name: str):
+        """Remove an agent from connected agents"""
+        if agent_name in self.connected_agents:
+            self.connected_agents.remove(agent_name)
+            return True
+        return False
+
+    def get_team_for_role(self, role):
+        return self.analyzer.get_team_for_role(role)
+
+    def generate_all_team_candidates(self):
+        # Generate team candidates
+        return self.analyzer.generate_all_team_candidates()
+
+    def get_dependants_team_for_role(self, role: str) -> TeamCandidate:
+        """Quick access: get the team candidate for a specific role's dependants subtree"""
+        return self.analyzer.get_agents_for_dependants_subtree(role)
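+# --- Hedged usage sketch (illustrative, not part of the original flow) ---
+# Exercises the connect/disconnect bookkeeping of AgentManager. The names are
+# taken from whatever the registries contain at runtime rather than hard-coded.
+def _demo_agent_manager() -> None:
+    mgr = AgentManager()
+    available = mgr.get_available_agents()
+    if available:
+        first = available[0]
+        assert mgr.add_agent(first)          # connect the first available agent
+        print("Connected:", mgr.get_connected_agents())
+        assert mgr.remove_agent(first)       # and disconnect it again
+    print("Available services:", mgr.get_available_services())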
+            <div class="lcars-title">🖖 L.C.A.R.S - Local Computer Advanced Reasoning System v3.0 (Borg Collective)</div>
+            <div class="lcars-subtitle">USS Enterprise • NCC-1701-D • Starfleet Command</div>
+        </div>
+        """)
+
+        with gr.Tabs():
+            with gr.Tab("Chat Room"):
+                with gr.Row():
+                    with gr.Column(scale=2):
+                        gr.Markdown("### Room Controls")
+                        init_room_btn = gr.Button("Initialize Chat Room", variant="primary", elem_classes="lcars-button")
+                        room_status = gr.Textbox(label="Room Status", value="No active room", interactive=False, elem_classes="lcars-display")
+                        gr.Markdown("### 📋 Agent Management")
+                        with gr.Accordion(label="📝 Agent Details", open=False, elem_classes="lcars-accordion"):
+                            agent_details = gr.HTML(value="<div class='lcars-display'>Select an agent to view details</div>")
") + with gr.Accordion("Create Custom Agent", open=False, elem_classes="lcars-accordion"): + gr.Markdown("### 🛠️ Custom Agent Builder") + + # Add example button + load_example_btn = gr.Button("Load Example Agent", variant="secondary", size="sm", elem_classes="lcars-button-secondary") + + custom_agent_name = gr.Textbox( + label="Agent Name", + placeholder="e.g., Documentation Specialist", + elem_classes="lcars-input" + ) + custom_agent_role = gr.Textbox( + label="Agent Role", + placeholder="e.g., Technical Writer", + elem_classes="lcars-input" + ) + custom_agent_personality = gr.Textbox( + label="Personality", + placeholder="e.g., Detail-oriented and clear", + elem_classes="lcars-input" + ) + custom_agent_goal = gr.Textbox( + label="Goal", + placeholder="What this agent aims to do", + elem_classes="lcars-input" + ) + custom_agent_instructions = gr.Textbox( + label="Instructions", + placeholder="How the agent should behave", + lines=3, + elem_classes="lcars-input" + ) + custom_agent_skills = gr.Textbox( + label="Skills (comma-separated)", + placeholder="e.g., Writing, Documentation, Markdown", + elem_classes="lcars-input" + ) + create_custom_btn = gr.Button("Create Custom Agent", variant="secondary", elem_classes="lcars-button-create") + with gr.Accordion(label="Session Agents", open=True, elem_classes="lcars-accordion"): + gr.Markdown("**Available Agents**") + with gr.Row(): + available_agents_dropdown = gr.Dropdown( + choices=[], + label="Select Agent to Preview/Add", + interactive=True, + elem_classes="lcars-dropdown" + ) + with gr.Row(): + add_agent_btn = gr.Button("Add Agent to Session", variant="primary", size="sm", elem_classes="lcars-button-add") + with gr.Accordion(label="Session Services", open=True, elem_classes="lcars-accordion"): + gr.Markdown("**Available Services**") + with gr.Row(): + available_Services_dropdown = gr.Dropdown( + choices=[], + label="Select Service to Preview/Add", + interactive=True, + elem_classes="lcars-dropdown" + ) + + + with gr.Row(): + add_service_btn = gr.Button("Add Service to Session", variant="primary", size="sm", elem_classes="lcars-button-add") + + with gr.Column(scale=2): + gr.Markdown("### Message Settings") + with gr.Accordion("🎯 Task Management", open=False, elem_classes="lcars-accordion"): + gr.Markdown("### 🎯 Colabaration Methods") + + orch_method = gr.Dropdown( + choices=[ + "broadcast", "sequential", "hierarchical", "parallel", "iterative", + "round_robin", "parallel_evaluation", + "consensus","supervised", "parallel_consensus", "roundtable_discussion", + "router_dynamic", "voting" + ], + label="Orchestration Method", + value="hierarchical", + info="Select orchestration pattern", + elem_classes="lcars-dropdown" + ) + with gr.Accordion("🎯 Methodolgy Description", open=False, elem_classes="lcars-accordion"): + pattern_info = gr.HTML( + value="
Select an orchestration pattern above
", + elem_classes="lcars-container" + ) + with gr.Accordion("Team Management", open=True, elem_classes="lcars-accordion") as orch_params: + sequential_order = gr.Textbox( + label="Sequential Order (comma-separated)", + placeholder="e.g., Research Assistant, Python Coder, Data Analyst", + visible=False,lines=4, + elem_classes="lcars-input" + ) + + hierarchical_supervisor = gr.Dropdown( + choices=[], + label="Supervisor", + visible=False, + elem_classes="lcars-dropdown" + ) + hierarchical_team = gr.CheckboxGroup( + choices=[], + label="Team Members", + visible=False, + elem_classes="lcars-checkbox" + ) + + parallel_tasks = gr.Dataframe( + headers=["Agent", "Sub-task"], + datatype=["str", "str"], + row_count=3, + col_count=2, + label="Parallel Sub-tasks", + visible=False + ) + + iterative_iterations = gr.Number( + label="Number of Iterations", + value=3, + minimum=1, + maximum=10, + visible=False + ) + iterative_sequence = gr.Textbox( + label="Agent Sequence (comma-separated)", + placeholder="e.g., Research Assistant, Python Coder, Data Analyst", + visible=False, + elem_classes="lcars-input" + ) + + legacy_turns = gr.Number( + label="Number of Turns/Iterations", + value=2, + minimum=1, + maximum=10, + visible=False + ) + legacy_agents = gr.CheckboxGroup( + choices=[], + label="Select Agents for Legacy Pattern", + visible=False, + elem_classes="lcars-checkbox" + ) + + with gr.Accordion("Connected To Session", open=True, elem_classes="lcars-accordion") : + gr.Markdown("**Connected Agents**") + with gr.Row(): + connected_agents_dropdown = gr.Dropdown( + choices=[], + label="Select Agent to Remove", + interactive=True, + elem_classes="lcars-dropdown" + ) + with gr.Row(): + remove_agent_btn = gr.Button("Remove Agent", variant="stop", size="sm", elem_classes="lcars-button-remove") + + gr.Markdown("**Connected Services**") + with gr.Row(): + connected_services_dropdown = gr.Dropdown( + choices=[], + label="Select Services to Remove", + interactive=True, + elem_classes="lcars-dropdown" + ) + with gr.Row(): + remove_service_btn = gr.Button("Remove Service", variant="stop", size="sm", elem_classes="lcars-button-remove") + + with gr.Column(scale=4,variant= ['panel'],show_progress = False,min_width = 400): +# --- Main Querys Tab --- + with gr.Tab("System Activity"): + gr.Markdown("### 💬 - Active Conversations") + chatbot = gr.Chatbot(label= "Borg Collective", + height=400, + show_copy_all_button=True, + allow_file_downloads=True, + show_copy_button=True, + autoscroll=True,min_width = 350, + resizable=True,show_label=True,editable="all",show_share_button = True, + feedback_options=["like","dislike"], + group_consecutive_messages=True, + elem_classes="lcars-chatbot",type="tuples",allow_tags=True, + ) + +# --- Main Artifacts Editor Tab --- + with gr.Tab(label="🤖 Artifacts"): + with gr.Accordion(label="🐍 Code Artifacts Workshop", open=True): + artifacts_execution_output = gr.HTML(f"
🧠 Execution Results
") + with gr.Row(): + # a code editor for the artifacts generated in the conversation + code_artifacts = gr.Code( + language="python", + label="Generated Code & Artifacts", + lines=10, + interactive=True, + autocomplete=True, + show_line_numbers=True, + elem_id="code_editor",elem_classes=["chatbox", "lcars-input","lcars-panel"] + ) + with gr.Accordion(label="📜 Artifact Settings", open=False): + artifact_description = gr.Textbox( + label="Artifact Description", + placeholder="Brief description of the code...", + scale=2,elem_classes=["chatbox", "lcars-input"] + ) + artifact_language = gr.Dropdown( + choices=["python", "javascript", "html", "css", "bash", "sql", "json"], + value="python", + label="Language", + scale=1,elem_classes=["chatbox", "lcars-input"] + ) + + # add this codepad as a new artifact in the session + AddArtifact_btn = gr.Button("📜 Add artifact", variant="huggingface") + # loads a session artifact to the codepad + LoadArtifact_btn = gr.Button("📂 Load Artifact", variant="huggingface") + Load_artifact_id_input = gr.Textbox( + label="Artifact ID", + placeholder="ID to Load", + scale=1,elem_classes=["chatbox", "lcars-input"] + ) + with gr.Row(): + # executes this codepad/Loaded artifact + ExecuteArtifactCodePad_btn = gr.Button("▶️ Execute CodePad", variant="huggingface") + + # Code execution from pad + with gr.Accordion(label="⚡ Artifacts Actions", open=True): + with gr.Row(): + artifact_id_input = gr.Textbox( + label="Artifact ID", + placeholder="Artifact ID (0, 1, 2)", + scale=2,elem_classes=["chatbox", "lcars-input"] + ) + execute_artifact_btn = gr.Button("▶️ Execute Artifact", variant="huggingface") + + with gr.Row(): + batch_artifact_ids = gr.Textbox( + label="Batch Execute IDs", + placeholder="e.g., 0,1 or 0-5", + scale=2,elem_classes=["chatbox", "lcars-input"] + ) + batch_execute_btn = gr.Button("⚡Batch Execute", variant="huggingface") + + refresh_artifacts_btn = gr.Button("🔄 Refresh Artifacts", elem_classes="lcars-button") + + with gr.Accordion("Mission Logs", open= False,elem_classes="lcars-accordion"): + with gr.Row(show_progress = False,variant= ['compact'],elem_classes="lcars-accordion"): + with gr.Tab("System Log"): + gr.Markdown("### 📊 System Events Log") + refresh_log_btn = gr.Button("Refresh Log", elem_classes="lcars-button") + log_display = gr.HTML(value="
No events
", elem_classes="lcars-container") + with gr.Tab("Model settings",elem_classes="lcars-accordion"): + with gr.Row(): + with gr.Column(scale=1): # Model Settings Sidebar + with gr.Accordion("Model Settings", open=True, elem_classes="lcars-accordion"): + base_url_input = gr.Textbox( + label="Base URL", + value="http://localhost:1234/v1", + elem_classes="lcars-input" + ) + api_key_input = gr.Textbox( + label="API Key", + value="not-needed", + type="password", + elem_classes="lcars-input" + ) + get_models_button = gr.Button("Fetch Models", elem_classes="lcars-button") + + available_models_dropdown = gr.Dropdown( + choices=[], + label="Select Model", + interactive=True, + elem_classes="lcars-dropdown" + ) + + model_id_display = gr.Textbox( + label="Selected Model ID", + interactive=False, + elem_classes="lcars-display" + ) + with gr.Accordion(label = "Send Query ", open=True,elem_classes="lcars-accordion"): + # Changed to CheckboxGroup for multiple recipients + with gr.Row(show_progress = False,variant= ['compact'],elem_classes="lcars-accordion"): + recipient = gr.CheckboxGroup( + choices=[], + label="Send to (select multiple for direct messages)", + visible=False, + elem_classes="lcars-checkbox" + ) + # these are agents which perform tasks on the human message , + # such as speech transalation - or prompt enhancement( intent classifcation ) + # so here the query will be sent thru to the intended destination ( public, direct ) + ## - Proxy agents enabled by checkbox + UtilityAgent = gr.CheckboxGroup( + choices=[], + label="Send to (select utilty agent to send via)", + visible=False, + elem_classes="lcars-checkbox" + ) + + with gr.Row(show_progress = False,variant= ['panel'], elem_classes="lcars-accordion"): + msg_input = gr.Textbox( + label="Access the collective",autofocus=True, + placeholder="Describe the task or send a message...", + lines=6,show_copy_button = True, + scale=4,min_width = 200, + elem_classes="lcars-input", + ) + with gr.Row(): + task_priority = gr.Radio( + choices=["low", "normal", "high"], + value="high", + label="Priority", + elem_classes="lcars-radio" + ) + with gr.Row(elem_classes="lcars-accordion"): + send_btn = gr.Button("Send", variant="primary", elem_classes="lcars-button-send") + assign_task_btn = gr.Button("Assign Task", scale=1,variant="primary", elem_classes="lcars-button-task") + with gr.Row(elem_classes="lcars-accordion"): + + msg_type = gr.Radio( + choices=["Public", "Direct"], + value="Public", + label="Message Type", + elem_classes="lcars-radio" + ) + # utility agents/PRoxys : Uses a utility agent to preprocess the query before passing it to the intended target Public/Direct + enable_utility_agents = gr.Checkbox(label = "Enable proxy Agents",value=False) + + with gr.Row(variant= ['compact'],elem_classes="lcars-accordion"): + + with gr.Row(show_progress = False,elem_classes="lcars-accordion"): + clear_btn = gr.Button("Clear Chat", elem_classes="lcars-button-secondary") + summary_btn = gr.Button("Session Summary", elem_classes="lcars-button-secondary") + + with gr.Column(scale=2,min_width=300,elem_classes="lcars-accordion"): + with gr.Accordion("Room Participants", open=True, elem_classes="lcars-accordion") : + gr.Markdown("### Room Participants") + participants_display = gr.HTML(value="
No active room
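+
+        # The synchronous handlers below drive room coroutines via asyncio.run(),
+        # which spins up a fresh event loop per call. A minimal sketch of that
+        # pattern (illustrative only — the handlers call asyncio.run directly
+        # rather than going through a helper like this):
+        def _run_coro(coro):
+            """Run a coroutine to completion from synchronous UI code."""
+            return asyncio.run(coro)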
", elem_classes="lcars-container") + + def initialize_room(): + try: + log = CentralLog("ui") + room = ChatRoom(log=log) + + agent_manager = AgentManager() + + human = Human("human_user") + asyncio.run(room.add_client(human)) + + llm = LLMAgent(generate_fn=LLMAgent.openai_generate) + manager = Session_Manager(spec=SESSION_MANAGER_SPEC, llm=llm, manage_room=room, telemetry=log) + asyncio.run(room.add_client(manager)) + room.session_manager = manager + + asyncio.run(manager.welcome_participants()) + + return ( + room, log, agent_manager, + f"Room {room.room_id} initialized", + room.get_chat_history_for_display(), + build_participants_html(room), + refresh_system_log(log), + gr.update(choices=[]), # recipient checkboxgroup + gr.update(choices=agent_manager.get_available_agents()), # available agents + gr.update(choices=agent_manager.get_connected_agents()), # connected agents + render_agent_details(None, agent_manager), + gr.update(choices=[]), # hierarchical_supervisor + gr.update(choices=[]), # hierarchical_team + gr.update(), # iterative_iterations + gr.update(), # iterative_sequence + gr.update(), # legacy_turns + gr.update(choices=[]), # legacy_agents + get_pattern_info("hierarchical"), + gr.update(choices=agent_manager.get_available_services()), # FIX: available services + gr.update(choices=agent_manager.get_connected_services()), # FIX: connected services + ) + + except Exception as e: + return None, None, None, f"Error: {e}", [], "
Init failed
", "
Error
", gr.update(), gr.update(), gr.update(), render_agent_details(None, AgentManager()), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), get_pattern_info("broadcast"), gr.update(), gr.update() + + + + + def render_agent_details(agent_name, agent_manager_state): + """Render detailed agent card when selected from dropdown""" + if not agent_name: + return "
Select an agent to view details
" + + if agent_name in PREDEFINED_SPECS: + + agent = PREDEFINED_SPECS[agent_name] + skills_list = "
".join([f"▸ {skill}" for skill in agent.skills]) + keywords_list = ", ".join(agent.expertise_keywords) + + is_connected = agent_name in agent_manager_state.connected_agents + connection_status = "🟢 CONNECTED" if is_connected else "🔴 AVAILABLE" + status_class = "lcars-status-connected" if is_connected else "lcars-status-available" + elif agent_name in PREDEFINED_SERVICES: + agent = PREDEFINED_SERVICES[agent_name] + skills_list = "
".join([f"▸ {skill}" for skill in agent.skills]) + keywords_list = ", ".join(agent.expertise_keywords) + + is_connected = agent_name in agent_manager_state.connected_services + connection_status = "🟢 CONNECTED" if is_connected else "🔴 AVAILABLE" + status_class = "lcars-status-connected" if is_connected else "lcars-status-available" + + else: + return f"
{agent_name} not found
" + + + + + + + return f""" +
+
+
{agent.name}
+
{connection_status}
+
+
+
+ + {agent.personality} +
+
+
ROLE
+ {agent.role} +
+
+
SKILLS
+
{skills_list}
+
+
+
EXPERTISE AREAS
+
{keywords_list}
+
+
+
+ """ + PATTERN_DESCRIPTIONS = { + "broadcast": { + "display_name": "Broadcast Pattern", + "short_desc": "Send task to all agents simultaneously for diverse perspectives", + "full_desc": "Simultaneously distributes an identical task to all available agents. Ideal for gathering diverse perspectives on a single problem, conducting initial idea generation, or when the best-suited agent for a task is unknown. All agents process the same input independently and in parallel.", + "min_agents": 2, + "html": "Broadcast Pattern
Send task to all agents simultaneously for gathering diverse perspectives and initial idea generation when the best agent is unknown." + }, + "sequential": { + "display_name": "Sequential Pipeline Pattern", + "short_desc": "Chain agents together in specific order for step-by-step processing", + "full_desc": "Executes a linear, stage-gated workflow where agents are chained in a specific order. The output of one agent becomes the input for the next. Optimal for multi-stage processes with clear dependencies, such as research -> analysis -> writing -> review, ensuring each step is completed before the next begins.", + "min_agents": 2, + "html": "Sequential Pipeline Pattern
Each agent receives the output of the previous agent. Used for data processing, summarization, and iterative refinement where each step transforms the result." + }, + "hierarchical": { + "display_name": "Hierarchical Pattern", + "short_desc": "Designate supervisor and team members for complex project management", + "full_desc": "Establishes a clear command structure with a single supervisor agent responsible for task decomposition, delegation, and synthesis. The supervisor manages a team of specialist agents, collects their outputs, and assembles the final result. Best for complex projects requiring strong coordination and a single point of decision-making.", + "min_agents": 3, + "html": "Hierarchical (Manager-Worker) Pattern
A manager agent decomposes tasks and delegates to worker agents. Manager then collects and summarizes results. Ideal for project decomposition." + }, + "parallel": { + "display_name": "Parallel Pattern", + "short_desc": "Distribute sub-tasks across agents for maximum efficiency", + "full_desc": "Distributes distinct, independent sub-tasks across multiple agents simultaneously to maximize efficiency and reduce latency. Each agent works on a different piece of the overall problem. Effective when a task can be cleanly partitioned, such as analyzing different datasets or researching separate topics.", + "min_agents": 2, + "html": "Parallel Pattern
Distributes distinct sub-tasks across multiple agents simultaneously. Ideal for partitioned workloads like analyzing different datasets or researching separate topics." + }, + "iterative": { + "display_name": "Iterative Pattern", + "short_desc": "Execute through multiple refinement cycles for progressive enhancement", + "full_desc": "Executes a cyclical process of creation and refinement. An initial agent produces a draft or solution, which is then successively improved by one or more subsequent agents in loops. Ideal for tasks requiring progressive enhancement, like code development, document editing, or design refinement.", + "min_agents": 2, + "html": "Iterative Pattern
Cyclical process of creation and refinement where agents successively improve outputs. Perfect for code development, document editing, or design refinement." + }, + "round_robin": { + "display_name": "Round-Robin Pattern", + "short_desc": "Multi-turn discussion with agents taking turns for consensus building", + "full_desc": "Facilitates a structured, multi-turn discussion where agents take turns adding their perspective, building upon or critiquing previous contributions. Ensures all agents have an equal opportunity to influence the outcome and is excellent for complex debate, brainstorming, or problem-solving requiring integrated input.", + "min_agents": 2, + "html": "Round-Robin Debate Pattern
All agents get turns responding to the same task. Repeats for multiple rounds. Great for consensus building, brainstorming, and debate between specialized agents." + }, + "parallel_evaluation": { + "display_name": "Parallel Evaluation Pattern", + "short_desc": "All agents respond, best answer selected for multiple perspectives", + "full_desc": "All agents respond to the same task in parallel, after which a single 'best' response is selected based on predefined criteria (e.g., quality, completeness, creativity). Useful for competitive idea generation or when multiple potential solutions exist and only the highest-fidelity one is required.", + "min_agents": 2, + "html": "Parallel Evaluation Pattern
All agents receive the same task simultaneously and produce answers independently. Then an ensemble logic selects the best response. Ideal for multiple perspectives." + }, + "consensus": { + "display_name": "Consensus Pattern", + "short_desc": "Voting-based decision making for democratic solutions", + "full_desc": "A collaborative decision-making process where agents first generate individual responses and then participate in a voting or ranking mechanism to converge on a single, agreed-upon output. Suitable for subjective tasks or when buy-in from all participants is symbolically important.", + "min_agents": 3, + "html": "Consensus (Voting) Pattern
Each agent proposes an answer, then an arbiter agent votes for or merges the best answer. Perfect for crowd-based reasoning and democratic decision-making." + }, + "supervised": { + "display_name": "Supervised Pattern", + "short_desc": "First agent supervises, others execute for dynamic workflows", + "full_desc": "A two-tiered approach where a primary 'supervisor' agent is responsible for the final output. It decomposes the task, delegates sub-tasks to other 'worker' agents, and then integrates their work into a cohesive whole. Differs from hierarchical in that the supervisor is also a hands-on contributor to the final assembly.", + "min_agents": 3, + "html": "Supervisor (Orchestrator) Pattern
A dedicated orchestrator dynamically decides which agent to call next based on context. Perfect for dynamic workflows and adaptive agent ecosystems." + }, + "parallel_consensus": { + "display_name": "Parallel Consensus Pattern", + "short_desc": "Parallel responses with coordinator synthesis for comprehensive reports", + "full_desc": "Agents work in parallel to generate responses, followed by a dedicated coordinator agent synthesizing these responses into a single, unified output. The coordinator evaluates all inputs, resolves conflicts, and extracts the best elements from each, rather than just selecting one. Ideal for creating comprehensive reports from multiple sources.", + "min_agents": 3, + "html": "Parallel Consensus Pattern
All agents respond simultaneously, then a coordinator summarizes. Combines parallel execution with consensus synthesis." + }, + "roundtable_discussion": { + "display_name": "Roundtable Discussion Pattern", + "short_desc": "Iterative roundtable discussion with all agents for deep collaboration", + "full_desc": "An extended, iterative form of round_robin designed for deep collaboration. Agents engage in multiple cycles of discussion, allowing them to refine their positions based on group feedback. This pattern is optimal for achieving deep consensus, complex strategy formulation, or negotiating a shared understanding.", + "min_agents": 3, + "html": "Roundtable Discussion Pattern
Agents discuss iteratively, refining each other's answers in dialogue. Excellent for collaborative problem-solving and knowledge synthesis." + }, + "router_dynamic": { + "display_name": "Dynamic Routing Pattern", + "short_desc": "Router dynamically selects best agent for each step for adaptive workflows", + "full_desc": "Employs an intelligent router or classifier agent that dynamically analyzes each sub-task or query in real- and routes it to the single most appropriate agent based on their declared skills or past performance. Maximizes efficiency and expertise utilization for a stream of heterogeneous tasks.", + "min_agents": 3, + "html": "Dynamic Routing Pattern
A router agent selects which agent handles the next step based on current context. Ideal for adaptive workflows and context-aware routing." + }, + "voting": { + "display_name": "Voting Pattern", + "short_desc": "Agents propose and vote for best solution for democratic decision-making", + "full_desc": "A decision-focused pattern where agents first propose their individual solutions or arguments. A formal voting mechanism (e.g., majority rule, ranked choice) is then used to select the final course of action or answer. Best for resolving clear choices or making governance-style decisions.", + "min_agents": 3, + "html": "Voting Pattern
Agents each give answers, then all agents vote for the best answer. Great for democratic decision-making and consensus verification." + } + } + def update_orchestration_ui(orch_method_val, room_state): + """UNIFIED UI UPDATE - filters out services from task assignment""" + + pattern = PATTERN_DESCRIPTIONS.get(orch_method_val, {}) + + # Unified configuration + PATTERN_CONFIG = { + "broadcast": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": False}, + "sequential": {"sequential": True, "hierarchical": False, "parallel": False, "iterative": False, "legacy": False}, + "hierarchical": {"sequential": False, "hierarchical": True, "parallel": False, "iterative": False, "legacy": False}, + "parallel": {"sequential": False, "hierarchical": False, "parallel": True, "iterative": False, "legacy": False}, + "iterative": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": True, "legacy": False}, + "round_robin": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "parallel_evaluation": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "consensus": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "supervised": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "parallel_consensus": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "roundtable_discussion": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "router_dynamic": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}, + "voting": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True} + } + + # FIX: Filter out services - only show agents + agents = [] + if room_state: + agents = [c.spec.name for c in room_state._clients.values() + if hasattr(c, 'spec') + and c.spec.name != "Session Manager" + and "SERVICE" not in c.spec.role] # FIX: Exclude services + + config = PATTERN_CONFIG.get(orch_method_val, {}) + display_name = pattern.get('display_name', orch_method_val.replace('_', ' ').title()) + pattern_desc = pattern.get('short_desc', 'No description available') + + pattern_html = f""" +
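+
+        # A small illustrative lookup against the catalogue above (not wired into
+        # the UI): the pattern metadata drives both the info panel and agent-count
+        # recommendations.
+        def _pattern_catalog_example():
+            pattern = PATTERN_DESCRIPTIONS["hierarchical"]
+            # e.g. "Hierarchical Pattern needs at least 3 agents"
+            return f"{pattern['display_name']} needs at least {pattern['min_agents']} agents"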
+        def update_orchestration_ui(orch_method_val, room_state):
+            """Unified UI update — filters services out of task assignment"""
+            pattern = PATTERN_DESCRIPTIONS.get(orch_method_val, {})
+
+            # Which parameter widgets each orchestration method needs
+            PATTERN_CONFIG = {
+                "broadcast": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": False},
+                "sequential": {"sequential": True, "hierarchical": False, "parallel": False, "iterative": False, "legacy": False},
+                "hierarchical": {"sequential": False, "hierarchical": True, "parallel": False, "iterative": False, "legacy": False},
+                "parallel": {"sequential": False, "hierarchical": False, "parallel": True, "iterative": False, "legacy": False},
+                "iterative": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": True, "legacy": False},
+                "round_robin": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "parallel_evaluation": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "consensus": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "supervised": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "parallel_consensus": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "roundtable_discussion": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "router_dynamic": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True},
+                "voting": {"sequential": False, "hierarchical": False, "parallel": False, "iterative": False, "legacy": True}
+            }
+
+            # Only agents (not services) are valid orchestration targets
+            agents = []
+            if room_state:
+                agents = [c.spec.name for c in room_state._clients.values()
+                          if hasattr(c, 'spec')
+                          and c.spec.name != "Session Manager"
+                          and "SERVICE" not in c.spec.role]
+
+            config = PATTERN_CONFIG.get(orch_method_val, {})
+            display_name = pattern.get('display_name', orch_method_val.replace('_', ' ').title())
+            pattern_desc = pattern.get('short_desc', 'No description available')
+
+            pattern_html = f"""
+            <div class="lcars-container">
+                <div class="lcars-title">{display_name.upper()}</div>
+                <div>{pattern_desc}</div>
+            </div>
+            """
+
+            return (
+                gr.update(visible=config.get("sequential", False)),
+                gr.update(choices=agents, visible=config.get("hierarchical", False)),
+                gr.update(choices=agents, visible=config.get("hierarchical", False)),
+                gr.update(visible=config.get("parallel", False)),
+                gr.update(visible=config.get("iterative", False)),
+                gr.update(visible=config.get("iterative", False)),
+                gr.update(visible=config.get("legacy", False)),
+                gr.update(choices=agents, visible=config.get("legacy", False)),
+                pattern_html
+            )
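+
+        # Illustrative use of the visibility contract above (not wired in): for
+        # "sequential", the first returned update targets sequential_order and
+        # marks it visible, while the hierarchical/parallel widgets stay hidden.
+        def _visibility_contract_example():
+            updates = update_orchestration_ui("sequential", None)
+            return updates[0]  # the update for sequential_order (visible=True)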
" + + return f""" +
+
{pattern.get('display_name', pattern_name).upper()}
+
+
+
DESCRIPTION
+
{pattern.get('short_desc', 'No description available')}
+
+
+
MINIMUM AGENTS
+
{pattern.get('min_agents', 2)} recommended
+
+
+
DETAILED EXPLANATION
+
{pattern.get('full_desc', 'No detailed description available')}
+
+
+
+ """ + def build_participants_html(room_state): + """Build LCARS-styled participant list using pattern description styling""" + if not room_state: + return "
No active room
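+
+        # The participant cards below interpolate spec fields straight into HTML.
+        # A minimal escaping helper sketch (illustrative only — not applied below)
+        # for the case where agent names or personalities could contain markup:
+        def _safe(text: str) -> str:
+            import html as _html
+            return _html.escape(str(text))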
" + + html = "
" + + for client in room_state._clients.values(): + if hasattr(client, 'spec'): + spec = client.spec + is_manager = "Manager" in spec.role + is_service = "SERVICE" in spec.role + skills_str = ", ".join(spec.skills[:3]) + if len(spec.skills) > 3: + skills_str += f" +{len(spec.skills)-3} more" + + badge_class = "lcars-pattern-badge-manager" if is_manager else "lcars-pattern-badge-agent" + + + badge_text = "BOSS" + if is_manager : + badge_text = "BOSS" + card_id = f"card_{spec.name.replace(' ', '_')}" + html += f""" +
+
+ {spec.name} + {badge_text} +
+
+
+
ROLE
+
{spec.role}
+
+
+
PERSONALITY
+
{spec.personality}
+
+
+
+ """ + elif is_service : + badge_text = "SERVICE" + card_id = f"card_{spec.name.replace(' ', '_')}" + html += f""" +
+
+ {spec.name} + {badge_text} +
+
+
+
ROLE
+
{spec.role}
+
+
+
PERSONALITY
+
{spec.personality}
+
+
+
+ """ + else : + badge_text = "AGENT" + card_id = f"card_{spec.name.replace(' ', '_')}" + html += f""" +
+
+ {spec.name} + {badge_text} +
+
+
+
ROLE
+
{spec.role}
+
+
+
PERSONALITY
+
{spec.personality}
+
+
+
+ """ + + if html == "
": + html += f""" +
+
+ Spydaz +
+
+ """ + + html += "
" + + + return html + def refresh_system_log(log_state): + if not log_state: + return "
No log data available
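+
+        # refresh_system_log (below) only assumes log entries expose `.level` and
+        # `.event`. An illustrative sketch of that minimal shape (hypothetical —
+        # the real entry type comes from CentralLog):
+        @dataclass
+        class _LogEntrySketch:
+            level: str   # e.g. "INFO" or "ERROR"
+            event: str   # human-readable event text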
" + + entries = log_state.all() + if not entries: + return "
No events recorded
" + + html = "
" + for entry in entries[-15:]: + level_class = "lcars-log-info" if entry.level == "INFO" else "lcars-log-error" + html += f"
[{entry.level}] {entry.event}
" + html += "
" + return html + + def add_service_to_session(service_name, room_state, log_state, agent_manager_state): + if not room_state or not service_name: + return "No room or Service selected", gr.update(), gr.update(), gr.update(), build_participants_html(room_state), render_agent_details(service_name, agent_manager_state), gr.update(), gr.update(), gr.update() + + try: + if service_name not in PREDEFINED_SERVICES: + return "Service not found", gr.update(), gr.update(), gr.update(), build_participants_html(room_state), render_agent_details(service_name, agent_manager_state), gr.update(), gr.update(), gr.update() + + if not agent_manager_state.add_service(service_name): + return f"Service {service_name} already in session", gr.update(), gr.update(), gr.update(), build_participants_html(room_state), render_agent_details(service_name, agent_manager_state), gr.update(), gr.update(), gr.update() + + spec = PREDEFINED_SERVICES[service_name] + llm = LLMAgent(generate_fn=LLMAgent.openai_generate) + + # Create appropriate service instance + if service_name == "ArtifactManager": + Service = ArtifactsAgent(spec=spec, llm=llm, manage_room=room_state, telemetry=log_state) + elif service_name == "MemoryManager": + Service = MemoryAgent(spec=spec, llm=llm, manage_room=room_state, telemetry=log_state) + else: + Service = AgentService(spec=spec, llm=llm, manage_room=room_state, telemetry=log_state) + + asyncio.run(room_state.add_service(Service)) + + # FIX: Get agents only (not services) for task assignment + agents = [c.username for c in room_state._clients.values() + if hasattr(c, 'spec') + and c.spec.name != "Session Manager" + and "SERVICE" not in c.spec.role] + services = [c.username for c in room_state._services.values() + if hasattr(c, 'spec') and c.spec.name != "Session Manager"] + return ( + f"Added {service_name} to session", + gr.update(choices=agent_manager_state.get_available_services()), + gr.update(choices=agent_manager_state.get_connected_services()), + gr.update(choices=agents), # recipient - agents only + build_participants_html(room_state), + render_agent_details(service_name, agent_manager_state), + + ) + except Exception as e: + return f"Error: {e}", gr.update(), gr.update(), gr.update(), build_participants_html(room_state), render_agent_details(service_name, agent_manager_state) + + def remove_service_from_session(service_name, room_state, log_state, agent_manager_state): + if not room_state or not service_name: + return "No room or service selected", gr.update(), gr.update(), gr.update(), build_participants_html(room_state), render_agent_details(None, agent_manager_state), gr.update(), gr.update(), gr.update() + + try: + if not agent_manager_state.remove_service(service_name): + return f"Service {service_name} not in session", gr.update(), gr.update(), gr.update(), build_participants_html(room_state), render_agent_details(None, agent_manager_state), gr.update(), gr.update(), gr.update() + + asyncio.run(room_state.remove_service(service_name)) + + # FIX: Get agents only for task lists + agents = [c.username for c in room_state._clients.values() + if hasattr(c, 'spec') + and c.spec.name != "Session Manager" + and "SERVICE" not in c.spec.role] + + return ( + f"Removed {service_name} from session", + gr.update(choices=agent_manager_state.get_available_services()), + gr.update(choices=agent_manager_state.get_connected_services()), + gr.update(choices=agents), # recipient + build_participants_html(room_state), + render_agent_details(None, agent_manager_state), + gr.update(choices=agents), # 
+        def remove_service_from_session(service_name, room_state, log_state, agent_manager_state):
+            # Must return one value per output wired to remove_service_btn below (six).
+            if not room_state or not service_name:
+                return ("No room or service selected", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+            try:
+                if not agent_manager_state.remove_service(service_name):
+                    return (f"Service {service_name} not in session", gr.update(), gr.update(), gr.update(),
+                            build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+                asyncio.run(room_state.remove_service(service_name))
+
+                # Agents only (not services) for the task lists
+                agents = [c.username for c in room_state._clients.values()
+                          if hasattr(c, 'spec')
+                          and c.spec.name != "Session Manager"
+                          and "SERVICE" not in c.spec.role]
+
+                return (
+                    f"Removed {service_name} from session",
+                    gr.update(choices=agent_manager_state.get_available_services()),
+                    gr.update(choices=agent_manager_state.get_connected_services()),
+                    gr.update(choices=agents),  # recipient
+                    build_participants_html(room_state),
+                    render_agent_details(None, agent_manager_state)
+                )
+            except Exception as e:
+                return (f"Error: {e}", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+        def add_agent_to_session(agent_name, room_state, log_state, agent_manager_state):
+            if not room_state or not agent_name:
+                return ("No room or agent selected", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(agent_name, agent_manager_state),
+                        gr.update(), gr.update(), gr.update())
+
+            try:
+                if agent_name not in PREDEFINED_SPECS:
+                    return ("Agent not found", gr.update(), gr.update(), gr.update(),
+                            build_participants_html(room_state), render_agent_details(agent_name, agent_manager_state),
+                            gr.update(), gr.update(), gr.update())
+
+                if not agent_manager_state.add_agent(agent_name):
+                    return (f"Agent {agent_name} already in session", gr.update(), gr.update(), gr.update(),
+                            build_participants_html(room_state), render_agent_details(agent_name, agent_manager_state),
+                            gr.update(), gr.update(), gr.update())
+
+                spec: AgentSpec = PREDEFINED_SPECS[agent_name]
+                # Although the LLM driver was upgraded to LLMAgent, initializing with
+                # openai_generate is not strictly required: the chat function it maps
+                # to now goes through the message queue. We still enable it so a
+                # secondary response path via _chat remains available.
+                llm = LLMAgent(generate_fn=LLMAgent.openai_generate, system_prompt=spec._generate_base_template())
+                agent = Agent(spec=spec, llm=llm, manage_room=room_state, telemetry=log_state)
+                asyncio.run(room_state.add_client(agent))
+
+                # Agents only, not services
+                agents = [c.username for c in room_state._clients.values()
+                          if hasattr(c, 'spec')
+                          and c.spec.name != "Session Manager"
+                          and "SERVICE" not in c.spec.role]
+
+                return (
+                    f"Added {agent_name} to session",
+                    gr.update(choices=agent_manager_state.get_available_agents()),
+                    gr.update(choices=agent_manager_state.get_connected_agents()),
+                    gr.update(choices=agents),
+                    build_participants_html(room_state),
+                    render_agent_details(agent_name, agent_manager_state),
+                    gr.update(choices=agents),
+                    gr.update(choices=agents),
+                    gr.update(choices=agents)
+                )
+            except Exception as e:
+                return (f"Error: {e}", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(agent_name, agent_manager_state),
+                        gr.update(), gr.update(), gr.update())
+
+        def remove_agent_from_session(agent_name, room_state, log_state, agent_manager_state):
+            if not room_state or not agent_name:
+                return ("No room or agent selected", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+            try:
+                if not agent_manager_state.remove_agent(agent_name):
+                    return (f"Agent {agent_name} not in session", gr.update(), gr.update(), gr.update(),
+                            build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+                asyncio.run(room_state.remove_client(agent_name))
+
+                # Agents only, not services
+                agents = [c.username for c in room_state._clients.values()
+                          if hasattr(c, 'spec')
+                          and c.spec.name != "Session Manager"
+                          and "SERVICE" not in c.spec.role]
+
+                return (
+                    f"Removed {agent_name} from session",
+                    gr.update(choices=agent_manager_state.get_available_agents()),
+                    gr.update(choices=agent_manager_state.get_connected_agents()),
+                    gr.update(choices=agents),
+                    build_participants_html(room_state),
+                    render_agent_details(None, agent_manager_state)
+                )
+            except Exception as e:
+                return (f"Error: {e}", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+        def create_custom_agent(name, role, personality, goal, instructions, skills, room_state, log_state, agent_manager_state):
+            if not room_state or not name.strip():
+                return ("Name required", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(None, agent_manager_state))
+
+            try:
+                skills_list = [s.strip() for s in skills.split(",")] if skills.strip() else []
+                spec = AgentSpec(
+                    name=name.strip(),
+                    role=role.strip() or "Specialist",
+                    personality=personality.strip() or "Helpful",
+                    goal=goal.strip() or "Assist",
+                    instructions=instructions.strip() or "Provide helpful responses",
+                    skills=skills_list,
+                    expertise_keywords=[name.lower()]
+                )
+
+                if name not in agent_manager_state.available_agents:
+                    agent_manager_state.available_agents.append(name)
+                    PREDEFINED_SPECS[name] = spec
+
+                if agent_manager_state.add_agent(name):
+                    llm = LLMAgent(generate_fn=LLMAgent.openai_generate)
+                    agent = Agent(spec=spec, llm=llm, manage_room=room_state, telemetry=log_state)
+                    asyncio.run(room_state.add_client(agent))
+
+                agents = [c.username for c in room_state._clients.values()
+                          if hasattr(c, 'spec') and c.spec.name != "Session Manager"]
+
+                return (
+                    f"Created and added {name}",
+                    gr.update(choices=agent_manager_state.get_available_agents()),
+                    gr.update(choices=agent_manager_state.get_connected_agents()),
+                    gr.update(choices=agents),
+                    build_participants_html(room_state),
+                    render_agent_details(name, agent_manager_state)
+                )
+            except Exception as e:
+                return (f"Error: {e}", gr.update(), gr.update(), gr.update(),
+                        build_participants_html(room_state), render_agent_details(None, agent_manager_state))
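+
+        # The task-assignment handler below builds method-specific kwargs for
+        # session_manager.orchestrate(). An illustrative sketch of the shapes it
+        # produces (the agent names here are examples, not real agents):
+        def _example_orchestration_kwargs():
+            return {
+                "sequential": {"agent_order": ["Research Assistant", "Python Coder"]},
+                "hierarchical": {"supervisor": "Research Assistant", "team": ["Python Coder"]},
+                "parallel": {"sub_tasks": {"Python Coder": "write the script"}},
+                "iterative": {"agent_order": ["Python Coder"], "iterations": 3},
+            }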
+        def load_example_agent():
+            """Load example agent data into the builder form fields"""
+            return (
+                "Documentation Specialist",
+                "Technical Writer & Documentation Expert",
+                "Detail-oriented, clear, and methodical with excellent communication skills",
+                "Create comprehensive, user-friendly documentation that helps users understand complex technical concepts",
+                "You are a documentation specialist who excels at creating clear, structured documentation. "
+                "You break down complex topics into digestible sections, use examples effectively, and always "
+                "consider the end-user's perspective. You follow documentation best practices and ensure "
+                "consistency in style and formatting.",
+                "Technical Writing, Markdown, API Documentation, User Guides, Code Documentation, Content Structure"
+            )
+
+        def assign_task_with_orchestration(task_description, orch_method, room_state, log_state,
+                                           sequential_order, hierarchical_supervisor, hierarchical_team,
+                                           parallel_tasks, iterative_iterations, iterative_sequence,
+                                           task_priority, legacy_turns, legacy_agents):
+            if not room_state or not room_state.session_manager:
+                return "No room or manager available", [], ""
+
+            try:
+                all_agents = []
+                for client in room_state._clients.values():
+                    if (hasattr(client, 'spec') and hasattr(client, 'llm') and
+                            client.spec.name != "Session Manager" and
+                            client.spec.name != "human_user" and
+                            "SERVICE" not in client.spec.role):  # services are excluded
+                        all_agents.append(client)
+
+                if not all_agents:
+                    return "No agents available for orchestration", [], ""
+
+                kwargs = {
+                    'task': task_description,
+                    'agents': all_agents,
+                    'priority': task_priority
+                }
+
+                if orch_method == "sequential":
+                    if not sequential_order:
+                        return "Please specify agent order for sequential orchestration", [], ""
+                    kwargs['agent_order'] = [agent.strip() for agent in sequential_order.split(",")]
+
+                elif orch_method == "hierarchical":
+                    if not hierarchical_supervisor or not hierarchical_team:
+                        return "Please select supervisor and team for hierarchical orchestration", [], ""
+                    kwargs['supervisor'] = hierarchical_supervisor
+                    kwargs['team'] = hierarchical_team
+
+                elif orch_method == "parallel":
+                    if not parallel_tasks or len(parallel_tasks) == 0:
+                        return "Please specify agents and sub-tasks for parallel orchestration", [], ""
+                    sub_tasks = {}
+                    for row in parallel_tasks:
+                        if row[0] and row[1]:
+                            sub_tasks[row[0]] = row[1]
+                    if not sub_tasks:
+                        return "Please specify valid agents and sub-tasks", [], ""
+                    kwargs['sub_tasks'] = sub_tasks
+
+                elif orch_method == "iterative":
+                    if not iterative_sequence or not iterative_iterations:
+                        return "Please specify sequence and iterations for iterative orchestration", [], ""
+                    kwargs['agent_order'] = [agent.strip() for agent in iterative_sequence.split(",")]
+                    kwargs['iterations'] = int(iterative_iterations)
+
+                elif orch_method in ["round_robin", "consensus", "supervised", "parallel_consensus",
+                                     "roundtable_discussion", "router_dynamic", "voting"]:
+                    if legacy_agents:
+                        selected_agents = [client for client in all_agents
+                                           if client.spec.name in legacy_agents]
+                        if selected_agents:
+                            kwargs['agents'] = selected_agents
+                    kwargs['turns'] = int(legacy_turns) if legacy_turns else 2
+
+                asyncio.run(room_state.session_manager.orchestrate(
+                    method=orch_method,
+                    **kwargs
+                ))
+
+                return (
+                    f"Task assigned using {orch_method} orchestration",
+                    room_state.get_chat_history_for_display(),
+                    refresh_system_log(log_state)
+                )
+
+            except Exception as e:
+                return (f"Error assigning task: {e}",
+                        room_state.get_chat_history_for_display() if room_state else [],
+                        refresh_system_log(log_state))
+
+        async def send_message_with_agent_responses(text, history, room_state, log_state, msg_type_val, recipient_vals, max_turns=2):
+            if not room_state or not text.strip():
+                return text, history, "No room", refresh_system_log(log_state) if log_state else ""
+            try:
+                # Log the user message
+                if log_state:
+                    log_state.record("info", text,
+                                     sender="user",
+                                     message_type=msg_type_val.lower(),
+                                     recipients=recipient_vals)
+
+                # This handler is async, so we await the room coroutines directly
+                # instead of calling asyncio.run() from inside Gradio's event loop.
+ if msg_type_val == "Public": + await room_state.send_public(sender="human_user", content=text) # <-- AWAIT instead of asyncio.run + else: + if recipient_vals: + for recipient in recipient_vals: + if log_state: + log_state.record("info", text, + sender="user", + message_type="direct", + recipient=recipient) + await room_state.send_direct(sender="human_user", target=recipient, content=text) # <-- AWAIT + else: + return text, history, "Select at least one recipient for direct message", refresh_system_log(log_state) if log_state else "" + + # --- IMPORTANT: Update History Here --- + # The UI will still wait for this function to finish. + # So we return the updated history after awaiting the send operation. + return "", room_state.get_chat_history_for_display(), "Message sent", refresh_system_log(log_state) if log_state else "" + + except Exception as e: + if log_state: + log_state.record("error", str(e), sender="system", message_type="system") + return text, history, f"Error: {e}", refresh_system_log(log_state) if log_state else "" + def get_summary(room_state, log_state): + if not room_state or not room_state.session_manager: + return "No room", room_state.get_chat_history_for_display() if room_state else [], refresh_system_log(log_state) + + try: + asyncio.run(room_state.session_manager.summarize_session()) + + return "Summary generated", room_state.get_chat_history_for_display(), refresh_system_log(log_state) + except Exception as e: + return f"Error: {e}", room_state.get_chat_history_for_display(), refresh_system_log(log_state) + def toggle_recipient(msg_type_val): + return gr.update(visible=(msg_type_val == "Direct")) + def clear_chat(room_state): + if room_state: + room_state.message_history.clear() + return [], "Chat cleared" + + # Event handlers + orch_method.change( + update_orchestration_ui, + inputs=[orch_method, chat_room_state], + outputs=[ + sequential_order, + hierarchical_supervisor, + hierarchical_team, + parallel_tasks, + iterative_iterations, + iterative_sequence, + legacy_turns, + legacy_agents, + pattern_info + ] + ) + + orch_method.change( + get_pattern_info, + inputs=[orch_method], + outputs=[pattern_info] + ) + + init_room_btn.click( + initialize_room, + outputs=[ + chat_room_state, log_state, agent_manager_state, room_status, chatbot, + participants_display, log_display, recipient, available_agents_dropdown, + connected_agents_dropdown, agent_details, hierarchical_supervisor, hierarchical_team, + iterative_iterations, iterative_sequence, legacy_turns, legacy_agents, + pattern_info,available_Services_dropdown, + connected_services_dropdown, + ] + ) + + # Agent selection preview + available_agents_dropdown.change( + render_agent_details, + inputs=[available_agents_dropdown, agent_manager_state], + outputs=[agent_details] + ) + available_Services_dropdown.change( + render_agent_details, + inputs=[available_Services_dropdown, agent_manager_state], + outputs=[agent_details] + ) + + # Load example agent + load_example_btn.click( + load_example_agent, + outputs=[ + custom_agent_name, + custom_agent_role, + custom_agent_personality, + custom_agent_goal, + custom_agent_instructions, + custom_agent_skills + ] + ) + + assign_task_btn.click( + assign_task_with_orchestration, + inputs=[msg_input, orch_method, chat_room_state, log_state, sequential_order, + hierarchical_supervisor, hierarchical_team, parallel_tasks, + iterative_iterations, iterative_sequence, task_priority, + legacy_turns, legacy_agents], + outputs=[room_status, chatbot, log_display] + ) + + summary_btn.click( + 
get_summary, + inputs=[chat_room_state, log_state], + outputs=[room_status, chatbot, log_display] + ) + + refresh_log_btn.click( + refresh_system_log, + inputs=[log_state], + outputs=[log_display] + ) + + clear_btn.click( + clear_chat, + inputs=[chat_room_state], + outputs=[chatbot, room_status] + ) + + add_agent_btn.click( + add_agent_to_session, + inputs=[available_agents_dropdown, chat_room_state, log_state, agent_manager_state], + outputs=[room_status, available_agents_dropdown, connected_agents_dropdown, + recipient, participants_display, agent_details, hierarchical_supervisor, + hierarchical_team, legacy_agents] + ) + + remove_agent_btn.click( + remove_agent_from_session, + inputs=[connected_agents_dropdown, chat_room_state, log_state, agent_manager_state], + outputs=[room_status, available_agents_dropdown, connected_agents_dropdown, + recipient, participants_display, agent_details] + ) + add_service_btn.click( + add_service_to_session, + inputs=[available_Services_dropdown, chat_room_state, log_state, agent_manager_state], + outputs=[room_status, available_Services_dropdown, connected_services_dropdown, + recipient, participants_display, agent_details, + ] + ) + + remove_service_btn.click( + remove_service_from_session, + inputs=[connected_services_dropdown, chat_room_state, log_state, agent_manager_state], + outputs=[room_status, available_Services_dropdown, connected_services_dropdown, + recipient, participants_display, agent_details] + ) + + create_custom_btn.click( + create_custom_agent, + inputs=[ + custom_agent_name, custom_agent_role, custom_agent_personality, + custom_agent_goal, custom_agent_instructions, custom_agent_skills, + chat_room_state, log_state, agent_manager_state + ], + outputs=[room_status, available_agents_dropdown, connected_agents_dropdown, + recipient, participants_display, agent_details] + ) + + msg_type.change( + toggle_recipient, + inputs=[msg_type], + outputs=[recipient] + ) + + send_btn.click( + send_message_with_agent_responses, + inputs=[msg_input, chatbot, chat_room_state, log_state, msg_type, recipient], + outputs=[msg_input, chatbot, room_status, log_display],show_progress=False,concurrency_limit=10, + ) + + msg_input.submit( + send_message_with_agent_responses, + inputs=[msg_input, chatbot, chat_room_state, log_state, msg_type, recipient], + outputs=[msg_input, chatbot, room_status, log_display] + ) + + # LCARS-inspired Light Theme CSS + + + + return demo +############################################################# if __name__ == "__main__": - demo.launch() + + print("Starting LCARS MULTI AGENT CHAT_ROOM ...") + + app = _create_interface() + app.launch(debug=True, share=True,show_error=True) + + \ No newline at end of file