Spaces:
Build error
Build error
import os
import time
from telemetry import log_event
from memory_core import update_memory, load_memory
from identity_core import IdentityManager
from context_graph import ContextGraph
class AgentCore:
    """Coordinates identity, memory, context graph, and telemetry for one agent.

    On construction it acquires an identity, attaches a per-agent context
    graph, and emits an "init" telemetry event. `run` is the single entry
    point for processing a prompt.
    """

    def __init__(self, model: str = "gpt-4o-mini") -> None:
        """Initialize the agent and log a successful startup.

        Args:
            model: Model identifier recorded in the init telemetry event.
                   (Only logged here — the visible code never calls a model.)
        """
        self.identity = IdentityManager()
        self.agent_id = self.identity.agent_id
        self.context = ContextGraph(self.agent_id)
        log_event(self.agent_id, "init", "success", {"model": model})
        print(f"[INIT] Agent {self.agent_id} initialized with model {model}")

    def run(self, prompt: str) -> str:
        """Process *prompt*; persist it to memory and the context graph.

        Args:
            prompt: The user/caller prompt to process.

        Returns:
            The (simulated) response string.

        Raises:
            Exception: Any error raised by the memory/context/telemetry
                steps is logged as a "run_failed" event and re-raised.
        """
        log_event(self.agent_id, "run_start", "in_progress", {"prompt": prompt})
        try:
            # Simulate model output — no real model call is made here.
            response = f"Agent {self.agent_id} processed: {prompt}"
            # Save memory + context
            update_memory(self.agent_id, "last_prompt", prompt)
            update_memory(self.agent_id, "last_response", response)
            prompt_node = self.context.add_node("prompt", prompt)
            response_node = self.context.add_node("response", response)
            self.context.connect(prompt_node, response_node)
            # Telemetry success
            log_event(self.agent_id, "run_complete", "success", {"response": response})
            print(f"[RUN] {response}")
            return response
        except Exception as e:
            log_event(self.agent_id, "run_failed", "error", {"error": str(e)})
            print(f"[ERROR] {e}")
            # Bare `raise` preserves the original traceback; the original
            # `raise e` re-anchored the traceback at this handler and lost
            # the true failure site.
            raise