import re

from summarizer import MemorySummarizer
from memory import MemoryManager
from context_graph import ContextGraph
from telemetry import Telemetry
from identity_core import create_agent_identity
from semantic_memory import SemanticMemory

def _categorize(prompt: str) -> str:
    """Route a prompt to a coarse memory category via keyword matching."""
    p = prompt.lower()
    if any(k in p for k in ["goal", "ambition", "plan", "target"]):
        return "goals"
    if any(k in p for k in ["friend", "person", "mentor", "team", "contact"]):
        return "people"
    if any(k in p for k in ["favorite", "like", "love", "prefer"]):
        return "preferences"
    if any(k in p for k in ["city", "food", "color", "age", "birthday"]):
        return "personal"
    return "general"

def _is_user_fact(p: str) -> bool:
    """Return True when the prompt looks like a first-person statement of fact."""
    return bool(re.match(r"^\s*(my|i|i'm|i am|i like)\b", p.strip().lower()))

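# Illustrative behaviour of the helpers above (examples added for clarity, not exhaustive):
#   _categorize("My favorite color is teal")  -> "preferences"
#   _categorize("Who is my mentor?")          -> "people"
#   _is_user_fact("I live in Berlin")         -> True
#   _is_user_fact("What do I like?")          -> False
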
class AgentCore:
    def __init__(self, model="gpt-4o-mini"):
        self.summarizer = MemorySummarizer("semantic_memory.json")
        self.agent_id = create_agent_identity()
        self.telemetry = Telemetry(self.agent_id)
        self.memory = MemoryManager(self.agent_id)
        self.context = ContextGraph()
        self.semantic = SemanticMemory(self.agent_id)  # 🔥 vector memory
        self.model = model
        self.telemetry.log("init", "success", {"agent_id": self.agent_id})
        print(f"[INIT] Agent {self.agent_id} initialized with model {self.model}")

    def run(self, prompt: str):
        self.telemetry.log("run_start", "in_progress", {"prompt": prompt})
        try:
            category = _categorize(prompt)

            # 1) If the user is stating a fact → write it to both memories.
            if _is_user_fact(prompt):
                self.context.link_context(self.agent_id, category, prompt, "stored")
                self.semantic.add(text=prompt, category=category)
                response = f"Noted — I’ll remember that under {category}."
            else:
                # 2) Query vector (semantic) memory first.
                hits = self.semantic.query(
                    query_text=prompt,
                    category=None if "all" in prompt.lower() else category,
                    top_k=5,
                )
                if hits:
                    # Humanize the top results.
                    phrasings = []
                    for h in hits:
                        t = h["text"].strip()
                        # Convert "My … is …" → "Your … is …".
                        t = t.replace("My ", "Your ").replace("my ", "your ")
                        t = t.replace("I am ", "You are ").replace("I'm ", "You're ")
                        phrasings.append(t.rstrip("."))
                    # Deduplicate while preserving order.
                    seen = set()
                    nice = []
                    for ptxt in phrasings:
                        if ptxt not in seen:
                            seen.add(ptxt)
                            nice.append(ptxt)
                    joined = "; ".join(nice[:3])
                    response = f"From memory: {joined}."
                else:
                    # 3) Fall back to classic keyword recall from the context graph.
                    cg = (
                        self.context.query_context(self.agent_id, keyword=None, category=category)
                        if hasattr(self.context, "query_context")
                        else []
                    )
                    if cg and cg != ["No context found."]:
                        response = "From context: " + " ".join(cg[:3])
                    else:
                        response = f"Agent {self.agent_id} processed: {prompt}"

            # 4) Persist the interaction trace and link it in the graph.
            self.memory.save({"prompt": prompt, "response": response})
            # Keep a lightweight index of Q→A strings in the graph.
            try:
                self.context.link_context(self.agent_id, category, prompt, response)
            except TypeError:
                # Backward-compat signature: (agent_id, key, value).
                self.context.link_context(self.agent_id, prompt, response)

            self.telemetry.log("run_complete", "success", {"response": response})
            print(f"[RUN] {response}")
            return response
        except Exception as e:
            self.telemetry.log("run_failed", "error", {"error": str(e)})
            print(f"[ERROR] {e}")
            return f"Error: {e}"
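
# Minimal usage sketch (illustrative addition, not part of the original module).
# It assumes the sibling modules imported above are importable from the working
# directory and that SemanticMemory/ContextGraph persist their data locally.
if __name__ == "__main__":
    agent = AgentCore()
    agent.run("My favorite color is teal.")          # stored under "preferences"
    print(agent.run("What is my favorite color?"))   # recalled from semantic memory if indexed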