File size: 4,177 Bytes
025301f
ab14674
025301f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab14674
025301f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import re
from summarizer import MemorySummarizer
from memory import MemoryManager
from context_graph import ContextGraph
from telemetry import Telemetry
from identity_core import create_agent_identity
from semantic_memory import SemanticMemory

def _categorize(prompt: str) -> str:
    p = prompt.lower()
    if any(k in p for k in ["goal", "ambition", "plan", "target"]): return "goals"
    if any(k in p for k in ["friend", "person", "mentor", "team", "contact"]): return "people"
    if any(k in p for k in ["favorite", "like", "love", "prefer"]): return "preferences"
    if any(k in p for k in ["city", "food", "color", "age", "birthday"]): return "personal"
    return "general"

def _is_user_fact(p: str) -> bool:
    return bool(re.match(r"^\s*(my|i|i'm|i am|i like)\b", p.strip().lower()))

class AgentCore:
    """Orchestrates one agent's memory stack: fact storage and recall.

    Wires together a summarizer, telemetry sink, persistent memory manager,
    keyword context graph, and vector-based semantic memory, all keyed by a
    freshly created agent identity. `run` is the single entry point.
    """

    def __init__(self, model="gpt-4o-mini"):
        """Create the agent identity and attach every backend.

        model: LLM model name; stored and logged here but never invoked in
        this file — presumably consumed elsewhere (TODO confirm).
        """
        # NOTE(review): summarizer path is hard-coded, not per-agent —
        # confirm whether it should incorporate agent_id like the others.
        self.summarizer = MemorySummarizer("semantic_memory.json")
        self.agent_id = create_agent_identity()
        self.telemetry = Telemetry(self.agent_id)
        self.memory = MemoryManager(self.agent_id)
        self.context = ContextGraph()
        self.semantic = SemanticMemory(self.agent_id)  # 🔥 vector memory
        self.model = model
        self.telemetry.log("init", "success", {"agent_id": self.agent_id})
        print(f"[INIT] Agent {self.agent_id} initialized with model {self.model}")

    def run(self, prompt: str) -> str:
        """Process one prompt and return a textual response.

        Flow: categorize → if the prompt states a first-person fact, store it
        in both the context graph and semantic memory; otherwise recall from
        semantic memory first, then the context graph, then echo the prompt.
        Every run is persisted to MemoryManager and logged via telemetry.
        Exceptions are caught at this boundary and surfaced as an
        "Error: ..." string rather than raised.
        """
        self.telemetry.log("run_start", "in_progress", {"prompt": prompt})
        try:
            category = _categorize(prompt)

            # 1) If user is stating a fact → write to both memories
            if _is_user_fact(prompt):
                self.context.link_context(self.agent_id, category, prompt, "stored")
                self.semantic.add(text=prompt, category=category)
                response = f"Noted — I’ll remember that under {category}."
            else:
                # 2) Query vector memory (semantic) first; the literal word
                # "all" anywhere in the prompt disables category filtering.
                hits = self.semantic.query(query_text=prompt, category=None if "all" in prompt.lower() else category, top_k=5)

                if hits:
                    # Humanize top results: flip stored first-person facts
                    # into second person before presenting them back.
                    # Assumes each hit is a dict with a "text" key — TODO
                    # confirm against SemanticMemory.query's return shape.
                    phrasings = []
                    for h in hits:
                        t = h["text"].strip()
                        # convert “My … is …” → “Your … is …”
                        t = t.replace("My ", "Your ").replace("my ", "your ")
                        t = t.replace("I am ", "You are ").replace("I'm ", "You're ")
                        phrasings.append(t.rstrip("."))
                    # dedupe while preserving order
                    seen = set(); nice = []
                    for ptxt in phrasings:
                        if ptxt not in seen:
                            seen.add(ptxt); nice.append(ptxt)
                    # Only the first three distinct phrasings are surfaced.
                    joined = "; ".join(nice[:3])
                    response = f"From memory: {joined}."
                else:
                    # 3) Fallback to classic context graph keyword recall;
                    # hasattr guard tolerates older ContextGraph versions
                    # without query_context.
                    cg = self.context.query_context(self.agent_id, keyword=None, category=category) if hasattr(self.context, "query_context") else []
                    if cg and cg != ["No context found."]:
                        response = "From context: " + " ".join(cg[:3])
                    else:
                        # Last resort: acknowledge the prompt verbatim.
                        response = f"Agent {self.agent_id} processed: {prompt}"

            # 4) Persist trace + link
            self.memory.save({"prompt": prompt, "response": response})
            # keep a lightweight index of Q→A strings in the graph
            try:
                self.context.link_context(self.agent_id, category, prompt, response)
            except TypeError:
                # backward-compat signature (agent_id, key, value)
                self.context.link_context(self.agent_id, prompt, response)

            self.telemetry.log("run_complete", "success", {"response": response})
            print(f"[RUN] {response}")
            return response

        except Exception as e:
            # Boundary handler: log, print, and return the error as text so
            # callers never see a raised exception from run().
            self.telemetry.log("run_failed", "error", {"error": str(e)})
            print(f"[ERROR] {e}")
            return f"Error: {e}"