Tpayne101 committed on
Commit
025301f
·
verified ·
1 Parent(s): 3523b6d

Create agentos_core_v3.py

Browse files
Files changed (1) hide show
  1. agentos_core_v3.py +84 -0
agentos_core_v3.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from memory import MemoryManager
3
+ from context_graph import ContextGraph
4
+ from telemetry import Telemetry
5
+ from identity_core import create_agent_identity
6
+ from semantic_memory import SemanticMemory
7
+
8
+ def _categorize(prompt: str) -> str:
9
+ p = prompt.lower()
10
+ if any(k in p for k in ["goal", "ambition", "plan", "target"]): return "goals"
11
+ if any(k in p for k in ["friend", "person", "mentor", "team", "contact"]): return "people"
12
+ if any(k in p for k in ["favorite", "like", "love", "prefer"]): return "preferences"
13
+ if any(k in p for k in ["city", "food", "color", "age", "birthday"]): return "personal"
14
+ return "general"
15
+
16
+ def _is_user_fact(p: str) -> bool:
17
+ return bool(re.match(r"^\s*(my|i|i'm|i am|i like)\b", p.strip().lower()))
18
+
19
class AgentCore:
    """Orchestrates one agent: identity, telemetry, trace memory, a context
    graph, and a semantic (vector) memory.

    ``run`` either stores a first-person fact in both memories or answers a
    question from semantic memory, falling back to the context graph.
    """

    def __init__(self, model="gpt-4o-mini"):
        """Wire up the agent's identity and all memory/telemetry subsystems.

        Args:
            model: Name of the LLM backing this agent (stored, logged).
        """
        self.agent_id = create_agent_identity()
        self.telemetry = Telemetry(self.agent_id)
        self.memory = MemoryManager(self.agent_id)
        self.context = ContextGraph()
        # Vector-embedding store used for semantic recall in run().
        self.semantic = SemanticMemory(self.agent_id)
        self.model = model
        self.telemetry.log("init", "success", {"agent_id": self.agent_id})
        print(f"[INIT] Agent {self.agent_id} initialized with model {self.model}")

    def run(self, prompt: str):
        """Handle one prompt end-to-end and return the response string.

        Any exception is caught at this boundary, logged via telemetry, and
        surfaced to the caller as an "Error: ..." string rather than raised.
        """
        self.telemetry.log("run_start", "in_progress", {"prompt": prompt})
        try:
            category = _categorize(prompt)

            if _is_user_fact(prompt):
                # 1) A stated fact → write it to both the graph and the vector store.
                self.context.link_context(self.agent_id, category, prompt, "stored")
                self.semantic.add(text=prompt, category=category)
                response = f"Noted — I’ll remember that under {category}."
            else:
                response = self._recall(prompt, category)

            # Persist the full Q→A trace and link it into the graph.
            self.memory.save({"prompt": prompt, "response": response})
            try:
                self.context.link_context(self.agent_id, category, prompt, response)
            except TypeError:
                # Backward-compat signature: (agent_id, key, value).
                self.context.link_context(self.agent_id, prompt, response)

            self.telemetry.log("run_complete", "success", {"response": response})
            print(f"[RUN] {response}")
            return response

        except Exception as e:
            self.telemetry.log("run_failed", "error", {"error": str(e)})
            print(f"[ERROR] {e}")
            return f"Error: {e}"

    def _recall(self, prompt, category):
        """Answer *prompt* from semantic memory, falling back to the context
        graph, then to a plain acknowledgement."""
        # A prompt mentioning "all" searches across every category.
        wanted = None if "all" in prompt.lower() else category
        hits = self.semantic.query(query_text=prompt, category=wanted, top_k=5)

        if hits:
            rephrased = [self._humanize(hit["text"]) for hit in hits]
            # Order-preserving dedupe; keep at most three snippets.
            unique = list(dict.fromkeys(rephrased))
            return f"From memory: {'; '.join(unique[:3])}."

        # Classic keyword recall from the context graph, if the API exists.
        fallback = (
            self.context.query_context(self.agent_id, keyword=None, category=category)
            if hasattr(self.context, "query_context")
            else []
        )
        if fallback and fallback != ["No context found."]:
            return "From context: " + " ".join(fallback[:3])
        return f"Agent {self.agent_id} processed: {prompt}"

    @staticmethod
    def _humanize(text):
        """Flip first-person phrasing to second person ("My X…" → "Your X…")
        and drop trailing periods."""
        flipped = text.strip()
        for old, new in (
            ("My ", "Your "),
            ("my ", "your "),
            ("I am ", "You are "),
            ("I'm ", "You're "),
        ):
            flipped = flipped.replace(old, new)
        return flipped.rstrip(".")