Tpayne101 committed on
Commit
940bc33
·
verified ·
1 Parent(s): f99c50d

Update agent_core.py

Browse files
Files changed (1) hide show
  1. agent_core.py +22 -59
agent_core.py CHANGED
@@ -1,63 +1,26 @@
1
- # agent_core.py
2
- import re
3
- from typing import List, Dict, Any
4
- from semantic_memory import SemanticMemory
5
-
6
# Hint patterns used to classify incoming messages.  Matching is done
# case-insensitively: handle() lowercases the input, and the searches below
# pass re.IGNORECASE so mixed-case patterns (e.g. "I am") still fire.
REMEMBER_HINTS = (
    r"\bremember\b", r"\bnote\b", r"\bsave this\b", r"\bstore this\b",
    r"\bmy name is\b", r"\bmy favorite\b", r"\bI (am|work|run|own)\b",
)

QUESTION_HINT = r"\?|\bwhat\b|\bwhich\b|\bhow\b|\bwho\b|\bwhen\b"


class AgentCore:
    """
    Routes messages:
      - If declarative/about-self => store (with boosted weight).
      - If a question => retrieve + answer with concise summary.
    """

    def __init__(self):
        # Long-term store; provided by the project's semantic_memory module.
        self.memory = SemanticMemory()

    def _looks_like_fact(self, text: str) -> bool:
        """Return True if *text* reads like a storable fact about the user."""
        t = text.strip().lower()
        # BUG FIX: the original searched the case-sensitive pattern
        # r"\bI (am|work|run|own)\b" against the already-lowercased text,
        # so that hint could never match.  Search case-insensitively instead.
        return (
            any(re.search(p, t, re.IGNORECASE) for p in REMEMBER_HINTS)
            or t.startswith(("i ", "my "))
        )

    def _is_question(self, text: str) -> bool:
        """Return True if *text* looks like a question."""
        return bool(re.search(QUESTION_HINT, text.strip().lower()))

    def handle(self, message: str) -> str:
        """Store facts found in *message* and/or answer it from memory.

        Returns a short, user-facing status or answer string.
        """
        if not message or not message.strip():
            return "Say something and I’ll learn from it."

        # Split on newlines to capture multiple facts in one paste.
        parts = [p.strip() for p in message.split("\n") if p.strip()]

        # If a line looks factual (and is not itself a question), store it.
        stored = 0
        for p in parts:
            if self._looks_like_fact(p) and not self._is_question(p):
                # Small boost for strongly self-referential items.
                boost = 0.15 if any(
                    x in p.lower() for x in ("i ", "my ", "motion", "brand", "company")
                ) else 0.0
                # NOTE(review): reaches into SemanticMemory's private
                # _base_weight_from_text — consider a public API on that class.
                self.memory.add(
                    p,
                    weight=min(1.0, self.memory._base_weight_from_text(p) + boost),
                )
                stored += 1

        # Questions are answered from retrieved context.
        if self._is_question(message):
            summary = self.memory.summarize_context(message, top_k=6)
            # Gentle answer style (short + grounded).
            if summary.startswith("No memory yet"):
                return "I don’t have context for that yet. Tell me a fact and I’ll remember it."
            # Concise “answer” feel: show the 3 strongest lines only.
            lines = summary.splitlines()
            keep = [line for line in lines if line.startswith("- ")][:3]
            friendly = "\n".join(keep) if keep else summary
            return f"Here’s what I recall that’s most relevant:\n{friendly}"

        # Otherwise acknowledge storage or neutral chat.
        if stored > 0:
            return "Got it — I saved that to your profile and will use it next time."
        else:
            # Neutral chat that still learns a little.
            self.memory.add(message, weight=self.memory._base_weight_from_text(message))
            return "Noted. If you want me to remember something long-term, say it directly (e.g., “My brand is MotionBoys”)."
 
1
+ from sentence_transformers import SentenceTransformer
 
 
 
 
 
 
 
 
 
 
2
 
3
class AgentCore:
    """Minimal agent core: embeds text and returns keyword-based canned replies."""

    def __init__(self):
        # Using a light, fast model for embeddings.
        # NOTE: downloads the model on first use — requires network access.
        self.model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

    def embed_text(self, text):
        """
        Convert input text into a numerical vector embedding.
        """
        return self.model.encode(text)

    def respond(self, text):
        """
        Simple placeholder reasoning for responses.
        Later you can replace with your reasoning engine or GPT API.
        """
        # Lowercase once instead of on every branch test.
        lowered = text.lower()
        if "who" in lowered:
            return "I’m Aventra, your contextual reasoning agent."
        elif "motionboys" in lowered:
            return "MotionBoys is your core empire — fashion meets innovation."
        elif "dream" in lowered:
            return "Dreams are memory anchors — I’m storing this in long-term context."
        else:
            return f"I understand: {text}"