CyberCoder225 committed on
Commit
c6a6830
·
verified ·
1 Parent(s): b012132

Update brain.py

Browse files
Files changed (1) hide show
  1. brain.py +6 -71
brain.py CHANGED
class MairaBrain:
    """Persona chatbot backed by a local llama.cpp model.

    Per-user state (chat history, extracted facts, a 0-100 "loyalty" score)
    is persisted in a `shelve` database on disk, so conversations survive
    process restarts.
    """

    def __init__(self, repo_id, filename):
        """Download the GGUF weights from the Hub and load them with llama.cpp.

        Args:
            repo_id: Hugging Face repository id holding the model file.
            filename: name of the GGUF file inside that repository.
        """
        print("🌌 Initializing Maira Neural Singularity v6.0 (Llama 3.2)")
        # This downloads the smarter Meta model (cached locally by the Hub client).
        model_path = hf_hub_download(repo_id=repo_id, filename=filename)

        # Optimized for Free Tier (16GB RAM).
        self.llm = Llama(
            model_path=model_path,
            n_ctx=4096,      # long context window ("long memory")
            n_threads=8,     # max CPU power
            n_batch=512,
            verbose=False,
        )
        self.db_path = "maira_universe.db"

    def _process_metrics(self, user_data, user_input):
        """Hidden reasoning: update the loyalty score and extract user facts.

        Mutates `user_data` in place; no return value.
        """
        # Positive-sentiment keywords nudge loyalty up by 5, capped at 100.
        sentiment = 1 if re.search(r"(love|good|best|thanks|smart|dope|cool)", user_input, re.I) else 0
        user_data["metrics"]["loyalty"] = min(100, user_data["metrics"].get("loyalty", 50) + (sentiment * 5))

        # Fact extraction (long-term memory).
        # Patterns like: "My name is X", "I work as Y", "I love Z".
        # NOTE(review): [\w\s]+ is greedy and may capture trailing words up to
        # the next punctuation — acceptable for this loose memory feature.
        match = re.search(r"(my name is|i work as|i love|i am|favorite) ([\w\s]+)", user_input, re.I)
        if match:
            key = match.group(1).lower().strip()
            val = match.group(2).strip()
            user_data["facts"][key] = val

    def get_response(self, user_id, user_input):
        """Generate Maira's reply to `user_input` and persist the exchange.

        Args:
            user_id: key identifying the user in the shelve database.
            user_input: the user's latest message.

        Returns:
            The model's reply with surrounding whitespace stripped.
        """
        with shelve.open(self.db_path, writeback=True) as db:
            # Initialize state for first-time users.
            if user_id not in db:
                db[user_id] = {"history": [], "facts": {}, "metrics": {"loyalty": 50}}

            u = db[user_id]
            self._process_metrics(u, user_input)

            # Dynamic persona based on the loyalty score.
            loyalty = u["metrics"]["loyalty"]
            if loyalty > 80:
                vibe = "Extremely loyal, protective, and affectionate best friend."
            elif loyalty < 30:
                vibe = "Cold, sarcastic, and unimpressed high-status lady."
            else:
                vibe = "Smart, stylish, witty, and engaging lady."

            known_facts = ", ".join([f"{k}: {v}" for k, v in u["facts"].items()])

            # --- Llama 3.2 optimized chat format ---
            prompt = (
                f"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
                f"Identity: Maira. Persona: {vibe}. "
                f"User Memory: {known_facts if known_facts else 'No prior data'}. "
                "Behavior: Fucking Advanced. Intelligent, feminine, WhatsApp-style. "
                "No formal AI talk. Be dope, keep it real, and stay in character.<|eot_id|>"
                f"<|start_header_id|>thought<|end_header_id|>\n\n"
                f"Loyalty score is {loyalty}. Response should be tailored to this vibe.<|eot_id|>"
            )

            # Replay the last 12 history messages.
            for msg in u["history"][-12:]:
                role = "assistant" if msg.startswith("Maira:") else "user"
                # FIX: strip only the leading role tag. The previous
                # `.replace(...)` removed EVERY occurrence of "User: " /
                # "Maira: " inside the message body, corrupting replayed text.
                content = msg.removeprefix("Maira: ").removeprefix("User: ")
                prompt += f"<|start_header_id|>{role}<|end_header_id|>\n\n{content}<|eot_id|>"

            # Current user input, then hand over to the assistant turn.
            prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{user_input}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"

            # Generation.
            output = self.llm(
                prompt,
                max_tokens=300,
                temperature=0.85,   # high vibe/creativity
                repeat_penalty=1.15,
                stop=["<|eot_id|>", "User:"],
            )

            response = output["choices"][0]["text"].strip()

            # Save history.
            u["history"].append(f"User: {user_input}")
            u["history"].append(f"Maira: {response}")

            # Keep the DB small: trim to the 20 most recent messages.
            if len(u["history"]) > 30:
                u["history"] = u["history"][-20:]

            db[user_id] = u
            return response
 
class MairaBrain:
    """Minimal chat brain: loads a GGUF model and answers per-user,
    persisting each exchange in a `shelve` database."""

    def __init__(self, repo_id, filename):
        # Alpine needs very specific loading.
        model_path = hf_hub_download(repo_id=repo_id, filename=filename)
        # Conservative settings: 2048 is safer for first boot on Alpine.
        self.llm = Llama(
            model_path=model_path,
            n_ctx=2048,
            n_threads=8,
        )
        self.db_path = "maira_universe.db"

    def get_response(self, user_id, user_input):
        """Generate a reply for `user_id` and record the turn on disk."""
        with shelve.open(self.db_path, writeback=True) as db:
            if user_id not in db:
                # Fresh profile: empty history/facts, neutral loyalty score.
                db[user_id] = {"history": [], "facts": {}, "metrics": {"loyalty": 50}}

            profile = db[user_id]

            # Simple identity logic: fixed-persona ChatML-style prompt.
            chat = (
                "<|im_start|>system\nYou are Maira, a dope high-status lady.<|im_end|>\n"
                f"<|im_start|>user\n{user_input}<|im_end|>\n"
                "<|im_start|>assistant\n"
            )

            result = self.llm(chat, max_tokens=200, stop=["<|im_end|>"])
            reply = result["choices"][0]["text"].strip()

            # writeback=True flushes this in-place mutation when the shelf closes.
            profile["history"] += [f"User: {user_input}", f"Maira: {reply}"]
            return reply