CyberCoder225 committed on
Commit
41953db
·
verified ·
1 Parent(s): 07cbfcc

Update brain.py

Browse files
Files changed (1) hide show
  1. brain.py +89 -19
brain.py CHANGED
@@ -1,25 +1,95 @@
1
from flask import Flask, request, jsonify
from brain import MairaBrain

app = Flask(__name__)

# THE UPGRADE CONSTANTS
REPO_ID = "bartowski/Llama-3.2-1B-Instruct-GGUF"
FILENAME = "Llama-3.2-1B-Instruct-Q4_K_M.gguf"

# Model is downloaded/loaded once at import time; requests reuse this instance.
maira = MairaBrain(REPO_ID, FILENAME)


@app.route("/", methods=["GET"])
def home():
    """Health-check endpoint."""
    return "Maira v6.0 is Online."


@app.route("/chat", methods=["POST"])
def chat():
    """Chat endpoint: expects JSON {"user_id": ..., "message": ...}.

    Returns JSON {"response": <model reply>}.
    """
    # request.json raises for a missing/non-JSON body; tolerate it and
    # fall back to defaults instead of returning a 400/500.
    data = request.get_json(silent=True) or {}
    user_id = data.get("user_id", "default_user")
    user_input = data.get("message", "")

    response = maira.get_response(user_id, user_input)
    return jsonify({"response": response})


if __name__ == "__main__":
    # 0.0.0.0:7860 is the conventional Hugging Face Spaces bind address/port.
    app.run(host="0.0.0.0", port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llama_cpp import Llama
2
+ from huggingface_hub import hf_hub_download
3
+ import datetime, shelve, re
4
 
5
class MairaBrain:
    """Persona chatbot wrapper around a local GGUF Llama model.

    Persists per-user history, extracted facts, and a "loyalty" score in a
    shelve database, and builds a Llama-3.2 chat-format prompt per request.
    """

    def __init__(self, repo_id, filename):
        """Download the GGUF weights from the Hub and load them with llama.cpp.

        repo_id/filename identify the model file on the Hugging Face Hub.
        """
        print("🌌 Initializing Maira Neural Singularity v6.0 (Llama 3.2)")
        # This downloads the smarter Meta model (cached by huggingface_hub).
        model_path = hf_hub_download(repo_id=repo_id, filename=filename)

        # Optimized for Free Tier (16GB RAM)
        self.llm = Llama(
            model_path=model_path,
            n_ctx=4096,   # Long memory
            n_threads=8,  # Max CPU power
            n_batch=512,
            verbose=False
        )

        # Path of the shelve file holding all per-user state.
        self.db_path = "maira_universe.db"

    def _process_metrics(self, user_data, user_input):
        """Hidden reasoning: analyzes loyalty and extracts facts.

        Mutates ``user_data`` in place: bumps the loyalty score (capped at
        100) when the message contains a positive keyword, and records
        simple "pattern: value" facts for long-term memory.
        """
        # Update Loyalty (Relationship Score): +5 per positive message.
        sentiment = 1 if re.search(r"(love|good|best|thanks|smart|dope|cool)", user_input, re.I) else 0
        user_data["metrics"]["loyalty"] = min(100, user_data["metrics"].get("loyalty", 50) + (sentiment * 5))

        # Fact Extraction (Long Term Memory)
        # Patterns like: "My name is X", "I live in Y", "I love Z"
        match = re.search(r"(my name is|i work as|i love|i am|favorite) ([\w\s]+)", user_input, re.I)
        if match:
            key = match.group(1).lower().strip()
            val = match.group(2).strip()
            user_data["facts"][key] = val

    def get_response(self, user_id, user_input):
        """Generate Maira's reply to ``user_input`` for ``user_id``.

        Loads (or creates) the user's record, updates metrics/facts, builds
        a Llama-3.2 chat prompt from persona + facts + recent history, runs
        the model, appends the exchange to history, and returns the reply.
        """
        with shelve.open(self.db_path, writeback=True) as db:
            # Initialize user if they are new
            if user_id not in db:
                db[user_id] = {"history": [], "facts": {}, "metrics": {"loyalty": 50}}

            u = db[user_id]
            self._process_metrics(u, user_input)

            # Dynamic Persona based on Loyalty
            loyalty = u["metrics"]["loyalty"]
            if loyalty > 80:
                vibe = "Extremely loyal, protective, and affectionate best friend."
            elif loyalty < 30:
                vibe = "Cold, sarcastic, and unimpressed high-status lady."
            else:
                vibe = "Smart, stylish, witty, and engaging lady."

            known_facts = ", ".join([f"{k}: {v}" for k, v in u["facts"].items()])

            # --- Llama 3.2 Optimized Chat Format ---
            prompt = (
                f"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
                f"Identity: Maira. Persona: {vibe}. "
                f"User Memory: {known_facts if known_facts else 'No prior data'}. "
                "Behavior: Fucking Advanced. Intelligent, feminine, WhatsApp-style. "
                "No formal AI talk. Be dope, keep it real, and stay in character.<|eot_id|>"
                f"<|start_header_id|>thought<|end_header_id|>\n\n"
                f"Loyalty score is {loyalty}. Response should be tailored to this vibe.<|eot_id|>"
            )

            # Add Chat History (Last 12 messages)
            for msg in u["history"][-12:]:
                role = "assistant" if msg.startswith("Maira:") else "user"
                # BUGFIX: only strip the speaker tag at the *start* of the
                # stored line. str.replace removed "Maira: "/"User: " anywhere
                # in the text, corrupting messages that contain those strings.
                content = msg.removeprefix("Maira: ").removeprefix("User: ")
                prompt += f"<|start_header_id|>{role}<|end_header_id|>\n\n{content}<|eot_id|>"

            # Current User Input
            prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{user_input}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"

            # Generation
            output = self.llm(
                prompt,
                max_tokens=300,
                temperature=0.85,  # High vibe/creativity
                repeat_penalty=1.15,
                stop=["<|eot_id|>", "User:"]
            )

            response = output["choices"][0]["text"].strip()

            # Save History
            u["history"].append(f"User: {user_input}")
            u["history"].append(f"Maira: {response}")

            # Keep DB small: trim to the newest 20 entries once past 30.
            if len(u["history"]) > 30:
                u["history"] = u["history"][-20:]

            db[user_id] = u
            return response