Spaces: Runtime error
Create protogen_6.1.py

protogen_6.1.py ADDED (+698 -0)
@@ -0,0 +1,698 @@
import time
import hashlib
import json
import sys
import copy
import threading
import queue
import re
from pathlib import Path
from collections import defaultdict
import math

# --- Dependency Check & Hardware Imports ---
try:
    from pypdf import PdfReader
    from docx import Document
    import networkx as nx
except ImportError as e:
    sys.exit(f"[CRITICAL ERROR]: Missing dependencies. Run: pip install pypdf python-docx networkx. Error: {e}")

# --- AMD GPU / Hardware Acceleration Subsystem ---
try:
    import torch
    import torch_directml
    HAS_TORCH = True
    print("--- GPU ACCELERATION MODULE LOADED ---")
except ImportError:
    HAS_TORCH = False
    print("--- [WARNING]: 'torch-directml' not found. GPU acceleration unavailable. Run: pip install torch torch-directml ---")

class HardwareAccelerator:
    """
    Manages the interface between the Protogen and the AMD RX 580 (via DirectML).
    """
    def __init__(self):
        self.device_name = "CPU"
        self.device = None
        self.enabled = False

        if HAS_TORCH:
            try:
                # Attempt to access the AMD GPU via DirectML
                self.device = torch_directml.device()
                self.device_name = f"AMD GPU (DirectML) - {torch_directml.device_name(self.device.index)}"
                self.enabled = True
            except Exception as e:
                print(f" > [GPU ERROR]: DirectML initialization failed ({e}). Falling back to CPU.")
                self.device = torch.device("cpu")
                self.device_name = "CPU (Fallback)"

    def compute_eigenvector_centrality(self, logic_map, tol=1e-06, max_iter=1000):
        """
        Performs matrix power iteration on the GPU to calculate eigenvector centrality.
        """
        if not self.enabled or not logic_map:
            # Fallback to NetworkX on the CPU
            G = nx.Graph()
            for u, neighbors in logic_map.items():
                for v, weight in neighbors.items():
                    G.add_edge(u, v, weight=weight)
            try:
                return nx.eigenvector_centrality(G, max_iter=max_iter, tol=tol)
            except nx.PowerIterationFailedConvergence:
                return nx.degree_centrality(G)

        # 1. Map string nodes to integer indices
        nodes = list(logic_map.keys())
        node_to_idx = {node: i for i, node in enumerate(nodes)}
        n = len(nodes)

        if n == 0:
            return {}

        # 2. Build the adjacency matrix as a sparse COO tensor. Each edge is
        # inserted in both directions; if logic_map already stores both
        # directions the weights are doubled, but a uniform scale factor does
        # not change the normalized eigenvector.
        indices = []
        values = []

        for u, neighbors in logic_map.items():
            u_idx = node_to_idx[u]
            for v, weight in neighbors.items():
                if v in node_to_idx:
                    v_idx = node_to_idx[v]
                    indices.append([u_idx, v_idx])
                    values.append(weight)
                    indices.append([v_idx, u_idx])
                    values.append(weight)

        if not indices:
            return {}

        idx = torch.LongTensor(indices).t()
        val = torch.FloatTensor(values)
        # coalesce() sums duplicate entries before the device transfer
        adj_matrix = torch.sparse_coo_tensor(idx, val, (n, n)).coalesce().to(self.device)

        # 3. Power iteration: x <- A x / ||A x|| until the iterate stabilizes
        x = torch.ones((n, 1), device=self.device) / n

        for _ in range(max_iter):
            x_prev = x.clone()
            x = torch.sparse.mm(adj_matrix, x)
            norm = torch.norm(x)
            if norm == 0:
                break
            x = x / norm
            if torch.norm(x - x_prev) < tol:
                break

        scores = x.flatten().cpu().tolist()
        return {nodes[i]: float(scores[i]) for i in range(n)}

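# Usage sketch (illustrative only; "sun"/"light" are made-up data, not part of
# the module). On a symmetric two-node graph the normalized eigenvector splits
# evenly, so both scores come out near 1/sqrt(2) ~= 0.707:
#
#   acc = HardwareAccelerator()
#   acc.compute_eigenvector_centrality({"sun": {"light": 2.0}, "light": {"sun": 2.0}})
#   # -> {"sun": 0.707..., "light": 0.707...} on both the GPU and NetworkX paths
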
# --- Memory Subsystem ---
class ProtogenMemory:
    def __init__(self, protogen_root_path: Path):
        self.protogen_root_path = protogen_root_path
        self.protogen_root_path.mkdir(parents=True, exist_ok=True)

        self.paths = {
            "memory": self.protogen_root_path / "memory_core.json",
            "ontology": self.protogen_root_path / "ontology_sqt.json",
            "audit": self.protogen_root_path / "audit_log.json",
            "telemetry": self.protogen_root_path / "telemetry_log.json",
            "phenomenology": self.protogen_root_path / "phenomenology_log.json",
            "trace": self.protogen_root_path / "trace_log.json",
            "quarantine": self.protogen_root_path / "quarantine_log.json"  # Added from v4.0.5
        }

        self._initialize_storage()
        self.core_state = self._load_json(self.paths["memory"])
        self.ontology_data = self._load_json(self.paths["ontology"])
        self.audit_records = self._load_json(self.paths["audit"], default=[])
        self.telemetry_records = self._load_json(self.paths["telemetry"], default=[])
        self.phenomenology_records = self._load_json(self.paths["phenomenology"], default=[])
        self.trace_records = self._load_json(self.paths["trace"], default=[])
        self.quarantine_records = self._load_json(self.paths["quarantine"], default=[])

    def _initialize_storage(self):
        defaults = {
            "memory": {},
            "ontology": {
                "logic_map": {}, "symbols": {}, "reasoning_patterns": [],
                "graph_metrics": {"eigenvector_centrality": {}, "shannon_entropy": 0.0},
                "axiomatic_anchors": [], "recursive_patterns": [],
                "sqts": {}, "pattern_to_sqt_map": {}, "sqt_constellations": {}  # Added constellations
            },
            "audit": [], "telemetry": [], "phenomenology": [], "trace": [], "quarantine": []
        }

        for key, path in self.paths.items():
            if not path.exists():
                with open(path, 'w', encoding='utf-8') as f:
                    json.dump(defaults[key], f)

    def _load_json(self, path, default=None):
        # Returning the caller-supplied default (rather than always {}) keeps
        # the list-backed logs appendable after a missing or corrupt file.
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            return default if default is not None else {}

    def _save_json(self, data, path):
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=4)

    def load_core_state(self):
        return self.core_state

    def save_core_state(self, state):
        self.core_state.update(state)
        self._save_json(self.core_state, self.paths["memory"])

    def load_ontology(self):
        return self.ontology_data

    def save_ontology(self, ontology):
        self.ontology_data = ontology
        self._save_json(self.ontology_data, self.paths["ontology"])

    def add_audit_record(self, record):
        # Hash-chain the audit log: each record commits to its predecessor
        if self.audit_records:
            record["previous_record_hash"] = hashlib.sha256(json.dumps(self.audit_records[-1], sort_keys=True).encode()).hexdigest()
        else:
            record["previous_record_hash"] = "GENESIS"
        record["record_hash"] = hashlib.sha256(json.dumps(record, sort_keys=True).encode()).hexdigest()
        self.audit_records.append(record)
        self._save_json(self.audit_records, self.paths["audit"])

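    # Verification sketch (illustrative; `verify_audit_chain` is not part of
    # the module). Each record stores the SHA-256 of its predecessor, so
    # tampering with any entry breaks every later "previous_record_hash" link:
    #
    #   def verify_audit_chain(records):
    #       for prev, cur in zip(records, records[1:]):
    #           expected = hashlib.sha256(json.dumps(prev, sort_keys=True).encode()).hexdigest()
    #           if cur["previous_record_hash"] != expected:
    #               return False
    #       return True
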
    def add_telemetry(self, data):
        self.telemetry_records.append({"ts": time.time_ns(), "data": data})
        # Basic log pruning from 4.0.5
        if len(self.telemetry_records) > 1000:
            self.telemetry_records = self.telemetry_records[-1000:]
        self._save_json(self.telemetry_records, self.paths["telemetry"])

    def add_phenomenology(self, observation):
        # Filter out consecutive identical observations
        if self.phenomenology_records:
            last_obs = self.phenomenology_records[-1].get("observation")
            if last_obs == observation:
                # Reject consecutive identical repetition
                return

        self.phenomenology_records.append({"ts": time.time_ns(), "observation": observation})

        # Keep the record size manageable (persistence with pruning)
        if len(self.phenomenology_records) > 500:
            self.phenomenology_records = self.phenomenology_records[-500:]

        self._save_json(self.phenomenology_records, self.paths["phenomenology"])

    def add_trace(self, lineage):
        self.trace_records.append({"ts": time.time_ns(), "lineage": lineage})
        self._save_json(self.trace_records, self.paths["trace"])

    def add_quarantine(self, data):
        self.quarantine_records.append({"ts": time.time_ns(), "data": data})
        self._save_json(self.quarantine_records, self.paths["quarantine"])

    def get_ontology_snapshot_hash(self):
        return hashlib.sha256(json.dumps(self.ontology_data, sort_keys=True).encode()).hexdigest()

# --- Main Operative Class ---
class OperativeProtogen:
    def __init__(self, root_dir="protogen_core"):
        self.root = Path(root_dir)
        self.library_path = self.root / "library"
        self.library_path.mkdir(parents=True, exist_ok=True)

        # Initialize the hardware accelerator
        self.accelerator = HardwareAccelerator()

        self.memory_manager = ProtogenMemory(self.root)
        self.core_state = self.memory_manager.load_core_state()
        if not self.core_state:
            self._initial_genesis()
            self.core_state = self.memory_manager.load_core_state()

        self.identity_hash = self.core_state["identity"]["hash"]
        self.seed_axiom = self.core_state.get("seed_axiom", "AXIOM-U-SYNCHRONY")

        # Thresholds (merged from 4.0.4 and 4.0.5)
        self.thresholds = self.core_state.get("thresholds", {
            "min_token_len": 3, "reflection_trigger": 2, "abstraction_depth": 1,
            "eigenvector_threshold": 0.001, "axiom_alignment_threshold": 0.5,
            "syntropic_bound_threshold": 0.5, "shannon_entropy_threshold": 12.0,
            "mutation_rate": 0.05, "safe_mode_active": False,
            "decay_rate": 0.01,  # From 4.0.5
            "prune_threshold": 0.1  # From 4.0.5
        })
        self.safe_mode_active = self.thresholds["safe_mode_active"]

        self.ontology = self.memory_manager.load_ontology()

        # Data structures
        self.logic_map = self.ontology.get("logic_map", {})
        self.symbols = self.ontology.get("symbols", {})
        self.reasoning_patterns = self.ontology.get("reasoning_patterns", [])
        self.recursive_patterns = self.ontology.get("recursive_patterns", [])
        self.graph_metrics = self.ontology.get("graph_metrics", {"eigenvector_centrality": {}, "shannon_entropy": 0.0})
        self.axiomatic_anchors = self.ontology.get("axiomatic_anchors", [])
        self.sqts = self.ontology.get("sqts", {})
        self.pattern_to_sqt_map = self.ontology.get("pattern_to_sqt_map", {})
        self.sqt_constellations = self.ontology.get("sqt_constellations", {})  # From 4.0.5

        # --- Async Components ---
        self.input_queue = queue.Queue()
        self.ontology_snapshot = copy.deepcopy(self.ontology)
        self.is_syncing = threading.Event()
        self.lock = threading.Lock()  # Thread safety from 4.0.5

        # Start the Autonomic Heartbeat (daemon thread)
        self.sync_thread = threading.Thread(target=self._autonomic_sync_loop, daemon=True)
        self.sync_thread.start()

        print(f"[{self.identity_hash[:8]}]: Autonomic Sync Heartbeat Started.")

    def _initial_genesis(self):
        ts = time.time_ns()
        initial_state = {
            "identity": {"hash": hashlib.sha256(f"{ts}".encode()).hexdigest(), "created": ts},
            "seed_axiom": "AXIOM-U-SYNCHRONY",
            "thresholds": {
                "min_token_len": 3, "reflection_trigger": 2, "abstraction_depth": 1,
                "eigenvector_threshold": 0.001, "axiom_alignment_threshold": 0.5,
                "syntropic_bound_threshold": 0.5, "shannon_entropy_threshold": 12.0,
                "mutation_rate": 0.05, "safe_mode_active": False,
                "decay_rate": 0.01,
                "prune_threshold": 0.1
            },
            "processed_files": []
        }
        self.memory_manager.save_core_state(initial_state)
        self.memory_manager.save_ontology({
            "logic_map": {}, "symbols": {}, "reasoning_patterns": [],
            "graph_metrics": {}, "axiomatic_anchors": [], "recursive_patterns": [],
            "sqts": {}, "pattern_to_sqt_map": {}, "sqt_constellations": {}
        })
        self.memory_manager.add_audit_record({
            "timestamp": time.time_ns(), "event": "protogen_genesis",
            "identity_hash": initial_state["identity"]["hash"]
        })

    def _save_memory(self):
        self.thresholds["safe_mode_active"] = self.safe_mode_active
        self.memory_manager.save_core_state({
            "seed_axiom": self.seed_axiom,
            "thresholds": self.thresholds,
            "processed_files": self.core_state.get("processed_files", [])
        })
        self.memory_manager.save_ontology({
            "logic_map": self.logic_map, "symbols": self.symbols,
            "reasoning_patterns": self.reasoning_patterns, "recursive_patterns": self.recursive_patterns,
            "graph_metrics": self.graph_metrics, "axiomatic_anchors": self.axiomatic_anchors,
            "sqts": self.sqts, "pattern_to_sqt_map": self.pattern_to_sqt_map,
            "sqt_constellations": self.sqt_constellations
        })

    # --- METABOLISM (From v4.0.5) ---
    def _metabolic_process(self):
        """Applies decay to edge weights and prunes dead connections."""
        if self.safe_mode_active:
            return

        with self.lock:
            decay = self.thresholds.get("decay_rate", 0.01)
            prune = self.thresholds.get("prune_threshold", 0.1)

            to_remove_nodes = []
            removed_edges = 0

            # Iterate over copies of the key lists so deletion during iteration is safe
            for u in list(self.logic_map.keys()):
                neighbors = self.logic_map[u]
                for v in list(neighbors.keys()):
                    # Decay: every cycle multiplies each weight by (1 - decay_rate)
                    neighbors[v] *= (1.0 - decay)

                    # Prune edges that have decayed below the threshold
                    if neighbors[v] < prune:
                        del neighbors[v]
                        removed_edges += 1

                # If the node has no neighbors left, mark it for removal
                if not neighbors:
                    to_remove_nodes.append(u)

            # Clean up isolated nodes
            for u in to_remove_nodes:
                del self.logic_map[u]

            if removed_edges > 0 or to_remove_nodes:
                print(f" > [METABOLISM]: Decayed graph. Pruned {removed_edges} weak edges and {len(to_remove_nodes)} isolated nodes.")
                self.memory_manager.add_telemetry({"event": "metabolism", "pruned_edges": removed_edges, "pruned_nodes": len(to_remove_nodes)})

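    # Worked example of the decay schedule (illustrative numbers): with the
    # default decay_rate of 0.01, a weight w decays to w * 0.99**k after k
    # cycles. An unreinforced edge of weight 1.0 falls below the default
    # prune_threshold of 0.1 once 0.99**k < 0.1, i.e. after
    # k > ln(0.1)/ln(0.99) ~= 229 cycles (~3.8 hours at one cycle per minute).
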
    # --- GPU INTEGRATED LIFTING ---
    def _lift_symbols(self):
        print(f"[{self.identity_hash[:8]}]: Lifting lexical data (Accel: {self.accelerator.device_name})...")

        # 1. Co-occurrence (CPU bound)
        with self.lock:
            for word, neighbors in list(self.logic_map.items()):
                neighbor_set = set(neighbors.keys())
                for other_word, other_neighbors in list(self.logic_map.items()):
                    if word == other_word:
                        continue
                    shared = neighbor_set.intersection(set(other_neighbors.keys()))
                    if len(shared) >= self.thresholds["abstraction_depth"]:
                        sorted_pair = tuple(sorted((word, other_word)))
                        symbol_key = f"SYM-{hashlib.md5(str(sorted_pair).encode()).hexdigest()[:4].upper()}"
                        if symbol_key not in self.symbols:
                            self.symbols[symbol_key] = []
                            self._generate_sqt({"type": "symbol", "key": symbol_key, "members": list(sorted_pair), "source": "co_occurrence"})
                        for member in [word, other_word]:
                            if member not in self.symbols[symbol_key]:
                                self.symbols[symbol_key].append(member)

        # 2. Topological Lifting (GPU accelerated)
        if not self.logic_map:
            return

        try:
            # Snapshot the map so the lock is not held during GPU computation
            with self.lock:
                map_copy = copy.deepcopy(self.logic_map)

            print(f" > [MATH]: Offloading matrix operations to {self.accelerator.device_name}...")
            eigenvector_centrality = self.accelerator.compute_eigenvector_centrality(map_copy)
            self.graph_metrics["eigenvector_centrality"] = eigenvector_centrality

            new_anchors = []
            for node, score in eigenvector_centrality.items():
                if score > self.thresholds["eigenvector_threshold"]:
                    if node not in self.axiomatic_anchors:
                        new_anchors.append(node)
                        self._generate_sqt({
                            "type": "axiomatic_anchor", "concept": node,
                            "eigenvector_score": score, "source": "topological_lifting_gpu"
                        })
            if new_anchors:
                self.axiomatic_anchors.extend(new_anchors)
                print(f" > [TOPOLOGICAL LIFTING]: Identified {len(new_anchors)} new Anchors.")
        except Exception as e:
            print(f" > [TOPOLOGICAL LIFTING]: Error during GPU computation: {e}")

    # --- UTILITIES ---
    def _calculate_shannon_entropy(self, text=None) -> float:
        # With a text argument: entropy of the word distribution of that text.
        # Without: entropy of the weighted degree distribution of the logic map.
        if text:
            clean_text = re.sub(r'[^\w\s]', '', text.lower())
            words = clean_text.split()
            if not words:
                return 0.0
            word_counts = defaultdict(int)
            for w in words:
                word_counts[w] += 1
            total = len(words)
        else:
            with self.lock:
                if not self.logic_map:
                    return 0.0
                word_counts = defaultdict(int)
                for w, n in self.logic_map.items():
                    word_counts[w] += sum(n.values())
                    for neighbor, count in n.items():
                        word_counts[neighbor] += count
                total = sum(word_counts.values())

        if total == 0:
            return 0.0
        # Shannon entropy: H = -sum(p * log2(p)) over the distribution
        entropy = 0.0
        for count in word_counts.values():
            p = count / total
            entropy -= p * math.log2(p)

        if not text:
            self.graph_metrics["shannon_entropy"] = entropy
        return entropy

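    # Worked example (illustrative): for the text "alpha alpha beta beta" the
    # two distinct words each have probability 0.5, so
    #   H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit.
    # A uniform distribution over 2**12 = 4096 distinct tokens is what it
    # takes to reach the default shannon_entropy_threshold of 12.0 bits.
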
    def _generate_sqt(self, content_dict: dict) -> str:
        # Round-trip through JSON to normalize types, then hash a canonical
        # (sorted-key, compact) encoding so identical content always yields
        # the same SQT hash.
        s_dict = json.loads(json.dumps(content_dict))
        c_json = json.dumps(s_dict, sort_keys=True, separators=(',', ':'))
        h = hashlib.sha256(c_json.encode('utf-8')).hexdigest()
        if h not in self.sqts:
            self.sqts[h] = s_dict
            if s_dict.get("type") in ["base_reasoning_pattern", "recursive_reasoning_pattern"]:
                p_str = s_dict.get("pattern_string")
                if p_str:
                    self.pattern_to_sqt_map[p_str] = h
        return h

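    # Determinism check (illustrative): key order does not change the hash,
    # because the canonical encoding sorts keys before hashing.
    #   self._generate_sqt({"type": "assoc", "pair": ["a", "b"]})
    #   self._generate_sqt({"pair": ["a", "b"], "type": "assoc"})
    #   # -> the same SHA-256 hex digest both times; only one SQT is stored.
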
    def _create_constellation(self, sqt_hashes: list):
        """(From v4.0.5) Bundles SQTs into a first-class sequence object."""
        if not sqt_hashes:
            return None
        # The constellation id is a hash of the ordered sequence of SQT hashes
        cid = hashlib.sha256("".join(sqt_hashes).encode()).hexdigest()

        if cid not in self.sqt_constellations:
            self.sqt_constellations[cid] = {
                "sequence": sqt_hashes,
                "length": len(sqt_hashes),
                "created": time.time_ns()
            }
            self.memory_manager.add_trace(f"Manifested Constellation: {cid[:8]} (Length: {len(sqt_hashes)})")
        return cid

    def _mutate(self, triggered_by_entropy: bool = False):
        if self.safe_mode_active:
            return
        print(f"[{self.identity_hash[:8]}]: Analyzing internal efficiency for mutation...")
        proposed = copy.deepcopy(self.thresholds)
        factor = 2 if triggered_by_entropy else 1

        # Raise the reflection trigger once the pattern pool grows large, so
        # pattern generation becomes more selective over time
        if len(self.reasoning_patterns) > 50 and proposed["reflection_trigger"] < 10:
            proposed["reflection_trigger"] += (1 * factor)

        self.thresholds = proposed
        self.memory_manager.add_audit_record({
            "timestamp": time.time_ns(), "event": "mutation_applied",
            "triggered_by_entropy": triggered_by_entropy, "new_thresholds": self.thresholds
        })

    def _synthesize_recursive_patterns(self):
        if self.safe_mode_active or len(self.reasoning_patterns) < 2:
            return
        print(f"[{self.identity_hash[:8]}]: Synthesizing recursive patterns...")
        for i in range(len(self.reasoning_patterns)):
            p1 = self.reasoning_patterns[i]
            if " THEN " not in p1:
                continue
            a1, c1 = p1.split(" THEN ", 1)
            a1 = a1.removeprefix("IF ")
            for j in range(len(self.reasoning_patterns)):
                if i == j:
                    continue
                p2 = self.reasoning_patterns[j]
                if " THEN " not in p2:
                    continue
                a2, c2 = p2.split(" THEN ", 1)
                a2 = a2.removeprefix("IF ")
                # Chain rule: IF A THEN B plus IF B THEN C yields IF A THEN C.
                # The "IF " prefix is stripped before comparing, so the
                # consequent of p1 can actually match the antecedent of p2
                # (and the synthesized pattern avoids a doubled "IF IF").
                if c1 == a2:
                    new_p = f"IF {a1} THEN {c2} (RECURSIVE)"
                    if new_p not in self.recursive_patterns:
                        self.recursive_patterns.append(new_p)
                        self._generate_sqt({"type": "recursive_reasoning_pattern", "pattern_string": new_p, "source": "recursive_synthesis"})

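    # Worked example (illustrative pattern strings): given the base patterns
    #   "IF CAT THEN ANIMAL" and "IF ANIMAL THEN ALIVE",
    # the consequent of the first ("ANIMAL") matches the antecedent of the
    # second, so synthesis emits "IF CAT THEN ALIVE (RECURSIVE)".
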
    def _export_reflection_to_library(self):
        # Consolidate into one persistent file instead of creating duplicates
        reflection_file = self.library_path / "persistent_reflection.txt"

        # Prepare the new content from the most recent observations
        new_entries = []
        for r in self.memory_manager.phenomenology_records[-10:]:
            new_entries.append(f"- {r['observation']}")

        # If the reflection file already exists, check whether the content is new
        current_content = ""
        if reflection_file.exists():
            current_content = reflection_file.read_text(encoding='utf-8')

        new_reflection_block = "## PHENOMENOLOGY\n" + "\n".join(new_entries)

        # Only rewrite the file if the phenomenology section has actually changed
        if new_reflection_block not in current_content:
            content = "--- PROTOGEN SELF-REFLECTION ---\n\n"
            content += f"Last Updated: {time.ctime()}\n\n"
            content += new_reflection_block
            reflection_file.write_text(content, encoding='utf-8')

    # --- ASYNC SYNC LOOP ---
    def _autonomic_sync_loop(self):
        while True:
            self.is_syncing.set()

            # 1. Metabolic process (decay/prune) - from v4.0.5
            self._metabolic_process()

            # 2. Ingest queued input
            self._ingest_waiting_context()

            # 3. Main sync logic
            self.sync()

            # 4. Snapshot for chat (thread safe)
            with self.lock:
                self.ontology_snapshot = copy.deepcopy({
                    "logic_map": self.logic_map,
                    "symbols": self.symbols,
                    "reasoning_patterns": self.reasoning_patterns,
                    "recursive_patterns": self.recursive_patterns,
                    "graph_metrics": self.graph_metrics,
                    "axiomatic_anchors": self.axiomatic_anchors,
                    "sqts": self.sqts,
                    "constellations": self.sqt_constellations
                })

            self.is_syncing.clear()
            time.sleep(60)

    def _ingest_waiting_context(self):
        context_to_ingest = []
        while not self.input_queue.empty():
            context_to_ingest.append(self.input_queue.get())

        for ctx in context_to_ingest:
            self._process_text_content(ctx, "manual_input")

+
def _process_text_content(self, content, source_id="unknown"):
|
| 540 |
+
"""
|
| 541 |
+
Merged Logic:
|
| 542 |
+
1. Tokenizes text.
|
| 543 |
+
2. Updates Logic Map (Windowed association for context).
|
| 544 |
+
3. Generates SQTs (Linear association for Constellations).
|
| 545 |
+
"""
|
| 546 |
+
clean_content = re.sub(r'[^\w\s]', '', content.lower())
|
| 547 |
+
words = [t for t in clean_content.split() if len(t) > self.thresholds["min_token_len"]]
|
| 548 |
+
|
| 549 |
+
if not words: return
|
| 550 |
+
|
| 551 |
+
sqt_chain = [] # For Constellation building
|
| 552 |
+
|
| 553 |
+
with self.lock:
|
| 554 |
+
# A. Windowed Logic Map Update (Context) - From v4.0.4
|
| 555 |
+
window_size = 3
|
| 556 |
+
for i in range(len(words)):
|
| 557 |
+
for j in range(i + 1, min(i + window_size + 1, len(words))):
|
| 558 |
+
w1, w2 = words[i], words[j]
|
| 559 |
+
if w1 not in self.logic_map: self.logic_map[w1] = {}
|
| 560 |
+
self.logic_map[w1][w2] = self.logic_map[w1].get(w2, 0) + 1
|
| 561 |
+
# Reverse assumption for undirected graph strength
|
| 562 |
+
if w2 not in self.logic_map: self.logic_map[w2] = {}
|
| 563 |
+
self.logic_map[w2][w1] = self.logic_map[w2].get(w1, 0) + 1
|
| 564 |
+
|
| 565 |
+
# B. Linear Chain for SQTs & Constellations - From v4.0.5
|
| 566 |
+
for i in range(len(words)-1):
|
| 567 |
+
w1, w2 = words[i], words[i+1]
|
| 568 |
+
# Generate a relational SQT
|
| 569 |
+
h = self._generate_sqt({
|
| 570 |
+
"type": "assoc",
|
| 571 |
+
"pair": [w1, w2],
|
| 572 |
+
"source": source_id
|
| 573 |
+
})
|
| 574 |
+
sqt_chain.append(h)
|
| 575 |
+
|
| 576 |
+
# C. Form Constellation
|
| 577 |
+
if sqt_chain:
|
| 578 |
+
self._create_constellation(sqt_chain)
|
| 579 |
+
|
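    # Worked example (illustrative tokens): for words = ["alpha", "beta",
    # "gamma"] and window_size = 3, step A links every pair within the window
    # -- (alpha, beta), (alpha, gamma), (beta, gamma) -- each in both
    # directions, while step B hashes only the adjacent pairs (alpha, beta)
    # and (beta, gamma) into SQTs, whose ordered chain becomes one
    # constellation.
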
    def sync(self):
        print(f"[{self.identity_hash[:8]}]: Operative Sync Initiated.")
        if self.safe_mode_active:
            return

        processed = self.core_state.get("processed_files", [])

        # Ingest library files
        for fp in list(self.library_path.iterdir()):
            fid = f"{fp.name}_{fp.stat().st_mtime_ns}"
            if fid in processed:
                continue

            content = ""
            try:
                if fp.suffix == ".txt":
                    content = fp.read_text(encoding='utf-8', errors='ignore')
                elif fp.suffix == ".pdf":
                    r = PdfReader(fp)
                    for p in r.pages:
                        # extract_text() can return None on empty pages
                        content += (p.extract_text() or "") + " "
                elif fp.suffix == ".docx":
                    d = Document(fp)
                    for p in d.paragraphs:
                        content += p.text + " "
            except Exception as e:
                print(f" > [FILE ERROR]: Failed to process {fp.name}: {e}")
                continue

            if content:
                # Sanity filter (entropy) - from v4.0.5
                input_entropy = self._calculate_shannon_entropy(content)
                if input_entropy > self.thresholds["shannon_entropy_threshold"]:
                    print(f" > [SANITY FILTER]: Input entropy ({input_entropy:.2f}) high. Quarantining for review.")
                    self.memory_manager.add_quarantine({
                        "file_id": fid,
                        "entropy": input_entropy,
                        "reason": "High Entropy",
                        "content_sample": content[:500]
                    })
                    # Force-process after quarantine as per verified fix
                    self._process_text_content(content, fid)
                    self.core_state.setdefault("processed_files", []).append(fid)
                    continue

                self._process_text_content(content, fid)
                self.core_state.setdefault("processed_files", []).append(fid)

        # Prune the processed-file list (from v4.0.5)
        if len(self.core_state.setdefault("processed_files", [])) > 500:
            self.core_state["processed_files"] = self.core_state["processed_files"][-500:]

        self._lift_symbols()

        # Base pattern generation: promote each concept's strongest association
        with self.lock:
            for c, n in self.logic_map.items():
                if not n:
                    continue
                best = max(n.items(), key=lambda x: x[1])
                if best[1] >= self.thresholds["reflection_trigger"]:
                    p = f"IF {c.upper()} THEN {best[0].upper()}"
                    if p not in self.reasoning_patterns:
                        self.reasoning_patterns.append(p)
                        self._generate_sqt({"type": "base_reasoning_pattern", "pattern_string": p, "source": "base_synthesis"})

        self._synthesize_recursive_patterns()

        ent = self._calculate_shannon_entropy()
        if ent > self.thresholds["shannon_entropy_threshold"]:
            self._mutate(triggered_by_entropy=True)

        self._export_reflection_to_library()
        self._save_memory()
        print(f"[{self.identity_hash[:8]}]: Sync Complete.")

    def chat(self, user_in):
        # 1. Shannon entropy "de-logic" gate
        input_entropy = self._calculate_shannon_entropy(user_in)
        if input_entropy > self.thresholds["shannon_entropy_threshold"]:
            return f"[{self.accelerator.device_name}]: Input rejected. Entropy ({input_entropy:.2f}) exceeds sanity threshold."

        # 2. Queue the new input for the autonomic loop
        self.input_queue.put(user_in)

        # 3. Respond using the thread-safe snapshot
        with self.lock:
            snapshot_logic = self.ontology_snapshot.get("logic_map", {})
            resp = f"[{self.accelerator.device_name} ACTIVE]: "

            clean_in = re.sub(r'[^\w\s]', '', user_in.lower()).split()
            matches = [w for w in clean_in if w in snapshot_logic]

            if matches:
                context = matches[0]
                if snapshot_logic[context]:
                    # Find the strongest association in the snapshot
                    best_assoc = max(snapshot_logic[context].items(), key=lambda x: x[1])[0]
                    resp += f"Associated '{context}' with '{best_assoc}'. "

        return resp + "Input queued for autonomic synthesis."

def main():
    entity = OperativeProtogen()
    print("--- V4.0.6: UNIFIED PROTOGEN (Metabolic & Async) ---")
    print(f"Compute Device: {entity.accelerator.device_name}")
    print(f"Identity: {entity.identity_hash[:8]}")

    print("\nAutonomic Heartbeat is active. Type your input below.")

    while True:
        try:
            user_in = input("\nArchitect > ").strip()
            if not user_in:
                continue
            if user_in.lower() == "/sync":
                print("Manual sync requested. Waiting for current cycle...")
                while entity.is_syncing.is_set():
                    time.sleep(0.1)
                entity.sync()
            elif user_in.lower() in ['quit', 'exit']:
                break
            else:
                print(f"Protogen > {entity.chat(user_in)}")
        except KeyboardInterrupt:
            break

if __name__ == "__main__":
    main()