goodgoals committed · verified
Commit 87c39ab · Parent(s): 0dea886

Create src/streamlit_app.py

Files changed (1): src/streamlit_app.py (+1382 lines, new file)
# ========================== Factories + Engines ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 1 / 10
# Imports, Utilities, Embedding Service
# ==================

import time
import re
from dataclasses import dataclass, field
from typing import List, Dict, Any, Optional, Iterable, Tuple, Callable

import torch
import networkx as nx
from sentence_transformers import SentenceTransformer, util
from datasets import load_dataset


# ==========================
# Device & Embedding Model
# ==========================

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EMBEDDING_MODEL_NAME = "all-MiniLM-L6-v2"

print("Using device:", DEVICE)


# ==========================
# Utility Functions
# ==========================

def normalize_text(text: str) -> str:
    """Lowercase, strip punctuation, collapse whitespace."""
    if text is None:
        return ""
    text = text.lower().strip()
    # Delete apostrophes (instead of turning them into spaces) so that
    # contractions like "doesn't" normalize to the single token "doesnt"
    # and can be matched by the negation list below.
    text = text.replace("'", "").replace("’", "")
    text = re.sub(r"[^\w\s\?]", " ", text)
    text = re.sub(r"\s+", " ", text)
    return text


def contains_negation(text: str) -> bool:
    """Detect negation words in natural language."""
    # Apostrophe-free forms, matching the output of normalize_text.
    neg_words = ["not", "never", "no", "doesnt", "isnt", "arent", "without", "cannot", "cant"]
    tokens = normalize_text(text).split()
    return any(w in tokens for w in neg_words)


def extract_prob_modifier(text: str) -> float:
    """
    Detect probabilistic language and convert it to a confidence multiplier.
    Examples:
        "always"    -> 1.0
        "usually"   -> 0.8
        "often"     -> 0.7
        "sometimes" -> 0.5
        "rarely"    -> 0.3
        "never"     -> 0.0
    """
    t = normalize_text(text)
    if "always" in t:
        return 1.0
    if "usually" in t:
        return 0.8
    if "often" in t:
        return 0.7
    if "sometimes" in t:
        return 0.5
    if "rarely" in t:
        return 0.3
    if "never" in t:
        return 0.0
    return 1.0  # default


def is_variable(token: Optional[str]) -> bool:
    """Variables start with ? (e.g., ?x, ?y)."""
    return isinstance(token, str) and token.startswith("?") and len(token) > 1


# ==========================
# Embedding Service
# ==========================

class EmbeddingService:
    """Wrapper around SentenceTransformer for consistent encoding."""

    def __init__(self, model_name: str = EMBEDDING_MODEL_NAME, device: torch.device = DEVICE):
        self.model = SentenceTransformer(model_name, device=device)

    def encode(self, text: str) -> torch.Tensor:
        return self.model.encode(text, convert_to_tensor=True)

    def encode_batch(self, texts: Iterable[str]) -> torch.Tensor:
        return self.model.encode(list(texts), convert_to_tensor=True)


# Global embedding service instance
embedding_service = EmbeddingService()
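
# Illustrative usage sketch (comments only, not executed): how the global
# embedding service is used by the memories below. The sentences are
# hypothetical examples, not values from this file.
#
#   a = embedding_service.encode("fire is hot")
#   b = embedding_service.encode("a flame is very hot")
#   sim = util.cos_sim(a, b).item()   # cosine similarity in [-1, 1]
#   # Semantically close sentences score high even without shared keywords.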


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 2 / 10
# Core Data Structures
# ==========================

@dataclass(frozen=True)
class Fact:
    """
    A structured fact with:
    - subject
    - predicate
    - object
    - confidence (0–1)
    - polarity (+1 = positive, -1 = negated)
    - source (manual, induced, nl_input, dataset, etc.)
    """
    subject: str
    predicate: str
    obj: Optional[str] = None
    confidence: float = 1.0
    polarity: int = 1
    verified: bool = True
    source: str = "unknown"
    timestamp: float = field(default_factory=time.time)
    embedding: torch.Tensor = field(init=False, compare=False, repr=False)

    def __post_init__(self):
        # Normalize fields (the dataclass is frozen, so use object.__setattr__)
        object.__setattr__(self, "subject", normalize_text(self.subject))
        object.__setattr__(self, "predicate", normalize_text(self.predicate))
        if self.obj is not None:
            object.__setattr__(self, "obj", normalize_text(self.obj))

        # Create canonical text for embedding
        canonical = self.to_text(include_polarity=False)
        emb = embedding_service.encode(canonical)
        object.__setattr__(self, "embedding", emb)

    def to_text(self, include_polarity: bool = True) -> str:
        """Return a human-readable representation."""
        base = f"{self.subject} {self.predicate}"
        if self.obj:
            base += f" {self.obj}"
        if include_polarity and self.polarity == -1:
            base = "NOT " + base
        return base

    def key(self) -> str:
        """Unique key for memory storage."""
        pol = "neg" if self.polarity == -1 else "pos"
        return f"{pol}:{self.to_text(include_polarity=False)}"
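
# Worked example (comments only): Fact normalizes its fields, so
#   Fact(subject="Fire", predicate="IS", obj="Hot")
# stores subject="fire", predicate="is", obj="hot",
# to_text() == "fire is hot", and key() == "pos:fire is hot".
# The same triple with polarity=-1 yields key() == "neg:fire is hot",
# which is exactly how the contradiction checks below pair opposites.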


@dataclass
class RulePattern:
    """
    A pattern used in rules, supporting variables (?x, ?y).
    Includes polarity for negation-aware reasoning.
    """
    subject: str
    predicate: str
    obj: Optional[str] = None
    polarity: int = 1

    def normalized(self) -> "RulePattern":
        return RulePattern(
            subject=normalize_text(self.subject),
            predicate=normalize_text(self.predicate),
            obj=normalize_text(self.obj) if self.obj is not None else None,
            polarity=self.polarity,
        )


@dataclass
class Rule:
    """
    A rule with:
    - name
    - list of condition patterns
    - conclusion pattern
    - confidence
    - source (manual, induced)
    """
    name: str
    conditions: List[RulePattern]
    conclusion: RulePattern
    confidence: float = 1.0
    source: str = "manual"

    def normalized(self) -> "Rule":
        return Rule(
            name=self.name,
            conditions=[c.normalized() for c in self.conditions],
            conclusion=self.conclusion.normalized(),
            confidence=self.confidence,
            source=self.source,
        )


@dataclass
class ReasoningEvent:
    """
    A log entry for meta-memory:
    - which engine produced it
    - message
    - confidence
    - timestamp
    """
    engine: str
    message: str
    confidence: float
    timestamp: float = field(default_factory=time.time)


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 3 / 10
# Retrieval Index + Memory Systems
# ==========================

# ==========================
# Retrieval Index
# ==========================

class RetrievalIndex:
    """
    Stores embeddings for fast similarity search.
    Used by semantic memory and the analogy engine.
    """

    def __init__(self):
        self.embeddings: List[torch.Tensor] = []
        self.items: List[Any] = []

    def add(self, embedding: torch.Tensor, item: Any):
        self.embeddings.append(embedding)
        self.items.append(item)

    def search(self, query_embedding: torch.Tensor, top_k: int = 5) -> List[Tuple[Any, float]]:
        if not self.embeddings:
            return []
        mat = torch.stack(self.embeddings)
        sims = util.cos_sim(query_embedding, mat)[0]
        k = min(top_k, len(sims))
        topk = torch.topk(sims, k=k)
        results = []
        for idx in topk.indices:
            i = idx.item()
            results.append((self.items[i], sims[i].item()))
        return results
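
# Usage sketch (comments only; the query string is a hypothetical example):
#   index = RetrievalIndex()
#   f = Fact("fire", "is", "hot")
#   index.add(f.embedding, f)
#   hits = index.search(embedding_service.encode("what is hot?"), top_k=3)
#   # -> [(item, score), ...] pairs sorted by cosine similarity, best first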


# ==========================
# Sensory Memory
# ==========================

class SensoryMemory:
    """
    Stores raw text entries (e.g., dataset items, Wikipedia text).
    Used as a fallback when structured reasoning fails.
    """

    def __init__(self):
        self.entries: List[str] = []
        self.entry_embeddings: List[torch.Tensor] = []

    def add_entry(self, text: str, embedding: Optional[torch.Tensor] = None):
        self.entries.append(text)
        if embedding is None:
            embedding = embedding_service.encode(normalize_text(text))
        self.entry_embeddings.append(embedding)


# ==========================
# Working Memory
# ==========================

class WorkingMemory:
    """
    Stores active facts used for reasoning.
    """

    def __init__(self):
        self.facts: Dict[str, Fact] = {}

    def add_fact(self, fact: Fact):
        self.facts[fact.key()] = fact

    def has_fact(self, fact: Fact) -> bool:
        return fact.key() in self.facts

    def all_facts(self) -> List[Fact]:
        return list(self.facts.values())


# ==========================
# Semantic Memory
# ==========================

class SemanticMemory:
    """
    Stores long-term structured knowledge.
    Uses a graph + retrieval index.
    """

    def __init__(self):
        self.graph = nx.DiGraph()
        self.index = RetrievalIndex()
        self.fact_map: Dict[str, Fact] = {}

    def add_fact(self, fact: Fact):
        key = fact.key()
        # If the fact already exists, keep the one with higher confidence
        if key in self.fact_map:
            existing = self.fact_map[key]
            if fact.confidence > existing.confidence:
                self.fact_map[key] = fact
            return

        self.fact_map[key] = fact
        self.graph.add_node(key, data=fact)
        self.index.add(fact.embedding, fact)

    def add_relation(self, source: Fact, target: Fact, relation: str, weight: float = 1.0):
        self.graph.add_edge(source.key(), target.key(), relation=relation, weight=weight)

    def search_fact(self, query_embedding: torch.Tensor, threshold: float = 0.6) -> Optional[Fact]:
        results = self.index.search(query_embedding, top_k=5)
        best = [(f, score) for f, score in results if score >= threshold]
        if not best:
            return None
        best.sort(key=lambda x: x[1], reverse=True)
        return best[0][0]


# ==========================
# Episodic Memory
# ==========================

@dataclass
class Episode:
    description: str
    facts: List[Fact]
    timestamp: float = field(default_factory=time.time)


class EpisodicMemory:
    """
    Stores episodes: groups of facts tied to a specific event or dataset item.
    """

    def __init__(self):
        self.episodes: List[Episode] = []

    def add_episode(self, description: str, facts: List[Fact]):
        self.episodes.append(Episode(description=description, facts=facts))


# ==========================
# Procedural Memory
# ==========================

class ProceduralMemory:
    """
    Stores learned procedures (functions).
    """

    def __init__(self):
        self.procedures: Dict[str, Callable] = {}

    def add_procedure(self, name: str, func: Callable):
        self.procedures[name] = func

    def get_procedure(self, name: str) -> Optional[Callable]:
        return self.procedures.get(name)


# ==========================
# Meta-Memory
# ==========================

class MetaMemory:
    """
    Tracks:
    - reasoning events
    - contradictions
    - engine reliability scores
    """

    def __init__(self):
        self.contradictions: List[str] = []
        self.events: List[ReasoningEvent] = []
        self.engine_scores: Dict[str, float] = {}  # reliability scores

    def log(self, engine: str, message: str, confidence: float):
        self.events.append(ReasoningEvent(engine=engine, message=message, confidence=confidence))
        # Update engine reliability as an exponential moving average
        self.engine_scores[engine] = self.engine_scores.get(engine, 0.5) * 0.9 + confidence * 0.1

    def add_contradiction(self, fact1: Fact, fact2: Fact):
        msg = f"CONTRADICTION: '{fact1.to_text()}' conflicts with '{fact2.to_text()}'"
        self.contradictions.append(msg)
        self.events.append(ReasoningEvent(engine="TMS", message=msg, confidence=0.0))

    def recent_trace(self, n: int = 20) -> List[ReasoningEvent]:
        return self.events[-n:]
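
# Worked example of the reliability update (comments only): each call to log()
# nudges an engine's score toward the observed confidence,
#   score_new = 0.9 * score_old + 0.1 * confidence.
# Starting from the default 0.5, logging confidence 0.9 once gives
#   0.9 * 0.5 + 0.1 * 0.9 = 0.54,
# so trust shifts slowly and one lucky inference cannot dominate the ranking.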


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 4 / 10
# Unification Engine
# ==========================

from abc import ABC, abstractmethod


class ReasoningEngineBase(ABC):
    """Abstract base class for all reasoning engines."""

    name: str

    @abstractmethod
    def reason_forward(self, engine: "CognitiveEngine") -> Tuple[List["Fact"], List[str], float]:
        pass

    @abstractmethod
    def reason_backward(self, engine: "CognitiveEngine", goal: "RulePattern") -> Tuple[List["Fact"], List[str], float]:
        pass


def unify_token(pattern: Optional[str], value: Optional[str], subst: Dict[str, str]) -> Optional[Dict[str, str]]:
    """
    Unify a single token:
    - If pattern is a variable (?x), bind it.
    - If pattern contains an embedded variable (e.g. "touching_?x"),
      match it structurally and bind the variable. Without this case,
      rule conditions such as "touching_?x causes burn" (used below)
      could never match a ground fact like "touching_stove causes burn".
    - If pattern is a constant, it must match the value.
    """
    if pattern is None and value is None:
        return subst
    if pattern is None or value is None:
        return None

    pattern = normalize_text(pattern)
    value = normalize_text(value)

    # Variable case
    if is_variable(pattern):
        var = pattern
        if var in subst:
            # Already bound → must match
            return subst if subst[var] == value else None
        # Bind new variable
        new_subst = dict(subst)
        new_subst[var] = value
        return new_subst

    # Embedded variable case: build a regex from the pattern,
    # capturing one group per variable occurrence.
    if "?" in pattern:
        var_names = re.findall(r"\?\w+", pattern)
        regex = re.escape(pattern)
        for var in var_names:
            regex = regex.replace(re.escape(var), r"(\w+)", 1)
        m = re.fullmatch(regex, value)
        if m is None:
            return None
        new_subst = dict(subst)
        for var, bound in zip(var_names, m.groups()):
            if var in new_subst and new_subst[var] != bound:
                return None
            new_subst[var] = bound
        return new_subst

    # Constant case
    if pattern == value:
        return subst

    return None


def unify_fact(pattern: "RulePattern", fact: "Fact", subst: Dict[str, str]) -> Optional[Dict[str, str]]:
    """
    Unify a rule pattern with a fact.
    Includes polarity matching.
    """
    # Polarity must match
    if pattern.polarity != fact.polarity:
        return None

    # Subject
    subst1 = unify_token(pattern.subject, fact.subject, subst)
    if subst1 is None:
        return None

    # Predicate
    subst2 = unify_token(pattern.predicate, fact.predicate, subst1)
    if subst2 is None:
        return None

    # Object
    subst3 = unify_token(pattern.obj, fact.obj, subst2)
    return subst3


def apply_substitution(pattern: "RulePattern", subst: Dict[str, str]) -> "RulePattern":
    """
    Apply variable bindings to a rule pattern.
    Also substitutes variables embedded inside a token (e.g. "touching_?x"),
    which the rules below rely on; substituting only whole-token variables
    would leave conclusions like "touching_?x causes burn" unground.
    """
    def apply_token(token: Optional[str]) -> Optional[str]:
        if token is None:
            return None
        token = normalize_text(token)
        if is_variable(token) and token in subst:
            return subst[token]
        # Embedded variables: replace every bound ?var occurring inside the
        # token (longest names first, so ?x never clobbers part of ?xy).
        if "?" in token:
            for var, val in sorted(subst.items(), key=lambda kv: -len(kv[0])):
                token = token.replace(var, val)
        return token

    return RulePattern(
        subject=apply_token(pattern.subject),
        predicate=apply_token(pattern.predicate),
        obj=apply_token(pattern.obj),
        polarity=pattern.polarity,
    )
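
# Worked unification example (comments only):
#   pattern = RulePattern(subject="?x", predicate="is", obj="hot")
#   fact    = Fact("stove", "is", "hot")
#   unify_fact(pattern, fact, {})   # -> {"?x": "stove"}
#   apply_substitution(RulePattern("touching_?x", "causes", "burn"), {"?x": "stove"})
#   # -> RulePattern("touching_stove", "causes", "burn")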


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 5 / 10
# Deductive Engine (Forward + Backward)
# ==========================

class DeductiveEngine(ReasoningEngineBase):
    name = "deductive"

    # ---------------------------------------------------------
    # Forward Chaining
    # ---------------------------------------------------------
    def reason_forward(self, engine: "CognitiveEngine") -> Tuple[List[Fact], List[str], float]:
        new_facts: List[Fact] = []
        trace: List[str] = []
        avg_conf = 0.0
        count = 0

        facts = engine.working.all_facts()
        rules = [r.normalized() for r in engine.rules]

        for rule in rules:
            matches = self._match_rule(rule, facts)

            for subst, cond_facts in matches:
                concl_pattern = apply_substitution(rule.conclusion, subst)

                # Polarity propagation
                polarity = concl_pattern.polarity

                # Confidence propagation
                conf = self._propagate_confidence(rule, cond_facts)

                concl_fact = Fact(
                    subject=concl_pattern.subject,
                    predicate=concl_pattern.predicate,
                    obj=concl_pattern.obj,
                    polarity=polarity,
                    confidence=conf,
                    verified=True,
                    source=f"rule:{rule.name}",
                )

                # Contradiction detection
                self._check_contradictions(engine, concl_fact)

                if not engine.working.has_fact(concl_fact):
                    new_facts.append(concl_fact)
                    used = ", ".join(f.to_text() for f in cond_facts)
                    msg = (
                        f"[Deduction] {rule.name} with {used} "
                        f"-> {concl_fact.to_text()} (conf={concl_fact.confidence:.2f})"
                    )
                    trace.append(msg)
                    avg_conf += concl_fact.confidence
                    count += 1

        if count > 0:
            avg_conf /= count
        else:
            avg_conf = 0.0

        return new_facts, trace, avg_conf

    # ---------------------------------------------------------
    # Backward Chaining
    # ---------------------------------------------------------
    def reason_backward(self, engine: "CognitiveEngine", goal: RulePattern) -> Tuple[List[Fact], List[str], float]:
        trace: List[str] = []
        proven_facts: List[Fact] = []
        avg_conf = 0.0
        count = 0

        # 1. Check if the goal already exists in working memory
        for f in engine.working.all_facts():
            if unify_fact(goal, f, {}) is not None:
                proven_facts.append(f)
                trace.append(f"[Backward-Deduction] Goal already known: {f.to_text()}")
                avg_conf += f.confidence
                count += 1
                return proven_facts, trace, avg_conf / max(count, 1)

        # 2. Try to prove the goal using rules
        rules = [r.normalized() for r in engine.rules]

        for rule in rules:
            subst = unify_fact(rule.conclusion, Fact(goal.subject, goal.predicate, goal.obj, polarity=goal.polarity), {})
            if subst is None:
                continue

            # Try to prove all conditions
            all_proven = True
            cond_facts: List[Fact] = []

            for cond in rule.conditions:
                cond_goal = apply_substitution(cond, subst)
                pf, pt, pc = self.reason_backward(engine, cond_goal)
                trace.extend(pt)

                if not pf:
                    all_proven = False
                    break

                cond_facts.extend(pf)
                avg_conf += pc
                count += 1

            if all_proven:
                concl_pattern = apply_substitution(rule.conclusion, subst)
                concl_fact = Fact(
                    subject=concl_pattern.subject,
                    predicate=concl_pattern.predicate,
                    obj=concl_pattern.obj,
                    polarity=concl_pattern.polarity,
                    confidence=self._propagate_confidence(rule, cond_facts),
                    verified=True,
                    source=f"rule:{rule.name}",
                )

                proven_facts.append(concl_fact)
                trace.append(f"[Backward-Deduction] Proved {concl_fact.to_text()} via {rule.name}")
                avg_conf += concl_fact.confidence
                count += 1
                break

        if count > 0:
            avg_conf /= count
        else:
            avg_conf = 0.0

        return proven_facts, trace, avg_conf

    # ---------------------------------------------------------
    # Rule Matching
    # ---------------------------------------------------------
    def _match_rule(self, rule: Rule, facts: List[Fact]) -> List[Tuple[Dict[str, str], List[Fact]]]:
        results: List[Tuple[Dict[str, str], List[Fact]]] = []

        def backtrack(i: int, subst: Dict[str, str], chosen: List[Fact]):
            if i == len(rule.conditions):
                results.append((subst, chosen.copy()))
                return

            pattern = rule.conditions[i]

            for fact in facts:
                new_subst = unify_fact(pattern, fact, subst)
                if new_subst is not None and fact not in chosen:
                    chosen.append(fact)
                    backtrack(i + 1, new_subst, chosen)
                    chosen.pop()

        backtrack(0, {}, [])
        return results

    # ---------------------------------------------------------
    # Confidence Propagation
    # ---------------------------------------------------------
    def _propagate_confidence(self, rule: Rule, cond_facts: List[Fact]) -> float:
        conf = rule.confidence
        for f in cond_facts:
            conf *= f.confidence
        return max(min(conf, 1.0), 0.0)

    # ---------------------------------------------------------
    # Contradiction Detection
    # ---------------------------------------------------------
    def _check_contradictions(self, engine: "CognitiveEngine", new_fact: Fact):
        """
        If a fact with opposite polarity exists, log a contradiction.
        """
        opposite_key = ("neg:" if new_fact.polarity == 1 else "pos:") + new_fact.to_text(include_polarity=False)

        if opposite_key in engine.semantic.fact_map:
            engine.meta.add_contradiction(new_fact, engine.semantic.fact_map[opposite_key])
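
# Worked confidence propagation example (comments only):
# _propagate_confidence multiplies the rule's confidence by each matched
# condition fact's confidence. With hot_things_burn_rule (0.9) firing on
# "stove is hot" (0.9), the derived "touching_stove causes burn" gets
#   0.9 * 0.9 = 0.81,
# clamped to [0, 1]. Longer proof chains therefore decay multiplicatively.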


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 6 / 10
# Inductive Engine + Analogical Engine
# ==========================

class InductiveEngine(ReasoningEngineBase):
    """
    Learns new rules from repeated relational patterns.
    Example:
        If we see:
            fire is hot
            touching_fire causes burn
            stove is hot
            touching_stove causes burn
        → induce the rule: hot things burn.
    """
    name = "inductive"

    def reason_forward(self, engine: "CognitiveEngine") -> Tuple[List[Fact], List[str], float]:
        trace: List[str] = []
        new_facts: List[Fact] = []
        avg_conf = 0.0
        count = 0

        facts = engine.working.all_facts()

        # Collect patterns
        hot_map = {}
        burn_map = {}

        for f in facts:
            if f.predicate == "is" and f.obj == "hot" and f.polarity == 1:
                hot_map[f.subject] = f
            if f.predicate == "causes" and f.obj == "burn" and f.subject.startswith("touching_") and f.polarity == 1:
                x = f.subject.replace("touching_", "")
                burn_map[x] = f

        # Look for a repeated pattern
        common = set(hot_map.keys()) & set(burn_map.keys())

        if len(common) >= 2:
            # Induce rule
            rule = Rule(
                name="induced_hot_things_burn_rule",
                conditions=[RulePattern(subject="?x", predicate="is", obj="hot", polarity=1)],
                conclusion=RulePattern(subject="touching_?x", predicate="causes", obj="burn", polarity=1),
                confidence=0.8,
                source="induced",
            )

            if rule.name not in [r.name for r in engine.rules]:
                engine.rules.append(rule)
                msg = "[Induction] Learned rule: if ?x is hot → touching_?x causes burn"
                trace.append(msg)
                avg_conf = 0.8
                count = 1

        return new_facts, trace, avg_conf

    def reason_backward(self, engine: "CognitiveEngine", goal: RulePattern):
        # Induction is forward-only
        return [], [], 0.0


# ==========================
# Analogical Engine
# ==========================

class AnalogicalEngine(ReasoningEngineBase):
    """
    Structural analogy + embeddings.
    Example:
        fire is hot
        touching_fire causes burn
        stove is hot
        → touching_stove causes burn (by analogy)
    """
    name = "analogical"

    def reason_forward(self, engine: "CognitiveEngine") -> Tuple[List[Fact], List[str], float]:
        trace = []
        new_facts = []
        avg_conf = 0.0
        count = 0

        facts = engine.working.all_facts()

        hot_subjects = [f for f in facts if f.predicate == "is" and f.obj == "hot" and f.polarity == 1]
        burn_facts = [f for f in facts if f.predicate == "causes" and f.obj == "burn" and f.subject.startswith("touching_")]

        for hf in hot_subjects:
            for bf in burn_facts:
                x = bf.subject.replace("touching_", "")
                if x == hf.subject:
                    # Find analogous subjects
                    for hf2 in hot_subjects:
                        if hf2.subject == hf.subject:
                            continue

                        sim = util.cos_sim(hf.embedding, hf2.embedding).item()
                        if sim > 0.6:
                            concl = Fact(
                                subject=f"touching_{hf2.subject}",
                                predicate="causes",
                                obj="burn",
                                polarity=1,
                                confidence=0.7 * sim,
                                verified=False,
                                source="analogical",
                            )

                            if not engine.working.has_fact(concl):
                                new_facts.append(concl)
                                msg = (
                                    f"[Analogy] From {hf.subject}~{hf2.subject} and {bf.to_text()} "
                                    f"→ {concl.to_text()} (sim={sim:.2f})"
                                )
                                trace.append(msg)
                                avg_conf += concl.confidence
                                count += 1

        if count > 0:
            avg_conf /= count
        else:
            avg_conf = 0.0

        return new_facts, trace, avg_conf

    def reason_backward(self, engine: "CognitiveEngine", goal: RulePattern):
        # Analogy is forward-only
        return [], [], 0.0
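
# Worked analogy example (comments only; the similarity value is hypothetical):
# with "fire is hot", "touching_fire causes burn", and "stove is hot" in
# working memory, the engine compares the embeddings of the two hot facts.
# If their cosine similarity were, say, 0.8 (> 0.6), it would propose
# "touching_stove causes burn" with confidence 0.7 * 0.8 = 0.56,
# marked verified=False because it is a guess, not a deduction.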


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 7 / 10
# Counterfactual Engine + Meta‑Controller + CognitiveEngine
# ==========================

class CounterfactualEngine(ReasoningEngineBase):
    """
    Hybrid counterfactual reasoning:
    - Symbolic intervention: replace a fact and re-run reasoning
    - Causal-ish graph reasoning via semantic memory
    """
    name = "counterfactual"

    def reason_forward(self, engine: "CognitiveEngine"):
        # Counterfactuals are not automatically generated
        return [], [], 0.0

    def reason_backward(self, engine: "CognitiveEngine", goal: RulePattern):
        # Counterfactuals require an explicit user request
        return [], [], 0.0

    def simulate(self, engine: "CognitiveEngine", intervention_fact: Fact) -> Dict[str, Any]:
        """
        Perform a symbolic intervention:
        - Temporarily add the fact
        - Re-run forward reasoning
        - Observe the differences
        """
        temp_engine = engine.clone()

        temp_engine.add_fact(intervention_fact)
        temp_engine.reason_forward_until_fixpoint(max_iterations=3)

        return {
            "intervention": intervention_fact.to_text(),
            "new_facts": [f.to_text() for f in temp_engine.working.all_facts()],
        }
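
# Usage sketch (comments only; the intervention fact is a hypothetical example):
#   cf = CounterfactualEngine()
#   result = cf.simulate(engine, Fact("ice", "is", "hot", source="counterfactual"))
#   # result["new_facts"] would now include rule-derived consequences such as
#   # "touching_ice causes burn", without touching the real engine's memory,
#   # because simulate() runs on a clone.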


# ==========================
# Meta-Controller
# ==========================

class MetaController:
    """
    Chooses which reasoning engines to trust based on:
    - Past performance (engine_scores)
    - Confidence of recent outputs
    """

    def __init__(self, meta_memory: MetaMemory):
        self.meta = meta_memory

    def choose_engines_forward(self, engines: List[ReasoningEngineBase]) -> List[ReasoningEngineBase]:
        scored = []
        for e in engines:
            score = self.meta.engine_scores.get(e.name, 0.5)
            scored.append((score, e))
        scored.sort(key=lambda x: x[0], reverse=True)
        return [e for _, e in scored]

    def choose_engines_backward(self, engines: List[ReasoningEngineBase]) -> List[ReasoningEngineBase]:
        return self.choose_engines_forward(engines)


# ==========================
# Cognitive Engine (Core Brain)
# ==========================

class CognitiveEngine:
    """
    The central orchestrator:
    - Holds all memory systems
    - Holds all reasoning engines
    - Runs forward/backward reasoning
    - Handles contradictions
    """

    def __init__(self):
        # Memory systems
        self.sensory = SensoryMemory()
        self.working = WorkingMemory()
        self.semantic = SemanticMemory()
        self.episodic = EpisodicMemory()
        self.procedural = ProceduralMemory()
        self.meta = MetaMemory()

        # Reasoning engines
        self.engines: List[ReasoningEngineBase] = [
            DeductiveEngine(),
            InductiveEngine(),
            AnalogicalEngine(),
            CounterfactualEngine(),
        ]

        # Meta-controller
        self.meta_controller = MetaController(self.meta)

        # Rule store
        self.rules: List[Rule] = []

    # -------------------------
    # Fact & Rule Management
    # -------------------------

    def add_fact(self, fact: Fact):
        """
        Add a fact to working + semantic memory.
        Check for contradictions.
        """
        # Check for contradictions
        opposite_key = ("neg:" if fact.polarity == 1 else "pos:") + fact.to_text(include_polarity=False)
        if opposite_key in self.semantic.fact_map:
            self.meta.add_contradiction(fact, self.semantic.fact_map[opposite_key])

        self.working.add_fact(fact)
        self.semantic.add_fact(fact)

    def add_rule(self, rule: Rule):
        self.rules.append(rule)

    # -------------------------
    # Forward Reasoning Loop
    # -------------------------

    def reason_forward_until_fixpoint(self, max_iterations: int = 5):
        """
        Run forward reasoning until no new facts are produced.
        """
        for _ in range(max_iterations):
            any_new = False

            ordered_engines = self.meta_controller.choose_engines_forward(self.engines)

            for engine in ordered_engines:
                new_facts, trace, avg_conf = engine.reason_forward(self)

                if new_facts:
                    any_new = True
                    for f in new_facts:
                        self.add_fact(f)

                for t in trace:
                    self.meta.log(engine.name, t, avg_conf if avg_conf > 0 else 0.5)

            if not any_new:
                break

    # -------------------------
    # Backward Reasoning
    # -------------------------

    def reason_backward(self, goal: RulePattern) -> Tuple[List[Fact], List[str], float]:
        ordered_engines = self.meta_controller.choose_engines_backward(self.engines)

        best_facts = []
        best_trace = []
        best_conf = 0.0

        for engine in ordered_engines:
            facts, trace, conf = engine.reason_backward(self, goal)
            if facts and conf > best_conf:
                best_facts = facts
                best_trace = trace
                best_conf = conf

        return best_facts, best_trace, best_conf

    # -------------------------
    # Cloning (for counterfactuals)
    # -------------------------

    def clone(self) -> "CognitiveEngine":
        """
        Create a deep-ish clone of the engine for counterfactual simulation.
        """
        new = CognitiveEngine()

        # Copy facts
        for f in self.working.all_facts():
            new.add_fact(f)

        # Copy rules
        for r in self.rules:
            new.add_rule(r)

        return new
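
# Usage sketch (comments only; the fact and rule are hypothetical examples):
#   engine = CognitiveEngine()
#   engine.add_fact(Fact("fire", "is", "hot"))
#   engine.add_rule(Rule(
#       name="demo",
#       conditions=[RulePattern("?x", "is", "hot")],
#       conclusion=RulePattern("touching_?x", "causes", "burn"),
#   ))
#   engine.reason_forward_until_fixpoint()
#   # working memory now also contains "touching_fire causes burn"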


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 8 / 10
# Natural Language Parser (Hybrid)
# ==========================

class NLParser:
    """
    Hybrid natural language parser:
    - Rule-based parsing for simple patterns
    - Embedding-based fallback
    - Negation detection
    - Probabilistic modifier extraction
    """

    def __init__(self, engine: CognitiveEngine):
        self.engine = engine

    # ---------------------------------------------------------
    # Assertion Parsing (creates Facts)
    # ---------------------------------------------------------
    def parse_assertion(self, text: str) -> Optional[Fact]:
        """
        Convert natural language assertions into structured Facts.
        Handles:
        - "X is Y"
        - "X causes Y"
        - Negation ("not", "never", etc.)
        - Probabilistic modifiers ("usually", "rarely")
        """
        t = normalize_text(text)
        neg = contains_negation(t)
        polarity = -1 if neg else 1
        prob = extract_prob_modifier(t)

        # Remove negation words for cleaner parsing
        t_clean = re.sub(r"\bnot\b|\bnever\b|\bdoesnt\b|\bisnt\b|\barent\b", "", t).strip()

        # Pattern: "x is y"
        m = re.match(r"(.+?) is (.+)", t_clean)
        if m:
            subj = m.group(1).strip()
            obj = m.group(2).strip()
            return Fact(
                subject=subj,
                predicate="is",
                obj=obj,
                polarity=polarity,
                confidence=prob,
                source="nl_input",
            )

        # Pattern: "x causes y"
        m = re.match(r"(.+?) causes (.+)", t_clean)
        if m:
            subj = m.group(1).strip()
            obj = m.group(2).strip()
            return Fact(
                subject=subj,
                predicate="causes",
                obj=obj,
                polarity=polarity,
                confidence=prob,
                source="nl_input",
            )

        # Fallback: treat as a descriptive fact
        return Fact(
            subject=t_clean,
            predicate="describes",
            obj=None,
            polarity=polarity,
            confidence=prob,
            source="nl_input",
        )

    # ---------------------------------------------------------
    # Query Parsing (creates RulePattern)
    # ---------------------------------------------------------
    def parse_query_to_goal(self, text: str) -> RulePattern:
        """
        Convert natural language questions into RulePatterns.
        Handles:
        - "Is X Y?"
        - "Does X cause Y?"
        - Negation ("not", "never")
        """
        t = normalize_text(text)
        neg = contains_negation(t)
        polarity = -1 if neg else 1

        # Remove negation words for cleaner parsing
        t_clean = re.sub(r"\bnot\b|\bnever\b|\bdoesnt\b|\bisnt\b|\barent\b", "", t).strip()

        # Drop the trailing question mark: normalize_text keeps "?", and a
        # greedy (.+) group would otherwise capture it into the object token
        # ("dangerous?" would never match the stored fact object "dangerous").
        t_clean = t_clean.rstrip("?").strip()

        # Pattern: "is x y?"
        m = re.match(r"is (.+?) (.+)", t_clean)
        if m:
            subj = m.group(1).strip()
            obj = m.group(2).strip()
            return RulePattern(
                subject=subj,
                predicate="is",
                obj=obj,
                polarity=polarity,
            )

        # Pattern: "does x cause y?"
        m = re.match(r"does (.+?) cause (.+)", t_clean)
        if m:
            subj = m.group(1).strip()
            obj = m.group(2).strip()
            return RulePattern(
                subject=subj,
                predicate="causes",
                obj=obj,
                polarity=polarity,
            )

        # Fallback
        return RulePattern(
            subject=t_clean,
            predicate="describes",
            obj=None,
            polarity=polarity,
        )
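
# Parsing examples (comments only):
#   parse_assertion("fire is hot")
#   # -> Fact(subject="fire", predicate="is", obj="hot", polarity=1)
#   parse_assertion("ice is not hot")
#   # -> polarity=-1: negation is detected first, then "not" is stripped
#   parse_query_to_goal("is stove dangerous?")
#   # -> RulePattern(subject="stove", predicate="is", obj="dangerous")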


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 9 / 10
# Dataset Ingestion (Omniscience)
# ==========================

def ingest_omniscience(engine: CognitiveEngine, max_items: int = 200):
    """
    Ingests the ArtificialAnalysis/AA-Omniscience-Public dataset
    into sensory, semantic, and episodic memory.

    Each dataset item typically contains:
    - "input": natural language prompt
    - "output": natural language answer
    - "metadata": optional
    """

    print("Loading Omniscience dataset...")
    ds = load_dataset("ArtificialAnalysis/AA-Omniscience-Public")
    data = ds["train"]

    parser = NLParser(engine)
    count = 0

    for item in data:
        text_in = item.get("input", "")
        text_out = item.get("output", "")

        if not text_in and not text_out:
            continue

        # -------------------------
        # Sensory Memory
        # -------------------------
        combined_text = f"{text_in} -> {text_out}"
        normalized = normalize_text(combined_text)
        emb = embedding_service.encode(normalized)
        engine.sensory.add_entry(combined_text, embedding=emb)

        # -------------------------
        # Structured Fact Extraction
        # -------------------------
        fact_in = parser.parse_assertion(text_in) if text_in else None
        fact_out = parser.parse_assertion(text_out) if text_out else None

        # Add to semantic + working memory
        local_facts = []
        if fact_in:
            engine.add_fact(fact_in)
            local_facts.append(fact_in)
        if fact_out:
            engine.add_fact(fact_out)
            local_facts.append(fact_out)

        # -------------------------
        # Episodic Memory
        # -------------------------
        if local_facts:
            engine.episodic.add_episode(
                description="omniscience_item",
                facts=local_facts,
            )

        count += 1
        if count >= max_items:
            break

    print(f"Ingested {count} items from AA-Omniscience-Public dataset.")
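
# Usage sketch (comments only): ingestion is bounded by max_items, so a quick
# smoke test can stay small, e.g.
#   engine = CognitiveEngine()
#   ingest_omniscience(engine, max_items=10)
#   len(engine.sensory.entries)   # -> 10, assuming the split has >= 10 rows
# Note: the "input"/"output" field names above are what this file assumes;
# adjust them if the dataset schema differs.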


# ==========================
# AURA v7 — Neuro‑Symbolic Hybrid Brain
# Chunk 10 / 10
# AURA Interface + main()
# ==========================

class AURAInterface:
    """
    High-level interface for interacting with AURA:
    - assert_fact(text)
    - query(text)
    """

    def __init__(self, engine: CognitiveEngine):
        self.engine = engine
        self.parser = NLParser(engine)

    # ---------------------------------------------------------
    # Assertions
    # ---------------------------------------------------------
    def assert_fact(self, text: str) -> Fact:
        fact = self.parser.parse_assertion(text)
        self.engine.add_fact(fact)
        return fact

    # ---------------------------------------------------------
    # Queries
    # ---------------------------------------------------------
    def query(self, text: str) -> Dict[str, Any]:
        normalized = normalize_text(text)
        query_embedding = embedding_service.encode(normalized)

        # Parse into a structured goal
        goal = self.parser.parse_query_to_goal(text)

        # If negated, require explicit proof
        if goal.polarity == -1:
            facts, trace, conf = self.engine.reason_backward(goal)
            if facts:
                best = max(facts, key=lambda f: f.confidence)
                return {
                    "response": "Yes",
                    "explanation": f"Proved negated fact: '{best.to_text()}'",
                    "confidence": float(best.confidence),
                    "trace": trace + [e.message for e in self.engine.meta.recent_trace()],
                }
            else:
                return {
                    "response": "No",
                    "explanation": "No rule or fact supports the negated claim.",
                    "confidence": 0.0,
                    "trace": [e.message for e in self.engine.meta.recent_trace()],
                }

        # Try backward reasoning first
        facts, trace, conf = self.engine.reason_backward(goal)
        if facts:
            best = max(facts, key=lambda f: f.confidence)
            return {
                "response": "Yes" if best.polarity == 1 else "No",
                "explanation": f"Proved: '{best.to_text()}' via backward reasoning.",
                "confidence": float(best.confidence),
                "trace": trace + [e.message for e in self.engine.meta.recent_trace()],
            }

        # Semantic memory fallback
        fact = self.engine.semantic.search_fact(query_embedding, threshold=0.6)
        if fact:
            return {
                "response": "Yes" if fact.polarity == 1 else "No",
                "explanation": f"Found in semantic memory: '{fact.to_text()}'",
                "confidence": float(fact.confidence),
                "trace": [e.message for e in self.engine.meta.recent_trace()],
            }

        # Sensory memory fallback
        best_idx = None
        best_score = 0.0
        for idx, emb in enumerate(self.engine.sensory.entry_embeddings):
            sim = util.cos_sim(query_embedding, emb).item()
            if sim > best_score:
                best_score = sim
                best_idx = idx

        if best_idx is not None and best_score >= 0.35:
            best_entry = self.engine.sensory.entries[best_idx]
            return {
                "response": "Possibly",
                "explanation": f"Sensory memory match: {best_entry[:200]}...",
                "confidence": float(best_score),
                "trace": [e.message for e in self.engine.meta.recent_trace()],
            }

        # Unknown
        return {
            "response": "Unknown",
            "explanation": f"No information found for '{text}'",
            "confidence": 0.0,
            "trace": [e.message for e in self.engine.meta.recent_trace()],
        }
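
# Example return shape (comments only; the values are illustrative):
#   api.query("is stove dangerous?")
#   # -> {"response": "Yes",
#   #     "explanation": "Proved: 'stove is dangerous' via backward reasoning.",
#   #     "confidence": 0.73,
#   #     "trace": ["[Backward-Deduction] ...", ...]}
# The fallbacks degrade gracefully: proof -> semantic match -> sensory match
# -> "Unknown", with the confidence reflecting whichever path answered.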


# ==========================
# Core Knowledge Initialization
# ==========================

def build_core_knowledge(engine: CognitiveEngine):
    """
    Load basic commonsense knowledge + rules.
    """
    fire_hot = Fact(subject="fire", predicate="is", obj="hot", confidence=0.95, polarity=1, source="core")
    stove_hot = Fact(subject="stove", predicate="is", obj="hot", confidence=0.9, polarity=1, source="core")
    touching_fire_burn = Fact(subject="touching_fire", predicate="causes", obj="burn", confidence=0.95, polarity=1, source="core")

    engine.add_fact(fire_hot)
    engine.add_fact(stove_hot)
    engine.add_fact(touching_fire_burn)

    # Manual rules
    rule1 = Rule(
        name="hot_things_burn_rule",
        conditions=[RulePattern(subject="?x", predicate="is", obj="hot", polarity=1)],
        conclusion=RulePattern(subject="touching_?x", predicate="causes", obj="burn", polarity=1),
        confidence=0.9,
        source="manual",
    )

    rule2 = Rule(
        name="burn_implies_danger_rule",
        conditions=[RulePattern(subject="touching_?x", predicate="causes", obj="burn", polarity=1)],
        conclusion=RulePattern(subject="?x", predicate="is", obj="dangerous", polarity=1),
        confidence=0.9,
        source="manual",
    )

    # Negation rule example
    rule3 = Rule(
        name="cold_things_do_not_burn_rule",
        conditions=[RulePattern(subject="?x", predicate="is", obj="cold", polarity=1)],
        conclusion=RulePattern(subject="touching_?x", predicate="causes", obj="burn", polarity=-1),
        confidence=0.9,
        source="manual",
    )

    engine.add_rule(rule1)
    engine.add_rule(rule2)
    engine.add_rule(rule3)
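
# Expected derivations (comments only): after reason_forward_until_fixpoint(),
# hot_things_burn_rule grounds ?x to both core subjects, adding
#   "touching_stove causes burn" (0.9 * 0.9 = 0.81),
# and burn_implies_danger_rule then chains to
#   "fire is dangerous" and "stove is dangerous".
# With two (x is hot / touching_x causes burn) pairs present, the
# InductiveEngine also induces its own copy of the hot-things-burn rule.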


# ==========================
# Main REPL
# ==========================

def main():
    engine = CognitiveEngine()
    api = AURAInterface(engine)

    # Load core knowledge
    build_core_knowledge(engine)
    engine.reason_forward_until_fixpoint(max_iterations=5)

    # Load Omniscience dataset
    ingest_omniscience(engine, max_items=50)

    print("\nAURA v7 ready. Type 'assert <statement>' to add facts, or ask questions.")
    print("Examples:")
    print("  - 'assert fire is hot'")
    print("  - 'assert ice is cold'")
    print("  - 'does touching fire cause burn?'")
    print("  - 'does touching ice cause burn?'")
    print("  - 'is stove dangerous?'")
    print("  - 'do hot things NOT burn with fire?'")
    print("Type 'exit' to quit.\n")

    while True:
        try:
            query = input("You: ")
        except EOFError:
            break

        if query.lower() in {"exit", "quit"}:
            print("Goodbye.")
            break

        # Assertions (prefixed with "assert ")
        if query.lower().startswith("assert "):
            fact = api.assert_fact(query[len("assert "):])
            engine.reason_forward_until_fixpoint(max_iterations=3)
            print(f"AURA: Asserted '{fact.to_text()}' (conf={fact.confidence:.2f})\n")
            continue

        # Queries
        response = api.query(query)
        print(f"AURA Response: {response['response']}")
        print(f"Explanation: {response['explanation']}")
        print(f"Confidence: {response['confidence']:.2f}")

        if response.get("trace"):
            print("Recent reasoning trace:")
            for line in response["trace"][-10:]:
                print("  -", line)
        print()


if __name__ == "__main__":
    main()