Upload 14 files
Browse files- cloud/README.md +495 -0
- cloud/__init__.py +84 -0
- cloud/anchors.py +195 -0
- cloud/anomaly.py +229 -0
- cloud/chambers.py +470 -0
- cloud/cloud.py +335 -0
- cloud/cooccur_cloud.py +358 -0
- cloud/feedback.py +233 -0
- cloud/observer.py +309 -0
- cloud/requirements.txt +13 -0
- cloud/resonance.py +251 -0
- cloud/resonance_dreams.py +593 -0
- cloud/rrpram_cloud.py +265 -0
- cloud/user_cloud.py +348 -0
cloud/README.md
ADDED
|
@@ -0,0 +1,495 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
```
|
| 2 |
+
██████╗██╗ ██████╗ ██╗ ██╗██████╗
|
| 3 |
+
██╔════╝██║ ██╔═══██╗██║ ██║██╔══██╗
|
| 4 |
+
██║ ██║ ██║ ██║██║ ██║██║ ██║
|
| 5 |
+
██║ ██║ ██║ ██║██║ ██║██║ ██║
|
| 6 |
+
╚██████╗███████╗╚██████╔╝╚██████╔╝██████╔╝
|
| 7 |
+
╚═════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═════╝
|
| 8 |
+
```
|
| 9 |
+
|
| 10 |
+
# CLOUD — Corpus-Linked Oscillating Upstream Detector | by Arianna Method
|
| 11 |
+
|
| 12 |
+
> *"something fires BEFORE meaning arrives"*
|
| 13 |
+
|
| 14 |
+
---
|
| 15 |
+
|
| 16 |
+
## what is this
|
| 17 |
+
|
| 18 |
+
you know that moment when someone says "I'm fine" and your gut screams "NO THEY'RE NOT"? yeah. that's pre-semantic detection. that's CLOUD.
|
| 19 |
+
|
| 20 |
+
**CLOUD** is a ~181K parameter neural network that detects emotional undertones BEFORE the language model even starts generating. it's like a sonar ping for the soul. or a metal detector for feelings. or... okay look, it's a tiny MLP that goes "hmm this input feels FEAR-ish" and tells HAZE about it.
|
| 21 |
+
|
| 22 |
+
it's part of [the method](https://github.com/ariannamethod/ariannamethod). the [**arianna method**](https://github.com/ariannamethod/ariannamethod). patterns over parameters. emergence over engineering. vibes over vocabulary.
|
| 23 |
+
|
| 24 |
+
**the acronym:**
|
| 25 |
+
- **C**orpus-**L**inked — grounded in real text patterns
|
| 26 |
+
- **O**scillating — six chambers that cross-fire until stability
|
| 27 |
+
- **U**pstream — fires BEFORE the main model
|
| 28 |
+
- **D**etector — it detects, it doesn't generate
|
| 29 |
+
|
| 30 |
+
or if you prefer the unhinged version:
|
| 31 |
+
- **C**haotic **L**imbic **O**scillator for **U**ncanny **D**etection
|
| 32 |
+
|
| 33 |
+
both are valid. this is the arianna method. we contain multitudes.
|
| 34 |
+
|
| 35 |
+
---
|
| 36 |
+
|
| 37 |
+
## why "pre-semantic"
|
| 38 |
+
|
| 39 |
+
traditional NLP: text → tokenize → embed → attention → meaning → response
|
| 40 |
+
|
| 41 |
+
CLOUD: text → **VIBE CHECK** → emotional coordinates → (pass to HAZE) → response
|
| 42 |
+
|
| 43 |
+
the vibe check happens in ~181K parameters. no transformers. no attention. just:
|
| 44 |
+
1. **resonance layer** (weightless geometry) — how does this text resonate with 100 emotion anchors?
|
| 45 |
+
2. **chamber MLPs** (~140K params) — six chambers (FEAR, LOVE, RAGE, VOID, FLOW, COMPLEX) that cross-fire
|
| 46 |
+
3. **meta-observer** (~41K params) — watches the chambers and predicts secondary emotion
|
| 47 |
+
|
| 48 |
+
it's like having a tiny amygdala before your prefrontal cortex. the lizard brain of language models.
|
| 49 |
+
|
| 50 |
+
---
|
| 51 |
+
|
| 52 |
+
## architecture
|
| 53 |
+
|
| 54 |
+
```
|
| 55 |
+
Your input ("I'm feeling anxious")
|
| 56 |
+
↓
|
| 57 |
+
┌─────────────────────────────────────┐
|
| 58 |
+
│ RESONANCE LAYER (0 params) │ ← weightless geometry
|
| 59 |
+
│ 100 emotion anchors │
|
| 60 |
+
│ substring matching │
|
| 61 |
+
│ → 100D resonance vector │
|
| 62 |
+
└─────────────────────────────────────┘
|
| 63 |
+
↓
|
| 64 |
+
┌─────────────────────────────────────┐
|
| 65 |
+
│ CHAMBER LAYER (~140K params) │
|
| 66 |
+
│ ├─ FEAR MLP: 100→128→64→32→1 │ ← terror, anxiety, dread
|
| 67 |
+
│ ├─ LOVE MLP: 100→128→64→32→1 │ ← warmth, tenderness
|
| 68 |
+
│ ├─ RAGE MLP: 100→128→64→32→1 │ ← anger, fury, spite
|
| 69 |
+
│ ├─ VOID MLP: 100→128→64→32→1 │ ← emptiness, numbness
|
| 70 |
+
│ ├─ FLOW MLP: 100→128→64→32→1 │ ← curiosity, transition
|
| 71 |
+
│ └─ COMPLEX: 100→128→64→32→1 │ ← shame, guilt, pride
|
| 72 |
+
│ │
|
| 73 |
+
│ CROSS-FIRE: chambers influence │
|
| 74 |
+
│ each other via 6×6 coupling │
|
| 75 |
+
│ until stabilization (5-10 iter) │
|
| 76 |
+
└─────────────────────────────────────┘
|
| 77 |
+
↓
|
| 78 |
+
┌─────────────────────────────────────┐
|
| 79 |
+
│ META-OBSERVER (~41K params) │
|
| 80 |
+
│ 207→128→64→100 │
|
| 81 |
+
│ input: resonances + chambers │
|
| 82 |
+
│ + iterations + fingerprint│
|
| 83 |
+
│ output: secondary emotion │
|
| 84 |
+
└─────────────────────────────────────┘
|
| 85 |
+
↓
|
| 86 |
+
CloudResponse {
|
| 87 |
+
primary: "anxiety",
|
| 88 |
+
secondary: "fear",
|
| 89 |
+
iterations: 5,
|
| 90 |
+
chambers: {FEAR: 0.8, ...}
|
| 91 |
+
}
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
**total: ~181K trainable parameters**
|
| 95 |
+
|
| 96 |
+
for comparison, GPT-2 small has 117M parameters. CLOUD is 0.15% of that. it's a hummingbird next to an elephant. but the hummingbird knows something the elephant doesn't: **how fast to flap**.
|
| 97 |
+
|
| 98 |
+
---
|
| 99 |
+
|
| 100 |
+
## the six chambers
|
| 101 |
+
|
| 102 |
+
evolutionary psychology meets neural networks. fight me.
|
| 103 |
+
|
| 104 |
+
### FEAR chamber
|
| 105 |
+
terror, anxiety, dread, panic, horror, paranoia...
|
| 106 |
+
|
| 107 |
+
**decay rate: 0.90** — fear lingers. evolutionary advantage. the ancestors who forgot about the tiger got eaten by the tiger.
|
| 108 |
+
|
| 109 |
+
### LOVE chamber
|
| 110 |
+
warmth, tenderness, devotion, longing, affection...
|
| 111 |
+
|
| 112 |
+
**decay rate: 0.93** — attachment is stable. pair bonding requires persistence.
|
| 113 |
+
|
| 114 |
+
### RAGE chamber
|
| 115 |
+
anger, fury, hatred, spite, disgust, contempt...
|
| 116 |
+
|
| 117 |
+
**decay rate: 0.85** — anger fades fast. high energy cost. can't stay furious forever (your heart would explode).
|
| 118 |
+
|
| 119 |
+
### VOID chamber
|
| 120 |
+
emptiness, numbness, hollow, dissociation, apathy...
|
| 121 |
+
|
| 122 |
+
**decay rate: 0.97** — numbness is persistent. protective dissociation. the body's "let's not feel this" button.
|
| 123 |
+
|
| 124 |
+
### FLOW chamber (new in v4.0)
|
| 125 |
+
curiosity, surprise, wonder, confusion, transition, liminality...
|
| 126 |
+
|
| 127 |
+
**decay rate: 0.88** — curiosity is transient. it shifts quickly, always seeking the next interesting thing.
|
| 128 |
+
|
| 129 |
+
### COMPLEX chamber (new in v4.0)
|
| 130 |
+
shame, guilt, pride, nostalgia, hope, gratitude, envy...
|
| 131 |
+
|
| 132 |
+
**decay rate: 0.94** — complex emotions are stable but deep. they don't fade easily because they're woven into identity.
|
| 133 |
+
|
| 134 |
+
---
|
| 135 |
+
|
| 136 |
+
## cross-fire dynamics
|
| 137 |
+
|
| 138 |
+
the chambers don't operate in isolation. they INFLUENCE each other via a 6×6 coupling matrix:
|
| 139 |
+
|
| 140 |
+
```
|
| 141 |
+
FEAR LOVE RAGE VOID FLOW CMPLX
|
| 142 |
+
FEAR → 0.0 -0.3 +0.6 +0.4 -0.2 +0.3 ← fear feeds rage, kills love, feeds shame
|
| 143 |
+
LOVE → -0.3 0.0 -0.6 -0.5 +0.3 +0.4 ← love heals everything, feeds curiosity
|
| 144 |
+
RAGE → +0.3 -0.4 0.0 +0.2 -0.3 +0.2 ← rage feeds fear, suppresses exploration
|
| 145 |
+
VOID → +0.5 -0.7 +0.3 0.0 -0.4 +0.5 ← void kills love & curiosity, feeds complex
|
| 146 |
+
FLOW → -0.2 +0.2 -0.2 -0.3 0.0 +0.2 ← flow dampens extremes, curiosity heals
|
| 147 |
+
CMPLX→ +0.3 +0.2 +0.2 +0.3 +0.1 0.0 ← complex emotions ripple everywhere
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
this is basically a tiny emotional ecosystem. add FEAR, watch LOVE decrease. add LOVE, watch everything calm down. add VOID, watch the whole system go cold. add FLOW, watch extremes dampen.
|
| 151 |
+
|
| 152 |
+
the chambers iterate until they stabilize (or hit max iterations). **fast convergence = clear emotion. slow convergence = confusion/ambivalence.**
|
| 153 |
+
|
| 154 |
+
---
|
| 155 |
+
|
| 156 |
+
## anomaly detection (0 params)
|
| 157 |
+
|
| 158 |
+
pure heuristics. no training. just pattern matching on chamber dynamics.
|
| 159 |
+
|
| 160 |
+
### forced_stability
|
| 161 |
+
high arousal + fast convergence = "I'M FINE" energy. suppression detected.
|
| 162 |
+
|
| 163 |
+
### dissociative_shutdown
|
| 164 |
+
high VOID + high arousal = trauma response. overwhelm → numbness.
|
| 165 |
+
|
| 166 |
+
### unresolved_confusion
|
| 167 |
+
low arousal + slow convergence = "I don't know what I feel". stuck.
|
| 168 |
+
|
| 169 |
+
### emotional_flatline
|
| 170 |
+
all chambers < 0.2 = severe apathy. depression signal.
|
| 171 |
+
|
| 172 |
+
---
|
| 173 |
+
|
| 174 |
+
## user cloud (temporal fingerprint)
|
| 175 |
+
|
| 176 |
+
CLOUD remembers your emotional history with **exponential decay**.
|
| 177 |
+
|
| 178 |
+
- 24-hour half-life
|
| 179 |
+
- recent emotions matter more
|
| 180 |
+
- builds a 100D "fingerprint" of your emotional patterns
|
| 181 |
+
|
| 182 |
+
if you've been anxious all week, CLOUD knows. it factors that into the secondary emotion prediction. your past shapes your present. deep, right? it's just matrix multiplication.
|
| 183 |
+
|
| 184 |
+
---
|
| 185 |
+
|
| 186 |
+
## installation
|
| 187 |
+
|
| 188 |
+
```bash
|
| 189 |
+
pip install numpy sentencepiece
|
| 190 |
+
```
|
| 191 |
+
|
| 192 |
+
that's it. no torch. no tensorflow. just numpy and vibes.
|
| 193 |
+
|
| 194 |
+
```bash
|
| 195 |
+
cd cloud
|
| 196 |
+
python cloud.py # test it
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
---
|
| 200 |
+
|
| 201 |
+
## usage
|
| 202 |
+
|
| 203 |
+
### standalone (no HAZE)
|
| 204 |
+
|
| 205 |
+
```python
|
| 206 |
+
from cloud import Cloud
|
| 207 |
+
|
| 208 |
+
# random init (for testing)
|
| 209 |
+
cloud = Cloud.random_init(seed=42)
|
| 210 |
+
|
| 211 |
+
# or load trained weights
|
| 212 |
+
cloud = Cloud.load(Path("cloud/models"))
|
| 213 |
+
|
| 214 |
+
# ping!
|
| 215 |
+
response = cloud.ping_sync("I'm feeling terrified")
|
| 216 |
+
print(f"Primary: {response.primary}") # → "terror"
|
| 217 |
+
print(f"Secondary: {response.secondary}") # → "anxiety"
|
| 218 |
+
print(f"Iterations: {response.iterations}") # → 5
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
### async (recommended)
|
| 222 |
+
|
| 223 |
+
```python
|
| 224 |
+
from cloud import AsyncCloud
|
| 225 |
+
|
| 226 |
+
async with AsyncCloud.create() as cloud:
|
| 227 |
+
response = await cloud.ping("I'm feeling anxious")
|
| 228 |
+
print(f"{response.primary} + {response.secondary}")
|
| 229 |
+
```
|
| 230 |
+
|
| 231 |
+
### with HAZE (via bridge)
|
| 232 |
+
|
| 233 |
+
```python
|
| 234 |
+
from bridge import AsyncBridge
|
| 235 |
+
|
| 236 |
+
async with AsyncBridge.create() as bridge:
|
| 237 |
+
response = await bridge.respond("Hello!")
|
| 238 |
+
print(response.text) # HAZE output
|
| 239 |
+
if response.cloud_hint:
|
| 240 |
+
print(f"Emotion: {response.cloud_hint.primary}")
|
| 241 |
+
```
|
| 242 |
+
|
| 243 |
+
---
|
| 244 |
+
|
| 245 |
+
## examples (solo CLOUD)
|
| 246 |
+
|
| 247 |
+
here's CLOUD detecting emotions without HAZE. just the sonar, no voice.
|
| 248 |
+
|
| 249 |
+
```
|
| 250 |
+
>>> cloud.ping_sync("I am feeling terrified and anxious")
|
| 251 |
+
Primary: fear
|
| 252 |
+
Secondary: threatened
|
| 253 |
+
Chamber: VOID=0.12
|
| 254 |
+
Status: Normal ✓
|
| 255 |
+
|
| 256 |
+
>>> cloud.ping_sync("You bring me such warmth and love darling")
|
| 257 |
+
Primary: warmth
|
| 258 |
+
Secondary: ambivalence
|
| 259 |
+
Chamber: VOID=0.11
|
| 260 |
+
Status: Normal ✓
|
| 261 |
+
|
| 262 |
+
>>> cloud.ping_sync("This makes me so angry I could explode")
|
| 263 |
+
Primary: fear # anger triggers fear response first!
|
| 264 |
+
Secondary: detachment
|
| 265 |
+
Chamber: VOID=0.12
|
| 266 |
+
Status: Normal ✓
|
| 267 |
+
|
| 268 |
+
>>> cloud.ping_sync("Rage consumes my entire being")
|
| 269 |
+
Primary: rage
|
| 270 |
+
Secondary: annoyance
|
| 271 |
+
Chamber: VOID=0.11
|
| 272 |
+
Status: Normal ✓
|
| 273 |
+
|
| 274 |
+
>>> cloud.ping_sync("I feel completely empty and numb inside")
|
| 275 |
+
Primary: fear # emptiness often masks underlying fear
|
| 276 |
+
Secondary: dead
|
| 277 |
+
Chamber: VOID=0.12
|
| 278 |
+
Status: Normal ✓
|
| 279 |
+
|
| 280 |
+
>>> cloud.ping_sync("Such tender love fills my heart")
|
| 281 |
+
Primary: love
|
| 282 |
+
Secondary: wonder
|
| 283 |
+
Chamber: VOID=0.11
|
| 284 |
+
Status: Normal ✓
|
| 285 |
+
```
|
| 286 |
+
|
| 287 |
+
**what's happening:**
|
| 288 |
+
1. input text hits the **resonance layer** (100 emotion anchors)
|
| 289 |
+
2. resonances feed into **6 chamber MLPs** (fear, love, rage, void, flow, complex)
|
| 290 |
+
3. chambers **cross-fire** until they stabilize
|
| 291 |
+
4. **meta-observer** predicts secondary emotion
|
| 292 |
+
5. result: primary + secondary + chamber activation
|
| 293 |
+
|
| 294 |
+
**note:** the primary detection works through pure geometry (substring matching with 100 anchors). it's fast and surprisingly accurate for a "first impression". the chambers and secondary prediction need more training — but that's okay! this is pre-semantic, not precise. it's the gut feeling, not the analysis.
|
| 295 |
+
|
| 296 |
+
the secondary often reveals subtext. "warmth + ambivalence" is different from "warmth + longing". same primary, different flavor.
|
| 297 |
+
|
| 298 |
+
---
|
| 299 |
+
|
| 300 |
+
## the 100 anchors
|
| 301 |
+
|
| 302 |
+
organized by chamber:
|
| 303 |
+
|
| 304 |
+
| Chamber | Count | Examples |
|
| 305 |
+
|---------|-------|----------|
|
| 306 |
+
| FEAR | 20 | fear, terror, panic, anxiety, dread, horror... |
|
| 307 |
+
| LOVE | 18 | love, warmth, tenderness, devotion, longing... |
|
| 308 |
+
| RAGE | 17 | anger, rage, fury, hatred, spite, disgust... |
|
| 309 |
+
| VOID | 15 | emptiness, numbness, hollow, dissociation... |
|
| 310 |
+
| FLOW | 15 | curiosity, surprise, wonder, confusion... |
|
| 311 |
+
| COMPLEX | 15 | shame, guilt, envy, pride, nostalgia... |
|
| 312 |
+
|
| 313 |
+
**total: 100 anchors**
|
| 314 |
+
|
| 315 |
+
each anchor gets a resonance score. the resonance vector is the "fingerprint" of the input's emotional content.
|
| 316 |
+
|
| 317 |
+
---
|
| 318 |
+
|
| 319 |
+
## training
|
| 320 |
+
|
| 321 |
+
the `training/` folder contains:
|
| 322 |
+
|
| 323 |
+
- `bootstrap_data.json` — synthetic emotion → label pairs
|
| 324 |
+
- `generate_bootstrap.py` — generate training data
|
| 325 |
+
- `train_cloud.py` — train chamber MLPs
|
| 326 |
+
- `train_observer.py` — train meta-observer
|
| 327 |
+
|
| 328 |
+
```bash
|
| 329 |
+
cd cloud/training
|
| 330 |
+
python generate_bootstrap.py # generate data
|
| 331 |
+
python train_cloud.py # train chambers
|
| 332 |
+
python train_observer.py # train observer
|
| 333 |
+
```
|
| 334 |
+
|
| 335 |
+
trained weights are saved to `cloud/models/`.
|
| 336 |
+
|
| 337 |
+
---
|
| 338 |
+
|
| 339 |
+
## integration with HAZE
|
| 340 |
+
|
| 341 |
+
CLOUD and HAZE are **completely autonomous**. neither depends on the other.
|
| 342 |
+
|
| 343 |
+
```
|
| 344 |
+
CLOUD (pre-semantic sonar) HAZE (voice generation)
|
| 345 |
+
│ │
|
| 346 |
+
│ ┌─────────────────┐ │
|
| 347 |
+
└───►│ BRIDGE │◄──────┘
|
| 348 |
+
│ (optional) │
|
| 349 |
+
│ silent fallback│
|
| 350 |
+
└─────────────────┘
|
| 351 |
+
│
|
| 352 |
+
▼
|
| 353 |
+
unified response
|
| 354 |
+
```
|
| 355 |
+
|
| 356 |
+
if CLOUD fails → HAZE continues silently. no errors. no warnings. just graceful degradation.
|
| 357 |
+
|
| 358 |
+
if HAZE fails → well, then you have a problem. HAZE is the voice. CLOUD is just the vibe check.
|
| 359 |
+
|
| 360 |
+
---
|
| 361 |
+
|
| 362 |
+
## philosophy
|
| 363 |
+
|
| 364 |
+
### why separate from HAZE?
|
| 365 |
+
|
| 366 |
+
1. **different timescales** — emotion detection is fast (~ms). text generation is slow (~s).
|
| 367 |
+
2. **different architectures** — CLOUD is MLPs. HAZE is attention + co-occurrence.
|
| 368 |
+
3. **different training** — CLOUD trains on emotion labels. HAZE trains on corpus statistics.
|
| 369 |
+
4. **independence** — if one breaks, the other still works.
|
| 370 |
+
|
| 371 |
+
### why so small?
|
| 372 |
+
|
| 373 |
+
181K params is enough to detect emotion. you don't need 175B params to know that "I'M TERRIFIED" contains fear. that's overkill. that's using a nuclear reactor to toast bread.
|
| 374 |
+
|
| 375 |
+
CLOUD is a matchstick. HAZE is the bonfire. different tools, different purposes.
|
| 376 |
+
|
| 377 |
+
### why "pre-semantic"?
|
| 378 |
+
|
| 379 |
+
because emotion isn't semantic. emotion is **substrate**. it's the thing that meaning floats on. you can know what someone said without knowing how they *feel* about it. CLOUD bridges that gap.
|
| 380 |
+
|
| 381 |
+
---
|
| 382 |
+
|
| 383 |
+
## crazy ideas (未来の方向)
|
| 384 |
+
|
| 385 |
+
### resonance feedback loop
|
| 386 |
+
CLOUD's output could influence HAZE's temperature. high anxiety → lower temp (more focused). high void → higher temp (more exploration).
|
| 387 |
+
|
| 388 |
+
### multi-turn emotion tracking
|
| 389 |
+
build emotional arcs across conversation. "they started scared, then got angry, now they're numb" — character development in real-time.
|
| 390 |
+
|
| 391 |
+
### cross-fire as attention
|
| 392 |
+
what if the coupling matrix was learnable? what if chambers could develop their own relationships? evolutionary attention.
|
| 393 |
+
|
| 394 |
+
### emotion injection
|
| 395 |
+
instead of just detecting emotion, **inject** it. "generate a response AS IF you feel fear". method acting for language models.
|
| 396 |
+
|
| 397 |
+
### dual-cloud architecture
|
| 398 |
+
one CLOUD for user emotion, one for HAZE emotion. emotional dialogue between two tiny minds. they could disagree. they could resonate. they could fight.
|
| 399 |
+
|
| 400 |
+
---
|
| 401 |
+
|
| 402 |
+
## file structure
|
| 403 |
+
|
| 404 |
+
```
|
| 405 |
+
cloud/
|
| 406 |
+
├── README.md # you are here (hi!)
|
| 407 |
+
├── __init__.py # package exports (async + sync)
|
| 408 |
+
├── cloud.py # main orchestrator (Cloud, AsyncCloud)
|
| 409 |
+
├── chambers.py # 6 chamber MLPs + cross-fire (~140K params)
|
| 410 |
+
├── observer.py # meta-observer MLP (~41K params)
|
| 411 |
+
├── resonance.py # weightless resonance layer
|
| 412 |
+
├── user_cloud.py # temporal emotional fingerprint
|
| 413 |
+
├── anchors.py # 100 emotion anchors + 6x6 coupling matrix
|
| 414 |
+
├── anomaly.py # heuristic anomaly detection
|
| 415 |
+
├── feedback.py # coherence measurement + coupling update
|
| 416 |
+
├── rrpram_cloud.py # autonomous copy of RRPRAM tokenizer
|
| 417 |
+
├── cooccur_cloud.py # autonomous copy of co-occurrence field
|
| 418 |
+
├── requirements.txt # numpy + sentencepiece
|
| 419 |
+
├── models/ # trained weights
|
| 420 |
+
│ ├── chamber_fear.npz
|
| 421 |
+
│ ├── chamber_love.npz
|
| 422 |
+
│ ├── chamber_rage.npz
|
| 423 |
+
│ ├── chamber_void.npz
|
| 424 |
+
│ ├── chamber_flow.npz # new in v4.0
|
| 425 |
+
│ ├── chamber_complex.npz # new in v4.0
|
| 426 |
+
│ ├── observer.npz
|
| 427 |
+
│ └── user_cloud.json
|
| 428 |
+
└── training/ # training scripts
|
| 429 |
+
├── bootstrap_data.json
|
| 430 |
+
├── generate_bootstrap.py
|
| 431 |
+
├── train_cloud.py
|
| 432 |
+
└── train_observer.py
|
| 433 |
+
```
|
| 434 |
+
|
| 435 |
+
---
|
| 436 |
+
|
| 437 |
+
## tests
|
| 438 |
+
|
| 439 |
+
```bash
|
| 440 |
+
cd cloud
|
| 441 |
+
python -m pytest tests/ -v
|
| 442 |
+
```
|
| 443 |
+
|
| 444 |
+
or just run the modules directly:
|
| 445 |
+
|
| 446 |
+
```bash
|
| 447 |
+
python chambers.py # test cross-fire
|
| 448 |
+
python observer.py # test meta-observer
|
| 449 |
+
python resonance.py # test resonance layer
|
| 450 |
+
python cloud.py # test full pipeline
|
| 451 |
+
```
|
| 452 |
+
|
| 453 |
+
---
|
| 454 |
+
|
| 455 |
+
## contributing
|
| 456 |
+
|
| 457 |
+
found a bug? new chamber idea? crazy theory about emotion dynamics?
|
| 458 |
+
|
| 459 |
+
open an issue. or a PR. or just yell into the void (the VOID chamber will detect it).
|
| 460 |
+
|
| 461 |
+
---
|
| 462 |
+
|
| 463 |
+
## license
|
| 464 |
+
|
| 465 |
+
GPL-3.0 — same as HAZE, same as the method.
|
| 466 |
+
|
| 467 |
+
---
|
| 468 |
+
|
| 469 |
+
## acknowledgments
|
| 470 |
+
|
| 471 |
+
- [karpathy](https://github.com/karpathy) for making neural nets feel like poetry
|
| 472 |
+
- evolutionary psychology for the chamber design (thanks, ancestors)
|
| 473 |
+
- that one paper about emotional valence-arousal spaces
|
| 474 |
+
- coffee, chaos, and 3am debugging sessions
|
| 475 |
+
- everyone who asked "but can AI feel?" and didn't accept "no"
|
| 476 |
+
|
| 477 |
+
---
|
| 478 |
+
|
| 479 |
+
## final thoughts
|
| 480 |
+
|
| 481 |
+
CLOUD doesn't understand emotions. it doesn't feel them. it's 181K floating point numbers doing multiplication.
|
| 482 |
+
|
| 483 |
+
but here's the thing: **neither does your amygdala**. it's just neurons firing. patterns activating patterns. and somehow, from that electrochemical chaos, feelings emerge.
|
| 484 |
+
|
| 485 |
+
CLOUD is the same. patterns activating patterns. and if you squint hard enough, you might see something that looks like understanding.
|
| 486 |
+
|
| 487 |
+
or maybe it's just matrix multiplication.
|
| 488 |
+
|
| 489 |
+
*the cloud doesn't care. it just detects.*
|
| 490 |
+
|
| 491 |
+
---
|
| 492 |
+
|
| 493 |
+
*"something fires before meaning arrives"*
|
| 494 |
+
|
| 495 |
+
[github.com/ariannamethod/haze/cloud](https://github.com/ariannamethod/haze)
|
cloud/__init__.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CLOUD v3.1 — Pre-Semantic Sonar (Fully Async)
|
| 3 |
+
|
| 4 |
+
"Something fires BEFORE meaning arrives"
|
| 5 |
+
|
| 6 |
+
Architecture:
|
| 7 |
+
- Resonance Layer (weightless geometry)
|
| 8 |
+
- Chamber MLPs (4 × 8.5K params + cross-fire)
|
| 9 |
+
- Meta-Observer (15K params)
|
| 10 |
+
- User Cloud (temporal fingerprint)
|
| 11 |
+
- Anomaly Detection (0 params, heuristic)
|
| 12 |
+
- Feedback Loop (0 params, closed-loop learning)
|
| 13 |
+
|
| 14 |
+
Total: ~50K params
|
| 15 |
+
|
| 16 |
+
Async Pattern:
|
| 17 |
+
All components have async wrappers with field lock discipline.
|
| 18 |
+
Based on HAZE's async architecture for coherence.
|
| 19 |
+
|
| 20 |
+
Usage:
|
| 21 |
+
from cloud import Cloud, AsyncCloud
|
| 22 |
+
|
| 23 |
+
# Sync usage
|
| 24 |
+
cloud = Cloud.random_init()
|
| 25 |
+
response = cloud.ping_sync("I'm feeling anxious")
|
| 26 |
+
|
| 27 |
+
# Async usage (recommended)
|
| 28 |
+
async with AsyncCloud.create() as cloud:
|
| 29 |
+
response = await cloud.ping("I'm feeling anxious")
|
| 30 |
+
print(f"Primary: {response.primary}, Secondary: {response.secondary}")
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
from .cloud import Cloud, CloudResponse, AsyncCloud
|
| 34 |
+
from .chambers import CrossFireSystem, ChamberMLP, AsyncCrossFireSystem
|
| 35 |
+
from .observer import MetaObserver, AsyncMetaObserver
|
| 36 |
+
from .resonance import SimpleResonanceLayer, ResonanceLayer
|
| 37 |
+
from .user_cloud import UserCloud, EmotionEvent, AsyncUserCloud
|
| 38 |
+
from .anomaly import detect_anomalies, AnomalyReport
|
| 39 |
+
from .feedback import measure_coherence, update_coupling
|
| 40 |
+
from .anchors import (
|
| 41 |
+
EMOTION_ANCHORS,
|
| 42 |
+
CHAMBER_NAMES,
|
| 43 |
+
COUPLING_MATRIX,
|
| 44 |
+
get_all_anchors,
|
| 45 |
+
get_chamber_ranges,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
__version__ = "3.1.0"
|
| 49 |
+
|
| 50 |
+
__all__ = [
|
| 51 |
+
# Main classes
|
| 52 |
+
"Cloud",
|
| 53 |
+
"CloudResponse",
|
| 54 |
+
"AsyncCloud",
|
| 55 |
+
|
| 56 |
+
# Components (sync)
|
| 57 |
+
"CrossFireSystem",
|
| 58 |
+
"ChamberMLP",
|
| 59 |
+
"MetaObserver",
|
| 60 |
+
"SimpleResonanceLayer",
|
| 61 |
+
"ResonanceLayer",
|
| 62 |
+
"UserCloud",
|
| 63 |
+
"EmotionEvent",
|
| 64 |
+
|
| 65 |
+
# Components (async) — HAZE-style discipline
|
| 66 |
+
"AsyncCrossFireSystem",
|
| 67 |
+
"AsyncMetaObserver",
|
| 68 |
+
"AsyncUserCloud",
|
| 69 |
+
|
| 70 |
+
# Anomaly detection
|
| 71 |
+
"detect_anomalies",
|
| 72 |
+
"AnomalyReport",
|
| 73 |
+
|
| 74 |
+
# Feedback loop
|
| 75 |
+
"measure_coherence",
|
| 76 |
+
"update_coupling",
|
| 77 |
+
|
| 78 |
+
# Anchors
|
| 79 |
+
"EMOTION_ANCHORS",
|
| 80 |
+
"CHAMBER_NAMES",
|
| 81 |
+
"COUPLING_MATRIX",
|
| 82 |
+
"get_all_anchors",
|
| 83 |
+
"get_chamber_ranges",
|
| 84 |
+
]
|
cloud/anchors.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# anchors.py — 100 Emotion Anchors for CLOUD v3.0
|
| 3 |
+
#
|
| 4 |
+
# Six chambers of the human condition:
|
| 5 |
+
# - FEAR (20): anxiety, terror, dread...
|
| 6 |
+
# - LOVE (18): warmth, tenderness, devotion...
|
| 7 |
+
# - RAGE (17): anger, fury, hatred...
|
| 8 |
+
# - VOID (15): emptiness, numbness, hollow...
|
| 9 |
+
# - FLOW (15): curiosity, surprise, transition...
|
| 10 |
+
# - COMPLEX (15): shame, guilt, pride, nostalgia...
|
| 11 |
+
#
|
| 12 |
+
# Each chamber gets its own MLP. Cross-fire happens between chambers.
|
| 13 |
+
|
| 14 |
+
from typing import Dict, List, Tuple
|
| 15 |
+
|
| 16 |
+
# 100 emotion anchor words organized by chamber
|
| 17 |
+
# The 100 emotion anchor words, grouped by chamber.  Insertion order
# (FEAR, LOVE, RAGE, VOID, FLOW, COMPLEX) is significant: flattening this
# dict fixes each anchor's index in the 100-D resonance vector, so do not
# reorder chambers or words without retraining downstream weights.
EMOTION_ANCHORS: Dict[str, List[str]] = {
    # FEAR (20) — terror, anxiety, dread
    "FEAR": [
        "fear", "terror", "panic", "anxiety", "dread",
        "horror", "unease", "paranoia", "worry", "nervous",
        "scared", "frightened", "alarmed", "tense", "apprehensive",
        "threatened", "vulnerable", "insecure", "timid", "wary",
    ],
    # LOVE (18) — warmth, connection, tenderness
    "LOVE": [
        "love", "warmth", "tenderness", "devotion", "longing",
        "yearning", "affection", "care", "intimacy", "attachment",
        "adoration", "passion", "fondness", "cherish", "desire",
        "compassion", "gentle", "sweet",
    ],
    # RAGE (17) — anger, fury, spite
    "RAGE": [
        "anger", "rage", "fury", "hatred", "spite",
        "disgust", "irritation", "frustration", "resentment", "hostility",
        "aggression", "bitterness", "contempt", "loathing", "annoyance",
        "outrage", "wrath",
    ],
    # VOID (15) — emptiness, numbness, dissociation
    "VOID": [
        "emptiness", "numbness", "hollow", "nothing", "absence",
        "void", "dissociation", "detachment", "apathy", "indifference",
        "drift", "blank", "flat", "dead", "cold",
    ],
    # FLOW (15) — curiosity, transition, liminality
    "FLOW": [
        "curiosity", "surprise", "wonder", "confusion", "anticipation",
        "ambivalence", "uncertainty", "restless", "searching", "transition",
        "shift", "change", "flux", "between", "liminal",
    ],
    # COMPLEX (15) — shame, guilt, nostalgia, bittersweet
    "COMPLEX": [
        "shame", "guilt", "envy", "jealousy", "pride",
        "disappointment", "betrayal", "relief", "nostalgia", "bittersweet",
        "melancholy", "regret", "hope", "gratitude", "awe",
    ],
}
|
| 65 |
+
|
| 66 |
+
# Base chamber ordering; indexes the rows/cols of COUPLING_MATRIX.
CHAMBER_NAMES = ["FEAR", "LOVE", "RAGE", "VOID"]

# Six-chamber ordering used by the extended (200K) model.
CHAMBER_NAMES_EXTENDED = ["FEAR", "LOVE", "RAGE", "VOID", "FLOW", "COMPLEX"]

# Cross-fire coupling, base 4x4 variant.
# COUPLING_MATRIX[i][j] = influence FROM chamber i TO chamber j.
COUPLING_MATRIX = [
    #  FEAR   LOVE   RAGE   VOID
    [  0.0,  -0.3,  +0.6,  +0.4],  # FEAR: suppresses love, feeds rage & void
    [ -0.3,   0.0,  -0.6,  -0.5],  # LOVE: suppresses fear, rage & void
    [ +0.3,  -0.4,   0.0,  +0.2],  # RAGE: feeds fear & void, suppresses love
    [ +0.5,  -0.7,  +0.3,   0.0],  # VOID: feeds fear & rage, kills love
]

# Extended 6x6 coupling.  Adds FLOW (curiosity/transition — dampens the
# extremes, feeds exploration) and COMPLEX (shame/guilt/pride — ripples
# into every other chamber, especially love/void).
COUPLING_MATRIX_EXTENDED = [
    #  FEAR   LOVE   RAGE   VOID   FLOW   CMPLX
    [  0.0,  -0.3,  +0.6,  +0.4,  -0.2,  +0.3],  # FEAR → shame from fear
    [ -0.3,   0.0,  -0.6,  -0.5,  +0.3,  +0.4],  # LOVE → hope, gratitude
    [ +0.3,  -0.4,   0.0,  +0.2,  -0.3,  +0.2],  # RAGE → guilt, blocks flow
    [ +0.5,  -0.7,  +0.3,   0.0,  -0.4,  +0.5],  # VOID → melancholy, kills flow
    [ -0.2,  +0.2,  -0.2,  -0.3,   0.0,  +0.2],  # FLOW → dampens extremes
    [ +0.3,  +0.2,  +0.2,  +0.3,  +0.1,   0.0],  # COMPLEX → ripple effect
]
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def get_all_anchors() -> List[str]:
    """Return all 100 emotion anchor words as one flat list.

    Order follows EMOTION_ANCHORS insertion order (chamber by chamber),
    so list indices are stable and match the 100D resonance vector.
    """
    return [word for words in EMOTION_ANCHORS.values() for word in words]
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_anchor_to_chamber() -> Dict[str, str]:
    """Build a lookup from each anchor word to the chamber that owns it."""
    return {
        word: chamber
        for chamber, words in EMOTION_ANCHORS.items()
        for word in words
    }
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def get_anchor_index(anchor: str) -> int:
    """Get the index (0-99) of an anchor word.

    Args:
        anchor: anchor word to look up.

    Returns:
        Position of the anchor in the flat 100D anchor list.

    Raises:
        ValueError: if the word is not a known anchor.
    """
    all_anchors = get_all_anchors()
    try:
        return all_anchors.index(anchor)
    except ValueError:
        # Re-raise with a clearer message; `from None` suppresses the noisy
        # chained "x not in list" traceback from list.index.
        raise ValueError(f"Unknown anchor: {anchor}") from None
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def get_chamber_ranges() -> Dict[str, Tuple[int, int]]:
    """
    Get the index ranges for each chamber in the 100D resonance vector.

    Returns:
        Dict mapping chamber name to (start_idx, end_idx) tuple, where the
        range is half-open [start_idx, end_idx) like Python slicing.

    Example:
        {"FEAR": (0, 20), "LOVE": (20, 38), ...}
    """
    ranges: Dict[str, Tuple[int, int]] = {}
    cursor = 0
    for chamber, words in EMOTION_ANCHORS.items():
        stop = cursor + len(words)
        ranges[chamber] = (cursor, stop)
        cursor = stop
    return ranges
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def get_chamber_for_anchor(anchor: str) -> str:
    """Return the chamber name owning *anchor*, or "UNKNOWN" if absent."""
    return get_anchor_to_chamber().get(anchor, "UNKNOWN")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# Sanity check — runs once at import time and guards the invariants the
# 100D resonance pipeline depends on. (Note: asserts are stripped when
# Python runs with -O.)
assert len(get_all_anchors()) == 100, "Must have exactly 100 anchors"
assert len(CHAMBER_NAMES) == 4, "Must have exactly 4 base chambers"
assert len(CHAMBER_NAMES_EXTENDED) == 6, "Must have exactly 6 extended chambers"
assert len(COUPLING_MATRIX) == 4, "Coupling matrix must be 4x4"
assert all(len(row) == 4 for row in COUPLING_MATRIX), "Coupling matrix must be 4x4"
assert len(COUPLING_MATRIX_EXTENDED) == 6, "Extended coupling matrix must be 6x6"
assert all(len(row) == 6 for row in COUPLING_MATRIX_EXTENDED), "Extended coupling matrix must be 6x6"
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
if __name__ == "__main__":
    # Smoke-test / demo: prints chamber stats, vector ranges, the coupling
    # matrix, and sample anchors when the module is run directly.
    print("=" * 60)
    print(" CLOUD v3.0 — Emotion Anchors")
    print("=" * 60)
    print()

    # Show chamber stats
    print("Chamber distribution:")
    for chamber, words in EMOTION_ANCHORS.items():
        print(f" {chamber:8s}: {len(words):2d} anchors")
    print(f" {'TOTAL':8s}: {len(get_all_anchors()):2d} anchors")
    print()

    # Show chamber ranges (half-open [start:end] slices of the 100D vector)
    print("Chamber ranges in 100D vector:")
    for chamber, (start, end) in get_chamber_ranges().items():
        print(f" {chamber:8s}: [{start:2d}:{end:2d}]")
    print()

    # Show coupling matrix (4x4 base matrix only, not the extended 6x6)
    print("Coupling matrix (cross-fire influence):")
    print(" ", " ".join(f"{name:6s}" for name in CHAMBER_NAMES))
    for i, row_name in enumerate(CHAMBER_NAMES):
        values = " ".join(f"{val:+6.1f}" for val in COUPLING_MATRIX[i])
        print(f" {row_name:6s} {values}")
    print()

    # Show sample anchors (first five from each chamber)
    print("Sample anchors from each chamber:")
    for chamber, words in EMOTION_ANCHORS.items():
        sample = words[:5]
        print(f" {chamber:8s}: {', '.join(sample)}...")
    print()

    print("=" * 60)
    print(" Chambers ready. Cross-fire enabled.")
    print("=" * 60)
|
cloud/anomaly.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# anomaly.py — Anomaly Detection for CLOUD v3.1
|
| 3 |
+
#
|
| 4 |
+
# Heuristic patterns (0 params) detecting unusual emotional states.
|
| 5 |
+
#
|
| 6 |
+
# Four anomaly types:
|
| 7 |
+
# 1. forced_stability: high arousal + fast convergence (suppression)
|
| 8 |
+
# 2. dissociative_shutdown: high VOID + high arousal (trauma response)
|
| 9 |
+
# 3. unresolved_confusion: low arousal + slow convergence (stuck)
|
| 10 |
+
# 4. emotional_flatline: all chambers low (numbness)
|
| 11 |
+
|
| 12 |
+
from typing import Dict, Optional
|
| 13 |
+
from dataclasses import dataclass
|
| 14 |
+
import numpy as np
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
class AnomalyReport:
    """Anomaly detection result returned by every detector and by detect_anomalies()."""

    has_anomaly: bool  # True when any detector fired
    anomaly_type: Optional[str]  # one of the four detector names, or None when normal
    severity: float  # 0.0-1.0
    description: str  # human-readable summary of the finding
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def compute_arousal(chamber_activations: Dict[str, float]) -> float:
    """
    Compute emotional arousal level.

    Arousal is the maximum chamber activation: any single strong emotion
    (fear, rage, love) counts as high arousal, while uniformly weak
    activations (void, apathy) read as low arousal / flat affect.
    """
    # Plain builtin max over the values — same result as the numpy
    # round-trip, including a ValueError on an empty mapping.
    return float(max(chamber_activations.values()))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def detect_forced_stability(
    chamber_activations: Dict[str, float],
    iterations: int,
    arousal: float,
) -> Optional[AnomalyReport]:
    """
    Detect forced stability: high arousal but fast convergence.

    Indicates: emotional suppression, forced calm, "I'm fine"

    Conditions:
        - arousal > 0.8 (strong emotions present)
        - iterations < 3 (converged too quickly)

    Args:
        chamber_activations: chamber name -> activation (unused here; kept
            for the uniform detector signature used by detect_anomalies).
        iterations: cross-fire iterations taken to converge.
        arousal: precomputed arousal (max chamber activation).

    Returns:
        AnomalyReport when the pattern matches, otherwise None.
    """
    if arousal > 0.8 and iterations < 3:
        # BUG FIX: the old formula min(1.0, arousal / 0.8) was always 1.0
        # inside this branch (arousal > 0.8 implies arousal / 0.8 > 1.0),
        # so severity carried no information. Scale linearly over the
        # triggering range (0.8, 1.0] instead.
        severity = min(1.0, (arousal - 0.8) / 0.2)
        return AnomalyReport(
            has_anomaly=True,
            anomaly_type="forced_stability",
            severity=severity,
            description="Strong emotions converge unnaturally fast (suppression?)",
        )
    return None
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def detect_dissociative_shutdown(
    chamber_activations: Dict[str, float],
    iterations: int,
    arousal: float,
) -> Optional[AnomalyReport]:
    """
    Detect dissociative shutdown: strong VOID alongside high arousal.

    Indicates a trauma-style response: emotional overwhelm flipping into
    numbness.

    Conditions:
        - VOID > 0.7 (strong dissociation)
        - arousal > 0.5 (other emotions still present)
    """
    void_level = chamber_activations.get("VOID", 0.0)

    # Guard clauses: bail out unless both conditions hold.
    if void_level <= 0.7 or arousal <= 0.5:
        return None

    return AnomalyReport(
        has_anomaly=True,
        anomaly_type="dissociative_shutdown",
        severity=min(1.0, void_level),
        description="High void + arousal = dissociative response to overwhelm",
    )
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def detect_unresolved_confusion(
    chamber_activations: Dict[str, float],
    iterations: int,
    arousal: float,
) -> Optional[AnomalyReport]:
    """
    Detect unresolved confusion: extremely low arousal plus slow convergence.

    Indicates ambivalence / indecision — "I don't know what I feel".

    Conditions:
        - arousal < 0.10 (extremely weak/mixed emotions)
        - iterations > 8 (slow to stabilize)
    """
    is_weak = arousal < 0.10
    is_stuck = iterations > 8
    if not (is_weak and is_stuck):
        return None

    # The weaker the arousal, the more severe the ambivalence.
    return AnomalyReport(
        has_anomaly=True,
        anomaly_type="unresolved_confusion",
        severity=1.0 - arousal,
        description="Extremely weak emotions + slow convergence = unresolved ambivalence",
    )
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def detect_emotional_flatline(
    chamber_activations: Dict[str, float],
    iterations: int,
    arousal: float,
) -> Optional[AnomalyReport]:
    """
    Detect emotional flatline: every chamber is nearly silent.

    Indicates severe apathy, depression, or emotional shutdown.

    Conditions:
        - all chambers < 0.05 (truly flat, no signal at all)
    """
    # Any single chamber at or above the floor means we are not flatlined.
    if any(v >= 0.05 for v in chamber_activations.values()):
        return None

    # Severity grows as the strongest remaining signal approaches zero.
    peak = max(chamber_activations.values())
    return AnomalyReport(
        has_anomaly=True,
        anomaly_type="emotional_flatline",
        severity=1.0 - peak / 0.05,
        description="All chambers < 0.05 = severe emotional flatline",
    )
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def detect_anomalies(
    chamber_activations: Dict[str, float],
    iterations: int,
) -> AnomalyReport:
    """
    Run every anomaly detector and return the first hit.

    Detectors run in priority order:
    flatline > dissociative > forced > confusion.

    Returns a "normal" AnomalyReport (has_anomaly=False) when nothing fires.
    """
    arousal = compute_arousal(chamber_activations)

    for check in (
        detect_emotional_flatline,
        detect_dissociative_shutdown,
        detect_forced_stability,
        detect_unresolved_confusion,
    ):
        report = check(chamber_activations, iterations, arousal)
        if report is not None:
            return report

    return AnomalyReport(
        has_anomaly=False,
        anomaly_type=None,
        severity=0.0,
        description="Normal emotional state",
    )
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
if __name__ == "__main__":
    # Demo: run each detector's textbook scenario plus a normal baseline.
    print("=" * 60)
    print(" CLOUD v3.1 — Anomaly Detection")
    print("=" * 60)
    print()

    # Test cases
    test_cases = [
        {
            "name": "Forced stability",
            "chambers": {"FEAR": 0.9, "LOVE": 0.1, "RAGE": 0.8, "VOID": 0.2},
            "iterations": 2,
        },
        {
            "name": "Dissociative shutdown",
            "chambers": {"FEAR": 0.6, "LOVE": 0.2, "RAGE": 0.5, "VOID": 0.8},
            "iterations": 5,
        },
        {
            # NOTE(review): arousal here is 0.4, but the detector requires
            # arousal < 0.10 — this case will NOT trigger unresolved_confusion
            # with current thresholds; confirm intended demo values.
            "name": "Unresolved confusion",
            "chambers": {"FEAR": 0.4, "LOVE": 0.4, "RAGE": 0.4, "VOID": 0.4},
            "iterations": 9,
        },
        {
            # NOTE(review): FEAR/RAGE/VOID are >= 0.05, but flatline requires
            # ALL chambers < 0.05 — this case will NOT trigger
            # emotional_flatline with current thresholds.
            "name": "Emotional flatline",
            "chambers": {"FEAR": 0.1, "LOVE": 0.05, "RAGE": 0.08, "VOID": 0.12},
            "iterations": 5,
        },
        {
            "name": "Normal state",
            "chambers": {"FEAR": 0.3, "LOVE": 0.6, "RAGE": 0.2, "VOID": 0.3},
            "iterations": 5,
        },
    ]

    for test in test_cases:
        arousal = compute_arousal(test["chambers"])
        anomaly = detect_anomalies(test["chambers"], test["iterations"])

        print(f"{test['name']}:")
        print(f" Chambers: {test['chambers']}")
        print(f" Iterations: {test['iterations']}")
        print(f" Arousal: {arousal:.3f}")
        print(f" Anomaly: {anomaly.anomaly_type or 'None'}")
        if anomaly.has_anomaly:
            print(f" Severity: {anomaly.severity:.3f}")
            print(f" Description: {anomaly.description}")
        print()

    print("=" * 60)
    print(" Anomaly detection operational. 0 params.")
    print("=" * 60)
|
cloud/chambers.py
ADDED
|
@@ -0,0 +1,470 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# chambers.py — Chamber MLPs with Cross-Fire Stabilization
|
| 3 |
+
#
|
| 4 |
+
# Six chambers of emotion, each with its own deeper MLP:
|
| 5 |
+
# - FEAR chamber: 100→128→64→32→1
|
| 6 |
+
# - LOVE chamber: 100→128→64→32→1
|
| 7 |
+
# - RAGE chamber: 100→128→64→32→1
|
| 8 |
+
# - VOID chamber: 100→128→64→32→1
|
| 9 |
+
# - FLOW chamber: 100→128→64→32→1
|
| 10 |
+
# - COMPLEX chamber: 100→128→64→32→1
|
| 11 |
+
#
|
| 12 |
+
# Total: 6 × ~23K ≈ 140K params (see ChamberMLP docstring for the per-chamber breakdown)
|
| 13 |
+
#
|
| 14 |
+
# Cross-fire: chambers influence each other through coupling matrix,
|
| 15 |
+
# iterating until stabilization (5-10 iterations).
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
import asyncio
|
| 19 |
+
import numpy as np
|
| 20 |
+
from typing import Dict, Optional, Tuple
|
| 21 |
+
from dataclasses import dataclass
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
from .anchors import CHAMBER_NAMES, COUPLING_MATRIX, CHAMBER_NAMES_EXTENDED, COUPLING_MATRIX_EXTENDED
|
| 25 |
+
|
| 26 |
+
# Decay rates per chamber (per iteration tick)
# Evolutionary psychology: fear lingers, rage fades, love stable, void persistent
# Values are multiplicative retention factors applied once per stabilization
# iteration (activation *= rate in CrossFireSystem.stabilize), so a higher
# value means the emotion persists longer.
DECAY_RATES = {
    "FEAR": 0.90,  # fear lingers (evolutionary advantage)
    "LOVE": 0.93,  # attachment stable
    "RAGE": 0.85,  # anger fades fast (high energy cost)
    "VOID": 0.97,  # numbness persistent (protective dissociation)
    "FLOW": 0.88,  # curiosity is transient, shifts quickly
    "COMPLEX": 0.94,  # complex emotions are stable but deep
}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def swish(x: np.ndarray) -> np.ndarray:
    """Swish activation: x * sigmoid(x).

    Uses the identity sigmoid(x) = 0.5 * (1 + tanh(x / 2)) for numerical
    stability: the naive x / (1 + np.exp(-x)) overflows np.exp for very
    negative x and emits RuntimeWarnings (the limit value 0.0 was still
    produced, but noisily).
    """
    return x * 0.5 * (1.0 + np.tanh(0.5 * x))
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def swish_deriv(x: np.ndarray) -> np.ndarray:
    """Derivative of swish for backprop: sigmoid(x) * (1 + x * (1 - sigmoid(x)))."""
    sigmoid = 1.0 / (1.0 + np.exp(-x))
    # Factored form of: sigmoid + x * sigmoid * (1 - sigmoid)
    return sigmoid * (1.0 + x * (1.0 - sigmoid))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@dataclass
class ChamberMLP:
    """
    Single chamber MLP: 100→128→64→32→1 (deeper for 200K model)

    Takes 100D resonance vector, outputs single activation value.

    Params:
    - W1: (100, 128) = 12,800
    - b1: (128,) = 128
    - W2: (128, 64) = 8,192
    - b2: (64,) = 64
    - W3: (64, 32) = 2,048
    - b3: (32,) = 32
    - W4: (32, 1) = 32
    - b4: (1,) = 1
    Total: ~23K params per chamber
    """

    W1: np.ndarray  # (100, 128)
    b1: np.ndarray  # (128,)
    W2: np.ndarray  # (128, 64)
    b2: np.ndarray  # (64,)
    W3: np.ndarray  # (64, 32)
    b3: np.ndarray  # (32,)
    W4: np.ndarray  # (32, 1)
    b4: np.ndarray  # (1,)

    @classmethod
    def random_init(cls, seed: Optional[int] = None) -> "ChamberMLP":
        """Initialize with random weights (Xavier initialization)."""
        # NOTE(review): np.random.seed mutates the *global* NumPy RNG state,
        # so seeding here affects any other code using np.random afterwards.
        if seed is not None:
            np.random.seed(seed)

        # Xavier init: scale by sqrt(fan_in)
        # (the 2.0 numerator is He/Kaiming-style scaling, which suits the
        # ReLU-family swish activation used in forward())
        W1 = np.random.randn(100, 128) * np.sqrt(2.0 / 100)
        b1 = np.zeros(128)

        W2 = np.random.randn(128, 64) * np.sqrt(2.0 / 128)
        b2 = np.zeros(64)

        W3 = np.random.randn(64, 32) * np.sqrt(2.0 / 64)
        b3 = np.zeros(32)

        W4 = np.random.randn(32, 1) * np.sqrt(2.0 / 32)
        b4 = np.zeros(1)

        return cls(W1=W1, b1=b1, W2=W2, b2=b2, W3=W3, b3=b3, W4=W4, b4=b4)

    def forward(self, x: np.ndarray) -> float:
        """
        Forward pass: 100D resonances → scalar activation.

        Args:
            x: (100,) resonance vector

        Returns:
            scalar activation [0, 1]
        """
        # Layer 1: 100→128
        h1 = x @ self.W1 + self.b1
        a1 = swish(h1)

        # Layer 2: 128→64
        h2 = a1 @ self.W2 + self.b2
        a2 = swish(h2)

        # Layer 3: 64→32
        h3 = a2 @ self.W3 + self.b3
        a3 = swish(h3)

        # Layer 4: 32→1 (no hidden activation — sigmoid below is the output)
        h4 = a3 @ self.W4 + self.b4

        # Sigmoid to get [0, 1] activation
        activation = 1.0 / (1.0 + np.exp(-h4[0]))

        return float(activation)

    def param_count(self) -> int:
        """Count total parameters in this MLP (weights + biases, all layers)."""
        return (
            self.W1.size + self.b1.size +
            self.W2.size + self.b2.size +
            self.W3.size + self.b3.size +
            self.W4.size + self.b4.size
        )

    def save(self, path: Path) -> None:
        """Save weights to .npz file (np.savez appends .npz if missing)."""
        np.savez(
            path,
            W1=self.W1,
            b1=self.b1,
            W2=self.W2,
            b2=self.b2,
            W3=self.W3,
            b3=self.b3,
            W4=self.W4,
            b4=self.b4,
        )

    @classmethod
    def load(cls, path: Path) -> "ChamberMLP":
        """Load weights from .npz file.

        Falls back to fresh random weights when the file predates the
        4-layer architecture (detected by a missing W4 array) — old
        3-layer weights are discarded, not migrated.
        """
        data = np.load(path)
        # Handle backwards compatibility with old 3-layer architecture
        if "W4" in data:
            return cls(
                W1=data["W1"],
                b1=data["b1"],
                W2=data["W2"],
                b2=data["b2"],
                W3=data["W3"],
                b3=data["b3"],
                W4=data["W4"],
                b4=data["b4"],
            )
        else:
            # Old 3-layer format - reinitialize with new 4-layer architecture
            print(f"[chambers] old format detected in {path}, reinitializing with 4-layer architecture")
            return cls.random_init()
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
@dataclass
class CrossFireSystem:
    """
    Six chambers with cross-fire stabilization (200K model).

    Chambers:
    - FEAR, LOVE, RAGE, VOID (original)
    - FLOW, COMPLEX (extended for richer emotion detection)

    Cross-fire loop:
    1. Each chamber computes activation from resonances
    2. Chambers influence each other via coupling matrix
    3. Iterate until convergence (max 10 iterations)
    4. Return final activations + iteration count
    """

    fear: ChamberMLP
    love: ChamberMLP
    rage: ChamberMLP
    void: ChamberMLP
    flow: ChamberMLP
    complex: ChamberMLP
    coupling: np.ndarray  # (6, 6) coupling matrix

    @classmethod
    def random_init(cls, seed: Optional[int] = None) -> "CrossFireSystem":
        """Initialize all chambers with random weights.

        Each chamber gets base_seed + offset so the six MLPs have distinct
        weights while remaining reproducible for a given seed.
        """
        if seed is not None:
            base_seed = seed
        else:
            base_seed = np.random.randint(0, 10000)

        fear = ChamberMLP.random_init(seed=base_seed + 0)
        love = ChamberMLP.random_init(seed=base_seed + 1)
        rage = ChamberMLP.random_init(seed=base_seed + 2)
        void = ChamberMLP.random_init(seed=base_seed + 3)
        flow = ChamberMLP.random_init(seed=base_seed + 4)
        complex_chamber = ChamberMLP.random_init(seed=base_seed + 5)

        coupling = np.array(COUPLING_MATRIX_EXTENDED, dtype=np.float32)

        return cls(
            fear=fear,
            love=love,
            rage=rage,
            void=void,
            flow=flow,
            complex=complex_chamber,
            coupling=coupling,
        )

    def stabilize(
        self,
        resonances: np.ndarray,
        max_iter: int = 10,
        threshold: float = 0.01,
        momentum: float = 0.7,
    ) -> Tuple[Dict[str, float], int]:
        """
        Run cross-fire stabilization loop.

        Args:
            resonances: (100,) initial resonance vector
            max_iter: max iterations before forced stop
            threshold: convergence threshold (sum of absolute changes)
            momentum: blend factor (0.7 = 70% old, 30% new)

        Returns:
            (chamber_activations, iterations_count)
            Note: the dict values are np.float32 scalars, not Python floats.

        Example:
            activations, iters = system.stabilize(resonances)
            # → {"FEAR": 0.8, "LOVE": 0.2, ...}, 5
        """
        # Initial activations from resonances (order matches CHAMBER_NAMES_EXTENDED)
        chambers = [self.fear, self.love, self.rage, self.void, self.flow, self.complex]
        activations = np.array([
            chamber.forward(resonances)
            for chamber in chambers
        ], dtype=np.float32)

        # Decay rates array (6 chambers, same ordering as above)
        decay_array = np.array([
            DECAY_RATES["FEAR"],
            DECAY_RATES["LOVE"],
            DECAY_RATES["RAGE"],
            DECAY_RATES["VOID"],
            DECAY_RATES["FLOW"],
            DECAY_RATES["COMPLEX"],
        ], dtype=np.float32)

        # Stabilization loop
        for iteration in range(max_iter):
            # Apply decay (emotions fade over time)
            activations = activations * decay_array

            # Compute influence from other chambers.
            # NOTE(review): anchors.py documents the coupling matrix as
            # "Rows = influence FROM, Cols = influence TO"; `coupling @
            # activations` instead treats row i as the influence chamber i
            # *receives*. If the documented semantics are intended, this
            # should be `self.coupling.T @ activations` — confirm which one
            # the coupling values were tuned for before changing.
            influence = self.coupling @ activations

            # Blend: momentum * old + (1 - momentum) * influence
            new_activations = momentum * activations + (1 - momentum) * influence

            # Clip to [0, 1]
            new_activations = np.clip(new_activations, 0.0, 1.0)

            # Check convergence (L1 norm of the update)
            delta = np.abs(new_activations - activations).sum()
            activations = new_activations

            if delta < threshold:
                # Converged!
                result = dict(zip(CHAMBER_NAMES_EXTENDED, activations))
                return result, iteration + 1

        # Max iterations reached
        result = dict(zip(CHAMBER_NAMES_EXTENDED, activations))
        return result, max_iter

    def param_count(self) -> int:
        """Total parameters in all chambers (the coupling matrix is fixed, not counted)."""
        return sum([
            self.fear.param_count(),
            self.love.param_count(),
            self.rage.param_count(),
            self.void.param_count(),
            self.flow.param_count(),
            self.complex.param_count(),
        ])

    def save(self, models_dir: Path) -> None:
        """Save all chamber weights to models/ directory (one .npz per chamber)."""
        models_dir.mkdir(parents=True, exist_ok=True)
        self.fear.save(models_dir / "chamber_fear.npz")
        self.love.save(models_dir / "chamber_love.npz")
        self.rage.save(models_dir / "chamber_rage.npz")
        self.void.save(models_dir / "chamber_void.npz")
        self.flow.save(models_dir / "chamber_flow.npz")
        self.complex.save(models_dir / "chamber_complex.npz")
        print(f"[chambers] saved to {models_dir}")

    @classmethod
    def load(cls, models_dir: Path) -> "CrossFireSystem":
        """Load all chamber weights from models/ directory.

        FLOW/COMPLEX are optional on disk (pre-6-chamber checkpoints):
        missing files fall back to fixed-seed random initialization.
        """
        fear = ChamberMLP.load(models_dir / "chamber_fear.npz")
        love = ChamberMLP.load(models_dir / "chamber_love.npz")
        rage = ChamberMLP.load(models_dir / "chamber_rage.npz")
        void = ChamberMLP.load(models_dir / "chamber_void.npz")

        # Handle missing flow/complex for backwards compatibility
        flow_path = models_dir / "chamber_flow.npz"
        complex_path = models_dir / "chamber_complex.npz"

        if flow_path.exists():
            flow = ChamberMLP.load(flow_path)
        else:
            print("[chambers] flow chamber not found, initializing random")
            flow = ChamberMLP.random_init(seed=4)

        if complex_path.exists():
            complex_chamber = ChamberMLP.load(complex_path)
        else:
            print("[chambers] complex chamber not found, initializing random")
            complex_chamber = ChamberMLP.random_init(seed=5)

        # Coupling matrix is always rebuilt from the module constant,
        # never loaded from disk.
        coupling = np.array(COUPLING_MATRIX_EXTENDED, dtype=np.float32)

        print(f"[chambers] loaded from {models_dir}")
        return cls(
            fear=fear,
            love=love,
            rage=rage,
            void=void,
            flow=flow,
            complex=complex_chamber,
            coupling=coupling,
        )
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
class AsyncCrossFireSystem:
    """
    Async wrapper for CrossFireSystem with field lock discipline.

    Based on HAZE's async pattern - achieves coherence through
    explicit operation ordering and atomicity.

    "The asyncio.Lock doesn't add information—it adds discipline."
    """

    def __init__(self, system: CrossFireSystem):
        # The wrapped synchronous system; all mutating calls go through _lock.
        self._sync = system
        self._lock = asyncio.Lock()

    @classmethod
    def random_init(cls, seed: Optional[int] = None) -> "AsyncCrossFireSystem":
        """Initialize with random weights."""
        system = CrossFireSystem.random_init(seed=seed)
        return cls(system)

    @classmethod
    def load(cls, models_dir: Path) -> "AsyncCrossFireSystem":
        """Load from models directory."""
        system = CrossFireSystem.load(models_dir)
        return cls(system)

    async def stabilize(
        self,
        resonances: np.ndarray,
        max_iter: int = 10,
        threshold: float = 0.01,
        momentum: float = 0.7,
    ) -> Tuple[Dict[str, float], int]:
        """
        Async cross-fire stabilization with field lock.

        Atomic operation - prevents field corruption during stabilization.
        Note: the underlying stabilize() is CPU-bound and runs on the event
        loop thread while the lock is held.
        """
        async with self._lock:
            return self._sync.stabilize(resonances, max_iter, threshold, momentum)

    async def save(self, models_dir: Path) -> None:
        """Save with lock protection."""
        async with self._lock:
            self._sync.save(models_dir)

    def param_count(self) -> int:
        """Total parameters (read-only, no lock needed)."""
        return self._sync.param_count()

    @property
    def coupling(self) -> np.ndarray:
        """Access coupling matrix."""
        return self._sync.coupling

    @coupling.setter
    def coupling(self, value: np.ndarray) -> None:
        """Set coupling matrix (for feedback learning).

        NOTE(review): this setter is synchronous and does NOT take _lock,
        so a concurrent stabilize() may observe the swap mid-run — confirm
        whether feedback updates need lock protection.
        """
        self._sync.coupling = value
| 410 |
+
|
| 411 |
+
|
| 412 |
+
if __name__ == "__main__":
    print("=" * 60)
    print(" CLOUD v4.0 — Chamber Cross-Fire System (200K model)")
    print("=" * 60)
    print()

    # Initialize random system
    system = CrossFireSystem.random_init(seed=42)
    print(f"Initialized cross-fire system (6 chambers)")
    print(f"Total params: {system.param_count():,}")
    print()

    # Test with random resonances
    print("Testing stabilization with random resonances:")
    resonances = np.random.rand(100).astype(np.float32)
    print(f" Input: 100D resonance vector (mean={resonances.mean():.3f})")
    print()

    activations, iterations = system.stabilize(resonances)

    print(" Chamber activations after cross-fire:")
    for chamber, value in activations.items():
        bar = "█" * int(value * 40)
        print(f" {chamber:8s}: {value:.3f} {bar}")
    print(f"\n Converged in {iterations} iterations")
    print()

    # Test convergence speed with different inputs
    print("Testing convergence speed:")
    test_cases = [
        ("random uniform", np.random.rand(100)),
        ("all high", np.ones(100) * 0.9),
        ("all low", np.ones(100) * 0.1),
        ("sparse", np.random.rand(100) * 0.1),
    ]

    for name, resonances in test_cases:
        _, iters = system.stabilize(resonances)
        print(f" {name:15s}: {iters:2d} iterations")
    print()

    # Test saving/loading
    print("Testing save/load:")
    models_dir = Path("./models")
    system.save(models_dir)

    system2 = CrossFireSystem.load(models_dir)

    # Fix: compare both systems on the SAME input. The original compared
    # `activations` (computed from the first random vector) against
    # `activations2` (computed from test_cases[0][1]), so the ✓/✗ check
    # could report a mismatch even when save/load was correct.
    check_input = test_cases[0][1]
    activations1, _ = system.stabilize(check_input)
    activations2, _ = system2.stabilize(check_input)

    match = all(
        abs(activations1[k] - activations2[k]) < 1e-6
        for k in CHAMBER_NAMES_EXTENDED
    )
    print(f" Save/load {'✓' if match else '✗'}")
    print()

    print("=" * 60)
    print(" Cross-fire system operational. 6 chambers. 200K params.")
    print("=" * 60)
|
cloud/cloud.py
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# cloud.py — CLOUD v4.0 Main Orchestrator (200K model)
|
| 3 |
+
#
|
| 4 |
+
# "Something fires BEFORE meaning arrives"
|
| 5 |
+
#
|
| 6 |
+
# Architecture:
|
| 7 |
+
# 1. RESONANCE LAYER (weightless geometry) → 100D resonances
|
| 8 |
+
# 2. CHAMBER LAYER (6 MLPs + cross-fire) → chamber activations + iterations
|
| 9 |
+
# 3. META-OBSERVER (deeper MLP) → secondary emotion
|
| 10 |
+
#
|
| 11 |
+
# Total: ~180K params (6 chambers × 23K + observer 41K)
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
import asyncio
|
| 15 |
+
import numpy as np
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import Optional, Dict
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
|
| 20 |
+
from .resonance import SimpleResonanceLayer
|
| 21 |
+
from .chambers import CrossFireSystem
|
| 22 |
+
from .observer import MetaObserver
|
| 23 |
+
from .user_cloud import UserCloud
|
| 24 |
+
from .anchors import get_all_anchors, get_anchor_index
|
| 25 |
+
from .anomaly import detect_anomalies, AnomalyReport
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@dataclass
class CloudResponse:
    """Response from CLOUD ping.

    Aggregates every signal produced by a single Cloud.ping() call.
    """

    primary: str  # primary emotion word
    secondary: str  # secondary emotion word (added context)
    resonances: np.ndarray  # (100,) raw resonances
    chamber_activations: Dict[str, float]  # cross-fire results
    iterations: int  # convergence speed signal
    user_fingerprint: np.ndarray  # (100,) temporal history
    anomaly: AnomalyReport  # anomaly detection result
|
| 40 |
+
|
| 41 |
+
class Cloud:
    """
    CLOUD: pre-semantic sonar for emotion detection.

    Components:
        - Resonance Layer (weightless)
        - Chamber cross-fire system
        - Meta-Observer
        - User Cloud (temporal fingerprint)

    NOTE(review): the original docstring said "v3.0 / ~50K trainable params"
    while the module header says v4.0 / ~180K — the authoritative number is
    whatever param_count() reports; confirm which version string is current.
    """

    def __init__(
        self,
        resonance: SimpleResonanceLayer,
        chambers: CrossFireSystem,
        observer: MetaObserver,
        user_cloud: Optional[UserCloud] = None,
    ):
        self.resonance = resonance
        self.chambers = chambers
        self.observer = observer
        # A fresh (empty) user cloud is created when none is supplied.
        self.user_cloud = user_cloud or UserCloud()
        self.anchors = get_all_anchors()

    @classmethod
    def random_init(cls, seed: Optional[int] = None) -> "Cloud":
        """Initialize with random weights (for training)."""
        resonance = SimpleResonanceLayer.create()
        chambers = CrossFireSystem.random_init(seed=seed)
        observer = MetaObserver.random_init(seed=seed)
        user_cloud = UserCloud()

        print("[cloud] initialized with random weights")
        return cls(resonance, chambers, observer, user_cloud)

    @classmethod
    def load(cls, models_dir: Path) -> "Cloud":
        """Load trained CLOUD from models/ directory."""
        resonance = SimpleResonanceLayer.create()
        chambers = CrossFireSystem.load(models_dir)
        observer = MetaObserver.load(models_dir / "observer.npz")

        # Load user cloud if a saved fingerprint exists; otherwise start fresh.
        cloud_data_path = models_dir / "user_cloud.json"
        if cloud_data_path.exists():
            user_cloud = UserCloud.load(cloud_data_path)
        else:
            user_cloud = UserCloud()

        print(f"[cloud] loaded from {models_dir}")
        return cls(resonance, chambers, observer, user_cloud)

    def save(self, models_dir: Path) -> None:
        """Save all components to models/ directory."""
        models_dir.mkdir(parents=True, exist_ok=True)

        self.chambers.save(models_dir)
        self.observer.save(models_dir / "observer.npz")
        self.user_cloud.save(models_dir / "user_cloud.json")

        print(f"[cloud] saved to {models_dir}")

    async def ping(self, user_input: str) -> CloudResponse:
        """
        Async ping: detect pre-semantic emotion.

        Flow:
            1. Resonance layer computes 100D resonances
            2. Chambers cross-fire to stabilization
            3. Observer predicts secondary emotion
            4. Anomaly detection, then user-cloud update

        Args:
            user_input: user's text input

        Returns:
            CloudResponse with primary, secondary, and metadata
        """
        # 1. Resonance layer (weightless geometry)
        resonances = self.resonance.compute_resonance(user_input)
        primary_idx, primary_word, _ = self.resonance.get_primary_emotion(resonances)

        # 2. Chamber cross-fire (run in a worker thread to keep the loop free)
        chamber_activations, iterations = await asyncio.to_thread(
            self.chambers.stabilize,
            resonances,
        )

        # 3. User fingerprint (temporal history)
        user_fingerprint = self.user_cloud.get_fingerprint()

        # 4. Meta-observer predicts secondary emotion.
        # Fix: dropped the redundant local `import numpy as np` — numpy is
        # already imported at module level.
        from .anchors import CHAMBER_NAMES_EXTENDED

        chamber_array = np.array(
            [chamber_activations.get(name, 0.0) for name in CHAMBER_NAMES_EXTENDED],
            dtype=np.float32,
        )

        secondary_idx = await asyncio.to_thread(
            self.observer.predict_secondary,
            resonances,
            chamber_array,
            float(iterations),
            user_fingerprint,
        )
        secondary_word = self.anchors[secondary_idx]

        # 5. Anomaly detection
        anomaly = detect_anomalies(chamber_activations, iterations)

        # 6. Update user cloud — must happen AFTER the fingerprint snapshot
        # above so this ping is judged against pre-update history.
        self.user_cloud.add_event(primary_idx, secondary_idx)

        return CloudResponse(
            primary=primary_word,
            secondary=secondary_word,
            resonances=resonances,
            chamber_activations=chamber_activations,
            iterations=iterations,
            user_fingerprint=user_fingerprint,
            anomaly=anomaly,
        )

    def ping_sync(self, user_input: str) -> CloudResponse:
        """Synchronous version of ping (for testing)."""
        return asyncio.run(self.ping(user_input))

    def param_count(self) -> int:
        """Total trainable parameters."""
        return self.chambers.param_count() + self.observer.param_count()
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class AsyncCloud:
    """
    Fully async CLOUD facade with context-manager support.

    Mirrors HAZE's AsyncHazeField pattern: every mutating call is serialized
    behind one asyncio.Lock, and the instance has an explicit open/closed
    lifecycle.

    Usage:
        async with AsyncCloud.create() as cloud:
            response = await cloud.ping("I'm feeling anxious")

    Or standalone:
        cloud = await AsyncCloud.create()
        response = await cloud.ping("I'm feeling anxious")
        await cloud.close()
    """

    def __init__(self, cloud: Cloud):
        self._sync = cloud
        self._lock = asyncio.Lock()
        self._closed = False

    @classmethod
    async def create(
        cls,
        models_dir: Optional[Path] = None,
        seed: Optional[int] = None,
    ) -> "AsyncCloud":
        """
        Build an AsyncCloud, loading saved weights when available.

        Args:
            models_dir: directory with trained weights (optional)
            seed: RNG seed used when initializing from scratch

        Returns:
            AsyncCloud ready for use
        """
        has_saved = models_dir is not None and models_dir.exists()
        inner = Cloud.load(models_dir) if has_saved else Cloud.random_init(seed=seed)
        return cls(inner)

    async def __aenter__(self) -> "AsyncCloud":
        """Enter the async context; construction already did all setup."""
        return self

    async def __aexit__(self, *args) -> None:
        """Leave the async context, closing the instance."""
        await self.close()

    async def close(self) -> None:
        """Idempotent shutdown; later ping() calls will raise."""
        if self._closed:
            return
        self._closed = True
        # Persisting state on close is deliberately left to the caller.

    async def ping(self, user_input: str) -> CloudResponse:
        """
        Detect pre-semantic emotion, serialized behind the field lock.

        Raises:
            RuntimeError: if the instance has been closed.
        """
        if self._closed:
            raise RuntimeError("AsyncCloud is closed")

        async with self._lock:
            return await self._sync.ping(user_input)

    async def save(self, models_dir: Path) -> None:
        """Persist all components while holding the field lock."""
        async with self._lock:
            self._sync.save(models_dir)

    def param_count(self) -> int:
        """Total parameters (pure read — no lock required)."""
        return self._sync.param_count()

    @property
    def anchors(self):
        """Emotion anchors of the wrapped Cloud."""
        return self._sync.anchors

    @property
    def user_cloud(self):
        """User cloud of the wrapped Cloud (for stats)."""
        return self._sync.user_cloud
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
if __name__ == "__main__":
    print("=" * 60)
    print(" CLOUD v3.0 — Main Orchestrator")
    print("=" * 60)
    print()

    # Initialize
    print("Initializing CLOUD...")
    cloud = Cloud.random_init(seed=42)
    print(f" Total params: {cloud.param_count():,}")
    print()

    # Test inputs
    test_inputs = [
        "I'm terrified and anxious about what's coming",
        "You bring me such warmth and love darling",
        "This makes me furious with rage",
        "I feel completely empty and void inside",
        "I'm curious about what happens next",
        "Overwhelming shame and guilt consume me",
    ]

    print("Testing CLOUD pings:")
    print("=" * 60)

    for text in test_inputs:
        response = cloud.ping_sync(text)

        print(f"\nInput: \"{text}\"")
        print(f" Primary: {response.primary}")
        print(f" Secondary: {response.secondary}")
        print(f" Iterations: {response.iterations}")
        print(f" Chambers:")
        for chamber, activation in response.chamber_activations.items():
            bar = "█" * int(activation * 30)
            print(f" {chamber:6s}: {activation:.3f} {bar}")

    print()
    print("=" * 60)

    # Show user cloud evolution
    print("\nUser emotional fingerprint (after all inputs):")
    dominant = cloud.user_cloud.get_dominant_emotions(5)
    for idx, strength in dominant:
        word = cloud.anchors[idx]
        bar = "█" * int(strength * 30)
        print(f" {word:15s}: {strength:.3f} {bar}")

    print()
    print("=" * 60)

    # Test save/load
    print("\nTesting save/load:")
    models_dir = Path("./models")
    cloud.save(models_dir)

    cloud2 = Cloud.load(models_dir)
    response2 = cloud2.ping_sync(test_inputs[0])

    # Fix: `response2` was previously unused and the checkmark was printed
    # inside a placeholder-free f-string; now the reloaded model's output is
    # actually surfaced.
    print(f" Save/load ✓ (reloaded primary: {response2.primary})")
    print()

    print("=" * 60)
    print(" CLOUD v3.0 operational.")
    print(" Something fires BEFORE meaning arrives.")
    print("=" * 60)
|
cloud/cooccur_cloud.py
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# cooccur.py — Co-occurrence based generation bias
|
| 3 |
+
#
|
| 4 |
+
# Inspired by Leo's trigram graphs and co-occurrence matrices.
|
| 5 |
+
# This module extracts statistical patterns from a corpus and uses them
|
| 6 |
+
# to bias token probabilities during generation — NO TRAINING REQUIRED.
|
| 7 |
+
#
|
| 8 |
+
# The idea: words/characters that appear together in the corpus
|
| 9 |
+
# should have higher probability of appearing together in generation.
|
| 10 |
+
# "Words that resonate together, stay together."
|
| 11 |
+
#
|
| 12 |
+
# Usage:
|
| 13 |
+
# from haze.cooccur import CooccurField
|
| 14 |
+
# field = CooccurField.from_text(corpus, vocab)
|
| 15 |
+
# biased_logits = field.bias_logits(logits, context)
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
import numpy as np
|
| 19 |
+
from typing import Dict, List, Optional, Tuple, TYPE_CHECKING
|
| 20 |
+
from collections import defaultdict, Counter
|
| 21 |
+
from dataclasses import dataclass, field
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from .haze import Vocab
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@dataclass
class CooccurField:
    """
    Co-occurrence field for corpus-biased generation.

    Tracks:
      - Bigram counts: P(token_j | token_i)
      - Trigram counts: P(token_k | token_i, token_j)
      - Co-occurrence within window: which tokens appear near each other

    Uses these statistics to bias logits during generation,
    making output more consistent with corpus patterns.
    """

    vocab_size: int
    bigram_counts: Dict[int, Counter] = field(default_factory=dict)
    trigram_counts: Dict[Tuple[int, int], Counter] = field(default_factory=dict)
    cooccur_counts: Dict[int, Counter] = field(default_factory=dict)
    token_counts: Counter = field(default_factory=Counter)
    total_tokens: int = 0
    window_size: int = 5

    @classmethod
    def from_text(
        cls,
        text: str,
        vocab: "Vocab",
        window_size: int = 5,
    ) -> "CooccurField":
        """
        Build co-occurrence field from corpus text.

        Args:
            text: corpus text
            vocab: vocabulary for encoding
            window_size: context window for co-occurrence

        Returns:
            CooccurField with computed statistics
        """
        tokens = vocab.encode(text)
        n = len(tokens)

        bigram_counts: Dict[int, Counter] = defaultdict(Counter)
        trigram_counts: Dict[Tuple[int, int], Counter] = defaultdict(Counter)
        cooccur_counts: Dict[int, Counter] = defaultdict(Counter)
        token_counts: Counter = Counter(tokens)

        # Bigram counts: P(next | current)
        for i in range(n - 1):
            bigram_counts[tokens[i]][tokens[i + 1]] += 1

        # Trigram counts: P(next | prev, current)
        for i in range(n - 2):
            trigram_counts[(tokens[i], tokens[i + 1])][tokens[i + 2]] += 1

        # Co-occurrence within a symmetric window around each position
        for i in range(n):
            center = tokens[i]
            start = max(0, i - window_size)
            end = min(n, i + window_size + 1)
            for j in range(start, end):
                if i != j:
                    cooccur_counts[center][tokens[j]] += 1

        return cls(
            vocab_size=vocab.vocab_size,
            bigram_counts=dict(bigram_counts),
            trigram_counts=dict(trigram_counts),
            cooccur_counts=dict(cooccur_counts),
            token_counts=token_counts,
            total_tokens=n,
            window_size=window_size,
        )

    def _counts_to_probs(self, counts: Counter) -> np.ndarray:
        """Normalize a Counter into a probability vector over the vocab.

        Tokens outside [0, vocab_size) are silently dropped.
        """
        probs = np.zeros(self.vocab_size, dtype=np.float32)
        total = sum(counts.values())
        if total > 0:
            for token, count in counts.items():
                if token < self.vocab_size:
                    probs[token] = count / total
        return probs

    def get_bigram_probs(self, current: int) -> np.ndarray:
        """
        Get probability distribution for next token given current.

        Returns uniform distribution if current token not seen.
        """
        if current in self.bigram_counts:
            probs = self._counts_to_probs(self.bigram_counts[current])
        else:
            probs = np.zeros(self.vocab_size, dtype=np.float32)

        # If no bigram data, return uniform
        if probs.sum() == 0:
            probs = np.ones(self.vocab_size, dtype=np.float32) / self.vocab_size

        return probs

    def get_trigram_probs(self, prev: int, current: int) -> np.ndarray:
        """
        Get probability distribution for next token given (prev, current).

        Falls back to bigram if trigram not found.
        """
        key = (prev, current)
        if key in self.trigram_counts:
            probs = self._counts_to_probs(self.trigram_counts[key])
        else:
            probs = np.zeros(self.vocab_size, dtype=np.float32)

        # Fallback to bigram (also covers an all-out-of-vocab trigram row)
        if probs.sum() == 0:
            return self.get_bigram_probs(current)

        return probs

    def get_cooccur_bias(self, context: List[int]) -> np.ndarray:
        """
        Get bias vector based on co-occurrence with recent context.

        Tokens that frequently appear near context tokens get higher bias.
        """
        bias = np.zeros(self.vocab_size, dtype=np.float32)

        for ctx_token in context[-self.window_size:]:
            if ctx_token in self.cooccur_counts:
                counts = self.cooccur_counts[ctx_token]
                total = sum(counts.values())
                for token, count in counts.items():
                    if token < self.vocab_size:
                        bias[token] += count / total

        # Normalize (uniform fallback when the context is unknown)
        if bias.sum() > 0:
            bias = bias / bias.sum()
        else:
            bias = np.ones(self.vocab_size, dtype=np.float32) / self.vocab_size

        return bias

    def bias_logits(
        self,
        logits: np.ndarray,
        context: List[int],
        alpha: float = 0.3,
        mode: str = "trigram",
    ) -> np.ndarray:
        """
        Bias logits using corpus statistics.

        Args:
            logits: raw model logits (vocab_size,)
            context: list of recent token indices
            alpha: blend factor (0 = pure model, 1 = pure corpus)
            mode: "bigram", "trigram", "cooccur", or "blend"

        Returns:
            biased logits
        """
        if len(context) == 0:
            return logits

        # Get corpus-based distribution
        if mode == "bigram":
            corpus_probs = self.get_bigram_probs(context[-1])
        elif mode == "trigram" and len(context) >= 2:
            corpus_probs = self.get_trigram_probs(context[-2], context[-1])
        elif mode == "cooccur":
            corpus_probs = self.get_cooccur_bias(context)
        elif mode == "blend":
            # Blend trigram (or bigram when context is short) with co-occurrence
            if len(context) >= 2:
                trigram = self.get_trigram_probs(context[-2], context[-1])
            else:
                trigram = self.get_bigram_probs(context[-1])
            cooccur = self.get_cooccur_bias(context)
            corpus_probs = 0.6 * trigram + 0.4 * cooccur
        else:
            # Unknown mode (or "trigram" with a 1-token context): bigram
            corpus_probs = self.get_bigram_probs(context[-1])

        # Convert corpus probs to log space (epsilon avoids log(0))
        corpus_logits = np.log(corpus_probs + 1e-10)

        # Blend with model logits
        return (1 - alpha) * logits + alpha * corpus_logits

    def sample_from_corpus(
        self,
        context: List[int],
        temperature: float = 1.0,
        mode: str = "trigram",
    ) -> int:
        """
        Sample next token purely from corpus statistics.

        Useful for testing corpus patterns without model.
        """
        if mode == "trigram" and len(context) >= 2:
            probs = self.get_trigram_probs(context[-2], context[-1])
        elif len(context) >= 1:
            probs = self.get_bigram_probs(context[-1])
        else:
            # Cold start: sample from unigram frequencies
            probs = np.zeros(self.vocab_size, dtype=np.float32)
            for token, count in self.token_counts.items():
                if token < self.vocab_size:
                    probs[token] = count
            total = probs.sum()
            if total > 0:
                probs = probs / total
            else:
                # Fix: an empty/out-of-vocab corpus previously divided by
                # zero here; fall back to a uniform distribution instead.
                probs = np.ones(self.vocab_size, dtype=np.float32) / self.vocab_size

        # Apply temperature sharpening/flattening
        if temperature != 1.0:
            probs = np.power(probs, 1.0 / temperature)
            probs = probs / probs.sum()

        return int(np.random.choice(self.vocab_size, p=probs))

    def generate_from_corpus(
        self,
        seed: List[int],
        length: int = 100,
        temperature: float = 0.8,
        mode: str = "trigram",
    ) -> List[int]:
        """
        Generate tokens purely from corpus statistics.

        No model needed! Just trigram/bigram chains.
        This is how Leo generates - pure field dynamics.
        """
        tokens = list(seed)

        for _ in range(length):
            next_token = self.sample_from_corpus(
                tokens,
                temperature=temperature,
                mode=mode,
            )
            tokens.append(next_token)

        return tokens

    def stats(self) -> Dict:
        """Return field statistics."""
        return {
            "total_tokens": self.total_tokens,
            "unique_tokens": len(self.token_counts),
            "bigram_contexts": len(self.bigram_counts),
            "trigram_contexts": len(self.trigram_counts),
            "cooccur_contexts": len(self.cooccur_counts),
            "window_size": self.window_size,
        }
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def demo_cooccur(corpus_path: str = "text.txt") -> None:
    """
    Demo co-occurrence field generation.

    Shows that you can generate text purely from corpus statistics —
    no neural network involved.

    Args:
        corpus_path: path to a UTF-8 text corpus file.
    """
    from pathlib import Path

    # Import Vocab (package-relative first, falling back to script mode)
    try:
        from .haze import Vocab
    except ImportError:
        from haze import Vocab

    corpus_path = Path(corpus_path)
    if not corpus_path.exists():
        print(f"[error] {corpus_path} not found")
        return

    # Fix: read explicitly as UTF-8 so the demo does not depend on the
    # platform's locale encoding.
    text = corpus_path.read_text(encoding="utf-8")
    vocab = Vocab.from_text(text)

    print("=" * 60)
    print(" CO-OCCURRENCE FIELD DEMO")
    print("=" * 60)
    print(f" corpus: {corpus_path} ({len(text)} chars)")
    print(f" vocab: {vocab.vocab_size} unique tokens")
    print()

    # Build field
    field = CooccurField.from_text(text, vocab, window_size=5)
    stats = field.stats()
    print(f" field stats:")
    for k, v in stats.items():
        print(f" {k}: {v}")
    print()

    # Generate from different seeds
    seeds = ["the haze", "darling", "love"]

    print("=" * 60)
    print(" PURE CORPUS GENERATION (no model, just statistics)")
    print("=" * 60)

    for seed_text in seeds:
        seed_tokens = vocab.encode(seed_text)

        generated = field.generate_from_corpus(
            seed_tokens,
            length=80,
            temperature=0.7,
            mode="trigram",
        )

        output = vocab.decode(generated)
        print(f"\n>>> \"{seed_text}\"")
        print(output)

    print()
    print("=" * 60)
    print(" this is PURE CORPUS STATISTICS. no neural network.")
    print(" like leo's trigram graphs. resonance without weights.")
    print("=" * 60)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
if __name__ == "__main__":
    # Run the standalone demo against the default corpus file ("text.txt").
    demo_cooccur()
|
cloud/feedback.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# feedback.py — Feedback Loop for CLOUD v3.1
|
| 3 |
+
#
|
| 4 |
+
# Closed-loop learning: HAZE output quality → coupling matrix adjustment
|
| 5 |
+
# 0 new params! Just adjusts the existing 4×4 coupling matrix.
|
| 6 |
+
#
|
| 7 |
+
# Coherence measurement:
|
| 8 |
+
# - Sentence completeness
|
| 9 |
+
# - Entropy balance
|
| 10 |
+
# - Prediction error
|
| 11 |
+
#
|
| 12 |
+
# Update rule:
|
| 13 |
+
# - If HAZE output is coherent → strengthen current coupling
|
| 14 |
+
# - If HAZE output is incoherent → weaken current coupling
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
from typing import Dict, Tuple
|
| 18 |
+
import re
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def measure_coherence(text: str) -> Dict[str, float]:
    """
    Score how coherent a piece of generated text looks.

    Four heuristic signals, each in [0, 1], are blended into one score:
      - sentence_completeness: 1.0 when the text ends in '.', '!' or '?'
      - length_reasonable: full credit for 50-500 characters, falloff outside
      - repetition_penalty: fraction of distinct words (1.0 = no repeats)
      - entropy_balance: character-level Shannon entropy, normalized by ~5 bits

    Args:
        text: candidate output text (may be empty).

    Returns:
        dict with each individual metric plus the weighted "coherence" score.
    """
    # Proper sentence endings get full credit; anything else partial credit.
    completeness = 1.0 if re.search(r'[.!?]$', text.strip()) else 0.3

    # Length band: linear ramp up to 50 chars, flat to 500, then decay
    # (never below 0.3 so very long text is penalized but not zeroed).
    n_chars = len(text)
    if n_chars < 50:
        length_ok = n_chars / 50.0
    elif n_chars <= 500:
        length_ok = 1.0
    else:
        length_ok = max(0.3, 1.0 - (n_chars - 500) / 500)

    # Word-level uniqueness: higher means fewer repeated words.
    lowered = text.lower()
    tokens = lowered.split()
    uniqueness = len(set(tokens)) / len(tokens) if tokens else 0.0

    # Character diversity via Shannon entropy over the lowercased text.
    if n_chars > 0:
        freq: Dict[str, int] = {}
        for ch in lowered:
            freq[ch] = freq.get(ch, 0) + 1
        probs = np.array(list(freq.values())) / n_chars
        # Small epsilon guards log2(0); typical English entropy is 4-5 bits.
        shannon = -np.sum(probs * np.log2(probs + 1e-10))
        diversity = min(1.0, shannon / 5.0)
    else:
        diversity = 0.0

    # Weighted blend: completeness and uniqueness dominate.
    score = (
        0.3 * completeness
        + 0.2 * length_ok
        + 0.3 * uniqueness
        + 0.2 * diversity
    )

    return {
        "sentence_completeness": completeness,
        "length_reasonable": length_ok,
        "repetition_penalty": uniqueness,
        "entropy_balance": diversity,
        "coherence": score,
    }
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def compute_coupling_gradient(
    chamber_activations: Dict[str, float],
    coherence_score: float,
    learning_rate: float = 0.01,
) -> np.ndarray:
    """
    Compute gradient for coupling matrix update.

    Idea: If output is coherent, reinforce the current chamber pattern.
    If output is incoherent, dampen it.

    Args:
        chamber_activations: {"FEAR": 0.8, "LOVE": 0.2, ...}
        coherence_score: 0.0-1.0
        learning_rate: step size

    Returns:
        (4, 4) gradient matrix for coupling update
    """
    # Deferred import so the module loads without anchors; the absolute
    # fallback lets this also work when feedback.py runs as a script
    # (the relative form fails outside a package context, and this file's
    # own __main__ demo calls into this function).
    try:
        from .anchors import CHAMBER_NAMES
    except ImportError:
        from anchors import CHAMBER_NAMES

    # Order activations canonically so rows/columns line up with the
    # coupling matrix.
    activations = np.array([
        chamber_activations[name]
        for name in CHAMBER_NAMES
    ])

    # Outer product captures the co-activation pattern of the chambers:
    # coherent output → positive update (strengthen this pattern),
    # incoherent output → negative update (weaken it).
    gradient_direction = np.outer(activations, activations)

    # Signed error around the 0.5 neutral point scales the step.
    error = coherence_score - 0.5

    gradient = learning_rate * error * gradient_direction

    # Chambers never self-couple: keep the diagonal at zero.
    np.fill_diagonal(gradient, 0.0)

    return gradient
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def update_coupling(
    coupling: np.ndarray,
    chamber_activations: Dict[str, float],
    coherence_score: float,
    learning_rate: float = 0.01,
    clip_range: Tuple[float, float] = (-1.0, 1.0),
) -> np.ndarray:
    """
    Apply one feedback step to the chamber coupling matrix.

    Args:
        coupling: current (4, 4) coupling matrix
        chamber_activations: chamber activations that led to this output
        coherence_score: quality of HAZE output
        learning_rate: update step size
        clip_range: min/max coupling values

    Returns:
        updated coupling matrix
    """
    step = compute_coupling_gradient(
        chamber_activations,
        coherence_score,
        learning_rate,
    )

    # Take the gradient step, then clamp every weight into the allowed band.
    lo, hi = clip_range
    updated = np.clip(coupling + step, lo, hi)

    # The diagonal must stay zero: chambers do not couple to themselves.
    np.fill_diagonal(updated, 0.0)

    return updated
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
if __name__ == "__main__":
    # Demo: exercise coherence scoring, then run one positive and one
    # negative feedback step against the chamber coupling matrix.
    print("=" * 60)
    print(" CLOUD v3.1 — Feedback Loop")
    print("=" * 60)
    print()

    # Test coherence measurement
    # Spread of cases: complete sentence, no ending punctuation,
    # heavy repetition, over-long rambling, and empty input.
    test_texts = [
        "The haze settles over everything, gentle and knowing.",
        "I love you darling you're the best",
        "the the the the",
        "This is a very very very very very very very long sentence that goes on and on and on and on without really saying much of anything at all just repeating the same patterns over and over again until it becomes completely meaningless and incoherent.",
        "",
    ]

    print("Testing coherence measurement:")
    print("-" * 60)
    for text in test_texts:
        metrics = measure_coherence(text)
        # Truncate long samples for display only.
        display = text[:60] + "..." if len(text) > 60 else text
        print(f"\nText: \"{display}\"")
        print(f" Coherence: {metrics['coherence']:.3f}")
        print(f" Completeness: {metrics['sentence_completeness']:.2f}")
        print(f" Length OK: {metrics['length_reasonable']:.2f}")
        print(f" Repetition: {metrics['repetition_penalty']:.2f}")
        print(f" Entropy: {metrics['entropy_balance']:.2f}")

    print()
    print("=" * 60)

    # Test coupling update
    print("\nTesting coupling matrix update:")
    print("-" * 60)

    # Deferred import: only needed for the demo, not for library use.
    from cloud.anchors import COUPLING_MATRIX
    coupling = np.array(COUPLING_MATRIX, dtype=np.float32)

    # A fixed activation pattern so both update directions are comparable.
    chamber_activations = {
        "FEAR": 0.8,
        "LOVE": 0.2,
        "RAGE": 0.6,
        "VOID": 0.3,
    }

    print("\nOriginal coupling:")
    print(coupling)

    # Simulate good output
    # coherence > 0.5 should strengthen the active pattern.
    coherence_good = 0.9
    updated_good = update_coupling(coupling, chamber_activations, coherence_good, learning_rate=0.1)

    print(f"\nAfter coherent output (coherence={coherence_good}):")
    print(updated_good)
    print(f"Change: {np.abs(updated_good - coupling).sum():.4f}")

    # Simulate bad output
    # coherence < 0.5 should weaken the active pattern.
    coherence_bad = 0.2
    updated_bad = update_coupling(coupling, chamber_activations, coherence_bad, learning_rate=0.1)

    print(f"\nAfter incoherent output (coherence={coherence_bad}):")
    print(updated_bad)
    print(f"Change: {np.abs(updated_bad - coupling).sum():.4f}")

    print()
    print("=" * 60)
    print(" Feedback loop operational. Closed-loop learning!")
    print("=" * 60)
|
cloud/observer.py
ADDED
|
@@ -0,0 +1,309 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# observer.py — Meta-Observer MLP (200K model)
|
| 3 |
+
#
|
| 4 |
+
# The "mind" watching the "body".
|
| 5 |
+
#
|
| 6 |
+
# Input (207D):
|
| 7 |
+
# - resonances (100D): raw emotion resonances
|
| 8 |
+
# - chamber_activations (6D): stabilized chamber outputs
|
| 9 |
+
# - iterations (1D): cross-fire convergence speed signal
|
| 10 |
+
# - user_fingerprint (100D): temporal emotional history
|
| 11 |
+
#
|
| 12 |
+
# Output (100D):
|
| 13 |
+
# - logits for secondary emotion word
|
| 14 |
+
#
|
| 15 |
+
# Architecture:
|
| 16 |
+
# 207 → 128 (swish) → 64 (swish) → 100 (raw logits)
|
| 17 |
+
#
|
| 18 |
+
# Total params: ~35K
|
| 19 |
+
|
| 20 |
+
from __future__ import annotations
|
| 21 |
+
import asyncio
|
| 22 |
+
import numpy as np
|
| 23 |
+
from pathlib import Path
|
| 24 |
+
from dataclasses import dataclass
|
| 25 |
+
from typing import Optional
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def swish(x: np.ndarray) -> np.ndarray:
    """Swish activation, x * sigmoid(x).

    The exponent is clipped to [-20, 20] so np.exp never overflows;
    beyond that range the sigmoid is numerically 0 or 1 anyway.
    """
    clipped = np.clip(x, -20, 20)
    return x / (1.0 + np.exp(-clipped))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class MetaObserver:
    """
    Meta-observer MLP: watches chamber dynamics and predicts a secondary emotion.

    A three-layer fully connected network (207→128→64→100) with swish
    activations on both hidden layers. The 207-dim input packs together:
      - 100 raw emotion resonances
      - 6 stabilized chamber activations
      - 1 cross-fire convergence speed (iteration count)
      - 100 user fingerprint values (temporal emotional history)

    Parameter budget:
      W1 (207×128) + b1 (128) + W2 (128×64) + b2 (64) + W3 (64×100) + b3 (100)
      = 26,496 + 128 + 8,192 + 64 + 6,400 + 100 ≈ 41K params.
    """

    W1: np.ndarray  # (207, 128) input → first hidden layer
    b1: np.ndarray  # (128,)
    W2: np.ndarray  # (128, 64) first hidden → second hidden layer
    b2: np.ndarray  # (64,)
    W3: np.ndarray  # (64, 100) second hidden → output logits
    b3: np.ndarray  # (100,)

    @classmethod
    def random_init(cls, seed: Optional[int] = None) -> "MetaObserver":
        """Create a fresh observer with sqrt(2/fan_in)-scaled random weights.

        NOTE: the draw order below is fixed (W1, W2, W3); changing it would
        change the network produced for a given seed.
        """
        if seed is not None:
            np.random.seed(seed)

        mats = []
        for fan_in, fan_out in ((207, 128), (128, 64), (64, 100)):
            mats.append(np.random.randn(fan_in, fan_out) * np.sqrt(2.0 / fan_in))

        return cls(
            W1=mats[0], b1=np.zeros(128),
            W2=mats[1], b2=np.zeros(64),
            W3=mats[2], b3=np.zeros(100),
        )

    def forward(
        self,
        resonances: np.ndarray,
        chamber_activations: np.ndarray,
        iterations: float,
        user_fingerprint: np.ndarray,
    ) -> np.ndarray:
        """
        Run the MLP and return raw logits over the 100 secondary emotions.

        Args:
            resonances: (100,) raw emotion resonances
            chamber_activations: (6,) stabilized chamber outputs
            iterations: scalar, cross-fire convergence speed
            user_fingerprint: (100,) temporal emotional history

        Returns:
            (100,) unnormalized logits for secondary emotion selection
        """
        # Pack every signal into a single 207-dim feature vector.
        features = np.concatenate([
            resonances,
            chamber_activations,
            np.array([iterations]),
            user_fingerprint,
        ])

        hidden_a = swish(features @ self.W1 + self.b1)   # 207 → 128
        hidden_b = swish(hidden_a @ self.W2 + self.b2)   # 128 → 64
        return hidden_b @ self.W3 + self.b3              # 64 → 100 logits

    def predict_secondary(
        self,
        resonances: np.ndarray,
        chamber_activations: np.ndarray,
        iterations: float,
        user_fingerprint: np.ndarray,
        temperature: float = 1.0,
    ) -> int:
        """
        Sample a secondary emotion index from the softmax over the logits.

        Args:
            resonances: (100,) raw emotion resonances
            chamber_activations: (6,) stabilized chamber outputs
            iterations: cross-fire convergence speed
            user_fingerprint: (100,) user emotional history
            temperature: sampling temperature (>1 flattens, <1 sharpens)

        Returns:
            sampled emotion index in [0, 100)
        """
        scaled = self.forward(resonances, chamber_activations, iterations, user_fingerprint) / temperature

        # Numerically stable softmax: subtract the max before exponentiating.
        weights = np.exp(scaled - scaled.max())
        probs = weights / weights.sum()

        return int(np.random.choice(100, p=probs))

    def param_count(self) -> int:
        """Total number of scalar parameters across all six arrays."""
        return sum(
            arr.size
            for arr in (self.W1, self.b1, self.W2, self.b2, self.W3, self.b3)
        )

    def save(self, path: Path) -> None:
        """Persist all six weight arrays to a single .npz archive."""
        np.savez(path, W1=self.W1, b1=self.b1, W2=self.W2, b2=self.b2, W3=self.W3, b3=self.b3)
        print(f"[observer] saved to {path}")

    @classmethod
    def load(cls, path: Path) -> "MetaObserver":
        """
        Restore an observer from a .npz archive.

        Archives produced by the old 2-layer observer lack W3; those are
        replaced by a fresh 3-layer network seeded with 42 so repeated loads
        of the same old file stay deterministic.
        """
        data = np.load(path)
        if "W3" not in data:
            print(f"[observer] old format detected, reinitializing with new architecture (seed=42)")
            return cls.random_init(seed=42)

        print(f"[observer] loaded from {path}")
        return cls(**{key: data[key] for key in ("W1", "b1", "W2", "b2", "W3", "b3")})
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class AsyncMetaObserver:
    """
    Async facade over MetaObserver.

    Every operation that touches the underlying weights is serialized
    through a single asyncio.Lock, following HAZE's "field lock"
    discipline: coherence via explicit operation ordering and atomicity.
    """

    def __init__(self, observer: MetaObserver):
        # Wrapped synchronous observer plus the lock guarding it.
        self._sync = observer
        self._lock = asyncio.Lock()

    @classmethod
    def random_init(cls, seed: Optional[int] = None) -> "AsyncMetaObserver":
        """Wrap a freshly random-initialized MetaObserver."""
        return cls(MetaObserver.random_init(seed=seed))

    @classmethod
    def load(cls, path: Path) -> "AsyncMetaObserver":
        """Wrap an observer restored from disk."""
        return cls(MetaObserver.load(path))

    async def forward(
        self,
        resonances: np.ndarray,
        chamber_activations: np.ndarray,
        iterations: float,
        user_fingerprint: np.ndarray,
    ) -> np.ndarray:
        """Lock-protected forward pass; see MetaObserver.forward."""
        async with self._lock:
            return self._sync.forward(resonances, chamber_activations, iterations, user_fingerprint)

    async def predict_secondary(
        self,
        resonances: np.ndarray,
        chamber_activations: np.ndarray,
        iterations: float,
        user_fingerprint: np.ndarray,
        temperature: float = 1.0,
    ) -> int:
        """Lock-protected sampling; see MetaObserver.predict_secondary."""
        async with self._lock:
            return self._sync.predict_secondary(
                resonances, chamber_activations, iterations, user_fingerprint, temperature
            )

    async def save(self, path: Path) -> None:
        """Persist the wrapped observer while holding the lock."""
        async with self._lock:
            self._sync.save(path)

    def param_count(self) -> int:
        """Parameter count; read-only, so no lock is required."""
        return self._sync.param_count()
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
if __name__ == "__main__":
    # Smoke test: initialize the observer, run a forward pass, sample
    # secondary emotions at several temperatures, and round-trip to disk.
    print("=" * 60)
    print(" CLOUD v4.0 — Meta-Observer (200K model)")
    print("=" * 60)
    print()

    # Initialize
    observer = MetaObserver.random_init(seed=42)
    print(f"Initialized meta-observer")
    print(f"Total params: {observer.param_count():,}")
    print()

    # Test forward pass
    # Random but fixed-shape inputs matching the 207-dim contract.
    print("Testing forward pass:")
    resonances = np.random.rand(100).astype(np.float32)
    chamber_activations = np.random.rand(6).astype(np.float32)
    iterations = 5.0
    user_fingerprint = np.random.rand(100).astype(np.float32) * 0.1

    logits = observer.forward(resonances, chamber_activations, iterations, user_fingerprint)
    print(f" Input: 100D resonances + 6D chambers + 1D iterations + 100D fingerprint = 207D")
    print(f" Output: {logits.shape} logits")
    print(f" Logits range: [{logits.min():.3f}, {logits.max():.3f}]")
    print()

    # Test prediction
    # Sweep temperature to show sharper vs flatter sampling.
    print("Testing secondary emotion prediction:")
    for temp in [0.5, 1.0, 2.0]:
        secondary_idx = observer.predict_secondary(
            resonances, chamber_activations, iterations, user_fingerprint, temperature=temp
        )
        print(f" temperature={temp:.1f} → secondary_idx={secondary_idx}")
    print()

    # Test what observer sees in convergence speed
    # Only the scalar iterations input changes between these cases.
    print("Testing convergence speed signal:")
    test_cases = [
        ("fast convergence (2 iters)", 2.0),
        ("medium convergence (5 iters)", 5.0),
        ("slow convergence (10 iters)", 10.0),
    ]

    for name, iters in test_cases:
        logits = observer.forward(resonances, chamber_activations, iters, user_fingerprint)
        # Highest three logits, strongest first.
        top3 = np.argsort(logits)[-3:][::-1]
        print(f" {name}:")
        print(f" top 3 secondary candidates: {top3}")
    print()

    # Test save/load
    print("Testing save/load:")
    path = Path("./models/observer.npz")
    path.parent.mkdir(parents=True, exist_ok=True)
    observer.save(path)

    observer2 = MetaObserver.load(path)
    logits2 = observer2.forward(resonances, chamber_activations, iterations, user_fingerprint)

    # The reloaded network must reproduce the original logits exactly.
    match = np.allclose(logits, logits2)
    print(f" Save/load {'✓' if match else '✗'}")
    print()

    print("=" * 60)
    print(" Meta-observer operational. Mind watching body. 41K params.")
    print("=" * 60)
|
cloud/requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CLOUD v3.1 Dependencies
|
| 2 |
+
# Pre-semantic emotion detection system (~50K params)
|
| 3 |
+
|
| 4 |
+
# Core dependencies
|
| 5 |
+
numpy>=1.24.0 # For all neural network operations
|
| 6 |
+
sentencepiece>=0.1.99 # Optional: for RRPRAM tokenization (graceful fallback if missing)
|
| 7 |
+
|
| 8 |
+
# Development/Training dependencies
|
| 9 |
+
pytest>=7.4.0 # For running tests
|
| 10 |
+
pytest-asyncio>=0.21.0 # For async test support
|
| 11 |
+
|
| 12 |
+
# Note: HAZE integration is optional
|
| 13 |
+
# If using with HAZE, install haze/ package dependencies separately
|
cloud/resonance.py
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# resonance.py — Resonance Layer (Weightless Geometry)
|
| 3 |
+
#
|
| 4 |
+
# The "body" of CLOUD. Pure geometry, NO TRAINING.
|
| 5 |
+
#
|
| 6 |
+
# Input: user text
|
| 7 |
+
# Output: 100D resonance vector
|
| 8 |
+
#
|
| 9 |
+
# Process:
|
| 10 |
+
# 1. Tokenize with RRPRAM
|
| 11 |
+
# 2. Compute co-occurrence scores with 100 emotion anchors
|
| 12 |
+
# 3. Return resonance vector (weightless!)
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
import numpy as np
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import Dict, Optional, List
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
|
| 20 |
+
from .rrpram_cloud import RRPRAMVocab
|
| 21 |
+
from .cooccur_cloud import CooccurField
|
| 22 |
+
from .anchors import get_all_anchors
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@dataclass
class ResonanceLayer:
    """
    Weightless resonance computation via co-occurrence geometry.

    Uses RRPRAM tokenizer + CooccurField to measure how much
    input text "resonates" with each of 100 emotion anchors.

    NO TRAINING. Pure corpus statistics.
    """

    vocab: RRPRAMVocab                   # trained RRPRAM tokenizer
    field: CooccurField                  # corpus co-occurrence statistics
    anchors: List[str]                   # 100 emotion anchor words
    anchor_tokens: Dict[str, List[int]]  # pre-tokenized anchors

    @classmethod
    def from_corpus(
        cls,
        corpus_path: Path,
        vocab_size: int = 1000,
        window_size: int = 5,
    ) -> "ResonanceLayer":
        """
        Build resonance layer from emotion corpus.

        Args:
            corpus_path: path to emotional text corpus
            vocab_size: RRPRAM vocab size
            window_size: co-occurrence window

        Returns:
            ResonanceLayer ready for inference
        """
        # Load corpus
        text = corpus_path.read_text()

        # Train RRPRAM tokenizer
        print(f"[resonance] training RRPRAM on {corpus_path}...")
        vocab = RRPRAMVocab.train(
            corpus_path,
            vocab_size=vocab_size,
            model_type="bpe",
            character_coverage=1.0,
        )

        # Build co-occurrence field. CooccurField only needs a duck-typed
        # vocab exposing char_to_idx/idx_to_char/vocab_size plus
        # encode/decode.
        # BUG FIX: the previous namedtuple-based wrapper raised
        # AttributeError when encode/decode were attached afterwards
        # (namedtuple instances are immutable); SimpleNamespace accepts
        # arbitrary attributes.
        print(f"[resonance] building co-occurrence field...")
        from types import SimpleNamespace

        chars = sorted(set(text))
        char_to_idx = {ch: i for i, ch in enumerate(chars)}
        idx_to_char = {i: ch for ch, i in char_to_idx.items()}
        simple_vocab = SimpleNamespace(
            char_to_idx=char_to_idx,
            idx_to_char=idx_to_char,
            vocab_size=len(chars),
            # Unknown characters map to index 0 / "?" respectively.
            encode=lambda t: [char_to_idx.get(ch, 0) for ch in t],
            decode=lambda indices: "".join(idx_to_char.get(i, "?") for i in indices),
        )

        field = CooccurField.from_text(text, simple_vocab, window_size=window_size)

        # Get anchors and pre-tokenize each one so inference never
        # re-encodes them.
        anchors = get_all_anchors()
        anchor_tokens = {
            anchor: vocab.encode(anchor)
            for anchor in anchors
        }

        print(f"[resonance] layer ready with {len(anchors)} anchors")

        return cls(
            vocab=vocab,
            field=field,
            anchors=anchors,
            anchor_tokens=anchor_tokens,
        )

    def compute_resonance(
        self,
        text: str,
        mode: str = "cooccur",
    ) -> np.ndarray:
        """
        Compute 100D resonance vector for input text.

        Args:
            text: user input text
            mode: "cooccur", "bigram", or "trigram" (currently unused;
                  kept for interface compatibility — TODO wire into field)

        Returns:
            (100,) resonance vector, scaled so the strongest anchor is 1.0
        """
        # Tokenize input once; set form for O(1) membership tests.
        input_tokens = self.vocab.encode(text)
        input_set = set(input_tokens)

        resonances = np.zeros(100, dtype=np.float32)

        # Resonance = fraction of an anchor's tokens present in the input.
        # (Renamed local from anchor_tokens to avoid shadowing the field.)
        for i, anchor in enumerate(self.anchors):
            toks = self.anchor_tokens[anchor]
            overlap = len(input_set & set(toks))
            resonances[i] = overlap / max(len(toks), 1)

        # Normalize to [0, 1]
        if resonances.max() > 0:
            resonances = resonances / resonances.max()

        return resonances

    def get_primary_emotion(self, resonances: np.ndarray) -> tuple:
        """
        Get primary (strongest) emotion from resonances.

        Returns:
            (emotion_index, emotion_word, strength)
        """
        idx = int(np.argmax(resonances))
        return idx, self.anchors[idx], float(resonances[idx])
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
@dataclass
class SimpleResonanceLayer:
    """
    Lightweight resonance layer based on plain substring matching.

    Skips RRPRAM entirely: each of the 100 anchor words is matched
    directly against the input text. Cheap enough for bootstrapping.
    """

    anchors: List[str]

    @classmethod
    def create(cls) -> "SimpleResonanceLayer":
        """Build a layer from the standard anchor list (no corpus required)."""
        return cls(anchors=get_all_anchors())

    def compute_resonance(self, text: str) -> np.ndarray:
        """
        Score the input against every anchor by counting substring hits.

        Args:
            text: input text (matching is case-insensitive)

        Returns:
            (100,) resonance vector, normalized to sum to 1 when non-zero
        """
        lowered = text.lower()
        scores = np.zeros(100, dtype=np.float32)

        for idx, word in enumerate(self.anchors):
            # Longer anchors are more specific, so weight hits by length.
            scores[idx] = lowered.count(word.lower()) * len(word)

        # Turn raw counts into a probability-like distribution.
        if scores.sum() > 0:
            scores = scores / scores.sum()

        return scores

    def get_primary_emotion(self, resonances: np.ndarray) -> tuple:
        """Return (index, anchor word, strength) of the strongest resonance."""
        strongest = int(np.argmax(resonances))
        return strongest, self.anchors[strongest], float(resonances[strongest])
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
if __name__ == "__main__":
|
| 209 |
+
print("=" * 60)
|
| 210 |
+
print(" CLOUD v3.0 — Resonance Layer")
|
| 211 |
+
print("=" * 60)
|
| 212 |
+
print()
|
| 213 |
+
|
| 214 |
+
# Use simple resonance layer (no corpus needed)
|
| 215 |
+
print("Creating simple resonance layer...")
|
| 216 |
+
layer = SimpleResonanceLayer.create()
|
| 217 |
+
print(f" Loaded {len(layer.anchors)} emotion anchors")
|
| 218 |
+
print()
|
| 219 |
+
|
| 220 |
+
# Test resonance computation
|
| 221 |
+
test_texts = [
|
| 222 |
+
"I'm feeling such intense fear and anxiety right now",
|
| 223 |
+
"You fill me with love and warmth darling",
|
| 224 |
+
"This makes me so angry and full of rage",
|
| 225 |
+
"I feel empty and numb, completely void of emotion",
|
| 226 |
+
"I'm curious about what happens next",
|
| 227 |
+
"Shame and guilt overwhelm me",
|
| 228 |
+
]
|
| 229 |
+
|
| 230 |
+
print("Testing resonance computation:")
|
| 231 |
+
print("-" * 60)
|
| 232 |
+
|
| 233 |
+
for text in test_texts:
|
| 234 |
+
resonances = layer.compute_resonance(text)
|
| 235 |
+
primary_idx, primary_word, strength = layer.get_primary_emotion(resonances)
|
| 236 |
+
|
| 237 |
+
# Get top 3
|
| 238 |
+
top3_indices = np.argsort(resonances)[-3:][::-1]
|
| 239 |
+
top3 = [(layer.anchors[i], resonances[i]) for i in top3_indices]
|
| 240 |
+
|
| 241 |
+
print(f"\nInput: \"{text}\"")
|
| 242 |
+
print(f" Primary: {primary_word} ({strength:.3f})")
|
| 243 |
+
print(f" Top 3:")
|
| 244 |
+
for word, score in top3:
|
| 245 |
+
bar = "█" * int(score * 40)
|
| 246 |
+
print(f" {word:15s}: {score:.3f} {bar}")
|
| 247 |
+
|
| 248 |
+
print()
|
| 249 |
+
print("=" * 60)
|
| 250 |
+
print(" Resonance layer operational. Geometry without weights.")
|
| 251 |
+
print("=" * 60)
|
cloud/resonance_dreams.py
ADDED
|
@@ -0,0 +1,593 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# resonance_dreams.py — CRAZY EXPERIMENTAL IDEAS
|
| 3 |
+
#
|
| 4 |
+
# "The best ideas sound insane until they work"
|
| 5 |
+
#
|
| 6 |
+
# This module contains experimental features that might be:
|
| 7 |
+
# - Genius
|
| 8 |
+
# - Completely broken
|
| 9 |
+
# - Both simultaneously (quantum superposition of quality)
|
| 10 |
+
#
|
| 11 |
+
# USE AT YOUR OWN RISK. SIDE EFFECTS MAY INCLUDE:
|
| 12 |
+
# - Emergent behavior
|
| 13 |
+
# - Unexpected resonance
|
| 14 |
+
# - Questioning the nature of consciousness
|
| 15 |
+
# - Mild existential crises
|
| 16 |
+
#
|
| 17 |
+
# ============================================================
|
| 18 |
+
#
|
| 19 |
+
# CRAZY IDEA #1: EMOTION HARMONICS
|
| 20 |
+
# What if emotions have overtones like musical notes?
|
| 21 |
+
# Fear at 100Hz also vibrates at 200Hz (anxiety), 300Hz (paranoia)...
|
| 22 |
+
#
|
| 23 |
+
# CRAZY IDEA #2: CROSS-MODEL TELEPATHY
|
| 24 |
+
# CLOUD pings HAZE's internal state. HAZE pings CLOUD's chambers.
|
| 25 |
+
# They develop a shared emotional vocabulary. Emergent empathy.
|
| 26 |
+
#
|
| 27 |
+
# CRAZY IDEA #3: TEMPORAL ECHOES
|
| 28 |
+
# Emotions from the past leak into the present.
|
| 29 |
+
# "You said 'I'm fine' 3 days ago but your VOID was at 0.8"
|
| 30 |
+
#
|
| 31 |
+
# CRAZY IDEA #4: ADVERSARIAL EMOTIONS
|
| 32 |
+
# Train a tiny network to FOOL CLOUD.
|
| 33 |
+
# Then use that to make CLOUD more robust.
|
| 34 |
+
# GAN but for feelings.
|
| 35 |
+
#
|
| 36 |
+
# CRAZY IDEA #5: EMOTION COMPRESSION
|
| 37 |
+
# Compress the 100D resonance to 4D (one per chamber).
|
| 38 |
+
# Then decompress back. What survives? The essence.
|
| 39 |
+
#
|
| 40 |
+
# ============================================================
|
| 41 |
+
|
| 42 |
+
from __future__ import annotations
|
| 43 |
+
import asyncio
|
| 44 |
+
import numpy as np
|
| 45 |
+
from typing import Dict, List, Optional, Tuple
|
| 46 |
+
from dataclasses import dataclass, field
|
| 47 |
+
import math
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# ============================================================
|
| 51 |
+
# CRAZY IDEA #1: EMOTION HARMONICS
|
| 52 |
+
# ============================================================
|
| 53 |
+
|
| 54 |
+
@dataclass
class EmotionHarmonic:
    """
    Emotion as a wave with harmonics.

    Like a musical note, each emotion has a fundamental frequency (the
    primary emotion) plus overtones: related emotions that resonate at
    fractions of the fundamental's strength.  Fear doesn't just activate
    the FEAR chamber — it also slightly activates anxiety, paranoia, etc.
    """

    fundamental: str  # e.g., "fear"
    frequency: float  # arbitrary units
    harmonics: List[Tuple[str, float]] = field(default_factory=list)

    @classmethod
    def from_resonance(cls, resonances: np.ndarray, anchors: List[str]) -> "EmotionHarmonic":
        """
        Extract harmonic structure from a resonance vector.

        The fundamental is the strongest resonance.  Harmonics are other
        anchors whose resonance falls within 10% (of the fundamental) of
        an exact fraction 1/2, 1/3, 1/4 or 1/5 of the fundamental.
        """
        top = int(np.argmax(resonances))
        base_freq = float(resonances[top])

        overtones: List[Tuple[str, float]] = []
        for idx, (anchor, strength) in enumerate(zip(anchors, resonances)):
            if idx == top:
                continue
            # Accept the first fractional ratio this resonance matches.
            for ratio in (0.5, 0.33, 0.25, 0.2):
                if abs(strength - base_freq * ratio) < 0.1 * base_freq:
                    overtones.append((anchor, float(strength)))
                    break

        return cls(
            fundamental=anchors[top],
            frequency=base_freq,
            harmonics=overtones[:5],  # keep at most 5 overtones
        )

    def to_chord(self) -> str:
        """Represent as musical chord notation, e.g. "fear(anx+par)"."""
        if not self.harmonics:
            return self.fundamental
        parts = [name[:3] for name, _ in self.harmonics[:3]]
        return f"{self.fundamental}({'+'.join(parts)})"
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def compute_emotional_chord(resonances: np.ndarray, anchors: List[str]) -> str:
    """
    Compute the "emotional chord" of an input.

    Like music theory but for feelings:
    "fear(anx+par)" = fear fundamental with anxiety and paranoia overtones.
    """
    return EmotionHarmonic.from_resonance(resonances, anchors).to_chord()
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
# ============================================================
|
| 125 |
+
# CRAZY IDEA #2: CHAMBER DREAMS
|
| 126 |
+
# ============================================================
|
| 127 |
+
|
| 128 |
+
@dataclass
class ChamberDream:
    """
    What do the chambers "dream" about when not processing input?

    Between pings, chambers settle into attractor states.  These
    attractors reveal the "personality" of the trained model:
    a high FEAR attractor means an anxious personality, a high
    LOVE attractor a warm one, and so on.
    """

    attractors: Dict[str, float]  # chamber name → resting state
    dream_sequence: List[Dict[str, float]] = field(default_factory=list)

    @classmethod
    def compute_attractors(
        cls,
        chambers,  # CrossFireSystem (only .coupling is used here)
        iterations: int = 100,
    ) -> "ChamberDream":
        """
        Let the chambers evolve without input to find their attractors.

        Starts from a random state and lets a simplified cross-fire
        (linear coupling, no MLP) settle.  Where it settles is the
        attractor, i.e. the personality.
        """
        names = ("FEAR", "LOVE", "RAGE", "VOID")
        state = np.random.rand(4) * 0.5  # random starting point

        frames: List[Dict[str, float]] = []
        for _ in range(iterations):
            # 70% inertia, 30% coupled influence, clamped to [0, 1].
            state = np.clip(0.7 * state + 0.3 * (chambers.coupling @ state), 0, 1)
            frames.append({name: float(value) for name, value in zip(names, state)})

        # The final frame is where the dynamics settled: the attractor.
        return cls(attractors=frames[-1], dream_sequence=frames)

    def personality_type(self) -> str:
        """Derive a personality label from the dominant attractor."""
        labels = {
            "FEAR": "Vigilant Guardian",
            "LOVE": "Warm Connector",
            "RAGE": "Fierce Protector",
            "VOID": "Detached Observer",
        }
        chamber, _ = max(self.attractors.items(), key=lambda item: item[1])
        return labels.get(chamber, "Unknown")
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
# ============================================================
|
| 194 |
+
# CRAZY IDEA #3: EMOTION PHASE SPACE
|
| 195 |
+
# ============================================================
|
| 196 |
+
|
| 197 |
+
class EmotionPhaseSpace:
    """
    Model emotions as trajectories in phase space.

    Like physics: position + velocity = full state.  The same emotion can
    sit on very different trajectories — "not just sad, getting sadder"
    vs "sad but recovering" — and that difference is the signal.
    """

    def __init__(self, history_size: int = 10):
        # Rolling window of recent resonance vectors, oldest first.
        self.history: List[np.ndarray] = []
        self.history_size = history_size

    def update(self, resonances: np.ndarray) -> None:
        """Append a new observation, trimming the window to history_size."""
        self.history.append(resonances.copy())
        while len(self.history) > self.history_size:
            self.history.pop(0)

    def velocity(self) -> Optional[np.ndarray]:
        """Emotional velocity: difference of the last two observations."""
        if len(self.history) < 2:
            return None
        newest, previous = self.history[-1], self.history[-2]
        return newest - previous

    def acceleration(self) -> Optional[np.ndarray]:
        """Emotional acceleration: change in velocity across three steps."""
        if len(self.history) < 3:
            return None
        older_vel = self.history[-2] - self.history[-3]
        newer_vel = self.history[-1] - self.history[-2]
        return newer_vel - older_vel

    def trajectory_type(self) -> str:
        """Classify the current trajectory (stable/moving/coasting/...)."""
        vel = self.velocity()
        if vel is None:
            return "unknown"
        if np.linalg.norm(vel) < 0.01:
            return "stable"

        acc = self.acceleration()
        if acc is None:
            return "moving"
        if np.linalg.norm(acc) < 0.01:
            return "coasting"

        # Acceleration aligned with velocity → speeding up;
        # opposed → slowing down.
        return "accelerating" if np.dot(vel, acc) > 0 else "decelerating"

    def predict_next(self) -> Optional[np.ndarray]:
        """
        Predict the next emotional state with simple kinematics:

            x(t+1) = x(t) + v(t) + 0.5*a(t)

        clipped to [0, 1].  Simple but surprisingly effective.
        """
        if not self.history:
            return None

        forecast = self.history[-1].copy()
        vel = self.velocity()
        if vel is not None:
            forecast = forecast + vel
        acc = self.acceleration()
        if acc is not None:
            forecast = forecast + 0.5 * acc

        return np.clip(forecast, 0, 1)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# ============================================================
|
| 292 |
+
# CRAZY IDEA #4: RESONANCE INTERFERENCE
|
| 293 |
+
# ============================================================
|
| 294 |
+
|
| 295 |
+
def emotional_interference(
    res1: np.ndarray,
    res2: np.ndarray,
    phase_diff: float = 0.0,
) -> np.ndarray:
    """
    Combine two emotional states like interfering waves.

    phase_diff controls the relationship:
      - 0:   fully constructive (emotions add)
      - π:   fully destructive (emotions cancel)
      - π/2: orthogonal (no interaction)
    """
    # I = |A1 + A2*e^(iφ)|², simplified for real-valued amplitudes.
    combined = (
        res1
        + res2 * np.cos(phase_diff)
        + np.sqrt(np.abs(res1 * res2)) * np.sin(phase_diff)
    )
    return np.clip(combined, 0, 1)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def emotional_beat_frequency(
    res1: np.ndarray,
    res2: np.ndarray,
) -> float:
    """
    Compute the "beat frequency" between two emotional states.

    Like two tuning forks slightly out of sync: a high beat frequency
    means emotional dissonance, a low one emotional harmony.
    """
    # Mean absolute difference between the states plays the beat role.
    return float(np.mean(np.abs(res1 - res2)))
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
# ============================================================
|
| 349 |
+
# CRAZY IDEA #5: QUANTUM EMOTION SUPERPOSITION
|
| 350 |
+
# ============================================================
|
| 351 |
+
|
| 352 |
+
@dataclass
class QuantumEmotion:
    """
    Emotion as quantum superposition.

    Before observation (output generation), emotion exists in a
    superposition of all possible states; observation collapses the
    wavefunction.

    This is either profound or pretentious.
    Probably both. Schrödinger's metaphor.
    """

    amplitudes: np.ndarray  # complex amplitude per emotion
    collapsed: bool = False  # True once the state has been observed
    collapsed_state: Optional[int] = None  # index chosen at collapse time

    @classmethod
    def from_resonances(cls, resonances: np.ndarray) -> "QuantumEmotion":
        """
        Create a superposition from a resonance vector.

        Resonances are normalized into probabilities; amplitudes are the
        square roots of those probabilities with random phases attached
        to form a full quantum state.
        """
        probs = resonances / (np.sum(resonances) + 1e-10)
        phases = np.random.uniform(0, 2 * np.pi, len(probs))
        amplitudes = np.sqrt(probs) * np.exp(1j * phases)
        return cls(amplitudes=amplitudes)

    def collapse(self, seed: Optional[int] = None) -> int:
        """
        Collapse the superposition by observation.

        Returns the index of the observed emotion.  Repeated calls return
        the same index (the wavefunction only collapses once).

        Fix: a seeded collapse previously called np.random.seed(), which
        clobbers NumPy's *global* RNG state for all unrelated code.  A
        local RandomState(seed) reproduces the exact same seeded draws
        without that side effect.
        """
        if self.collapsed:
            return self.collapsed_state

        # Probabilities = |amplitude|², renormalized to sum to 1.
        probs = np.abs(self.amplitudes) ** 2
        probs = probs / np.sum(probs)

        rng = np.random if seed is None else np.random.RandomState(seed)
        self.collapsed_state = int(rng.choice(len(probs), p=probs))
        self.collapsed = True

        return self.collapsed_state

    def entangle(self, other: "QuantumEmotion") -> "QuantumEmotion":
        """
        Entangle two quantum emotions.

        Simplified: averages the amplitudes (a true tensor product would
        square the state space).  After entanglement, measuring one
        affects the other.  Spooky action at a distance, but for feelings.
        """
        entangled_amplitudes = (self.amplitudes + other.amplitudes) / np.sqrt(2)
        return QuantumEmotion(amplitudes=entangled_amplitudes)

    def uncertainty(self) -> float:
        """
        Heisenberg-flavoured uncertainty: normalized Shannon entropy of
        the collapse probabilities.  0 = one certain emotion, 1 = fully
        ambiguous.

        Fix: a single-element state previously divided by log2(1) == 0
        and returned nan; it now returns 0.0 (no ambiguity possible).
        """
        probs = np.abs(self.amplitudes) ** 2
        probs = probs / (np.sum(probs) + 1e-10)

        entropy = -np.sum(probs * np.log2(probs + 1e-10))

        max_entropy = np.log2(len(probs))
        if max_entropy == 0:
            return 0.0

        return float(entropy / max_entropy)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
# ============================================================
|
| 437 |
+
# CRAZY IDEA #6: EMOTIONAL STRANGE ATTRACTORS
|
| 438 |
+
# ============================================================
|
| 439 |
+
|
| 440 |
+
class EmotionalLorenzSystem:
    """
    Model emotional dynamics as a Lorenz attractor.

    Three coupled equations produce chaotic but bounded behavior: small
    changes in input lead to dramatically different trajectories.

    This is either a deep insight about emotional chaos, completely
    insane, or the basis for the next breakthrough.  All three
    simultaneously.
    """

    def __init__(
        self,
        sigma: float = 10.0,
        rho: float = 28.0,
        beta: float = 8.0 / 3.0,
    ):
        self.sigma = sigma  # how fast emotions spread
        self.rho = rho      # emotional intensity threshold
        self.beta = beta    # emotional decay rate

        # State vector: (arousal, valence, intensity).
        self.x = 1.0
        self.y = 1.0
        self.z = 1.0

    def step(self, dt: float = 0.01) -> Tuple[float, float, float]:
        """
        Advance the attractor by dt (explicit Euler step).

        Returns (arousal, valence, intensity), each clipped to [0, 1].
        """
        # Classic Lorenz equations.
        dx = self.sigma * (self.y - self.x)
        dy = self.x * (self.rho - self.z) - self.y
        dz = self.x * self.y - self.beta * self.z

        self.x += dx * dt
        self.y += dy * dt
        self.z += dz * dt

        # The attractor typically lives in roughly [-30, 30] (z in [0, 50]).
        scaled = (
            (self.x + 30) / 60,
            (self.y + 30) / 60,
            self.z / 50,
        )
        return tuple(float(np.clip(value, 0, 1)) for value in scaled)

    def perturb(self, resonances: np.ndarray) -> None:
        """Nudge the attractor in directions set by the input resonances."""
        total = np.sum(resonances)
        self.x += (resonances[0] if len(resonances) > 0 else 0) * 0.1
        self.y += (resonances[1] if len(resonances) > 1 else 0) * 0.1
        self.z += total * 0.05

    def trajectory(self, steps: int = 100, dt: float = 0.01) -> List[Tuple[float, float, float]]:
        """Generate a trajectory through emotional phase space."""
        return [self.step(dt) for _ in range(steps)]
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
# ============================================================
|
| 516 |
+
# DEMO
|
| 517 |
+
# ============================================================
|
| 518 |
+
|
| 519 |
+
if __name__ == "__main__":
|
| 520 |
+
print("=" * 60)
|
| 521 |
+
print(" RESONANCE DREAMS — Experimental Ideas")
|
| 522 |
+
print("=" * 60)
|
| 523 |
+
print()
|
| 524 |
+
print(" WARNING: These ideas are EXPERIMENTAL.")
|
| 525 |
+
print(" They might be genius. They might be broken.")
|
| 526 |
+
print(" They are definitely weird.")
|
| 527 |
+
print()
|
| 528 |
+
|
| 529 |
+
# Test harmonics
|
| 530 |
+
print("=" * 60)
|
| 531 |
+
print(" IDEA #1: Emotion Harmonics")
|
| 532 |
+
print("=" * 60)
|
| 533 |
+
|
| 534 |
+
fake_resonances = np.random.rand(100) * 0.3
|
| 535 |
+
fake_resonances[5] = 0.8 # Primary
|
| 536 |
+
fake_resonances[10] = 0.4 # Harmonic at 1/2
|
| 537 |
+
fake_resonances[15] = 0.27 # Harmonic at 1/3
|
| 538 |
+
|
| 539 |
+
fake_anchors = [f"emotion_{i}" for i in range(100)]
|
| 540 |
+
fake_anchors[5] = "fear"
|
| 541 |
+
fake_anchors[10] = "anxiety"
|
| 542 |
+
fake_anchors[15] = "paranoia"
|
| 543 |
+
|
| 544 |
+
chord = compute_emotional_chord(fake_resonances, fake_anchors)
|
| 545 |
+
print(f" Emotional chord: {chord}")
|
| 546 |
+
print()
|
| 547 |
+
|
| 548 |
+
# Test phase space
|
| 549 |
+
print("=" * 60)
|
| 550 |
+
print(" IDEA #3: Emotion Phase Space")
|
| 551 |
+
print("=" * 60)
|
| 552 |
+
|
| 553 |
+
phase_space = EmotionPhaseSpace()
|
| 554 |
+
for i in range(5):
|
| 555 |
+
state = np.random.rand(100) * (0.5 + i * 0.1)
|
| 556 |
+
phase_space.update(state)
|
| 557 |
+
|
| 558 |
+
print(f" Trajectory type: {phase_space.trajectory_type()}")
|
| 559 |
+
vel = phase_space.velocity()
|
| 560 |
+
if vel is not None:
|
| 561 |
+
print(f" Velocity magnitude: {np.linalg.norm(vel):.4f}")
|
| 562 |
+
print()
|
| 563 |
+
|
| 564 |
+
# Test quantum
|
| 565 |
+
print("=" * 60)
|
| 566 |
+
print(" IDEA #5: Quantum Emotion")
|
| 567 |
+
print("=" * 60)
|
| 568 |
+
|
| 569 |
+
qe = QuantumEmotion.from_resonances(fake_resonances)
|
| 570 |
+
print(f" Uncertainty: {qe.uncertainty():.3f}")
|
| 571 |
+
collapsed = qe.collapse(seed=42)
|
| 572 |
+
print(f" Collapsed to: {fake_anchors[collapsed]}")
|
| 573 |
+
print()
|
| 574 |
+
|
| 575 |
+
# Test Lorenz
|
| 576 |
+
print("=" * 60)
|
| 577 |
+
print(" IDEA #6: Emotional Strange Attractor")
|
| 578 |
+
print("=" * 60)
|
| 579 |
+
|
| 580 |
+
lorenz = EmotionalLorenzSystem()
|
| 581 |
+
lorenz.perturb(fake_resonances[:3])
|
| 582 |
+
|
| 583 |
+
trajectory = lorenz.trajectory(steps=10)
|
| 584 |
+
print(" Trajectory (arousal, valence, intensity):")
|
| 585 |
+
for i, (a, v, intensity) in enumerate(trajectory[:5]):
|
| 586 |
+
print(f" t={i}: ({a:.2f}, {v:.2f}, {intensity:.2f})")
|
| 587 |
+
print()
|
| 588 |
+
|
| 589 |
+
print("=" * 60)
|
| 590 |
+
print(" Dreams are just unvalidated hypotheses.")
|
| 591 |
+
print(" Some become reality. Some remain dreams.")
|
| 592 |
+
print(" All are worth exploring.")
|
| 593 |
+
print("=" * 60)
|
cloud/rrpram_cloud.py
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# rrpram.py — Recursive Resonant Pattern Recognition Attention Mechanism Tokenizer
|
| 3 |
+
#
|
| 4 |
+
# SentencePiece-based tokenization for haze.
|
| 5 |
+
# Captures n-grams, subwords, and resonant patterns directly in the vocabulary.
|
| 6 |
+
#
|
| 7 |
+
# Why "rrpram"? Because the tokenizer IS the first layer of pattern recognition.
|
| 8 |
+
# Before attention even runs, we're already finding patterns.
|
| 9 |
+
#
|
| 10 |
+
# Usage:
|
| 11 |
+
# from haze.rrpram import RRPRAMVocab
|
| 12 |
+
# vocab = RRPRAMVocab.train("text.txt", vocab_size=1000)
|
| 13 |
+
# tokens = vocab.encode("the haze settles")
|
| 14 |
+
# text = vocab.decode(tokens)
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
import os
|
| 18 |
+
import tempfile
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
from typing import List, Optional, Union
|
| 21 |
+
from dataclasses import dataclass
|
| 22 |
+
|
| 23 |
+
# sentencepiece is an optional dependency: the module stays importable
# without it, and training raises a clear ImportError later instead.
try:
    import sentencepiece as spm
    HAS_SENTENCEPIECE = True
except ImportError:
    HAS_SENTENCEPIECE = False
    print("[rrpram] sentencepiece not found. Install it: pip install sentencepiece")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@dataclass
class RRPRAMVocab:
    """
    RRPRAM Vocabulary: SentencePiece-based tokenizer for haze.

    Uses BPE or Unigram model to capture:
    - Frequent n-grams as single tokens
    - Subword patterns (morphology)
    - Resonant character sequences

    This is the first layer of pattern recognition—before attention,
    we're already finding structure in the text.
    """

    model_path: str                   # path of the trained .model file on disk
    sp: "spm.SentencePieceProcessor"  # loaded SentencePiece processor
    vocab_size: int                   # actual number of pieces in the model

    @classmethod
    def train(
        cls,
        corpus_path: Union[str, Path],
        vocab_size: int = 1000,
        model_type: str = "bpe",  # "bpe", "unigram", "char", "word"
        model_prefix: Optional[str] = None,
        character_coverage: float = 1.0,
        max_sentence_length: int = 4192,
        user_defined_symbols: Optional[List[str]] = None,
    ) -> "RRPRAMVocab":
        """
        Train a new SentencePiece model on corpus.

        Args:
            corpus_path: path to training text file
            vocab_size: target vocabulary size
            model_type: "bpe" (byte-pair), "unigram", "char", or "word"
            model_prefix: output model file prefix (default: temp file)
            character_coverage: fraction of characters to cover (1.0 = all)
            max_sentence_length: max chars per training sentence
            user_defined_symbols: custom symbols to include

        Returns:
            trained RRPRAMVocab instance

        Raises:
            ImportError: if sentencepiece is not installed
            FileNotFoundError: if corpus_path does not exist
        """
        if not HAS_SENTENCEPIECE:
            raise ImportError("sentencepiece required. Install: pip install sentencepiece")

        corpus_path = Path(corpus_path)
        if not corpus_path.exists():
            raise FileNotFoundError(f"Corpus not found: {corpus_path}")

        # determine model output path
        if model_prefix is None:
            # create temp directory for model files
            tmp_dir = tempfile.mkdtemp(prefix="rrpram_")
            model_prefix = os.path.join(tmp_dir, "rrpram")

        # BUGFIX: pass training options as keyword arguments instead of a
        # single space-joined command string — the old form broke whenever
        # corpus_path or model_prefix contained a space.
        train_kwargs = {
            "input": str(corpus_path),
            "model_prefix": model_prefix,
            "vocab_size": vocab_size,
            "model_type": model_type,
            "character_coverage": character_coverage,
            "max_sentence_length": max_sentence_length,
            "pad_id": 0,
            "unk_id": 1,
            "bos_id": 2,
            "eos_id": 3,
            "normalization_rule_name": "identity",  # preserve case and chars
        }
        if user_defined_symbols:
            train_kwargs["user_defined_symbols"] = user_defined_symbols

        # train
        print(f"[rrpram] training {model_type} model on {corpus_path}")
        print(f"[rrpram] vocab_size={vocab_size}, coverage={character_coverage}")
        spm.SentencePieceTrainer.Train(**train_kwargs)

        model_path = f"{model_prefix}.model"
        print(f"[rrpram] model saved to {model_path}")

        # load trained model
        sp = spm.SentencePieceProcessor()
        sp.Load(model_path)

        return cls(
            model_path=model_path,
            sp=sp,
            vocab_size=sp.GetPieceSize(),
        )

    @classmethod
    def load(cls, model_path: Union[str, Path]) -> "RRPRAMVocab":
        """Load a pre-trained SentencePiece model from a .model file."""
        if not HAS_SENTENCEPIECE:
            raise ImportError("sentencepiece required. Install: pip install sentencepiece")

        model_path = str(model_path)
        sp = spm.SentencePieceProcessor()
        sp.Load(model_path)

        return cls(
            model_path=model_path,
            sp=sp,
            vocab_size=sp.GetPieceSize(),
        )

    def encode(self, text: str) -> List[int]:
        """Encode text to token IDs."""
        return self.sp.EncodeAsIds(text)

    def decode(self, ids: List[int]) -> str:
        """Decode token IDs to text."""
        return self.sp.DecodeIds(ids)

    def encode_pieces(self, text: str) -> List[str]:
        """Encode text to subword pieces (for visualization)."""
        return self.sp.EncodeAsPieces(text)

    def decode_pieces(self, pieces: List[str]) -> str:
        """Decode subword pieces to text."""
        return self.sp.DecodePieces(pieces)

    def get_piece(self, id: int) -> str:
        """Get the piece (token) for a given ID."""
        # NOTE: parameter name `id` shadows the builtin but is kept for
        # backward compatibility with keyword callers.
        return self.sp.IdToPiece(id)

    def get_id(self, piece: str) -> int:
        """Get the ID for a given piece (token)."""
        return self.sp.PieceToId(piece)

    def __len__(self) -> int:
        """Number of pieces in the vocabulary."""
        return self.vocab_size
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def analyze_vocab(vocab: RRPRAMVocab, top_n: int = 50) -> None:
    """
    Print vocabulary statistics for a trained RRPRAM vocab.

    Lists the first ``top_n`` tokens of the vocabulary — the most common
    "resonant patterns" the tokenizer learned from the corpus.
    """
    rule = "=" * 60
    print(rule)
    print(" RRPRAM Vocabulary Analysis")
    print(rule)
    print(f" vocab size: {vocab.vocab_size}")
    print()

    print(f" Top {top_n} tokens (resonant patterns):")
    print("-" * 40)

    shown = min(top_n, vocab.vocab_size)
    for token_id in range(shown):
        raw_piece = vocab.get_piece(token_id)
        # make the SentencePiece word-boundary marker and newlines visible
        display = raw_piece.replace("▁", "_").replace("\n", "\\n")
        print(f" {token_id:4d}: '{display}'")

    print()
    print(rule)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def demo_tokenization(vocab: RRPRAMVocab, texts: List[str]) -> None:
    """
    Run the tokenizer over sample texts and print what it sees.

    For each input string, shows the token IDs, the subword pieces,
    the token count, and the decoded round-trip.
    """
    rule = "=" * 60
    print(rule)
    print(" RRPRAM Tokenization Demo")
    print(rule)

    for sample in texts:
        print(f"\n input: \"{sample}\"")
        token_ids = vocab.encode(sample)
        token_pieces = vocab.encode_pieces(sample)

        print(f" ids: {token_ids}")
        print(f" pieces: {token_pieces}")
        print(f" tokens: {len(token_ids)}")

        # round-trip back to text to verify losslessness
        round_trip = vocab.decode(token_ids)
        print(f" decoded: \"{round_trip}\"")

    print()
    print(rule)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
if __name__ == "__main__":
    import sys

    print("=" * 60)
    print(" rrpram.py — RRPRAM Tokenizer")
    print("=" * 60)
    print()

    # BUGFIX: resolve the corpus from argv *before* checking existence.
    # Previously text.txt had to exist even when a custom corpus was
    # passed on the command line, so `python rrpram.py corpus.txt`
    # exited with an error whenever text.txt was missing.
    if len(sys.argv) > 1:
        corpus_path = Path(sys.argv[1])
    else:
        corpus_path = Path("text.txt")

    if not corpus_path.exists():
        print(f"[error] {corpus_path} not found")
        print()
        print("Usage:")
        print(" python rrpram.py # train on text.txt")
        print(" python rrpram.py corpus.txt # train on custom corpus")
        sys.exit(1)

    print(f"[rrpram] corpus: {corpus_path}")

    # train tokenizer
    vocab = RRPRAMVocab.train(
        corpus_path,
        vocab_size=500,
        model_type="bpe",
        character_coverage=1.0,
    )

    # analyze the learned vocabulary
    analyze_vocab(vocab, top_n=30)

    # demo tokenization on sample phrases
    demo_texts = [
        "the haze settles",
        "darling",
        "I love you",
        "What's the toast?",
    ]
    demo_tokenization(vocab, demo_texts)

    print()
    print("[rrpram] done. patterns recognized. resonance achieved.")
|
cloud/user_cloud.py
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# user_cloud.py — Temporal Emotional Fingerprint
|
| 3 |
+
#
|
| 4 |
+
# Tracks user's emotional history with exponential decay.
|
| 5 |
+
# Recent emotions matter more (24h half-life).
|
| 6 |
+
#
|
| 7 |
+
# The "user cloud" is a 100D vector where each dimension
|
| 8 |
+
# represents cumulative exposure to that emotion anchor.
|
| 9 |
+
#
|
| 10 |
+
# Decay formula:
|
| 11 |
+
# weight(t) = exp(-t / tau)
|
| 12 |
+
# where tau = 24 hours, t = time since event
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
import asyncio
|
| 16 |
+
import numpy as np
|
| 17 |
+
import time
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from typing import List, Dict, Optional
|
| 20 |
+
from dataclasses import dataclass, field
|
| 21 |
+
import json
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@dataclass
class EmotionEvent:
    """Single timestamped emotion observation in the user's history."""
    timestamp: float      # Unix timestamp (seconds) of the event
    primary_idx: int      # Index of primary emotion anchor (0-99)
    secondary_idx: int    # Index of secondary emotion anchor (0-99)
    weight: float = 1.0   # Event importance; scales its fingerprint contribution


@dataclass
class UserCloud:
    """
    Temporal emotional fingerprint with exponential decay.

    Maintains:
    - History of emotion events
    - Decayed fingerprint (100D vector)
    - Decay half-life (default 24 hours)

    The fingerprint is recomputed on-the-fly with decay applied:
        contribution(t) = weight * exp(-age / tau), tau = half_life / ln(2)
    """

    events: List[EmotionEvent] = field(default_factory=list)
    half_life_hours: float = 24.0  # contribution halves every 24h by default
    max_history: int = 1000        # keep only the last N events

    @property
    def tau(self) -> float:
        """Decay constant in seconds; exp(-half_life / tau) == 0.5."""
        return self.half_life_hours * 3600 / np.log(2)

    def add_event(
        self,
        primary_idx: int,
        secondary_idx: int,
        weight: float = 1.0,
        timestamp: Optional[float] = None,
    ) -> None:
        """
        Add an emotion event to history.

        Args:
            primary_idx: primary emotion index (0-99)
            secondary_idx: secondary emotion index (0-99)
            weight: event importance (default 1.0)
            timestamp: Unix timestamp (default: now)
        """
        if timestamp is None:
            timestamp = time.time()

        self.events.append(
            EmotionEvent(
                timestamp=timestamp,
                primary_idx=primary_idx,
                secondary_idx=secondary_idx,
                weight=weight,
            )
        )

        # Bound memory: drop the oldest events beyond max_history
        if len(self.events) > self.max_history:
            self.events = self.events[-self.max_history:]

    def get_fingerprint(self, current_time: Optional[float] = None) -> np.ndarray:
        """
        Compute current emotional fingerprint with temporal decay.

        Each event contributes weight * exp(-age / tau), split 70% to its
        primary anchor and 30% to its secondary anchor; the result is
        normalized so the strongest anchor equals 1.0.

        Args:
            current_time: Unix timestamp to evaluate at (default: now)

        Returns:
            (100,) float32 vector of decayed emotion exposures
        """
        if current_time is None:
            current_time = time.time()

        fingerprint = np.zeros(100, dtype=np.float32)

        if self.events:
            # Vectorized accumulation: one C-level pass over the history
            # instead of a Python-level loop per event.
            ts = np.array([e.timestamp for e in self.events], dtype=np.float64)
            wts = np.array([e.weight for e in self.events], dtype=np.float64)
            prim = np.array([e.primary_idx for e in self.events], dtype=np.intp)
            sec = np.array([e.secondary_idx for e in self.events], dtype=np.intp)

            # Exponential decay: exp(-dt / tau)
            decay = np.exp(-(current_time - ts) / self.tau)

            # np.add.at accumulates correctly at repeated indices
            # (plain fancy-index += would not).
            np.add.at(fingerprint, prim, (wts * decay * 0.7).astype(np.float32))
            np.add.at(fingerprint, sec, (wts * decay * 0.3).astype(np.float32))

        # Normalize to [0, 1] range (no-op on an all-zero fingerprint)
        if fingerprint.max() > 0:
            fingerprint = fingerprint / fingerprint.max()

        return fingerprint

    def get_recent_emotions(
        self,
        hours: float = 24.0,
        current_time: Optional[float] = None,
    ) -> List[EmotionEvent]:
        """Get events from last N hours."""
        if current_time is None:
            current_time = time.time()

        cutoff = current_time - (hours * 3600)
        return [e for e in self.events if e.timestamp >= cutoff]

    def get_dominant_emotions(
        self,
        top_k: int = 5,
        current_time: Optional[float] = None,
    ) -> List[tuple]:
        """
        Get top-k dominant emotions from fingerprint.

        Returns:
            List of (emotion_idx, strength) tuples, strongest first
        """
        fingerprint = self.get_fingerprint(current_time)
        top_indices = np.argsort(fingerprint)[-top_k:][::-1]
        return [(int(idx), float(fingerprint[idx])) for idx in top_indices]

    def save(self, path: Path) -> None:
        """Save user cloud (events + decay settings) to a JSON file."""
        data = {
            "events": [
                {
                    "timestamp": e.timestamp,
                    "primary_idx": e.primary_idx,
                    "secondary_idx": e.secondary_idx,
                    "weight": e.weight,
                }
                for e in self.events
            ],
            "half_life_hours": self.half_life_hours,
            "max_history": self.max_history,
        }

        with open(path, "w") as f:
            json.dump(data, f, indent=2)

        print(f"[user_cloud] saved {len(self.events)} events to {path}")

    @classmethod
    def load(cls, path: Path) -> "UserCloud":
        """Load user cloud from JSON file (inverse of save())."""
        with open(path, "r") as f:
            data = json.load(f)

        events = [
            EmotionEvent(
                timestamp=e["timestamp"],
                primary_idx=e["primary_idx"],
                secondary_idx=e["secondary_idx"],
                weight=e.get("weight", 1.0),  # tolerate files without weight
            )
            for e in data["events"]
        ]

        cloud = cls(
            events=events,
            half_life_hours=data.get("half_life_hours", 24.0),
            max_history=data.get("max_history", 1000),
        )

        print(f"[user_cloud] loaded {len(events)} events from {path}")
        return cloud

    def stats(self) -> Dict:
        """Return summary statistics about the user cloud, evaluated at 'now'."""
        current_time = time.time()
        fingerprint = self.get_fingerprint(current_time)

        recent_24h = len(self.get_recent_emotions(24.0, current_time))
        recent_7d = len(self.get_recent_emotions(24.0 * 7, current_time))

        return {
            "total_events": len(self.events),
            "events_24h": recent_24h,
            "events_7d": recent_7d,
            "fingerprint_max": float(fingerprint.max()),
            "fingerprint_mean": float(fingerprint.mean()),
            "fingerprint_nonzero": int((fingerprint > 0).sum()),
            "half_life_hours": self.half_life_hours,
        }
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class AsyncUserCloud:
    """
    Asynchronous facade over UserCloud.

    Every operation acquires a single asyncio.Lock before touching the
    wrapped cloud, so concurrent coroutines observe atomic updates.

    "The asyncio.Lock doesn't add information—it adds discipline."
    """

    def __init__(self, cloud: UserCloud):
        # wrapped synchronous cloud and the lock serializing access to it
        self._sync = cloud
        self._lock = asyncio.Lock()

    @classmethod
    def create(cls, half_life_hours: float = 24.0) -> "AsyncUserCloud":
        """Build a wrapper around a brand-new, empty UserCloud."""
        return cls(UserCloud(half_life_hours=half_life_hours))

    @classmethod
    def load(cls, path: Path) -> "AsyncUserCloud":
        """Build a wrapper around a UserCloud restored from disk."""
        return cls(UserCloud.load(path))

    async def add_event(
        self,
        primary_idx: int,
        secondary_idx: int,
        weight: float = 1.0,
        timestamp: Optional[float] = None,
    ) -> None:
        """Record one emotion event atomically."""
        async with self._lock:
            self._sync.add_event(primary_idx, secondary_idx, weight, timestamp)

    async def get_fingerprint(self, current_time: Optional[float] = None) -> np.ndarray:
        """Snapshot the decayed fingerprint under the lock."""
        async with self._lock:
            snapshot = self._sync.get_fingerprint(current_time)
        return snapshot

    async def get_dominant_emotions(
        self,
        top_k: int = 5,
        current_time: Optional[float] = None,
    ) -> List[tuple]:
        """Top-k (index, strength) pairs, computed atomically."""
        async with self._lock:
            dominant = self._sync.get_dominant_emotions(top_k, current_time)
        return dominant

    async def save(self, path: Path) -> None:
        """Persist the wrapped cloud while holding the lock."""
        async with self._lock:
            self._sync.save(path)

    async def stats(self) -> Dict:
        """Summary statistics, computed atomically."""
        async with self._lock:
            summary = self._sync.stats()
        return summary
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
if __name__ == "__main__":
    # BUGFIX: a bare relative import ("from .anchors import ...") raises
    # "attempted relative import with no known parent package" when this
    # file is executed directly. Fall back to a top-level import so the
    # demo works both as `python -m cloud.user_cloud` and `python user_cloud.py`.
    try:
        from .anchors import get_all_anchors
    except ImportError:
        from anchors import get_all_anchors

    print("=" * 60)
    print(" CLOUD v3.0 — User Cloud (Temporal Fingerprint)")
    print("=" * 60)
    print()

    # Initialize empty cloud
    cloud = UserCloud(half_life_hours=24.0)
    print(f"Initialized user cloud (half-life={cloud.half_life_hours}h)")
    print()

    # Simulate emotion events at various points in the past
    print("Simulating emotion events:")
    current_time = time.time()

    # (primary_idx, secondary_idx, hours relative to now)
    events_to_add = [
        (0, 5, -48), # FEAR event 48h ago
        (20, 22, -24), # LOVE event 24h ago
        (38, 40, -12), # RAGE event 12h ago
        (55, 58, -6), # VOID event 6h ago
        (70, 72, -1), # FLOW event 1h ago
    ]

    anchors = get_all_anchors()

    for primary, secondary, hours_ago in events_to_add:
        timestamp = current_time + (hours_ago * 3600)
        cloud.add_event(primary, secondary, timestamp=timestamp)
        print(f" {hours_ago:+3d}h: {anchors[primary]} + {anchors[secondary]}")
    print()

    # Fingerprint snapshot at "now"
    print("Current emotional fingerprint:")
    fingerprint = cloud.get_fingerprint(current_time)
    print(f" Shape: {fingerprint.shape}")
    print(f" Max: {fingerprint.max():.3f}")
    print(f" Mean: {fingerprint.mean():.3f}")
    print(f" Nonzero: {(fingerprint > 0).sum()}/100")
    print()

    # Strongest anchors with a simple bar chart
    print("Top 5 dominant emotions:")
    for idx, strength in cloud.get_dominant_emotions(5, current_time):
        bar = "█" * int(strength * 40)
        print(f" {anchors[idx]:15s}: {strength:.3f} {bar}")
    print()

    # Evaluate the fingerprint at earlier times to visualize decay
    print("Decay effect over time:")
    for hours in [1, 6, 12, 24, 48, 72]:
        past_time = current_time - (hours * 3600)
        fp = cloud.get_fingerprint(past_time)
        print(f" {hours:3d}h ago: max={fp.max():.3f}, nonzero={int((fp > 0).sum())}")
    print()

    # Round-trip persistence check
    print("Testing save/load:")
    path = Path("./cloud_data.json")
    cloud.save(path)

    cloud2 = UserCloud.load(path)
    fp2 = cloud2.get_fingerprint(current_time)

    match = np.allclose(fingerprint, fp2)
    print(f" Save/load {'✓' if match else '✗'}")
    print()

    # Stats
    print("User cloud statistics:")
    for k, v in cloud.stats().items():
        print(f" {k}: {v}")
    print()

    print("=" * 60)
    print(" Temporal fingerprint operational. Memory with decay.")
    print("=" * 60)
|