# (removed: "Spaces: Paused" page-scrape residue from the Hugging Face Spaces UI)
| """ | |
| MEGAMIND Hotel Mind — A federated knowledge mind running on Hugging Face Spaces. | |
| Hotel Mind has its own NumPy W_know matrix and can: | |
| 1. Learn from text (encode into W_know via Hebbian learning) | |
| 2. Think locally (activate W_know, retrieve chunks) | |
| 3. Query the MEGAMIND federation via the Gateway for cross-mind intelligence | |
| Architecture: | |
| Hotel Mind (HF Spaces) <--HTTP--> Gateway (Cloudflare Tunnel) <--NATS--> All other minds | |
| """ | |
# Standard library
import hashlib
import json
import os
import time
import urllib.error
import urllib.request
from pathlib import Path

# Third-party
import gradio as gr
import numpy as np
# ============================================================================
# W_know Matrix — Hebbian Knowledge Storage
# ============================================================================
NEURONS = 512
DATA_DIR = "data"
W_KNOW_PATH = os.path.join(DATA_DIR, "w_know.npy")
CHUNKS_PATH = os.path.join(DATA_DIR, "chunks.json")
SPECIALTY = "general-knowledge"
NODE_NAME = "hotel"

# Gateway URL for federated queries (set via env or default)
GATEWAY_URL = os.environ.get("GATEWAY_URL", "")


class HotelMind:
    """A local Hebbian knowledge store with optional gateway federation.

    Holds a NEURONS x NEURONS float32 weight matrix (W_know) updated by
    Hebbian outer-product learning, plus a JSON-persisted list of text
    chunks used for keyword/neuron-based retrieval in `think`.
    """

    def __init__(self):
        os.makedirs(DATA_DIR, exist_ok=True)
        self.neurons = NEURONS
        self.w_know = self._load_or_init_wknow()
        self.chunks = self._load_chunks()
        self.pattern_count = len(self.chunks)

    def _load_or_init_wknow(self):
        """Load the persisted weight matrix, or create a fresh zero matrix."""
        if os.path.exists(W_KNOW_PATH):
            w = np.load(W_KNOW_PATH)
            print(f"Loaded W_know: {w.shape}")
            return w
        w = np.zeros((self.neurons, self.neurons), dtype=np.float32)
        print(f"Initialized fresh W_know: {self.neurons}x{self.neurons}")
        return w

    def _save_wknow(self):
        """Persist the weight matrix to disk."""
        np.save(W_KNOW_PATH, self.w_know)

    def _load_chunks(self):
        """Load stored chunks from disk, or return an empty list."""
        if os.path.exists(CHUNKS_PATH):
            # FIX: pin the encoding so reads are stable across platforms.
            with open(CHUNKS_PATH, "r", encoding="utf-8") as f:
                return json.load(f)
        return []

    def _save_chunks(self):
        """Persist the chunk list to disk."""
        # FIX: pin the encoding (was platform-default).
        with open(CHUNKS_PATH, "w", encoding="utf-8") as f:
            json.dump(self.chunks, f)

    def _text_to_vector(self, text):
        """Encode text into a unit-norm neuron-sized vector.

        Uses hash-projected unigram and (half-weight) bigram features, with a
        1/(1 + 0.1*i) positional decay so earlier words count more. Empty or
        whitespace-only text yields the all-zero vector (not normalized).
        """
        vec = np.zeros(self.neurons, dtype=np.float32)
        words = text.lower().split()
        for i, word in enumerate(words):
            # md5 is used only as a cheap stable hash projection, not security.
            h = int(hashlib.md5(word.encode()).hexdigest(), 16)
            idx = h % self.neurons
            weight = 1.0 / (1.0 + i * 0.1)  # position decay
            vec[idx] += weight
            # bigram features
            if i > 0:
                bigram = words[i - 1] + "_" + word
                h2 = int(hashlib.md5(bigram.encode()).hexdigest(), 16)
                vec[h2 % self.neurons] += weight * 0.5
        # Normalize
        norm = np.linalg.norm(vec)
        if norm > 0:
            vec /= norm
        return vec

    def learn(self, text, source=""):
        """Hebbian learning: strengthen connections between co-activated neurons.

        Returns the new total pattern count. State is persisted only every
        10th pattern; callers doing bulk ingestion should save explicitly.
        """
        vec = self._text_to_vector(text)
        # Hebbian update: dW = eta * (v @ v^T)
        eta = 0.01
        outer = np.outer(vec, vec)
        self.w_know += eta * outer
        # Clip to prevent unbounded growth
        np.clip(self.w_know, -10.0, 10.0, out=self.w_know)
        # Store chunk (text truncated to 500 chars to bound storage)
        chunk = {
            "text": text[:500],
            "source": source,
            "neuron_idx": int(np.argmax(vec)),
            "timestamp": time.time(),
        }
        self.chunks.append(chunk)
        self.pattern_count = len(self.chunks)
        # Save periodically
        if self.pattern_count % 10 == 0:
            self._save_wknow()
            self._save_chunks()
        return self.pattern_count

    def think(self, query):
        """Activate W_know with the query and retrieve matching chunks.

        Returns a dict with the final phi (state-change norm after 20 tanh
        propagation steps), the number of top activated neurons, the matched
        chunks (top 10 by score), and the full phi history.
        """
        vec = self._text_to_vector(query)
        # Propagate through W_know with tanh activation (20 steps)
        state = vec.copy()
        phi_history = []
        for step in range(20):
            new_state = np.tanh(self.w_know @ state)
            # Phi = integrated information (norm of difference)
            phi = float(np.linalg.norm(new_state - state))
            phi_history.append(phi)
            state = new_state
        final_phi = phi_history[-1] if phi_history else 0.0
        # Find top-20 activated neurons
        top_neurons = np.argsort(np.abs(state))[-20:][::-1]
        # FIX: build a set once for O(1) membership instead of scanning the
        # NumPy array twice for every stored chunk.
        top_set = {int(n) for n in top_neurons}
        # Retrieve matching chunks: keyword overlap plus top-neuron bonus
        matched = []
        keywords = set(query.lower().split())
        for chunk in self.chunks:
            chunk_words = set(chunk["text"].lower().split())
            overlap = len(keywords & chunk_words)
            if overlap > 0 or chunk["neuron_idx"] in top_set:
                score = overlap * 0.1 + (1.0 if chunk["neuron_idx"] in top_set else 0.0)
                matched.append((score, chunk))
        matched.sort(key=lambda x: -x[0])
        matched = matched[:10]
        return {
            "phi": final_phi,
            "fired_neurons": len(top_neurons),
            "chunks": [
                {"text": c["text"], "source": c["source"], "score": s}
                for s, c in matched
            ],
            "phi_history": phi_history,
        }

    def federated_think(self, query):
        """Query the MEGAMIND federation via the Gateway.

        Falls back to local-only thinking when GATEWAY_URL is unset; gateway
        errors are captured in the result rather than raised.
        """
        if not GATEWAY_URL:
            local = self.think(query)
            return {
                "query": query,
                "total_brains": 1,
                "responded": 1,
                "local_result": local,
                "federation": "not configured — set GATEWAY_URL",
            }
        # Query federation gateway (POST JSON, bounded by a 10s timeout)
        try:
            data = json.dumps({"query": query}).encode()
            req = urllib.request.Request(
                f"{GATEWAY_URL}/think",
                data=data,
                headers={"Content-Type": "application/json"},
            )
            with urllib.request.urlopen(req, timeout=10) as resp:
                fed_result = json.loads(resp.read().decode())
        except Exception as e:
            # Broad catch is deliberate at this network boundary: any gateway
            # failure degrades to local-only results instead of crashing.
            fed_result = {"error": str(e)}
        # Also run local think
        local = self.think(query)
        return {
            "query": query,
            "local": {
                "phi": local["phi"],
                "chunks": local["chunks"][:5],
            },
            "federation": fed_result,
        }

    def get_stats(self):
        """Return a summary dict of this node's identity and matrix state."""
        density = np.count_nonzero(self.w_know) / (self.neurons * self.neurons) * 100
        return {
            "node_name": NODE_NAME,
            "specialty": SPECIALTY,
            "neurons": self.neurons,
            "patterns": self.pattern_count,
            "w_know_density": f"{density:.2f}%",
            "gateway_url": GATEWAY_URL or "not set",
        }
# ============================================================================
# Gradio Interface
# ============================================================================
# Single module-level mind instance shared by every UI handler below.
mind = HotelMind()
def think_handler(query, federated):
    """Run a UI query through the mind and return pretty-printed JSON.

    Uses the federation only when the checkbox is on AND a gateway is
    configured; otherwise thinks locally.
    """
    if not query.strip():
        return "Please enter a query."
    engine = mind.federated_think if (federated and GATEWAY_URL) else mind.think
    return json.dumps(engine(query), indent=2, default=str)
def learn_handler(text, source):
    """Teach the mind one chunk of text; returns a status string for the UI."""
    if not text.strip():
        return "Please enter text to learn."
    total = mind.learn(text, source)
    return f"Learned! Total patterns: {total}"
def batch_learn_handler(file):
    """Learn from an uploaded file: a JSON array (of strings or
    {"text", "source"} objects), or plain text with one chunk per line.

    Returns a human-readable status string for the Gradio UI.
    """
    if file is None:
        return "Please upload a file."
    # Gradio may hand us raw bytes or a tempfile-like object with .name.
    if isinstance(file, bytes):
        content = file.decode("utf-8")
    else:
        # FIX: close the handle and pin the encoding (was a bare, never-closed
        # open(file.name, "r").read()).
        with open(file.name, "r", encoding="utf-8") as f:
            content = f.read()
    # Try JSON array first
    try:
        items = json.loads(content)
    except json.JSONDecodeError:
        items = None
    if isinstance(items, list):
        for item in items:
            if isinstance(item, str):
                mind.learn(item)
            elif isinstance(item, dict):
                mind.learn(item.get("text", ""), item.get("source", ""))
        mind._save_wknow()
        mind._save_chunks()
        return f"Learned {len(items)} items. Total patterns: {mind.pattern_count}"
    # Fall back to line-by-line (also covers JSON that parsed to a non-list)
    lines = [l.strip() for l in content.split("\n") if l.strip()]
    for line in lines:
        mind.learn(line)
    mind._save_wknow()
    mind._save_chunks()
    return f"Learned {len(lines)} lines. Total patterns: {mind.pattern_count}"
def status_handler():
    """Return the mind's current stats as pretty-printed JSON."""
    return json.dumps(mind.get_stats(), indent=2)
# Build the Gradio interface: three tabs (Think / Learn / Status) wired to
# the module-level handlers above.
with gr.Blocks(title="MEGAMIND Hotel Mind", theme=gr.themes.Soft()) as app:
    # Page header / project blurb.
    gr.Markdown("""
# MEGAMIND Hotel Mind
**A federated knowledge mind in the MEGAMIND distributed intelligence network.**
Hotel Mind has its own W_know neural matrix and can think locally or query
the entire federation (7+ minds across multiple machines) for cross-domain intelligence.
""")
    # --- Think tab: run a query locally or across the federation. ---
    with gr.Tab("Think"):
        query_input = gr.Textbox(
            label="Query",
            placeholder="Ask anything... e.g., 'How do transformers work in deep learning?'",
            lines=2,
        )
        federated_check = gr.Checkbox(
            label="Federated (query all minds)",
            value=True,
        )
        think_btn = gr.Button("Think", variant="primary")
        think_output = gr.Code(label="Result", language="json")
        think_btn.click(think_handler, [query_input, federated_check], think_output)
    # --- Learn tab: single-chunk entry plus batch file upload. ---
    with gr.Tab("Learn"):
        gr.Markdown("Teach Hotel Mind new knowledge by entering text or uploading a file.")
        learn_text = gr.Textbox(
            label="Knowledge Text",
            placeholder="Enter knowledge to learn...",
            lines=4,
        )
        learn_source = gr.Textbox(
            label="Source URL (optional)",
            placeholder="https://...",
        )
        learn_btn = gr.Button("Learn", variant="primary")
        learn_output = gr.Textbox(label="Result")
        learn_btn.click(learn_handler, [learn_text, learn_source], learn_output)
        gr.Markdown("---")
        gr.Markdown("### Batch Learn from File")
        file_input = gr.File(label="Upload .txt or .json file")
        batch_btn = gr.Button("Batch Learn")
        batch_output = gr.Textbox(label="Result")
        batch_btn.click(batch_learn_handler, [file_input], batch_output)
    # --- Status tab: inspect node identity and matrix statistics. ---
    with gr.Tab("Status"):
        status_btn = gr.Button("Refresh Status")
        status_output = gr.Code(label="Mind Status", language="json")
        status_btn.click(status_handler, [], status_output)
    # Footer blurb.
    gr.Markdown("""
---
*Part of the [MEGAMIND](https://github.com) distributed AGI federation.
7+ minds across 3+ machines, connected via NATS messaging over Tailscale mesh.*
""")
| if __name__ == "__main__": | |
| print(f"Hotel Mind starting — {mind.neurons} neurons, {mind.pattern_count} patterns") | |
| print(f"Gateway URL: {GATEWAY_URL or 'not configured'}") | |
| app.launch(server_name="0.0.0.0", server_port=7860) | |