|
|
|
|
|
""" |
|
|
Dimensional Emergent Node Entanglement Matrix Database |
|
|
======================================================= |
|
|
Creates sophisticated training data using holographic emergence principles |
|
|
from luimennua.md for LLM training. |
|
|
|
|
|
This system: |
|
|
1. Creates dimensional nodes with quantum-inspired states |
|
|
2. Establishes entanglement matrices between nodes |
|
|
3. Generates emergent training data from node interactions |
|
|
4. Stores in database for LLM fine-tuning |
|
|
|
|
|
Based on: Vibrational Lattice & Holographic Infinity from luimennua.md |
|
|
|
|
|
Author: Assistant |
|
|
License: MIT |
|
|
""" |
|
|
|
|
|
import numpy as np |
|
|
import sqlite3 |
|
|
import json |
|
|
import hashlib |
|
|
from typing import Dict, List, Optional, Any, Tuple |
|
|
from dataclasses import dataclass, asdict |
|
|
from datetime import datetime |
|
|
import pickle |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class DimensionalNode:
    """
    A node in the dimensional entanglement matrix.

    Each node represents a concept/token/embedding with:
    - Quantum state (complex vector)
    - Spatial position (emergent geometry)
    - Vibrational phase (temporal dynamics)
    - Entanglement links to other nodes
    """
    node_id: str
    quantum_state: np.ndarray
    position: np.ndarray
    phase: float
    dimension: int
    metadata: Dict[str, Any]
    created_at: str

    def to_dict(self) -> Dict:
        """Serialize for database storage: arrays pickled, metadata as JSON."""
        serialized = dict(
            node_id=self.node_id,
            quantum_state=pickle.dumps(self.quantum_state),
            position=pickle.dumps(self.position),
            phase=self.phase,
            dimension=self.dimension,
            metadata=json.dumps(self.metadata),
            created_at=self.created_at,
        )
        return serialized

    @classmethod
    def from_dict(cls, data: Dict) -> 'DimensionalNode':
        """Inverse of :meth:`to_dict`; rebuild a node from a stored row."""
        arrays = {
            'quantum_state': pickle.loads(data['quantum_state']),
            'position': pickle.loads(data['position']),
        }
        return cls(
            node_id=data['node_id'],
            phase=data['phase'],
            dimension=data['dimension'],
            metadata=json.loads(data['metadata']),
            created_at=data['created_at'],
            **arrays,
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class EntanglementMatrix:
    """
    Matrix of entanglement coefficients between dimensional nodes.

    Based on: |Ο_Οβ© β |Ο_Ο'β© from luimennua.md

    Ξ¦[i,j] = β¨Ο_i|Ο_jβ© = entanglement strength between nodes i and j
    """

    def __init__(self, nodes: List["DimensionalNode"]):
        """Eagerly compute the entanglement matrix for *nodes*.

        Each node must expose ``quantum_state`` (complex vector),
        ``position`` (spatial vector) and ``phase`` (float).
        """
        self.nodes = nodes
        self.matrix = self._compute_entanglement_matrix()

    def _compute_entanglement_matrix(self) -> np.ndarray:
        """
        Compute full entanglement matrix.

        Ξ¦[i,j] = β¨Ο_i|Ο_jβ© Β· exp(-d_ij / 10) Β· cos(ΞΟ_ij)

        The matrix is Hermitian: the spatial and phase factors are symmetric
        in (i, j) and the quantum overlap conjugates under index swap, so
        only the upper triangle is computed and the lower one is mirrored.
        """
        n = len(self.nodes)
        matrix = np.zeros((n, n), dtype=complex)

        for i, node_i in enumerate(self.nodes):
            for j in range(i, n):
                node_j = self.nodes[j]

                # Quantum overlap on the common prefix of the two states
                # (states may have different lengths).
                min_len = min(len(node_i.quantum_state), len(node_j.quantum_state))
                overlap = np.vdot(
                    node_i.quantum_state[:min_len],
                    node_j.quantum_state[:min_len]
                )

                # Spatial decay: nearby nodes entangle more strongly.
                spatial_dist = np.linalg.norm(node_i.position - node_j.position)
                spatial_factor = np.exp(-spatial_dist / 10.0)

                # Phase alignment: coherent phases reinforce entanglement.
                phase_diff = abs(node_i.phase - node_j.phase)
                phase_factor = np.cos(phase_diff)

                value = overlap * spatial_factor * phase_factor
                matrix[i, j] = value
                if i != j:
                    # Hermitian mirror; conjugation is exact in floating point,
                    # so this is bit-identical to recomputing the (j, i) entry.
                    matrix[j, i] = np.conj(value)

        return matrix

    def get_entangled_nodes(self, node_idx: int, threshold: float = 0.5) -> List[Tuple[int, float]]:
        """
        Get nodes strongly entangled with given node.

        Args:
            node_idx: Row index of the query node.
            threshold: Minimum |Ξ¦| magnitude to count as entangled.

        Returns: List of (node_index, entanglement_strength), strongest first.
        """
        entanglements = []
        for j in range(len(self.nodes)):
            if j != node_idx:
                strength = abs(self.matrix[node_idx, j])
                if strength > threshold:
                    entanglements.append((j, float(strength)))

        return sorted(entanglements, key=lambda x: x[1], reverse=True)

    def compute_emergent_pattern(self, node_indices: List[int],
                                 pattern_dim: int = 64) -> np.ndarray:
        """
        Compute emergent pattern from multiple entangled nodes.

        Pattern = Ξ£_i w_i |Ο_iβ© where w_i are entanglement weights
        (the dominant eigenvector of the entanglement submatrix).

        Args:
            node_indices: Indices of the participating nodes.
            pattern_dim: Length of the returned pattern vector (default 64,
                matching the node state dimension used elsewhere).

        Returns: Unit-norm complex pattern vector of length *pattern_dim*
            (all zeros when *node_indices* is empty).
        """
        if not node_indices:
            return np.zeros(pattern_dim, dtype=complex)

        submatrix = self.matrix[np.ix_(node_indices, node_indices)]

        try:
            # Symmetrize and regularize so eigh is numerically well-posed.
            submatrix = (submatrix + submatrix.conj().T) / 2
            submatrix += np.eye(len(submatrix)) * 1e-6
            eigenvalues, eigenvectors = np.linalg.eigh(submatrix)
            # Dominant eigenvector = weighting that maximizes coherence.
            weights = eigenvectors[:, -1]
        except np.linalg.LinAlgError:
            # Degenerate submatrix: fall back to uniform weighting.
            weights = np.ones(len(node_indices)) / len(node_indices)

        # Weighted superposition of the contributing quantum states.
        pattern = np.zeros(pattern_dim, dtype=complex)
        for idx, node_idx in enumerate(node_indices):
            state = self.nodes[node_idx].quantum_state
            min_len = min(len(state), len(pattern))
            pattern[:min_len] += weights[idx] * state[:min_len]

        # Normalize; epsilon guards the all-zero case.
        pattern /= (np.linalg.norm(pattern) + 1e-8)

        return pattern
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class DimensionalDatabase:
    """
    Database for dimensional nodes and entanglement matrices.

    Stores:
    - Dimensional nodes (concepts/tokens/embeddings)
    - Entanglement matrices
    - Generated training data
    - Emergence patterns

    Can be used as a context manager::

        with DimensionalDatabase("nodes.db") as db:
            ...

    NOTE(security): array columns are serialized with ``pickle``; only open
    database files produced by trusted code, since unpickling a hostile blob
    can execute arbitrary code.
    """

    def __init__(self, db_path: str = "dimensional_entanglement.db"):
        """Open (or create) the SQLite database at *db_path* and ensure the schema."""
        self.db_path = db_path
        self.conn = sqlite3.connect(db_path)
        # sqlite3.Row lets callers address columns by name (dict-like rows).
        self.conn.row_factory = sqlite3.Row
        self._create_tables()

    def __enter__(self) -> "DimensionalDatabase":
        """Enter a ``with`` block; returns self."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Close the connection when the ``with`` block exits."""
        self.close()

    def _create_tables(self):
        """Create database schema (idempotent) and supporting indexes."""
        cursor = self.conn.cursor()

        # Core node storage: arrays are pickled BLOBs, metadata is JSON text.
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS dimensional_nodes (
                node_id TEXT PRIMARY KEY,
                quantum_state BLOB,
                position BLOB,
                phase REAL,
                dimension INTEGER,
                metadata TEXT,
                created_at TEXT
            )
        """)

        # Pairwise entanglement records between two stored nodes.
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS entanglements (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                node_i TEXT,
                node_j TEXT,
                strength REAL,
                phase_coherence REAL,
                spatial_proximity REAL,
                created_at TEXT,
                FOREIGN KEY (node_i) REFERENCES dimensional_nodes(node_id),
                FOREIGN KEY (node_j) REFERENCES dimensional_nodes(node_id)
            )
        """)

        # Generated prompt/completion pairs plus their provenance.
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS training_data (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                data_id TEXT UNIQUE,
                prompt TEXT,
                completion TEXT,
                source_nodes TEXT, -- JSON list of node IDs
                entanglement_pattern BLOB,
                emergence_score REAL,
                dimension_signature TEXT,
                metadata TEXT,
                created_at TEXT
            )
        """)

        # Stored emergent pattern vectors (not written by this class's
        # public methods here; schema kept for other writers/readers).
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS emergence_patterns (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                pattern_id TEXT UNIQUE,
                pattern_vector BLOB,
                contributing_nodes TEXT, -- JSON list
                emergence_metric REAL,
                holographic_signature TEXT,
                created_at TEXT
            )
        """)

        # Indexes for the common query paths.
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_dimension ON dimensional_nodes(dimension)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_entanglement_strength ON entanglements(strength)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_emergence_score ON training_data(emergence_score)")

        self.conn.commit()

    def add_node(self, node: "DimensionalNode"):
        """Insert or replace a dimensional node (keyed by ``node_id``)."""
        cursor = self.conn.cursor()
        node_dict = node.to_dict()

        cursor.execute("""
            INSERT OR REPLACE INTO dimensional_nodes
            (node_id, quantum_state, position, phase, dimension, metadata, created_at)
            VALUES (?, ?, ?, ?, ?, ?, ?)
        """, (
            node_dict['node_id'],
            node_dict['quantum_state'],
            node_dict['position'],
            node_dict['phase'],
            node_dict['dimension'],
            node_dict['metadata'],
            node_dict['created_at']
        ))

        self.conn.commit()

    def get_nodes_by_dimension(self, dimension: int) -> List["DimensionalNode"]:
        """Retrieve all nodes in a specific dimension."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM dimensional_nodes WHERE dimension = ?
        """, (dimension,))

        nodes = []
        for row in cursor.fetchall():
            nodes.append(DimensionalNode.from_dict(dict(row)))

        return nodes

    def add_entanglement(self, node_i: str, node_j: str, strength: float,
                         phase_coherence: float, spatial_proximity: float):
        """Record an entanglement between two nodes (timestamped)."""
        cursor = self.conn.cursor()
        cursor.execute("""
            INSERT INTO entanglements
            (node_i, node_j, strength, phase_coherence, spatial_proximity, created_at)
            VALUES (?, ?, ?, ?, ?, ?)
        """, (node_i, node_j, strength, phase_coherence, spatial_proximity,
              datetime.now().isoformat()))

        self.conn.commit()

    def add_training_data(self, prompt: str, completion: str, source_nodes: List[str],
                          entanglement_pattern: np.ndarray, emergence_score: float,
                          dimension_signature: str, metadata: Optional[Dict] = None):
        """Add generated training data.

        The row is keyed by a content hash of prompt+completion, so
        re-inserting the same pair replaces rather than duplicates.

        Returns:
            The 16-hex-char ``data_id`` of the stored row.
        """
        cursor = self.conn.cursor()

        # Deterministic content-derived id (deduplicates identical pairs).
        data_id = hashlib.sha256(
            f"{prompt}{completion}".encode()
        ).hexdigest()[:16]

        cursor.execute("""
            INSERT OR REPLACE INTO training_data
            (data_id, prompt, completion, source_nodes, entanglement_pattern,
             emergence_score, dimension_signature, metadata, created_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, (
            data_id,
            prompt,
            completion,
            json.dumps(source_nodes),
            pickle.dumps(entanglement_pattern),
            emergence_score,
            dimension_signature,
            json.dumps(metadata or {}),
            datetime.now().isoformat()
        ))

        self.conn.commit()
        return data_id

    def get_training_data(self, min_emergence_score: float = 0.5,
                          limit: int = 1000) -> List[Dict]:
        """Retrieve up to *limit* training rows at or above the score cutoff,
        best first."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM training_data
            WHERE emergence_score >= ?
            ORDER BY emergence_score DESC
            LIMIT ?
        """, (min_emergence_score, limit))

        return [dict(row) for row in cursor.fetchall()]

    def export_training_jsonl(self, output_path: str, min_emergence_score: float = 0.5):
        """Export training data in JSONL format for LLM fine-tuning."""
        data = self.get_training_data(min_emergence_score=min_emergence_score)

        # Explicit UTF-8 keeps the export byte-stable across platforms.
        with open(output_path, 'w', encoding='utf-8') as f:
            for item in data:
                training_example = {
                    'prompt': item['prompt'],
                    'completion': item['completion'],
                    'metadata': {
                        'emergence_score': item['emergence_score'],
                        'dimension_signature': item['dimension_signature'],
                        'source_nodes': json.loads(item['source_nodes']),
                        'data_id': item['data_id']
                    }
                }
                f.write(json.dumps(training_example) + '\n')

        print(f"β Exported {len(data)} training examples to {output_path}")

    def close(self):
        """Close database connection."""
        self.conn.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TrainingDataGenerator:
    """
    Generate sophisticated training data from dimensional entanglement matrices.

    Uses emergent patterns from entangled nodes to create:
    - Question-answer pairs
    - Reasoning chains
    - Multi-hop inference examples
    - Conceptual analogies
    """

    def __init__(self, database: DimensionalDatabase):
        # All generated examples are persisted through this database handle.
        self.db = database

    def generate_from_entangled_cluster(self,
                                        nodes: List[DimensionalNode],
                                        cluster_theme: str = "general") -> Dict:
        """
        Generate training data from a cluster of entangled nodes.

        Args:
            nodes: List of entangled dimensional nodes
            cluster_theme: Semantic theme of the cluster

        Returns:
            Training example dictionary, or None for clusters with
            fewer than two nodes.
        """
        if len(nodes) < 2:
            return None

        # Entangle the cluster and extract its dominant emergent pattern.
        ent = EntanglementMatrix(nodes)
        indices = list(range(len(nodes)))
        emergent = ent.compute_emergent_pattern(indices)

        # Score the cluster, then synthesize prompt/completion text from it.
        score = self._calculate_emergence_score(ent, nodes)
        prompt_text = self._generate_prompt_from_nodes(nodes, cluster_theme)
        completion_text = self._generate_completion_from_pattern(emergent, nodes, cluster_theme)

        # Signature such as "D0-2-4" naming the dimensions involved.
        dims = sorted({node.dimension for node in nodes})
        signature = f"D{'-'.join(map(str, dims))}"

        example = {
            'prompt': prompt_text,
            'completion': completion_text,
            'source_nodes': [node.node_id for node in nodes],
            'entanglement_pattern': emergent,
            'emergence_score': score,
            'dimension_signature': signature,
            'metadata': {
                'cluster_theme': cluster_theme,
                'num_nodes': len(nodes),
                'avg_entanglement': float(np.mean(np.abs(ent.matrix)))
            }
        }
        return example

    def _calculate_emergence_score(self, matrix: EntanglementMatrix,
                                   nodes: List[DimensionalNode]) -> float:
        """
        Calculate how emergent/sophisticated this training example is.

        Higher scores = more complex entanglement patterns
        """
        # Spread of the off-diagonal (upper-triangle) entanglement strengths.
        upper = np.abs(matrix.matrix[np.triu_indices_from(matrix.matrix, k=1)])
        diversity = np.std(upper) if len(upper) > 0 else 0.0

        # Reward clusters spanning many distinct dimensions.
        dimensional_score = len({node.dimension for node in nodes}) / 10.0

        # Tightly clustered phases score close to 1.
        phase_coherence = 1.0 - np.std([node.phase for node in nodes]) / (2 * np.pi)

        # Spatial spread of the node positions.
        positions = np.array([node.position for node in nodes])
        spatial_spread = np.std(positions) if len(positions) > 1 else 0.0

        # Weighted blend of the four ingredients, clipped into [0, 1].
        score = (
            0.3 * diversity +
            0.3 * dimensional_score +
            0.2 * phase_coherence +
            0.2 * min(spatial_spread / 10.0, 1.0)
        )
        return float(np.clip(score, 0.0, 1.0))

    def _generate_prompt_from_nodes(self, nodes: List[DimensionalNode],
                                    theme: str) -> str:
        """Generate prompt from node metadata."""
        # Collect up to five labels, preferring 'concept' over 'token'.
        concepts = []
        for node in nodes[:5]:
            for key in ('concept', 'token'):
                if key in node.metadata:
                    concepts.append(node.metadata[key])
                    break

        if not concepts:
            concepts = [f"concept_{i}" for i in range(min(3, len(nodes)))]

        # Candidate prompt templates; one is sampled at random.
        prompts = [
            f"Explain the relationship between {concepts[0]} and {concepts[1] if len(concepts) > 1 else 'related concepts'}.",
            f"How does {concepts[0]} emerge from the interaction of multiple dimensions?",
            f"Describe the entanglement between {', '.join(concepts[:3])}.",
            f"What patterns emerge when considering {concepts[0]} in the context of {theme}?",
        ]
        return np.random.choice(prompts)

    def _generate_completion_from_pattern(self, pattern: np.ndarray,
                                          nodes: List[DimensionalNode],
                                          theme: str) -> str:
        """Generate completion using emergent pattern."""
        # Convert pattern amplitudes into normalized per-node weights.
        amplitudes = np.abs(pattern[:len(nodes)])
        amplitudes /= (np.sum(amplitudes) + 1e-8)

        # Pair each node's concept label with its pattern weight.
        weighted = [
            (node.metadata.get('concept', f'concept_{i}'), amplitudes[i])
            for i, node in enumerate(nodes[:len(amplitudes)])
        ]
        weighted.sort(key=lambda pair: pair[1], reverse=True)

        # Narrate the most strongly weighted concepts.
        top = [concept for concept, _ in weighted[:3]]
        parts = [
            f"The emergent pattern reveals that {top[0]} ",
            f"is fundamentally connected to {top[1] if len(top) > 1 else 'the system'}. ",
            "Through dimensional entanglement, we observe that ",
            "these concepts form a holographic structure where each part contains information about the whole. ",
            f"The phase coherence across dimensions suggests a deep symmetry in how {theme} manifests.",
        ]
        return "".join(parts)

    def generate_batch(self, num_examples: int = 100,
                       dimensions: Optional[List[int]] = None) -> List[Dict]:
        """
        Generate a batch of training examples.

        Args:
            num_examples: Number of examples to generate
            dimensions: Which dimensions to sample from (None = all)

        Returns:
            List of training examples
        """
        if dimensions is None:
            dimensions = list(range(10))

        # Pool every stored node from the requested dimensions.
        all_nodes = []
        for dim in dimensions:
            all_nodes.extend(self.db.get_nodes_by_dimension(dim))

        if len(all_nodes) < 2:
            print("β Not enough nodes in database. Generate nodes first.")
            return []

        print(f"π Generating {num_examples} training examples from {len(all_nodes)} nodes...")

        examples = []
        for attempt in range(num_examples):
            # Sample a random cluster of 2-7 distinct nodes.
            size = np.random.randint(2, min(8, len(all_nodes) + 1))
            chosen = np.random.choice(all_nodes, size=size, replace=False)

            candidate = self.generate_from_entangled_cluster(
                list(chosen),
                cluster_theme=f"theme_{attempt % 10}"
            )

            # Keep (and persist) only sufficiently emergent examples.
            if candidate and candidate['emergence_score'] > 0.3:
                examples.append(candidate)
                self.db.add_training_data(**candidate)

            if (attempt + 1) % 20 == 0:
                print(f" Generated {attempt + 1}/{num_examples}...")

        print(f"β Generated {len(examples)} high-quality examples")
        return examples
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class DimensionalNodeFactory:
    """
    Factory for creating dimensional nodes from:
    - Text tokens
    - Embeddings
    - Concepts
    - Random initialization
    """

    @staticmethod
    def create_from_text(text: str, dimension: int = 0) -> "DimensionalNode":
        """Create a node deterministically from a text string.

        The SHA-256 digest of *text* seeds the quantum state, position and
        phase, so equal texts always yield identical nodes.
        """
        text_hash = hashlib.sha256(text.encode()).digest()

        # A SHA-256 digest is exactly 32 bytes; both real and imaginary
        # components are derived from those same 32 bytes, scaled to [0, 1].
        state_real = np.frombuffer(text_hash, dtype=np.uint8).astype(np.float64) / 255.0
        state_imag = state_real.copy()

        # Zero-pad the 32 hash-derived amplitudes up to the 64-dim state.
        if len(state_real) < 64:
            state_real = np.pad(state_real, (0, 64 - len(state_real)))
            state_imag = np.pad(state_imag, (0, 64 - len(state_imag)))

        quantum_state = (state_real[:64] + 1j * state_imag[:64])
        norm = np.linalg.norm(quantum_state)
        if norm > 1e-10:
            quantum_state /= norm
        else:
            # Degenerate (all-zero) hash: fall back to a uniform state.
            quantum_state = np.ones(64, dtype=complex) / np.sqrt(64)

        # First three hash bytes place the node in a 10x10x10 cube.
        position = np.array([
            float(text_hash[0]) / 255.0,
            float(text_hash[1]) / 255.0,
            float(text_hash[2]) / 255.0
        ]) * 10.0

        # Fourth byte fixes the vibrational phase in [0, 2Ο).
        phase = (float(text_hash[3]) / 255.0) * 2 * np.pi

        # MD5 here is just a short, stable identifier (not security-relevant).
        node_id = f"node_{hashlib.md5(text.encode()).hexdigest()[:12]}"

        return DimensionalNode(
            node_id=node_id,
            quantum_state=quantum_state,
            position=position,
            phase=phase,
            dimension=dimension,
            metadata={'concept': text, 'source': 'text'},
            created_at=datetime.now().isoformat()
        )

    @staticmethod
    def create_from_embedding(embedding: np.ndarray, concept: str,
                              dimension: int = 0) -> "DimensionalNode":
        """Create a node from an embedding vector.

        Embeddings shorter than 64 are zero-padded; longer ones are
        truncated. Handles embeddings shorter than 3 elements (including
        empty) without producing NaN position/phase values.
        """
        # Fit the embedding into the 64-dim complex state.
        if len(embedding) < 64:
            quantum_state = np.zeros(64, dtype=complex)
            quantum_state[:len(embedding)] = embedding
        else:
            quantum_state = embedding[:64].astype(complex)

        # Normalize; epsilon guards the all-zero case.
        quantum_state /= (np.linalg.norm(quantum_state) + 1e-8)

        # Derive a 3-D position from strided means. Pad short embeddings so
        # no stride slice is empty (np.mean of an empty slice yields NaN).
        coords = embedding if len(embedding) >= 3 else np.pad(embedding, (0, 3 - len(embedding)))
        position = np.array([
            np.mean(coords[::3]),
            np.mean(coords[1::3]),
            np.mean(coords[2::3])
        ])

        # Phase from the fractional part of the variance (0 for an empty
        # embedding, where np.var would be NaN).
        variance = float(np.var(embedding)) if len(embedding) else 0.0
        phase = (variance % 1.0) * 2 * np.pi

        # Identifier derived from the concept label (non-cryptographic use).
        node_id = f"node_{hashlib.md5(concept.encode()).hexdigest()[:12]}"

        return DimensionalNode(
            node_id=node_id,
            quantum_state=quantum_state,
            position=position,
            phase=phase,
            dimension=dimension,
            metadata={'concept': concept, 'source': 'embedding'},
            created_at=datetime.now().isoformat()
        )

    @staticmethod
    def create_random(dimension: int = 0, concept: Optional[str] = None) -> "DimensionalNode":
        """Create random node for testing."""
        # Random unit-norm complex state.
        quantum_state = np.random.randn(64) + 1j * np.random.randn(64)
        quantum_state /= np.linalg.norm(quantum_state)

        position = np.random.randn(3) * 5.0
        phase = np.random.random() * 2 * np.pi

        # Random identifier (non-deterministic by design).
        node_id = f"node_{hashlib.md5(str(np.random.random()).encode()).hexdigest()[:12]}"

        return DimensionalNode(
            node_id=node_id,
            quantum_state=quantum_state,
            position=position,
            phase=phase,
            dimension=dimension,
            metadata={'concept': concept or f'random_concept_{dimension}', 'source': 'random'},
            created_at=datetime.now().isoformat()
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def demo_dimensional_entanglement_system():
    """Demonstrate the complete system.

    End-to-end pipeline: open the SQLite store, create 25 concept nodes
    across 5 dimensions, compute their entanglement matrix, persist the
    strongest links, generate training examples from random entangled
    clusters, and export them as JSONL. Writes
    ``dimensional_entanglement.db`` and ``training_data_emergent.jsonl``
    into the current working directory.
    """
    print("=" * 80)
    print("π Dimensional Emergent Node Entanglement Matrix System")
    print("=" * 80)

    # Step 1: open (or create) the on-disk database.
    print("\nπ Initializing database...")
    db = DimensionalDatabase("dimensional_entanglement.db")

    # Step 2: build 25 concept nodes, 5 per dimension.
    # NOTE(review): "emergence" appears in both D2 and D4; node IDs are
    # derived from the text alone, so the second insert replaces the first
    # in the database while both stay in the local `nodes` list.
    print("\nπ Creating dimensional nodes across 5 dimensions...")
    concepts = [
        # Dimension 0: quantum physics
        ("quantum_entanglement", 0), ("wave_function", 0), ("superposition", 0),
        ("decoherence", 0), ("measurement", 0),
        # Dimension 1: geometry / symmetry
        ("topology", 1), ("manifold", 1), ("symmetry", 1),
        ("transformation", 1), ("invariance", 1),
        # Dimension 2: computation
        ("algorithm", 2), ("recursion", 2), ("emergence", 2),
        ("complexity", 2), ("optimization", 2),
        # Dimension 3: biology / self-organization
        ("evolution", 3), ("adaptation", 3), ("self_organization", 3),
        ("morphogenesis", 3), ("homeostasis", 3),
        # Dimension 4: philosophy of mind
        ("consciousness", 4), ("qualia", 4), ("intentionality", 4),
        ("emergence", 4), ("reduction", 4),
    ]

    nodes = []
    for concept, dim in concepts:
        # Deterministic node from the concept text, persisted immediately.
        node = DimensionalNodeFactory.create_from_text(concept, dimension=dim)
        db.add_node(node)
        nodes.append(node)
        print(f" β Created node: {concept} (D{dim})")

    # Step 3: full pairwise entanglement matrix over all nodes.
    print(f"\nπ Computing entanglement matrix for {len(nodes)} nodes...")
    matrix = EntanglementMatrix(nodes)

    # Step 4: persist each node's top-5 links above the 0.3 threshold.
    # Both directions of a pair may be stored (i->j and j->i).
    print("\nπ« Storing entanglement relationships...")
    stored_count = 0
    for i, node_i in enumerate(nodes):
        entangled = matrix.get_entangled_nodes(i, threshold=0.3)
        for j, strength in entangled[:5]:
            node_j = nodes[j]

            # Auxiliary link metrics: phase alignment and inverse distance.
            phase_coherence = np.cos(abs(node_i.phase - node_j.phase))
            spatial_proximity = 1.0 / (1.0 + np.linalg.norm(node_i.position - node_j.position))

            db.add_entanglement(
                node_i.node_id,
                node_j.node_id,
                strength,
                float(phase_coherence),
                float(spatial_proximity)
            )
            stored_count += 1

    print(f" β Stored {stored_count} entanglement relationships")

    # Step 5: generate (and persist) training examples from random clusters.
    print("\nπ― Generating sophisticated training data...")
    generator = TrainingDataGenerator(db)
    examples = generator.generate_batch(num_examples=50, dimensions=[0, 1, 2, 3, 4])

    # Step 6: show the first few generated examples.
    print("\nπ Sample Training Examples:")
    print("-" * 80)
    for i, example in enumerate(examples[:3], 1):
        print(f"\nExample {i} (Emergence Score: {example['emergence_score']:.3f}):")
        print(f"Dimension Signature: {example['dimension_signature']}")
        print(f"Prompt: {example['prompt']}")
        print(f"Completion: {example['completion'][:200]}...")
        print(f"Source Nodes: {len(example['source_nodes'])} nodes")

    # Step 7: export the stored examples above the score cutoff as JSONL.
    print("\nπΎ Exporting training data...")
    db.export_training_jsonl("training_data_emergent.jsonl", min_emergence_score=0.4)

    # Step 8: summary statistics for this run.
    print("\nπ Database Statistics:")
    print(f" Total Nodes: {len(nodes)}")
    print(f" Total Entanglements: {stored_count}")
    print(f" Training Examples Generated: {len(examples)}")
    print(f" High-Quality Examples (score > 0.5): {sum(1 for e in examples if e['emergence_score'] > 0.5)}")

    # Step 9: preview the strongest links for the first few nodes.
    print("\nπ Entanglement Matrix (top connections):")
    for i in range(min(5, len(nodes))):
        entangled = matrix.get_entangled_nodes(i, threshold=0.5)
        if entangled:
            print(f" {nodes[i].metadata['concept']} ββ ", end="")
            connections = [f"{nodes[j].metadata['concept']}({s:.2f})"
                           for j, s in entangled[:3]]
            print(", ".join(connections))

    db.close()

    print("\n" + "=" * 80)
    print("β¨ System Ready! Training data generated from dimensional entanglement.")
    print("=" * 80)
    print("\nπ Files created:")
    print(" - dimensional_entanglement.db (SQLite database)")
    print(" - training_data_emergent.jsonl (Training data for LLM)")
    print("\nπ Next steps:")
    print(" 1. Review training_data_emergent.jsonl")
    print(" 2. Fine-tune your LLM with this data")
    print(" 3. Add more nodes from your domain")
    print(" 4. Generate more sophisticated examples")
|
|
|
|
|
|
|
|
# Script entry point: run the full end-to-end demonstration.
if __name__ == "__main__":
    demo_dimensional_entanglement_system()
|
|
|
|
|
|