"""
Tests et benchmarks du MLE System
Vérifie :
1. Apprendissage avec le temps (réduction de l'énergie)
2. Généralisation (performance sur cas non vus)
3. Cohérence sémantique (clusters plus nets)
4. Performance CPU (temps d'inférence)
"""
import numpy as np
import time
from typing import List, Dict, Tuple
import json
from .mle_system import MLESystem
from .memory import VECTOR_SIZE
def generate_related_vectors(n: int, base_sparsity: float = 0.05,
relatedness: float = 0.7) -> List[np.ndarray]:
"""
Génère des vecteurs liés sémantiquement.
Ils partagent une fraction 'relatedness' de leurs bits actifs.
"""
target_active = int(VECTOR_SIZE * base_sparsity)
n_shared = int(target_active * relatedness)
n_unique = target_active - n_shared
    # Shared base of active indices
shared_indices = np.random.choice(VECTOR_SIZE, size=n_shared, replace=False)
vectors = []
for i in range(n):
vec = np.zeros(VECTOR_SIZE, dtype=np.uint8)
vec[shared_indices] = 1
        # Unique bits for this vector
remaining = np.setdiff1d(np.arange(VECTOR_SIZE), shared_indices)
unique_indices = np.random.choice(remaining, size=n_unique, replace=False)
vec[unique_indices] = 1
vectors.append(vec)
return vectors
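# Illustrative scale (assuming, for example, VECTOR_SIZE = 16384 with the defaults above):
# each vector carries int(16384 * 0.05) = 819 active bits, of which int(819 * 0.7) = 573
# come from the shared index set; two unrelated vectors would share only ~41 bits by chance.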
def generate_unrelated_vectors(n: int, base_sparsity: float = 0.05) -> List[np.ndarray]:
"""Génère des vecteurs indépendants."""
target_active = int(VECTOR_SIZE * base_sparsity)
vectors = []
for i in range(n):
indices = np.random.choice(VECTOR_SIZE, size=target_active, replace=False)
vec = np.zeros(VECTOR_SIZE, dtype=np.uint8)
vec[indices] = 1
vectors.append(vec)
return vectors
def generate_query_from_base(base: np.ndarray, noise: float = 0.1) -> np.ndarray:
"""Génère une requête bruitée à partir d'un vecteur de base."""
vec = base.copy()
active = np.where(vec)[0]
n_flip = int(len(active) * noise)
if n_flip > 0:
        # Turn off some active bits
to_off = np.random.choice(active, size=min(n_flip, len(active)), replace=False)
vec[to_off] = 0
        # Turn on some random inactive bits
inactive = np.where(vec == 0)[0]
to_on = np.random.choice(inactive, size=min(n_flip, len(inactive)), replace=False)
vec[to_on] = 1
return vec
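# Note: noise=0.1 turns off roughly 10% of the active bits and turns on an equal number
# of previously inactive bits, so the query keeps approximately the same sparsity while
# sitting a controlled Hamming distance away from its base vector.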
class MLEBenchmark:
"""Benchmark complet du système MLE."""
def __init__(self, system: MLESystem):
self.system = system
self.results: Dict[str, List[float]] = {
'phase': [],
'final_energy': [],
'convergence_rate': [],
'memory_size': [],
'n_associations': [],
'avg_inference_time_ms': [],
'semantic_coherence': [],
'generalization_score': [],
}
def run_learning_curve(
self,
n_train: int = 200,
n_test: int = 50,
n_batches: int = 5,
vectors_per_batch: int = 20,
):
"""
Exécute un benchmark d'apprentissage en courbe.
1. Génère des concepts de base
2. Entraîne sur plusieurs batches
3. Teste la généralisation à chaque étape
"""
print("\n" + "="*70)
print("BENCHMARK: Learning Curve & Generalization")
print("="*70)
        # Generate base concepts (simulating semantic categories)
n_concepts = 10
concepts = []
for i in range(n_concepts):
            # Each concept has a semantic base vector
base = generate_related_vectors(1, relatedness=1.0)[0]
            # Variants of the concept (they share 80% of their active bits)
variants = generate_related_vectors(5, relatedness=0.8)
concepts.append((base, variants))
        # Build the training and test data
train_data = []
test_data = []
for base, variants in concepts:
            # Some variants for training
for v in variants[:3]:
train_data.append(v)
            # Held-out variants for testing
for v in variants[3:]:
test_data.append(v)
            # Noisy queries derived from the base vector
for _ in range(3):
train_data.append(generate_query_from_base(base, noise=0.15))
for _ in range(2):
test_data.append(generate_query_from_base(base, noise=0.25))
np.random.shuffle(train_data)
np.random.shuffle(test_data)
        # Training phase, batch by batch
for batch_idx in range(n_batches):
print(f"\n--- Training Batch {batch_idx + 1}/{n_batches} ---")
start_idx = batch_idx * vectors_per_batch
end_idx = min(start_idx + vectors_per_batch, len(train_data))
batch = train_data[start_idx:end_idx]
for i, vec in enumerate(batch):
result = self.system.process(vec)
                if i % 10 == 0:
                    energy = result.energy_trajectory[-1] if result.energy_trajectory else 0.0
                    print(f" Processed {i}/{len(batch)} vectors, "
                          f"energy={energy:.1f}, "
                          f"converged={result.converged}")
            # Evaluate after each batch
self._evaluate("train", batch_idx)
        # Test phase (generalization)
print(f"\n--- Testing Generalization ({len(test_data)} vectors) ---")
generalization_scores = []
for i, vec in enumerate(test_data):
result = self.system.process(vec)
            # Generalization score: proximity to the original concepts.
            # The lower the final energy, the better the generalization.
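            # The mapping 1 / (1 + E/1000) sends E=0 to a score of 1.0 and E=1000 to 0.5;
            # the 1000 appears to be an arbitrary normalization scale, not a calibrated constant.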
if result.energy_trajectory:
score = 1.0 / (1.0 + result.energy_trajectory[-1] / 1000.0)
generalization_scores.append(score)
            if i % 10 == 0:
                energy = result.energy_trajectory[-1] if result.energy_trajectory else 0.0
                print(f" Tested {i}/{len(test_data)} vectors, "
                      f"energy={energy:.1f}")
self._evaluate("test", n_batches)
avg_gen = float(np.mean(generalization_scores)) if generalization_scores else 0.0
self.results['generalization_score'].append(avg_gen)
print(f"\nAverage Generalization Score: {avg_gen:.4f}")
def _evaluate(self, phase: str, step: int):
"""Évalue et enregistre les métriques."""
summary = self.system.get_metrics_summary()
self.results['phase'].append(f"{phase}_{step}")
self.results['final_energy'].append(
summary.get('performance', {}).get('avg_final_energy', 0.0)
)
self.results['convergence_rate'].append(
summary.get('performance', {}).get('convergence_rate', 0.0)
)
self.results['memory_size'].append(
summary.get('memory', {}).get('size', 0)
)
self.results['n_associations'].append(
summary.get('energy', {}).get('n_associations', 0)
)
self.results['avg_inference_time_ms'].append(
summary.get('performance', {}).get('avg_inference_time_ms', 0.0)
)
self.results['semantic_coherence'].append(
summary.get('performance', {}).get('semantic_coherence', 0.0)
)
print(f" [Metrics] Energy={self.results['final_energy'][-1]:.1f}, "
f"Convergence={self.results['convergence_rate'][-1]:.2%}, "
f"Memory={self.results['memory_size'][-1]}, "
f"Assoc={self.results['n_associations'][-1]}, "
f"Coherence={self.results['semantic_coherence'][-1]:.3f}")
def run_stability_test(self, n_iterations: int = 100):
"""
Test de stabilité : le système ne doit pas diverger
avec un flux continu de données.
"""
print("\n" + "="*70)
print("BENCHMARK: Stability Test")
print("="*70)
        # Generate a continuous stream
base_vectors = generate_unrelated_vectors(5)
energies = []
memory_sizes = []
for i in range(n_iterations):
            # Alternate between known and new vectors
if i % 3 == 0 and i > 0:
                # Brand-new vector
vec = generate_unrelated_vectors(1)[0]
else:
                # Vector related to an existing one
base = base_vectors[i % len(base_vectors)]
vec = generate_query_from_base(base, noise=0.2)
result = self.system.process(vec)
if result.energy_trajectory:
energies.append(result.energy_trajectory[-1])
memory_sizes.append(self.system.memory.size)
            if i % 20 == 0:
                recent_energy = float(np.mean(energies[-20:])) if energies else 0.0
                print(f" Iteration {i}: energy={recent_energy:.1f}, "
                      f"memory={self.system.memory.size}")
        # Check stability
if len(energies) > 20:
early_mean = np.mean(energies[:20])
late_mean = np.mean(energies[-20:])
print(f"\n Early energy: {early_mean:.1f}")
print(f" Late energy: {late_mean:.1f}")
if late_mean < early_mean * 0.9:
print(" ✓ Energy decreased with experience (learning confirmed)")
elif late_mean < early_mean * 1.1:
print(" ✓ Energy stable (system stable)")
else:
print(" ✗ Energy increased (potential instability)")
def run_binding_test(self, n_trials: int = 20):
"""
        Binding/unbinding and composition test.
"""
print("\n" + "="*70)
print("BENCHMARK: Binding & Composition Test")
print("="*70)
        # Create role and filler vectors
roles = generate_unrelated_vectors(3) # agent, action, patient
fillers = generate_unrelated_vectors(3) # john, run, ball
successes = 0
for trial in range(n_trials):
role_idx = trial % 3
filler_idx = (trial + 1) % 3
# Binding
bound = self.system.binder.bind_role_filler(
roles[role_idx],
fillers[filler_idx]
)
# Unbinding
recovered = self.system.binder.unbind_role_filler(bound, roles[role_idx])
            # Check the similarity
similarity = np.mean(recovered == fillers[filler_idx])
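            # Note: this is bitwise agreement over all positions, so the chance baseline
            # depends on the density of `recovered` (two independent 5%-sparse vectors
            # already agree on ~90% of positions); the 0.6 threshold is only informative
            # if unbinding yields a roughly balanced, non-sparse estimate.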
if similarity > 0.6:
successes += 1
print(f" Binding/Unbinding accuracy: {successes}/{n_trials} ({successes/n_trials:.1%})")
def run_abstraction_test(self, n_patterns: int = 10, n_instances: int = 5):
"""
        Abstraction-formation test.
        The system should detect recurring patterns and compile them.
"""
print("\n" + "="*70)
print("BENCHMARK: Abstraction Test")
print("="*70)
initial_size = self.system.memory.size
for p in range(n_patterns):
            # Generate instances of a pattern
pattern_base = generate_related_vectors(1, relatedness=1.0)[0]
for i in range(n_instances):
instance = generate_query_from_base(pattern_base, noise=0.15)
self.system.process(instance)
final_size = self.system.memory.size
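        # Any memory growth beyond the raw instances stored is attributed to abstractions
        # that the system compiled on its own from the recurring patterns.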
abstractions_created = final_size - initial_size - n_patterns * n_instances
print(f" Initial memory size: {initial_size}")
print(f" Final memory size: {final_size}")
print(f" Expected new vectors: {n_patterns * n_instances}")
print(f" Actual new vectors: {final_size - initial_size}")
print(f" Potential abstractions: {max(0, abstractions_created)}")
def run_all(self):
"""Exécute tous les benchmarks."""
print("\n" + "="*70)
print("MLE SYSTEM COMPREHENSIVE BENCHMARK")
print("="*70)
self.run_learning_curve()
self.run_stability_test()
self.run_binding_test()
self.run_abstraction_test()
        # Final summary
print("\n" + "="*70)
print("FINAL SUMMARY")
print("="*70)
self.system.print_summary()
return self.results
def quick_test():
"""Test rapide pour vérifier le fonctionnement de base."""
print("Quick functionality test...")
mle = MLESystem(
memory_capacity=1000,
online_learning=True,
)
    # Basic inference test
vec = np.zeros(VECTOR_SIZE, dtype=np.uint8)
vec[np.random.choice(VECTOR_SIZE, size=200, replace=False)] = 1
result = mle.process(vec)
print(f" Basic inference: converged={result.converged}, "
f"iterations={result.n_iterations}, "
f"energy={result.energy_trajectory[-1]:.1f if result.energy_trajectory else 0:.1f}")
    # Binding test
a = np.zeros(VECTOR_SIZE, dtype=np.uint8)
a[np.random.choice(VECTOR_SIZE, size=200, replace=False)] = 1
b = np.zeros(VECTOR_SIZE, dtype=np.uint8)
b[np.random.choice(VECTOR_SIZE, size=200, replace=False)] = 1
bound = mle.binder.bind(a, b)
recovered = mle.binder.unbind(bound, a)
similarity = np.mean(recovered == b)
print(f" Binding test: similarity={similarity:.3f}")
    # Query test
neighbors = mle.query(vec, k=3)
print(f" Query test: found {len(neighbors)} neighbors")
print(" ✓ All basic tests passed")
return mle
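# Note: this module uses relative imports (from .mle_system, .memory), so run it as part
# of the package, e.g. `python -m mle.tests` from the project root, rather than as a
# standalone script.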
if __name__ == "__main__":
    # Quick test
mle = quick_test()
    # Full benchmark
benchmark = MLEBenchmark(mle)
results = benchmark.run_all()
    # Save the results
with open("benchmark_results.json", "w") as f:
json.dump(results, f, indent=2)
print("\nResults saved to benchmark_results.json")