#!/usr/bin/env python3
"""Test EQ engine implementation."""
import torch
from modeling_zenith import ZenithConfig, ZenithModel
def test_eq_engine():
    """Smoke-test the EQ engine end to end.

    Builds a small ZenithModel with every EQ feature enabled, then runs
    one training-mode forward pass (labels supplied, loss expected) and
    one inference-mode forward pass under ``torch.no_grad()``.

    Raises:
        AssertionError: if the logits shape is wrong, or the training
            loss is missing or non-finite.
    """
    print("Testing EQ Engine Implementation...")
    # Create config with all EQ features enabled; sizes kept small so the
    # test runs quickly.
    config = ZenithConfig(
        use_eq_adapter=True,
        use_eq_attention_bias=True,
        use_eq_gated_ffn=True,
        use_eq_recurrence=True,
        eq_consistency_weight=0.02,
        eq_state_dim=256,
        num_layers=4,  # Small for testing
        hidden_size=512,
        num_heads=8,
        head_dim=64,
        intermediate_size=2048
    )
    print(f"Config: {config}")
    # Create model
    model = ZenithModel(config)
    print(f"[OK] Model created successfully")
    print(f" Parameters: {sum(p.numel() for p in model.parameters()):,}")
    # Test forward pass
    batch_size = 2
    seq_len = 16
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
    # Training mode to test consistency loss: passing labels should make
    # the model return a finite scalar loss.
    model.train()
    outputs = model(input_ids=input_ids, labels=input_ids)
    print(f"[OK] Forward pass successful")
    print(f" Logits shape: {outputs.logits.shape}")
    print(f" Loss: {outputs.loss.item() if outputs.loss is not None else 'None'}")
    # Actually verify the training pass instead of only printing it — a
    # bad shape or a None/NaN loss must fail the test, not scroll by.
    assert outputs.logits.shape == (batch_size, seq_len, config.vocab_size), \
        f"unexpected logits shape: {outputs.logits.shape}"
    assert outputs.loss is not None, "loss missing in training mode with labels"
    assert torch.isfinite(outputs.loss), f"non-finite loss: {outputs.loss}"
    # Test inference mode
    model.eval()
    with torch.no_grad():
        outputs = model(input_ids=input_ids)
    print(f"[OK] Inference successful")
    print(f" Logits shape: {outputs.logits.shape}")
    # Inference must produce logits of the same shape as training.
    assert outputs.logits.shape == (batch_size, seq_len, config.vocab_size), \
        f"unexpected inference logits shape: {outputs.logits.shape}"
    print("\n[SUCCESS] EQ Engine implementation is FULLY FUNCTIONAL")
    print("\nFeatures implemented:")
    print(" [1] EQ attention bias")
    print(" [2] EQ-gated FFN")
    print(" [3] Recurrent EQ state with GRU")
    print(" [4] EQ consistency loss")
    print(" [5] Per-layer EQ adapter integration")
# Run the smoke test only when executed as a script, not on import.
if __name__ == "__main__":
    test_eq_engine()