import traceback

import torch

from components.tiny_confessional_layer_fixed import TinyConfessionalLayer
|
|
def test_fixed_layer():
    """Run shape-safety smoke tests against TinyConfessionalLayer.

    Builds a layer with a fixed ``d_model=256`` and feeds it inputs of
    varying ``(batch, seq, d_model)`` shapes — several cases deliberately
    mismatch the layer's feature size — presumably to exercise the layer's
    shape-repair path (TODO: confirm against the layer's implementation).

    Returns:
        tuple[int, int]: ``(passed, failed)`` counts, so callers can detect
        failures instead of relying on console output alone.
    """
    print("🧪 Testing TinyConfessionalLayer with shape safety...")

    # (batch, seq, d_model) — the 512/128/768/3 cases do NOT match the
    # layer's d_model=256 on purpose.
    test_cases = [
        (1, 10, 256),
        (2, 8, 512),
        (4, 20, 128),
        (1, 5, 768),
        (3, 3, 3),
    ]

    passed = 0
    failed = 0
    for batch, seq, d_model in test_cases:
        print(f"\nTesting: batch={batch}, seq={seq}, d_model={d_model}")

        try:
            # Fresh layer per case so state from one run cannot leak into
            # the next.
            model = TinyConfessionalLayer(
                d_model=256,
                enable_ambient=False,
                enable_windsurf=False
            )

            x = torch.randn(batch, seq, d_model)

            out, metadata = model(x, audit_mode=True)

            # Output feature dim must match the layer's d_model (256),
            # regardless of the input's feature dim.
            expected_shape = (batch, seq, 256)
            assert out.shape == expected_shape, \
                f"Expected shape {expected_shape}, got {out.shape}"

            passed += 1
            print(f"✅ Success! Input: {x.shape} -> Output: {out.shape}")
            print(f" Cycles: {metadata['cycles_run']}, "
                  f"Shape fixes: {metadata.get('shape_issues_resolved', 0)}")

        except Exception as e:
            # Report and continue so later cases still run, but record the
            # failure so callers aren't misled by the console output alone.
            failed += 1
            print(f"❌ Test failed: {str(e)}")
            traceback.print_exc()

    return passed, failed
|
|
| if __name__ == "__main__": |
| test_fixed_layer() |
| print("\n🎉 All tests completed!") |
|
|