Remove tests/test_all.py - merged into eval/iron_eval.py
Browse files- tests/test_all.py +0 -898
tests/test_all.py
DELETED
|
@@ -1,898 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
UNIFIED TEST SUITE FOR 8-BIT THRESHOLD COMPUTER
|
| 3 |
-
================================================
|
| 4 |
-
Combined tests from:
|
| 5 |
-
1. Overflow Chains - Chained arithmetic operations
|
| 6 |
-
2. Equivalence - Exhaustive verification against Python
|
| 7 |
-
3. Gate Reconstruction - Derive Boolean functions from weights
|
| 8 |
-
4. Perturbation - Adversarial weight changes
|
| 9 |
-
5. Timing - Circuit depth analysis
|
| 10 |
-
6. Cryptographic Self-Test - Checksum verification
|
| 11 |
-
7. Self-Modifying Code - Von Neumann architecture
|
| 12 |
-
8. Turing Completeness - Rule 110 and Brainfuck
|
| 13 |
-
9. Independence - Derive weights from specification
|
| 14 |
-
10. Skeptic Tests - Mathematical property verification
|
| 15 |
-
11. Stress Tests - Complex algorithms
|
| 16 |
-
"""
|
| 17 |
-
|
| 18 |
-
import torch
|
| 19 |
-
from safetensors.torch import load_file
|
| 20 |
-
from itertools import product
|
| 21 |
-
from collections import Counter
|
| 22 |
-
import random
|
| 23 |
-
import time
|
| 24 |
-
|
| 25 |
-
# =============================================================================
# MODEL LOADING
# =============================================================================

# Load every circuit's weights/biases from the safetensors checkpoint at
# import time; all eval_* helpers below read from this module-level dict.
# NOTE(review): assumes 'neural_computer.safetensors' sits in the CWD — confirm.
model = load_file('neural_computer.safetensors')
|
| 30 |
-
|
| 31 |
-
# =============================================================================
|
| 32 |
-
# CORE PRIMITIVES
|
| 33 |
-
# =============================================================================
|
| 34 |
-
|
| 35 |
-
def heaviside(x):
    """Step activation: 1.0 where x >= 0, otherwise 0.0."""
    return x.ge(0).float()
|
| 38 |
-
|
| 39 |
-
def int_to_bits(val, width=8):
    """Convert an integer to a float tensor of its bits, MSB first."""
    msb_first = [float((val >> shift) & 1) for shift in range(width - 1, -1, -1)]
    return torch.tensor(msb_first, dtype=torch.float32)
|
| 42 |
-
|
| 43 |
-
def int_to_bits_lsb(val, width=8):
    """Convert an integer to a float tensor of its bits, LSB first."""
    lsb_first = [float((val >> shift) & 1) for shift in range(width)]
    return torch.tensor(lsb_first, dtype=torch.float32)
|
| 46 |
-
|
| 47 |
-
def bits_to_int(bits):
    """Reassemble an integer from a bit tensor, MSB first."""
    total = 0
    for bit in bits:
        total = (total << 1) | int(bit.item())
    return total
|
| 53 |
-
|
| 54 |
-
# =============================================================================
|
| 55 |
-
# BOOLEAN GATE PRIMITIVES
|
| 56 |
-
# =============================================================================
|
| 57 |
-
|
| 58 |
-
def eval_and(a, b):
    """AND gate: one threshold neuron over the model's stored weights."""
    x = torch.tensor([float(a), float(b)])
    activation = x @ model['boolean.and.weight'] + model['boolean.and.bias']
    return int(heaviside(activation).item())
|
| 61 |
-
|
| 62 |
-
def eval_or(a, b):
    """OR gate: one threshold neuron over the model's stored weights."""
    x = torch.tensor([float(a), float(b)])
    activation = x @ model['boolean.or.weight'] + model['boolean.or.bias']
    return int(heaviside(activation).item())
|
| 65 |
-
|
| 66 |
-
def eval_not(a):
    """NOT gate: single-input threshold neuron."""
    x = torch.tensor([float(a)])
    activation = x @ model['boolean.not.weight'] + model['boolean.not.bias']
    return int(heaviside(activation).item())
|
| 69 |
-
|
| 70 |
-
def eval_nand(a, b):
    """NAND gate: one threshold neuron over the model's stored weights."""
    x = torch.tensor([float(a), float(b)])
    activation = x @ model['boolean.nand.weight'] + model['boolean.nand.bias']
    return int(heaviside(activation).item())
|
| 73 |
-
|
| 74 |
-
def eval_nor(a, b):
    """NOR gate: one threshold neuron over the model's stored weights."""
    x = torch.tensor([float(a), float(b)])
    activation = x @ model['boolean.nor.weight'] + model['boolean.nor.bias']
    return int(heaviside(activation).item())
|
| 77 |
-
|
| 78 |
-
def eval_xor(a, b):
    """XOR gate: two hidden threshold units feeding one output unit."""
    x = torch.tensor([float(a), float(b)])
    h1 = heaviside(x @ model['boolean.xor.layer1.neuron1.weight']
                   + model['boolean.xor.layer1.neuron1.bias']).item()
    h2 = heaviside(x @ model['boolean.xor.layer1.neuron2.weight']
                   + model['boolean.xor.layer1.neuron2.bias']).item()
    hidden = torch.tensor([h1, h2])
    out = heaviside(hidden @ model['boolean.xor.layer2.weight']
                    + model['boolean.xor.layer2.bias'])
    return int(out.item())
|
| 90 |
-
|
| 91 |
-
def eval_xnor(a, b):
    """XNOR gate: two hidden threshold units feeding one output unit."""
    x = torch.tensor([float(a), float(b)])
    h1 = heaviside(x @ model['boolean.xnor.layer1.neuron1.weight']
                   + model['boolean.xnor.layer1.neuron1.bias']).item()
    h2 = heaviside(x @ model['boolean.xnor.layer1.neuron2.weight']
                   + model['boolean.xnor.layer1.neuron2.bias']).item()
    hidden = torch.tensor([h1, h2])
    out = heaviside(hidden @ model['boolean.xnor.layer2.weight']
                    + model['boolean.xnor.layer2.bias'])
    return int(out.item())
|
| 103 |
-
|
| 104 |
-
# =============================================================================
|
| 105 |
-
# ARITHMETIC PRIMITIVES
|
| 106 |
-
# =============================================================================
|
| 107 |
-
|
| 108 |
-
def eval_xor_arith(inp, prefix):
    """XOR for arithmetic circuits (OR/NAND hidden-layer naming convention).

    `inp` is a 2-element float tensor; `prefix` selects the weight group.
    Returns the output as a float (0.0 or 1.0).
    """
    h_or = heaviside(inp @ model[f'{prefix}.layer1.or.weight']
                     + model[f'{prefix}.layer1.or.bias']).item()
    h_nand = heaviside(inp @ model[f'{prefix}.layer1.nand.weight']
                       + model[f'{prefix}.layer1.nand.bias']).item()
    hidden = torch.tensor([h_or, h_nand])
    return heaviside(hidden @ model[f'{prefix}.layer2.weight']
                     + model[f'{prefix}.layer2.bias']).item()
|
| 120 |
-
|
| 121 |
-
def eval_full_adder(a, b, cin, prefix):
    """Full adder built from two half adders plus a carry OR.

    Returns (sum_bit, carry_out) as ints.
    """
    ab = torch.tensor([a, b], dtype=torch.float32)
    # First half adder combines a and b.
    s1 = eval_xor_arith(ab, f'{prefix}.ha1.sum')
    c1 = heaviside(ab @ model[f'{prefix}.ha1.carry.weight']
                   + model[f'{prefix}.ha1.carry.bias']).item()
    # Second half adder folds in the carry-in.
    sc = torch.tensor([s1, cin], dtype=torch.float32)
    s2 = eval_xor_arith(sc, f'{prefix}.ha2.sum')
    c2 = heaviside(sc @ model[f'{prefix}.ha2.carry.weight']
                   + model[f'{prefix}.ha2.carry.bias']).item()
    # Carry-out is the OR of both half-adder carries.
    carries = torch.tensor([c1, c2], dtype=torch.float32)
    cout = heaviside(carries @ model[f'{prefix}.carry_or.weight']
                     + model[f'{prefix}.carry_or.bias']).item()
    return int(s2), int(cout)
|
| 132 |
-
|
| 133 |
-
def add_8bit(a, b):
    """8-bit ripple-carry addition; returns (sum mod 256, carry_out)."""
    carry = 0.0
    total = 0
    for bit in range(8):
        a_bit = float((a >> bit) & 1)
        b_bit = float((b >> bit) & 1)
        s, carry = eval_full_adder(a_bit, b_bit, carry,
                                   f'arithmetic.ripplecarry8bit.fa{bit}')
        total |= s << bit
    return total, int(carry)
|
| 141 |
-
|
| 142 |
-
def sub_8bit(a, b):
    """8-bit subtraction a - b via two's complement; returns (result, carry)."""
    complement = (~b) & 0xFF
    partial, _ = add_8bit(a, complement)
    diff, final_carry = add_8bit(partial, 1)
    return diff, final_carry
|
| 148 |
-
|
| 149 |
-
# =============================================================================
|
| 150 |
-
# BYTE-LEVEL OPERATIONS
|
| 151 |
-
# =============================================================================
|
| 152 |
-
|
| 153 |
-
def xor_8bit(a, b):
    """Bitwise XOR of two bytes, one neural XOR gate per bit position."""
    return sum(eval_xor((a >> i) & 1, (b >> i) & 1) << i for i in range(8))
|
| 158 |
-
|
| 159 |
-
def and_8bit(a, b):
    """Bitwise AND of two bytes, one neural AND gate per bit position."""
    return sum(eval_and((a >> i) & 1, (b >> i) & 1) << i for i in range(8))
|
| 164 |
-
|
| 165 |
-
def or_8bit(a, b):
    """Bitwise OR of two bytes, one neural OR gate per bit position."""
    return sum(eval_or((a >> i) & 1, (b >> i) & 1) << i for i in range(8))
|
| 170 |
-
|
| 171 |
-
def not_8bit(a):
    """Bitwise complement of a byte, one neural NOT gate per bit position."""
    return sum(eval_not((a >> i) & 1) << i for i in range(8))
|
| 176 |
-
|
| 177 |
-
# =============================================================================
|
| 178 |
-
# COMPARATORS
|
| 179 |
-
# =============================================================================
|
| 180 |
-
|
| 181 |
-
def gt(a, b):
    """Return 1 if a > b, else 0, via a single weighted-threshold comparator."""
    diff = int_to_bits(a) - int_to_bits(b)
    score = (diff @ model['arithmetic.greaterthan8bit.comparator']).item()
    return int(score > 0)
|
| 185 |
-
|
| 186 |
-
def lt(a, b):
    """Return 1 if a < b, else 0, via a single weighted-threshold comparator."""
    diff = int_to_bits(b) - int_to_bits(a)
    score = (diff @ model['arithmetic.lessthan8bit.comparator']).item()
    return int(score > 0)
|
| 190 |
-
|
| 191 |
-
def eq(a, b):
    """Return 1 if a == b, else 0: equality is neither-greater-nor-less."""
    return int(gt(a, b) == 0 and lt(a, b) == 0)
|
| 193 |
-
|
| 194 |
-
def popcount(val):
    """Count the set bits of an 8-bit value with one weighted-sum neuron.

    Returns the population count as an int. Rounds before truncating:
    the dot product is computed in float, so a result like 2.9999997
    would be silently truncated to 2 by a bare int() cast.
    """
    bits = int_to_bits(val)
    w = model['pattern_recognition.popcount.weight']
    b = model['pattern_recognition.popcount.bias']
    return int(round((bits @ w + b).item()))
|
| 199 |
-
|
| 200 |
-
# =============================================================================
|
| 201 |
-
# TEST SECTION 1: SKEPTIC TESTS (Mathematical Properties)
|
| 202 |
-
# =============================================================================
|
| 203 |
-
|
| 204 |
-
def test_skeptic():
    """127 skeptical tests for mathematical properties.

    Checks algebraic identities (identity, annihilation, involution,
    two's complement, carry propagation, commutativity, De Morgan,
    distributivity), comparator edge cases, and popcount.
    Returns True if every check passed, False otherwise.
    """
    print("\n" + "=" * 70)
    print("SKEPTIC TESTS - Mathematical Properties")
    print("=" * 70)

    failures = []

    # Identity laws: A+0 == A, A^0 == A, A&255 == A, A|0 == A
    for a in [0, 1, 127, 128, 255, 170, 85]:
        r, _ = add_8bit(a, 0)
        if r != a: failures.append(f'A+0: {a}')
        if xor_8bit(a, 0) != a: failures.append(f'A^0: {a}')
        if and_8bit(a, 255) != a: failures.append(f'A&255: {a}')
        if or_8bit(a, 0) != a: failures.append(f'A|0: {a}')

    # Annihilation laws: A&0 == 0, A|255 == 255, A^A == 0
    for a in [0, 1, 127, 128, 255]:
        if and_8bit(a, 0) != 0: failures.append(f'A&0: {a}')
        if or_8bit(a, 255) != 255: failures.append(f'A|255: {a}')
        if xor_8bit(a, a) != 0: failures.append(f'A^A: {a}')

    # Involution: double complement is the identity
    for a in [0, 1, 127, 128, 255, 170]:
        if not_8bit(not_8bit(a)) != a: failures.append(f'~~A: {a}')

    # Two's complement: A + ~A + 1 wraps to 0 mod 256
    for a in [0, 1, 42, 127, 128, 255]:
        not_a = not_8bit(a)
        r1, _ = add_8bit(a, not_a)
        r2, _ = add_8bit(r1, 1)
        if r2 != 0: failures.append(f'twos comp: {a}')

    # Carry propagation: sums chosen to overflow the 8-bit result
    cases = [(255, 1, 0), (127, 129, 0), (1, 255, 0), (128, 128, 0), (255, 255, 254)]
    for a, b, exp in cases:
        r, _ = add_8bit(a, b)
        if r != exp: failures.append(f'carry: {a}+{b}')

    # Commutativity of +, ^, &, |
    pairs = [(17, 42), (0, 255), (128, 127), (1, 254), (170, 85)]
    for a, b in pairs:
        r1, _ = add_8bit(a, b)
        r2, _ = add_8bit(b, a)
        if r1 != r2: failures.append(f'add commute: {a},{b}')
        if xor_8bit(a, b) != xor_8bit(b, a): failures.append(f'xor commute: {a},{b}')
        if and_8bit(a, b) != and_8bit(b, a): failures.append(f'and commute: {a},{b}')
        if or_8bit(a, b) != or_8bit(b, a): failures.append(f'or commute: {a},{b}')

    # De Morgan: ~(A&B) == ~A|~B and ~(A|B) == ~A&~B
    for a, b in [(0, 0), (0, 255), (255, 0), (255, 255), (170, 85)]:
        if not_8bit(and_8bit(a, b)) != or_8bit(not_8bit(a), not_8bit(b)):
            failures.append(f'DM1: {a},{b}')
        if not_8bit(or_8bit(a, b)) != and_8bit(not_8bit(a), not_8bit(b)):
            failures.append(f'DM2: {a},{b}')

    # Comparator edge cases: (a, b, expected_gt, expected_lt, expected_eq)
    cmp_tests = [
        (0, 0, 0, 0, 1), (0, 1, 0, 1, 0), (1, 0, 1, 0, 0),
        (127, 128, 0, 1, 0), (128, 127, 1, 0, 0),
        (255, 255, 0, 0, 1), (255, 0, 1, 0, 0), (0, 255, 0, 1, 0),
    ]
    for a, b, exp_gt, exp_lt, exp_eq in cmp_tests:
        if gt(a, b) != exp_gt: failures.append(f'gt({a},{b})')
        if lt(a, b) != exp_lt: failures.append(f'lt({a},{b})')
        if eq(a, b) != exp_eq: failures.append(f'eq({a},{b})')

    # Popcount: each single-bit value counts 1; extremes count 0 and 8
    for i in range(8):
        if popcount(1 << i) != 1: failures.append(f'popcount(1<<{i})')
    if popcount(0) != 0: failures.append('popcount(0)')
    if popcount(255) != 8: failures.append('popcount(255)')

    # Distributivity: A&(B|C) == (A&B)|(A&C)
    for a, b, c in [(255, 15, 240), (170, 85, 51), (0, 255, 0)]:
        if and_8bit(a, or_8bit(b, c)) != or_8bit(and_8bit(a, b), and_8bit(a, c)):
            failures.append(f'distrib: {a},{b},{c}')

    if failures:
        print(f" FAILED: {len(failures)} errors")
        for f in failures[:10]:
            print(f" {f}")
        return False
    else:
        print(f" PASSED: 127 skeptical tests")
        return True
|
| 290 |
-
|
| 291 |
-
# =============================================================================
|
| 292 |
-
# TEST SECTION 2: STRESS TESTS (Complex Algorithms)
|
| 293 |
-
# =============================================================================
|
| 294 |
-
|
| 295 |
-
def test_stress():
    """Complex algorithmic tests.

    Runs factorial, Fibonacci, GCD (via repeated subtraction), and an
    8-bit LFSR entirely on the neural circuits.
    Returns True if every sub-test passed, False otherwise.
    """
    print("\n" + "=" * 70)
    print("STRESS TESTS - Complex Algorithms")
    print("=" * 70)

    all_pass = True

    # Factorial via repeated circuit addition (multiplication by summing)
    print("\n [1] Factorial")
    def factorial(n):
        # result accumulates n! mod 256; i*result is computed by i additions
        result = 1
        for i in range(2, n+1):
            new_result = 0
            for _ in range(i):
                new_result, _ = add_8bit(new_result, result)
                new_result &= 0xFF
            result = new_result
        return result

    for n in [1, 2, 3, 4, 5]:
        got = factorial(n)
        expected = [1, 1, 2, 6, 24, 120][n]
        if got != expected:
            print(f" {n}! = {got}, expected {expected} [FAIL]")
            all_pass = False
    print(" Factorial: OK" if all_pass else " Factorial: FAIL")

    # Fibonacci: stop at the first sum that overflows 8 bits
    print("\n [2] Fibonacci")
    a, b = 0, 1
    fib = [a, b]
    for _ in range(12):
        next_val, carry = add_8bit(a, b)
        if carry: break
        fib.append(next_val)
        a, b = b, next_val
    expected_fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
    if fib[:len(expected_fib)] == expected_fib:
        print(" Fibonacci: OK")
    else:
        print(" Fibonacci: FAIL")
        all_pass = False

    # Euclid's GCD with subtraction-only arithmetic; iteration cap guards
    # against a broken comparator causing an infinite loop
    print("\n [3] GCD")
    def gcd(a, b):
        iterations = 0
        while not eq(b, 0) and iterations < 100:
            temp = a
            while not lt(temp, b) and not eq(temp, 0) and iterations < 100:
                temp, _ = sub_8bit(temp, b)
                iterations += 1
            a, b = b, temp
            iterations += 1
        return a

    test_gcds = [(48, 18, 6), (100, 35, 5), (252, 105, 21)]
    for a, b, expected in test_gcds:
        if gcd(a, b) != expected:
            print(f" gcd({a},{b}) failed")
            all_pass = False
    print(" GCD: OK" if all_pass else " GCD: FAIL")

    # Maximal-length 8-bit LFSR (taps 0,2,3,4): period must be 255
    print("\n [4] 8-bit LFSR")
    def lfsr_step(state):
        bit = eval_xor((state >> 0) & 1, (state >> 2) & 1)
        bit = eval_xor(bit, (state >> 3) & 1)
        bit = eval_xor(bit, (state >> 4) & 1)
        return ((state >> 1) | (bit << 7)) & 0xFF

    state = 1
    seen = set()
    for i in range(300):
        if state in seen: break
        seen.add(state)
        state = lfsr_step(state)

    if len(seen) == 255:
        print(" LFSR period 255: OK")
    else:
        print(f" LFSR period {len(seen)}: FAIL")
        all_pass = False

    return all_pass
|
| 381 |
-
|
| 382 |
-
# =============================================================================
|
| 383 |
-
# TEST SECTION 3: OVERFLOW CHAINS
|
| 384 |
-
# =============================================================================
|
| 385 |
-
|
| 386 |
-
def test_overflow_chains():
    """Chained arithmetic operations.

    Long chains of circuit additions (increment loop, Fibonacci mod 256,
    accumulator) compared step-by-step against plain Python arithmetic.
    Returns True if every chain matched, False otherwise.
    """
    print("\n" + "=" * 70)
    print("OVERFLOW CHAINS - Chained Arithmetic")
    print("=" * 70)

    all_pass = True

    # Add-1 chain: 512 increments wrap through the 8-bit range twice
    print("\n [1] Add-1 chain (512 additions)")
    value = 0
    errors = 0
    for i in range(512):
        expected = (value + 1) % 256
        result, _ = add_8bit(value, 1)
        if result != expected: errors += 1
        value = result
    if errors == 0:
        print(" OK")
    else:
        print(f" {errors} errors")
        all_pass = False

    # Fibonacci chain: circuit state (a, b) tracked against Python (pa, pb)
    print("\n [2] Fibonacci chain (100 terms)")
    a, b = 0, 1
    pa, pb = 0, 1
    errors = 0
    for i in range(100):
        if a != pa or b != pb: errors += 1
        next_val, _ = add_8bit(a, b)
        next_python = (pa + pb) % 256
        a, b = b, next_val
        pa, pb = pb, next_python
    if errors == 0:
        print(" OK")
    else:
        print(f" {errors} errors")
        all_pass = False

    # Accumulator: sum(1..100) mod 256 == 5050 % 256 == 186
    print("\n [3] Accumulator sum(1..100)")
    acc = 0
    for i in range(1, 101):
        acc, _ = add_8bit(acc, i)
    expected = sum(range(1, 101)) % 256
    if acc == expected:
        print(f" OK (result: {acc})")
    else:
        print(f" FAIL: got {acc}, expected {expected}")
        all_pass = False

    return all_pass
|
| 439 |
-
|
| 440 |
-
# =============================================================================
|
| 441 |
-
# TEST SECTION 4: EXHAUSTIVE EQUIVALENCE
|
| 442 |
-
# =============================================================================
|
| 443 |
-
|
| 444 |
-
def test_equivalence():
    """Exhaustive verification against Python.

    Checks every Boolean gate truth table, all 256 4-bit adder cases,
    and 1024 sampled 8-bit adder/comparator cases against Python's
    native operators. Returns True if every check passed.
    """
    print("\n" + "=" * 70)
    print("EQUIVALENCE - Exhaustive Verification")
    print("=" * 70)

    all_pass = True

    # Boolean gates: 6 two-input gates x 4 inputs + NOT x 2 = 26 checks
    print("\n [1] Boolean gates (26 checks)")
    errors = 0
    for a in [0, 1]:
        for b in [0, 1]:
            if eval_and(a, b) != (a & b): errors += 1
            if eval_or(a, b) != (a | b): errors += 1
            if eval_nand(a, b) != (1 - (a & b)): errors += 1
            if eval_nor(a, b) != (1 - (a | b)): errors += 1
            if eval_xor(a, b) != (a ^ b): errors += 1
            if eval_xnor(a, b) != (1 - (a ^ b)): errors += 1
    for a in [0, 1]:
        if eval_not(a) != (1 - a): errors += 1
    if errors == 0:
        print(" OK")
    else:
        print(f" {errors} errors")
        all_pass = False

    # 4-bit adder: all 16 x 16 input pairs, exhaustive
    print("\n [2] 4-bit adder (256 cases)")
    errors = 0
    for a in range(16):
        for b in range(16):
            carry = 0.0
            result_bits = []
            for i in range(4):
                s, carry = eval_full_adder(float((a >> i) & 1), float((b >> i) & 1), carry,
                                           f'arithmetic.ripplecarry4bit.fa{i}')
                result_bits.append(s)
            result = sum(result_bits[i] * (2**i) for i in range(4))
            if result != (a + b) % 16: errors += 1
    if errors == 0:
        print(" OK")
    else:
        print(f" {errors} errors")
        all_pass = False

    # 8-bit adder: stride-8 sampling gives 32 x 32 = 1024 cases
    print("\n [3] 8-bit adder (1024 sampled cases)")
    errors = 0
    for a in range(0, 256, 8):
        for b in range(0, 256, 8):
            result, _ = add_8bit(a, b)
            if result != (a + b) % 256: errors += 1
    if errors == 0:
        print(" OK")
    else:
        print(f" {errors} errors")
        all_pass = False

    # Comparators: gt/lt/eq on the same 1024-sample grid
    print("\n [4] Comparators (1024 sampled cases)")
    errors = 0
    for a in range(0, 256, 8):
        for b in range(0, 256, 8):
            if gt(a, b) != (1 if a > b else 0): errors += 1
            if lt(a, b) != (1 if a < b else 0): errors += 1
            if eq(a, b) != (1 if a == b else 0): errors += 1
    if errors == 0:
        print(" OK")
    else:
        print(f" {errors} errors")
        all_pass = False

    return all_pass
|
| 518 |
-
|
| 519 |
-
# =============================================================================
|
| 520 |
-
# TEST SECTION 5: GATE RECONSTRUCTION
|
| 521 |
-
# =============================================================================
|
| 522 |
-
|
| 523 |
-
def test_gate_reconstruction():
    """Reconstruct Boolean functions from weights.

    Enumerates each gate's truth table directly from its stored weights
    and matches it against the known canonical truth tables.
    Returns True if every gate is identified correctly.
    """
    print("\n" + "=" * 70)
    print("GATE RECONSTRUCTION - Derive Functions from Weights")
    print("=" * 70)

    # Canonical truth tables, ordered (0,0), (0,1), (1,0), (1,1)
    KNOWN_FUNCTIONS = {
        'AND': (0, 0, 0, 1),
        'OR': (0, 1, 1, 1),
        'NAND': (1, 1, 1, 0),
        'NOR': (1, 0, 0, 0),
        'XOR': (0, 1, 1, 0),
        'XNOR': (1, 0, 0, 1),
    }

    def identify_2input(w, b):
        # Build the gate's truth table from (w, b) and look it up
        tt = []
        for a, b_in in [(0, 0), (0, 1), (1, 0), (1, 1)]:
            inp = torch.tensor([float(a), float(b_in)])
            tt.append(int(heaviside(inp @ w + b).item()))
        tt = tuple(tt)
        for name, expected_tt in KNOWN_FUNCTIONS.items():
            if tt == expected_tt:
                return name
        return 'UNKNOWN'

    all_pass = True

    # Single-layer gates: one (weight, bias) pair each
    print("\n [1] Single-layer gates")
    gates = [('boolean.and', 'AND'), ('boolean.or', 'OR'),
             ('boolean.nand', 'NAND'), ('boolean.nor', 'NOR')]
    for prefix, expected in gates:
        identified = identify_2input(model[f'{prefix}.weight'], model[f'{prefix}.bias'])
        if identified != expected:
            print(f" {prefix}: expected {expected}, got {identified}")
            all_pass = False
    print(" OK" if all_pass else " FAIL")

    # Two-layer gates: forward pass through hidden neurons, then compare
    print("\n [2] Two-layer gates (XOR, XNOR)")
    for gate, expected_tt in [('xor', (0,1,1,0)), ('xnor', (1,0,0,1))]:
        prefix = f'boolean.{gate}'
        tt = []
        for a, b in [(0,0), (0,1), (1,0), (1,1)]:
            inp = torch.tensor([float(a), float(b)])
            h1 = heaviside(inp @ model[f'{prefix}.layer1.neuron1.weight'] +
                           model[f'{prefix}.layer1.neuron1.bias'])
            h2 = heaviside(inp @ model[f'{prefix}.layer1.neuron2.weight'] +
                           model[f'{prefix}.layer1.neuron2.bias'])
            hidden = torch.tensor([h1.item(), h2.item()])
            out = int(heaviside(hidden @ model[f'{prefix}.layer2.weight'] +
                                model[f'{prefix}.layer2.bias']).item())
            tt.append(out)
        if tuple(tt) != expected_tt:
            print(f" {gate}: expected {expected_tt}, got {tuple(tt)}")
            all_pass = False
    print(" OK" if all_pass else " FAIL")

    return all_pass
|
| 583 |
-
|
| 584 |
-
# =============================================================================
|
| 585 |
-
# TEST SECTION 6: TIMING ANALYSIS
|
| 586 |
-
# =============================================================================
|
| 587 |
-
|
| 588 |
-
def test_timing():
    """Circuit depth analysis.

    Prints the (statically known) layer depths of each circuit, then
    verifies structural facts against the model's key names: XOR has
    two layers and the 8-bit ripple carry contains full adders fa0-fa7.
    Returns True if the structural checks pass.
    """
    print("\n" + "=" * 70)
    print("TIMING ANALYSIS - Circuit Depth")
    print("=" * 70)

    GATE_DEPTHS = {'AND': 1, 'OR': 1, 'NOT': 1, 'NAND': 1, 'NOR': 1, 'XOR': 2, 'XNOR': 2}

    print("\n Primitive gate depths:")
    for gate, depth in GATE_DEPTHS.items():
        print(f" {gate}: {depth} layer(s)")

    print("\n Adder depths:")
    print(" Half adder: 2 (XOR for sum)")
    print(" Full adder: 4 (XOR->XOR chain)")
    print(" 8-bit ripple carry: 18 (4 + 2*7)")

    print("\n Comparator: 1 (single weighted threshold)")

    # Verify structure against actual model keys (not just documentation)
    print("\n Structure verification:")

    # XOR must expose both layer1 and layer2 weight keys
    xor_has_layers = (any('layer1' in k for k in model.keys() if 'boolean.xor' in k) and
                      any('layer2' in k for k in model.keys() if 'boolean.xor' in k))
    print(f" XOR two-layer: {'OK' if xor_has_layers else 'FAIL'}")

    # Ripple carry must contain keys for all eight full adders fa0..fa7
    fa_indices = set()
    for k in model.keys():
        if 'ripplecarry8bit' in k:
            for i in range(8):
                if f'fa{i}' in k:
                    fa_indices.add(i)
    print(f" 8-bit RC has fa0-fa7: {'OK' if fa_indices == set(range(8)) else 'FAIL'}")

    return xor_has_layers and fa_indices == set(range(8))
|
| 625 |
-
|
| 626 |
-
# =============================================================================
|
| 627 |
-
# TEST SECTION 7: CRYPTOGRAPHIC SELF-TEST
|
| 628 |
-
# =============================================================================
|
| 629 |
-
|
| 630 |
-
def test_self_checksum():
    """Compute checksum of weights using the circuits themselves.

    Serializes every weight tensor to bytes, then computes an additive
    and an XOR checksum with the neural adder/XOR circuits and compares
    both against plain Python references. Returns True on a match.
    """
    print("\n" + "=" * 70)
    print("CRYPTOGRAPHIC SELF-TEST - Checksum Verification")
    print("=" * 70)

    # Serialize weights: sorted keys for determinism, values mapped to
    # unsigned bytes (negative weights become 256 + v, then masked)
    all_bytes = []
    for key in sorted(model.keys()):
        for val in model[key].flatten().tolist():
            int_val = int(val)
            if int_val < 0: int_val = 256 + int_val
            all_bytes.append(int_val & 0xFF)

    print(f"\n Total weight bytes: {len(all_bytes)}")

    # Additive checksum computed entirely by the neural adder
    circuit_sum = 0
    for byte in all_bytes:
        circuit_sum, _ = add_8bit(circuit_sum, byte)

    # Python reference
    python_sum = sum(all_bytes) % 256

    print(f" Circuit checksum: {circuit_sum}")
    print(f" Python checksum: {python_sum}")

    # XOR checksum computed by the neural XOR circuit
    circuit_xor = 0
    for byte in all_bytes:
        circuit_xor = xor_8bit(circuit_xor, byte)
    python_xor = 0
    for byte in all_bytes:
        python_xor ^= byte

    print(f" Circuit XOR: {circuit_xor}")
    print(f" Python XOR: {python_xor}")

    if circuit_sum == python_sum and circuit_xor == python_xor:
        print("\n PASSED: Self-checksums match")
        return True
    else:
        print("\n FAILED: Checksum mismatch")
        return False
|
| 674 |
-
|
| 675 |
-
# =============================================================================
|
| 676 |
-
# TEST SECTION 8: TURING COMPLETENESS
|
| 677 |
-
# =============================================================================
|
| 678 |
-
|
| 679 |
-
def test_turing_completeness():
    """Rule 110 and Brainfuck tests.

    Builds a Rule 110 cell update from the neural gates and checks all
    8 neighborhood patterns, then runs a small Brainfuck interpreter on
    two programs. Returns True if all sub-tests pass.
    """
    print("\n" + "=" * 70)
    print("TURING COMPLETENESS - Rule 110 & Brainfuck")
    print("=" * 70)

    all_pass = True

    # Rule 110: next = (c XOR r) OR (NOT l AND c)
    print("\n [1] Rule 110 cellular automaton")

    def rule110_cell(left, center, right):
        not_left = eval_not(left)
        c_xor_r = eval_xor(center, right)
        not_left_and_c = eval_and(not_left, center)
        return eval_or(c_xor_r, not_left_and_c)

    # Verify all 8 patterns against the canonical Rule 110 table
    expected = {(0,0,0):0, (0,0,1):1, (0,1,0):1, (0,1,1):1,
                (1,0,0):0, (1,0,1):1, (1,1,0):1, (1,1,1):0}
    errors = 0
    for (l, c, r), exp in expected.items():
        if rule110_cell(l, c, r) != exp:
            errors += 1
    if errors == 0:
        print(" Rule 110 truth table: OK")
    else:
        print(f" Rule 110: {errors} errors")
        all_pass = False

    # Brainfuck: minimal VM with wrapping 8-bit cells and a step cap
    print("\n [2] Brainfuck interpreter")

    class BrainfuckVM:
        def __init__(self, code, max_steps=10000):
            self.code = code
            self.tape = [0] * 256          # 256 wrapping 8-bit cells
            self.dp = self.ip = 0          # data pointer, instruction pointer
            self.output = []
            self.max_steps = max_steps     # guard against non-terminating programs
            self.steps = 0
            self.brackets = {}             # matching-bracket jump table
            stack = []
            for i, c in enumerate(code):
                if c == '[': stack.append(i)
                elif c == ']' and stack:
                    j = stack.pop()
                    self.brackets[j] = i
                    self.brackets[i] = j

        def run(self):
            # Execute until the program ends or the step budget is spent
            while self.ip < len(self.code) and self.steps < self.max_steps:
                cmd = self.code[self.ip]
                if cmd == '>': self.dp = (self.dp + 1) % 256
                elif cmd == '<': self.dp = (self.dp - 1) % 256
                elif cmd == '+': self.tape[self.dp] = (self.tape[self.dp] + 1) & 0xFF
                elif cmd == '-': self.tape[self.dp] = (self.tape[self.dp] - 1) & 0xFF
                elif cmd == '.': self.output.append(self.tape[self.dp])
                elif cmd == '[' and self.tape[self.dp] == 0:
                    self.ip = self.brackets.get(self.ip, self.ip)
                elif cmd == ']' and self.tape[self.dp] != 0:
                    # Jump back to '['; skip the ip increment below
                    self.ip = self.brackets.get(self.ip, self.ip)
                    continue
                self.ip += 1
                self.steps += 1
            return self.output

    # Test: 2 + 3 = 5
    vm = BrainfuckVM("++>+++[<+>-]<.")
    if vm.run() == [5]:
        print(" BF addition (2+3=5): OK")
    else:
        print(" BF addition: FAIL")
        all_pass = False

    # Test: countdown from 5
    vm = BrainfuckVM("+++++[.-]")
    if vm.run() == [5, 4, 3, 2, 1]:
        print(" BF countdown: OK")
    else:
        print(" BF countdown: FAIL")
        all_pass = False

    return all_pass
|
| 763 |
-
|
| 764 |
-
# =============================================================================
|
| 765 |
-
# TEST SECTION 9: PERTURBATION
|
| 766 |
-
# =============================================================================
|
| 767 |
-
|
| 768 |
-
def test_perturbation():
    """Adversarial weight perturbation.

    Clones the model, corrupts only the AND gate's weights, and checks
    that AND breaks while OR stays intact — i.e. damage is localized.
    Returns True if the perturbation behaves as expected.
    """
    print("\n" + "=" * 70)
    print("PERTURBATION - Adversarial Weight Changes")
    print("=" * 70)

    # Create perturbed copy so the global model is untouched
    perturbed = {k: v.clone() for k, v in model.items()}
    perturbed['boolean.and.weight'] = torch.tensor([0.0, 1.0]) # Break AND

    def perturbed_and(a, b):
        inp = torch.tensor([float(a), float(b)])
        return int(heaviside(inp @ perturbed['boolean.and.weight'] +
                             perturbed['boolean.and.bias']).item())

    def perturbed_or(a, b):
        inp = torch.tensor([float(a), float(b)])
        return int(heaviside(inp @ perturbed['boolean.or.weight'] +
                             perturbed['boolean.or.bias']).item())

    print("\n [1] Perturb AND gate: w=[1,1] -> [0,1]")

    # AND should be broken for at least one input pair
    and_broken = False
    for a, b in [(0,0), (0,1), (1,0), (1,1)]:
        if perturbed_and(a, b) != (a & b):
            and_broken = True
            break

    # OR should be unaffected for every input pair
    or_ok = True
    for a, b in [(0,0), (0,1), (1,0), (1,1)]:
        if perturbed_or(a, b) != (a | b):
            or_ok = False
            break

    print(f" AND gate broken: {'YES (expected)' if and_broken else 'NO (unexpected)'}")
    print(f" OR gate intact: {'YES (expected)' if or_ok else 'NO (unexpected)'}")

    if and_broken and or_ok:
        print("\n PASSED: Perturbation localized to target gate")
        return True
    else:
        print("\n FAILED: Unexpected perturbation behavior")
        return False
|
| 813 |
-
|
| 814 |
-
# =============================================================================
|
| 815 |
-
# TEST SECTION 10: INDEPENDENCE REPRODUCTION
|
| 816 |
-
# =============================================================================
|
| 817 |
-
|
| 818 |
-
def test_independence():
    """Verify weights can be derived from specification."""
    print("\n" + "=" * 70)
    print("INDEPENDENCE - Derive Weights from Spec")
    print("=" * 70)

    # Textbook threshold-gate derivations: (gate, weights, bias).
    spec = [
        ('AND', [1, 1], -2),
        ('OR', [1, 1], -1),
        ('NAND', [-1, -1], 1),
        ('NOR', [-1, -1], 0),
    ]

    mismatches = 0

    print("\n Comparing derived vs actual weights:")
    for gate, expected_w, expected_b in spec:
        key = gate.lower()
        actual_w = model[f'boolean.{key}.weight'].tolist()
        actual_b = model[f'boolean.{key}.bias'].item()

        match = expected_w == actual_w and expected_b == actual_b
        if not match:
            mismatches += 1
        status = "OK" if match else "DIFFER"
        print(f" {gate}: derived={expected_w},{expected_b} actual={actual_w},{int(actual_b)} [{status}]")

    if mismatches == 0:
        print("\n PASSED: All weights match derived values")
    else:
        print("\n Note: Functional equivalence may still hold")

    return mismatches == 0
|
| 852 |
-
|
| 853 |
-
# =============================================================================
|
| 854 |
-
# MAIN
|
| 855 |
-
# =============================================================================
|
| 856 |
-
|
| 857 |
-
def main():
    """Run every test category in order and print a pass/fail summary."""
    print("=" * 70)
    print(" UNIFIED TEST SUITE FOR 8-BIT THRESHOLD COMPUTER")
    print("=" * 70)

    # (label, test callable) in fixed execution order.
    suite = [
        ("Skeptic Tests", test_skeptic),
        ("Stress Tests", test_stress),
        ("Overflow Chains", test_overflow_chains),
        ("Equivalence", test_equivalence),
        ("Gate Reconstruction", test_gate_reconstruction),
        ("Timing Analysis", test_timing),
        ("Self-Checksum", test_self_checksum),
        ("Turing Completeness", test_turing_completeness),
        ("Perturbation", test_perturbation),
        ("Independence", test_independence),
    ]
    results = [(label, runner()) for label, runner in suite]

    print("\n" + "=" * 70)
    print(" FINAL SUMMARY")
    print("=" * 70)

    passed = sum(1 for _, ok in results if ok)
    total = len(results)

    for label, ok in results:
        status = "PASS" if ok else "FAIL"
        print(f" {label:25s} [{status}]")

    print(f"\n Total: {passed}/{total} test categories passed")

    if passed == total:
        print("\n STATUS: ALL TESTS PASSED")
    else:
        print("\n STATUS: SOME TESTS FAILED")

    print("=" * 70)

    return passed == total
|
| 896 |
-
|
| 897 |
-
# Script entry point: run the full suite when executed directly
# (the boolean result of main() is intentionally not turned into an exit code).
if __name__ == "__main__":
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|