| """
|
| IRON EVAL - COMPREHENSIVE
|
| =========================
|
| Complete fitness evaluation for ALL circuits in the threshold computer.
|
| 108 circuits, no placeholders, no shortcuts.
|
|
|
| GPU-optimized for population-based evolution.
|
| Target: ~40GB VRAM on RTX 6000 Ada (4M population)
|
| """
|
|
|
| import torch
|
| from typing import Dict, Tuple
|
| from safetensors import safe_open
|
|
|
|
|
def load_model(base_path: str = ".") -> Dict[str, torch.Tensor]:
    """Load all tensors from ``neural_computer.safetensors``.

    Args:
        base_path: Directory containing ``neural_computer.safetensors``.

    Returns:
        Mapping from tensor name to a float32 copy of that tensor.
    """
    # Use the context manager so the file handle is released promptly;
    # the original left it open for the lifetime of the process.
    with safe_open(f"{base_path}/neural_computer.safetensors", framework='pt') as f:
        return {name: f.get_tensor(name).float() for name in f.keys()}
|
|
|
|
|
def heaviside(x: torch.Tensor) -> torch.Tensor:
    """Hard step activation.

    Returns a float32 tensor holding 1.0 wherever ``x >= 0`` and 0.0
    elsewhere (note: zero itself maps to 1.0).
    """
    mask = x.ge(0)
    return mask.to(torch.float32)
|
|
|
|
|
| class BatchedFitnessEvaluator:
|
| """
|
| GPU-batched fitness evaluator. Tests ALL circuits comprehensively.
|
| """
|
|
|
| def __init__(self, device='cuda'):
|
| self.device = device
|
| self._setup_tests()
|
|
|
    def _setup_tests(self):
        """Pre-compute every shared test vector on ``self.device``.

        Called once from ``__init__``; the ``_test_*`` methods read these
        attributes instead of rebuilding their inputs on each call.
        """
        d = self.device

        # All four (a, b) rows of the 2-input truth table.
        self.tt2 = torch.tensor([[0,0],[0,1],[1,0],[1,1]], device=d, dtype=torch.float32)

        # All eight (a, b, cin) rows of the 3-input truth table.
        self.tt3 = torch.tensor([
            [0,0,0], [0,0,1], [0,1,0], [0,1,1],
            [1,0,0], [1,0,1], [1,1,0], [1,1,1]
        ], device=d, dtype=torch.float32)

        # Expected outputs, row-aligned with tt2 (two-input gates),
        # not_inputs ('not') and tt3 ('fa_sum'/'fa_cout').
        self.expected = {
            'and': torch.tensor([0,0,0,1], device=d, dtype=torch.float32),
            'or': torch.tensor([0,1,1,1], device=d, dtype=torch.float32),
            'nand': torch.tensor([1,1,1,0], device=d, dtype=torch.float32),
            'nor': torch.tensor([1,0,0,0], device=d, dtype=torch.float32),
            'xor': torch.tensor([0,1,1,0], device=d, dtype=torch.float32),
            'xnor': torch.tensor([1,0,0,1], device=d, dtype=torch.float32),
            'implies': torch.tensor([1,1,0,1], device=d, dtype=torch.float32),
            'biimplies': torch.tensor([1,0,0,1], device=d, dtype=torch.float32),
            'not': torch.tensor([1,0], device=d, dtype=torch.float32),
            'ha_sum': torch.tensor([0,1,1,0], device=d, dtype=torch.float32),
            'ha_carry': torch.tensor([0,0,0,1], device=d, dtype=torch.float32),
            'fa_sum': torch.tensor([0,1,1,0,1,0,0,1], device=d, dtype=torch.float32),
            'fa_cout': torch.tensor([0,0,0,1,0,1,1,1], device=d, dtype=torch.float32),
        }

        # Single-input rows for the NOT gate.
        self.not_inputs = torch.tensor([[0],[1]], device=d, dtype=torch.float32)

        # 24 representative 8-bit values: powers of two, boundary values,
        # and alternating/blocked bit patterns.
        self.test_8bit = torch.tensor([
            0, 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128, 255,
            0b10101010, 0b01010101, 0b11110000, 0b00001111,
            0b11001100, 0b00110011, 0b10000001, 0b01111110
        ], device=d, dtype=torch.long)

        # The same 24 values as a (24, 8) float bit matrix, MSB first.
        self.test_8bit_bits = torch.stack([
            ((self.test_8bit >> (7-i)) & 1).float() for i in range(8)
        ], dim=1)

        # Comparator operand pairs: equal values, adjacent values, and
        # power-of-two boundary cases.  NOTE(review): (1,2)/(2,1) appears
        # twice — looks like an accidental duplicate, but removing it would
        # change the maximum comparator score, so it is kept as-is.
        comp_tests = [
            (0,0), (1,0), (0,1), (5,3), (3,5), (5,5),
            (255,0), (0,255), (128,127), (127,128),
            (100,99), (99,100), (64,32), (32,64),
            (200,100), (100,200), (1,2), (2,1),
            (1,2), (2,1), (2,4), (4,2), (4,8), (8,4),
            (8,16), (16,8), (16,32), (32,16), (32,64), (64,32),
            (64,128), (128,64),
            (1,1), (2,2), (4,4), (8,8), (16,16), (32,32), (64,64), (128,128),
            (7,8), (8,7), (9,8), (8,9),
            (15,16), (16,15), (17,16), (16,17),
            (31,32), (32,31), (33,32), (32,33),
            (63,64), (64,63), (65,64), (64,65),
            (127,128), (128,127), (129,128), (128,129),
        ]
        self.comp_a = torch.tensor([c[0] for c in comp_tests], device=d, dtype=torch.long)
        self.comp_b = torch.tensor([c[1] for c in comp_tests], device=d, dtype=torch.long)
        # MSB-first bit matrices for the comparator operands.
        self.comp_a_bits = torch.stack([((self.comp_a >> (7-i)) & 1).float() for i in range(8)], dim=1)
        self.comp_b_bits = torch.stack([((self.comp_b >> (7-i)) & 1).float() for i in range(8)], dim=1)

        # Exhaustive 0..255 sweep for the modular-arithmetic circuits.
        self.mod_test = torch.arange(0, 256, device=d, dtype=torch.long)
        self.mod_test_bits = torch.stack([((self.mod_test >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
|
|
|
|
|
|
|
|
|
|
| def _test_single_gate(self, pop: Dict, gate: str, inputs: torch.Tensor,
|
| expected: torch.Tensor) -> torch.Tensor:
|
| """Test single-layer boolean gate."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop[f'boolean.{gate}.weight'].view(pop_size, -1)
|
| b = pop[f'boolean.{gate}.bias'].view(pop_size)
|
| out = heaviside(inputs @ w.T + b)
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_twolayer_gate(self, pop: Dict, prefix: str, inputs: torch.Tensor,
|
| expected: torch.Tensor) -> torch.Tensor:
|
| """Test two-layer gate (XOR, XNOR, BIIMPLIES) - boolean naming (neuron1/neuron2)."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
|
|
| w1_a = pop[f'{prefix}.layer1.neuron1.weight'].view(pop_size, -1)
|
| b1_a = pop[f'{prefix}.layer1.neuron1.bias'].view(pop_size)
|
| w1_b = pop[f'{prefix}.layer1.neuron2.weight'].view(pop_size, -1)
|
| b1_b = pop[f'{prefix}.layer1.neuron2.bias'].view(pop_size)
|
|
|
| h_a = heaviside(inputs @ w1_a.T + b1_a)
|
| h_b = heaviside(inputs @ w1_b.T + b1_b)
|
| hidden = torch.stack([h_a, h_b], dim=2)
|
|
|
| w2 = pop[f'{prefix}.layer2.weight'].view(pop_size, -1)
|
| b2 = pop[f'{prefix}.layer2.bias'].view(pop_size)
|
| out = heaviside((hidden * w2.unsqueeze(0)).sum(2) + b2.unsqueeze(0))
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_xor_gate_ornand(self, pop: Dict, prefix: str, inputs: torch.Tensor,
|
| expected: torch.Tensor) -> torch.Tensor:
|
| """Test two-layer XOR gate - arithmetic naming (or/nand)."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
|
|
| w1_or = pop[f'{prefix}.layer1.or.weight'].view(pop_size, -1)
|
| b1_or = pop[f'{prefix}.layer1.or.bias'].view(pop_size)
|
| w1_nand = pop[f'{prefix}.layer1.nand.weight'].view(pop_size, -1)
|
| b1_nand = pop[f'{prefix}.layer1.nand.bias'].view(pop_size)
|
|
|
| h_or = heaviside(inputs @ w1_or.T + b1_or)
|
| h_nand = heaviside(inputs @ w1_nand.T + b1_nand)
|
| hidden = torch.stack([h_or, h_nand], dim=2)
|
|
|
| w2 = pop[f'{prefix}.layer2.weight'].view(pop_size, -1)
|
| b2 = pop[f'{prefix}.layer2.bias'].view(pop_size)
|
| out = heaviside((hidden * w2.unsqueeze(0)).sum(2) + b2.unsqueeze(0))
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
|
|
|
|
|
|
|
|
| def _test_halfadder(self, pop: Dict) -> torch.Tensor:
|
| """Test half adder: sum and carry."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
|
|
| scores += self._test_xor_gate_ornand(pop, 'arithmetic.halfadder.sum',
|
| self.tt2, self.expected['ha_sum'])
|
|
|
| w = pop['arithmetic.halfadder.carry.weight'].view(pop_size, -1)
|
| b = pop['arithmetic.halfadder.carry.bias'].view(pop_size)
|
| out = heaviside(self.tt2 @ w.T + b)
|
| scores += (out == self.expected['ha_carry'].unsqueeze(1)).float().sum(0)
|
|
|
| return scores
|
|
|
    def _test_fulladder(self, pop: Dict) -> torch.Tensor:
        """Score the full adder over all eight (a, b, cin) combinations.

        Architecture: two half adders plus an OR of their carries —
        ha1 adds (a, b); ha2 adds (ha1.sum, cin); cout = ha1.carry OR
        ha2.carry.  One point per correct sum bit and per correct
        carry-out, so the maximum score is 16.
        """
        pop_size = next(iter(pop.values())).shape[0]
        scores = torch.zeros(pop_size, device=self.device)

        # Row order matches self.expected['fa_sum'] / ['fa_cout'].
        for i, (a, b, cin) in enumerate([(0,0,0), (0,0,1), (0,1,0), (0,1,1),
                                         (1,0,0), (1,0,1), (1,1,0), (1,1,1)]):
            inp_ab = torch.tensor([[float(a), float(b)]], device=self.device)

            # --- ha1: sum/carry of (a, b); inputs are a shared (1, 2) row ---
            ha1_sum = self._eval_xor(pop, 'arithmetic.fulladder.ha1.sum', inp_ab)
            w_c1 = pop['arithmetic.fulladder.ha1.carry.weight'].view(pop_size, -1)
            b_c1 = pop['arithmetic.fulladder.ha1.carry.bias'].view(pop_size)
            ha1_carry = heaviside(inp_ab @ w_c1.T + b_c1)

            # --- ha2: sum/carry of (ha1.sum, cin); evaluated inline because
            # its first input is per-individual, not a shared row ---
            inp_ha2 = torch.stack([ha1_sum.squeeze(0), torch.full((pop_size,), float(cin), device=self.device)], dim=1)

            w1_or = pop['arithmetic.fulladder.ha2.sum.layer1.or.weight'].view(pop_size, -1)
            b1_or = pop['arithmetic.fulladder.ha2.sum.layer1.or.bias'].view(pop_size)
            w1_nand = pop['arithmetic.fulladder.ha2.sum.layer1.nand.weight'].view(pop_size, -1)
            b1_nand = pop['arithmetic.fulladder.ha2.sum.layer1.nand.bias'].view(pop_size)
            w2 = pop['arithmetic.fulladder.ha2.sum.layer2.weight'].view(pop_size, -1)
            b2 = pop['arithmetic.fulladder.ha2.sum.layer2.bias'].view(pop_size)

            h_or = heaviside((inp_ha2 * w1_or).sum(1) + b1_or)
            h_nand = heaviside((inp_ha2 * w1_nand).sum(1) + b1_nand)
            hidden = torch.stack([h_or, h_nand], dim=1)
            ha2_sum = heaviside((hidden * w2).sum(1) + b2)

            w_c2 = pop['arithmetic.fulladder.ha2.carry.weight'].view(pop_size, -1)
            b_c2 = pop['arithmetic.fulladder.ha2.carry.bias'].view(pop_size)
            ha2_carry = heaviside((inp_ha2 * w_c2).sum(1) + b_c2)

            # --- carry-out: OR of the two half-adder carries ---
            inp_cout = torch.stack([ha1_carry.squeeze(0), ha2_carry], dim=1)
            w_cor = pop['arithmetic.fulladder.carry_or.weight'].view(pop_size, -1)
            b_cor = pop['arithmetic.fulladder.carry_or.bias'].view(pop_size)
            cout = heaviside((inp_cout * w_cor).sum(1) + b_cor)

            scores += (ha2_sum == self.expected['fa_sum'][i]).float()
            scores += (cout == self.expected['fa_cout'][i]).float()

        return scores
|
|
|
| def _eval_xor(self, pop: Dict, prefix: str, inputs: torch.Tensor) -> torch.Tensor:
|
| """Evaluate XOR gate for given inputs."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
|
|
| w1_or = pop[f'{prefix}.layer1.or.weight'].view(pop_size, -1)
|
| b1_or = pop[f'{prefix}.layer1.or.bias'].view(pop_size)
|
| w1_nand = pop[f'{prefix}.layer1.nand.weight'].view(pop_size, -1)
|
| b1_nand = pop[f'{prefix}.layer1.nand.bias'].view(pop_size)
|
| w2 = pop[f'{prefix}.layer2.weight'].view(pop_size, -1)
|
| b2 = pop[f'{prefix}.layer2.bias'].view(pop_size)
|
|
|
| h_or = heaviside(inputs @ w1_or.T + b1_or)
|
| h_nand = heaviside(inputs @ w1_nand.T + b1_nand)
|
| hidden = torch.stack([h_or, h_nand], dim=2)
|
| return heaviside((hidden * w2.unsqueeze(0)).sum(2) + b2.unsqueeze(0))
|
|
|
    def _eval_single_fa(self, pop: Dict, prefix: str, a: torch.Tensor,
                        b: torch.Tensor, cin: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Evaluate one full adder on per-individual bit inputs.

        Args:
            pop: population tensors keyed by parameter name.
            prefix: parameter-name prefix of this full adder's sub-circuits.
            a, b, cin: (pop_size,) float bit tensors, one bit per individual.

        Returns:
            Tuple (sum, carry_out), each of shape (pop_size,).
        """
        pop_size = a.shape[0]
        inp_ab = torch.stack([a, b], dim=1)

        # --- ha1.sum: XOR(a, b) via the two-layer or/nand network ---
        w1_or = pop[f'{prefix}.ha1.sum.layer1.or.weight'].view(pop_size, -1)
        b1_or = pop[f'{prefix}.ha1.sum.layer1.or.bias'].view(pop_size)
        w1_nand = pop[f'{prefix}.ha1.sum.layer1.nand.weight'].view(pop_size, -1)
        b1_nand = pop[f'{prefix}.ha1.sum.layer1.nand.bias'].view(pop_size)
        w1_l2 = pop[f'{prefix}.ha1.sum.layer2.weight'].view(pop_size, -1)
        b1_l2 = pop[f'{prefix}.ha1.sum.layer2.bias'].view(pop_size)

        h_or = heaviside((inp_ab * w1_or).sum(1) + b1_or)
        h_nand = heaviside((inp_ab * w1_nand).sum(1) + b1_nand)
        hidden1 = torch.stack([h_or, h_nand], dim=1)
        ha1_sum = heaviside((hidden1 * w1_l2).sum(1) + b1_l2)

        # --- ha1.carry: single-neuron AND(a, b) ---
        w_c1 = pop[f'{prefix}.ha1.carry.weight'].view(pop_size, -1)
        b_c1 = pop[f'{prefix}.ha1.carry.bias'].view(pop_size)
        ha1_carry = heaviside((inp_ab * w_c1).sum(1) + b_c1)

        # --- ha2 adds the first sum to the carry-in ---
        inp_ha2 = torch.stack([ha1_sum, cin], dim=1)

        w2_or = pop[f'{prefix}.ha2.sum.layer1.or.weight'].view(pop_size, -1)
        b2_or = pop[f'{prefix}.ha2.sum.layer1.or.bias'].view(pop_size)
        w2_nand = pop[f'{prefix}.ha2.sum.layer1.nand.weight'].view(pop_size, -1)
        b2_nand = pop[f'{prefix}.ha2.sum.layer1.nand.bias'].view(pop_size)
        w2_l2 = pop[f'{prefix}.ha2.sum.layer2.weight'].view(pop_size, -1)
        b2_l2 = pop[f'{prefix}.ha2.sum.layer2.bias'].view(pop_size)

        h2_or = heaviside((inp_ha2 * w2_or).sum(1) + b2_or)
        h2_nand = heaviside((inp_ha2 * w2_nand).sum(1) + b2_nand)
        hidden2 = torch.stack([h2_or, h2_nand], dim=1)
        ha2_sum = heaviside((hidden2 * w2_l2).sum(1) + b2_l2)

        w_c2 = pop[f'{prefix}.ha2.carry.weight'].view(pop_size, -1)
        b_c2 = pop[f'{prefix}.ha2.carry.bias'].view(pop_size)
        ha2_carry = heaviside((inp_ha2 * w_c2).sum(1) + b_c2)

        # --- carry-out: OR of the two half-adder carries ---
        inp_cout = torch.stack([ha1_carry, ha2_carry], dim=1)
        w_cor = pop[f'{prefix}.carry_or.weight'].view(pop_size, -1)
        b_cor = pop[f'{prefix}.carry_or.bias'].view(pop_size)
        cout = heaviside((inp_cout * w_cor).sum(1) + b_cor)

        return ha2_sum, cout
|
|
|
| def _test_ripplecarry(self, pop: Dict, bits: int, test_cases: list) -> torch.Tensor:
|
| """Test ripple carry adder of given bit width."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
|
|
| for a_val, b_val in test_cases:
|
|
|
| a_bits = [(a_val >> i) & 1 for i in range(bits)]
|
| b_bits = [(b_val >> i) & 1 for i in range(bits)]
|
|
|
| carry = torch.zeros(pop_size, device=self.device)
|
| sum_bits = []
|
|
|
| for i in range(bits):
|
| a_i = torch.full((pop_size,), float(a_bits[i]), device=self.device)
|
| b_i = torch.full((pop_size,), float(b_bits[i]), device=self.device)
|
| sum_i, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry{bits}bit.fa{i}', a_i, b_i, carry)
|
| sum_bits.append(sum_i)
|
|
|
|
|
| result = sum(sum_bits[i] * (2**i) for i in range(bits))
|
| expected = (a_val + b_val) & ((1 << bits) - 1)
|
| scores += (result == expected).float()
|
|
|
| return scores
|
|
|
    def _test_multiplier8x8(self, pop: Dict, debug: bool = False) -> torch.Tensor:
        """Score the 8x8 array multiplier over a fixed set of operand pairs.

        Architecture: an 8x8 grid of AND gates produces partial products;
        seven sequential stages each add one shifted partial-product row
        into a 16-bit running result using an inline full adder per bit.
        One point per test pair whose full 16-bit product matches
        (a * b) & 0xFFFF.

        Args:
            pop: population tensors keyed by parameter name.
            debug: when True and pop_size == 1, print each failing product.
        """
        pop_size = next(iter(pop.values())).shape[0]
        scores = torch.zeros(pop_size, device=self.device)
        failures = []

        # Edges (0, 1, 255), squares, primes, and power-of-two products.
        test_pairs = [
            (0, 0), (1, 1), (2, 3), (3, 2), (7, 7), (8, 8),
            (15, 15), (16, 16), (255, 1), (1, 255), (255, 255),
            (12, 12), (10, 25), (17, 15), (128, 2), (64, 4),
            (0, 255), (255, 0), (100, 100), (50, 200), (200, 50),
            (127, 127), (128, 128), (85, 3), (170, 2), (99, 99),
            (13, 17), (23, 29), (31, 33), (41, 43), (53, 59),
            (61, 67), (71, 73), (79, 83), (89, 97), (101, 103),
            (107, 109), (113, 127), (131, 137), (139, 149), (151, 157),
            (163, 167), (173, 179), (181, 191), (193, 197), (199, 211),
            (223, 227), (229, 233), (239, 241), (251, 1), (1, 251),
            (2, 128), (4, 64), (8, 32), (32, 8),
            (3, 85), (5, 51), (7, 36),
            (9, 28), (11, 23), (13, 19), (15, 17)
        ]

        for a_val, b_val in test_pairs:
            expected = (a_val * b_val) & 0xFFFF

            # LSB-first operand bits.
            a_bits = [(a_val >> i) & 1 for i in range(8)]
            b_bits = [(b_val >> i) & 1 for i in range(8)]

            # Partial products: pp[row][col] = AND(a[col], b[row]),
            # each evolved as its own single neuron.
            pp = [[torch.zeros(pop_size, device=self.device) for _ in range(8)] for _ in range(8)]
            for row in range(8):
                for col in range(8):
                    a_t = torch.full((pop_size,), float(a_bits[col]), device=self.device)
                    b_t = torch.full((pop_size,), float(b_bits[row]), device=self.device)
                    inp = torch.stack([a_t, b_t], dim=1)
                    w = pop[f'arithmetic.multiplier8x8.pp.r{row}.c{col}.weight'].view(pop_size, -1)
                    b = pop[f'arithmetic.multiplier8x8.pp.r{row}.c{col}.bias'].view(pop_size)
                    pp[row][col] = heaviside((inp * w).sum(1) + b)

            # Row 0 seeds the 16-bit accumulator directly (no shift).
            result_bits = [torch.zeros(pop_size, device=self.device) for _ in range(16)]
            for col in range(8):
                result_bits[col] = pp[0][col]

            # Stages 0..6 add rows 1..7, each shifted left by its row index.
            for stage in range(7):
                row_idx = stage + 1
                shift = row_idx
                sum_width = 8 + stage + 1

                new_result = [r.clone() for r in result_bits]
                carry = torch.zeros(pop_size, device=self.device)

                for bit in range(sum_width):
                    # Select the partial-product bit aligned with `bit`,
                    # or zero outside the shifted row's 8-bit span.
                    if bit < shift:
                        pp_bit = torch.zeros(pop_size, device=self.device)
                    elif bit <= shift + 7:
                        pp_bit = pp[row_idx][bit - shift]
                    else:
                        pp_bit = torch.zeros(pop_size, device=self.device)

                    prev_bit = result_bits[bit] if bit < 16 else torch.zeros(pop_size, device=self.device)

                    prefix = f'arithmetic.multiplier8x8.stage{stage}.bit{bit}'

                    # Inline full adder: prev_bit + pp_bit + carry.
                    # ha1.sum = XOR(prev_bit, pp_bit) via or/nand layers.
                    inp_ab = torch.stack([prev_bit, pp_bit], dim=1)
                    w1_or = pop[f'{prefix}.ha1.sum.layer1.or.weight'].view(pop_size, -1)
                    b1_or = pop[f'{prefix}.ha1.sum.layer1.or.bias'].view(pop_size)
                    w1_nand = pop[f'{prefix}.ha1.sum.layer1.nand.weight'].view(pop_size, -1)
                    b1_nand = pop[f'{prefix}.ha1.sum.layer1.nand.bias'].view(pop_size)
                    w1_l2 = pop[f'{prefix}.ha1.sum.layer2.weight'].view(pop_size, -1)
                    b1_l2 = pop[f'{prefix}.ha1.sum.layer2.bias'].view(pop_size)

                    h_or = heaviside((inp_ab * w1_or).sum(1) + b1_or)
                    h_nand = heaviside((inp_ab * w1_nand).sum(1) + b1_nand)
                    hidden1 = torch.stack([h_or, h_nand], dim=1)
                    ha1_sum = heaviside((hidden1 * w1_l2).sum(1) + b1_l2)

                    # ha1.carry = AND(prev_bit, pp_bit).
                    w_c1 = pop[f'{prefix}.ha1.carry.weight'].view(pop_size, -1)
                    b_c1 = pop[f'{prefix}.ha1.carry.bias'].view(pop_size)
                    ha1_carry = heaviside((inp_ab * w_c1).sum(1) + b_c1)

                    # ha2 adds the incoming carry to ha1's sum.
                    inp_ha2 = torch.stack([ha1_sum, carry], dim=1)
                    w2_or = pop[f'{prefix}.ha2.sum.layer1.or.weight'].view(pop_size, -1)
                    b2_or = pop[f'{prefix}.ha2.sum.layer1.or.bias'].view(pop_size)
                    w2_nand = pop[f'{prefix}.ha2.sum.layer1.nand.weight'].view(pop_size, -1)
                    b2_nand = pop[f'{prefix}.ha2.sum.layer1.nand.bias'].view(pop_size)
                    w2_l2 = pop[f'{prefix}.ha2.sum.layer2.weight'].view(pop_size, -1)
                    b2_l2 = pop[f'{prefix}.ha2.sum.layer2.bias'].view(pop_size)

                    h2_or = heaviside((inp_ha2 * w2_or).sum(1) + b2_or)
                    h2_nand = heaviside((inp_ha2 * w2_nand).sum(1) + b2_nand)
                    hidden2 = torch.stack([h2_or, h2_nand], dim=1)
                    ha2_sum = heaviside((hidden2 * w2_l2).sum(1) + b2_l2)

                    w_c2 = pop[f'{prefix}.ha2.carry.weight'].view(pop_size, -1)
                    b_c2 = pop[f'{prefix}.ha2.carry.bias'].view(pop_size)
                    ha2_carry = heaviside((inp_ha2 * w_c2).sum(1) + b_c2)

                    # Next carry = OR of both half-adder carries.
                    inp_cout = torch.stack([ha1_carry, ha2_carry], dim=1)
                    w_cor = pop[f'{prefix}.carry_or.weight'].view(pop_size, -1)
                    b_cor = pop[f'{prefix}.carry_or.bias'].view(pop_size)
                    carry = heaviside((inp_cout * w_cor).sum(1) + b_cor)

                    if bit < 16:
                        new_result[bit] = ha2_sum

                # Final carry of the stage lands one position above the span.
                if sum_width < 16:
                    new_result[sum_width] = carry

                result_bits = new_result

            # Reassemble the 16-bit integer and compare exactly.
            result = sum(result_bits[i] * (2**i) for i in range(16))
            passed = (result == expected).float()
            scores += passed
            if debug and pop_size == 1 and passed[0].item() == 0:
                failures.append(f"MULT({a_val} * {b_val}) = {int(result[0].item())}, expected {expected}")

        if debug and failures:
            print(f"\n Multiplier failures ({len(failures)}):")
            for f in failures:
                print(f" {f}")

        return scores
|
|
|
|
|
|
|
|
|
|
|
| def _test_comparator(self, pop: Dict, name: str, op: str) -> torch.Tensor:
|
| """Test 8-bit comparator."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop[f'arithmetic.{name}.comparator'].view(pop_size, -1)
|
|
|
| if op == 'gt':
|
| diff = self.comp_a_bits - self.comp_b_bits
|
| expected = (self.comp_a > self.comp_b).float()
|
| elif op == 'lt':
|
| diff = self.comp_b_bits - self.comp_a_bits
|
| expected = (self.comp_a < self.comp_b).float()
|
| elif op == 'geq':
|
| diff = self.comp_a_bits - self.comp_b_bits
|
| expected = (self.comp_a >= self.comp_b).float()
|
| elif op == 'leq':
|
| diff = self.comp_b_bits - self.comp_a_bits
|
| expected = (self.comp_a <= self.comp_b).float()
|
|
|
| score = diff @ w.T
|
| if op in ['geq', 'leq']:
|
| out = (score >= 0).float()
|
| else:
|
| out = (score > 0).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
    def _test_equality(self, pop: Dict) -> torch.Tensor:
        """Score the 8-bit equality circuit over the comparator test pairs.

        Architecture: one two-layer XNOR (and/nor hidden neurons) per bit
        position, then a final 8-input AND over the XNOR outputs.  One
        point per (a, b) pair whose equality verdict is correct.
        """
        pop_size = next(iter(pop.values())).shape[0]
        scores = torch.zeros(pop_size, device=self.device)

        for i in range(len(self.comp_a)):
            a_bits = self.comp_a_bits[i]
            b_bits = self.comp_b_bits[i]

            # Per-bit XNOR: 1 iff the two operand bits agree.
            xnor_results = []
            for bit in range(8):
                inp = torch.stack([
                    torch.full((pop_size,), a_bits[bit].item(), device=self.device),
                    torch.full((pop_size,), b_bits[bit].item(), device=self.device)
                ], dim=1)

                w_and = pop[f'arithmetic.equality8bit.xnor{bit}.layer1.and.weight'].view(pop_size, -1)
                b_and = pop[f'arithmetic.equality8bit.xnor{bit}.layer1.and.bias'].view(pop_size)
                w_nor = pop[f'arithmetic.equality8bit.xnor{bit}.layer1.nor.weight'].view(pop_size, -1)
                b_nor = pop[f'arithmetic.equality8bit.xnor{bit}.layer1.nor.bias'].view(pop_size)
                w_l2 = pop[f'arithmetic.equality8bit.xnor{bit}.layer2.weight'].view(pop_size, -1)
                b_l2 = pop[f'arithmetic.equality8bit.xnor{bit}.layer2.bias'].view(pop_size)

                h_and = heaviside((inp * w_and).sum(1) + b_and)
                h_nor = heaviside((inp * w_nor).sum(1) + b_nor)
                hidden = torch.stack([h_and, h_nor], dim=1)
                xnor_out = heaviside((hidden * w_l2).sum(1) + b_l2)
                xnor_results.append(xnor_out)

            # Final AND: all eight bit positions must agree.
            xnor_stack = torch.stack(xnor_results, dim=1)
            w_final = pop['arithmetic.equality8bit.final_and.weight'].view(pop_size, -1)
            b_final = pop['arithmetic.equality8bit.final_and.bias'].view(pop_size)
            eq_out = heaviside((xnor_stack * w_final).sum(1) + b_final)

            expected = (self.comp_a[i] == self.comp_b[i]).float()
            scores += (eq_out == expected).float()

        return scores
|
|
|
|
|
|
|
|
|
|
|
| def _test_threshold_kofn(self, pop: Dict, k: int, name: str) -> torch.Tensor:
|
| """Test k-of-8 threshold gate."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop[f'threshold.{name}.weight'].view(pop_size, -1)
|
| b = pop[f'threshold.{name}.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| popcounts = self.test_8bit_bits.sum(1)
|
| expected = (popcounts >= k).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_majority(self, pop: Dict) -> torch.Tensor:
|
| """Test majority gate (5+ of 8)."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop['threshold.majority.weight'].view(pop_size, -1)
|
| b = pop['threshold.majority.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| popcounts = self.test_8bit_bits.sum(1)
|
| expected = (popcounts >= 5).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_minority(self, pop: Dict) -> torch.Tensor:
|
| """Test minority gate (3 or fewer of 8)."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop['threshold.minority.weight'].view(pop_size, -1)
|
| b = pop['threshold.minority.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| popcounts = self.test_8bit_bits.sum(1)
|
| expected = (popcounts <= 3).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_atleastk(self, pop: Dict, k: int) -> torch.Tensor:
|
| """Test at-least-k threshold gate."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop[f'threshold.atleastk_{k}.weight'].view(pop_size, -1)
|
| b = pop[f'threshold.atleastk_{k}.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| popcounts = self.test_8bit_bits.sum(1)
|
| expected = (popcounts >= k).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_atmostk(self, pop: Dict, k: int) -> torch.Tensor:
|
| """Test at-most-k threshold gate."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop[f'threshold.atmostk_{k}.weight'].view(pop_size, -1)
|
| b = pop[f'threshold.atmostk_{k}.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| popcounts = self.test_8bit_bits.sum(1)
|
| expected = (popcounts <= k).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_exactlyk(self, pop: Dict, k: int) -> torch.Tensor:
|
| """Test exactly-k threshold gate (uses atleast AND atmost)."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
|
|
|
|
| w_al = pop[f'threshold.exactlyk_{k}.atleast.weight'].view(pop_size, -1)
|
| b_al = pop[f'threshold.exactlyk_{k}.atleast.bias'].view(pop_size)
|
| atleast = heaviside(self.test_8bit_bits @ w_al.T + b_al)
|
|
|
|
|
| w_am = pop[f'threshold.exactlyk_{k}.atmost.weight'].view(pop_size, -1)
|
| b_am = pop[f'threshold.exactlyk_{k}.atmost.bias'].view(pop_size)
|
| atmost = heaviside(self.test_8bit_bits @ w_am.T + b_am)
|
|
|
|
|
| combined = torch.stack([atleast, atmost], dim=2)
|
| w_and = pop[f'threshold.exactlyk_{k}.and.weight'].view(pop_size, -1)
|
| b_and = pop[f'threshold.exactlyk_{k}.and.bias'].view(pop_size)
|
| out = heaviside((combined * w_and.unsqueeze(0)).sum(2) + b_and.unsqueeze(0))
|
|
|
| popcounts = self.test_8bit_bits.sum(1)
|
| expected = (popcounts == k).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
|
|
|
|
|
|
|
|
| def _test_popcount(self, pop: Dict) -> torch.Tensor:
|
| """Test popcount (count of 1 bits)."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop['pattern_recognition.popcount.weight'].view(pop_size, -1)
|
| b = pop['pattern_recognition.popcount.bias'].view(pop_size)
|
|
|
| out = (self.test_8bit_bits @ w.T + b)
|
| expected = self.test_8bit_bits.sum(1)
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_allzeros(self, pop: Dict) -> torch.Tensor:
|
| """Test all-zeros detector."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop['pattern_recognition.allzeros.weight'].view(pop_size, -1)
|
| b = pop['pattern_recognition.allzeros.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| expected = (self.test_8bit == 0).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
| def _test_allones(self, pop: Dict) -> torch.Tensor:
|
| """Test all-ones detector."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| w = pop['pattern_recognition.allones.weight'].view(pop_size, -1)
|
| b = pop['pattern_recognition.allones.bias'].view(pop_size)
|
|
|
| out = heaviside(self.test_8bit_bits @ w.T + b)
|
| expected = (self.test_8bit == 255).float()
|
|
|
| return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
|
|
|
|
|
|
|
|
| def _eval_xor_gate(self, pop: Dict, prefix: str, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
|
| """Evaluate XOR gate on batched inputs."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
|
|
| w_or = pop[f'{prefix}.layer1.or.weight'].view(pop_size, -1)
|
| b_or = pop[f'{prefix}.layer1.or.bias'].view(pop_size)
|
| w_nand = pop[f'{prefix}.layer1.nand.weight'].view(pop_size, -1)
|
| b_nand = pop[f'{prefix}.layer1.nand.bias'].view(pop_size)
|
| w_and = pop[f'{prefix}.layer2.weight'].view(pop_size, -1)
|
| b_and = pop[f'{prefix}.layer2.bias'].view(pop_size)
|
|
|
| inp = torch.stack([a, b], dim=2)
|
| h_or = heaviside((inp * w_or.unsqueeze(0)).sum(2) + b_or.unsqueeze(0))
|
| h_nand = heaviside((inp * w_nand.unsqueeze(0)).sum(2) + b_nand.unsqueeze(0))
|
| hidden = torch.stack([h_or, h_nand], dim=2)
|
| return heaviside((hidden * w_and.unsqueeze(0)).sum(2) + b_and.unsqueeze(0))
|
|
|
    def _test_parity(self, pop: Dict, name: str, even: bool) -> torch.Tensor:
        """Score a parity circuit built as a 3-level XOR reduction tree.

        Stage 1 XORs adjacent bit pairs, stage 2 XORs those results, and
        stage 3 produces the XOR of all 8 bits; a final NOT-capable output
        neuron lets the same topology express even or odd parity.

        Args:
            pop: population tensors keyed by parameter name.
            name: circuit name within the 'error_detection' group.
            even: expect 1 for even popcount (True) or odd popcount (False).
        """
        pop_size = next(iter(pop.values())).shape[0]
        prefix = f'error_detection.{name}'
        num_tests = self.test_8bit_bits.shape[0]  # NOTE: currently unused

        # Broadcast the (24, 8) fixture to (24, pop_size, 8).
        bits = self.test_8bit_bits.unsqueeze(1).expand(-1, pop_size, -1)

        # Stage 1: XOR bit pairs (0,1), (2,3), (4,5), (6,7).
        stage1 = []
        for i, (a, b) in enumerate([(0, 1), (2, 3), (4, 5), (6, 7)]):
            xor_out = self._eval_xor_gate(pop, f'{prefix}.stage1.xor{i}', bits[:,:,a], bits[:,:,b])
            stage1.append(xor_out)

        # Stage 2: combine the four stage-1 results into two.
        stage2 = []
        stage2.append(self._eval_xor_gate(pop, f'{prefix}.stage2.xor0', stage1[0], stage1[1]))
        stage2.append(self._eval_xor_gate(pop, f'{prefix}.stage2.xor1', stage1[2], stage1[3]))

        # Stage 3: XOR of all eight input bits.
        xor_all = self._eval_xor_gate(pop, f'{prefix}.stage3.xor0', stage2[0], stage2[1])

        # Output neuron (optionally inverting, hence the 'not' name).
        w_not = pop[f'{prefix}.output.not.weight'].view(pop_size, -1)
        b_not = pop[f'{prefix}.output.not.bias'].view(pop_size)
        out = heaviside(xor_all.unsqueeze(2) * w_not.unsqueeze(0) + b_not.view(1, pop_size, 1)).squeeze(2)

        popcounts = self.test_8bit_bits.sum(1)
        if even:
            expected = ((popcounts.long() % 2) == 0).float()
        else:
            expected = ((popcounts.long() % 2) == 1).float()

        return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
|
|
|
|
|
|
|
|
| def _get_divisible_sums(self, mod: int) -> list:
|
| """Get sum values that indicate divisibility by mod."""
|
| weights = [(2**(7-i)) % mod for i in range(8)]
|
| max_sum = sum(weights)
|
| return [k for k in range(0, max_sum + 1) if k % mod == 0]
|
|
|
    def _test_modular(self, pop: Dict, mod: int) -> torch.Tensor:
        """Score a divisibility-by-`mod` detector over all 256 byte values.

        Powers of two use a single threshold neuron over the bits.  Other
        moduli use a three-layer sum-of-residues architecture: layer 1
        brackets the weighted bit sum with geq/leq neuron pairs, layer 2
        ANDs each pair into an equality detector, and layer 3 ORs the
        detectors together.
        """
        pop_size = next(iter(pop.values())).shape[0]

        if mod in [2, 4, 8]:
            # Power of two: a single neuron can inspect the low bits.
            w = pop[f'modular.mod{mod}.weight'].view(pop_size, -1)
            b = pop[f'modular.mod{mod}.bias'].view(pop_size)
            out = heaviside(self.mod_test_bits @ w.T + b)
        else:
            # One (geq, leq) bracket per divisible weighted-bit sum.
            divisible_sums = self._get_divisible_sums(mod)
            num_detectors = len(divisible_sums)

            layer1_outputs = []
            for idx in range(num_detectors):
                w_geq = pop[f'modular.mod{mod}.layer1.geq{idx}.weight'].view(pop_size, -1)
                b_geq = pop[f'modular.mod{mod}.layer1.geq{idx}.bias'].view(pop_size)
                w_leq = pop[f'modular.mod{mod}.layer1.leq{idx}.weight'].view(pop_size, -1)
                b_leq = pop[f'modular.mod{mod}.layer1.leq{idx}.bias'].view(pop_size)

                geq = heaviside(self.mod_test_bits @ w_geq.T + b_geq)
                leq = heaviside(self.mod_test_bits @ w_leq.T + b_leq)
                layer1_outputs.append((geq, leq))

            # Layer 2: eq = geq AND leq for each detector.
            layer2_outputs = []
            for idx in range(num_detectors):
                w_eq = pop[f'modular.mod{mod}.layer2.eq{idx}.weight'].view(pop_size, -1)
                b_eq = pop[f'modular.mod{mod}.layer2.eq{idx}.bias'].view(pop_size)
                geq, leq = layer1_outputs[idx]
                combined = torch.stack([geq, leq], dim=2)
                eq = heaviside((combined * w_eq.unsqueeze(0)).sum(2) + b_eq.unsqueeze(0))
                layer2_outputs.append(eq)

            # Layer 3: OR across all equality detectors.
            layer2_stack = torch.stack(layer2_outputs, dim=2)
            w_or = pop[f'modular.mod{mod}.layer3.or.weight'].view(pop_size, -1)
            b_or = pop[f'modular.mod{mod}.layer3.or.bias'].view(pop_size)
            out = heaviside((layer2_stack * w_or.unsqueeze(0)).sum(2) + b_or.unsqueeze(0))

        expected = ((self.mod_test % mod) == 0).float()
        return (out == expected.unsqueeze(1)).float().sum(0)
|
|
|
|
|
|
|
|
|
|
|
| def _test_mux2to1(self, pop: Dict) -> torch.Tensor:
|
| """Test 2:1 multiplexer."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
|
|
| for a in [0, 1]:
|
| for b in [0, 1]:
|
| for sel in [0, 1]:
|
| expected = a if sel == 1 else b
|
|
|
| a_t = torch.full((pop_size,), float(a), device=self.device)
|
| b_t = torch.full((pop_size,), float(b), device=self.device)
|
| sel_t = torch.full((pop_size,), float(sel), device=self.device)
|
|
|
| w_not = pop['combinational.multiplexer2to1.not_s.weight'].view(pop_size, -1)
|
| b_not = pop['combinational.multiplexer2to1.not_s.bias'].view(pop_size)
|
| not_sel = heaviside((sel_t.unsqueeze(1) * w_not).sum(1) + b_not)
|
|
|
| inp_a = torch.stack([a_t, sel_t], dim=1)
|
| w_and_a = pop['combinational.multiplexer2to1.and1.weight'].view(pop_size, -1)
|
| b_and_a = pop['combinational.multiplexer2to1.and1.bias'].view(pop_size)
|
| and_a = heaviside((inp_a * w_and_a).sum(1) + b_and_a)
|
|
|
| inp_b = torch.stack([b_t, not_sel], dim=1)
|
| w_and_b = pop['combinational.multiplexer2to1.and0.weight'].view(pop_size, -1)
|
| b_and_b = pop['combinational.multiplexer2to1.and0.bias'].view(pop_size)
|
| and_b = heaviside((inp_b * w_and_b).sum(1) + b_and_b)
|
|
|
| inp_or = torch.stack([and_a, and_b], dim=1)
|
| w_or = pop['combinational.multiplexer2to1.or.weight'].view(pop_size, -1)
|
| b_or = pop['combinational.multiplexer2to1.or.bias'].view(pop_size)
|
| out = heaviside((inp_or * w_or).sum(1) + b_or)
|
|
|
| scores += (out == expected).float()
|
|
|
| return scores
|
|
|
def _test_decoder3to8(self, pop: Dict) -> torch.Tensor:
    """Score each individual's 3-to-8 decoder on all 8 input codes.

    For every 3-bit input value, each of the eight one-hot output
    neurons is evaluated and a point is awarded per correct line.
    """
    n = next(iter(pop.values())).shape[0]
    fitness = torch.zeros(n, device=self.device)

    for code in range(8):
        # Present the code MSB-first as a (1, 3) row shared by all individuals.
        msb, mid, lsb = (code >> 2) & 1, (code >> 1) & 1, code & 1
        x = torch.tensor([[float(msb), float(mid), float(lsb)]], device=self.device)

        for line in range(8):
            weight = pop[f'combinational.decoder3to8.out{line}.weight'].view(n, -1)
            bias = pop[f'combinational.decoder3to8.out{line}.bias'].view(n)
            y = heaviside(x @ weight.T + bias)
            # Exactly one output line should fire: the one matching the code.
            target = float(line == code)
            fitness += (y.squeeze() == target).float()

    return fitness
|
|
|
def _test_encoder8to3(self, pop: Dict) -> torch.Tensor:
    """Score each individual's 8-to-3 encoder (one-hot in, binary out).

    Feeds every one-hot vector and checks the three binary output
    neurons against the index of the hot line (bit 0 = LSB).
    """
    n = next(iter(pop.values())).shape[0]
    fitness = torch.zeros(n, device=self.device)

    for hot in range(8):
        # One-hot input row shared across the population.
        onehot = torch.zeros(1, 8, device=self.device)
        onehot[0, hot] = 1.0

        for k in range(3):
            weight = pop[f'combinational.encoder8to3.bit{k}.weight'].view(n, -1)
            bias = pop[f'combinational.encoder8to3.bit{k}.bias'].view(n)
            y = heaviside(onehot @ weight.T + bias)
            fitness += (y.squeeze() == float((hot >> k) & 1)).float()

    return fitness
|
|
|
|
|
|
|
|
|
|
|
def _test_conditional_jump(self, pop: Dict, name: str) -> torch.Tensor:
    """Test 8-bit conditional jump (MUX) circuit.

    Each bit of the output is produced by a four-gate MUX
    (NOT sel, a AND sel, b AND NOT sel, OR) under the
    `control.{name}.bit{k}` weight namespace; one point per
    correct output bit per test vector.
    """
    n = next(iter(pop.values())).shape[0]
    fitness = torch.zeros(n, device=self.device)

    def gate(prefix: str, x: torch.Tensor) -> torch.Tensor:
        # Evaluate one threshold neuron of the per-bit MUX.
        w = pop[prefix + '.weight'].view(n, -1)
        b = pop[prefix + '.bias'].view(n)
        return heaviside((x * w).sum(1) + b)

    cases = [(0, 255, 0), (0, 255, 1), (127, 128, 0), (127, 128, 1),
             (0xAA, 0x55, 0), (0xAA, 0x55, 1)]

    for a_word, b_word, sel in cases:
        # sel == 1 routes operand A to the output, otherwise B.
        want = a_word if sel == 1 else b_word

        for k in range(8):
            a_vec = torch.full((n,), float((a_word >> k) & 1), device=self.device)
            b_vec = torch.full((n,), float((b_word >> k) & 1), device=self.device)
            s_vec = torch.full((n,), float(sel), device=self.device)
            want_k = (want >> k) & 1

            base = f'control.{name}.bit{k}'
            inv_sel = gate(base + '.not_sel', s_vec.unsqueeze(1))
            pass_a = gate(base + '.and_a', torch.stack([a_vec, s_vec], dim=1))
            pass_b = gate(base + '.and_b', torch.stack([b_vec, inv_sel], dim=1))
            muxed = gate(base + '.or', torch.stack([pass_a, pass_b], dim=1))

            fitness += (muxed == want_k).float()

    return fitness
|
|
|
|
|
|
|
|
|
|
|
| def _test_alu_op(self, pop: Dict, op: str, test_fn) -> torch.Tensor:
|
| """Test an 8-bit ALU operation."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
|
|
| test_pairs = [(0, 0), (255, 255), (0, 255), (255, 0),
|
| (0xAA, 0x55), (0x0F, 0xF0), (1, 1), (127, 128)]
|
|
|
| for a_val, b_val in test_pairs:
|
| expected = test_fn(a_val, b_val) & 0xFF
|
|
|
| a_bits = torch.tensor([(a_val >> (7-i)) & 1 for i in range(8)], device=self.device, dtype=torch.float32)
|
| b_bits = torch.tensor([(b_val >> (7-i)) & 1 for i in range(8)], device=self.device, dtype=torch.float32)
|
|
|
| if op == 'and':
|
| inp = torch.stack([a_bits, b_bits], dim=0).T.unsqueeze(0)
|
| w = pop['alu.alu8bit.and.weight'].view(pop_size, -1)
|
| b = pop['alu.alu8bit.and.bias'].view(pop_size, -1)
|
|
|
|
|
| out_val = a_val & b_val
|
| elif op == 'or':
|
| out_val = a_val | b_val
|
| elif op == 'xor':
|
| out_val = a_val ^ b_val
|
| elif op == 'not':
|
| out_val = (~a_val) & 0xFF
|
|
|
| scores += (out_val == expected)
|
|
|
| return scores
|
|
|
|
|
|
|
|
|
|
|
def _eval_xor_chain(self, pop: Dict, values: list) -> torch.Tensor:
    """Compute XOR of multiple 8-bit values using the XOR circuit chain.

    Parameters:
        pop: dict of batched circuit tensors (first dim = population size).
        values: list of Python ints in [0, 255].

    Returns:
        (pop_size, 8) tensor of result bits, MSB first. An empty input
        yields all zeros. The first value seeds the accumulator directly;
        each further value is folded in bit-by-bit through the evolved
        two-layer XOR circuit.
    """
    pop_size = next(iter(pop.values())).shape[0]

    if len(values) == 0:
        return torch.zeros(pop_size, 8, device=self.device)

    # Seed accumulator with the first value's bits (MSB first).
    result_bits = torch.zeros(pop_size, 8, device=self.device)
    for i in range(8):
        result_bits[:, i] = float((values[0] >> (7-i)) & 1)

    # PERF: the XOR circuit parameters are loop-invariant — fetch the
    # views once instead of once per bit per value (identical results).
    w1_or = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
    b1_or = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
    w1_nand = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
    b1_nand = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
    w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
    b2 = pop['boolean.xor.layer2.bias'].view(pop_size)

    for val in values[1:]:
        val_bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
                                device=self.device, dtype=torch.float32)
        new_result = torch.zeros(pop_size, 8, device=self.device)

        for bit in range(8):
            # Pair the accumulator bit with the incoming value bit.
            inp = torch.stack([result_bits[:, bit],
                               val_bits[bit].expand(pop_size)], dim=1)
            h_or = heaviside((inp * w1_or).sum(1) + b1_or)
            h_nand = heaviside((inp * w1_nand).sum(1) + b1_nand)
            hidden = torch.stack([h_or, h_nand], dim=1)
            new_result[:, bit] = heaviside((hidden * w2).sum(1) + b2)

        result_bits = new_result

    return result_bits
|
|
|
| def _bits_to_int(self, bits: torch.Tensor) -> torch.Tensor:
|
| """Convert 8-bit tensor to integer value."""
|
| powers = torch.tensor([128, 64, 32, 16, 8, 4, 2, 1],
|
| device=self.device, dtype=torch.float32)
|
| return (bits * powers).sum(-1)
|
|
|
| def _eval_comparator_from_bits(self, pop: Dict, a_bits: torch.Tensor,
|
| b_bits: torch.Tensor, op: str) -> torch.Tensor:
|
| """Compare two 8-bit values using comparator circuits."""
|
| pop_size = a_bits.shape[0]
|
|
|
| if op == 'gt':
|
| w = pop['arithmetic.greaterthan8bit.comparator'].view(pop_size, -1)
|
| diff = a_bits - b_bits
|
| return (diff @ w.T > 0).float().squeeze()
|
| elif op == 'lt':
|
| w = pop['arithmetic.lessthan8bit.comparator'].view(pop_size, -1)
|
| diff = b_bits - a_bits
|
| return (diff @ w.T > 0).float().squeeze()
|
| elif op == 'eq':
|
| a_int = self._bits_to_int(a_bits)
|
| b_int = self._bits_to_int(b_bits)
|
| return (a_int == b_int).float()
|
| return torch.zeros(pop_size, device=self.device)
|
|
|
def _eval_all_zeros(self, pop: Dict, bits: torch.Tensor) -> torch.Tensor:
    """Return 1 per individual iff its 8-bit input fires the all-zeros
    pattern-recognition circuit."""
    n = bits.shape[0]
    weight = pop['pattern_recognition.allzeros.weight'].view(n, -1)
    bias = pop['pattern_recognition.allzeros.bias'].view(n)
    return heaviside((bits * weight).sum(1) + bias)
|
|
|
def _test_nim_game(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    NIM GAME - Complete Implementation

    Tests: XOR (nimsum), comparators, threshold (all zeros), pattern recognition,
    boolean logic, arithmetic for pile manipulation.

    Game rules:
    - Multiple piles of objects
    - Players alternate removing any number from one pile
    - Player who takes last object wins (normal play)
    - Optimal strategy: make nimsum (XOR of all piles) = 0

    We test:
    1. Nimsum calculation for various game states
    2. Optimal move detection (find pile where pile XOR nimsum < pile)
    3. Win condition detection (all piles empty)
    4. Game state transitions
    5. Full game simulations with optimal play verification

    Returns:
        (scores, total_tests): per-individual score tensor and the
        number of points available.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # (piles, XOR of all piles, whether nimsum == 0 i.e. a losing position)
    nim_test_cases = [
        ([1, 2, 3], 0, True),
        ([1, 2, 4], 7, False),
        ([3, 5, 7], 1, False),
        ([1, 1], 0, True),
        ([4, 4, 4, 4], 0, True),
        ([7, 11, 13], 1, False),
        ([15, 15], 0, True),
        ([8, 4, 2, 1], 15, False),
        ([0, 0, 0], 0, True),
        ([10, 10, 10], 10, False),
        ([1, 3, 5, 7], 0, True),
        ([2, 5, 8, 11], 4, False),
        ([6, 9, 12, 15], 12, False),
        ([1, 4, 9, 16], 28, False),
        ([3, 3, 3], 3, False),
        ([7, 7, 7, 7], 0, True),
        ([255, 255], 0, True),
        ([128, 64, 32, 16], 240, False),
        ([100, 100], 0, True),
        ([50, 75, 25], 96, False),
        ([1], 1, False),
        ([0], 0, True),
        ([15, 0, 15], 0, True),
        ([8, 8, 8, 8, 8, 8, 8, 8], 0, True),
    ]

    # Part 1: nimsum via the evolved XOR chain + losing-position detection
    # via the all-zeros pattern circuit (2 points per case).
    for piles, expected_nimsum, is_losing in nim_test_cases:
        nimsum_bits = self._eval_xor_chain(pop, piles)
        computed_nimsum = self._bits_to_int(nimsum_bits)

        scores += (computed_nimsum == expected_nimsum).float()
        total_tests += 1

        # NOTE(review): zero_bits is never used — candidate for removal.
        zero_bits = torch.zeros(pop_size, 8, device=self.device)
        nimsum_is_zero = self._eval_all_zeros(pop, nimsum_bits)
        expected_losing = 1.0 if is_losing else 0.0
        scores += (nimsum_is_zero == expected_losing).float()
        total_tests += 1

    # (piles, index of the pile the optimal move touches, its new size)
    optimal_move_tests = [
        ([3, 4, 5], 0, 1),
        ([1, 2, 4], 2, 3),
        ([7, 3, 5], 0, 6),
        ([100, 50, 78], 1, 42),
        ([1, 1, 1], 0, 0),
        ([5, 5, 1], 0, 4),
        ([20, 15, 25], 1, 13),
        ([13, 7, 9], 1, 4),
    ]

    # Part 2: optimal move detection — the optimal move replaces a pile p
    # with p XOR nimsum whenever that is smaller than p.
    for piles, opt_pile_idx, opt_new_val in optimal_move_tests:
        nimsum_bits = self._eval_xor_chain(pop, piles)
        # NOTE(review): nimsum_int is unused in this loop.
        nimsum_int = self._bits_to_int(nimsum_bits)

        found_optimal = torch.zeros(pop_size, device=self.device)
        for pile_idx, pile_val in enumerate(piles):
            # Pile value as MSB-first bits, broadcast over the population.
            pile_bits = torch.tensor([(pile_val >> (7-i)) & 1 for i in range(8)],
                                     device=self.device, dtype=torch.float32)
            pile_bits = pile_bits.unsqueeze(0).expand(pop_size, -1)

            # pile XOR nimsum, one bit at a time through the XOR circuit.
            xor_result = torch.zeros(pop_size, 8, device=self.device)
            for bit in range(8):
                inp = torch.stack([pile_bits[:, bit], nimsum_bits[:, bit]], dim=1)

                w1_or = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
                b1_or = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
                w1_nand = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
                b1_nand = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
                w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
                b2 = pop['boolean.xor.layer2.bias'].view(pop_size)

                h_or = heaviside((inp * w1_or).sum(1) + b1_or)
                h_nand = heaviside((inp * w1_nand).sum(1) + b1_nand)
                hidden = torch.stack([h_or, h_nand], dim=1)
                xor_result[:, bit] = heaviside((hidden * w2).sum(1) + b2)

            new_pile_val = self._bits_to_int(xor_result)
            # A legal nim move must shrink the pile.
            is_valid_move = (new_pile_val < pile_val).float()

            if pile_idx == opt_pile_idx:
                found_optimal += is_valid_move * (new_pile_val == opt_new_val).float()

        scores += (found_optimal > 0).float()
        total_tests += 1

    game_simulations = [
        [7, 5, 3],
        [15, 10, 6],
        [1, 2, 3, 4],
        [8, 8, 8],
        [12, 8, 5, 3],
    ]

    # Part 3: play each game to completion using the circuit-computed
    # nimsum of individual 0 for move selection.
    # NOTE(review): the move logic runs in host Python and only reads
    # individual 0's nimsum, so every individual receives the same point.
    for initial_piles in game_simulations:
        piles = initial_piles.copy()
        move_count = 0
        max_moves = 50  # safety bound against non-terminating play
        # NOTE(review): game_valid is never used.
        game_valid = True

        while sum(piles) > 0 and move_count < max_moves:
            nimsum_bits = self._eval_xor_chain(pop, piles)
            nimsum_int = self._bits_to_int(nimsum_bits)[0].item()

            # Prefer the optimal move (pile -> pile XOR nimsum) if legal.
            made_move = False
            for pile_idx in range(len(piles)):
                if piles[pile_idx] == 0:
                    continue
                new_val = piles[pile_idx] ^ int(nimsum_int)
                if new_val < piles[pile_idx]:
                    piles[pile_idx] = new_val
                    made_move = True
                    break

            # Fallback: remove a single object from the first non-empty pile.
            if not made_move:
                if piles[0] > 0:
                    piles[0] -= 1
                    made_move = True
                else:
                    for i in range(len(piles)):
                        if piles[i] > 0:
                            piles[i] -= 1
                            made_move = True
                            break

            if not made_move:
                break

            move_count += 1

        game_ended_properly = (sum(piles) == 0)
        scores += float(game_ended_properly)
        total_tests += 1

    # (piles, whether the game is over i.e. all piles empty)
    endgame_tests = [
        ([0, 0, 0, 0], True),
        ([1, 0, 0, 0], False),
        ([0, 0, 0, 1], False),
        ([0, 0, 0, 0, 0, 0, 0, 0], True),
        ([0, 1, 0, 1, 0, 1, 0, 1], False),
    ]

    # Part 4: end-of-game detection — AND together the per-pile
    # all-zeros circuit outputs via the evolved AND gate.
    for piles, expected_end in endgame_tests:
        all_zero = torch.ones(pop_size, device=self.device)
        for pile_val in piles:
            pile_bits = torch.tensor([(pile_val >> (7-i)) & 1 for i in range(8)],
                                     device=self.device, dtype=torch.float32)
            pile_bits = pile_bits.unsqueeze(0).expand(pop_size, -1)
            is_zero = self._eval_all_zeros(pop, pile_bits)

            # Running conjunction: all_zero &= is_zero.
            inp = torch.stack([all_zero, is_zero], dim=1)
            w = pop['boolean.and.weight'].view(pop_size, -1)
            b = pop['boolean.and.bias'].view(pop_size)
            all_zero = heaviside((inp * w).sum(1) + b)

        expected = 1.0 if expected_end else 0.0
        scores += (all_zero == expected).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Nim: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _count_neighbors_threshold(self, pop: Dict, neighbor_bits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Count neighbors using threshold circuits.
    Returns (exactly_3, two_or_three) for birth and survival rules.

    Parameters:
        pop: dict of batched circuit tensors.
        neighbor_bits: (pop_size, 8) 0/1 tensor, one entry per neighbor.

    Returns:
        Two (pop_size,) 0/1 tensors.
    """
    pop_size = neighbor_bits.shape[0]

    w_al3 = pop['threshold.exactlyk_4.atleast.weight'].view(pop_size, -1)
    b_al3 = pop['threshold.exactlyk_4.atleast.bias'].view(pop_size)
    w_am3 = pop['threshold.exactlyk_4.atmost.weight'].view(pop_size, -1)
    b_am3 = pop['threshold.exactlyk_4.atmost.bias'].view(pop_size)

    # BUGFIX: was `neighbor_bits @ w.T`, which builds a (pop, pop)
    # cross-individual matrix (and, after squeeze(), a 0-d scalar when
    # pop_size == 1 that breaks torch.stack(..., dim=1) below).
    # Row-wise weighting keeps each individual's bits with its own
    # weights and makes the squeeze() unnecessary.
    atleast_3 = heaviside((neighbor_bits * w_al3).sum(1) + b_al3)
    atmost_3 = heaviside((neighbor_bits * w_am3).sum(1) + b_am3)

    # exactly 3 = (>= 3) AND (<= 3), via the evolved AND gate.
    inp = torch.stack([atleast_3, atmost_3], dim=1)
    w_and = pop['boolean.and.weight'].view(pop_size, -1)
    b_and = pop['boolean.and.bias'].view(pop_size)
    exactly_3 = heaviside((inp * w_and).sum(1) + b_and)

    # Shifting the at-least-3 bias by +1 lowers the threshold to 2.
    atleast_2 = heaviside((neighbor_bits * w_al3).sum(1) + (b_al3 + 1))

    # 2 or 3 = (>= 2) AND (<= 3).
    inp2 = torch.stack([atleast_2, atmost_3], dim=1)
    two_or_three = heaviside((inp2 * w_and).sum(1) + b_and)

    return exactly_3, two_or_three
|
|
|
def _test_game_of_life(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    CONWAY'S GAME OF LIFE - Complete Implementation

    Tests: Threshold gates (exactly-k, at-least-k), boolean logic,
    pattern recognition, modular arithmetic for wrap-around.

    Rules:
    - Dead cell with exactly 3 neighbors -> birth
    - Live cell with 2-3 neighbors -> survival
    - Otherwise -> death

    We test:
    1. Individual cell transitions with various neighbor counts
    2. Classic patterns: blinker, block, glider, beehive
    3. Multi-generation evolution
    4. Stability detection
    5. Population counting

    Returns:
        (scores, total_tests): per-individual score tensor and the
        number of points available.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # (current cell state, live-neighbor count, expected next state)
    cell_tests = [
        (0, 0, 0),
        (0, 1, 0),
        (0, 2, 0),
        (0, 3, 1),
        (0, 4, 0),
        (0, 5, 0),
        (0, 6, 0),
        (0, 7, 0),
        (0, 8, 0),
        (1, 0, 0),
        (1, 1, 0),
        (1, 2, 1),
        (1, 3, 1),
        (1, 4, 0),
        (1, 5, 0),
        (1, 6, 0),
        (1, 7, 0),
        (1, 8, 0),
    ]

    # Part 1: single-cell transition built entirely from evolved gates.
    for cell_state, neighbor_count, expected_next in cell_tests:
        # Encode the neighbor count as that many leading 1-bits.
        neighbor_bits = torch.zeros(pop_size, 8, device=self.device)
        for i in range(neighbor_count):
            neighbor_bits[:, i] = 1.0

        w_3 = pop['threshold.threeoutof8.weight'].view(pop_size, -1)
        b_3 = pop['threshold.threeoutof8.bias'].view(pop_size)
        atleast_3 = heaviside((neighbor_bits * w_3).sum(1) + b_3)

        w_4 = pop['threshold.fouroutof8.weight'].view(pop_size, -1)
        b_4 = pop['threshold.fouroutof8.bias'].view(pop_size)
        atleast_4 = heaviside((neighbor_bits * w_4).sum(1) + b_4)

        w_not = pop['boolean.not.weight'].view(pop_size, -1)
        b_not = pop['boolean.not.bias'].view(pop_size)
        not_atleast_4 = heaviside((atleast_4.unsqueeze(1) * w_not).sum(1) + b_not)

        # exactly 3 = (>= 3) AND NOT (>= 4)
        inp_exactly_3 = torch.stack([atleast_3, not_atleast_4], dim=1)
        w_and = pop['boolean.and.weight'].view(pop_size, -1)
        b_and = pop['boolean.and.bias'].view(pop_size)
        exactly_3 = heaviside((inp_exactly_3 * w_and).sum(1) + b_and)

        w_2 = pop['threshold.twooutof8.weight'].view(pop_size, -1)
        b_2 = pop['threshold.twooutof8.bias'].view(pop_size)
        atleast_2 = heaviside((neighbor_bits * w_2).sum(1) + b_2)

        # 2-or-3 = (>= 2) AND NOT (>= 4)
        inp_2_or_3 = torch.stack([atleast_2, not_atleast_4], dim=1)
        two_or_three = heaviside((inp_2_or_3 * w_and).sum(1) + b_and)

        cell_t = torch.full((pop_size,), float(cell_state), device=self.device)

        # survival = alive AND (2-or-3 neighbors)
        survival = torch.stack([cell_t, two_or_three], dim=1)
        survives = heaviside((survival * w_and).sum(1) + b_and)

        # birth = dead AND (exactly 3 neighbors)
        not_cell = heaviside((cell_t.unsqueeze(1) * w_not).sum(1) + b_not)
        birth = torch.stack([not_cell, exactly_3], dim=1)
        is_born = heaviside((birth * w_and).sum(1) + b_and)

        # next state = survives OR is_born
        inp_or = torch.stack([survives, is_born], dim=1)
        w_or = pop['boolean.or.weight'].view(pop_size, -1)
        b_or = pop['boolean.or.bias'].view(pop_size)
        next_state = heaviside((inp_or * w_or).sum(1) + b_or)

        scores += (next_state == expected_next).float()
        total_tests += 1

    # Classic oscillator: a vertical blinker becomes a horizontal one.
    blinker_h = [
        [0,0,0,0,0],
        [0,0,1,0,0],
        [0,0,1,0,0],
        [0,0,1,0,0],
        [0,0,0,0,0],
    ]
    blinker_v = [
        [0,0,0,0,0],
        [0,0,0,0,0],
        [0,1,1,1,0],
        [0,0,0,0,0],
        [0,0,0,0,0],
    ]

    # Still life: 2x2 block never changes.
    block = [
        [0,0,0,0],
        [0,1,1,0],
        [0,1,1,0],
        [0,0,0,0],
    ]

    # Still life: beehive never changes.
    beehive = [
        [0,0,0,0,0,0],
        [0,0,1,1,0,0],
        [0,1,0,0,1,0],
        [0,0,1,1,0,0],
        [0,0,0,0,0,0],
    ]

    def count_pattern_neighbors(pattern, row, col):
        # Moore neighborhood, no wrap-around at the grid edges.
        count = 0
        for dr in [-1, 0, 1]:
            for dc in [-1, 0, 1]:
                if dr == 0 and dc == 0:
                    continue
                nr, nc = row + dr, col + dc
                if 0 <= nr < len(pattern) and 0 <= nc < len(pattern[0]):
                    count += pattern[nr][nc]
        return count

    def evolve_pattern(pattern):
        # One Game of Life generation computed in plain Python.
        rows, cols = len(pattern), len(pattern[0])
        new_pattern = [[0]*cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                neighbors = count_pattern_neighbors(pattern, r, c)
                if pattern[r][c] == 1:
                    new_pattern[r][c] = 1 if neighbors in [2, 3] else 0
                else:
                    new_pattern[r][c] = 1 if neighbors == 3 else 0
        return new_pattern

    # NOTE(review): the pattern checks below are computed host-side with
    # evolve_pattern, not with the evolved circuits, so they award the
    # same (constant) point to every individual.
    blinker_evolved = evolve_pattern(blinker_h)
    blinker_correct = all(blinker_evolved[r][c] == blinker_v[r][c]
                          for r in range(5) for c in range(5))
    scores += float(blinker_correct) * torch.ones(pop_size, device=self.device)
    total_tests += 1

    block_evolved = evolve_pattern(block)
    block_stable = all(block_evolved[r][c] == block[r][c]
                       for r in range(4) for c in range(4))
    scores += float(block_stable) * torch.ones(pop_size, device=self.device)
    total_tests += 1

    beehive_evolved = evolve_pattern(beehive)
    beehive_stable = all(beehive_evolved[r][c] == beehive[r][c]
                         for r in range(5) for c in range(6))
    scores += float(beehive_stable) * torch.ones(pop_size, device=self.device)
    total_tests += 1

    # (8 neighbor flags, expected live-neighbor count)
    neighbor_count_tests = [
        ([1,1,1,0,0,0,0,0], 3),
        ([0,0,0,0,0,0,0,0], 0),
        ([1,1,1,1,1,1,1,1], 8),
        ([1,0,1,0,1,0,1,0], 4),
        ([1,1,0,0,0,0,0,0], 2),
    ]

    # Part 2: population counting via the popcount circuit (linear
    # output, no threshold — compared to the exact count).
    for neighbors, expected_count in neighbor_count_tests:
        neighbor_bits = torch.tensor([neighbors], device=self.device, dtype=torch.float32)
        neighbor_bits = neighbor_bits.expand(pop_size, -1)

        w_pop = pop['pattern_recognition.popcount.weight'].view(pop_size, -1)
        b_pop = pop['pattern_recognition.popcount.bias'].view(pop_size)
        count = (neighbor_bits * w_pop).sum(1) + b_pop

        scores += (count == expected_count).float()
        total_tests += 1

    # Four consecutive generations of a glider on a 6x6 grid.
    glider_frames = [
        [[0,0,0,0,0,0],
         [0,0,1,0,0,0],
         [0,0,0,1,0,0],
         [0,1,1,1,0,0],
         [0,0,0,0,0,0],
         [0,0,0,0,0,0]],

        [[0,0,0,0,0,0],
         [0,0,0,0,0,0],
         [0,1,0,1,0,0],
         [0,0,1,1,0,0],
         [0,0,1,0,0,0],
         [0,0,0,0,0,0]],

        [[0,0,0,0,0,0],
         [0,0,0,0,0,0],
         [0,0,0,1,0,0],
         [0,1,0,1,0,0],
         [0,0,1,1,0,0],
         [0,0,0,0,0,0]],

        [[0,0,0,0,0,0],
         [0,0,0,0,0,0],
         [0,0,1,0,0,0],
         [0,0,0,1,1,0],
         [0,0,1,1,0,0],
         [0,0,0,0,0,0]],
    ]

    # NOTE(review): host-side again — constant score for all individuals.
    for i in range(len(glider_frames) - 1):
        evolved = evolve_pattern(glider_frames[i])
        matches = all(evolved[r][c] == glider_frames[i+1][r][c]
                      for r in range(6) for c in range(6))
        scores += float(matches) * torch.ones(pop_size, device=self.device)
        total_tests += 1

    random_patterns = [
        [[0,1,0,1],[1,0,1,0],[0,1,0,1],[1,0,1,0]],
        [[1,1,1,1],[1,0,0,1],[1,0,0,1],[1,1,1,1]],
        [[0,0,0,0,0],[0,1,1,1,0],[0,1,0,1,0],[0,1,1,1,0],[0,0,0,0,0]],
    ]

    # NOTE(review): pop_count is computed but never checked, and a point
    # is awarded unconditionally — these three tests cannot fail.
    for pattern in random_patterns:
        gen1 = evolve_pattern(pattern)
        gen2 = evolve_pattern(gen1)
        gen3 = evolve_pattern(gen2)

        pop_count = sum(sum(row) for row in gen3)
        scores += torch.ones(pop_size, device=self.device)
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Game of Life: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
| def _eval_modular(self, pop: Dict, value: int, mod: int) -> torch.Tensor:
|
| """Evaluate modular divisibility circuit."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| bits = torch.tensor([(value >> (7-i)) & 1 for i in range(8)],
|
| device=self.device, dtype=torch.float32)
|
| bits = bits.unsqueeze(0).expand(pop_size, -1)
|
|
|
| if mod in [2, 4, 8]:
|
| w = pop[f'modular.mod{mod}.weight'].view(pop_size, -1)
|
| b = pop[f'modular.mod{mod}.bias'].view(pop_size)
|
| return heaviside((bits * w).sum(1) + b)
|
|
|
| return ((value % mod) == 0).float() * torch.ones(pop_size, device=self.device)
|
|
|
def _eval_8bit_add(self, pop: Dict, a_val: int, b_val: int) -> torch.Tensor:
    """Add two 8-bit values through the evolved ripple-carry adder.

    Feeds bit i of both operands (LSB first) into full adder `fa{i}`,
    threading the carry between stages, and returns the per-individual
    integer sum (the final carry-out is dropped).
    """
    pop_size = next(iter(pop.values())).shape[0]

    carry = torch.zeros(pop_size, device=self.device)
    total = torch.zeros(pop_size, device=self.device)

    for i in range(8):
        lhs = torch.full((pop_size,), float((a_val >> i) & 1), device=self.device)
        rhs = torch.full((pop_size,), float((b_val >> i) & 1), device=self.device)
        # Stage i of the ripple-carry chain.
        sum_i, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{i}',
                                            lhs, rhs, carry)
        # Accumulate the weighted sum bit directly instead of collecting
        # all bits first.
        total = total + sum_i * (2 ** i)

    return total
|
|
|
def _test_blackjack(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    BLACKJACK (21) - Complete Implementation

    Tests: Arithmetic (addition), comparators, modular (card ranks),
    threshold (soft hand), control flow, pattern recognition.

    Rules:
    - Cards 2-10 worth face value, J/Q/K worth 10, A worth 1 or 11
    - Goal: get closest to 21 without going over
    - Dealer must hit on 16 or less, stand on 17+
    - Blackjack = Ace + 10-value card with first 2 cards

    We test:
    1. Card value computation (rank mod 13, special handling)
    2. Hand total calculation with ace handling
    3. Bust detection (> 21)
    4. Blackjack detection (21 with 2 cards including ace)
    5. Dealer AI decision logic
    6. Win/loss determination
    7. Full game simulations

    Returns:
        (scores, total_tests): per-individual score tensor and the
        number of points available.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    def card_value(card):
        # Card index 0-51; rank 0 = ace (11), ranks 10-12 = face cards (10).
        rank = card % 13
        if rank == 0:
            return 11
        elif rank >= 10:
            return 10
        else:
            return rank + 1

    def hand_value(cards):
        # Total with soft-ace demotion: each ace drops from 11 to 1
        # while the hand is bust.
        total = sum(card_value(c) for c in cards)
        aces = sum(1 for c in cards if c % 13 == 0)
        while total > 21 and aces > 0:
            total -= 10
            aces -= 1
        return total

    # (card index, expected blackjack value)
    card_value_tests = [
        (0, 11),
        (1, 2),
        (9, 10),
        (10, 10),
        (11, 10),
        (12, 10),
        (13, 11),
        (14, 2),
        (26, 11),
        (39, 11),
        (51, 10),
    ]

    # NOTE(review): this check recomputes the value host-side with the
    # same formula as card_value — it never consults the circuits and
    # awards the same point to every individual.
    for card, expected_value in card_value_tests:
        rank = card % 13
        computed_value = 11 if rank == 0 else (10 if rank >= 10 else rank + 1)
        scores += (computed_value == expected_value) * torch.ones(pop_size, device=self.device)
        total_tests += 1

    # (card indices, expected hand total after ace handling)
    hand_tests = [
        ([0, 12], 21),
        ([0, 11], 21),
        ([0, 0], 12),
        ([1, 2], 5),
        ([9, 10], 20),
        ([0, 9, 5], 17),
        ([9, 10, 5], 26),
        ([0, 0, 0], 13),
        ([0, 0, 9], 12),
        ([12, 11, 10], 30),
        ([1, 2, 3, 4], 14),
        ([0, 1, 2, 3], 20),
    ]

    for cards, expected_total in hand_tests:
        computed_total = hand_value(cards)

        # NOTE(review): this loop exercises the ripple-carry adder but
        # its result (total_val) is never compared against anything, and
        # the `else card_val` branch for pop_size > 1 discards the
        # running total entirely — looks like leftover scaffolding;
        # verify intent before relying on it.
        total_val = 0
        for card in cards:
            card_val = card_value(card)
            if total_val + card_val <= 255:
                new_total = self._eval_8bit_add(pop, total_val, card_val)
                total_val = int(new_total[0].item()) if pop_size == 1 else card_val
            else:
                total_val = total_val + card_val

        scores += (computed_total == expected_total) * torch.ones(pop_size, device=self.device)
        total_tests += 1

    # (hand total, whether it busts)
    bust_tests = [
        (22, True),
        (21, False),
        (20, False),
        (30, True),
        (17, False),
        (16, False),
        (25, True),
    ]

    # Part 3: bust detection via the evolved greater-than comparator
    # (total > 21), per individual.
    for hand_total, expected_bust in bust_tests:
        total_bits = torch.tensor([(hand_total >> (7-i)) & 1 for i in range(8)],
                                  device=self.device, dtype=torch.float32)
        total_bits = total_bits.unsqueeze(0).expand(pop_size, -1)
        threshold_bits = torch.tensor([(21 >> (7-i)) & 1 for i in range(8)],
                                      device=self.device, dtype=torch.float32)
        threshold_bits = threshold_bits.unsqueeze(0).expand(pop_size, -1)

        w = pop['arithmetic.greaterthan8bit.comparator'].view(pop_size, -1)
        diff = total_bits - threshold_bits
        is_bust = ((diff * w).sum(1) > 0).float()

        expected = 1.0 if expected_bust else 0.0
        scores += (is_bust == expected).float()
        total_tests += 1

    # (cards, whether the hand is a natural blackjack)
    blackjack_tests = [
        ([0, 12], True),
        ([0, 11], True),
        ([0, 10], True),
        ([0, 9], True),
        ([12, 11], False),
        ([0, 0], False),
        ([0, 5, 5], False),
    ]

    # NOTE(review): host-side check again — constant for all individuals.
    for cards, expected_bj in blackjack_tests:
        is_blackjack = (len(cards) == 2 and hand_value(cards) == 21 and
                        any(c % 13 == 0 for c in cards))

        scores += (is_blackjack == expected_bj) * torch.ones(pop_size, device=self.device)
        total_tests += 1

    # (dealer total, whether dealer must hit, i.e. total < 17)
    dealer_tests = [
        (16, True),
        (17, False),
        (18, False),
        (21, False),
        (15, True),
        (12, True),
        (6, True),
    ]

    # Part 5: dealer hit/stand via the evolved less-than comparator.
    for dealer_total, expected_hit in dealer_tests:
        total_bits = torch.tensor([(dealer_total >> (7-i)) & 1 for i in range(8)],
                                  device=self.device, dtype=torch.float32)
        total_bits = total_bits.unsqueeze(0).expand(pop_size, -1)
        threshold_bits = torch.tensor([(17 >> (7-i)) & 1 for i in range(8)],
                                      device=self.device, dtype=torch.float32)
        threshold_bits = threshold_bits.unsqueeze(0).expand(pop_size, -1)

        w = pop['arithmetic.lessthan8bit.comparator'].view(pop_size, -1)
        diff = threshold_bits - total_bits
        should_hit = ((diff * w).sum(1) > 0).float()

        expected = 1.0 if expected_hit else 0.0
        scores += (should_hit == expected).float()
        total_tests += 1

    # (player total, dealer total, expected outcome)
    outcome_tests = [
        (20, 19, 'win'),
        (20, 20, 'push'),
        (20, 21, 'lose'),
        (22, 19, 'lose'),
        (19, 22, 'win'),
        (21, 20, 'win'),
        (17, 17, 'push'),
    ]

    # NOTE(review): host-side outcome logic — constant for all individuals.
    for player_total, dealer_total, expected_outcome in outcome_tests:
        player_bust = player_total > 21
        dealer_bust = dealer_total > 21

        if player_bust:
            outcome = 'lose'
        elif dealer_bust:
            outcome = 'win'
        elif player_total > dealer_total:
            outcome = 'win'
        elif player_total < dealer_total:
            outcome = 'lose'
        else:
            outcome = 'push'

        scores += (outcome == expected_outcome) * torch.ones(pop_size, device=self.device)
        total_tests += 1

    # Full-hand scenarios: player/dealer card lists and expected result.
    game_scenarios = [
        {'player': [0, 12], 'dealer': [9, 8], 'result': 'blackjack'},
        {'player': [5, 6], 'dealer': [10, 7], 'result': 'lose'},
        {'player': [8, 9], 'dealer': [10, 6, 5], 'result': 'win'},
        {'player': [7, 8, 9], 'dealer': [10, 8], 'result': 'lose'},
        {'player': [1, 2, 3, 4], 'dealer': [10, 10], 'result': 'lose'},
        {'player': [0, 6], 'dealer': [10, 6], 'result': 'win'},
        {'player': [9, 8], 'dealer': [9, 8], 'result': 'push'},
    ]

    # NOTE(review): host-side simulation — constant for all individuals.
    for scenario in game_scenarios:
        player_val = hand_value(scenario['player'])
        dealer_val = hand_value(scenario['dealer'])

        player_has_bj = (len(scenario['player']) == 2 and player_val == 21 and
                         any(c % 13 == 0 for c in scenario['player']))

        if player_has_bj:
            computed_result = 'blackjack'
        elif player_val > 21:
            computed_result = 'lose'
        elif dealer_val > 21:
            computed_result = 'win'
        elif player_val > dealer_val:
            computed_result = 'win'
        elif player_val < dealer_val:
            computed_result = 'lose'
        else:
            computed_result = 'push'

        scores += (computed_result == scenario['result']) * torch.ones(pop_size, device=self.device)
        total_tests += 1

    # (card index, expected rank 0-12, expected suit 0-3)
    deck_tests = [
        (0, 0, 0),
        (13, 0, 1),
        (26, 0, 2),
        (39, 0, 3),
        (1, 1, 0),
        (14, 1, 1),
        (51, 12, 3),
    ]

    # NOTE(review): host-side divmod check — constant for all individuals.
    for card, expected_rank, expected_suit in deck_tests:
        rank = card % 13
        suit = card // 13

        scores += (rank == expected_rank) * torch.ones(pop_size, device=self.device)
        scores += (suit == expected_suit) * torch.ones(pop_size, device=self.device)
        total_tests += 2

    if debug and pop_size == 1:
        print(f" Blackjack: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _eval_xor_pair(self, pop: Dict, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """XOR two bit vectors through the evolved two-layer XOR circuit.

    Layer 1 holds an OR-like and a NAND-like neuron; layer 2 combines
    them (classic OR-AND-NAND XOR construction).
    """
    n = a.shape[0]
    pair = torch.stack([a, b], dim=1)

    layer1 = []
    for neuron in ('neuron1', 'neuron2'):
        w = pop[f'boolean.xor.layer1.{neuron}.weight'].view(n, -1)
        bias = pop[f'boolean.xor.layer1.{neuron}.bias'].view(n)
        layer1.append(heaviside((pair * w).sum(1) + bias))

    hidden = torch.stack(layer1, dim=1)
    w_out = pop['boolean.xor.layer2.weight'].view(n, -1)
    b_out = pop['boolean.xor.layer2.bias'].view(n)
    return heaviside((hidden * w_out).sum(1) + b_out)
|
|
|
def _eval_mux2to1(self, pop: Dict, a: torch.Tensor, b: torch.Tensor,
                  sel: torch.Tensor) -> torch.Tensor:
    """Evaluate 2:1 MUX: output = a if sel else b."""
    n = a.shape[0]

    def neuron(key: str, x: torch.Tensor) -> torch.Tensor:
        # One threshold neuron of the MUX sub-circuit.
        w = pop[f'combinational.multiplexer2to1.{key}.weight'].view(n, -1)
        bias = pop[f'combinational.multiplexer2to1.{key}.bias'].view(n)
        return heaviside((x * w).sum(1) + bias)

    # NOT sel, then gate each input, then OR the gated branches.
    inv_sel = neuron('not_s', sel.unsqueeze(1))
    gated_a = neuron('and1', torch.stack([a, sel], dim=1))
    gated_b = neuron('and0', torch.stack([b, inv_sel], dim=1))
    return neuron('or', torch.stack([gated_a, gated_b], dim=1))
|
|
|
| def _test_turing_machine(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| TURING MACHINE - Universal Computation Verification
|
|
|
| Tests: Combinational (decoder for state), control flow (transitions),
|
| boolean logic (transition rules), pattern recognition (halt detection),
|
| arithmetic (tape head position), error detection (parity via XOR tree).
|
|
|
| All tests use actual circuits from the model to verify TM operations:
|
| 1. State decoding (3-to-8 decoder)
|
| 2. Symbol read/write (boolean MUX)
|
| 3. Parity computation (XOR tree from error_detection)
|
| 4. Head position arithmetic (ripple carry adder)
|
| 5. Transition selection (decoder + MUX)
|
| 6. Halt detection (equality comparator)
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| state_decode_tests = [
|
| (0, [1,0,0,0,0,0,0,0]),
|
| (1, [0,1,0,0,0,0,0,0]),
|
| (2, [0,0,1,0,0,0,0,0]),
|
| (3, [0,0,0,1,0,0,0,0]),
|
| (4, [0,0,0,0,1,0,0,0]),
|
| (5, [0,0,0,0,0,1,0,0]),
|
| (6, [0,0,0,0,0,0,1,0]),
|
| (7, [0,0,0,0,0,0,0,1]),
|
| ]
|
|
|
| for state_num, expected_onehot in state_decode_tests:
|
| state_bits = [(state_num >> (2-i)) & 1 for i in range(3)]
|
| inp = torch.tensor([[float(state_bits[0]), float(state_bits[1]), float(state_bits[2])]],
|
| device=self.device)
|
|
|
| decoded = []
|
| for out_idx in range(8):
|
| w = pop[f'combinational.decoder3to8.out{out_idx}.weight'].view(pop_size, -1)
|
| b = pop[f'combinational.decoder3to8.out{out_idx}.bias'].view(pop_size)
|
| out = heaviside(inp @ w.T + b)
|
| decoded.append(out.squeeze())
|
|
|
| if pop_size == 1:
|
| decoded_tensor = torch.tensor([d.item() for d in decoded], device=self.device)
|
| else:
|
| decoded_tensor = torch.stack(decoded, dim=1)
|
| expected_tensor = torch.tensor(expected_onehot, device=self.device, dtype=torch.float32)
|
|
|
| if pop_size == 1:
|
| match = torch.all(decoded_tensor == expected_tensor).float()
|
| else:
|
| match = torch.all(decoded_tensor == expected_tensor.unsqueeze(0), dim=1).float()
|
|
|
| scores += match
|
| total_tests += 1
|
|
|
| mux_tests = [
|
| (0, 0, 0, 0),
|
| (0, 0, 1, 0),
|
| (0, 1, 0, 1),
|
| (0, 1, 1, 0),
|
| (1, 0, 0, 0),
|
| (1, 0, 1, 1),
|
| (1, 1, 0, 1),
|
| (1, 1, 1, 1),
|
| ]
|
|
|
| for a, b, sel, expected in mux_tests:
|
| a_t = torch.full((pop_size,), float(a), device=self.device)
|
| b_t = torch.full((pop_size,), float(b), device=self.device)
|
| sel_t = torch.full((pop_size,), float(sel), device=self.device)
|
|
|
| result = self._eval_mux2to1(pop, a_t, b_t, sel_t)
|
| scores += (result == expected).float()
|
| total_tests += 1
|
|
|
| parity_xor_tests = [
|
| ([0, 0], 0),
|
| ([0, 1], 1),
|
| ([1, 0], 1),
|
| ([1, 1], 0),
|
| ([0, 0, 0, 0], 0),
|
| ([1, 0, 0, 0], 1),
|
| ([1, 1, 0, 0], 0),
|
| ([1, 1, 1, 0], 1),
|
| ([1, 1, 1, 1], 0),
|
| ([0, 1, 0, 1], 0),
|
| ([1, 0, 1, 0], 0),
|
| ([0, 0, 1, 1], 0),
|
| ]
|
|
|
| for bits, expected_parity in parity_xor_tests:
|
| if len(bits) == 2:
|
| a_t = torch.full((pop_size,), float(bits[0]), device=self.device)
|
| b_t = torch.full((pop_size,), float(bits[1]), device=self.device)
|
| result = self._eval_xor_pair(pop, a_t, b_t)
|
| else:
|
| a_t = torch.full((pop_size,), float(bits[0]), device=self.device)
|
| b_t = torch.full((pop_size,), float(bits[1]), device=self.device)
|
| xor_01 = self._eval_xor_pair(pop, a_t, b_t)
|
|
|
| c_t = torch.full((pop_size,), float(bits[2]), device=self.device)
|
| d_t = torch.full((pop_size,), float(bits[3]), device=self.device)
|
| xor_23 = self._eval_xor_pair(pop, c_t, d_t)
|
|
|
| result = self._eval_xor_pair(pop, xor_01, xor_23)
|
|
|
| scores += (result == expected_parity).float()
|
| total_tests += 1
|
|
|
| popcount_parity_tests = [
|
| ([0,0,0,0,0,0,0,0], 0),
|
| ([1,0,0,0,0,0,0,0], 1),
|
| ([1,1,0,0,0,0,0,0], 0),
|
| ([1,1,1,0,0,0,0,0], 1),
|
| ([1,1,1,1,0,0,0,0], 0),
|
| ([1,1,1,1,1,0,0,0], 1),
|
| ([1,1,1,1,1,1,0,0], 0),
|
| ([1,1,1,1,1,1,1,0], 1),
|
| ([1,1,1,1,1,1,1,1], 0),
|
| ([1,0,1,0,1,0,1,0], 0),
|
| ([0,1,0,1,0,1,0,1], 0),
|
| ([1,0,0,0,0,0,0,1], 0),
|
| ([0,0,0,1,1,0,0,0], 0),
|
| ([1,0,0,1,0,0,0,1], 1),
|
| ]
|
|
|
| for bits, expected_parity in popcount_parity_tests:
|
| input_tensor = torch.tensor([bits], device=self.device, dtype=torch.float32)
|
| input_tensor = input_tensor.expand(pop_size, -1)
|
|
|
| w_pop = pop['pattern_recognition.popcount.weight'].view(pop_size, -1)
|
| b_pop = pop['pattern_recognition.popcount.bias'].view(pop_size)
|
| popcount = (input_tensor * w_pop).sum(1) + b_pop
|
|
|
| circuit_parity = (popcount.long() % 2)
|
| scores += (circuit_parity == expected_parity).float()
|
| total_tests += 1
|
|
|
| head_movement_tests = [
|
| (0, 1, 1),
|
| (1, 1, 2),
|
| (5, 1, 6),
|
| (127, 1, 128),
|
| (254, 1, 255),
|
| (10, 1, 11),
|
| (100, 1, 101),
|
| (200, 1, 201),
|
| ]
|
|
|
| for head_pos, delta, expected_new in head_movement_tests:
|
| result = self._eval_8bit_add(pop, head_pos, delta)
|
| scores += (result == expected_new).float()
|
| total_tests += 1
|
|
|
| symbol_select_tests = [
|
| (0, 0, 0, 0),
|
| (0, 1, 0, 0),
|
| (0, 0, 1, 0),
|
| (0, 1, 1, 1),
|
| (1, 0, 0, 1),
|
| (1, 1, 0, 1),
|
| (1, 0, 1, 0),
|
| (1, 1, 1, 1),
|
| ]
|
|
|
| for current_sym, new_sym, write_enable, expected in symbol_select_tests:
|
| current_t = torch.full((pop_size,), float(current_sym), device=self.device)
|
| new_t = torch.full((pop_size,), float(new_sym), device=self.device)
|
| we_t = torch.full((pop_size,), float(write_enable), device=self.device)
|
|
|
| result = self._eval_mux2to1(pop, new_t, current_t, we_t)
|
| scores += (result == expected).float()
|
| total_tests += 1
|
|
|
| halt_tests = [
|
| (0, 0, True),
|
| (1, 1, True),
|
| (7, 7, True),
|
| (0, 1, False),
|
| (3, 5, False),
|
| (7, 0, False),
|
| (4, 4, True),
|
| (2, 6, False),
|
| ]
|
|
|
| for current_state, halt_state, expected_halt in halt_tests:
|
| current_bits = torch.tensor([[(current_state >> (7-i)) & 1 for i in range(8)]],
|
| device=self.device, dtype=torch.float32)
|
| halt_bits = torch.tensor([[(halt_state >> (7-i)) & 1 for i in range(8)]],
|
| device=self.device, dtype=torch.float32)
|
| current_bits = current_bits.expand(pop_size, -1)
|
| halt_bits = halt_bits.expand(pop_size, -1)
|
|
|
| xnor_results = []
|
| for bit in range(8):
|
| a_bit = current_bits[:, bit]
|
| b_bit = halt_bits[:, bit]
|
|
|
| inp = torch.stack([a_bit, b_bit], dim=1)
|
| w1_n1 = pop['boolean.xnor.layer1.neuron1.weight'].view(pop_size, -1)
|
| b1_n1 = pop['boolean.xnor.layer1.neuron1.bias'].view(pop_size)
|
| w1_n2 = pop['boolean.xnor.layer1.neuron2.weight'].view(pop_size, -1)
|
| b1_n2 = pop['boolean.xnor.layer1.neuron2.bias'].view(pop_size)
|
| w2 = pop['boolean.xnor.layer2.weight'].view(pop_size, -1)
|
| b2 = pop['boolean.xnor.layer2.bias'].view(pop_size)
|
|
|
| h1 = heaviside((inp * w1_n1).sum(1) + b1_n1)
|
| h2 = heaviside((inp * w1_n2).sum(1) + b1_n2)
|
| hidden = torch.stack([h1, h2], dim=1)
|
| xnor_out = heaviside((hidden * w2).sum(1) + b2)
|
| xnor_results.append(xnor_out)
|
|
|
| xnor_stack = torch.stack(xnor_results, dim=1)
|
| w_and8 = pop['threshold.alloutof8.weight'].view(pop_size, -1)
|
| b_and8 = pop['threshold.alloutof8.bias'].view(pop_size)
|
| is_equal = heaviside((xnor_stack * w_and8).sum(1) + b_and8)
|
|
|
| expected = 1.0 if expected_halt else 0.0
|
| scores += (is_equal == expected).float()
|
| total_tests += 1
|
|
|
| transition_tests = [
|
| (0, 0, 0, 1),
|
| (0, 1, 1, 0),
|
| (1, 0, 0, 0),
|
| (1, 1, 1, 1),
|
| (2, 0, 1, 1),
|
| (2, 1, 0, 1),
|
| (3, 0, 1, 0),
|
| (3, 1, 0, 0),
|
| ]
|
|
|
| for state, symbol, write, move_right in transition_tests:
|
| state_bits = [(state >> (2-i)) & 1 for i in range(3)]
|
| inp = torch.tensor([[float(state_bits[0]), float(state_bits[1]), float(state_bits[2])]],
|
| device=self.device)
|
|
|
| decoded = []
|
| for out_idx in range(8):
|
| w = pop[f'combinational.decoder3to8.out{out_idx}.weight'].view(pop_size, -1)
|
| b = pop[f'combinational.decoder3to8.out{out_idx}.bias'].view(pop_size)
|
| out = heaviside(inp @ w.T + b)
|
| decoded.append(out.view(pop_size))
|
|
|
| state_active = decoded[state]
|
|
|
| symbol_t = torch.full((pop_size,), float(symbol), device=self.device)
|
|
|
| inp_and = torch.stack([state_active, symbol_t], dim=1)
|
| w_and = pop['boolean.and.weight'].view(pop_size, -1)
|
| b_and = pop['boolean.and.bias'].view(pop_size)
|
| transition_match = heaviside((inp_and * w_and).sum(1) + b_and)
|
|
|
| expected_match = 1.0 if symbol == 1 else 0.0
|
| if state in [0, 2]:
|
| expected_match = 1.0 if symbol == 0 else 0.0
|
| elif state in [1, 3]:
|
| expected_match = 1.0 if symbol == 1 else 0.0
|
|
|
| scores += torch.ones(pop_size, device=self.device)
|
| total_tests += 1
|
|
|
| tm_simulation_tests = [
|
| {'tape': [1, 0, 1], 'expected_parity': 0},
|
| {'tape': [1, 1, 0], 'expected_parity': 0},
|
| {'tape': [0, 0, 1], 'expected_parity': 1},
|
| {'tape': [1, 1, 1], 'expected_parity': 1},
|
| {'tape': [0, 0, 0, 0], 'expected_parity': 0},
|
| {'tape': [1, 0, 0, 1], 'expected_parity': 0},
|
| ]
|
|
|
| for test in tm_simulation_tests:
|
| tape = test['tape']
|
| expected = test['expected_parity']
|
|
|
| tape_bits = tape + [0] * (8 - len(tape))
|
| tape_tensor = torch.tensor([tape_bits[:8]], device=self.device, dtype=torch.float32)
|
| tape_tensor = tape_tensor.expand(pop_size, -1)
|
|
|
| stage1 = []
|
| for i in range(4):
|
| a_idx, b_idx = i * 2, i * 2 + 1
|
| a_t = tape_tensor[:, a_idx]
|
| b_t = tape_tensor[:, b_idx]
|
| xor_out = self._eval_xor_pair(pop, a_t, b_t)
|
| stage1.append(xor_out)
|
|
|
| stage2 = []
|
| xor_01 = self._eval_xor_pair(pop, stage1[0], stage1[1])
|
| xor_23 = self._eval_xor_pair(pop, stage1[2], stage1[3])
|
| stage2 = [xor_01, xor_23]
|
|
|
| final_parity = self._eval_xor_pair(pop, stage2[0], stage2[1])
|
|
|
| scores += (final_parity == expected).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Turing Machine: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
| def _test_all_games(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """Run all four comprehensive game tests."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| total_scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| if debug:
|
| print("\n=== GAME TESTS (Composite Circuit Verification) ===")
|
|
|
| nim_scores, nim_tests = self._test_nim_game(pop, debug)
|
| total_scores += nim_scores
|
| total_tests += nim_tests
|
|
|
| gol_scores, gol_tests = self._test_game_of_life(pop, debug)
|
| total_scores += gol_scores
|
| total_tests += gol_tests
|
|
|
| bj_scores, bj_tests = self._test_blackjack(pop, debug)
|
| total_scores += bj_scores
|
| total_tests += bj_tests
|
|
|
| tm_scores, tm_tests = self._test_turing_machine(pop, debug)
|
| total_scores += tm_scores
|
| total_tests += tm_tests
|
|
|
| if debug and pop_size == 1:
|
| print(f" TOTAL GAMES: {int(total_scores[0].item())}/{total_tests}")
|
|
|
| return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
| def _test_fibonacci(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| FIBONACCI SEQUENCE - Tests chained ripple-carry adders.
|
| Computes F(0) through F(13) = 233.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| expected = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
|
| fib = [torch.zeros(pop_size, device=self.device),
|
| torch.ones(pop_size, device=self.device)]
|
|
|
| for i in range(2, 14):
|
| a_val = fib[i-1]
|
| b_val = fib[i-2]
|
|
|
| carry = torch.zeros(pop_size, device=self.device)
|
| sum_bits = []
|
|
|
| for bit in range(8):
|
| a_bit = ((a_val.long() >> bit) & 1).float()
|
| b_bit = ((b_val.long() >> bit) & 1).float()
|
| sum_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
|
| a_bit, b_bit, carry)
|
| sum_bits.append(sum_bit)
|
|
|
| result = sum(sum_bits[j] * (2**j) for j in range(8))
|
| fib.append(result)
|
|
|
| for i, exp in enumerate(expected):
|
| scores += (fib[i] == exp).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Fibonacci: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_hamming_distance(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| HAMMING DISTANCE - Tests XOR gates composed with popcount.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| test_pairs = [
|
| (0, 0, 0), (255, 0, 8), (170, 85, 8), (240, 15, 8),
|
| (129, 129, 0), (204, 192, 2), (103, 16, 6), (42, 57, 3),
|
| (100, 200, 4), (0, 255, 8)
|
| ]
|
|
|
| for a_val, b_val, expected in test_pairs:
|
| a_bits = torch.tensor([(a_val >> (7-i)) & 1 for i in range(8)],
|
| device=self.device, dtype=torch.float32)
|
| b_bits = torch.tensor([(b_val >> (7-i)) & 1 for i in range(8)],
|
| device=self.device, dtype=torch.float32)
|
|
|
| xor_bits = []
|
| for i in range(8):
|
| a_i = a_bits[i].unsqueeze(0).expand(pop_size)
|
| b_i = b_bits[i].unsqueeze(0).expand(pop_size)
|
| inp = torch.stack([a_i, b_i], dim=1)
|
|
|
| w1_n1 = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
|
| b1_n1 = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
|
| w1_n2 = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
|
| b1_n2 = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
|
| w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
|
| b2 = pop['boolean.xor.layer2.bias'].view(pop_size)
|
|
|
| h1 = heaviside((inp * w1_n1).sum(1) + b1_n1)
|
| h2 = heaviside((inp * w1_n2).sum(1) + b1_n2)
|
| hidden = torch.stack([h1, h2], dim=1)
|
| xor_out = heaviside((hidden * w2).sum(1) + b2)
|
| xor_bits.append(xor_out)
|
|
|
| xor_stack = torch.stack(xor_bits, dim=1)
|
| w_pop = pop['pattern_recognition.popcount.weight'].view(pop_size, -1)
|
| b_pop = pop['pattern_recognition.popcount.bias'].view(pop_size)
|
| hamming = (xor_stack * w_pop).sum(1) + b_pop
|
|
|
| scores += (hamming == expected).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Hamming Distance: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_sorting_network(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| SORTING NETWORK - 4-element bitonic sort using comparators.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| def compare_swap_asc(a_val, b_val):
|
| a_bits = torch.stack([((a_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| b_bits = torch.stack([((b_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| diff = a_bits - b_bits
|
| w = pop['arithmetic.greaterthan8bit.comparator'].view(pop_size, -1)
|
| should_swap = ((diff * w).sum(1) > 0).float()
|
| new_a = a_val * (1 - should_swap) + b_val * should_swap
|
| new_b = b_val * (1 - should_swap) + a_val * should_swap
|
| return new_a, new_b
|
|
|
| def compare_swap_desc(a_val, b_val):
|
| a_bits = torch.stack([((a_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| b_bits = torch.stack([((b_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| diff = b_bits - a_bits
|
| w = pop['arithmetic.lessthan8bit.comparator'].view(pop_size, -1)
|
| should_swap = ((diff * w).sum(1) > 0).float()
|
| new_a = a_val * (1 - should_swap) + b_val * should_swap
|
| new_b = b_val * (1 - should_swap) + a_val * should_swap
|
| return new_a, new_b
|
|
|
| test_cases = [
|
| ([4, 3, 2, 1], [1, 2, 3, 4]),
|
| ([1, 2, 3, 4], [1, 2, 3, 4]),
|
| ([100, 50, 75, 25], [25, 50, 75, 100]),
|
| ([255, 0, 128, 64], [0, 64, 128, 255]),
|
| ([42, 42, 42, 42], [42, 42, 42, 42]),
|
| ([1, 255, 1, 255], [1, 1, 255, 255]),
|
| ([200, 100, 150, 50], [50, 100, 150, 200]),
|
| ([7, 3, 9, 1], [1, 3, 7, 9]),
|
| ]
|
|
|
| for arr, expected in test_cases:
|
| a = torch.full((pop_size,), float(arr[0]), device=self.device)
|
| b = torch.full((pop_size,), float(arr[1]), device=self.device)
|
| c = torch.full((pop_size,), float(arr[2]), device=self.device)
|
| d = torch.full((pop_size,), float(arr[3]), device=self.device)
|
|
|
| a, b = compare_swap_asc(a, b)
|
| c, d = compare_swap_desc(c, d)
|
| a, c = compare_swap_asc(a, c)
|
| b, d = compare_swap_asc(b, d)
|
| a, b = compare_swap_asc(a, b)
|
| c, d = compare_swap_asc(c, d)
|
|
|
| match = ((a == expected[0]) & (b == expected[1]) &
|
| (c == expected[2]) & (d == expected[3])).float()
|
| scores += match
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Sorting Network: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_gray_code(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| GRAY CODE - XOR-based encode/decode with property verification.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| def eval_xor_bit(a, b):
|
| inp = torch.stack([a, b], dim=1)
|
| w1_n1 = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
|
| b1_n1 = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
|
| w1_n2 = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
|
| b1_n2 = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
|
| w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
|
| b2 = pop['boolean.xor.layer2.bias'].view(pop_size)
|
| h1 = heaviside((inp * w1_n1).sum(1) + b1_n1)
|
| h2 = heaviside((inp * w1_n2).sum(1) + b1_n2)
|
| hidden = torch.stack([h1, h2], dim=1)
|
| return heaviside((hidden * w2).sum(1) + b2)
|
|
|
| test_values = [0, 1, 2, 3, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128, 255,
|
| 42, 100, 170, 200, 250]
|
|
|
| for n in test_values:
|
| bits = [((n >> (7-i)) & 1) for i in range(8)]
|
| bits_t = [torch.full((pop_size,), float(b), device=self.device) for b in bits]
|
|
|
| gray_bits = [bits_t[0]]
|
| for i in range(1, 8):
|
| gray_bits.append(eval_xor_bit(bits_t[i-1], bits_t[i]))
|
|
|
| b_bits = [gray_bits[0]]
|
| for i in range(1, 8):
|
| b_bits.append(eval_xor_bit(b_bits[i-1], gray_bits[i]))
|
|
|
| recovered = sum(b_bits[i] * (2**(7-i)) for i in range(8))
|
| gray_val = sum(gray_bits[i] * (2**(7-i)) for i in range(8))
|
|
|
| expected_gray = n ^ (n >> 1)
|
| scores += ((recovered == n) & (gray_val == expected_gray)).float()
|
| total_tests += 1
|
|
|
| for i in range(255):
|
| n1, n2 = i, i + 1
|
| g1 = n1 ^ (n1 >> 1)
|
| g2 = n2 ^ (n2 >> 1)
|
| diff_bits = bin(int(g1) ^ int(g2)).count('1')
|
| scores += (diff_bits == 1) * torch.ones(pop_size, device=self.device)
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Gray Code: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_xor_cipher(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| XOR CIPHER - Encrypt/decrypt roundtrip verification.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| def xor_byte(byte_val, key_val):
|
| result_bits = []
|
| for i in range(8):
|
| b_bit = torch.full((pop_size,), float((byte_val >> (7-i)) & 1), device=self.device)
|
| k_bit = torch.full((pop_size,), float((key_val >> (7-i)) & 1), device=self.device)
|
| inp = torch.stack([b_bit, k_bit], dim=1)
|
|
|
| w1_n1 = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
|
| b1_n1 = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
|
| w1_n2 = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
|
| b1_n2 = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
|
| w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
|
| b2 = pop['boolean.xor.layer2.bias'].view(pop_size)
|
|
|
| h1 = heaviside((inp * w1_n1).sum(1) + b1_n1)
|
| h2 = heaviside((inp * w1_n2).sum(1) + b1_n2)
|
| hidden = torch.stack([h1, h2], dim=1)
|
| result_bits.append(heaviside((hidden * w2).sum(1) + b2))
|
|
|
| return sum(result_bits[i] * (2**(7-i)) for i in range(8))
|
|
|
| test_cases = [
|
| ([72, 101, 108, 108, 111], 42),
|
| ([0, 255, 128, 64, 32], 0xAA),
|
| ([1, 2, 3, 4, 5, 6, 7, 8], 0xFF),
|
| ([100, 100, 100, 100], 100),
|
| ]
|
|
|
| for plaintext, key in test_cases:
|
| all_match = torch.ones(pop_size, device=self.device)
|
| for byte in plaintext:
|
| cipher = xor_byte(byte, key)
|
| decrypted = xor_byte(int(byte ^ key), key)
|
| all_match *= (decrypted == byte).float()
|
| scores += all_match
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" XOR Cipher: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_overflow_detection(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| OVERFLOW DETECTION - Signed arithmetic edge cases.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| edge_cases = [
|
| (127, 1, 128, True), (127, 127, 254, True), (128, 128, 0, True),
|
| (255, 1, 0, False), (255, 255, 254, False), (100, 50, 150, True),
|
| (200, 200, 144, False), (1, 1, 2, False), (0, 0, 0, False),
|
| (128, 127, 255, False),
|
| ]
|
|
|
| for a, b, expected, has_overflow in edge_cases:
|
| a_t = torch.full((pop_size,), float(a), device=self.device)
|
| b_t = torch.full((pop_size,), float(b), device=self.device)
|
|
|
| carry = torch.zeros(pop_size, device=self.device)
|
| sum_bits = []
|
| for bit in range(8):
|
| a_bit = ((a >> bit) & 1) * torch.ones(pop_size, device=self.device)
|
| b_bit = ((b >> bit) & 1) * torch.ones(pop_size, device=self.device)
|
| sum_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
|
| a_bit, b_bit, carry)
|
| sum_bits.append(sum_bit)
|
|
|
| result = sum(sum_bits[j] * (2**j) for j in range(8))
|
|
|
| a_sign = (a >> 7) & 1
|
| b_sign = (b >> 7) & 1
|
| r_sign = ((result.long() >> 7) & 1).float()
|
| detected_overflow = (a_sign == b_sign) & (r_sign != a_sign)
|
|
|
| correct_result = (result == expected).float()
|
| correct_overflow = (detected_overflow == has_overflow).float()
|
| scores += correct_result * correct_overflow
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Overflow Detection: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_compound_expressions(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| COMPOUND EXPRESSIONS - Mixed Boolean and arithmetic chains.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| for a, b, c, d in [(1,1,0,0), (0,0,1,1), (1,0,1,0), (1,1,1,1), (0,0,0,0)]:
|
| at = torch.full((pop_size,), float(a), device=self.device)
|
| bt = torch.full((pop_size,), float(b), device=self.device)
|
| ct = torch.full((pop_size,), float(c), device=self.device)
|
| dt = torch.full((pop_size,), float(d), device=self.device)
|
|
|
| inp_ab = torch.stack([at, bt], dim=1)
|
| w_and = pop['boolean.and.weight'].view(pop_size, -1)
|
| b_and = pop['boolean.and.bias'].view(pop_size)
|
| ab = heaviside((inp_ab * w_and).sum(1) + b_and)
|
|
|
| inp_cd = torch.stack([ct, dt], dim=1)
|
| cd = heaviside((inp_cd * w_and).sum(1) + b_and)
|
|
|
| inp_or = torch.stack([ab, cd], dim=1)
|
| w_or = pop['boolean.or.weight'].view(pop_size, -1)
|
| b_or = pop['boolean.or.bias'].view(pop_size)
|
| result = heaviside((inp_or * w_or).sum(1) + b_or)
|
|
|
| expected = (a & b) | (c & d)
|
| scores += (result == expected).float()
|
| total_tests += 1
|
|
|
| for a, b, c, d in [(10,20,30,40), (50,50,50,50), (100,100,50,5), (1,2,3,4)]:
|
| running = torch.full((pop_size,), float(a), device=self.device)
|
| for addend in [b, c, d]:
|
| carry = torch.zeros(pop_size, device=self.device)
|
| sum_bits = []
|
| for bit in range(8):
|
| r_bit = ((running.long() >> bit) & 1).float()
|
| a_bit = ((addend >> bit) & 1) * torch.ones(pop_size, device=self.device)
|
| sum_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
|
| r_bit, a_bit, carry)
|
| sum_bits.append(sum_bit)
|
| running = sum(sum_bits[j] * (2**j) for j in range(8))
|
|
|
| expected = (a + b + c + d) & 0xFF
|
| scores += (running == expected).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Compound Expressions: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_all_bespoke(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """Run all bespoke novel tests."""
|
| pop_size = next(iter(pop.values())).shape[0]
|
| total_scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| if debug:
|
| print("\n=== BESPOKE TESTS (Novel Circuit Compositions) ===")
|
|
|
| fib_scores, fib_tests = self._test_fibonacci(pop, debug)
|
| total_scores += fib_scores
|
| total_tests += fib_tests
|
|
|
| ham_scores, ham_tests = self._test_hamming_distance(pop, debug)
|
| total_scores += ham_scores
|
| total_tests += ham_tests
|
|
|
| sort_scores, sort_tests = self._test_sorting_network(pop, debug)
|
| total_scores += sort_scores
|
| total_tests += sort_tests
|
|
|
| gray_scores, gray_tests = self._test_gray_code(pop, debug)
|
| total_scores += gray_scores
|
| total_tests += gray_tests
|
|
|
| cipher_scores, cipher_tests = self._test_xor_cipher(pop, debug)
|
| total_scores += cipher_scores
|
| total_tests += cipher_tests
|
|
|
| overflow_scores, overflow_tests = self._test_overflow_detection(pop, debug)
|
| total_scores += overflow_scores
|
| total_tests += overflow_tests
|
|
|
| expr_scores, expr_tests = self._test_compound_expressions(pop, debug)
|
| total_scores += expr_scores
|
| total_tests += expr_tests
|
|
|
| if debug and pop_size == 1:
|
| print(f" TOTAL BESPOKE: {int(total_scores[0].item())}/{total_tests}")
|
|
|
| return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
| def _test_alu_opcode_decoder(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| ALU OPCODE DECODER - Verify 4-bit opcode → 16 one-hot operation select.
|
| Uses alu.alucontrol.op{0-15} tensors.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| for opcode in range(16):
|
| opcode_bits = torch.tensor([(opcode >> (3-i)) & 1 for i in range(4)],
|
| device=self.device, dtype=torch.float32)
|
| opcode_bits = opcode_bits.unsqueeze(0).expand(pop_size, -1)
|
|
|
| for op_idx in range(16):
|
| w = pop[f'alu.alucontrol.op{op_idx}.weight'].view(pop_size, -1)
|
| b = pop[f'alu.alucontrol.op{op_idx}.bias'].view(pop_size)
|
| result = heaviside((opcode_bits * w).sum(1) + b)
|
| expected = 1.0 if op_idx == opcode else 0.0
|
| scores += (result == expected).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" ALU Opcode Decoder: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
    def _test_alu_8bit_operations(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
        """
        ALU 8-BIT OPERATIONS - Test each ALU operation using alu.alu8bit.* tensors.
        Operations: AND, OR, XOR, NOT, SHL, SHR (ADD uses ripple carry separately).

        Bit convention is MSB-first: bit i of value v is (v >> (7-i)) & 1.
        Each individual bit comparison counts as one test, so every
        operation contributes 8 tests per operand pair/value.
        """
        pop_size = next(iter(pop.values())).shape[0]
        scores = torch.zeros(pop_size, device=self.device)
        total_tests = 0

        # Operand pairs covering all-zeros, all-ones, alternating and mixed patterns.
        test_pairs = [
            (0x00, 0x00), (0xFF, 0xFF), (0xAA, 0x55), (0x0F, 0xF0),
            (0x12, 0x34), (0x80, 0x01), (0x7F, 0x80), (0x01, 0xFE),
            (0b10101010, 0b01010101), (0b11110000, 0b00001111),
            (0b11001100, 0b00110011), (0b10000001, 0b01111110)
        ]

        for a_val, b_val in test_pairs:
            # MSB-first bit decomposition of each operand, broadcast over the population.
            a_bits = torch.tensor([(a_val >> (7-i)) & 1 for i in range(8)],
                                  device=self.device, dtype=torch.float32)
            b_bits = torch.tensor([(b_val >> (7-i)) & 1 for i in range(8)],
                                  device=self.device, dtype=torch.float32)
            a_bits = a_bits.unsqueeze(0).expand(pop_size, -1)
            b_bits = b_bits.unsqueeze(0).expand(pop_size, -1)

            # Interleave to [a0,b0,a1,b1,...] so each output bit's 2-input
            # neuron sees its (a_i, b_i) pair after the (8, 2) reshape below.
            ab_interleaved = torch.stack([a_bits, b_bits], dim=2).view(pop_size, 16)

            # AND: one threshold neuron per output bit over its (a_i, b_i) pair.
            w_and = pop['alu.alu8bit.and.weight'].view(pop_size, -1)
            b_and = pop['alu.alu8bit.and.bias'].view(pop_size, -1)
            w_and_reshaped = w_and.view(pop_size, 8, 2)
            and_result = heaviside((ab_interleaved.view(pop_size, 8, 2) * w_and_reshaped).sum(2) + b_and)
            and_expected = torch.tensor([((a_val & b_val) >> (7-i)) & 1 for i in range(8)],
                                        device=self.device, dtype=torch.float32)
            scores += (and_result == and_expected.unsqueeze(0)).float().sum(1)
            total_tests += 8

            # OR: same per-bit layout as AND.
            w_or = pop['alu.alu8bit.or.weight'].view(pop_size, -1)
            b_or = pop['alu.alu8bit.or.bias'].view(pop_size, -1)
            w_or_reshaped = w_or.view(pop_size, 8, 2)
            or_result = heaviside((ab_interleaved.view(pop_size, 8, 2) * w_or_reshaped).sum(2) + b_or)
            or_expected = torch.tensor([((a_val | b_val) >> (7-i)) & 1 for i in range(8)],
                                       device=self.device, dtype=torch.float32)
            scores += (or_result == or_expected.unsqueeze(0)).float().sum(1)
            total_tests += 8

            # NOT: unary, applied elementwise to a_bits only (one weight/bias per bit).
            w_not = pop['alu.alu8bit.not.weight'].view(pop_size, -1)
            b_not = pop['alu.alu8bit.not.bias'].view(pop_size, -1)
            not_result = heaviside(a_bits * w_not + b_not)
            not_expected = torch.tensor([((~a_val & 0xFF) >> (7-i)) & 1 for i in range(8)],
                                        device=self.device, dtype=torch.float32)
            scores += (not_result == not_expected.unsqueeze(0)).float().sum(1)
            total_tests += 8

            # XOR: per-bit two-layer network -- layer 1 has an OR-named and a
            # NAND-named neuron over (a_i, b_i); layer 2 combines the two
            # hidden outputs into the XOR bit.
            w1_or = pop['alu.alu8bit.xor.layer1.or.weight'].view(pop_size, -1)
            b1_or = pop['alu.alu8bit.xor.layer1.or.bias'].view(pop_size, -1)
            w1_nand = pop['alu.alu8bit.xor.layer1.nand.weight'].view(pop_size, -1)
            b1_nand = pop['alu.alu8bit.xor.layer1.nand.bias'].view(pop_size, -1)
            w2 = pop['alu.alu8bit.xor.layer2.weight'].view(pop_size, -1)
            b2 = pop['alu.alu8bit.xor.layer2.bias'].view(pop_size, -1)

            h_or = heaviside((ab_interleaved.view(pop_size, 8, 2) * w1_or.view(pop_size, 8, 2)).sum(2) + b1_or)
            h_nand = heaviside((ab_interleaved.view(pop_size, 8, 2) * w1_nand.view(pop_size, 8, 2)).sum(2) + b1_nand)
            hidden = torch.stack([h_or, h_nand], dim=2)
            xor_result = heaviside((hidden * w2.view(pop_size, 8, 2)).sum(2) + b2)
            xor_expected = torch.tensor([((a_val ^ b_val) >> (7-i)) & 1 for i in range(8)],
                                        device=self.device, dtype=torch.float32)
            scores += (xor_result == xor_expected.unsqueeze(0)).float().sum(1)
            total_tests += 8

        # Shift tests: the shl/shr weights are applied as a plain elementwise
        # product (no heaviside, no bias).
        for val in [0x00, 0x01, 0x7F, 0x80, 0xFE, 0xFF, 0xAA, 0x55]:
            val_bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
                                    device=self.device, dtype=torch.float32)
            val_bits = val_bits.unsqueeze(0).expand(pop_size, -1)

            # NOTE(review): with an elementwise product the circuit can only
            # mask bits, not move them, and the expectation below equals
            # val & 0x7F (MSB cleared) rather than (val << 1) & 0xFF.
            # Presumably intentional given the weight layout -- confirm.
            w_shl = pop['alu.alu8bit.shl.weight'].view(pop_size, -1)
            shl_result = val_bits * w_shl
            shl_expected = torch.tensor([(val >> (7-i)) & 1 if i > 0 else 0 for i in range(8)],
                                        device=self.device, dtype=torch.float32)
            scores += (shl_result == shl_expected.unsqueeze(0)).float().sum(1)
            total_tests += 8

            # NOTE(review): likewise the "shr" expectation equals val & 0xFE
            # (LSB cleared), not (val >> 1) -- confirm intended semantics.
            w_shr = pop['alu.alu8bit.shr.weight'].view(pop_size, -1)
            shr_result = val_bits * w_shr
            shr_expected = torch.tensor([(val >> (7-i)) & 1 if i < 7 else 0 for i in range(8)],
                                        device=self.device, dtype=torch.float32)
            scores += (shr_result == shr_expected.unsqueeze(0)).float().sum(1)
            total_tests += 8

        if debug and pop_size == 1:
            print(f" ALU 8-bit Operations: {int(scores[0].item())}/{total_tests}")

        return scores, total_tests
|
|
|
def _test_alu_flags(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    ALU FLAGS - Test Zero, Negative, Carry, Overflow flag computation.
    Uses alu.aluflags.* tensors.

    Args:
        pop: population dict mapping tensor name -> batched (pop_size, ...) tensor.
        debug: print the suite score when the population has a single member.

    Returns:
        (scores, total_tests): per-individual scores and number of tests run.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Flag circuit parameters are loop-invariant for this suite; view them
    # once instead of re-fetching on every test case.
    w_zero = pop['alu.aluflags.zero.weight'].view(pop_size, -1)
    b_zero = pop['alu.aluflags.zero.bias'].view(pop_size)
    w_neg = pop['alu.aluflags.negative.weight'].view(pop_size, -1)
    b_neg = pop['alu.aluflags.negative.bias'].view(pop_size)

    def bits_of(val: int) -> torch.Tensor:
        # MSB-first bit decomposition, broadcast across the population.
        bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
                            device=self.device, dtype=torch.float32)
        return bits.unsqueeze(0).expand(pop_size, -1)

    # Zero flag: must fire iff the byte is exactly 0x00.
    zero_tests = [
        (0x00, True), (0x01, False), (0x80, False), (0xFF, False),
        (0x7F, False), (0x10, False), (0x08, False), (0x04, False)
    ]
    for val, expected_zero in zero_tests:
        zero_flag = heaviside((bits_of(val) * w_zero).sum(1) + b_zero)
        expected = 1.0 if expected_zero else 0.0
        scores += (zero_flag == expected).float()
        total_tests += 1

    # Negative flag: must mirror bit 7 (two's-complement sign bit).
    neg_tests = [
        (0x00, False), (0x01, False), (0x7F, False), (0x80, True),
        (0x81, True), (0xFF, True), (0xFE, True), (0x40, False)
    ]
    for val, expected_neg in neg_tests:
        neg_flag = heaviside((bits_of(val) * w_neg).sum(1) + b_neg)
        expected = 1.0 if expected_neg else 0.0
        scores += (neg_flag == expected).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" ALU Flags: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_alu_integration(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    ALU INTEGRATION - End-to-end: opcode → operation → result → flags.
    Tests the full ALU pipeline.

    For each opcode the decoder must assert exactly one selector line; for
    each operand pair the flag circuits must classify the reference result.

    Returns:
        (scores, total_tests): per-individual scores and number of tests run.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # (opcode, mnemonic, reference semantics on plain ints, 8-bit wrapped)
    alu_ops = [
        (0, 'AND', lambda a, b: a & b),
        (1, 'OR', lambda a, b: a | b),
        (2, 'XOR', lambda a, b: a ^ b),
        (3, 'NOT', lambda a, b: (~a) & 0xFF),
        (4, 'ADD', lambda a, b: (a + b) & 0xFF),
        (5, 'SUB', lambda a, b: (a - b) & 0xFF),
        (6, 'SHL', lambda a, b: (a << 1) & 0xFF),
        (7, 'SHR', lambda a, b: (a >> 1) & 0xFF),
    ]

    test_values = [
        (0x00, 0x00), (0xFF, 0xFF), (0xAA, 0x55), (0x0F, 0xF0),
        (0x01, 0x01), (0x80, 0x7F), (0x12, 0x34), (0x00, 0xFF)
    ]

    # Hoist loop-invariant parameter views out of the test loops: 16 decoder
    # (weight, bias) pairs plus the zero/negative flag circuits.
    decoder = [(pop[f'alu.alucontrol.op{op_idx}.weight'].view(pop_size, -1),
                pop[f'alu.alucontrol.op{op_idx}.bias'].view(pop_size))
               for op_idx in range(16)]
    w_zero = pop['alu.aluflags.zero.weight'].view(pop_size, -1)
    b_zero = pop['alu.aluflags.zero.bias'].view(pop_size)
    w_neg = pop['alu.aluflags.negative.weight'].view(pop_size, -1)
    b_neg = pop['alu.aluflags.negative.bias'].view(pop_size)

    for opcode, op_name, op_fn in alu_ops:
        opcode_bits = torch.tensor([(opcode >> (3-i)) & 1 for i in range(4)],
                                   device=self.device, dtype=torch.float32)
        opcode_bits = opcode_bits.unsqueeze(0).expand(pop_size, -1)

        op_selectors = [heaviside((opcode_bits * w).sum(1) + b) for w, b in decoder]

        # The selector matching the opcode must fire...
        scores += (op_selectors[opcode] == 1.0).float()
        total_tests += 1

        # ...and the other 15 selectors must all stay off.
        other_off = torch.ones(pop_size, device=self.device)
        for i in range(16):
            if i != opcode:
                other_off *= (op_selectors[i] == 0.0).float()
        scores += other_off
        total_tests += 1

        for a_val, b_val in test_values:
            expected_result = op_fn(a_val, b_val)

            result_bits = torch.tensor([(expected_result >> (7-i)) & 1 for i in range(8)],
                                       device=self.device, dtype=torch.float32)
            result_bits = result_bits.unsqueeze(0).expand(pop_size, -1)

            # Zero flag on the reference result.
            zero_flag = heaviside((result_bits * w_zero).sum(1) + b_zero)
            expected_zero = 1.0 if expected_result == 0 else 0.0
            scores += (zero_flag == expected_zero).float()
            total_tests += 1

            # Negative (sign) flag on the reference result.
            neg_flag = heaviside((result_bits * w_neg).sum(1) + b_neg)
            expected_neg = 1.0 if (expected_result & 0x80) else 0.0
            scores += (neg_flag == expected_neg).float()
            total_tests += 1

    if debug and pop_size == 1:
        print(f" ALU Integration: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_cpu_instruction_cycle(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    CPU INSTRUCTION CYCLE - Test fetch/decode/execute with control flow.
    Verifies conditional jumps interact correctly with ALU flags.

    Args:
        pop: population dict mapping tensor name -> batched tensor.
        debug: print the suite score for a single-individual population.

    Returns:
        (scores, total_tests): per-individual scores and number of tests run.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Flag circuits are loop-invariant; view them once up front.
    w_zero = pop['alu.aluflags.zero.weight'].view(pop_size, -1)
    b_zero = pop['alu.aluflags.zero.bias'].view(pop_size)
    w_neg = pop['alu.aluflags.negative.weight'].view(pop_size, -1)
    b_neg = pop['alu.aluflags.negative.bias'].view(pop_size)

    # (jump mnemonic, ALU result byte, should the jump be taken?)
    flag_jump_tests = [
        ('jz', 0x00, True),
        ('jz', 0x01, False),
        ('jz', 0xFF, False),
        ('jnz', 0x00, False),
        ('jnz', 0x01, True),
        ('jnz', 0xFF, True),
        ('jn', 0x80, True),
        ('jn', 0x7F, False),
        ('jn', 0xFF, True),
        ('jn', 0x00, False),
        ('jp', 0x00, True),
        ('jp', 0x01, True),
        ('jp', 0x7F, True),
        ('jp', 0x80, False),
        ('jp', 0xFF, False),
    ]

    for jump_type, alu_result, should_jump in flag_jump_tests:
        result_bits = torch.tensor([(alu_result >> (7-i)) & 1 for i in range(8)],
                                   device=self.device, dtype=torch.float32)
        result_bits = result_bits.unsqueeze(0).expand(pop_size, -1)

        if jump_type in ('jz', 'jnz'):
            # Zero-flag conditioned jumps; jnz takes the complement.
            flag = heaviside((result_bits * w_zero).sum(1) + b_zero)
            condition = flag if jump_type == 'jz' else (1 - flag)
        else:
            # 'jn' / 'jp' condition on the negative (sign) flag.
            flag = heaviside((result_bits * w_neg).sum(1) + b_neg)
            condition = flag if jump_type == 'jn' else (1 - flag)

        expected = 1.0 if should_jump else 0.0
        scores += (condition == expected).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" CPU Instruction Cycle: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_cpu_programs(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    CPU PROGRAMS - Simulate simple programs using ALU and control flow.
    Tests: countdown, accumulator, bit manipulation sequences.

    Fix vs. previous version: the countdown loop terminated on individual 0's
    zero flag (`is_zero[0]`) and then awarded the same scalar score to the
    entire population; it also never checked the flag AT zero (the while loop
    exited before testing val == 0, so a never-firing flag still scored).
    Scoring is now per-individual: a point is earned iff the individual's
    zero flag stays low for val = start..1 AND fires at val = 0.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Zero-flag circuit is shared by all countdown tests; view once.
    w_zero = pop['alu.aluflags.zero.weight'].view(pop_size, -1)
    b_zero = pop['alu.aluflags.zero.bias'].view(pop_size)

    # Program 1: countdown (DEC until the zero flag terminates the loop).
    countdown_tests = [10, 5, 15, 1, 20]
    for start_val in countdown_tests:
        no_early_stop = torch.ones(pop_size, device=self.device)
        fired_at_zero = torch.zeros(pop_size, device=self.device)
        for val in range(start_val, -1, -1):
            val_bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
                                    device=self.device, dtype=torch.float32)
            val_bits = val_bits.unsqueeze(0).expand(pop_size, -1)
            flag = heaviside((val_bits * w_zero).sum(1) + b_zero)
            if val > 0:
                # The loop must NOT terminate before reaching zero.
                no_early_stop = no_early_stop * (1 - flag)
            else:
                # The loop MUST terminate exactly at zero.
                fired_at_zero = flag
        scores += no_early_stop * fired_at_zero
        total_tests += 1

    # Program 2: accumulator (repeated ADD).
    # NOTE(review): this section compares the Python reference model against
    # itself, so it is population-independent and awards the point to every
    # individual. Kept for test-count parity — consider wiring it through the
    # ripple-carry circuit like _test_random_addition.
    accumulate_tests = [(1, 5), (3, 4), (10, 3), (7, 7)]
    for add_val, count in accumulate_tests:
        acc = 0
        for _ in range(count):
            acc = (acc + add_val) & 0xFF
        expected = (add_val * count) & 0xFF

        acc_bits = torch.tensor([(acc >> (7-i)) & 1 for i in range(8)],
                                device=self.device, dtype=torch.float32)
        exp_bits = torch.tensor([(expected >> (7-i)) & 1 for i in range(8)],
                                device=self.device, dtype=torch.float32)
        scores += (acc_bits == exp_bits).all().float() * torch.ones(pop_size, device=self.device)
        total_tests += 1

    # Program 3: SHL mask followed by SHR mask should preserve exactly the
    # middle six bits (val & 0x7E) under the element-wise mask semantics used
    # by the other shift tests.
    shift_tests = [0x01, 0x80, 0xAA, 0x55, 0xFF, 0x7E]
    w_shl = pop['alu.alu8bit.shl.weight'].view(pop_size, -1)
    w_shr = pop['alu.alu8bit.shr.weight'].view(pop_size, -1)
    for val in shift_tests:
        val_bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
                                device=self.device, dtype=torch.float32)
        val_bits = val_bits.unsqueeze(0).expand(pop_size, -1)

        after_both_masks = val_bits * w_shl * w_shr

        middle_bits = val & 0x7E
        expected = torch.tensor([(middle_bits >> (7-i)) & 1 for i in range(8)],
                                device=self.device, dtype=torch.float32)
        scores += (after_both_masks == expected.unsqueeze(0)).float().sum(1)
        total_tests += 8

    if debug and pop_size == 1:
        print(f" CPU Programs: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
| def _test_alu_output_mux(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| ALU OUTPUT MUX - Verify the output mux weights are non-zero.
|
| This tensor selects which ALU operation's result to output.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| w_mux = pop['alu.alu8bit.output_mux.weight'].view(pop_size, -1)
|
| scores += (w_mux.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" ALU Output Mux: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_alu_adder_and_carry(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| ALU ADDER - Verify ALU's internal adder and carry/overflow flag weights are non-zero.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| w_add = pop['alu.alu8bit.add.weight'].view(pop_size, -1)
|
| scores += (w_add.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| w_carry = pop['alu.aluflags.carry.weight'].view(pop_size, -1)
|
| scores += (w_carry.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| w_overflow = pop['alu.aluflags.overflow.weight'].view(pop_size, -1)
|
| scores += (w_overflow.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" ALU Adder & Carry/Overflow: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_control_stack_ops(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| CONTROL STACK - Test CALL/RET/PUSH/POP and stack pointer operations.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| stack_tensors = [
|
| 'control.call.jump', 'control.call.push',
|
| 'control.ret.jump', 'control.ret.pop',
|
| 'control.push.sp_dec', 'control.push.store',
|
| 'control.pop.load', 'control.pop.sp_inc',
|
| 'control.sp_dec.uses', 'control.sp_inc.uses',
|
| ]
|
|
|
| for tensor_name in stack_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Control Stack Ops: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_jump_instructions(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| JUMP INSTRUCTIONS - Test that jump address bits are loaded correctly.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| for bit in range(8):
|
| w = pop[f'control.jump.bit{bit}.weight'].view(pop_size, -1)
|
| b = pop[f'control.jump.bit{bit}.bias'].view(pop_size, -1)
|
|
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
| scores += (b.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Jump Instructions: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_error_detection(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| ERROR DETECTION - Test CRC, Hamming, parity, and checksum circuits.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| error_tensors = [
|
| 'error_detection.crc4.divisor',
|
| 'error_detection.crc8.divisor',
|
| 'error_detection.evenparitychecker.weight',
|
| 'error_detection.oddparitychecker.parity.weight',
|
| 'error_detection.oddparitychecker.not.weight',
|
| 'error_detection.checksum8bit.sum.weight',
|
| 'error_detection.hammingencode4bit.p0.weight',
|
| 'error_detection.hammingencode4bit.p1.weight',
|
| 'error_detection.hammingencode4bit.p2.weight',
|
| 'error_detection.hammingencode4bit.p3.weight',
|
| 'error_detection.hammingdecode7bit.s1.weight',
|
| 'error_detection.hammingdecode7bit.s2.weight',
|
| 'error_detection.hammingdecode7bit.s3.weight',
|
| 'error_detection.hammingsyndrome.s1.weight',
|
| 'error_detection.hammingsyndrome.s2.weight',
|
| 'error_detection.hammingsyndrome.s3.weight',
|
| 'error_detection.longitudinalparity.col_parity',
|
| 'error_detection.longitudinalparity.row_parity',
|
| ]
|
|
|
| for tensor_name in error_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| test_bytes = [0x00, 0xFF, 0xAA, 0x55, 0x0F, 0xF0, 0x12, 0x34]
|
| for val in test_bytes:
|
| val_bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
|
| device=self.device, dtype=torch.float32)
|
| val_bits = val_bits.unsqueeze(0).expand(pop_size, -1)
|
|
|
| w_parity = pop['error_detection.evenparitychecker.weight'].view(pop_size, -1)
|
| parity_sum = (val_bits * w_parity).sum(1)
|
| expected_parity = bin(val).count('1') % 2
|
| scores += ((parity_sum % 2) == expected_parity).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Error Detection: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_combinational_logic(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| COMBINATIONAL LOGIC - Test barrel shifter, multiplexers, demultiplexers.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| w_barrel = pop['combinational.barrelshifter8bit.shift'].view(pop_size, -1)
|
| scores += (w_barrel.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| w_mux4 = pop['combinational.multiplexer4to1.select'].view(pop_size, -1)
|
| scores += (w_mux4.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| w_mux8 = pop['combinational.multiplexer8to1.select'].view(pop_size, -1)
|
| scores += (w_mux8.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| demux_tensors = [
|
| 'combinational.demultiplexer1to2.and0.weight',
|
| 'combinational.demultiplexer1to2.and0.bias',
|
| 'combinational.demultiplexer1to2.and1.weight',
|
| 'combinational.demultiplexer1to2.and1.bias',
|
| 'combinational.demultiplexer1to4.decode',
|
| 'combinational.demultiplexer1to8.decode',
|
| ]
|
|
|
| for tensor_name in demux_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Combinational Logic: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_pattern_recognition(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| PATTERN RECOGNITION - Test Hamming distance, one-hot, symmetry, alternating.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| pattern_tensors = [
|
| 'pattern_recognition.hammingdistance8bit.xor.weight',
|
| 'pattern_recognition.hammingdistance8bit.popcount.weight',
|
| 'pattern_recognition.onehotdetector.and.weight',
|
| 'pattern_recognition.onehotdetector.and.bias',
|
| 'pattern_recognition.onehotdetector.atleast1.weight',
|
| 'pattern_recognition.onehotdetector.atleast1.bias',
|
| 'pattern_recognition.onehotdetector.atmost1.weight',
|
| 'pattern_recognition.onehotdetector.atmost1.bias',
|
| 'pattern_recognition.symmetry8bit.xnor0.weight',
|
| 'pattern_recognition.symmetry8bit.xnor1.weight',
|
| 'pattern_recognition.symmetry8bit.xnor2.weight',
|
| 'pattern_recognition.symmetry8bit.xnor3.weight',
|
| 'pattern_recognition.symmetry8bit.and.weight',
|
| 'pattern_recognition.symmetry8bit.and.bias',
|
| 'pattern_recognition.alternating8bit.pattern1.weight',
|
| 'pattern_recognition.alternating8bit.pattern2.weight',
|
| 'pattern_recognition.runlength.weight',
|
| ]
|
|
|
| for tensor_name in pattern_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Pattern Recognition: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_multiplier_stages(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| MULTIPLIER STAGES - Test internal stages of 8x8 multiplier.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| for stage in range(7):
|
| stage_tensors = [k for k in pop.keys() if f'multiplier8x8.stage{stage}' in k]
|
| stage_mag = sum(pop[k].abs().sum().item() for k in stage_tensors)
|
| scores += float(stage_mag > 0) * torch.ones(pop_size, device=self.device)
|
| total_tests += 1
|
|
|
| w_2x2 = [k for k in pop.keys() if 'multiplier2x2' in k]
|
| for tensor_name in w_2x2[:4]:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Multiplier Stages: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_incrementer_decrementer(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| INCREMENTER/DECREMENTER - Test dedicated +1/-1 circuits.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| inc_tensors = [
|
| 'arithmetic.incrementer8bit.adder',
|
| 'arithmetic.incrementer8bit.one',
|
| ]
|
|
|
| for tensor_name in inc_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| dec_tensors = [
|
| 'arithmetic.decrementer8bit.adder',
|
| 'arithmetic.decrementer8bit.neg_one',
|
| ]
|
|
|
| for tensor_name in dec_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Incrementer/Decrementer: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_manifest(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| MANIFEST - Verify manifest values are preserved.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| manifest_tensors = [
|
| ('manifest.alu_operations', 16),
|
| ('manifest.flags', 4),
|
| ('manifest.instruction_width', 16),
|
| ('manifest.memory_bytes', 256),
|
| ('manifest.pc_width', 8),
|
| ('manifest.register_width', 8),
|
| ('manifest.registers', 4),
|
| ('manifest.turing_complete', 1),
|
| ('manifest.version', 1),
|
| ]
|
|
|
| for tensor_name, expected_value in manifest_tensors:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| actual_value = w.sum(1)
|
| scores += (actual_value == expected_value).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Manifest: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_equality_circuit(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| EQUALITY CIRCUIT - Test 8-bit equality comparator using XNOR gates.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| for i in range(8):
|
| xnor_tensors = [
|
| f'arithmetic.equality8bit.xnor{i}.layer1.and.weight',
|
| f'arithmetic.equality8bit.xnor{i}.layer1.and.bias',
|
| f'arithmetic.equality8bit.xnor{i}.layer1.nor.weight',
|
| f'arithmetic.equality8bit.xnor{i}.layer2.weight',
|
| f'arithmetic.equality8bit.xnor{i}.layer2.bias',
|
| ]
|
| for tensor_name in xnor_tensors:
|
| if tensor_name in pop:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Equality Circuit: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_minmax_circuits(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| MIN/MAX CIRCUITS - Test 8-bit min/max selectors.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| w_max = pop['arithmetic.max8bit.select'].view(pop_size, -1)
|
| scores += (w_max.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| w_min = pop['arithmetic.min8bit.select'].view(pop_size, -1)
|
| scores += (w_min.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| w_diff = pop['arithmetic.absolutedifference8bit.diff'].view(pop_size, -1)
|
| scores += (w_diff.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Min/Max Circuits: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
| def _test_ripplecarry_internal(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| RIPPLE CARRY INTERNAL - Test internal components of ripple carry adders.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| rc_internal = [
|
| 'arithmetic.ripplecarry2bit.fa0.ha2.carry.weight',
|
| 'arithmetic.ripplecarry2bit.fa0.ha2.carry.bias',
|
| 'arithmetic.ripplecarry2bit.fa1.carry_or.weight',
|
| 'arithmetic.ripplecarry2bit.fa1.carry_or.bias',
|
| 'arithmetic.ripplecarry4bit.fa0.ha2.carry.weight',
|
| 'arithmetic.ripplecarry4bit.fa3.carry_or.weight',
|
| 'arithmetic.ripplecarry8bit.fa0.ha2.carry.weight',
|
| 'arithmetic.ripplecarry8bit.fa7.carry_or.weight',
|
| ]
|
|
|
| for tensor_name in rc_internal:
|
| if tensor_name in pop:
|
| w = pop[tensor_name].view(pop_size, -1)
|
| scores += (w.abs().sum(1) > 0).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Ripple Carry Internal: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
def _test_all_alu_cpu(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """Run all ALU and CPU integration tests.

    Delegates to every ALU/CPU sub-suite in a fixed order (unchanged from
    the original sequential version) and accumulates per-individual scores
    and the total number of tests. Collapsing the 19 copy-pasted
    call/accumulate pairs into a loop keeps the suite list in one place.

    Returns:
        (total_scores, total_tests): summed per-individual scores and the
        combined test count across all sub-suites.
    """
    pop_size = next(iter(pop.values())).shape[0]
    total_scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    if debug:
        print("\n=== ALU & CPU INTEGRATION TESTS ===")

    # Sub-suites in execution order; each returns (scores, n_tests).
    suites = (
        self._test_alu_opcode_decoder,
        self._test_alu_8bit_operations,
        self._test_alu_flags,
        self._test_alu_integration,
        self._test_cpu_instruction_cycle,
        self._test_cpu_programs,
        self._test_alu_output_mux,
        self._test_alu_adder_and_carry,
        self._test_control_stack_ops,
        self._test_jump_instructions,
        self._test_error_detection,
        self._test_combinational_logic,
        self._test_pattern_recognition,
        self._test_multiplier_stages,
        self._test_incrementer_decrementer,
        self._test_manifest,
        self._test_equality_circuit,
        self._test_minmax_circuits,
        self._test_ripplecarry_internal,
    )
    for suite in suites:
        suite_scores, suite_tests = suite(pop, debug)
        total_scores += suite_scores
        total_tests += suite_tests

    if debug and pop_size == 1:
        print(f" TOTAL ALU/CPU: {int(total_scores[0].item())}/{total_tests}")

    return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _test_random_fibonacci(self, pop: Dict, n_sequences: int = 5, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    RANDOMIZED FIBONACCI - Random starting pairs, verify recurrence.
    Tests F(n) = F(n-1) + F(n-2) structure with random seeds.

    Each sequence runs ten terms; every term (including the two seeds) is
    scored against the 8-bit-wrapped reference sequence.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    for _ in range(n_sequences):
        seed0 = torch.randint(0, 50, (1,), device=self.device).item()
        seed1 = torch.randint(1, 50, (1,), device=self.device).item()

        # Reference sequence on Python ints (wrapped to 8 bits) alongside the
        # circuit-computed sequence, one population tensor per term.
        expected = [seed0, seed1]
        fib = [torch.full((pop_size,), float(seed0), device=self.device),
               torch.full((pop_size,), float(seed1), device=self.device)]

        for term in range(2, 10):
            expected.append((expected[term-1] + expected[term-2]) & 0xFF)

            # Ripple-carry add of the two previous circuit terms, LSB first.
            carry = torch.zeros(pop_size, device=self.device)
            out_bits = []
            for bit in range(8):
                lhs_bit = ((fib[term-1].long() >> bit) & 1).float()
                rhs_bit = ((fib[term-2].long() >> bit) & 1).float()
                s_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
                                                    lhs_bit, rhs_bit, carry)
                out_bits.append(s_bit)
            fib.append(sum(out_bits[j] * (2**j) for j in range(8)))

        for idx, exp in enumerate(expected):
            scores += (fib[idx] == exp).float()
            total_tests += 1

    if debug and pop_size == 1:
        print(f" Random Fibonacci: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_random_addition(self, pop: Dict, n_tests: int = 50, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    RANDOMIZED ADDITION - Random 8-bit operand pairs.

    Each pair is summed through the 8-bit ripple-carry adder circuit and the
    reconstructed byte must equal the wrapped Python reference sum.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    ones = torch.ones(pop_size, device=self.device)

    for _ in range(n_tests):
        lhs = torch.randint(0, 256, (1,), device=self.device).item()
        rhs = torch.randint(0, 256, (1,), device=self.device).item()
        target = (lhs + rhs) & 0xFF

        # Ripple the carry through all eight full adders, LSB first.
        carry = torch.zeros(pop_size, device=self.device)
        out_bits = []
        for bit in range(8):
            lhs_bit = ((lhs >> bit) & 1) * ones
            rhs_bit = ((rhs >> bit) & 1) * ones
            s_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
                                                lhs_bit, rhs_bit, carry)
            out_bits.append(s_bit)

        value = sum(out_bits[j] * (2**j) for j in range(8))
        scores += (value == target).float()

    if debug and pop_size == 1:
        print(f" Random Addition: {int(scores[0].item())}/{n_tests}")

    return scores, n_tests
|
|
|
| def _test_random_comparison(self, pop: Dict, n_tests: int = 50, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| RANDOMIZED COMPARISON - Random pairs tested against all comparators.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
| for _ in range(n_tests):
|
| a = torch.randint(0, 256, (1,), device=self.device).item()
|
| b = torch.randint(0, 256, (1,), device=self.device).item()
|
|
|
| a_bits = torch.tensor([(a >> (7-i)) & 1 for i in range(8)],
|
| device=self.device, dtype=torch.float32)
|
| b_bits = torch.tensor([(b >> (7-i)) & 1 for i in range(8)],
|
| device=self.device, dtype=torch.float32)
|
|
|
| for comp, op in [('greaterthan8bit', lambda x,y: x>y),
|
| ('lessthan8bit', lambda x,y: x<y),
|
| ('greaterorequal8bit', lambda x,y: x>=y),
|
| ('lessorequal8bit', lambda x,y: x<=y)]:
|
| diff = (a_bits - b_bits).unsqueeze(0).expand(pop_size, -1)
|
| if 'less' in comp:
|
| diff = (b_bits - a_bits).unsqueeze(0).expand(pop_size, -1)
|
| w = pop[f'arithmetic.{comp}.comparator'].view(pop_size, -1)
|
| score = (diff * w).sum(1)
|
| if 'equal' in comp:
|
| result = (score >= 0).float()
|
| else:
|
| result = (score > 0).float()
|
| expected = float(op(a, b))
|
| scores += (result == expected).float()
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Random Comparison: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
def _test_random_hamming(self, pop: Dict, n_tests: int = 20, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    RANDOMIZED HAMMING DISTANCE - Random pairs, verify XOR + popcount.

    The shared two-layer XOR gate is evaluated at every bit position and the
    resulting 8-bit XOR vector is fed through the popcount circuit; the
    output must equal the true Hamming distance of the pair.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)

    # XOR-gate and popcount parameters are identical for every pair and bit
    # position; view them once instead of 8 * n_tests times inside the loops.
    w1_n1 = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
    b1_n1 = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
    w1_n2 = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
    b1_n2 = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
    w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
    b2 = pop['boolean.xor.layer2.bias'].view(pop_size)
    w_pop = pop['pattern_recognition.popcount.weight'].view(pop_size, -1)
    b_pop = pop['pattern_recognition.popcount.bias'].view(pop_size)

    for _ in range(n_tests):
        a = torch.randint(0, 256, (1,), device=self.device).item()
        b = torch.randint(0, 256, (1,), device=self.device).item()
        expected = bin(a ^ b).count('1')

        a_bits = torch.tensor([(a >> (7-i)) & 1 for i in range(8)],
                              device=self.device, dtype=torch.float32)
        b_bits = torch.tensor([(b >> (7-i)) & 1 for i in range(8)],
                              device=self.device, dtype=torch.float32)

        # Bitwise XOR through the shared 2-2-1 threshold network.
        xor_bits = []
        for i in range(8):
            a_i = a_bits[i].unsqueeze(0).expand(pop_size)
            b_i = b_bits[i].unsqueeze(0).expand(pop_size)
            inp = torch.stack([a_i, b_i], dim=1)

            h1 = heaviside((inp * w1_n1).sum(1) + b1_n1)
            h2 = heaviside((inp * w1_n2).sum(1) + b1_n2)
            hidden = torch.stack([h1, h2], dim=1)
            xor_bits.append(heaviside((hidden * w2).sum(1) + b2))

        # Popcount of the XOR vector yields the Hamming distance.
        xor_stack = torch.stack(xor_bits, dim=1)
        hamming = (xor_stack * w_pop).sum(1) + b_pop

        scores += (hamming == expected).float()

    if debug and pop_size == 1:
        print(f" Random Hamming: {int(scores[0].item())}/{n_tests}")

    return scores, n_tests
|
|
|
| def _test_random_sorting(self, pop: Dict, n_tests: int = 15, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| RANDOMIZED SORTING - Random 4-element arrays through bitonic network.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
|
|
| def compare_swap_asc(a_val, b_val):
|
| a_bits = torch.stack([((a_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| b_bits = torch.stack([((b_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| diff = a_bits - b_bits
|
| w = pop['arithmetic.greaterthan8bit.comparator'].view(pop_size, -1)
|
| should_swap = ((diff * w).sum(1) > 0).float()
|
| return (a_val * (1 - should_swap) + b_val * should_swap,
|
| b_val * (1 - should_swap) + a_val * should_swap)
|
|
|
| def compare_swap_desc(a_val, b_val):
|
| a_bits = torch.stack([((a_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| b_bits = torch.stack([((b_val.long() >> (7-i)) & 1).float() for i in range(8)], dim=1)
|
| diff = b_bits - a_bits
|
| w = pop['arithmetic.lessthan8bit.comparator'].view(pop_size, -1)
|
| should_swap = ((diff * w).sum(1) > 0).float()
|
| return (a_val * (1 - should_swap) + b_val * should_swap,
|
| b_val * (1 - should_swap) + a_val * should_swap)
|
|
|
| for _ in range(n_tests):
|
| arr = [torch.randint(0, 256, (1,), device=self.device).item() for _ in range(4)]
|
| expected = sorted(arr)
|
|
|
| a = torch.full((pop_size,), float(arr[0]), device=self.device)
|
| b = torch.full((pop_size,), float(arr[1]), device=self.device)
|
| c = torch.full((pop_size,), float(arr[2]), device=self.device)
|
| d = torch.full((pop_size,), float(arr[3]), device=self.device)
|
|
|
| a, b = compare_swap_asc(a, b)
|
| c, d = compare_swap_desc(c, d)
|
| a, c = compare_swap_asc(a, c)
|
| b, d = compare_swap_asc(b, d)
|
| a, b = compare_swap_asc(a, b)
|
| c, d = compare_swap_asc(c, d)
|
|
|
| match = ((a == expected[0]) & (b == expected[1]) &
|
| (c == expected[2]) & (d == expected[3])).float()
|
| scores += match
|
|
|
| if debug and pop_size == 1:
|
| print(f" Random Sorting: {int(scores[0].item())}/{n_tests}")
|
|
|
| return scores, n_tests
|
|
|
def _test_random_modular(self, pop: Dict, n_tests: int = 30, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    RANDOMIZED MODULAR - Random values against random moduli.

    Draws random 8-bit values and random moduli, then checks the
    population's divisibility circuits against Python's ``%``.
    Power-of-two moduli use a single threshold layer; all other moduli use
    a three-layer network (range detectors -> equality -> OR).

    Returns:
        (scores, total_tests): per-individual pass counts and the test total.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    moduli = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    for _ in range(n_tests):
        val = torch.randint(0, 256, (1,), device=self.device).item()
        mod = moduli[torch.randint(0, len(moduli), (1,)).item()]
        expected = (val % mod == 0)  # ground truth: is val divisible by mod?

        # MSB-first bit decomposition of the test value, broadcast over the
        # whole population.
        bits = torch.tensor([(val >> (7-i)) & 1 for i in range(8)],
                            device=self.device, dtype=torch.float32)
        bits_exp = bits.unsqueeze(0).expand(pop_size, -1)

        if mod in [2, 4, 8]:
            # Power-of-two moduli: a single linear threshold over the bits.
            w = pop[f'modular.mod{mod}.weight'].view(pop_size, -1)
            b = pop[f'modular.mod{mod}.bias'].view(pop_size)
            result = heaviside((bits_exp * w).sum(1) + b)
        else:
            # General moduli: with bit weights reduced mod `mod`, the
            # weighted bit-sum is divisible iff it equals one of the
            # multiples of `mod` in [0, max_sum]; one detector per multiple.
            weights = [(2**(7-i)) % mod for i in range(8)]
            max_sum = sum(weights)
            divisible_sums = [k for k in range(0, max_sum + 1) if k % mod == 0]
            num_detectors = len(divisible_sums)

            # Layer 1: per-target (>= target, <= target) threshold pair.
            layer1_outputs = []
            for idx in range(num_detectors):
                w_geq = pop[f'modular.mod{mod}.layer1.geq{idx}.weight'].view(pop_size, -1)
                b_geq = pop[f'modular.mod{mod}.layer1.geq{idx}.bias'].view(pop_size)
                w_leq = pop[f'modular.mod{mod}.layer1.leq{idx}.weight'].view(pop_size, -1)
                b_leq = pop[f'modular.mod{mod}.layer1.leq{idx}.bias'].view(pop_size)
                geq = heaviside((bits_exp * w_geq).sum(1) + b_geq)
                leq = heaviside((bits_exp * w_leq).sum(1) + b_leq)
                layer1_outputs.append((geq, leq))

            # Layer 2: combine each (geq, leq) pair into an equality detector.
            layer2_outputs = []
            for idx in range(num_detectors):
                w_eq = pop[f'modular.mod{mod}.layer2.eq{idx}.weight'].view(pop_size, -1)
                b_eq = pop[f'modular.mod{mod}.layer2.eq{idx}.bias'].view(pop_size)
                geq, leq = layer1_outputs[idx]
                combined = torch.stack([geq, leq], dim=1)
                layer2_outputs.append(heaviside((combined * w_eq).sum(1) + b_eq))

            # Layer 3: OR across all equality detectors.
            layer2_stack = torch.stack(layer2_outputs, dim=1)
            w_or = pop[f'modular.mod{mod}.layer3.or.weight'].view(pop_size, -1)
            b_or = pop[f'modular.mod{mod}.layer3.or.bias'].view(pop_size)
            result = heaviside((layer2_stack * w_or).sum(1) + b_or)

        scores += (result == float(expected)).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Random Modular: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_random_cipher(self, pop: Dict, n_tests: int = 10, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    RANDOMIZED XOR CIPHER - Random plaintexts and keys.

    Encrypts random plaintexts with a random single-byte XOR key in plain
    Python, then decrypts them bit-by-bit through the population's XOR
    network; an individual scores a point only when every byte of a
    plaintext round-trips exactly.

    Args:
        pop: population dict of batched weight tensors (first dim = pop_size).
        n_tests: number of random plaintext/key pairs per evaluation.
        debug: print a summary when pop_size == 1.

    Returns:
        (scores, n_tests): per-individual pass counts and the test total.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)

    # The XOR network weights are loop-invariant: fetch the six batched
    # views once, instead of re-viewing them for every bit of every byte
    # (8x per byte in the previous version).
    w1_n1 = pop['boolean.xor.layer1.neuron1.weight'].view(pop_size, -1)
    b1_n1 = pop['boolean.xor.layer1.neuron1.bias'].view(pop_size)
    w1_n2 = pop['boolean.xor.layer1.neuron2.weight'].view(pop_size, -1)
    b1_n2 = pop['boolean.xor.layer1.neuron2.bias'].view(pop_size)
    w2 = pop['boolean.xor.layer2.weight'].view(pop_size, -1)
    b2 = pop['boolean.xor.layer2.bias'].view(pop_size)

    def xor_byte(byte_val, key_val):
        # XOR two bytes bit-by-bit through the evolved two-layer XOR gate,
        # then reassemble the value MSB-first.
        result_bits = []
        for i in range(8):
            b_bit = torch.full((pop_size,), float((byte_val >> (7-i)) & 1), device=self.device)
            k_bit = torch.full((pop_size,), float((key_val >> (7-i)) & 1), device=self.device)
            inp = torch.stack([b_bit, k_bit], dim=1)

            h1 = heaviside((inp * w1_n1).sum(1) + b1_n1)
            h2 = heaviside((inp * w1_n2).sum(1) + b1_n2)
            hidden = torch.stack([h1, h2], dim=1)
            result_bits.append(heaviside((hidden * w2).sum(1) + b2))

        return sum(result_bits[i] * (2**(7-i)) for i in range(8))

    for _ in range(n_tests):
        plaintext_len = torch.randint(2, 8, (1,)).item()
        plaintext = [torch.randint(0, 256, (1,)).item() for _ in range(plaintext_len)]
        key = torch.randint(0, 256, (1,)).item()

        all_match = torch.ones(pop_size, device=self.device)
        for byte in plaintext:
            cipher = byte ^ key  # encryption side is done in plain Python
            decrypted = xor_byte(cipher, key)
            all_match *= (decrypted == byte).float()
        scores += all_match

    if debug and pop_size == 1:
        print(f" Random Cipher: {int(scores[0].item())}/{n_tests}")

    return scores, n_tests
|
|
|
def _test_all_randomized(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """Run all randomized tests - fresh cases each evaluation."""
    pop_size = next(iter(pop.values())).shape[0]
    total_scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    if debug:
        print("\n=== RANDOMIZED TESTS (Fresh Cases Each Eval) ===")

    # (suite, keyword-arguments) pairs, executed in the original fixed order.
    suites = [
        (self._test_random_fibonacci, {'n_sequences': 5}),
        (self._test_random_addition, {'n_tests': 50}),
        (self._test_random_comparison, {'n_tests': 50}),
        (self._test_random_hamming, {'n_tests': 20}),
        (self._test_random_sorting, {'n_tests': 15}),
        (self._test_random_modular, {'n_tests': 30}),
        (self._test_random_cipher, {'n_tests': 10}),
    ]
    for suite, kwargs in suites:
        sub_scores, sub_tests = suite(pop, debug=debug, **kwargs)
        total_scores += sub_scores
        total_tests += sub_tests

    if debug and pop_size == 1:
        print(f" TOTAL RANDOMIZED: {int(total_scores[0].item())}/{total_tests}")

    return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _eval_not_single(self, pop: Dict, inp: torch.Tensor) -> torch.Tensor:
    """Evaluate NOT gate for a single input."""
    n = next(iter(pop.values())).shape[0]
    weight = pop['boolean.not.weight'].view(n, -1)
    bias = pop['boolean.not.bias'].view(n)
    # Single threshold neuron: sign of the affine pre-activation.
    pre_activation = (inp.unsqueeze(1) * weight).sum(dim=1) + bias
    return heaviside(pre_activation)
|
|
|
def _eval_and_single(self, pop: Dict, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Evaluate AND gate for two inputs."""
    n = next(iter(pop.values())).shape[0]
    stacked = torch.stack((a, b), dim=1)
    weight = pop['boolean.and.weight'].view(n, -1)
    bias = pop['boolean.and.bias'].view(n)
    # Single threshold neuron over the stacked pair.
    return heaviside((stacked * weight).sum(dim=1) + bias)
|
|
|
def _eval_or_single(self, pop: Dict, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Evaluate OR gate for two inputs."""
    n = next(iter(pop.values())).shape[0]
    stacked = torch.stack((a, b), dim=1)
    weight = pop['boolean.or.weight'].view(n, -1)
    bias = pop['boolean.or.bias'].view(n)
    # Single threshold neuron over the stacked pair.
    return heaviside((stacked * weight).sum(dim=1) + bias)
|
|
|
def _eval_xor_single(self, pop: Dict, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Evaluate XOR gate for two inputs (two-layer threshold network)."""
    n = next(iter(pop.values())).shape[0]
    stacked = torch.stack((a, b), dim=1)

    # Hidden layer: two threshold neurons over the input pair.
    hidden_a = heaviside(
        (stacked * pop['boolean.xor.layer1.neuron1.weight'].view(n, -1)).sum(dim=1)
        + pop['boolean.xor.layer1.neuron1.bias'].view(n))
    hidden_b = heaviside(
        (stacked * pop['boolean.xor.layer1.neuron2.weight'].view(n, -1)).sum(dim=1)
        + pop['boolean.xor.layer1.neuron2.bias'].view(n))

    # Output layer: one threshold neuron over the hidden pair.
    hidden = torch.stack((hidden_a, hidden_b), dim=1)
    out_w = pop['boolean.xor.layer2.weight'].view(n, -1)
    out_b = pop['boolean.xor.layer2.bias'].view(n)
    return heaviside((hidden * out_w).sum(dim=1) + out_b)
|
|
|
def _test_rule110(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    RULE 110 CELLULAR AUTOMATON - Turing-complete 1D CA.
    Tests: Rule 110: next = (center XOR right) OR (NOT left AND center)
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Full Rule 110 truth table, keyed by the (left, center, right) neighborhood.
    rule110 = {(0,0,0): 0, (0,0,1): 1, (0,1,0): 1, (0,1,1): 1,
               (1,0,0): 0, (1,0,1): 1, (1,1,0): 1, (1,1,1): 0}

    for neighborhood, exp in rule110.items():
        l, c, r = (torch.full((pop_size,), float(v), device=self.device)
                   for v in neighborhood)

        # Compose the CA update rule out of the evolved primitive gates.
        result = self._eval_or_single(
            pop,
            self._eval_xor_single(pop, c, r),
            self._eval_and_single(pop, self._eval_not_single(pop, l), c),
        )

        scores += (result == exp).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Rule 110: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_turing_completeness(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """Run all Turing completeness tests."""
    pop_size = next(iter(pop.values())).shape[0]
    total_scores = torch.zeros(pop_size, device=self.device)

    if debug:
        print("\n=== TURING COMPLETENESS TESTS ===")

    # Currently the suite consists of the Rule 110 check only.
    sub_scores, total_tests = self._test_rule110(pop, debug)
    total_scores += sub_scores

    if debug and pop_size == 1:
        print(f" TOTAL TURING: {int(total_scores[0].item())}/{total_tests}")

    return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _test_factorial(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    FACTORIAL COMPUTATION - Tests repeated addition (multiplication emulation).

    Computes n! for n = 1..5 entirely through the evolved 8-bit ripple-carry
    adder: each multiplication step (result * i) is emulated by adding
    `result` into an accumulator i times.  Arithmetic wraps modulo 256; all
    expected values fit in 8 bits, so no correct run overflows.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Ground truth: n -> n!
    factorials = {1: 1, 2: 2, 3: 6, 4: 24, 5: 120}

    for n, expected in factorials.items():
        # Running product, starts at 1 (so n = 1 scores without any addition).
        result = torch.ones(pop_size, device=self.device)
        for i in range(2, n + 1):
            # Emulate result * i by i repeated additions of `result`.
            new_result = torch.zeros(pop_size, device=self.device)
            for _ in range(i):
                # One 8-bit ripple-carry addition of `result` into the
                # accumulator, LSB first, through the evolved full adders.
                carry = torch.zeros(pop_size, device=self.device)
                sum_bits = []
                for bit in range(8):
                    a_bit = ((new_result.long() >> bit) & 1).float()
                    b_bit = ((result.long() >> bit) & 1).float()
                    sum_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
                                                          a_bit, b_bit, carry)
                    sum_bits.append(sum_bit)
                new_result = sum(sum_bits[j] * (2**j) for j in range(8))
                # Final carry-out is discarded: 8-bit wrap-around.
                new_result = new_result % 256
            result = new_result

        scores += (result == expected).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Factorial: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_gcd(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    GCD COMPUTATION - Tests subtraction and comparison loops.

    NOTE(review): despite the docstring, this test never evaluates any
    circuit from `pop`.  It only checks, in plain Python, that the expected
    GCD divides both inputs - which holds for every hard-coded case, so all
    individuals always score 3/3.  Presumably this is a placeholder for a
    circuit-driven Euclidean loop; confirm before relying on it as a
    fitness signal.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # (a, b, gcd(a, b)) triples.
    test_cases = [(48, 18, 6), (100, 35, 5), (252, 105, 21)]

    for a_val, b_val, expected in test_cases:
        # Sanity check only: `expected` divides both operands.
        if (a_val % expected == 0) and (b_val % expected == 0):
            scores += 1.0
        total_tests += 1

    if debug and pop_size == 1:
        print(f" GCD: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_lfsr(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    LFSR (Linear Feedback Shift Register) - Tests XOR chain for period 255.

    Runs an 8-bit LFSR (feedback = XOR of bits 0, 2, 3, 4, via the evolved
    XOR gate) from state 1 for up to 260 steps and awards a point when the
    cycle length is at least 254 (a register that never returns within 260
    steps also passes, matching the original states_seen accounting).

    Fix: the previous version only tracked the cycle for pop_size == 1; for
    batched populations the counter always reached 261 and EVERY individual
    passed unconditionally.  The first-return step is now tracked per
    individual, so batched scoring is meaningful.  pop_size == 1 behavior
    is unchanged.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Every individual's register starts at state 1.
    state = torch.ones(pop_size, device=self.device)
    # Step (1-indexed) at which each register first returns to state 1;
    # 0 means "not returned yet".
    first_return = torch.zeros(pop_size, dtype=torch.long, device=self.device)

    for step in range(1, 261):
        bit0 = ((state.long() >> 0) & 1).float()
        bit2 = ((state.long() >> 2) & 1).float()
        bit3 = ((state.long() >> 3) & 1).float()
        bit4 = ((state.long() >> 4) & 1).float()

        # Feedback bit = bit0 XOR bit2 XOR bit3 XOR bit4, chained through
        # the evolved XOR gate.
        fb = self._eval_xor_single(pop, bit0, bit2)
        fb = self._eval_xor_single(pop, fb, bit3)
        fb = self._eval_xor_single(pop, fb, bit4)

        # Shift right, feedback enters at the MSB.
        new_state = ((state.long() >> 1) | (fb.long() << 7)) & 0xFF
        state = new_state.float()

        # Record the first step at which each register closes its cycle.
        returned = (state == 1) & (first_return == 0)
        first_return = torch.where(returned,
                                   torch.full_like(first_return, step),
                                   first_return)

        if bool((first_return > 0).all()):
            break  # every register has closed its cycle

    # A register that never returned keeps the original "261" accounting
    # (260 steps + the initial state).
    period = torch.where(first_return == 0,
                         torch.full_like(first_return, 261),
                         first_return)

    # Pass when the cycle is (nearly) maximal - same >= 254 criterion as before.
    scores += (period >= 254).float()
    total_tests += 1

    if debug and pop_size == 1:
        print(f" LFSR period: {int(period[0].item())} (expected 255)")

    return scores, total_tests
|
|
|
def _test_all_stress(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """Run all stress tests."""
    pop_size = next(iter(pop.values())).shape[0]
    total_scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    if debug:
        print("\n=== STRESS TESTS ===")

    # Factorial, GCD and LFSR sub-suites, in the original order.
    for suite in (self._test_factorial, self._test_gcd, self._test_lfsr):
        sub_scores, sub_tests = suite(pop, debug)
        total_scores += sub_scores
        total_tests += sub_tests

    if debug and pop_size == 1:
        print(f" TOTAL STRESS: {int(total_scores[0].item())}/{total_tests}")

    return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _test_self_checksum(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    CRYPTOGRAPHIC SELF-TEST - Compute checksum of weights using the circuits.

    NOTE(review): the checksums below are computed in plain Python, not
    through the evolved circuits, and they only look at individual 0's
    weights.  The first point is awarded unconditionally and the second
    whenever both checksums are non-zero, so this test barely constrains
    fitness - confirm whether a circuit-driven version was intended.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Serialize individual 0's weights into bytes: truncate each float
    # toward zero, map negatives into 0..255, then mask to 8 bits.
    all_bytes = []
    for key in sorted(pop.keys()):
        for val in pop[key][0].flatten().tolist():
            int_val = int(val)
            if int_val < 0:
                int_val = 256 + int_val
            all_bytes.append(int_val & 0xFF)

    # Reference checksums: modular byte sum and running XOR.
    python_sum = sum(all_bytes) % 256
    python_xor = 0
    for byte in all_bytes:
        python_xor ^= byte

    # First point: awarded unconditionally (checksum computation completed).
    scores += 1.0
    total_tests += 1

    # Second point: both checksums are non-zero.
    if python_sum > 0 and python_xor > 0:
        scores += 1.0
    total_tests += 1

    if debug and pop_size == 1:
        print(f" Self-Checksum: sum={python_sum}, xor={python_xor}")

    return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _test_gate_reconstruction(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    GATE RECONSTRUCTION - Verify truth tables can be derived from weights.

    Evaluates each single-layer gate on all four 2-input combinations and
    awards a point per gate only when the full truth table matches.

    Improvement: the four input rows are evaluated in one batched matmul
    per gate instead of a Python loop over individual inputs (identical
    results, one kernel launch instead of four).
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    # Expected truth table for each gate, in (0,0),(0,1),(1,0),(1,1) order.
    gates_tt = {
        'and': [0, 0, 0, 1],
        'or': [0, 1, 1, 1],
        'nand': [1, 1, 1, 0],
        'nor': [1, 0, 0, 0],
    }

    # All four 2-bit input combinations as one (4, 2) batch.
    inputs = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]],
                          device=self.device)

    for gate, expected_tt in gates_tt.items():
        w = pop[f'boolean.{gate}.weight'].view(pop_size, -1)
        b = pop[f'boolean.{gate}.bias'].view(pop_size)
        expected = torch.tensor(expected_tt, device=self.device,
                                dtype=torch.float32)

        out = heaviside(inputs @ w.T + b)                       # (4, pop_size)
        # A gate passes only if every row of its truth table matches.
        all_correct = (out == expected.unsqueeze(1)).all(dim=0).float()

        scores += all_correct
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Gate Reconstruction: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
| def _test_independence(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
|
| """
|
| INDEPENDENCE - Verify weights can be derived from specification.
|
| """
|
| pop_size = next(iter(pop.values())).shape[0]
|
| scores = torch.zeros(pop_size, device=self.device)
|
| total_tests = 0
|
|
|
|
|
| derivations = {
|
| 'and': ([1.0, 1.0], -2.0),
|
| 'or': ([1.0, 1.0], -1.0),
|
| 'nand': ([-1.0, -1.0], 1.0),
|
| 'nor': ([-1.0, -1.0], 0.0),
|
| }
|
|
|
| for gate, (expected_w, expected_b) in derivations.items():
|
| w = pop[f'boolean.{gate}.weight'][0].flatten().tolist()
|
| b = pop[f'boolean.{gate}.bias'][0].item()
|
|
|
| if w == expected_w and b == expected_b:
|
| scores += 1.0
|
| total_tests += 1
|
|
|
| if debug and pop_size == 1:
|
| print(f" Independence: {int(scores[0].item())}/{total_tests}")
|
|
|
| return scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def _test_overflow_chains(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """
    OVERFLOW CHAINS - Test chained arithmetic with wrap-around.

    Both cases overflow the 8-bit ripple-carry adder and must wrap to 0:
    255 + 1 and 128 + 128.  The two previously duplicated addition stanzas
    are folded into one local helper.
    """
    pop_size = next(iter(pop.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    def add8(a, b):
        # 8-bit ripple-carry addition through the evolved full adders,
        # LSB first; the final carry-out is discarded (mod-256 wrap).
        carry = torch.zeros(pop_size, device=self.device)
        sum_bits = []
        for bit in range(8):
            a_bit = ((a.long() >> bit) & 1).float()
            b_bit = ((b.long() >> bit) & 1).float()
            sum_bit, carry = self._eval_single_fa(pop, f'arithmetic.ripplecarry8bit.fa{bit}',
                                                  a_bit, b_bit, carry)
            sum_bits.append(sum_bit)
        return sum(sum_bits[j] * (2**j) for j in range(8))

    # Each pair must wrap to exactly 0 in 8-bit arithmetic.
    for a_val, b_val in [(255.0, 1.0), (128.0, 128.0)]:
        a = torch.full((pop_size,), a_val, device=self.device)
        b = torch.full((pop_size,), b_val, device=self.device)
        result = add8(a, b)
        scores += (result == 0).float()
        total_tests += 1

    if debug and pop_size == 1:
        print(f" Overflow Chains: {int(scores[0].item())}/{total_tests}")

    return scores, total_tests
|
|
|
def _test_all_verification(self, pop: Dict, debug: bool = False) -> Tuple[torch.Tensor, int]:
    """Run all verification tests from test_all.py."""
    pop_size = next(iter(pop.values())).shape[0]
    total_scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0

    if debug:
        print("\n=== VERIFICATION TESTS ===")

    # Sub-suites, executed in the original fixed order.
    for suite in (self._test_turing_completeness,
                  self._test_all_stress,
                  self._test_self_checksum,
                  self._test_gate_reconstruction,
                  self._test_independence,
                  self._test_overflow_chains):
        sub_scores, sub_tests = suite(pop, debug)
        total_scores += sub_scores
        total_tests += sub_tests

    if debug and pop_size == 1:
        print(f" TOTAL VERIFICATION: {int(total_scores[0].item())}/{total_tests}")

    return total_scores, total_tests
|
|
|
|
|
|
|
|
|
|
|
def evaluate(self, population: Dict[str, torch.Tensor], debug: bool = False) -> torch.Tensor:
    """Evaluate fitness for entire population.

    Runs every circuit test category in a fixed order, accumulating a
    per-individual pass count in `scores` and the test total in
    `total_tests`.  Per-category results are recorded in
    `self.category_scores` as (points for individual 0, max points) -
    the per-category breakdown is only meaningful for pop_size == 1.

    Returns:
        (pop_size,) tensor of fitness values in [0, 1]: passed / total.
    """
    pop_size = next(iter(population.values())).shape[0]
    scores = torch.zeros(pop_size, device=self.device)
    total_tests = 0
    self.category_scores = {}

    # ---- Boolean gates -------------------------------------------------
    # `cat_start` snapshots `scores` so the category delta can be reported.
    cat_start = scores.clone()
    cat_tests = 0
    for gate in ['and', 'or', 'nand', 'nor']:
        scores += self._test_single_gate(population, gate, self.tt2, self.expected[gate])
        total_tests += 4
        cat_tests += 4

    # NOT gate: single-input threshold, evaluated directly on both inputs.
    w = population['boolean.not.weight'].view(pop_size, -1)
    b = population['boolean.not.bias'].view(pop_size)
    out = heaviside(self.not_inputs @ w.T + b)
    scores += (out == self.expected['not'].unsqueeze(1)).float().sum(0)
    total_tests += 2
    cat_tests += 2

    scores += self._test_single_gate(population, 'implies', self.tt2, self.expected['implies'])
    total_tests += 4
    cat_tests += 4

    # Two-layer gates (XOR family): 4 tests each.
    scores += self._test_twolayer_gate(population, 'boolean.xor', self.tt2, self.expected['xor'])
    scores += self._test_twolayer_gate(population, 'boolean.xnor', self.tt2, self.expected['xnor'])
    scores += self._test_twolayer_gate(population, 'boolean.biimplies', self.tt2, self.expected['biimplies'])
    total_tests += 12
    cat_tests += 12
    self.category_scores['boolean'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Arithmetic: adders and multiplier -----------------------------
    # `sub_start` snapshots give a finer-grained breakdown within the category.
    cat_start = scores.clone()
    cat_tests = 0

    sub_start = scores.clone()
    scores += self._test_halfadder(population)
    total_tests += 8
    cat_tests += 8
    self.category_scores['halfadder'] = ((scores - sub_start)[0].item(), 8)

    sub_start = scores.clone()
    scores += self._test_fulladder(population)
    total_tests += 16
    cat_tests += 16
    self.category_scores['fulladder'] = ((scores - sub_start)[0].item(), 16)

    # 2-bit ripple-carry: exhaustive 4x4 operand grid.
    sub_start = scores.clone()
    rc2_tests = [(a, b) for a in range(4) for b in range(4)]
    scores += self._test_ripplecarry(population, 2, rc2_tests)
    total_tests += 16
    cat_tests += 16
    self.category_scores['ripplecarry2'] = ((scores - sub_start)[0].item(), 16)

    # 4-bit ripple-carry: exhaustive 16x16 operand grid.
    sub_start = scores.clone()
    rc4_tests = [(a, b) for a in range(16) for b in range(16)]
    scores += self._test_ripplecarry(population, 4, rc4_tests)
    total_tests += 256
    cat_tests += 256
    self.category_scores['ripplecarry4'] = ((scores - sub_start)[0].item(), 256)

    # 8-bit ripple-carry: curated edge cases (carry chains, wrap, patterns).
    sub_start = scores.clone()
    rc8_tests = [(0,0), (1,1), (127,128), (255,1), (128,127), (255,255),
                 (0xAA, 0x55), (0x0F, 0xF0), (100, 155), (200, 55)]
    scores += self._test_ripplecarry(population, 8, rc8_tests)
    total_tests += len(rc8_tests)
    cat_tests += len(rc8_tests)
    self.category_scores['ripplecarry8'] = ((scores - sub_start)[0].item(), len(rc8_tests))

    sub_start = scores.clone()
    scores += self._test_multiplier8x8(population, debug=debug)
    total_tests += 62
    cat_tests += 62
    self.category_scores['multiplier8x8'] = ((scores - sub_start)[0].item(), 62)

    self.category_scores['arithmetic_adders'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Comparators ---------------------------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    scores += self._test_comparator(population, 'greaterthan8bit', 'gt')
    scores += self._test_comparator(population, 'lessthan8bit', 'lt')
    scores += self._test_comparator(population, 'greaterorequal8bit', 'geq')
    scores += self._test_comparator(population, 'lessorequal8bit', 'leq')
    total_tests += 4 * len(self.comp_a)
    cat_tests += 4 * len(self.comp_a)

    scores += self._test_equality(population)
    total_tests += len(self.comp_a)
    cat_tests += len(self.comp_a)
    self.category_scores['comparators'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Threshold (k-of-n) circuits -----------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    # enumerate(..., 1): k runs 1..8 in lockstep with the circuit names.
    for k, name in enumerate(['oneoutof8', 'twooutof8', 'threeoutof8', 'fouroutof8',
                              'fiveoutof8', 'sixoutof8', 'sevenoutof8', 'alloutof8'], 1):
        scores += self._test_threshold_kofn(population, k, name)
        total_tests += len(self.test_8bit)
        cat_tests += len(self.test_8bit)

    scores += self._test_majority(population)
    scores += self._test_minority(population)
    total_tests += 2 * len(self.test_8bit)
    cat_tests += 2 * len(self.test_8bit)

    scores += self._test_atleastk(population, 4)
    scores += self._test_atmostk(population, 4)
    scores += self._test_exactlyk(population, 4)
    total_tests += 3 * len(self.test_8bit)
    cat_tests += 3 * len(self.test_8bit)
    self.category_scores['threshold'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Pattern recognition -------------------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    scores += self._test_popcount(population)
    scores += self._test_allzeros(population)
    scores += self._test_allones(population)
    total_tests += 3 * len(self.test_8bit)
    cat_tests += 3 * len(self.test_8bit)
    self.category_scores['pattern'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Error detection (parity) --------------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    scores += self._test_parity(population, 'paritychecker8bit', True)
    scores += self._test_parity(population, 'paritygenerator8bit', True)
    total_tests += 2 * len(self.test_8bit)
    cat_tests += 2 * len(self.test_8bit)
    self.category_scores['error_detection'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Modular (divisibility) circuits -------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    for mod in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
        scores += self._test_modular(population, mod)
        total_tests += len(self.mod_test)
        cat_tests += len(self.mod_test)
    self.category_scores['modular'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Combinational building blocks ---------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    scores += self._test_mux2to1(population)
    total_tests += 8
    cat_tests += 8

    scores += self._test_decoder3to8(population)
    total_tests += 64
    cat_tests += 64

    scores += self._test_encoder8to3(population)
    total_tests += 24
    cat_tests += 24
    self.category_scores['combinational'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Control flow (conditional jumps) ------------------------------
    cat_start = scores.clone()
    cat_tests = 0
    for ctrl in ['conditionaljump', 'jz', 'jnz', 'jc', 'jnc', 'jn', 'jp', 'jv', 'jnv']:
        scores += self._test_conditional_jump(population, ctrl)
        total_tests += 6 * 8
        cat_tests += 6 * 8
    self.category_scores['control_flow'] = ((scores - cat_start)[0].item(), cat_tests)

    # ---- Composite suites (games, bespoke, ALU/CPU, randomized, verify) --
    # These return (scores, n_tests) pairs; individual-0 breakdown is only
    # recorded when pop_size == 1.
    game_scores, game_tests = self._test_all_games(population, debug)
    scores += game_scores
    total_tests += game_tests
    self.category_scores['games'] = (game_scores[0].item() if pop_size == 1 else 0, game_tests)

    bespoke_scores, bespoke_tests = self._test_all_bespoke(population, debug)
    scores += bespoke_scores
    total_tests += bespoke_tests
    self.category_scores['bespoke'] = (bespoke_scores[0].item() if pop_size == 1 else 0, bespoke_tests)

    alu_cpu_scores, alu_cpu_tests = self._test_all_alu_cpu(population, debug)
    scores += alu_cpu_scores
    total_tests += alu_cpu_tests
    self.category_scores['alu_cpu'] = (alu_cpu_scores[0].item() if pop_size == 1 else 0, alu_cpu_tests)

    random_scores, random_tests = self._test_all_randomized(population, debug)
    scores += random_scores
    total_tests += random_tests
    self.category_scores['randomized'] = (random_scores[0].item() if pop_size == 1 else 0, random_tests)

    verify_scores, verify_tests = self._test_all_verification(population, debug)
    scores += verify_scores
    total_tests += verify_tests
    self.category_scores['verification'] = (verify_scores[0].item() if pop_size == 1 else 0, verify_tests)

    self.total_tests = total_tests

    if debug and pop_size == 1:
        print("\n=== DEBUG: Per-category results ===")
        for cat, (got, expected) in self.category_scores.items():
            status = "PASS" if got == expected else "FAIL"
            print(f" {cat}: {int(got)}/{expected} [{status}]")

    # Normalize to [0, 1].
    return scores / total_tests
|
|
|
|
|
def create_population(base_tensors: Dict[str, torch.Tensor],
                      pop_size: int,
                      device='cuda') -> Dict[str, torch.Tensor]:
    """Create population by replicating base tensors.

    Args:
        base_tensors: name -> unbatched weight tensor.
        pop_size: number of individuals; becomes the leading dimension.
        device: target device for the population tensors.

    Returns:
        name -> tensor of shape (pop_size, *weight.shape); each individual
        is an independent, writable copy of the base weights.
    """
    population = {}
    for name, weight in base_tensors.items():
        # Move the SMALL base tensor to the target device first: expand() is
        # a zero-copy view, so clone() then materializes the pop_size copies
        # directly on `device`.  (The previous order cloned the full replica
        # on the source device and transferred that large tensor afterwards.)
        base = weight.to(device)
        population[name] = base.unsqueeze(0).expand(pop_size, *base.shape).clone()
    return population
|
|
|
|
|
if __name__ == "__main__":
    import time
    import argparse

    # Command-line interface for running the suite standalone.
    cli = argparse.ArgumentParser(description='Iron Eval - Threshold Computer Test Suite')
    cli.add_argument('--training', action='store_true',
                     help='Training mode: enable batched population evaluation')
    cli.add_argument('--pop_size', type=int, default=1,
                     help='Population size for training mode (default: 1)')
    cli.add_argument('--device', type=str, default='cuda',
                     help='Device to use: cuda or cpu (default: cuda)')
    cli.add_argument('--quiet', action='store_true',
                     help='Suppress detailed output')
    opts = cli.parse_args()

    # Training mode with an unspecified population defaults to 10k individuals.
    if opts.training and opts.pop_size == 1:
        opts.pop_size = 10000

    banner = "=" * 70
    mode_line = (f" IRON EVAL - TRAINING MODE (pop_size={opts.pop_size})"
                 if opts.training else " IRON EVAL - EVALUATION MODE")
    print(banner)
    print(mode_line)
    print(banner)

    print("\nLoading model...")
    base_weights = load_model()
    print(f"Loaded {len(base_weights)} tensors, {sum(t.numel() for t in base_weights.values())} params")

    print(f"\nInitializing evaluator on {opts.device}...")
    fitness_eval = BatchedFitnessEvaluator(device=opts.device)

    print(f"\nCreating population (size {opts.pop_size})...")
    batch = create_population(base_weights, pop_size=opts.pop_size, device=opts.device)

    print("\nRunning evaluation...")
    on_cuda = opts.device == 'cuda'
    if on_cuda:
        torch.cuda.synchronize()
    t0 = time.perf_counter()
    fitness = fitness_eval.evaluate(batch, debug=not opts.quiet)
    if on_cuda:
        # Ensure all queued GPU work is finished before stopping the clock.
        torch.cuda.synchronize()
    elapsed = time.perf_counter() - t0

    print(f"\nResults:")
    if opts.training:
        print(f" Mean Fitness: {fitness.mean().item():.6f}")
        print(f" Min Fitness: {fitness.min().item():.6f}")
        print(f" Max Fitness: {fitness.max().item():.6f}")
    else:
        print(f" Fitness: {fitness[0]:.6f}")
    print(f" Total tests: {fitness_eval.total_tests}")
    print(f" Time: {elapsed*1000:.2f} ms")
    if opts.training:
        print(f" Throughput: {opts.pop_size / elapsed:.0f} evals/sec")

    if opts.training:
        perfect = (fitness == 1.0).sum().item()
        print(f"\n Perfect individuals: {perfect}/{opts.pop_size}")
    elif fitness[0] == 1.0:
        print("\n STATUS: PASS - All circuits functional")
    else:
        failed = int((1 - fitness[0]) * fitness_eval.total_tests)
        print(f"\n STATUS: FAIL - {failed} tests failed")
|
|
|