# threshold-hamming1511-encoder / create_safetensors.py
# Author: CharlesCNorton
# Commit: Add Hamming(15,11) encoder threshold circuit (7310771)
import torch
from safetensors.torch import save_file
# Global store mapping tensor names -> threshold-gate weights/biases.
# Populated by the helpers below and written out via save_file at the end.
weights = {}
# Hamming(15,11) Encoder
# 11 data bits (d1-d11) -> 15 coded bits (4 parity + 11 data)
#
# Bit positions:
# 1=p1, 2=p2, 3=d1, 4=p4, 5=d2, 6=d3, 7=d4, 8=p8, 9=d5, 10=d6, 11=d7, 12=d8, 13=d9, 14=d10, 15=d11
#
# Parity equations (XOR of data bits where position has that parity bit set):
# p1 covers positions with bit 0 set: 1,3,5,7,9,11,13,15 -> d1,d2,d4,d5,d7,d9,d11
# p2 covers positions with bit 1 set: 2,3,6,7,10,11,14,15 -> d1,d3,d4,d6,d7,d10,d11
# p4 covers positions with bit 2 set: 4,5,6,7,12,13,14,15 -> d2,d3,d4,d8,d9,d10,d11
# p8 covers positions with bit 3 set: 8,9,10,11,12,13,14,15 -> d5,d6,d7,d8,d9,d10,d11
#
# Each parity bit is a 7-way XOR, implemented as a tree of 2-way XORs.
# XOR7 = XOR(XOR4(a,b,c,d), XOR3(e,f,g))
# XOR4(a,b,c,d) = XOR(XOR(a,b), XOR(c,d))
# XOR3(e,f,g) = XOR(XOR(e,f), g)
#
# Input indices: d1=0, d2=1, d3=2, d4=3, d5=4, d6=5, d7=6, d8=7, d9=8, d10=9, d11=10
def add_xor2_weights(prefix, idx_a, idx_b, total_inputs, out=None):
    """Add threshold-gate weights computing XOR of two direct inputs.

    XOR(a, b) is realised as AND(OR(a, b), NAND(a, b)) with three
    threshold neurons (assumed convention: output 1 iff w.x + b >= 0):
      * ``or``:   a + b - 1 >= 0        (fires when at least one input set)
      * ``nand``: -a - b + 1 >= 0       (fires unless both inputs set)
      * ``and``:  2-input gate over the two neurons above, bias -2

    Args:
        prefix: key prefix for the six tensors added, e.g. ``'p1.x12'``.
        idx_a: index of the first operand within the input vector.
        idx_b: index of the second operand within the input vector.
        total_inputs: width of the input vector (row length of or/nand).
        out: dict to receive the tensors; defaults to the module-level
            ``weights`` dict, preserving the original behavior.
    """
    if out is None:
        out = weights
    # OR neuron: reads the full input vector, selects the two operands.
    w_or = [0.0] * total_inputs
    w_or[idx_a] = 1.0
    w_or[idx_b] = 1.0
    out[f'{prefix}.or.weight'] = torch.tensor([w_or], dtype=torch.float32)
    out[f'{prefix}.or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
    # NAND neuron: negated operand weights, bias +1.
    w_nand = [0.0] * total_inputs
    w_nand[idx_a] = -1.0
    w_nand[idx_b] = -1.0
    out[f'{prefix}.nand.weight'] = torch.tensor([w_nand], dtype=torch.float32)
    out[f'{prefix}.nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
    # AND neuron: combines the OR and NAND outputs (2-wide input).
    out[f'{prefix}.and.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
    out[f'{prefix}.and.bias'] = torch.tensor([-2.0], dtype=torch.float32)
def add_xor2_stage_weights(prefix, out=None):
    """Add threshold-gate weights for XOR of two intermediate signals.

    Same OR/NAND/AND decomposition as ``add_xor2_weights``, but every
    neuron reads a 2-wide input (the two upstream signals) instead of
    selecting from the full data vector.

    Args:
        prefix: key prefix for the six tensors added, e.g. ``'p1.final'``.
        out: dict to receive the tensors; defaults to the module-level
            ``weights`` dict, preserving the original behavior.
    """
    if out is None:
        out = weights
    out[f'{prefix}.or.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
    out[f'{prefix}.or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
    out[f'{prefix}.nand.weight'] = torch.tensor([[-1.0, -1.0]], dtype=torch.float32)
    out[f'{prefix}.nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
    out[f'{prefix}.and.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
    out[f'{prefix}.and.bias'] = torch.tensor([-2.0], dtype=torch.float32)
# Build the four parity trees. Each parity bit XORs seven data bits:
#   ((a^b) ^ (c^d)) ^ ((e^f) ^ g)
# Pairwise gates read directly from the 11 data inputs; stage/final gates
# combine intermediate signals. NOTE(review): the bare seventh input
# (e.g. d11 feeding p1.x79_11) is not represented in these weights —
# the wiring must be handled by whatever loads this file; confirm there.

# p1 = d1 ^ d2 ^ d4 ^ d5 ^ d7 ^ d9 ^ d11 (input indices 0,1,3,4,6,8,10)
add_xor2_weights('p1.x12', 0, 1, 11)    # d1 ^ d2
add_xor2_weights('p1.x45', 3, 4, 11)    # d4 ^ d5
add_xor2_weights('p1.x79', 6, 8, 11)    # d7 ^ d9
add_xor2_stage_weights('p1.x1245')      # (d1^d2) ^ (d4^d5)
add_xor2_stage_weights('p1.x79_11')     # (d7^d9) ^ d11
add_xor2_stage_weights('p1.final')      # combine the two halves

# p2 = d1 ^ d3 ^ d4 ^ d6 ^ d7 ^ d10 ^ d11 (indices 0,2,3,5,6,9,10)
add_xor2_weights('p2.x13', 0, 2, 11)    # d1 ^ d3
add_xor2_weights('p2.x46', 3, 5, 11)    # d4 ^ d6
add_xor2_weights('p2.x7_10', 6, 9, 11)  # d7 ^ d10
add_xor2_stage_weights('p2.x1346')
add_xor2_stage_weights('p2.x7_10_11')
add_xor2_stage_weights('p2.final')

# p4 = d2 ^ d3 ^ d4 ^ d8 ^ d9 ^ d10 ^ d11 (indices 1,2,3,7,8,9,10)
# Fix: the original also emitted a stray 'p4.x34' gate (d3^d4) that is
# not part of this parity tree; it has been removed so the saved file
# contains only the gates the tree actually uses.
add_xor2_weights('p4.x23', 1, 2, 11)    # d2 ^ d3
add_xor2_weights('p4.x48', 3, 7, 11)    # d4 ^ d8
add_xor2_weights('p4.x9_10', 8, 9, 11)  # d9 ^ d10
add_xor2_stage_weights('p4.x2348')
add_xor2_stage_weights('p4.x9_10_11')
add_xor2_stage_weights('p4.final')

# p8 = d5 ^ d6 ^ d7 ^ d8 ^ d9 ^ d10 ^ d11 (indices 4,5,6,7,8,9,10)
add_xor2_weights('p8.x56', 4, 5, 11)    # d5 ^ d6
add_xor2_weights('p8.x78', 6, 7, 11)    # d7 ^ d8
add_xor2_weights('p8.x9_10', 8, 9, 11)  # d9 ^ d10
add_xor2_stage_weights('p8.x5678')
add_xor2_stage_weights('p8.x9_10_11')
add_xor2_stage_weights('p8.final')

# Identity pass-through for the 11 data bits: one neuron per bit with a
# single +1 weight and bias -1 (fires iff the bit is set).
for i in range(11):
    w = [0.0] * 11
    w[i] = 1.0
    weights[f'd{i+1}.weight'] = torch.tensor([w], dtype=torch.float32)
    weights[f'd{i+1}.bias'] = torch.tensor([-1.0], dtype=torch.float32)

save_file(weights, 'model.safetensors')
def xor2(a, b):
    """Return the bitwise exclusive OR of the two operands."""
    result = a ^ b
    return result
def parity7(bits):
    """Fold a sequence of bits with XOR and return the overall parity."""
    accumulator = 0
    for bit in bits:
        accumulator = accumulator ^ bit
    return accumulator
def hamming1511_encode_ref(d):
    """Reference Hamming(15,11) encoder: 11 data bits -> 15-bit codeword.

    Codeword layout (1-based positions): parity bits occupy the
    power-of-two positions 1, 2, 4, 8; data bits fill the rest.
    """
    d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11 = d
    # Each parity bit XORs the data bits whose codeword position has the
    # corresponding bit set in its binary index.
    p1 = d1 ^ d2 ^ d4 ^ d5 ^ d7 ^ d9 ^ d11
    p2 = d1 ^ d3 ^ d4 ^ d6 ^ d7 ^ d10 ^ d11
    p4 = d2 ^ d3 ^ d4 ^ d8 ^ d9 ^ d10 ^ d11
    p8 = d5 ^ d6 ^ d7 ^ d8 ^ d9 ^ d10 ^ d11
    return [p1, p2, d1, p4, d2, d3, d4, p8, d5, d6, d7, d8, d9, d10, d11]
# Sanity-check printout: run the reference encoder over the first 32 input
# patterns and report summary statistics for the stored weight tensors.
print("Verifying Hamming(15,11) Encoder reference...")
print("Data bits -> Encoded (15 bits)")
print("-" * 50)
for pattern in range(32):  # first 32 of the 2048 possible inputs
    data_bits = [(pattern >> bit) & 1 for bit in range(11)]
    codeword = hamming1511_encode_ref(data_bits)
    data_str = ''.join(map(str, data_bits))
    enc_str = ''.join(map(str, codeword))
    print(f"{data_str} -> {enc_str}")

# Total L1 magnitude, parameter count, and neuron (weight-tensor) count.
total_mag = sum(tensor.abs().sum().item() for tensor in weights.values())
print(f"\nMagnitude: {total_mag:.0f}")
print(f"Parameters: {sum(tensor.numel() for tensor in weights.values())}")
print(f"Neurons: {len([key for key in weights if 'weight' in key])}")