phanerozoic committed on
Commit
0efc839
·
verified ·
1 Parent(s): 4984f97

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. README.md +124 -0
  2. config.json +9 -0
  3. create_safetensors.py +52 -0
  4. model.py +27 -0
  5. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - prefix
9
+ - parity
10
+ ---
11
+
12
+ # threshold-prefix-xor
13
+
14
+ 4-bit parallel prefix XOR (running parity). Computes cumulative XOR from MSB to each position. Essential for parity-based error detection.
15
+
16
+ ## Circuit
17
+
18
+ ```
19
+ x3 x2 x1 x0
20
+ │ │ │ │
21
+ ▼ │ │ │
22
+ ┌───┐ │ │ │
23
+ │y3 │ │ │ │
24
+ │=x3│ │ │ │
25
+ └───┘ ▼ │ │
26
+ │ ┌─────┐ │ │
27
+ └────►│ XOR │ │ │
28
+ │ y2 │ │ │
29
+ └─────┘ ▼ │
30
+ │ ┌─────┐ │
31
+ └────►│ XOR │ │
32
+ │ y1 │ │
33
+ └─────┘ ▼
34
+ │ ┌─────┐
35
+ └────►│ XOR │
36
+ │ y0 │
37
+ └─────┘
38
+ ```
39
+
40
+ ## Function
41
+
42
+ ```
43
+ prefix_xor(x3, x2, x1, x0) -> (y3, y2, y1, y0)
44
+
45
+ y3 = x3
46
+ y2 = x3 XOR x2
47
+ y1 = x3 XOR x2 XOR x1
48
+ y0 = x3 XOR x2 XOR x1 XOR x0 (full parity)
49
+ ```
50
+
51
+ Each output yi is the XOR (parity) of all inputs from x3 down to xi.
52
+
53
+ ## Truth Table (selected)
54
+
55
+ | x3 x2 x1 x0 | y3 y2 y1 y0 | y0 = parity |
56
+ |-------------|-------------|-------------|
57
+ | 0 0 0 0 | 0 0 0 0 | even (0) |
58
+ | 0 0 0 1 | 0 0 0 1 | odd (1) |
59
+ | 0 0 1 1 | 0 0 1 0 | even (0) |
60
+ | 0 1 1 1 | 0 1 0 1 | odd (1) |
61
+ | 1 1 1 1 | 1 0 1 0 | even (0) |
62
+ | 1 0 1 0 | 1 1 0 0 | even (0) |
63
+ | 1 1 0 0 | 1 0 0 0 | even (0) |
64
+
65
+ ## Mechanism
66
+
67
+ Unlike prefix-AND and prefix-OR, prefix-XOR requires sequential XOR gates because XOR is not a simple threshold function.
68
+
69
+ **Architecture:** Chain of 3 XOR gates (each XOR = 3 neurons)
70
+
71
+ | Stage | Computes | Neurons |
72
+ |-------|----------|---------|
73
+ | 1 | y3 = x3 (passthrough) | 0 |
74
+ | 2 | y2 = y3 XOR x2 | 3 |
75
+ | 3 | y1 = y2 XOR x1 | 3 |
76
+ | 4 | y0 = y1 XOR x0 | 3 |
77
+
78
+ ## Parameters
79
+
80
+ | | |
81
+ |---|---|
82
+ | Inputs | 4 |
83
+ | Outputs | 4 |
84
+ | Neurons | 9 |
85
+ | Layers | 6 |
86
+ | Parameters | 27 |
87
+ | Magnitude | 30 |
88
+
89
+ ## Applications
90
+
91
+ - **Running parity:** y_i gives parity of bits from MSB to position i
92
+ - **Error detection:** Final y0 is overall parity bit
93
+ - **Gray code generation:** Related to Gray code conversions
94
+ - **Checksum computation:** Partial checksums at each position
95
+
96
+ ## Usage
97
+
98
+ ```python
99
+ from safetensors.torch import load_file
100
+ import torch
101
+
102
+ w = load_file('model.safetensors')
103
+
104
+ def xor2(a, b, prefix):
105
+ inp = torch.tensor([float(a), float(b)])
106
+ or_out = int((inp @ w[f'{prefix}.or.weight'].T + w[f'{prefix}.or.bias'] >= 0).item())
107
+ nand_out = int((inp @ w[f'{prefix}.nand.weight'].T + w[f'{prefix}.nand.bias'] >= 0).item())
108
+ l1 = torch.tensor([float(or_out), float(nand_out)])
109
+ return int((l1 @ w[f'{prefix}.and.weight'].T + w[f'{prefix}.and.bias'] >= 0).item())
110
+
111
+ def prefix_xor(x3, x2, x1, x0):
112
+ y3 = x3
113
+ y2 = xor2(y3, x2, 'xor2')
114
+ y1 = xor2(y2, x1, 'xor1')
115
+ y0 = xor2(y1, x0, 'xor0')
116
+ return y3, y2, y1, y0
117
+
118
+ print(prefix_xor(1, 1, 1, 1)) # (1, 0, 1, 0) - even parity
119
+ print(prefix_xor(1, 0, 0, 0)) # (1, 1, 1, 1) - odd parity
120
+ ```
121
+
122
+ ## License
123
+
124
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-prefix-xor",
3
+ "description": "4-bit parallel prefix XOR (running parity)",
4
+ "inputs": 4,
5
+ "outputs": 4,
6
+ "neurons": 9,
7
+ "layers": 6,
8
+ "parameters": 27
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import save_file
3
+
4
+ weights = {}
5
+
6
+ def add_xor(prefix):
7
+ weights[f'{prefix}.or.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
8
+ weights[f'{prefix}.or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
9
+ weights[f'{prefix}.nand.weight'] = torch.tensor([[-1.0, -1.0]], dtype=torch.float32)
10
+ weights[f'{prefix}.nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
11
+ weights[f'{prefix}.and.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
12
+ weights[f'{prefix}.and.bias'] = torch.tensor([-2.0], dtype=torch.float32)
13
+
14
+ # y3 = x3 (passthrough, no gate needed)
15
+ # y2 = x3 XOR x2
16
+ add_xor('xor2')
17
+ # y1 = y2 XOR x1
18
+ add_xor('xor1')
19
+ # y0 = y1 XOR x0
20
+ add_xor('xor0')
21
+
22
+ save_file(weights, 'model.safetensors')
23
+
24
+ def xor2(a, b, prefix):
25
+ inp = torch.tensor([float(a), float(b)])
26
+ or_out = int((inp @ weights[f'{prefix}.or.weight'].T + weights[f'{prefix}.or.bias'] >= 0).item())
27
+ nand_out = int((inp @ weights[f'{prefix}.nand.weight'].T + weights[f'{prefix}.nand.bias'] >= 0).item())
28
+ l1 = torch.tensor([float(or_out), float(nand_out)])
29
+ return int((l1 @ weights[f'{prefix}.and.weight'].T + weights[f'{prefix}.and.bias'] >= 0).item())
30
+
31
+ def prefix_xor(x3, x2, x1, x0):
32
+ y3 = x3
33
+ y2 = xor2(y3, x2, 'xor2')
34
+ y1 = xor2(y2, x1, 'xor1')
35
+ y0 = xor2(y1, x0, 'xor0')
36
+ return y3, y2, y1, y0
37
+
38
+ print("Verifying prefix-xor...")
39
+ errors = 0
40
+ for i in range(16):
41
+ x3, x2, x1, x0 = (i >> 3) & 1, (i >> 2) & 1, (i >> 1) & 1, i & 1
42
+ y3, y2, y1, y0 = prefix_xor(x3, x2, x1, x0)
43
+ exp_y3 = x3
44
+ exp_y2 = x3 ^ x2
45
+ exp_y1 = x3 ^ x2 ^ x1
46
+ exp_y0 = x3 ^ x2 ^ x1 ^ x0
47
+ if (y3, y2, y1, y0) != (exp_y3, exp_y2, exp_y1, exp_y0):
48
+ errors += 1
49
+ print(f"ERROR: {x3}{x2}{x1}{x0} -> {y3}{y2}{y1}{y0}, expected {exp_y3}{exp_y2}{exp_y1}{exp_y0}")
50
+ if errors == 0:
51
+ print("All 16 test cases passed!")
52
+ print(f"Magnitude: {sum(t.abs().sum().item() for t in weights.values()):.0f}")
model.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
+ def load_model(path='model.safetensors'):
5
+ return load_file(path)
6
+
7
+ def xor2(a, b, prefix, w):
8
+ inp = torch.tensor([float(a), float(b)])
9
+ or_out = int((inp @ w[f'{prefix}.or.weight'].T + w[f'{prefix}.or.bias'] >= 0).item())
10
+ nand_out = int((inp @ w[f'{prefix}.nand.weight'].T + w[f'{prefix}.nand.bias'] >= 0).item())
11
+ l1 = torch.tensor([float(or_out), float(nand_out)])
12
+ return int((l1 @ w[f'{prefix}.and.weight'].T + w[f'{prefix}.and.bias'] >= 0).item())
13
+
14
+ def prefix_xor(x3, x2, x1, x0, w):
15
+ y3 = x3
16
+ y2 = xor2(y3, x2, 'xor2', w)
17
+ y1 = xor2(y2, x1, 'xor1', w)
18
+ y0 = xor2(y1, x0, 'xor0', w)
19
+ return y3, y2, y1, y0
20
+
21
+ if __name__ == '__main__':
22
+ w = load_model()
23
+ print('Prefix-XOR (running parity):')
24
+ for i in [0b0000, 0b0001, 0b0011, 0b0111, 0b1111, 0b1010]:
25
+ x3, x2, x1, x0 = (i >> 3) & 1, (i >> 2) & 1, (i >> 1) & 1, i & 1
26
+ y3, y2, y1, y0 = prefix_xor(x3, x2, x1, x0, w)
27
+ print(f'{x3}{x2}{x1}{x0} -> {y3}{y2}{y1}{y0} (parity={y0})')
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ced04353db713e635011658c29ea9681a07764f1bced9dd44d6f2103454fa785
3
+ size 1364