CharlesCNorton committed on
Commit
df35550
·
0 Parent(s):

8-bit popcount threshold circuit, magnitude 163

Browse files
Files changed (5) hide show
  1. README.md +61 -0
  2. config.json +9 -0
  3. create_safetensors.py +175 -0
  4. model.py +22 -0
  5. model.safetensors +0 -0
README.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ ---
9
+
10
+ # threshold-popcount8
11
+
12
+ 8-bit population count. Counts the number of 1 bits in an 8-bit input.
13
+
14
+ ## Function
15
+
16
+ popcount8(a7..a0) = count of 1 bits (0 to 8)
17
+
18
+ Output is 4 bits: y3y2y1y0 representing count in binary.
19
+
20
+ ## Truth Table (selected rows)
21
+
22
+ | Input | Count | Output |
23
+ |-------|-------|--------|
24
+ | 00000000 | 0 | 0000 |
25
+ | 00000001 | 1 | 0001 |
26
+ | 01010101 | 4 | 0100 |
27
+ | 11110000 | 4 | 0100 |
28
+ | 11111111 | 8 | 1000 |
29
+
30
+ ## Architecture
31
+
32
+ - y3 = (sum == 8)
33
+ - y2 = (sum >= 4) AND (sum <= 7)
34
+ - y1 = ((sum >= 2) AND (sum <= 3)) OR ((sum >= 6) AND (sum <= 7))
35
+ - y0 = XOR8 (parity of all 8 bits)
36
+
37
+ The parity computation uses a tree of XOR gates: XOR pairs, then XOR the results.
38
+
39
+ ## Parameters
40
+
41
+ | | |
42
+ |---|---|
43
+ | Inputs | 8 |
44
+ | Outputs | 4 |
45
+ | Neurons | 31 |
46
+ | Layers | 5 |
47
+ | Parameters | 177 |
48
+ | Magnitude | 163 |
49
+
50
+ ## Usage
51
+
52
+ ```python
53
+ from safetensors.torch import load_file
54
+ # See create_safetensors.py for full implementation
55
+
56
+ # popcount8(1,0,1,0,1,0,1,0) = [0,1,0,0] = 4
57
+ ```
58
+
59
+ ## License
60
+
61
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-popcount8",
3
+ "description": "8-bit population count (count 1 bits)",
4
+ "inputs": 8,
5
+ "outputs": 4,
6
+ "neurons": 31,
7
+ "layers": 5,
8
+ "parameters": 177
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import save_file
3
+
4
+ weights = {}
5
+
6
+ # 8-bit popcount: count number of 1s, output 0-8 in binary (4 bits)
7
+ # y3 = (sum == 8)
8
+ # y2 = (sum >= 4) AND (sum <= 7)
9
+ # y1 = ((sum >= 2) AND (sum <= 3)) OR ((sum >= 6) AND (sum <= 7))
10
+ # y0 = XOR8 (parity)
11
+
12
+ # Layer 1: threshold comparisons and XOR components
13
+ ones = [1.0] * 8
14
+ neg_ones = [-1.0] * 8
15
+
16
+ # y3 = sum >= 8
17
+ weights['y3.weight'] = torch.tensor([ones], dtype=torch.float32)
18
+ weights['y3.bias'] = torch.tensor([-8.0], dtype=torch.float32)
19
+
20
+ # ge4 = sum >= 4
21
+ weights['ge4.weight'] = torch.tensor([ones], dtype=torch.float32)
22
+ weights['ge4.bias'] = torch.tensor([-4.0], dtype=torch.float32)
23
+
24
+ # le7 = sum <= 7
25
+ weights['le7.weight'] = torch.tensor([neg_ones], dtype=torch.float32)
26
+ weights['le7.bias'] = torch.tensor([7.0], dtype=torch.float32)
27
+
28
+ # ge2 = sum >= 2
29
+ weights['ge2.weight'] = torch.tensor([ones], dtype=torch.float32)
30
+ weights['ge2.bias'] = torch.tensor([-2.0], dtype=torch.float32)
31
+
32
+ # le3 = sum <= 3
33
+ weights['le3.weight'] = torch.tensor([neg_ones], dtype=torch.float32)
34
+ weights['le3.bias'] = torch.tensor([3.0], dtype=torch.float32)
35
+
36
+ # ge6 = sum >= 6
37
+ weights['ge6.weight'] = torch.tensor([ones], dtype=torch.float32)
38
+ weights['ge6.bias'] = torch.tensor([-6.0], dtype=torch.float32)
39
+
40
+ # XOR pairs for parity: XOR(a0,a1), XOR(a2,a3), XOR(a4,a5), XOR(a6,a7)
41
+ for i, pair in enumerate([(0,1), (2,3), (4,5), (6,7)]):
42
+ w = [0.0] * 8
43
+ w[pair[0]] = 1.0
44
+ w[pair[1]] = 1.0
45
+ weights[f'xor_{i}_or.weight'] = torch.tensor([w], dtype=torch.float32)
46
+ weights[f'xor_{i}_or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
47
+
48
+ w_nand = [0.0] * 8
49
+ w_nand[pair[0]] = -1.0
50
+ w_nand[pair[1]] = -1.0
51
+ weights[f'xor_{i}_nand.weight'] = torch.tensor([w_nand], dtype=torch.float32)
52
+ weights[f'xor_{i}_nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
53
+
54
+ # Layer 2: y2, first-level XORs, and y1 components
55
+ # y2 = AND(ge4, le7)
56
+ weights['y2.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
57
+ weights['y2.bias'] = torch.tensor([-2.0], dtype=torch.float32)
58
+
59
+ # in23 = AND(ge2, le3)
60
+ weights['in23.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
61
+ weights['in23.bias'] = torch.tensor([-2.0], dtype=torch.float32)
62
+
63
+ # in67 = AND(ge6, le7)
64
+ weights['in67.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
65
+ weights['in67.bias'] = torch.tensor([-2.0], dtype=torch.float32)
66
+
67
+ # XOR results
68
+ for i in range(4):
69
+ weights[f'xor_{i}.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
70
+ weights[f'xor_{i}.bias'] = torch.tensor([-2.0], dtype=torch.float32)
71
+
72
+ # Layer 3: y1 = OR(in23, in67), XOR pairs for next level
73
+ weights['y1.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
74
+ weights['y1.bias'] = torch.tensor([-1.0], dtype=torch.float32)
75
+
76
+ # XOR(xor_0, xor_1)
77
+ weights['xor_01_or.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
78
+ weights['xor_01_or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
79
+ weights['xor_01_nand.weight'] = torch.tensor([[-1.0, -1.0]], dtype=torch.float32)
80
+ weights['xor_01_nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
81
+
82
+ # XOR(xor_2, xor_3)
83
+ weights['xor_23_or.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
84
+ weights['xor_23_or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
85
+ weights['xor_23_nand.weight'] = torch.tensor([[-1.0, -1.0]], dtype=torch.float32)
86
+ weights['xor_23_nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
87
+
88
+ # Layer 4
89
+ weights['xor_01.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
90
+ weights['xor_01.bias'] = torch.tensor([-2.0], dtype=torch.float32)
91
+ weights['xor_23.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
92
+ weights['xor_23.bias'] = torch.tensor([-2.0], dtype=torch.float32)
93
+
94
+ # XOR(xor_01, xor_23)
95
+ weights['xor_final_or.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
96
+ weights['xor_final_or.bias'] = torch.tensor([-1.0], dtype=torch.float32)
97
+ weights['xor_final_nand.weight'] = torch.tensor([[-1.0, -1.0]], dtype=torch.float32)
98
+ weights['xor_final_nand.bias'] = torch.tensor([1.0], dtype=torch.float32)
99
+
100
+ # Layer 5
101
+ weights['y0.weight'] = torch.tensor([[1.0, 1.0]], dtype=torch.float32)
102
+ weights['y0.bias'] = torch.tensor([-2.0], dtype=torch.float32)
103
+
104
+ save_file(weights, 'model.safetensors')
105
+
106
+ # Verify
107
+ def popcount8(a7, a6, a5, a4, a3, a2, a1, a0):
108
+ inp = torch.tensor([float(a7), float(a6), float(a5), float(a4),
109
+ float(a3), float(a2), float(a1), float(a0)])
110
+
111
+ # Layer 1
112
+ y3 = int((inp @ weights['y3.weight'].T + weights['y3.bias'] >= 0).item())
113
+ ge4 = int((inp @ weights['ge4.weight'].T + weights['ge4.bias'] >= 0).item())
114
+ le7 = int((inp @ weights['le7.weight'].T + weights['le7.bias'] >= 0).item())
115
+ ge2 = int((inp @ weights['ge2.weight'].T + weights['ge2.bias'] >= 0).item())
116
+ le3 = int((inp @ weights['le3.weight'].T + weights['le3.bias'] >= 0).item())
117
+ ge6 = int((inp @ weights['ge6.weight'].T + weights['ge6.bias'] >= 0).item())
118
+
119
+ xor_ors = []
120
+ xor_nands = []
121
+ for i in range(4):
122
+ xor_ors.append(int((inp @ weights[f'xor_{i}_or.weight'].T + weights[f'xor_{i}_or.bias'] >= 0).item()))
123
+ xor_nands.append(int((inp @ weights[f'xor_{i}_nand.weight'].T + weights[f'xor_{i}_nand.bias'] >= 0).item()))
124
+
125
+ # Layer 2
126
+ y2 = int((torch.tensor([float(ge4), float(le7)]) @ weights['y2.weight'].T + weights['y2.bias'] >= 0).item())
127
+ in23 = int((torch.tensor([float(ge2), float(le3)]) @ weights['in23.weight'].T + weights['in23.bias'] >= 0).item())
128
+ in67 = int((torch.tensor([float(ge6), float(le7)]) @ weights['in67.weight'].T + weights['in67.bias'] >= 0).item())
129
+
130
+ xors = []
131
+ for i in range(4):
132
+ x = int((torch.tensor([float(xor_ors[i]), float(xor_nands[i])]) @ weights[f'xor_{i}.weight'].T + weights[f'xor_{i}.bias'] >= 0).item())
133
+ xors.append(x)
134
+
135
+ # Layer 3
136
+ y1 = int((torch.tensor([float(in23), float(in67)]) @ weights['y1.weight'].T + weights['y1.bias'] >= 0).item())
137
+
138
+ xor_01_or = int((torch.tensor([float(xors[0]), float(xors[1])]) @ weights['xor_01_or.weight'].T + weights['xor_01_or.bias'] >= 0).item())
139
+ xor_01_nand = int((torch.tensor([float(xors[0]), float(xors[1])]) @ weights['xor_01_nand.weight'].T + weights['xor_01_nand.bias'] >= 0).item())
140
+ xor_23_or = int((torch.tensor([float(xors[2]), float(xors[3])]) @ weights['xor_23_or.weight'].T + weights['xor_23_or.bias'] >= 0).item())
141
+ xor_23_nand = int((torch.tensor([float(xors[2]), float(xors[3])]) @ weights['xor_23_nand.weight'].T + weights['xor_23_nand.bias'] >= 0).item())
142
+
143
+ # Layer 4
144
+ xor_01 = int((torch.tensor([float(xor_01_or), float(xor_01_nand)]) @ weights['xor_01.weight'].T + weights['xor_01.bias'] >= 0).item())
145
+ xor_23 = int((torch.tensor([float(xor_23_or), float(xor_23_nand)]) @ weights['xor_23.weight'].T + weights['xor_23.bias'] >= 0).item())
146
+
147
+ xor_final_or = int((torch.tensor([float(xor_01), float(xor_23)]) @ weights['xor_final_or.weight'].T + weights['xor_final_or.bias'] >= 0).item())
148
+ xor_final_nand = int((torch.tensor([float(xor_01), float(xor_23)]) @ weights['xor_final_nand.weight'].T + weights['xor_final_nand.bias'] >= 0).item())
149
+
150
+ # Layer 5
151
+ y0 = int((torch.tensor([float(xor_final_or), float(xor_final_nand)]) @ weights['y0.weight'].T + weights['y0.bias'] >= 0).item())
152
+
153
+ return [y3, y2, y1, y0]
154
+
155
+ print("Verifying popcount8...")
156
+ errors = 0
157
+ for i in range(256):
158
+ bits = [(i >> j) & 1 for j in range(7, -1, -1)]
159
+ result = popcount8(*bits)
160
+ count = sum(bits)
161
+ expected = [(count >> 3) & 1, (count >> 2) & 1, (count >> 1) & 1, count & 1]
162
+ if result != expected:
163
+ errors += 1
164
+ if errors <= 5:
165
+ print(f"ERROR: {i:08b} count={count} -> {result}, expected {expected}")
166
+
167
+ if errors == 0:
168
+ print("All 256 test cases passed!")
169
+ else:
170
+ print(f"FAILED: {errors} errors")
171
+
172
+ mag = sum(t.abs().sum().item() for t in weights.values())
173
+ print(f"Magnitude: {mag:.0f}")
174
+ print(f"Neurons: {len([k for k in weights.keys() if 'weight' in k])}")
175
+ print(f"Parameters: {sum(t.numel() for t in weights.values())}")
model.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
+ def load_model(path='model.safetensors'):
5
+ return load_file(path)
6
+
7
+ def popcount8(a7, a6, a5, a4, a3, a2, a1, a0, weights):
8
+ """8-bit population count: returns count of 1 bits as 4-bit value"""
9
+ inp = torch.tensor([float(a7), float(a6), float(a5), float(a4),
10
+ float(a3), float(a2), float(a1), float(a0)])
11
+ # See create_safetensors.py for full implementation
12
+ # Returns [y3, y2, y1, y0] where y3y2y1y0 is the count in binary
13
+ pass
14
+
15
+ if __name__ == '__main__':
16
+ w = load_model()
17
+ print('Popcount8 examples:')
18
+ examples = [0b00000000, 0b00000001, 0b01010101, 0b11111111, 0b11110000]
19
+ for val in examples:
20
+ bits = [(val >> j) & 1 for j in range(7, -1, -1)]
21
+ count = sum(bits)
22
+ print(f' {val:08b} -> {count}')
model.safetensors ADDED
Binary file (5.01 kB). View file