CharlesCNorton committed on
Commit
5f77b99
·
0 Parent(s):

4-bit subtractor threshold circuit, magnitude 88

Browse files
Files changed (5) hide show
  1. README.md +64 -0
  2. config.json +9 -0
  3. create_safetensors.py +181 -0
  4. model.py +63 -0
  5. model.safetensors +0 -0
README.md ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - arithmetic
9
+ ---
10
+
11
+ # threshold-subtractor4bit
12
+
13
+ 4-bit subtractor. Computes A - B (modulo 16) with borrow output.
14
+
15
+ ## Function
16
+
17
+ subtractor4bit(A, B) = (A - B) mod 16, borrow_out
18
+
19
+ ## Truth Table (selected rows)
20
+
21
+ | A | B | Diff | Borrow |
22
+ |---|---|------|--------|
23
+ | 7 | 3 | 4 | 0 |
24
+ | 5 | 5 | 0 | 0 |
25
+ | 3 | 7 | 12 | 1 |
26
+ | 0 | 1 | 15 | 1 |
27
+
28
+ ## Architecture
29
+
30
+ Ripple-borrow subtractor using full subtractor units.
31
+
32
+ For each bit i:
33
+ - diff_i = a_i XOR b_i XOR borrow_{i-1}
34
+ - borrow_i = majority(NOT(a_i), b_i, borrow_{i-1})
35
+
36
+ The borrow signal indicates A < B (unsigned comparison).
37
+
38
+ ## Parameters
39
+
40
+ | | |
41
+ |---|---|
42
+ | Inputs | 8 (a3-a0, b3-b0) |
43
+ | Outputs | 5 (d3-d0, bout) |
44
+ | Neurons | 25 |
45
+ | Layers | 8 |
46
+ | Parameters | 86 |
47
+ | Magnitude | 88 |
48
+
49
+ ## Usage
50
+
51
+ ```python
52
+ from safetensors.torch import load_file
53
+ # See model.py for full implementation
54
+
55
+ # 7 - 3 = 4
56
+ # subtractor4(0,1,1,1, 0,0,1,1) = [0,1,0,0, 0]
57
+
58
+ # 3 - 7 = 12 (wraps), borrow=1
59
+ # subtractor4(0,0,1,1, 0,1,1,1) = [1,1,0,0, 1]
60
+ ```
61
+
62
+ ## License
63
+
64
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-subtractor4bit",
3
+ "description": "4-bit subtractor (A - B mod 16)",
4
+ "inputs": 8,
5
+ "outputs": 5,
6
+ "neurons": 25,
7
+ "layers": 8,
8
+ "parameters": 88
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import save_file
3
+
4
+ weights = {}
5
+
6
+ # 4-bit subtractor: A - B (mod 16)
7
+ # Input: [a3, a2, a1, a0, b3, b2, b1, b0]
8
+ # Output: [d3, d2, d1, d0, bout] where bout is final borrow
9
+
10
+ # For each bit i:
11
+ # diff_i = a_i XOR b_i XOR bout_{i-1}
12
+ # bout_i = majority(NOT(a_i), b_i, bout_{i-1}) = (b_i + bout_{i-1} > a_i)
13
+ # = threshold with weights [-1, 1, 1] on [a_i, b_i, bout_{i-1}], bias -1
14
+
15
+ # Bit 0 (no borrow in):
16
+ # d0 = a0 XOR b0
17
+ # bout0 = NOT(a0) AND b0 = threshold([-1, 1], -1) on [a0, b0]
18
+
19
+ # XOR components for d0: OR and NAND on [a0, b0]
20
+ weights['d0_or.weight'] = torch.tensor([[0., 0., 0., 1., 0., 0., 0., 1.]], dtype=torch.float32)
21
+ weights['d0_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
22
+ weights['d0_nand.weight'] = torch.tensor([[0., 0., 0., -1., 0., 0., 0., -1.]], dtype=torch.float32)
23
+ weights['d0_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
24
+
25
+ # bout0 = NOT(a0) AND b0
26
+ weights['bout0.weight'] = torch.tensor([[0., 0., 0., -1., 0., 0., 0., 1.]], dtype=torch.float32)
27
+ weights['bout0.bias'] = torch.tensor([-1.], dtype=torch.float32)
28
+
29
+ # d0 = AND(d0_or, d0_nand)
30
+ weights['d0.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
31
+ weights['d0.bias'] = torch.tensor([-2.], dtype=torch.float32)
32
+
33
+ # Bit 1: d1 = XOR3(a1, b1, bout0), bout1 = majority(NOT(a1), b1, bout0)
34
+ # Using flat XOR3 architecture: 3 hidden neurons + 1 output
35
+ # But we need to handle the cascading borrow... Let's use a simpler cascade approach
36
+
37
+ # For bits 1-3, we use XOR(XOR(ai, bi), bout_{i-1})
38
+ # This needs 2 XOR2 gates per bit = 6 neurons per bit for diff
39
+ # Plus 1 neuron for borrow = 7 neurons per bit
40
+
41
+ # Bit 1:
42
+ # First XOR: a1 XOR b1
43
+ weights['xor1_or.weight'] = torch.tensor([[0., 0., 1., 0., 0., 0., 1., 0.]], dtype=torch.float32)
44
+ weights['xor1_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
45
+ weights['xor1_nand.weight'] = torch.tensor([[0., 0., -1., 0., 0., 0., -1., 0.]], dtype=torch.float32)
46
+ weights['xor1_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
47
+ weights['xor1.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
48
+ weights['xor1.bias'] = torch.tensor([-2.], dtype=torch.float32)
49
+
50
+ # Second XOR for d1: (a1 XOR b1) XOR bout0 - needs bout0 from layer 1
51
+ # d1_or takes [xor1, bout0], d1_nand takes [xor1, bout0]
52
+ weights['d1_or.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
53
+ weights['d1_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
54
+ weights['d1_nand.weight'] = torch.tensor([[-1., -1.]], dtype=torch.float32)
55
+ weights['d1_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
56
+ weights['d1.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
57
+ weights['d1.bias'] = torch.tensor([-2.], dtype=torch.float32)
58
+
59
+ # bout1 = majority(NOT(a1), b1, bout0) - threshold with [-1, 1, 1] bias -1
60
+ # But we need to include bout0 in the input... This will be computed in forward pass
61
+ weights['bout1.weight'] = torch.tensor([[1., 1., 1.]], dtype=torch.float32) # [NOT(a1), b1, bout0]
62
+ weights['bout1.bias'] = torch.tensor([-2.], dtype=torch.float32) # fires when 2+ are true
63
+
64
+ # Similar for bits 2 and 3
65
+ weights['xor2_or.weight'] = torch.tensor([[0., 1., 0., 0., 0., 1., 0., 0.]], dtype=torch.float32)
66
+ weights['xor2_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
67
+ weights['xor2_nand.weight'] = torch.tensor([[0., -1., 0., 0., 0., -1., 0., 0.]], dtype=torch.float32)
68
+ weights['xor2_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
69
+ weights['xor2.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
70
+ weights['xor2.bias'] = torch.tensor([-2.], dtype=torch.float32)
71
+
72
+ weights['d2_or.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
73
+ weights['d2_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
74
+ weights['d2_nand.weight'] = torch.tensor([[-1., -1.]], dtype=torch.float32)
75
+ weights['d2_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
76
+ weights['d2.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
77
+ weights['d2.bias'] = torch.tensor([-2.], dtype=torch.float32)
78
+
79
+ weights['bout2.weight'] = torch.tensor([[1., 1., 1.]], dtype=torch.float32)
80
+ weights['bout2.bias'] = torch.tensor([-2.], dtype=torch.float32)
81
+
82
+ weights['xor3_or.weight'] = torch.tensor([[1., 0., 0., 0., 1., 0., 0., 0.]], dtype=torch.float32)
83
+ weights['xor3_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
84
+ weights['xor3_nand.weight'] = torch.tensor([[-1., 0., 0., 0., -1., 0., 0., 0.]], dtype=torch.float32)
85
+ weights['xor3_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
86
+ weights['xor3.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
87
+ weights['xor3.bias'] = torch.tensor([-2.], dtype=torch.float32)
88
+
89
+ weights['d3_or.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
90
+ weights['d3_or.bias'] = torch.tensor([-1.], dtype=torch.float32)
91
+ weights['d3_nand.weight'] = torch.tensor([[-1., -1.]], dtype=torch.float32)
92
+ weights['d3_nand.bias'] = torch.tensor([1.], dtype=torch.float32)
93
+ weights['d3.weight'] = torch.tensor([[1., 1.]], dtype=torch.float32)
94
+ weights['d3.bias'] = torch.tensor([-2.], dtype=torch.float32)
95
+
96
+ weights['bout3.weight'] = torch.tensor([[1., 1., 1.]], dtype=torch.float32)
97
+ weights['bout3.bias'] = torch.tensor([-2.], dtype=torch.float32)
98
+
99
+ save_file(weights, 'model.safetensors')
100
+
101
+ # Verify with direct computation
102
+ def subtractor4(a3, a2, a1, a0, b3, b2, b1, b0):
103
+ inp = torch.tensor([float(a3), float(a2), float(a1), float(a0),
104
+ float(b3), float(b2), float(b1), float(b0)])
105
+
106
+ # Bit 0
107
+ d0_or = int((inp @ weights['d0_or.weight'].T + weights['d0_or.bias'] >= 0).item())
108
+ d0_nand = int((inp @ weights['d0_nand.weight'].T + weights['d0_nand.bias'] >= 0).item())
109
+ d0 = int((torch.tensor([float(d0_or), float(d0_nand)]) @ weights['d0.weight'].T + weights['d0.bias'] >= 0).item())
110
+ bout0 = int((inp @ weights['bout0.weight'].T + weights['bout0.bias'] >= 0).item())
111
+
112
+ # Bit 1
113
+ xor1_or = int((inp @ weights['xor1_or.weight'].T + weights['xor1_or.bias'] >= 0).item())
114
+ xor1_nand = int((inp @ weights['xor1_nand.weight'].T + weights['xor1_nand.bias'] >= 0).item())
115
+ xor1 = int((torch.tensor([float(xor1_or), float(xor1_nand)]) @ weights['xor1.weight'].T + weights['xor1.bias'] >= 0).item())
116
+
117
+ d1_in = torch.tensor([float(xor1), float(bout0)])
118
+ d1_or = int((d1_in @ weights['d1_or.weight'].T + weights['d1_or.bias'] >= 0).item())
119
+ d1_nand = int((d1_in @ weights['d1_nand.weight'].T + weights['d1_nand.bias'] >= 0).item())
120
+ d1 = int((torch.tensor([float(d1_or), float(d1_nand)]) @ weights['d1.weight'].T + weights['d1.bias'] >= 0).item())
121
+
122
+ # bout1 = majority(NOT(a1), b1, bout0)
123
+ not_a1 = 1 - a1
124
+ bout1_in = torch.tensor([float(not_a1), float(b1), float(bout0)])
125
+ bout1 = int((bout1_in @ weights['bout1.weight'].T + weights['bout1.bias'] >= 0).item())
126
+
127
+ # Bit 2
128
+ xor2_or = int((inp @ weights['xor2_or.weight'].T + weights['xor2_or.bias'] >= 0).item())
129
+ xor2_nand = int((inp @ weights['xor2_nand.weight'].T + weights['xor2_nand.bias'] >= 0).item())
130
+ xor2 = int((torch.tensor([float(xor2_or), float(xor2_nand)]) @ weights['xor2.weight'].T + weights['xor2.bias'] >= 0).item())
131
+
132
+ d2_in = torch.tensor([float(xor2), float(bout1)])
133
+ d2_or = int((d2_in @ weights['d2_or.weight'].T + weights['d2_or.bias'] >= 0).item())
134
+ d2_nand = int((d2_in @ weights['d2_nand.weight'].T + weights['d2_nand.bias'] >= 0).item())
135
+ d2 = int((torch.tensor([float(d2_or), float(d2_nand)]) @ weights['d2.weight'].T + weights['d2.bias'] >= 0).item())
136
+
137
+ not_a2 = 1 - a2
138
+ bout2_in = torch.tensor([float(not_a2), float(b2), float(bout1)])
139
+ bout2 = int((bout2_in @ weights['bout2.weight'].T + weights['bout2.bias'] >= 0).item())
140
+
141
+ # Bit 3
142
+ xor3_or = int((inp @ weights['xor3_or.weight'].T + weights['xor3_or.bias'] >= 0).item())
143
+ xor3_nand = int((inp @ weights['xor3_nand.weight'].T + weights['xor3_nand.bias'] >= 0).item())
144
+ xor3 = int((torch.tensor([float(xor3_or), float(xor3_nand)]) @ weights['xor3.weight'].T + weights['xor3.bias'] >= 0).item())
145
+
146
+ d3_in = torch.tensor([float(xor3), float(bout2)])
147
+ d3_or = int((d3_in @ weights['d3_or.weight'].T + weights['d3_or.bias'] >= 0).item())
148
+ d3_nand = int((d3_in @ weights['d3_nand.weight'].T + weights['d3_nand.bias'] >= 0).item())
149
+ d3 = int((torch.tensor([float(d3_or), float(d3_nand)]) @ weights['d3.weight'].T + weights['d3.bias'] >= 0).item())
150
+
151
+ not_a3 = 1 - a3
152
+ bout3_in = torch.tensor([float(not_a3), float(b3), float(bout2)])
153
+ bout3 = int((bout3_in @ weights['bout3.weight'].T + weights['bout3.bias'] >= 0).item())
154
+
155
+ return [d3, d2, d1, d0, bout3]
156
+
157
+ print("Verifying subtractor4bit...")
158
+ errors = 0
159
+ for a in range(16):
160
+ for b in range(16):
161
+ a3, a2, a1, a0 = (a >> 3) & 1, (a >> 2) & 1, (a >> 1) & 1, a & 1
162
+ b3, b2, b1, b0 = (b >> 3) & 1, (b >> 2) & 1, (b >> 1) & 1, b & 1
163
+ result = subtractor4(a3, a2, a1, a0, b3, b2, b1, b0)
164
+
165
+ expected_val = (a - b) % 16
166
+ expected_bout = 1 if a < b else 0
167
+ expected = [(expected_val >> 3) & 1, (expected_val >> 2) & 1,
168
+ (expected_val >> 1) & 1, expected_val & 1, expected_bout]
169
+
170
+ if result != expected:
171
+ errors += 1
172
+ if errors <= 5:
173
+ print(f"ERROR: {a} - {b} = {result}, expected {expected}")
174
+
175
+ if errors == 0:
176
+ print("All 256 test cases passed!")
177
+ else:
178
+ print(f"FAILED: {errors} errors")
179
+
180
+ mag = sum(t.abs().sum().item() for t in weights.values())
181
+ print(f"Magnitude: {mag:.0f}")
model.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
+ def load_model(path='model.safetensors'):
5
+ return load_file(path)
6
+
7
+ def subtractor4(a3, a2, a1, a0, b3, b2, b1, b0, weights):
8
+ """4-bit subtractor: returns (A - B) mod 16 and borrow out"""
9
+ inp = torch.tensor([float(a3), float(a2), float(a1), float(a0),
10
+ float(b3), float(b2), float(b1), float(b0)])
11
+
12
+ # Bit 0
13
+ d0_or = int((inp @ weights['d0_or.weight'].T + weights['d0_or.bias'] >= 0).item())
14
+ d0_nand = int((inp @ weights['d0_nand.weight'].T + weights['d0_nand.bias'] >= 0).item())
15
+ d0 = int((torch.tensor([float(d0_or), float(d0_nand)]) @ weights['d0.weight'].T + weights['d0.bias'] >= 0).item())
16
+ bout0 = int((inp @ weights['bout0.weight'].T + weights['bout0.bias'] >= 0).item())
17
+
18
+ # Bit 1
19
+ xor1_or = int((inp @ weights['xor1_or.weight'].T + weights['xor1_or.bias'] >= 0).item())
20
+ xor1_nand = int((inp @ weights['xor1_nand.weight'].T + weights['xor1_nand.bias'] >= 0).item())
21
+ xor1 = int((torch.tensor([float(xor1_or), float(xor1_nand)]) @ weights['xor1.weight'].T + weights['xor1.bias'] >= 0).item())
22
+ d1_in = torch.tensor([float(xor1), float(bout0)])
23
+ d1_or = int((d1_in @ weights['d1_or.weight'].T + weights['d1_or.bias'] >= 0).item())
24
+ d1_nand = int((d1_in @ weights['d1_nand.weight'].T + weights['d1_nand.bias'] >= 0).item())
25
+ d1 = int((torch.tensor([float(d1_or), float(d1_nand)]) @ weights['d1.weight'].T + weights['d1.bias'] >= 0).item())
26
+ not_a1 = 1 - a1
27
+ bout1 = int((torch.tensor([float(not_a1), float(b1), float(bout0)]) @ weights['bout1.weight'].T + weights['bout1.bias'] >= 0).item())
28
+
29
+ # Bit 2
30
+ xor2_or = int((inp @ weights['xor2_or.weight'].T + weights['xor2_or.bias'] >= 0).item())
31
+ xor2_nand = int((inp @ weights['xor2_nand.weight'].T + weights['xor2_nand.bias'] >= 0).item())
32
+ xor2 = int((torch.tensor([float(xor2_or), float(xor2_nand)]) @ weights['xor2.weight'].T + weights['xor2.bias'] >= 0).item())
33
+ d2_in = torch.tensor([float(xor2), float(bout1)])
34
+ d2_or = int((d2_in @ weights['d2_or.weight'].T + weights['d2_or.bias'] >= 0).item())
35
+ d2_nand = int((d2_in @ weights['d2_nand.weight'].T + weights['d2_nand.bias'] >= 0).item())
36
+ d2 = int((torch.tensor([float(d2_or), float(d2_nand)]) @ weights['d2.weight'].T + weights['d2.bias'] >= 0).item())
37
+ not_a2 = 1 - a2
38
+ bout2 = int((torch.tensor([float(not_a2), float(b2), float(bout1)]) @ weights['bout2.weight'].T + weights['bout2.bias'] >= 0).item())
39
+
40
+ # Bit 3
41
+ xor3_or = int((inp @ weights['xor3_or.weight'].T + weights['xor3_or.bias'] >= 0).item())
42
+ xor3_nand = int((inp @ weights['xor3_nand.weight'].T + weights['xor3_nand.bias'] >= 0).item())
43
+ xor3 = int((torch.tensor([float(xor3_or), float(xor3_nand)]) @ weights['xor3.weight'].T + weights['xor3.bias'] >= 0).item())
44
+ d3_in = torch.tensor([float(xor3), float(bout2)])
45
+ d3_or = int((d3_in @ weights['d3_or.weight'].T + weights['d3_or.bias'] >= 0).item())
46
+ d3_nand = int((d3_in @ weights['d3_nand.weight'].T + weights['d3_nand.bias'] >= 0).item())
47
+ d3 = int((torch.tensor([float(d3_or), float(d3_nand)]) @ weights['d3.weight'].T + weights['d3.bias'] >= 0).item())
48
+ not_a3 = 1 - a3
49
+ bout3 = int((torch.tensor([float(not_a3), float(b3), float(bout2)]) @ weights['bout3.weight'].T + weights['bout3.bias'] >= 0).item())
50
+
51
+ return [d3, d2, d1, d0, bout3]
52
+
53
+ if __name__ == '__main__':
54
+ w = load_model()
55
+ print('Subtractor4bit examples:')
56
+ examples = [(7, 3), (5, 5), (3, 7), (15, 1), (0, 1)]
57
+ for a, b in examples:
58
+ a3, a2, a1, a0 = (a >> 3) & 1, (a >> 2) & 1, (a >> 1) & 1, a & 1
59
+ b3, b2, b1, b0 = (b >> 3) & 1, (b >> 2) & 1, (b >> 1) & 1, b & 1
60
+ result = subtractor4(a3, a2, a1, a0, b3, b2, b1, b0, w)
61
+ diff = result[0]*8 + result[1]*4 + result[2]*2 + result[3]
62
+ bout = result[4]
63
+ print(f' {a:2d} - {b:2d} = {diff:2d} (bout={bout})')
model.safetensors ADDED
Binary file (3.94 kB). View file