CharlesCNorton commited on
Commit
cdd5bc3
·
0 Parent(s):

8-bit count leading zeros, magnitude 69

Browse files
Files changed (5) hide show
  1. README.md +74 -0
  2. config.json +9 -0
  3. create_safetensors.py +123 -0
  4. model.py +47 -0
  5. model.safetensors +0 -0
README.md ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ ---
9
+
10
+ # threshold-clz8
11
+
12
+ 8-bit count leading zeros.
13
+
14
+ ## Function
15
+
16
+ clz8(a7, a6, a5, a4, a3, a2, a1, a0) = number of leading zeros from MSB (0-8)
17
+
18
+ ## Truth Table (selected rows)
19
+
20
+ | Input | First 1 | CLZ | Output |
21
+ |-------|---------|-----|--------|
22
+ | 1xxxxxxx | bit 7 | 0 | 0000 |
23
+ | 01xxxxxx | bit 6 | 1 | 0001 |
24
+ | 001xxxxx | bit 5 | 2 | 0010 |
25
+ | 0001xxxx | bit 4 | 3 | 0011 |
26
+ | 00001xxx | bit 3 | 4 | 0100 |
27
+ | 000001xx | bit 2 | 5 | 0101 |
28
+ | 0000001x | bit 1 | 6 | 0110 |
29
+ | 00000001 | bit 0 | 7 | 0111 |
30
+ | 00000000 | none | 8 | 1000 |
31
+
32
+ ## Architecture
33
+
34
+ ```
35
+ Layer 1: Priority detection from MSB (9 neurons)
36
+ has7 = a7 (MSB is set, clz=0)
37
+ has6_first = a6 AND NOT(a7) (clz=1)
38
+ has5_first = a5 AND NOT(a6) AND NOT(a7) (clz=2)
39
+ ...
40
+ has0_first = a0 AND NOT(a1..a7) (clz=7)
41
+ all_zero = NOT(any bit) (clz=8)
42
+
43
+ Layer 2: Binary encoding (4 neurons)
44
+ y0 = has6_first OR has4_first OR has2_first OR has0_first
45
+ y1 = has5_first OR has4_first OR has1_first OR has0_first
46
+ y2 = has3_first OR has2_first OR has1_first OR has0_first
47
+ y3 = all_zero
48
+ ```
49
+
50
+ ## Parameters
51
+
52
+ | | |
53
+ |---|---|
54
+ | Inputs | 8 |
55
+ | Outputs | 4 |
56
+ | Neurons | 13 |
57
+ | Layers | 2 |
58
+ | Parameters | 121 |
59
+ | Magnitude | 69 |
60
+
61
+ ## Usage
62
+
63
+ ```python
64
+ from safetensors.torch import load_file
65
+ # See model.py for full implementation
66
+
67
+ # clz8(1,0,0,0,0,0,0,0) = [0,0,0,0] = 0 (MSB set)
68
+ # clz8(0,0,0,0,1,0,0,0) = [0,1,0,0] = 4 (four leading zeros)
69
+ # clz8(0,0,0,0,0,0,0,0) = [1,0,0,0] = 8 (all zeros)
70
+ ```
71
+
72
+ ## License
73
+
74
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-clz8",
3
+ "description": "8-bit count leading zeros",
4
+ "inputs": 8,
5
+ "outputs": 4,
6
+ "neurons": 13,
7
+ "layers": 2,
8
+ "parameters": 121
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from safetensors.torch import save_file

# Input order: [a7, a6, a5, a4, a3, a2, a1, a0] (a7 is the MSB).
# clz8 returns the count of leading zeros (0-8) as a 4-bit binary code.
#
# Layer 1 (priority detection, 9 neurons): hasK_first fires iff bit K is the
# first set bit scanning from the MSB; all_zero fires iff no bit is set.
# Layer 2 (binary encoder, 4 neurons): since exactly one layer-1 neuron fires,
# each output bit is an OR over the clz values whose binary code sets that bit.

weights = {}

# Layer 1: neuron k (k=0 detects a7 / clz 0 ... k=7 detects a0 / clz 7) has
# weight -1 on every higher-order bit, +1 on the detected bit, 0 below it.
# With bias -1 and threshold (w.x + b >= 0) it fires only when the detected
# bit is 1 AND all higher bits are 0.
LAYER1_NAMES = ['has7', 'has6_first', 'has5_first', 'has4_first',
                'has3_first', 'has2_first', 'has1_first', 'has0_first']
for k, name in enumerate(LAYER1_NAMES):
    row = [-1.0] * k + [1.0] + [0.0] * (7 - k)
    weights[f'{name}.weight'] = torch.tensor([row], dtype=torch.float32)
    weights[f'{name}.bias'] = torch.tensor([-1.0], dtype=torch.float32)

# all_zero (clz = 8): any set bit drives the sum negative; bias 0 means the
# neuron fires only on the all-zero input.
weights['all_zero.weight'] = torch.tensor([[-1.0] * 8], dtype=torch.float32)
weights['all_zero.bias'] = torch.tensor([0.0], dtype=torch.float32)

# Layer 2 input order: [has7, has6_first, ..., has0_first, all_zero].
# clz codes: 0=0000, 1=0001, 2=0010, 3=0011, 4=0100, 5=0101, 6=0110,
#            7=0111, 8=1000.
# y0: clz in {1,3,5,7}; y1: clz in {2,3,6,7}; y2: clz in {4,5,6,7}; y3: clz=8.
ENCODER_ROWS = {
    'y0': [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
    'y1': [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0],
    'y2': [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0],
    'y3': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
}
for name, row in ENCODER_ROWS.items():
    weights[f'{name}.weight'] = torch.tensor([row], dtype=torch.float32)
    weights[f'{name}.bias'] = torch.tensor([-1.0], dtype=torch.float32)

save_file(weights, 'model.safetensors')


def _fire(vec, name):
    """Evaluate one Heaviside threshold neuron: 1 iff w.x + b >= 0, else 0."""
    return int((vec @ weights[f'{name}.weight'].T
                + weights[f'{name}.bias'] >= 0).item())


def clz8(a7, a6, a5, a4, a3, a2, a1, a0):
    """Run the 2-layer network; return [y3, y2, y1, y0] (binary clz, MSB first)."""
    inp = torch.tensor([float(b) for b in (a7, a6, a5, a4, a3, a2, a1, a0)])
    # Layer 1: one-hot priority detection (plus all_zero).
    l1 = torch.tensor([float(_fire(inp, n)) for n in LAYER1_NAMES + ['all_zero']])
    # Layer 2: binary encoding of the leading-zero count.
    y0, y1, y2, y3 = (_fire(l1, n) for n in ('y0', 'y1', 'y2', 'y3'))
    return [y3, y2, y1, y0]


# Exhaustive verification over all 256 possible 8-bit inputs.
print("Verifying clz8...")
errors = 0
for i in range(256):
    bits = [(i >> j) & 1 for j in range(7, -1, -1)]  # MSB first

    # Reference clz: index of first set bit from the MSB, or 8 if none.
    expected_clz = next((j for j in range(8) if bits[j]), 8)

    expected = [(expected_clz >> 3) & 1, (expected_clz >> 2) & 1,
                (expected_clz >> 1) & 1, expected_clz & 1]
    result = clz8(*bits)

    if result != expected:
        errors += 1
        if errors <= 5:
            print(f"ERROR: {bits} clz={expected_clz} -> {result}, expected {expected}")

if errors == 0:
    print("All 256 test cases passed!")
else:
    print(f"FAILED: {errors} errors")

# Total |w| + |b| over all tensors (documented "magnitude") and true scalar
# parameter count: 9*(8+1) + 4*(9+1) = 121.
mag = sum(t.abs().sum().item() for t in weights.values())
n_params = sum(t.numel() for t in weights.values())
print(f"Magnitude: {mag:.0f}")
print(f"Parameters: {n_params}")
model.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
def load_model(path='model.safetensors'):
    """Load the threshold-network weight tensors from a safetensors file.

    Returns a dict mapping tensor names (e.g. 'has7.weight') to torch tensors.
    """
    tensors = load_file(path)
    return tensors
6
+
7
def clz8(a7, a6, a5, a4, a3, a2, a1, a0, weights):
    """8-bit count leading zeros. Returns 4-bit binary encoding of 0-8.

    Output is [y3, y2, y1, y0], most-significant bit first, so e.g. an
    all-zero input yields [1, 0, 0, 0] (clz = 8).
    """
    def fire(vec, name):
        # Heaviside threshold neuron: 1 iff w.x + b >= 0, else 0.
        return int((vec @ weights[f'{name}.weight'].T
                    + weights[f'{name}.bias'] >= 0).item())

    inp = torch.tensor([float(b) for b in (a7, a6, a5, a4, a3, a2, a1, a0)])

    # Layer 1: priority detection — at most one hasK neuron fires (the first
    # set bit from the MSB), or all_zero fires when no bit is set.
    layer1_names = ('has7', 'has6_first', 'has5_first', 'has4_first',
                    'has3_first', 'has2_first', 'has1_first', 'has0_first',
                    'all_zero')
    hidden = torch.tensor([float(fire(inp, name)) for name in layer1_names])

    # Layer 2: encode the one-hot layer-1 activation into binary.
    y0, y1, y2, y3 = (fire(hidden, name) for name in ('y0', 'y1', 'y2', 'y3'))

    return [y3, y2, y1, y0]
33
+
34
if __name__ == '__main__':
    w = load_model()
    print('clz8 examples:')
    # MSB-first bit tuples (a7..a0) covering representative leading-zero counts.
    examples = [
        (1, 0, 0, 0, 0, 0, 0, 0),  # 0 leading zeros
        (0, 1, 0, 0, 0, 0, 0, 0),  # 1 leading zero
        (0, 0, 0, 0, 1, 0, 0, 0),  # 4 leading zeros
        (0, 0, 0, 0, 0, 0, 0, 1),  # 7 leading zeros
        (0, 0, 0, 0, 0, 0, 0, 0),  # 8 leading zeros
    ]
    for bits in examples:
        result = clz8(*bits, w)
        # Decode [y3, y2, y1, y0] back to an integer for display.
        clz_val = sum(bit << shift for bit, shift in zip(result, (3, 2, 1, 0)))
        print(f'  {bits} -> {result} = {clz_val}')
model.safetensors ADDED
Binary file (2.3 kB). View file