CharlesCNorton commited on
Commit
7bf820e
·
0 Parent(s):

8-to-3 priority encoder, magnitude 68

Browse files
Files changed (5) hide show
  1. README.md +68 -0
  2. config.json +9 -0
  3. create_safetensors.py +92 -0
  4. model.py +31 -0
  5. model.safetensors +0 -0
README.md ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - encoder
9
+ ---
10
+
11
+ # threshold-priorityencoder8
12
+
13
+ 8-to-3 priority encoder. Outputs 3-bit binary encoding of highest-priority active input.
14
+
15
+ ## Function
16
+
17
+ priority_encode(i7..i0) -> (y2, y1, y0, valid)
18
+
19
+ - i7 = highest priority, i0 = lowest priority
20
+ - y2,y1,y0 = 3-bit binary encoding of highest active input index
21
+ - valid = 1 if any input is active
22
+
23
+ ## Architecture
24
+
25
+ **Layer 1: 8 neurons (h7..h0)**
26
+
27
+ Each hk detects "ik is the highest active input":
28
+ - hk fires when ik=1 AND all higher-priority inputs are 0
29
+ - h7: weights [1,0,0,0,0,0,0,0], bias -1
30
+ - h6: weights [-1,1,0,0,0,0,0,0], bias -1
31
+ - ...
32
+ - h0: weights [-1,-1,-1,-1,-1,-1,-1,1], bias -1
33
+
34
+ **Layer 2: 4 neurons**
35
+
36
+ - y2 = h7 OR h6 OR h5 OR h4
37
+ - y1 = h7 OR h6 OR h3 OR h2
38
+ - y0 = h7 OR h5 OR h3 OR h1
39
+ - v = any h active
40
+
41
+ ## Parameters
42
+
43
+ | | |
44
+ |---|---|
45
+ | Inputs | 8 |
46
+ | Outputs | 4 |
47
+ | Neurons | 12 |
48
+ | Layers | 2 |
49
+ | Parameters | 108 |
50
+ | Magnitude | 68 |
51
+
52
+ ## Usage
53
+
54
+ ```python
55
+ from safetensors.torch import load_file
56
+ import torch
57
+
58
+ w = load_file('model.safetensors')
59
+
60
+ # (see model.py for full implementation)
61
+
62
+ # Example: i5 is highest active (index 5 = 101)
63
+ # priority_encode(0,0,1,0,0,0,0,0, w) -> (1, 0, 1, 1)
64
+ ```
65
+
66
+ ## License
67
+
68
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-priorityencoder8",
3
+ "description": "8-to-3 priority encoder as threshold circuit",
4
+ "inputs": 8,
5
+ "outputs": 4,
6
+ "neurons": 12,
7
+ "layers": 2,
8
+ "parameters": 108
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import save_file
3
+
4
# Input layout: position 0 = i7 (highest priority) ... position 7 = i0.
# Layer 1 neuron hk fires exactly when input ik is the highest active one.
# Layer 2 ORs the one-hot h vector into the index bits y2/y1/y0 plus valid v.

weights = {}

# --- Layer 1: "ik is the highest active input" detectors -------------------
for k in range(8):
    row = [0.0] * 8
    row[7 - k] = 1.0              # ik itself sits at position 7-k of the input
    for pos in range(7 - k):      # positions 0..6-k hold the higher-priority inputs
        row[pos] = -1.0           # any active higher input vetoes hk
    weights[f'layer1.h{k}.weight'] = torch.tensor([row], dtype=torch.float32)
    weights[f'layer1.h{k}.bias'] = torch.tensor([-1.0], dtype=torch.float32)

# --- Layer 2: OR the h outputs into index bits and the valid flag ----------
# The h vector is ordered [h0 .. h7]; output bit b is fed by every hk whose
# index k has bit b set, and v is fed by all of them.
fan_in = {
    'y2': (4, 5, 6, 7),       # indices with bit 2 set
    'y1': (2, 3, 6, 7),       # indices with bit 1 set
    'y0': (1, 3, 5, 7),       # indices with bit 0 set
    'v': tuple(range(8)),     # valid = any h active
}
for name, sources in fan_in.items():
    row = [1.0 if idx in sources else 0.0 for idx in range(8)]
    weights[f'layer2.{name}.weight'] = torch.tensor([row], dtype=torch.float32)
    weights[f'layer2.{name}.bias'] = torch.tensor([-1.0], dtype=torch.float32)
41
+
42
+ save_file(weights, 'model.safetensors')
43
+
44
def priority_encode(inputs, weight_table=None):
    """Run the threshold circuit on one 8-bit input vector.

    Args:
        inputs: sequence of 8 binary values ordered [i7, ..., i0]
            (i7 = highest priority).
        weight_table: optional dict of weight/bias tensors keyed like the
            saved safetensors file. Defaults to the module-level ``weights``
            built above, preserving the original call signature; passing it
            explicitly makes the function consistent with model.py and
            usable without module state.

    Returns:
        (y2, y1, y0, v): 3-bit index of the highest active input plus the
        valid flag, each as a plain int in {0, 1}.
    """
    w = weights if weight_table is None else weight_table
    inp = torch.tensor([float(x) for x in inputs])
    # Layer 1: h[k] = 1 iff input ik is the highest active input.
    # Threshold convention: neuron fires when pre-activation >= 0.
    h = []
    for k in range(8):
        hk = int((inp @ w[f'layer1.h{k}.weight'].T + w[f'layer1.h{k}.bias'] >= 0).item())
        h.append(hk)
    h_tensor = torch.tensor([float(x) for x in h])
    # Layer 2: OR-combine the one-hot h vector into index bits + valid flag.
    y2 = int((h_tensor @ w['layer2.y2.weight'].T + w['layer2.y2.bias'] >= 0).item())
    y1 = int((h_tensor @ w['layer2.y1.weight'].T + w['layer2.y1.bias'] >= 0).item())
    y0 = int((h_tensor @ w['layer2.y0.weight'].T + w['layer2.y0.bias'] >= 0).item())
    v = int((h_tensor @ w['layer2.v.weight'].T + w['layer2.v.bias'] >= 0).item())
    return y2, y1, y0, v
58
+
59
# Exhaustive check: every one of the 256 input patterns must encode correctly.
print("Verifying priorityencoder8...")
errors = 0
for val in range(256):
    # Bit 7 of val maps to i7 (inputs[0]), bit 0 to i0 (inputs[7]).
    inputs = [(val >> (7 - j)) & 1 for j in range(8)]
    y2, y1, y0, v = priority_encode(inputs)

    # Ground truth: index of the most significant set bit, or -1 if none.
    highest = max((k for k in range(8) if inputs[7 - k]), default=-1)

    if highest < 0:
        exp_y2 = exp_y1 = exp_y0 = exp_v = 0
    else:
        exp_v = 1
        exp_y2 = (highest >> 2) & 1
        exp_y1 = (highest >> 1) & 1
        exp_y0 = highest & 1

    # y bits are only meaningful when valid is asserted.
    mismatch = v != exp_v or (v == 1 and (y2, y1, y0) != (exp_y2, exp_y1, exp_y0))
    if mismatch:
        errors += 1
        if errors <= 3:
            print(f"ERROR: val={val}, inputs={inputs}, got ({y2},{y1},{y0},{v}), expected ({exp_y2},{exp_y1},{exp_y0},{exp_v})")

if errors == 0:
    print("All 256 test cases passed!")
else:
    print(f"FAILED: {errors} errors")

# Total L1 magnitude of all weights and biases (should match README: 68).
mag = sum(t.abs().sum().item() for t in weights.values())
print(f"Magnitude: {mag:.0f}")
model.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
def load_model(path='model.safetensors'):
    """Load the priority-encoder weight tensors from a safetensors file.

    Returns a dict mapping tensor names (e.g. 'layer1.h0.weight') to tensors.
    """
    return load_file(path)
6
+
7
def priority_encode(i7, i6, i5, i4, i3, i2, i1, i0, weights):
    """8-to-3 priority encoder. Returns (y2, y1, y0, valid)."""
    x = torch.tensor([float(b) for b in (i7, i6, i5, i4, i3, i2, i1, i0)])

    def fires(vec, prefix):
        # Threshold neuron: 1 iff w.vec + b >= 0.
        pre = vec @ weights[f'{prefix}.weight'].T + weights[f'{prefix}.bias']
        return int((pre >= 0).item())

    # Layer 1: one-hot detection of the highest-priority active input.
    h = torch.tensor([float(fires(x, f'layer1.h{k}')) for k in range(8)])

    # Layer 2: OR-combine into the three index bits and the valid flag.
    y2, y1, y0, v = (fires(h, f'layer2.{name}') for name in ('y2', 'y1', 'y0', 'v'))
    return y2, y1, y0, v
23
+
24
if __name__ == '__main__':
    # Smoke test: spot-check a few powers of two plus the all-ones pattern.
    w = load_model()
    print('Priority Encoder 8 (selected tests)')
    for val in (0, 1, 2, 4, 8, 16, 32, 64, 128, 255):
        bits = [(val >> (7 - j)) & 1 for j in range(8)]
        y2, y1, y0, v = priority_encode(*bits, w)
        idx = (y2 << 2) | (y1 << 1) | y0
        print(f' {val:3d} ({val:08b}) -> y={idx} ({y2}{y1}{y0}) v={v}')
model.safetensors ADDED
Binary file (2.16 kB). View file