CharlesCNorton commited on
Commit
eca4062
·
0 Parent(s):

4-to-16 binary decoder, magnitude 96

Browse files
Files changed (5) hide show
  1. README.md +69 -0
  2. config.json +9 -0
  3. create_safetensors.py +46 -0
  4. model.py +22 -0
  5. model.safetensors +0 -0
README.md ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - decoder
9
+ ---
10
+
11
+ # threshold-4to16decoder
12
+
13
+ 4-to-16 binary decoder. Converts 4-bit binary input to one-hot 16-bit output.
14
+
15
+ ## Function
16
+
17
+ decode(a3, a2, a1, a0) -> [y0..y15] where yi=1 iff input=i
18
+
19
+ ## One-Hot Encoding
20
+
21
+ | Input | a3a2a1a0 | Output |
22
+ |------:|:--------:|--------|
23
+ | 0 | 0000 | 1000000000000000 |
24
+ | 1 | 0001 | 0100000000000000 |
25
+ | 5 | 0101 | 0000010000000000 |
26
+ | 10 | 1010 | 0000000000100000 |
27
+ | 15 | 1111 | 0000000000000001 |
28
+
29
+ ## Architecture
30
+
31
+ Single layer with 16 neurons. Each neuron yi is a pattern matcher for i:
32
+ - Weight +1 for bit positions that should be 1
33
+ - Weight -1 for bit positions that should be 0
34
+ - Bias = -(number of 1 bits in i)
35
+
36
+ All neurons run in parallel - no dependencies.
37
+
38
+ ## Parameters
39
+
40
+ | | |
41
+ |---|---|
42
+ | Inputs | 4 |
43
+ | Outputs | 16 |
44
+ | Neurons | 16 |
45
+ | Layers | 1 |
46
+ | Parameters | 80 |
47
+ | Magnitude | 96 |
48
+
49
+ ## Usage
50
+
51
+ ```python
52
+ from safetensors.torch import load_file
53
+ import torch
54
+
55
+ w = load_file('model.safetensors')
56
+
57
+ def decode_4to16(a3, a2, a1, a0):
58
+ inp = torch.tensor([float(a3), float(a2), float(a1), float(a0)])
59
+ return [int((inp * w[f'y{i}.weight']).sum() + w[f'y{i}.bias'] >= 0)
60
+ for i in range(16)]
61
+
62
+ # Input 10 -> output 10 is hot
63
+ outputs = decode_4to16(1, 0, 1, 0)
64
+ print(outputs) # [0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0]
65
+ ```
66
+
67
+ ## License
68
+
69
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-4to16decoder",
3
+ "description": "4-to-16 binary decoder as threshold circuit",
4
+ "inputs": 4,
5
+ "outputs": 16,
6
+ "neurons": 16,
7
+ "layers": 1,
8
+ "parameters": 80
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import save_file
3
+
4
weights = {}

# Input order: a3, a2, a1, a0 (MSB to LSB)
# Output: one-hot y0..y15 where yi=1 iff input=i

for idx in range(16):
    # Each neuron is a template matcher for the 4-bit pattern of `idx`:
    # +1 where the pattern has a 1, -1 where it has a 0. Bits are taken
    # MSB-first (a3, a2, a1, a0) to match the documented input order.
    row = [1.0 if (idx >> pos) & 1 else -1.0 for pos in range(3, -1, -1)]
    # Bias = -(popcount of idx): the pre-activation reaches 0 only when
    # every input bit matches the template, and is <= -1 otherwise.
    ones = sum((idx >> pos) & 1 for pos in range(4))
    weights[f'y{idx}.weight'] = torch.tensor([row], dtype=torch.float32)
    weights[f'y{idx}.bias'] = torch.tensor([-float(ones)], dtype=torch.float32)

save_file(weights, 'model.safetensors')
22
+
23
+ # Verify
24
def decode(a3, a2, a1, a0):
    """Evaluate all 16 threshold neurons on one 4-bit input.

    Returns a list of 16 ints (the one-hot output y0..y15) using the
    module-level `weights` dict built above. A neuron fires when its
    weighted sum plus bias is >= 0.
    """
    x = torch.tensor([float(a3), float(a2), float(a1), float(a0)])
    return [
        int((x * weights[f'y{k}.weight']).sum() + weights[f'y{k}.bias'] >= 0)
        for k in range(16)
    ]
31
+
32
# Exhaustively check the decoder against the identity truth table:
# for every input value, exactly the matching output index must be hot.
print("Verifying 4to16decoder...")
errors = 0
for val in range(16):
    bits = [(val >> s) & 1 for s in (3, 2, 1, 0)]
    a3, a2, a1, a0 = bits
    result = decode(a3, a2, a1, a0)
    expected = [int(i == val) for i in range(16)]
    if result != expected:
        errors += 1
        print(f"ERROR: {val} ({a3}{a2}{a1}{a0}) -> {result}")

if errors == 0:
    print("All 16 test cases passed!")

# Total L1 magnitude of all parameters (64 weights + 32 bias units = 96).
mag = sum(t.abs().sum().item() for t in weights.values())
print(f"Magnitude: {mag:.0f}")
model.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
def load_model(path='model.safetensors'):
    """Read the decoder's weight/bias tensors from a safetensors file."""
    tensors = load_file(path)
    return tensors
6
+
7
def decode_4to16(a3, a2, a1, a0, weights):
    """4-to-16 decoder: converts 4-bit binary to one-hot 16-bit output.

    a3..a0 are the input bits, MSB first. `weights` maps 'y{i}.weight'
    and 'y{i}.bias' to the parameters of the 16 threshold neurons; a
    neuron fires (outputs 1) when its weighted sum plus bias is >= 0.
    Returns a list of 16 ints, y0..y15.
    """
    x = torch.tensor([float(a3), float(a2), float(a1), float(a0)])
    return [
        int((x * weights[f'y{k}.weight']).sum() + weights[f'y{k}.bias'] >= 0)
        for k in range(16)
    ]
15
+
16
if __name__ == '__main__':
    # Demo: print the full truth table, one row per input value.
    w = load_model()
    print('4-to-16 Decoder')
    for val in range(16):
        bits = [(val >> s) & 1 for s in (3, 2, 1, 0)]
        outputs = decode_4to16(*bits, w)
        print(f" {val:2d} ({bits[0]}{bits[1]}{bits[2]}{bits[3]}) -> {''.join(map(str, outputs))}")
model.safetensors ADDED
Binary file (2.4 kB). View file