CharlesCNorton committed on
Commit
3f960d8
·
0 Parent(s):

Add 2-to-4 one-hot encoder threshold circuit

Browse files

4 neurons, 1 layer, 12 parameters, magnitude 12.

Files changed (6) hide show
  1. .gitattributes +1 -0
  2. README.md +93 -0
  3. config.json +9 -0
  4. create_safetensors.py +85 -0
  5. model.py +24 -0
  6. model.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - encoding
9
+ ---
10
+
11
+ # threshold-onehot-encoder
12
+
13
+ 2-to-4 one-hot encoder. Converts a 2-bit binary value to a 4-bit one-hot representation.
14
+
15
+ ## Function
16
+
17
+ onehot_encode(a1, a0) -> (y3, y2, y1, y0)
18
+
19
+ Exactly one output bit is set, corresponding to the input value.
20
+
21
+ ## Truth Table
22
+
23
+ | a1 | a0 | y3 | y2 | y1 | y0 | Value |
24
+ |----|----|----|----|----|-----|-------|
25
+ | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26
+ | 0 | 1 | 0 | 0 | 1 | 0 | 1 |
27
+ | 1 | 0 | 0 | 1 | 0 | 0 | 2 |
28
+ | 1 | 1 | 1 | 0 | 0 | 0 | 3 |
29
+
30
+ ## Architecture
31
+
32
+ Single-layer implementation using threshold logic:
33
+
34
+ ```
35
+ a1 a0
36
+ │ │
37
+ ┌───┴─────┴───┐
38
+ │ │
39
+ ▼ ▼ ▼ ▼
40
+ ┌───┬───┬───┬───┐
41
+ │y3 │y2 │y1 │y0 │ Layer 1
42
+ │AND│A·B̄│Ā·B│NOR│
43
+ └───┴───┴───┴───┘
44
+ │ │ │ │
45
+ ▼ ▼ ▼ ▼
46
+ ```
47
+
48
+ Each output is a single threshold neuron:
49
+ - y0 = NOR(a1, a0): w=[-1,-1], b=0
50
+ - y1 = NOT(a1) AND a0: w=[-1,1], b=-1
51
+ - y2 = a1 AND NOT(a0): w=[1,-1], b=-1
52
+ - y3 = a1 AND a0: w=[1,1], b=-2
53
+
54
+ ## Parameters
55
+
56
+ | | |
57
+ |---|---|
58
+ | Inputs | 2 |
59
+ | Outputs | 4 |
60
+ | Neurons | 4 |
61
+ | Layers | 1 |
62
+ | Parameters | 12 |
63
+ | Magnitude | 12 |
64
+
65
+ ## Usage
66
+
67
+ ```python
68
+ from safetensors.torch import load_file
69
+ import torch
70
+
71
+ w = load_file('model.safetensors')
72
+
73
+ def onehot(a1, a0):
74
+ inp = torch.tensor([float(a1), float(a0)])
75
+ y0 = int((inp @ w['y0.weight'].T + w['y0.bias'] >= 0).item())
76
+ y1 = int((inp @ w['y1.weight'].T + w['y1.bias'] >= 0).item())
77
+ y2 = int((inp @ w['y2.weight'].T + w['y2.bias'] >= 0).item())
78
+ y3 = int((inp @ w['y3.weight'].T + w['y3.bias'] >= 0).item())
79
+ return y3, y2, y1, y0
80
+
81
+ # onehot(1, 0) = (0, 1, 0, 0) # value 2
82
+ ```
83
+
84
+ ## Applications
85
+
86
+ - Address decoding
87
+ - Memory select lines
88
+ - State machine encoding
89
+ - Neural network input preprocessing
90
+
91
+ ## License
92
+
93
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "threshold-onehot-encoder",
3
+ "description": "2-to-4 one-hot encoder",
4
+ "inputs": 2,
5
+ "outputs": 4,
6
+ "neurons": 4,
7
+ "layers": 1,
8
+ "parameters": 12
9
+ }
create_safetensors.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from safetensors.torch import save_file

# One-Hot Encoder (2-to-4)
# Inputs: a1, a0 (binary value 0-3)
# Outputs: y3, y2, y1, y0 (one-hot encoding)
#
# a1 a0 | y3 y2 y1 y0
# ------+------------
#  0  0 |  0  0  0  1
#  0  1 |  0  0  1  0
#  1  0 |  0  1  0  0
#  1  1 |  1  0  0  0
#
# Single-layer threshold implementation: each output is one neuron that
# fires (pre-activation >= 0) exactly for its own input code.
#   y0 = NOR(a1, a0)      iff a1 + a0 =  0   -> w=[-1,-1], b= 0
#   y1 = NOT(a1) AND a0   iff a0 - a1 >= 1   -> w=[-1, 1], b=-1
#   y2 = a1 AND NOT(a0)   iff a1 - a0 >= 1   -> w=[ 1,-1], b=-1
#   y3 = a1 AND a0        iff a1 + a0 >= 2   -> w=[ 1, 1], b=-2

# (weight row, bias) per neuron; insertion order y0..y3 is preserved below.
_NEURONS = {
    'y0': ([-1.0, -1.0], 0.0),   # NOR: neither input is 1
    'y1': ([-1.0, 1.0], -1.0),   # NOT(a1) AND a0
    'y2': ([1.0, -1.0], -1.0),   # a1 AND NOT(a0)
    'y3': ([1.0, 1.0], -2.0),    # AND: both inputs are 1
}

weights = {}
for _name, (_row, _bias) in _NEURONS.items():
    weights[f'{_name}.weight'] = torch.tensor([_row], dtype=torch.float32)
    weights[f'{_name}.bias'] = torch.tensor([_bias], dtype=torch.float32)

save_file(weights, 'model.safetensors')
40
+
41
def onehot_encode(a1, a0, w=None):
    """Evaluate the 2-to-4 one-hot encoder with threshold neurons.

    Args:
        a1: high-order input bit (0 or 1).
        a0: low-order input bit (0 or 1).
        w: optional mapping of '<name>.weight' / '<name>.bias' tensors;
           defaults to the module-level ``weights`` dict built above, so
           existing two-argument callers are unaffected.

    Returns:
        Tuple (y3, y2, y1, y0) of ints; exactly one bit is 1.
    """
    if w is None:
        w = weights  # fall back to the tensors defined in this script
    inp = torch.tensor([float(a1), float(a0)])

    # Each neuron fires when its pre-activation (w @ x + b) crosses 0.
    y0 = int((inp @ w['y0.weight'].T + w['y0.bias'] >= 0).item())
    y1 = int((inp @ w['y1.weight'].T + w['y1.bias'] >= 0).item())
    y2 = int((inp @ w['y2.weight'].T + w['y2.bias'] >= 0).item())
    y3 = int((inp @ w['y3.weight'].T + w['y3.bias'] >= 0).item())

    return y3, y2, y1, y0
50
+
51
def reference_onehot(a1, a0):
    """Ground-truth one-hot encoding used to verify the threshold circuit.

    Returns the tuple (y3, y2, y1, y0) where bit k (reading y3 first)
    is 1 exactly when the 2-bit input value equals k.
    """
    code = 2 * a1 + a0
    return tuple(int(code == k) for k in (3, 2, 1, 0))
57
+
58
# Exhaustively check the threshold circuit against the reference encoder
# over all four input codes, then print the truth table and model stats.
print("Verifying One-Hot Encoder (2-to-4)...")
errors = 0
for code in range(4):
    a1, a0 = divmod(code, 2)  # same order as the original nested a1/a0 loops
    result = onehot_encode(a1, a0)
    expected = reference_onehot(a1, a0)
    if result != expected:
        errors += 1
        print(f"ERROR: ({a1},{a0}) -> {result}, expected {expected}")

if errors == 0:
    print("All 4 test cases passed!")
else:
    print(f"FAILED: {errors} errors")

print("\nTruth Table:")
print("a1 a0 | y3 y2 y1 y0 | value")
print("-" * 30)
for code in range(4):
    a1, a0 = divmod(code, 2)
    y3, y2, y1, y0 = onehot_encode(a1, a0)
    print(f" {a1} {a0} | {y3} {y2} {y1} {y0} | {code}")

# Magnitude = sum of absolute values of all weights and biases.
mag = sum(t.abs().sum().item() for t in weights.values())
print(f"\nMagnitude: {mag:.0f}")
print(f"Parameters: {sum(t.numel() for t in weights.values())}")
print(f"Neurons: {len([k for k in weights.keys() if 'weight' in k])}")
model.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from safetensors.torch import load_file
3
+
4
def load_model(path='model.safetensors'):
    """Load the encoder's weight tensors from a safetensors file.

    Args:
        path: filesystem path to the serialized weights.

    Returns:
        Dict mapping tensor names ('y0.weight', 'y0.bias', ...) to tensors,
        as consumed by ``onehot_encode`` below.
    """
    return load_file(path)
6
+
7
def onehot_encode(a1, a0, weights):
    """Convert 2-bit binary to 4-bit one-hot encoding."""
    x = torch.tensor([float(a1), float(a0)])

    # Evaluate each threshold neuron: it fires when w @ x + b >= 0.
    bits = []
    for name in ('y0', 'y1', 'y2', 'y3'):
        act = x @ weights[f'{name}.weight'].T + weights[f'{name}.bias']
        bits.append(int((act >= 0).item()))
    y0, y1, y2, y3 = bits

    return y3, y2, y1, y0
17
+
18
if __name__ == '__main__':
    # Demo: print the full encoding table using the saved weights.
    tensors = load_model()
    print('One-Hot Encoder (2-to-4):')
    for value in range(4):
        a1, a0 = divmod(value, 2)  # high bit, low bit
        y3, y2, y1, y0 = onehot_encode(a1, a0, tensors)
        print(f' {value} ({a1}{a0}) -> {y3}{y2}{y1}{y0}')
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b68212cf121754735cc150c45cdb9587afe702f91a23356c4f210e00ecf60fd5
3
+ size 560