CharlesCNorton
committed on
Commit
·
96d9c09
0
Parent(s):
Add 8-bit magnitude comparator threshold circuit
Browse files. 3 neurons, 2 layers, 37 parameters, magnitude 1024.
- .gitattributes +1 -0
- README.md +101 -0
- config.json +9 -0
- create_safetensors.py +64 -0
- model.py +26 -0
- model.safetensors +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
tags:
|
| 4 |
+
- pytorch
|
| 5 |
+
- safetensors
|
| 6 |
+
- threshold-logic
|
| 7 |
+
- neuromorphic
|
| 8 |
+
- comparison
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# threshold-comparator8bit
|
| 12 |
+
|
| 13 |
+
8-bit magnitude comparator. Compares two 8-bit unsigned values.
|
| 14 |
+
|
| 15 |
+
## Function
|
| 16 |
+
|
| 17 |
+
compare8(A, B) -> (GT, LT, EQ)
|
| 18 |
+
|
| 19 |
+
- GT = 1 if A > B
|
| 20 |
+
- LT = 1 if A < B
|
| 21 |
+
- EQ = 1 if A = B
|
| 22 |
+
|
| 23 |
+
Exactly one output is always active.
|
| 24 |
+
|
| 25 |
+
## Architecture
|
| 26 |
+
|
| 27 |
+
```
|
| 28 |
+
A[7:0] B[7:0]
|
| 29 |
+
│ │
|
| 30 |
+
└───────┬───────┘
|
| 31 |
+
│
|
| 32 |
+
┌──────┴──────┐
|
| 33 |
+
│ │
|
| 34 |
+
▼ ▼
|
| 35 |
+
┌───┐ ┌───┐
|
| 36 |
+
│GT │ │LT │ Layer 1
|
| 37 |
+
│A-B│ │B-A│
|
| 38 |
+
│>=1│ │>=1│
|
| 39 |
+
└───┘ └───┘
|
| 40 |
+
│ │
|
| 41 |
+
└──────┬──────┘
|
| 42 |
+
│
|
| 43 |
+
▼
|
| 44 |
+
┌─────┐
|
| 45 |
+
│ EQ │ Layer 2
|
| 46 |
+
│ NOR │
|
| 47 |
+
└─────┘
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
Uses positional weighting: treats inputs as binary numbers.
|
| 51 |
+
|
| 52 |
+
**GT neuron:** weights A bits positively (128,64,32,16,8,4,2,1), B bits negatively.
|
| 53 |
+
Fires when weighted sum A - B >= 1.
|
| 54 |
+
|
| 55 |
+
**LT neuron:** opposite weights. Fires when B - A >= 1.
|
| 56 |
+
|
| 57 |
+
**EQ neuron:** NOR of GT and LT. Fires when both are zero.
|
| 58 |
+
|
| 59 |
+
## Parameters
|
| 60 |
+
|
| 61 |
+
| | |
|
| 62 |
+
|---|---|
|
| 63 |
+
| Inputs | 16 |
|
| 64 |
+
| Outputs | 3 |
|
| 65 |
+
| Neurons | 3 |
|
| 66 |
+
| Layers | 2 |
|
| 67 |
+
| Parameters | 37 |
|
| 68 |
+
| Magnitude | 1024 |
|
| 69 |
+
|
| 70 |
+
## Truth Table (examples)
|
| 71 |
+
|
| 72 |
+
| A | B | GT | LT | EQ |
|
| 73 |
+
|-----|-----|----|----|---|
|
| 74 |
+
| 0 | 0 | 0 | 0 | 1 |
|
| 75 |
+
| 100 | 50 | 1 | 0 | 0 |
|
| 76 |
+
| 50 | 100 | 0 | 1 | 0 |
|
| 77 |
+
| 255 | 255 | 0 | 0 | 1 |
|
| 78 |
+
|
| 79 |
+
## Usage
|
| 80 |
+
|
| 81 |
+
```python
|
| 82 |
+
from safetensors.torch import load_file
|
| 83 |
+
import torch
|
| 84 |
+
|
| 85 |
+
w = load_file('model.safetensors')
|
| 86 |
+
|
| 87 |
+
def compare(a, b):
|
| 88 |
+
a_bits = [(a >> (7-i)) & 1 for i in range(8)]
|
| 89 |
+
b_bits = [(b >> (7-i)) & 1 for i in range(8)]
|
| 90 |
+
inp = torch.tensor([float(x) for x in a_bits + b_bits])
|
| 91 |
+
gt = int((inp @ w['gt.weight'].T + w['gt.bias'] >= 0).item())
|
| 92 |
+
lt = int((inp @ w['lt.weight'].T + w['lt.bias'] >= 0).item())
|
| 93 |
+
eq = int((torch.tensor([float(gt), float(lt)]) @ w['eq.weight'].T + w['eq.bias'] >= 0).item())
|
| 94 |
+
return gt, lt, eq
|
| 95 |
+
|
| 96 |
+
# compare(200, 100) = (1, 0, 0) # 200 > 100
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
## License
|
| 100 |
+
|
| 101 |
+
MIT
|
config.json
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "threshold-comparator8bit",
|
| 3 |
+
"description": "8-bit magnitude comparator",
|
| 4 |
+
"inputs": 16,
|
| 5 |
+
"outputs": 3,
|
| 6 |
+
"neurons": 3,
|
| 7 |
+
"layers": 2,
|
| 8 |
+
"parameters": 37
|
| 9 |
+
}
|
create_safetensors.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from safetensors.torch import save_file
|
| 3 |
+
|
| 4 |
+
# 8-bit magnitude comparator realized as threshold-logic neurons.
#
# Inputs (16): A7..A0 followed by B7..B0, most significant bit first.
# Outputs (3): GT (A > B), LT (A < B), EQ (A == B).
#
# Positional weighting: bit i of an operand contributes 2^i to a linear
# sum, so a single neuron computes A - B (or B - A) exactly.
#   GT fires when A - B >= 1
#   LT fires when B - A >= 1
#   EQ is the NOR of GT and LT (fires only when both stay silent)

# Powers of two, MSB first: 128, 64, ..., 1.
_PLACE = [float(1 << p) for p in range(7, -1, -1)]

weights = {}

# Layer 1 — GT neuron: +A bits, -B bits; bias -1 means "fire when A - B >= 1".
weights['gt.weight'] = torch.tensor([_PLACE + [-v for v in _PLACE]],
                                    dtype=torch.float32)
weights['gt.bias'] = torch.tensor([-1.0], dtype=torch.float32)

# Layer 1 — LT neuron: mirror image of GT (-A bits, +B bits).
weights['lt.weight'] = torch.tensor([[-v for v in _PLACE] + _PLACE],
                                    dtype=torch.float32)
weights['lt.bias'] = torch.tensor([-1.0], dtype=torch.float32)

# Layer 2 — EQ neuron: NOR(GT, LT); bias 0 so it fires iff both inputs are 0.
weights['eq.weight'] = torch.tensor([[-1.0, -1.0]], dtype=torch.float32)
weights['eq.bias'] = torch.tensor([0.0], dtype=torch.float32)
|
| 29 |
+
|
| 30 |
+
# Persist the hand-constructed comparator weights to disk in safetensors format.
save_file(weights, 'model.safetensors')
|
| 31 |
+
|
| 32 |
+
def compare8(a, b):
    """Evaluate the comparator network on unsigned 8-bit values a and b.

    Returns (gt, lt, eq) as ints; exactly one of the three is 1.
    """
    # Unpack both operands into bits, MSB first, matching the weight layout.
    bits = [float((a >> s) & 1) for s in range(7, -1, -1)]
    bits += [float((b >> s) & 1) for s in range(7, -1, -1)]
    inp = torch.tensor(bits)

    # Layer 1: each threshold neuron fires when its weighted sum + bias >= 0.
    gt = int((inp @ weights['gt.weight'].T + weights['gt.bias'] >= 0).item())
    lt = int((inp @ weights['lt.weight'].T + weights['lt.bias'] >= 0).item())

    # Layer 2: EQ is the NOR of the two layer-1 outputs.
    layer1 = torch.tensor([float(gt), float(lt)])
    eq = int((layer1 @ weights['eq.weight'].T + weights['eq.bias'] >= 0).item())
    return gt, lt, eq
|
| 42 |
+
|
| 43 |
+
print("Verifying comparator8bit...")
errors = 0
# Exhaustive check over every (A, B) pair in 0..255 x 0..255.
for a in range(256):
    for b in range(256):
        got = compare8(a, b)
        expected = (int(a > b), int(a < b), int(a == b))
        if got != expected:
            errors += 1
            if errors <= 5:  # cap the noise; five failures is plenty of signal
                gt, lt, eq = got
                exp_gt, exp_lt, exp_eq = expected
                print(f"ERROR: A={a}, B={b}, got ({gt},{lt},{eq}), expected ({exp_gt},{exp_lt},{exp_eq})")

if errors:
    print(f"FAILED: {errors} errors")
else:
    print("All 65536 test cases passed!")

# Summary statistics for the model card.
mag = sum(t.abs().sum().item() for t in weights.values())
print(f"Magnitude: {mag:.0f}")
print(f"Parameters: {sum(t.numel() for t in weights.values())}")
print(f"Neurons: {len([k for k in weights.keys() if 'weight' in k])}")
|
model.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from safetensors.torch import load_file
|
| 3 |
+
|
| 4 |
+
def load_model(path='model.safetensors'):
    """Load the comparator weights from a safetensors file.

    Returns a dict mapping tensor names to torch tensors.
    """
    tensors = load_file(path)
    return tensors
|
| 6 |
+
|
| 7 |
+
def compare8(a, b, weights):
    """Compare two unsigned 8-bit integers with the threshold network.

    Returns a tuple (GT, LT, EQ) of ints; exactly one entry is 1.
    """
    # Serialize both operands MSB-first to match the weight vector layout.
    bit_vec = [float((a >> s) & 1) for s in range(7, -1, -1)]
    bit_vec += [float((b >> s) & 1) for s in range(7, -1, -1)]
    inp = torch.tensor(bit_vec)

    # Layer 1: GT and LT threshold neurons (fire when sum + bias >= 0).
    gt = int((inp @ weights['gt.weight'].T + weights['gt.bias'] >= 0).item())
    lt = int((inp @ weights['lt.weight'].T + weights['lt.bias'] >= 0).item())

    # Layer 2: EQ = NOR(GT, LT).
    hidden = torch.tensor([float(gt), float(lt)])
    eq = int((hidden @ weights['eq.weight'].T + weights['eq.bias'] >= 0).item())
    return gt, lt, eq
|
| 18 |
+
|
| 19 |
+
if __name__ == '__main__':
    w = load_model()
    print('8-bit Magnitude Comparator:')
    # Representative pairs, including the boundary case 127 vs 128.
    for a, b in [(0, 0), (1, 0), (0, 1), (127, 128), (255, 255), (200, 100)]:
        gt, lt, eq = compare8(a, b, w)
        if gt:
            rel = '>'
        elif lt:
            rel = '<'
        else:
            rel = '='
        print(f' {a:3d} {rel} {b:3d} (GT={gt}, LT={lt}, EQ={eq})')
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2b4fa4ab4b9964a96a9ff608677af613acbb387d32a6d9a79129fd9a6811296
|
| 3 |
+
size 540
|