Upload folder using huggingface_hub
Browse files- README.md +16 -0
- config.json +9 -0
- create_safetensors.py +57 -0
- model.safetensors +3 -0
README.md
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
tags:
|
| 4 |
+
- pytorch
|
| 5 |
+
- safetensors
|
| 6 |
+
- threshold-logic
|
| 7 |
+
- neuromorphic
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# threshold-priority-arbiter
|
| 11 |
+
|
| 12 |
+
Threshold-logic implementation of a fixed-priority arbiter circuit.
|
| 13 |
+
|
| 14 |
+
## License
|
| 15 |
+
|
| 16 |
+
MIT
|
config.json
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "threshold-priority-arbiter",
|
| 3 |
+
"description": "priority-arbiter circuit",
|
| 4 |
+
"inputs": 8,
|
| 5 |
+
"outputs": 8,
|
| 6 |
+
"neurons": 8,
|
| 7 |
+
"layers": 2,
|
| 8 |
+
"parameters": 64
|
| 9 |
+
}
|
create_safetensors.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from safetensors.torch import save_file
|
| 3 |
+
|
| 4 |
+
# Registry of tensors that save_file() below serializes to model.safetensors.
weights = {}

# 4-input Fixed Priority Arbiter
# Priority: REQ0 > REQ1 > REQ2 > REQ3

def add_neuron(name, w_list, bias):
    """Record one threshold neuron in the module-level `weights` dict.

    Stores a 1xN float32 weight row under '<name>.weight' and a scalar
    float32 bias under '<name>.bias'.
    """
    weight_row = torch.tensor([w_list], dtype=torch.float32)
    bias_vec = torch.tensor([bias], dtype=torch.float32)
    weights[name + '.weight'] = weight_row
    weights[name + '.bias'] = bias_vec
|
| 12 |
+
|
| 13 |
+
# Weight-row column order: [REQ3, REQ2, REQ1, REQ0] (REQ0 is the last column).

# Grant 0: REQ0 (highest priority -- granted whenever asserted)
add_neuron('g0', [0.0, 0.0, 0.0, 1.0], -1.0)

# Grant 1: REQ1 AND NOT REQ0
add_neuron('g1', [0.0, 0.0, 1.0, -1.0], 0.0)

# Grant 2: REQ2 AND NOT REQ1 AND NOT REQ0
add_neuron('g2', [0.0, 1.0, -1.0, -1.0], 1.0)

# Grant 3: REQ3 AND NOT REQ2 AND NOT REQ1 AND NOT REQ0
add_neuron('g3', [1.0, -1.0, -1.0, -1.0], 2.0)

# NOTE(review): these biases assume a step-activation convention that is not
# shown in this file. Under "fire when w.x + b >= 0", g1..g3 would also fire
# on the all-zero input (0+0.0, 0+1.0, 0+2.0 are all >= 0), contradicting the
# AND-NOT comments above; under strict "> 0", g0 never fires (1 - 1.0 = 0).
# Confirm the consumer's threshold rule -- the verification below exercises
# only the pure-Python reference model, never these tensors.
save_file(weights, 'model.safetensors')
|
| 27 |
+
|
| 28 |
+
def priority_arb(r3, r2, r1, r0):
    """Pure-Python reference model of the fixed-priority arbiter.

    REQ0 has the highest priority, REQ3 the lowest. Returns the grant
    outputs as a 4-tuple (g3, g2, g1, g0): one-hot for the highest-priority
    asserted request, or all zeros when no request is asserted.
    """
    one_hot = (
        (r0, (0, 0, 0, 1)),
        (r1, (0, 0, 1, 0)),
        (r2, (0, 1, 0, 0)),
        (r3, (1, 0, 0, 0)),
    )
    for request, grant in one_hot:
        if request:
            return grant
    return 0, 0, 0, 0
|
| 38 |
+
|
| 39 |
+
# Exhaustively check the reference model over all 16 request patterns:
# exactly one grant whenever any request is asserted, none otherwise.
print("Verifying priority arbiter...")
errors = 0
for pattern in range(16):
    bits = [(pattern >> shift) & 1 for shift in (3, 2, 1, 0)]
    grant_total = sum(priority_arb(*bits))
    expected = 1 if pattern else 0
    if grant_total != expected:
        errors += 1

if errors:
    print(f"FAILED: {errors} errors")
else:
    print("All 16 test cases passed!")

# Summary statistics over every stored tensor (weight rows and biases).
mag = sum(t.abs().sum().item() for t in weights.values())
print(f"Magnitude: {mag:.0f}")
print(f"Parameters: {sum(t.numel() for t in weights.values())}")
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a2c5721d8e0b4e71a725b313b52fdc4aa385badc254ca9358f6f0f8873279d8d
|
| 3 |
+
size 592
|