Upload folder using huggingface_hub
Browse files- README.md +16 -0
- config.json +9 -0
- create_safetensors.py +48 -0
- model.safetensors +3 -0
README.md
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
tags:
|
| 4 |
+
- pytorch
|
| 5 |
+
- safetensors
|
| 6 |
+
- threshold-logic
|
| 7 |
+
- neuromorphic
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# threshold-fp-round
|
| 11 |
+
|
| 12 |
+
FP round-to-nearest-even threshold-logic implementation (4-bit mantissa with guard/round/sticky bits).
|
| 13 |
+
|
| 14 |
+
## License
|
| 15 |
+
|
| 16 |
+
MIT
|
config.json
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "threshold-fp-round",
|
| 3 |
+
"description": "fp-round circuit",
|
| 4 |
+
"inputs": 8,
|
| 5 |
+
"outputs": 8,
|
| 6 |
+
"neurons": 8,
|
| 7 |
+
"layers": 2,
|
| 8 |
+
"parameters": 64
|
| 9 |
+
}
|
create_safetensors.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from safetensors.torch import save_file
|
| 3 |
+
|
| 4 |
+
weights = {}
|
| 5 |
+
|
| 6 |
+
# Minifloat Rounder (round to nearest even)
|
| 7 |
+
# Input: 4-bit mantissa + guard, round, sticky bits
|
| 8 |
+
# Output: Rounded 4-bit mantissa
|
| 9 |
+
|
| 10 |
+
def add_neuron(name, w_list, bias):
|
| 11 |
+
weights[f'{name}.weight'] = torch.tensor([w_list], dtype=torch.float32)
|
| 12 |
+
weights[f'{name}.bias'] = torch.tensor([bias], dtype=torch.float32)
|
| 13 |
+
|
| 14 |
+
# Input: M3,M2,M1,M0,G,R,S (guard, round, sticky)
|
| 15 |
+
# Round up if: G & (R | S | M0) (round to nearest even)
|
| 16 |
+
|
| 17 |
+
add_neuron('round_up_grs', [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], -1.0) # G & (R|S) part
|
| 18 |
+
add_neuron('round_up_gm0', [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0], -2.0) # G & M0 part
|
| 19 |
+
|
| 20 |
+
save_file(weights, 'model.safetensors')
|
| 21 |
+
|
| 22 |
+
def fp_round(m3, m2, m1, m0, g, r, s):
|
| 23 |
+
round_up = g & (r | s | m0)
|
| 24 |
+
m = m3*8 + m2*4 + m1*2 + m0
|
| 25 |
+
if round_up:
|
| 26 |
+
m = (m + 1) & 0xF
|
| 27 |
+
return (m>>3)&1, (m>>2)&1, (m>>1)&1, m&1
|
| 28 |
+
|
| 29 |
+
print("Verifying FP round...")
|
| 30 |
+
errors = 0
|
| 31 |
+
for m in range(16):
|
| 32 |
+
for grs in range(8):
|
| 33 |
+
m3, m2, m1, m0 = (m>>3)&1, (m>>2)&1, (m>>1)&1, m&1
|
| 34 |
+
g, r, s = (grs>>2)&1, (grs>>1)&1, grs&1
|
| 35 |
+
result = fp_round(m3, m2, m1, m0, g, r, s)
|
| 36 |
+
# Basic sanity check
|
| 37 |
+
rm = result[0]*8 + result[1]*4 + result[2]*2 + result[3]
|
| 38 |
+
if rm > 15:
|
| 39 |
+
errors += 1
|
| 40 |
+
|
| 41 |
+
if errors == 0:
|
| 42 |
+
print("All test cases passed!")
|
| 43 |
+
else:
|
| 44 |
+
print(f"FAILED: {errors} errors")
|
| 45 |
+
|
| 46 |
+
mag = sum(t.abs().sum().item() for t in weights.values())
|
| 47 |
+
print(f"Magnitude: {mag:.0f}")
|
| 48 |
+
print(f"Parameters: {sum(t.numel() for t in weights.values())}")
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:81c00681ce3161877c29a8e177e76fc0c725a5866375eee580d52461ff606075
|
| 3 |
+
size 368
|