phanerozoic commited on
Commit
267b930
·
verified ·
1 Parent(s): 1731723

Rename from tiny-mod8-verified

Browse files
Files changed (4) hide show
  1. README.md +81 -0
  2. config.json +23 -0
  3. model.py +35 -0
  4. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - modular-arithmetic
9
+ ---
10
+
11
+ # threshold-mod8
12
+
13
+ Computes Hamming weight mod 8 directly on inputs. Single-layer circuit.
14
+
15
+ ## Circuit
16
+
17
+ ```
18
+ x₀ x₁ x₂ x₃ x₄ x₅ x₆ x₇
19
+ │ │ │ │ │ │ │ │
20
+ │ │ │ │ │ │ │ │
21
+ w: 1 1 1 1 1 1 1 -7
22
+ └──┴──┴──┴──┼──┴──┴──┴──┘
23
+ ▼
24
+ ┌─────────┐
25
+ │ b: 0 │
26
+ └─────────┘
27
+ │
28
+ ▼
29
+ HW mod 8
30
+ ```
31
+
32
+ ## Algebraic Insight
33
+
34
+ Position 8 gets weight 1-8 = -7:
35
+
36
+ - Positions 1-7: weight +1
37
+ - Position 8: weight -7
38
+
39
+ ```
40
+ HW=0: sum=0 β†’ 0 mod 8
41
+ ...
42
+ HW=7: sum=7 β†’ 7 mod 8
43
+ HW=8: sum=0 β†’ 0 mod 8 (reset: 1+1+1+1+1+1+1-7=0)
44
+ ```
45
+
46
+ The only non-trivial case is HW=8, which resets to 0.
47
+
48
+ ## Parameters
49
+
50
+ | | |
51
+ |---|---|
52
+ | Weights | [1, 1, 1, 1, 1, 1, 1, -7] |
53
+ | Bias | 0 |
54
+ | Total | 9 parameters |
55
+
56
+ ## Usage
57
+
58
+ ```python
59
+ from safetensors.torch import load_file
60
+ import torch
61
+
62
+ w = load_file('model.safetensors')
63
+
64
+ def mod8(bits):
65
+ inputs = torch.tensor([float(b) for b in bits])
66
+ return int((inputs * w['weight']).sum() + w['bias'])
67
+ ```
68
+
69
+ ## Files
70
+
71
+ ```
72
+ threshold-mod8/
73
+ ├── model.safetensors
74
+ ├── model.py
75
+ ├── config.json
76
+ └── README.md
77
+ ```
78
+
79
+ ## License
80
+
81
+ MIT
config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "threshold_network",
3
+ "task": "mod8_classification",
4
+ "architecture": "8 -> 1",
5
+ "input_size": 8,
6
+ "output_size": 1,
7
+ "num_neurons": 1,
8
+ "num_parameters": 9,
9
+ "modulus": 8,
10
+ "activation": "heaviside",
11
+ "weight_constraints": "integer",
12
+ "weight_pattern": "[1, 1, 1, 1, 1, 1, 1, -7]",
13
+ "verification": {
14
+ "method": "coq_proof",
15
+ "exhaustive": true,
16
+ "inputs_tested": 256
17
+ },
18
+ "accuracy": {
19
+ "all_inputs": "256/256",
20
+ "percentage": 100.0
21
+ },
22
+ "github": "https://github.com/CharlesCNorton/coq-circuits"
23
+ }
model.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Threshold Network for MOD-8 Circuit
3
+
4
+ A formally verified threshold network computing Hamming weight mod 8.
5
+ Uses the algebraic weight pattern [1, 1, 1, 1, 1, 1, 1, -7].
6
+ """
7
+
8
+ import torch
9
+ from safetensors.torch import load_file
10
+
11
+
12
class ThresholdMod8:
    """Single linear unit computing Hamming weight mod 8.

    Relies on the algebraic weight pattern [1, 1, 1, 1, 1, 1, 1, -7]:
    for 8-bit inputs the weighted sum equals the Hamming weight mod 8
    (the only wrap-around case, HW=8, sums to 0). No activation is
    applied; the raw affine sum is returned as a 0-dim tensor.
    """

    def __init__(self, weights_dict):
        # weights_dict carries 'weight' and 'bias' tensors, as produced
        # by safetensors' load_file.
        self.weight = weights_dict['weight']
        self.bias = weights_dict['bias']

    def __call__(self, bits):
        # Coerce the bit sequence (any iterable of 0/1 values) to a
        # float tensor, then take the dot product plus bias.
        vec = torch.tensor([float(b) for b in bits])
        return (vec * self.weight).sum() + self.bias

    @classmethod
    def from_safetensors(cls, path="model.safetensors"):
        # Alternate constructor: read the tensors straight from disk.
        return cls(load_file(path))
25
+
26
+
27
if __name__ == "__main__":
    # Smoke test: sweep every Hamming weight from 0 through 8 using the
    # canonical input [1]*hw + [0]*(8-hw) and print the raw weighted sum
    # next to the expected residue.
    model = ThresholdMod8(load_file("model.safetensors"))

    print("MOD-8 Circuit Tests:")
    for hw in range(9):
        pattern = [1] * hw + [0] * (8 - hw)
        out = model(pattern).item()
        print(f"HW={hw}: weighted_sum={out:.0f}, HW mod 8 = {hw % 8}")
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44a0290658bd88ba3c0b3d387c2e159270787c0db94e9ea8745cdeb639c352a6
3
+ size 164