phanerozoic committed on
Commit
6dfdbee
Β·
verified Β·
1 Parent(s): e70b80d

Rename from tiny-mod6-verified

Browse files
Files changed (4) hide show
  1. README.md +83 -0
  2. config.json +23 -0
  3. model.py +43 -0
  4. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - modular-arithmetic
9
+ ---
10
+
11
+ # threshold-mod6
12
+
13
+ Computes Hamming weight mod 6 directly on inputs. Single-layer circuit.
14
+
15
+ ## Circuit
16
+
17
+ ```
18
+ xβ‚€ x₁ xβ‚‚ x₃ xβ‚„ xβ‚… x₆ x₇
19
+ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚
20
+ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚
21
+ w: 1 1 1 1 1 -5 1 1
22
+ β””β”€β”€β”΄β”€β”€β”΄β”€β”€β”΄β”€β”€β”Όβ”€β”€β”΄β”€β”€β”΄β”€β”€β”΄β”€β”€β”˜
23
+ β–Ό
24
+ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”
25
+ β”‚ b: 0 β”‚
26
+ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
27
+ β”‚
28
+ β–Ό
29
+ HW mod 6
30
+ ```
31
+
32
+ ## Algebraic Insight
33
+
34
+ For 8 inputs and mod 6, position 6 gets weight 1-6 = -5:
35
+
36
+ - Positions 1-5: weight +1
37
+ - Position 6: weight -5 (reset: 1+1+1+1+1-5 = 0)
38
+ - Positions 7-8: weight +1
39
+
40
+ ```
41
+ HW=0: sum=0 β†’ 0 mod 6
42
+ HW=1: sum=1 β†’ 1 mod 6
43
+ ...
44
+ HW=5: sum=5 β†’ 5 mod 6
45
+ HW=6: sum=0 β†’ 0 mod 6 (reset)
46
+ HW=7: sum=1 β†’ 1 mod 6
47
+ HW=8: sum=2 β†’ 2 mod 6
48
+ ```
49
+
50
+ ## Parameters
51
+
52
+ | | |
53
+ |---|---|
54
+ | Weights | [1, 1, 1, 1, 1, -5, 1, 1] |
55
+ | Bias | 0 |
56
+ | Total | 9 parameters |
57
+
58
+ ## Usage
59
+
60
+ ```python
61
+ from safetensors.torch import load_file
62
+ import torch
63
+
64
+ w = load_file('model.safetensors')
65
+
66
+ def mod6(bits):
67
+ inputs = torch.tensor([float(b) for b in bits])
68
+ return int((inputs * w['weight']).sum() + w['bias'])
69
+ ```
70
+
71
+ ## Files
72
+
73
+ ```
74
+ threshold-mod6/
75
+ β”œβ”€β”€ model.safetensors
76
+ β”œβ”€β”€ model.py
77
+ β”œβ”€β”€ config.json
78
+ └── README.md
79
+ ```
80
+
81
+ ## License
82
+
83
+ MIT
config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "threshold_network",
3
+ "task": "mod6_classification",
4
+ "architecture": "8 -> 1",
5
+ "input_size": 8,
6
+ "output_size": 1,
7
+ "num_neurons": 1,
8
+ "num_parameters": 9,
9
+ "modulus": 6,
10
+ "activation": "heaviside",
11
+ "weight_constraints": "integer",
12
+ "weight_pattern": "[1, 1, 1, 1, 1, -5, 1, 1]",
13
+ "verification": {
14
+ "method": "coq_proof",
15
+ "exhaustive": true,
16
+ "inputs_tested": 256
17
+ },
18
+ "accuracy": {
19
+ "all_inputs": "256/256",
20
+ "percentage": 100.0
21
+ },
22
+ "github": "https://github.com/CharlesCNorton/coq-circuits"
23
+ }
model.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Threshold Network for MOD-6 Circuit
3
+
4
+ A formally verified threshold network computing Hamming weight mod 6.
5
+ Uses the algebraic weight pattern [1, 1, 1, 1, 1, -5, 1, 1].
6
+ """
7
+
8
+ import torch
9
+ from safetensors.torch import load_file
10
+
11
+
12
class ThresholdMod6:
    """
    Single-neuron threshold-logic circuit for Hamming weight mod 6.

    Wraps the weight vector and bias loaded from a safetensors
    checkpoint.  The intended weight pattern is (1, 1, 1, 1, 1, 1-m, 1, 1)
    with m = 6 at position 6, so every weight is congruent to 1 mod 6.
    """

    def __init__(self, weights_dict):
        # 'weight' and 'bias' are the tensor names stored in the checkpoint.
        self.weight = weights_dict['weight']
        self.bias = weights_dict['bias']

    def __call__(self, bits):
        """
        Return the raw weighted sum for a sequence of bits.

        NOTE(review): this is the linear output (a 0-dim tensor), not a
        value reduced modulo 6; for the all-ones-prefix patterns used in
        the __main__ smoke test it happens to land in [0, 6).
        """
        x = torch.tensor(list(map(float, bits)))
        return (self.weight * x).sum() + self.bias

    @classmethod
    def from_safetensors(cls, path="model.safetensors"):
        """Alternate constructor: build the model from a safetensors file."""
        return cls(load_file(path))
31
+
32
+
33
if __name__ == "__main__":
    # Use the class's own alternate constructor instead of duplicating the
    # load_file(...) call inline — same behavior, single loading path.
    model = ThresholdMod6.from_safetensors("model.safetensors")

    print("MOD-6 Circuit Tests:")
    print("-" * 40)
    for hw in range(9):
        # Prefix pattern: the first `hw` of 8 bits set.  For these inputs
        # the raw weighted sum lands in [0, 6) and equals HW mod 6.
        bits = [1] * hw + [0] * (8 - hw)
        out = model(bits).item()
        expected = hw % 6
        print(f"HW={hw}: weighted_sum={out:.0f}, HW mod 6 = {expected}")
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71dffc100e820dcd2a3b86ddc80e67989b7fa20305c9af9973cf01fcf1cb4258
3
+ size 164