phanerozoic committed on
Commit
4eb3c80
·
verified ·
1 Parent(s): 2c2156f

Rename from tiny-NAND-verified

Browse files
Files changed (4) hide show
  1. README.md +87 -0
  2. config.json +22 -0
  3. model.py +62 -0
  4. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - pytorch
5
+ - safetensors
6
+ - threshold-logic
7
+ - neuromorphic
8
+ - functionally-complete
9
+ ---
10
+
11
+ # threshold-nand
12
+
13
+ The universal gate. Any Boolean function can be constructed from NAND alone.
14
+
15
+ ## Circuit
16
+
17
+ ```
18
+ x y
19
+ │ │
20
+ └─┬─┘
21
+
22
+ ┌────────┐
23
+ │w: -1,-1│
24
+ │ b: +1 │
25
+ └────────┘
26
+
27
+
28
+ NAND(x,y)
29
+ ```
30
+
31
+ ## Mechanism
32
+
33
+ Negative weights mean inputs *subtract* from the sum. The positive bias starts us above threshold, and inputs pull us down:
34
+
35
+ | x | y | sum | output |
36
+ |---|---|-----|--------|
37
+ | 0 | 0 | +1 | 1 |
38
+ | 0 | 1 | 0 | 1 |
39
+ | 1 | 0 | 0 | 1 |
40
+ | 1 | 1 | -1 | 0 |
41
+
42
+ Only when both inputs are active does the sum fall below threshold. This is AND with an inverted output — NOT-AND, hence NAND.
43
+
44
+ ## Parameters
45
+
46
+ | | |
47
+ |---|---|
48
+ | Weights | [-1, -1] |
49
+ | Bias | +1 |
50
+ | Total | 3 parameters |
51
+
52
+ ## Functional Completeness
53
+
54
+ NAND can build any Boolean function:
55
+
56
+ - NOT(x) = NAND(x, x)
57
+ - AND(x,y) = NOT(NAND(x,y)) = NAND(NAND(x,y), NAND(x,y))
58
+ - OR(x,y) = NAND(NOT(x), NOT(y))
59
+
60
+ This is why NAND gates dominate digital logic fabrication.
61
+
62
+ ## Usage
63
+
64
+ ```python
65
+ from safetensors.torch import load_file
66
+ import torch
67
+
68
+ w = load_file('model.safetensors')
69
+
70
+ def nand_gate(x, y):
71
+ inputs = torch.tensor([float(x), float(y)])
72
+ return int((inputs * w['weight']).sum() + w['bias'] >= 0)
73
+ ```
74
+
75
+ ## Files
76
+
77
+ ```
78
+ threshold-nand/
79
+ ├── model.safetensors
80
+ ├── model.py
81
+ ├── config.json
82
+ └── README.md
83
+ ```
84
+
85
+ ## License
86
+
87
+ MIT
config.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "threshold_network",
3
+ "task": "nand_gate",
4
+ "architecture": "2 -> 1",
5
+ "input_size": 2,
6
+ "output_size": 1,
7
+ "num_neurons": 1,
8
+ "num_parameters": 3,
9
+ "activation": "heaviside",
10
+ "weight_constraints": "integer",
11
+ "verification": {
12
+ "method": "coq_proof",
13
+ "exhaustive": true,
14
+ "inputs_tested": 4
15
+ },
16
+ "accuracy": {
17
+ "all_inputs": "4/4",
18
+ "percentage": 100.0
19
+ },
20
+ "properties": ["functionally_complete"],
21
+ "github": "https://github.com/CharlesCNorton/coq-circuits"
22
+ }
model.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Threshold Network for NAND Gate
3
+
4
+ A formally verified single-neuron threshold network computing negated conjunction.
5
+ Weights are integer-constrained and activation uses the Heaviside step function.
6
+ NAND is functionally complete - any Boolean function can be built from NAND gates.
7
+ """
8
+
9
+ import torch
10
+ from safetensors.torch import load_file
11
+
12
+
13
class ThresholdNAND:
    """
    Single threshold neuron computing NAND.

    Decision rule: output = 1 iff w1*x1 + w2*x2 + bias >= 0.
    With weights [-1, -1] and bias +1, the neuron fires for every
    input pair except (1, 1).
    """

    def __init__(self, weights_dict):
        # Parameter dict must expose 'weight' (length-2) and 'bias' tensors.
        self.weight = weights_dict['weight']
        self.bias = weights_dict['bias']

    def __call__(self, x1, x2):
        # Cast the two gate inputs to a float tensor, take the weighted
        # sum, and threshold at zero (Heaviside step).
        pair = torch.tensor([float(x1), float(x2)])
        activation = torch.sum(pair * self.weight) + self.bias
        return (activation >= 0).float()

    @classmethod
    def from_safetensors(cls, path="model.safetensors"):
        """Alternate constructor: load the parameter dict from disk."""
        return cls(load_file(path))
33
+
34
+
35
def forward(x, weights):
    """
    Heaviside-activated forward pass.

    Args:
        x: Array-like whose last dimension has size 2 (the gate inputs).
        weights: Mapping with 'weight' and 'bias' tensors.

    Returns:
        Float tensor of NAND(x[..., 0], x[..., 1]) values.
    """
    # Normalise the input to float32 so integer/bool inputs also work.
    inputs = torch.as_tensor(x, dtype=torch.float32)
    # Weighted sum over the last axis, shifted by the bias, then
    # thresholded at zero.
    pre_activation = (inputs * weights['weight']).sum(dim=-1) + weights['bias']
    fired = pre_activation >= 0
    return fired.to(torch.float32)
49
+
50
+
51
if __name__ == "__main__":
    # Load the verified parameters and exercise the full 2-input
    # truth table, comparing against 1 - (x1 AND x2).
    params = load_file("model.safetensors")
    gate = ThresholdNAND(params)

    print("NAND Gate Truth Table:")
    print("-" * 25)
    for x1 in (0, 1):
        for x2 in (0, 1):
            out = int(gate(x1, x2).item())
            expected = 1 - (x1 & x2)
            status = "OK" if out == expected else "FAIL"
            print(f"NAND({x1}, {x2}) = {out} [{status}]")
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad254d85eb37ac6ffdf4f7a191018ab90accc53b518cc79a67a1a52be2086ebe
3
+ size 140