Rename from tiny-NOT-verified
Browse files- README.md +74 -0
- config.json +21 -0
- model.py +58 -0
- model.safetensors +3 -0
README.md
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
tags:
|
| 4 |
+
- pytorch
|
| 5 |
+
- safetensors
|
| 6 |
+
- threshold-logic
|
| 7 |
+
- neuromorphic
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# threshold-not
|
| 11 |
+
|
| 12 |
+
The minimal threshold circuit. One neuron, one weight, one bias.
|
| 13 |
+
|
| 14 |
+
## Circuit
|
| 15 |
+
|
| 16 |
+
```
|
| 17 |
+
x
|
| 18 |
+
│
|
| 19 |
+
▼
|
| 20 |
+
┌───────┐
|
| 21 |
+
│ w: -1 │
|
| 22 |
+
│ b: 0  │
|
| 23 |
+
└───────┘
|
| 24 |
+
│
|
| 25 |
+
▼
|
| 26 |
+
NOT(x)
|
| 27 |
+
```
|
| 28 |
+
|
| 29 |
+
## Mechanism
|
| 30 |
+
|
| 31 |
+
A threshold neuron fires when its weighted input plus bias reaches zero. NOT uses weight -1 and bias 0:
|
| 32 |
+
|
| 33 |
+
- Input 0: sum = 0, fires (output 1)
|
| 34 |
+
- Input 1: sum = -1, silent (output 0)
|
| 35 |
+
|
| 36 |
+
The negative weight flips the relationship between input magnitude and firing.
|
| 37 |
+
|
| 38 |
+
## Parameters
|
| 39 |
+
|
| 40 |
+
| | |
|
| 41 |
+
|---|---|
|
| 42 |
+
| Weight | -1 |
|
| 43 |
+
| Bias | 0 |
|
| 44 |
+
| Total | 2 parameters |
|
| 45 |
+
|
| 46 |
+
## Properties
|
| 47 |
+
|
| 48 |
+
- Involutive: NOT(NOT(x)) = x
|
| 49 |
+
- Foundation for NAND, NOR
|
| 50 |
+
|
| 51 |
+
## Usage
|
| 52 |
+
|
| 53 |
+
```python
|
| 54 |
+
from safetensors.torch import load_file
|
| 55 |
+
|
| 56 |
+
w = load_file('model.safetensors')
|
| 57 |
+
|
| 58 |
+
def not_gate(x):
|
| 59 |
+
return int(x * w['weight'].item() + w['bias'].item() >= 0)
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
## Files
|
| 63 |
+
|
| 64 |
+
```
|
| 65 |
+
threshold-not/
|
| 66 |
+
├── model.safetensors
|
| 67 |
+
├── model.py
|
| 68 |
+
├── config.json
|
| 69 |
+
└── README.md
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
## License
|
| 73 |
+
|
| 74 |
+
MIT
|
config.json
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model_type": "threshold_network",
|
| 3 |
+
"task": "not_gate",
|
| 4 |
+
"architecture": "1 -> 1",
|
| 5 |
+
"input_size": 1,
|
| 6 |
+
"output_size": 1,
|
| 7 |
+
"num_neurons": 1,
|
| 8 |
+
"num_parameters": 2,
|
| 9 |
+
"activation": "heaviside",
|
| 10 |
+
"weight_constraints": "integer",
|
| 11 |
+
"verification": {
|
| 12 |
+
"method": "coq_proof",
|
| 13 |
+
"exhaustive": true,
|
| 14 |
+
"inputs_tested": 2
|
| 15 |
+
},
|
| 16 |
+
"accuracy": {
|
| 17 |
+
"all_inputs": "2/2",
|
| 18 |
+
"percentage": 100.0
|
| 19 |
+
},
|
| 20 |
+
"github": "https://github.com/CharlesCNorton/coq-circuits"
|
| 21 |
+
}
|
model.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Threshold Network for NOT Gate
|
| 3 |
+
|
| 4 |
+
A formally verified single-neuron threshold network computing logical negation.
|
| 5 |
+
Weights are integer-constrained and activation uses the Heaviside step function.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from safetensors.torch import load_file
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ThresholdNOT:
    """
    Logical NOT realized as a single threshold neuron.

    The neuron fires (outputs 1.0) exactly when weight * input + bias >= 0.
    With weight = -1 and bias = 0 this gives NOT(0) = 1 and NOT(1) = 0.
    """

    def __init__(self, weights_dict):
        # Parameters arrive as a mapping with tensor values under
        # the keys 'weight' and 'bias' (the safetensors layout).
        self.weight = weights_dict['weight']
        self.bias = weights_dict['bias']

    def __call__(self, x):
        # Accept plain Python scalars as well as tensors.
        inp = torch.as_tensor(x, dtype=torch.float32)
        pre_activation = inp * self.weight + self.bias
        # Heaviside step: >= 0 fires.
        return (pre_activation >= 0).float()

    @classmethod
    def from_safetensors(cls, path="model.safetensors"):
        # Alternate constructor: hydrate parameters from a safetensors file.
        weights = load_file(path)
        return cls(weights)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def forward(x, weights):
    """
    Single-neuron forward pass with a Heaviside step activation.

    Args:
        x: Input value or tensor (expected to be 0 or 1)
        weights: Mapping with 'weight' and 'bias' tensors

    Returns:
        NOT(x) as a float tensor (1.0 or 0.0)
    """
    inputs = torch.as_tensor(x, dtype=torch.float32)
    pre_activation = inputs * weights['weight'] + weights['bias']
    return (pre_activation >= 0).float()
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
if __name__ == "__main__":
    # Load the verified parameters and print the full truth table.
    params = load_file("model.safetensors")
    gate = ThresholdNOT(params)

    print("NOT Gate Truth Table:")
    print("-" * 20)
    for bit in (0, 1):
        produced = int(gate(bit).item())
        desired = 1 - bit
        verdict = "OK" if produced == desired else "FAIL"
        print(f"NOT({bit}) = {produced} [{verdict}]")
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0cd33805f56cd2ea686b4023cd7fc70b4846140e7823a80dea77f1c9fcd86224
|
| 3 |
+
size 136
|