CharlesCNorton committed
Commit 37fa69d
0 Parent(s)

Add D latch threshold circuit

6 neurons, 2 layers, 26 parameters, magnitude 18.

Files changed (6)
  1. .gitattributes +1 -0
  2. README.md +88 -0
  3. config.json +9 -0
  4. create_safetensors.py +95 -0
  5. model.py +30 -0
  6. model.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1 @@
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,88 @@
+ ---
+ license: mit
+ tags:
+ - pytorch
+ - safetensors
+ - threshold-logic
+ - neuromorphic
+ - sequential
+ - latch
+ ---
+
+ # threshold-d-latch
+
+ D latch (level-sensitive) next-state logic as a threshold circuit.
+
+ ## Circuit
+
+ ```
+ E ───────┐
+ D ───────┼──► D-Latch ──┬──► Q
+ Q_prev ──┘              └──► Qn
+ ```
+
+ ## Modes
+
+ - **E=1 (Transparent):** Q follows D
+ - **E=0 (Hold):** Q holds the previous value
+
+ ## Truth Table
+
+ | E | D | Q_prev | Q | Qn | Mode |
+ |---|---|--------|---|----|------|
+ | 0 | X | 0 | 0 | 1 | Hold |
+ | 0 | X | 1 | 1 | 0 | Hold |
+ | 1 | 0 | X | 0 | 1 | Transparent |
+ | 1 | 1 | X | 1 | 0 | Transparent |
+
+ ## Logic
+
+ ```
+ Q = (E AND D) OR (NOT_E AND Q_prev)
+ Qn = (E AND NOT_D) OR (NOT_E AND NOT_Q_prev)
+ ```
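+
+ For illustration, a quick sanity check of these equations in plain Python (independent of the stored weights), comparing them exhaustively against the intended latch semantics:
+
+ ```python
+ # Check the next-state equations over all 8 input combinations.
+ for e in (0, 1):
+     for d in (0, 1):
+         for q_prev in (0, 1):
+             q = (e and d) or ((not e) and q_prev)
+             qn = (e and (not d)) or ((not e) and (not q_prev))
+             assert int(q) == (d if e else q_prev)  # E=1: follow D; E=0: hold
+             assert int(qn) == 1 - int(q)           # Qn is always the complement
+ print("equations match latch semantics")
+ ```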
+
+ ## Architecture
+
+ | Layer | Neurons |
+ |-------|---------|
+ | 1 | e_and_d, e_and_notd, note_and_qprev, note_and_notqprev |
+ | 2 | Q, Qn |
+
+ **Total: 6 neurons, 26 parameters, 2 layers**
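+
+ Each unit is a hard-threshold (Heaviside) neuron: it outputs 1 exactly when the weighted sum of its inputs plus the bias is non-negative. A minimal sketch of one such unit, using the same `>= 0` convention as `model.py` (the weights shown are this repo's `e_and_d` unit; the helper name is illustrative):
+
+ ```python
+ import torch
+
+ def threshold_neuron(x, w, b):
+     # Fires (1) iff w . x + b >= 0, else 0.
+     return int((x @ w.T + b >= 0).item())
+
+ # e_and_d: weights [1, 1, 0], bias -2 -> fires only when E = D = 1
+ w = torch.tensor([[1.0, 1.0, 0.0]])
+ b = torch.tensor([-2.0])
+ print(threshold_neuron(torch.tensor([1.0, 1.0, 0.0]), w, b))  # 1
+ print(threshold_neuron(torch.tensor([1.0, 0.0, 0.0]), w, b))  # 0
+ ```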
+
+ ## D-Latch vs D-Flip-Flop
+
+ - **D-Latch:** Level-sensitive. Q changes while E is high.
+ - **D-Flip-Flop:** Edge-triggered. Q changes only on a clock edge.
+
+ D-latches are simpler, but can cause timing issues (race conditions) if not designed carefully. Flip-flops are safer for synchronous designs.
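+
+ For illustration only (not part of this repo): an edge-triggered flip-flop can be simulated by chaining two of these latches master-slave, with the master enabled while the clock is low and the slave while it is high, so Q picks up a new value only when the clock rises. A hypothetical sketch built on `d_latch` from `model.py`:
+
+ ```python
+ from safetensors.torch import load_file
+ from model import d_latch
+
+ w = load_file('model.safetensors')
+
+ def d_flip_flop(clk, d, m_prev, q_prev, w):
+     # Master follows D while clk=0; slave follows the master while clk=1.
+     m, _ = d_latch(1 - clk, d, m_prev, w)
+     q, qn = d_latch(clk, m, q_prev, w)
+     return m, q, qn
+ ```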
+
+ ## Parameters
+
+ | Property | Value |
+ |----------|-------|
+ | Inputs | 3 |
+ | Outputs | 2 |
+ | Neurons | 6 |
+ | Layers | 2 |
+ | Parameters | 26 |
+ | Magnitude | 18 |
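+
+ These figures can be recomputed from the weights file ("magnitude" is the sum of absolute values of all weights and biases, as in `create_safetensors.py`):
+
+ ```python
+ from safetensors.torch import load_file
+
+ w = load_file('model.safetensors')
+ print(sum(t.numel() for t in w.values()))             # 26 parameters
+ print(sum(t.abs().sum().item() for t in w.values()))  # magnitude 18.0
+ ```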
+
+ ## Usage
+
+ ```python
+ from safetensors.torch import load_file
+ from model import d_latch
+
+ w = load_file('model.safetensors')
+
+ # Simulate latch behavior: Q is fed back as Q_prev on each step
+ q = 0
+ for e, d in [(1, 1), (1, 0), (0, 1), (0, 0)]:
+     q, qn = d_latch(e, d, q, w)
+     print(f'E={e} D={d} -> Q={q} Qn={qn}')
+ ```
+
+ ## License
+
+ MIT
config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "name": "threshold-d-latch",
+   "description": "D latch (level-sensitive)",
+   "inputs": 3,
+   "outputs": 2,
+   "neurons": 6,
+   "layers": 2,
+   "parameters": 26
+ }
create_safetensors.py ADDED
@@ -0,0 +1,95 @@
+ import torch
+ from safetensors.torch import save_file
+
+ weights = {}
+
+ # D-Latch (level-sensitive)
+ # Inputs: E (enable), D (data), Q_prev (previous state)
+ # Outputs: Q, Qn
+ #
+ # When E=1: Q = D (transparent mode)
+ # When E=0: Q = Q_prev (hold mode)
+ #
+ # Q = (E AND D) OR (NOT_E AND Q_prev)
+ # Qn = (E AND NOT_D) OR (NOT_E AND NOT_Q_prev)
+
+ # Layer 1: Compute intermediate terms directly from inputs
+ # e_and_d = E AND D
+ weights['e_and_d.weight'] = torch.tensor([[1.0, 1.0, 0.0]], dtype=torch.float32)
+ weights['e_and_d.bias'] = torch.tensor([-2.0], dtype=torch.float32)
+
+ # e_and_notd = E AND NOT_D
+ weights['e_and_notd.weight'] = torch.tensor([[1.0, -1.0, 0.0]], dtype=torch.float32)
+ weights['e_and_notd.bias'] = torch.tensor([-1.0], dtype=torch.float32)
+
+ # note_and_qprev = NOT_E AND Q_prev
+ weights['note_and_qprev.weight'] = torch.tensor([[-1.0, 0.0, 1.0]], dtype=torch.float32)
+ weights['note_and_qprev.bias'] = torch.tensor([-1.0], dtype=torch.float32)
+
+ # note_and_notqprev = NOT_E AND NOT_Q_prev
+ weights['note_and_notqprev.weight'] = torch.tensor([[-1.0, 0.0, -1.0]], dtype=torch.float32)
+ weights['note_and_notqprev.bias'] = torch.tensor([0.0], dtype=torch.float32)
+
+ # Layer 2: Combine for outputs
+ # Q = OR(e_and_d, note_and_qprev)
+ weights['q.weight'] = torch.tensor([[1.0, 0.0, 1.0, 0.0]], dtype=torch.float32)
+ weights['q.bias'] = torch.tensor([-1.0], dtype=torch.float32)
+
+ # Qn = OR(e_and_notd, note_and_notqprev)
+ weights['qn.weight'] = torch.tensor([[0.0, 1.0, 0.0, 1.0]], dtype=torch.float32)
+ weights['qn.bias'] = torch.tensor([-1.0], dtype=torch.float32)
+
+ save_file(weights, 'model.safetensors')
+
+ def d_latch(e, d, q_prev):
+     inp = torch.tensor([float(e), float(d), float(q_prev)])
+
+     # Layer 1
+     e_and_d = int((inp @ weights['e_and_d.weight'].T + weights['e_and_d.bias'] >= 0).item())
+     e_and_notd = int((inp @ weights['e_and_notd.weight'].T + weights['e_and_notd.bias'] >= 0).item())
+     note_and_qprev = int((inp @ weights['note_and_qprev.weight'].T + weights['note_and_qprev.bias'] >= 0).item())
+     note_and_notqprev = int((inp @ weights['note_and_notqprev.weight'].T + weights['note_and_notqprev.bias'] >= 0).item())
+
+     # Layer 2
+     l1 = torch.tensor([float(e_and_d), float(e_and_notd), float(note_and_qprev), float(note_and_notqprev)])
+     q = int((l1 @ weights['q.weight'].T + weights['q.bias'] >= 0).item())
+     qn = int((l1 @ weights['qn.weight'].T + weights['qn.bias'] >= 0).item())
+
+     return q, qn
+
+ def reference_d_latch(e, d, q_prev):
+     if e == 1:
+         return d, 1 - d
+     else:
+         return q_prev, 1 - q_prev
+
+ print("Verifying D-Latch...")
+ errors = 0
+ for e in range(2):
+     for d in range(2):
+         for q_prev in range(2):
+             result = d_latch(e, d, q_prev)
+             expected = reference_d_latch(e, d, q_prev)
+             if result != expected:
+                 errors += 1
+                 print(f"ERROR: E={e}, D={d}, Q_prev={q_prev} -> {result}, expected {expected}")
+
+ if errors == 0:
+     print("All 8 test cases passed!")
+ else:
+     print(f"FAILED: {errors} errors")
+
+ print("\nTruth Table:")
+ print("E D Q_prev | Q Qn | Mode")
+ print("-" * 30)
+ for e in range(2):
+     for d in range(2):
+         for q_prev in range(2):
+             q, qn = d_latch(e, d, q_prev)
+             mode = "Transparent" if e == 1 else "Hold"
+             print(f"{e} {d} {q_prev} | {q} {qn} | {mode}")
+
+ mag = sum(t.abs().sum().item() for t in weights.values())
+ print(f"\nMagnitude: {mag:.0f}")
+ print(f"Parameters: {sum(t.numel() for t in weights.values())}")
+ print(f"Neurons: {len([k for k in weights.keys() if 'weight' in k])}")
model.py ADDED
@@ -0,0 +1,30 @@
+ import torch
+ from safetensors.torch import load_file
+
+ def load_model(path='model.safetensors'):
+     return load_file(path)
+
+ def d_latch(e, d, q_prev, weights):
+     """D-Latch: E=1 transparent (Q=D), E=0 hold (Q=Q_prev)."""
+     inp = torch.tensor([float(e), float(d), float(q_prev)])
+
+     # Layer 1: the four AND terms, each a threshold unit (fires iff w.x + b >= 0)
+     e_and_d = int((inp @ weights['e_and_d.weight'].T + weights['e_and_d.bias'] >= 0).item())
+     e_and_notd = int((inp @ weights['e_and_notd.weight'].T + weights['e_and_notd.bias'] >= 0).item())
+     note_and_qprev = int((inp @ weights['note_and_qprev.weight'].T + weights['note_and_qprev.bias'] >= 0).item())
+     note_and_notqprev = int((inp @ weights['note_and_notqprev.weight'].T + weights['note_and_notqprev.bias'] >= 0).item())
+
+     # Layer 2: OR the relevant terms into Q and its complement Qn
+     l1 = torch.tensor([float(e_and_d), float(e_and_notd), float(note_and_qprev), float(note_and_notqprev)])
+     q = int((l1 @ weights['q.weight'].T + weights['q.bias'] >= 0).item())
+     qn = int((l1 @ weights['qn.weight'].T + weights['qn.bias'] >= 0).item())
+
+     return q, qn
+
+ if __name__ == '__main__':
+     w = load_model()
+     print('D-Latch:')
+     print('E D Q_prev | Q Qn')
+     for e in range(2):
+         for d in range(2):
+             for q_prev in range(2):
+                 q, qn = d_latch(e, d, q_prev, w)
+                 print(f'{e} {d} {q_prev} | {q} {qn}')
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b18fc0e51728fa4235734ff480012f543fe7fb9a0212ddf3bf4b6eaf5be001a
+ size 952