CharlesCNorton committed on
Commit
3baa17d
·
1 Parent(s): 52beb94

Add unified build.py with .inputs metadata for all gates

Browse files

- Merge build_memory.py and build_inputs.py into single build.py
- Add subcommands: memory, inputs, all
- Enrich neural_computer.safetensors with 3133 .inputs tensors
- Each gate now has signal IDs documenting input sources
- Signal registry stored in safetensors metadata (1046 signals)
- Remove eval/build_memory.py (consolidated into root build.py)

Files changed (3) hide show
  1. build.py +691 -0
  2. eval/build_memory.py +0 -148
  3. neural_computer.safetensors +2 -2
build.py ADDED
@@ -0,0 +1,691 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Build tools for 8-bit Threshold Computer safetensors.
3
+
4
+ Subcommands:
5
+ python build.py memory - Generate 64KB memory circuits
6
+ python build.py inputs - Add .inputs metadata tensors
7
+ python build.py all - Run both (memory first, then inputs)
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import argparse
13
+ import json
14
+ import re
15
+ from pathlib import Path
16
+ from typing import Dict, Iterable, List, Set
17
+
18
+ import torch
19
+ from safetensors import safe_open
20
+ from safetensors.torch import save_file
21
+
22
+
23
+ MODEL_PATH = Path(__file__).resolve().parent / "neural_computer.safetensors"
24
+ MANIFEST_PATH = Path(__file__).resolve().parent / "tensors.txt"
25
+
26
+ ADDR_BITS = 16
27
+ MEM_BYTES = 1 << ADDR_BITS
28
+
29
+
30
def load_tensors(path: Path) -> Dict[str, torch.Tensor]:
    """Read every tensor from a safetensors file into a plain dict.

    Each tensor is cloned so the returned dict owns its storage and can be
    mutated or re-saved without touching the memory-mapped source file.
    """
    with safe_open(str(path), framework="pt") as handle:
        return {key: handle.get_tensor(key).clone() for key in handle.keys()}
36
+
37
+
38
def get_all_gates(tensors: Dict[str, torch.Tensor]) -> Set[str]:
    """Return the set of gate names, one per '<gate>.weight' tensor key."""
    suffix = '.weight'
    return {key[:-len(suffix)] for key in tensors if key.endswith(suffix)}
44
+
45
+
46
class SignalRegistry:
    """Bidirectional mapping between signal names and dense integer ids.

    Ids are assigned in registration order; the constant signals "#0" and
    "#1" are pre-registered so they always occupy ids 0 and 1.
    """

    def __init__(self):
        self.name_to_id: Dict[str, int] = {}
        self.id_to_name: Dict[int, str] = {}
        self.next_id = 0
        for constant in ("#0", "#1"):
            self.register(constant)

    def register(self, name: str) -> int:
        """Return the id for *name*, allocating a fresh one on first sight."""
        existing = self.name_to_id.get(name)
        if existing is not None:
            return existing
        new_id = self.next_id
        self.name_to_id[name] = new_id
        self.id_to_name[new_id] = name
        self.next_id = new_id + 1
        return new_id

    def get_id(self, name: str) -> int:
        """Look up a name without registering it; returns -1 when unknown."""
        return self.name_to_id.get(name, -1)

    def to_metadata(self) -> str:
        """Serialize the id -> name table as a JSON string for file metadata."""
        return json.dumps(self.id_to_name)
66
+
67
+
68
def add_gate(tensors: Dict[str, torch.Tensor], name: str, weight: Iterable[float], bias: Iterable[float]) -> None:
    """Insert one gate as '<name>.weight' / '<name>.bias' float32 tensors.

    Raises ValueError if either key already exists, so a build step cannot
    silently overwrite a previously generated gate.
    """
    keys = (f"{name}.weight", f"{name}.bias")
    if any(key in tensors for key in keys):
        raise ValueError(f"Gate already exists: {name}")
    for key, values in zip(keys, (weight, bias)):
        tensors[key] = torch.tensor(list(values), dtype=torch.float32)
75
+
76
+
77
def drop_prefixes(tensors: Dict[str, torch.Tensor], prefixes: List[str]) -> None:
    """Remove, in place, every tensor whose key starts with any of *prefixes*."""
    # Collect first: mutating a dict while iterating it is illegal.
    doomed = [key for key in tensors if key.startswith(tuple(prefixes))]
    for key in doomed:
        del tensors[key]
81
+
82
+
83
def add_decoder(tensors: Dict[str, torch.Tensor]) -> None:
    """Build the one-hot address decoder over all MEM_BYTES addresses.

    Row ``addr`` fires exactly when the ADDR_BITS inputs (MSB first) match
    the bits of ``addr``: weight +1 where the bit is 1, -1 where it is 0,
    and bias = -popcount(addr). Computed with vectorized integer ops
    instead of a 65536-iteration Python loop; the resulting values are
    identical.
    """
    addrs = torch.arange(MEM_BYTES, dtype=torch.int64).unsqueeze(1)
    shifts = torch.arange(ADDR_BITS - 1, -1, -1, dtype=torch.int64)
    bits = (addrs >> shifts) & 1  # (MEM_BYTES, ADDR_BITS), MSB first
    tensors["memory.addr_decode.weight"] = (bits * 2 - 1).to(torch.float32)
    tensors["memory.addr_decode.bias"] = -bits.sum(dim=1).to(torch.float32)
92
+
93
+
94
def add_memory_read_mux(tensors: Dict[str, torch.Tensor]) -> None:
    """Read path: per-bit AND of (cell value, select line), then OR across all cells.

    AND gates use bias -2 (both inputs must be 1); the OR gates use bias -1
    (any selected cell drives the output bit).
    """
    tensors["memory.read.and.weight"] = torch.ones((8, MEM_BYTES, 2), dtype=torch.float32)
    tensors["memory.read.and.bias"] = torch.full((8, MEM_BYTES), -2.0, dtype=torch.float32)
    tensors["memory.read.or.weight"] = torch.ones((8, MEM_BYTES), dtype=torch.float32)
    tensors["memory.read.or.bias"] = torch.full((8,), -1.0, dtype=torch.float32)
103
+
104
+
105
def add_memory_write_cells(tensors: Dict[str, torch.Tensor]) -> None:
    """Write path gates: select, inverted select, old/new value ANDs, merge OR.

    Per cell: ``sel`` ANDs the decode line with write-enable, ``nsel`` is
    its negation, ``and_old``/``and_new`` gate the retained vs incoming
    byte, and ``or`` merges them. Shapes and constants are emitted from a
    single table; insertion order matches the original weight/bias pairs.
    """
    specs = (
        ("sel", torch.ones((MEM_BYTES, 2), dtype=torch.float32),
         torch.full((MEM_BYTES,), -2.0, dtype=torch.float32)),
        ("nsel", torch.full((MEM_BYTES, 1), -1.0, dtype=torch.float32),
         torch.zeros((MEM_BYTES,), dtype=torch.float32)),
        ("and_old", torch.ones((MEM_BYTES, 8, 2), dtype=torch.float32),
         torch.full((MEM_BYTES, 8), -2.0, dtype=torch.float32)),
        ("and_new", torch.ones((MEM_BYTES, 8, 2), dtype=torch.float32),
         torch.full((MEM_BYTES, 8), -2.0, dtype=torch.float32)),
        ("or", torch.ones((MEM_BYTES, 8, 2), dtype=torch.float32),
         torch.full((MEM_BYTES, 8), -1.0, dtype=torch.float32)),
    )
    for part, weight, bias in specs:
        tensors[f"memory.write.{part}.weight"] = weight
        tensors[f"memory.write.{part}.bias"] = bias
126
+
127
+
128
def add_fetch_load_store_buffers(tensors: Dict[str, torch.Tensor]) -> None:
    """Identity buffers (weight 1, bias -1) for IR, load/store data, and address.

    Raises ValueError via add_gate if any buffer already exists. The
    insertion order (16 IR bits, then interleaved load/store bits, then
    address bits) matches the original layout.
    """
    for bit in range(16):
        add_gate(tensors, f"control.fetch.ir.bit{bit}", [1.0], [-1.0])
    for bit in range(8):
        for stem in ("load", "store"):
            add_gate(tensors, f"control.{stem}.bit{bit}", [1.0], [-1.0])
    for bit in range(ADDR_BITS):
        add_gate(tensors, f"control.mem_addr.bit{bit}", [1.0], [-1.0])
136
+
137
+
138
def update_manifest(tensors: Dict[str, torch.Tensor]) -> None:
    """Record the build parameters as 1-element float32 tensors in the model."""
    entries = {
        "manifest.memory_bytes": float(MEM_BYTES),
        "manifest.pc_width": float(ADDR_BITS),
        "manifest.version": 3.0,
    }
    for key, value in entries.items():
        tensors[key] = torch.tensor([value], dtype=torch.float32)
142
+
143
+
144
def write_manifest(path: Path, tensors: Dict[str, torch.Tensor]) -> None:
    """Write a sorted text manifest of every tensor's shape and values.

    NOTE(review): values are dumped exhaustively, so this file becomes very
    large once the 64KB memory tensors (MEM_BYTES x ADDR_BITS entries) exist.
    """
    header = ["# Tensor Manifest", f"# Total: {len(tensors)} tensors"]
    body = []
    for name in sorted(tensors):
        tensor = tensors[name]
        rendered = ", ".join(f"{value:.1f}" for value in tensor.flatten().tolist())
        body.append(f"{name}: shape={list(tensor.shape)}, values=[{rendered}]")
    path.write_text("\n".join(header + body) + "\n", encoding="utf-8")
153
+
154
+
155
def infer_boolean_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Resolve input signal ids for gates under the 'boolean.' namespace.

    Single-layer gates read the abstract operands $x or ($a, $b); two-layer
    gates (XOR-style) have a layer2 neuron that reads its own layer1
    outputs. Matching is by substring, so the check order is significant.
    """
    if gate == 'boolean.not':
        return [reg.register("$x")]
    if gate in ['boolean.and', 'boolean.or', 'boolean.nand', 'boolean.nor', 'boolean.implies']:
        return [reg.register("$a"), reg.register("$b")]
    # Any first-layer neuron of a two-layer gate reads the raw operands.
    if '.layer1.neuron1' in gate or '.layer1.neuron2' in gate or '.layer1.or' in gate or '.layer1.nand' in gate:
        return [reg.register("$a"), reg.register("$b")]
    if '.layer2' in gate:
        parent = gate.rsplit('.layer2', 1)[0]
        # For xor/xnor/biimplies the layer1 stage sits one level deeper;
        # strip a trailing '.layer1' segment if present so the registered
        # names point at '<parent>.layer1.or' / '<parent>.layer1.nand'.
        if '.layer1.neuron1' in parent or 'xor' in parent or 'xnor' in parent or 'biimplies' in parent:
            parent = parent.rsplit('.layer1', 1)[0] if '.layer1' in parent else parent
        return [reg.register(f"{parent}.layer1.or"), reg.register(f"{parent}.layer1.nand")]
    # Unknown boolean gate shape: no inputs recorded.
    return []
168
+
169
+
170
def infer_halfadder_inputs(gate: str, prefix: str, reg: SignalRegistry) -> List[int]:
    """Input ids for half-adder gates under *prefix*.

    Every gate reads the external operands ($a, $b) except the second XOR
    stage (sum.layer2), which reads the two layer-1 outputs.
    """
    # Register the operands first so they always receive the earliest ids.
    operands = [reg.register(f"{prefix}.$a"), reg.register(f"{prefix}.$b")]
    if '.sum.layer1' in gate:
        return operands
    if '.sum.layer2' in gate:
        return [
            reg.register(f"{prefix}.sum.layer1.or"),
            reg.register(f"{prefix}.sum.layer1.nand"),
        ]
    # The carry AND, and anything unrecognized, also reads the operands.
    return operands
180
+
181
+
182
def infer_fulladder_inputs(gate: str, prefix: str, reg: SignalRegistry) -> List[int]:
    """Resolve input signal ids for one gate of the full-adder macro.

    The full adder is two chained half adders (ha1, ha2) plus a final
    carry OR. Which signals a gate reads is decided by substring matching
    on the gate name, so the check order below is significant.
    """
    # External operands are registered up front so they get stable ids.
    a = reg.register(f"{prefix}.$a")
    b = reg.register(f"{prefix}.$b")
    cin = reg.register(f"{prefix}.$cin")
    if '.ha1.sum.layer1' in gate:
        return [a, b]
    if '.ha1.sum.layer2' in gate:
        # XOR second stage reads the two first-stage outputs.
        return [reg.register(f"{prefix}.ha1.sum.layer1.or"), reg.register(f"{prefix}.ha1.sum.layer1.nand")]
    # '.layer' exclusion keeps plain carry gates from matching layered names.
    if '.ha1.carry' in gate and '.layer' not in gate:
        return [a, b]
    if '.ha2.sum.layer1' in gate:
        # Second half adder combines ha1's sum with the external carry-in.
        return [reg.register(f"{prefix}.ha1.sum.layer2"), cin]
    if '.ha2.sum.layer2' in gate:
        return [reg.register(f"{prefix}.ha2.sum.layer1.or"), reg.register(f"{prefix}.ha2.sum.layer1.nand")]
    if '.ha2.carry' in gate and '.layer' not in gate:
        return [reg.register(f"{prefix}.ha1.sum.layer2"), cin]
    if '.carry_or' in gate:
        # Carry-out is the OR of both half adders' carries.
        return [reg.register(f"{prefix}.ha1.carry"), reg.register(f"{prefix}.ha2.carry")]
    return []
201
+
202
+
203
def infer_ripplecarry_inputs(gate: str, prefix: str, bits: int, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for a gate of an N-bit ripple-carry adder.

    Registers the full operand buses up front, then decodes which
    full-adder stage ('faK') the gate belongs to and reuses the full-adder
    wiring, chaining each stage's carry-in to the previous stage's
    carry_or (stage 0 takes the constant #0).
    """
    for i in range(bits):
        reg.register(f"{prefix}.$a[{i}]")
        reg.register(f"{prefix}.$b[{i}]")
    m = re.search(r'\.fa(\d+)\.', gate)
    if not m:
        # Not part of a full-adder stage: nothing to record.
        return []
    bit = int(m.group(1))
    a_bit = reg.get_id(f"{prefix}.$a[{bit}]")
    b_bit = reg.get_id(f"{prefix}.$b[{bit}]")
    # Stage 0 has no incoming carry; later stages ripple from fa(bit-1).
    cin = reg.get_id("#0") if bit == 0 else reg.register(f"{prefix}.fa{bit-1}.carry_or")
    fa_prefix = f"{prefix}.fa{bit}"
    if '.ha1.sum.layer1' in gate:
        return [a_bit, b_bit]
    if '.ha1.sum.layer2' in gate:
        return [reg.register(f"{fa_prefix}.ha1.sum.layer1.or"), reg.register(f"{fa_prefix}.ha1.sum.layer1.nand")]
    if '.ha1.carry' in gate and '.layer' not in gate:
        return [a_bit, b_bit]
    if '.ha2.sum.layer1' in gate:
        return [reg.register(f"{fa_prefix}.ha1.sum.layer2"), cin]
    if '.ha2.sum.layer2' in gate:
        return [reg.register(f"{fa_prefix}.ha2.sum.layer1.or"), reg.register(f"{fa_prefix}.ha2.sum.layer1.nand")]
    if '.ha2.carry' in gate and '.layer' not in gate:
        return [reg.register(f"{fa_prefix}.ha1.sum.layer2"), cin]
    if '.carry_or' in gate:
        return [reg.register(f"{fa_prefix}.ha1.carry"), reg.register(f"{fa_prefix}.ha2.carry")]
    return []
230
+
231
+
232
def infer_adcsbc_inputs(gate: str, prefix: str, is_sub: bool, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for add/subtract-with-carry (adc8bit / sbc8bit) gates.

    These adders use an XOR/AND/OR full-adder decomposition (xor1, xor2,
    and1, and2, or_carry). For subtraction the B operand is first inverted
    by per-bit 'notb' gates.
    """
    for i in range(8):
        reg.register(f"{prefix}.$a[{i}]")
        reg.register(f"{prefix}.$b[{i}]")
    reg.register(f"{prefix}.$cin")
    if is_sub and '.notb' in gate:
        # Inverter stage reads the raw B bit it negates.
        m = re.search(r'\.notb(\d+)', gate)
        if m:
            return [reg.get_id(f"{prefix}.$b[{int(m.group(1))}]")]
        return []
    m = re.search(r'\.fa(\d+)\.', gate)
    if not m:
        return []
    bit = int(m.group(1))
    # For subtraction the second operand of each stage is the inverted bit.
    if is_sub:
        a_bit = reg.get_id(f"{prefix}.$a[{bit}]")
        notb = reg.register(f"{prefix}.notb{bit}")
    else:
        a_bit = reg.get_id(f"{prefix}.$a[{bit}]")
        notb = reg.get_id(f"{prefix}.$b[{bit}]")
    # Stage 0 takes the external carry-in; later stages chain or_carry.
    cin = reg.get_id(f"{prefix}.$cin") if bit == 0 else reg.register(f"{prefix}.fa{bit-1}.or_carry")
    fa_prefix = f"{prefix}.fa{bit}"
    if '.xor1.layer1' in gate:
        # NOTE: notb already holds $b[bit] when not subtracting, so the
        # conditional below is redundant but harmless.
        return [a_bit, notb if is_sub else reg.get_id(f"{prefix}.$b[{bit}]")]
    if '.xor1.layer2' in gate:
        return [reg.register(f"{fa_prefix}.xor1.layer1.or"), reg.register(f"{fa_prefix}.xor1.layer1.nand")]
    if '.xor2.layer1' in gate:
        return [reg.register(f"{fa_prefix}.xor1.layer2"), cin]
    if '.xor2.layer2' in gate:
        return [reg.register(f"{fa_prefix}.xor2.layer1.or"), reg.register(f"{fa_prefix}.xor2.layer1.nand")]
    if '.and1' in gate:
        return [a_bit, notb if is_sub else reg.get_id(f"{prefix}.$b[{bit}]")]
    if '.and2' in gate:
        return [reg.register(f"{fa_prefix}.xor1.layer2"), cin]
    if '.or_carry' in gate:
        return [reg.register(f"{fa_prefix}.and1"), reg.register(f"{fa_prefix}.and2")]
    return []
269
+
270
+
271
def infer_sub8bit_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for the dedicated 8-bit subtractor.

    Two's-complement subtraction: B is inverted per bit (notb gates) and
    the carry chain is seeded with constant #1 instead of an external
    carry-in; otherwise the wiring mirrors infer_adcsbc_inputs.
    """
    prefix = "arithmetic.sub8bit"
    for i in range(8):
        reg.register(f"{prefix}.$a[{i}]")
        reg.register(f"{prefix}.$b[{i}]")
    if gate == f"{prefix}.carry_in":
        # The +1 of two's complement comes from the constant-one signal.
        return [reg.get_id("#1")]
    if '.notb' in gate:
        m = re.search(r'\.notb(\d+)', gate)
        if m:
            return [reg.get_id(f"{prefix}.$b[{int(m.group(1))}]")]
        return []
    m = re.search(r'\.fa(\d+)\.', gate)
    if not m:
        return []
    bit = int(m.group(1))
    a_bit = reg.get_id(f"{prefix}.$a[{bit}]")
    notb = reg.register(f"{prefix}.notb{bit}")
    # Stage 0 carry-in is the constant #1; later stages ripple or_carry.
    cin = reg.get_id("#1") if bit == 0 else reg.register(f"{prefix}.fa{bit-1}.or_carry")
    fa_prefix = f"{prefix}.fa{bit}"
    if '.xor1.layer1' in gate:
        return [a_bit, notb]
    if '.xor1.layer2' in gate:
        return [reg.register(f"{fa_prefix}.xor1.layer1.or"), reg.register(f"{fa_prefix}.xor1.layer1.nand")]
    if '.xor2.layer1' in gate:
        return [reg.register(f"{fa_prefix}.xor1.layer2"), cin]
    if '.xor2.layer2' in gate:
        return [reg.register(f"{fa_prefix}.xor2.layer1.or"), reg.register(f"{fa_prefix}.xor2.layer1.nand")]
    if '.and1' in gate:
        return [a_bit, notb]
    if '.and2' in gate:
        return [reg.register(f"{fa_prefix}.xor1.layer2"), cin]
    if '.or_carry' in gate:
        return [reg.register(f"{fa_prefix}.and1"), reg.register(f"{fa_prefix}.and2")]
    return []
306
+
307
+
308
def infer_threshold_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Threshold gates all read the full 8-bit input bus $x[0..7]."""
    # register() returns the existing id when already known, so this both
    # registers the bus on first use and looks it up afterwards.
    return [reg.register(f"$x[{i}]") for i in range(8)]
312
+
313
+
314
def infer_modular_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for modular-arithmetic gates.

    Layered structure: layer1 geq/leq comparators read the $x bus,
    layer2 eqN gates combine the matching geqN/leqN pair, and layer3.or
    collects whichever eqN gates have already been registered.
    """
    for i in range(8):
        reg.register(f"$x[{i}]")
    if '.layer1' in gate or '.layer2' in gate or '.layer3' in gate:
        if 'layer1.geq' in gate or 'layer1.leq' in gate:
            return [reg.get_id(f"$x[{i}]") for i in range(8)]
        if 'layer2.eq' in gate:
            m = re.search(r'layer2\.eq(\d+)', gate)
            if m:
                idx = m.group(1)
                parent = gate.rsplit('.layer2', 1)[0]
                return [reg.register(f"{parent}.layer1.geq{idx}"), reg.register(f"{parent}.layer1.leq{idx}")]
        if 'layer3.or' in gate:
            parent = gate.rsplit('.layer3', 1)[0]
            # NOTE(review): this only finds eq gates registered earlier in
            # the (sorted) gate walk, so the result depends on traversal
            # order; falls back to the $x bus when none are known yet.
            eq_gates = []
            for i in range(256):
                eq_gate = f"{parent}.layer2.eq{i}"
                if eq_gate in reg.name_to_id:
                    eq_gates.append(reg.get_id(eq_gate))
            return eq_gates if eq_gates else [reg.get_id(f"$x[{i}]") for i in range(8)]
    return [reg.get_id(f"$x[{i}]") for i in range(8)]
335
+
336
+
337
def infer_control_jump_inputs(gate: str, prefix: str, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for conditional-jump mux gates.

    Each jump is a per-bit 2:1 mux between $pc and $target selected by a
    condition flag. The flag name is chosen by substring-matching the
    mnemonic in *prefix*; the elif order plus the explicit 'jn*' exclusions
    are what keep e.g. 'jnz' from being classified as 'jn' — fragile, do
    not reorder without re-checking every mnemonic.
    """
    for i in range(8):
        reg.register(f"{prefix}.$pc[{i}]")
        reg.register(f"{prefix}.$target[{i}]")
    flag = "$cond"
    if "jz" in prefix:
        flag = "$zero"
    elif "jc" in prefix:
        flag = "$carry"
    elif "jn" in prefix and "jnc" not in prefix and "jnz" not in prefix and "jnv" not in prefix:
        flag = "$negative"
    elif "jv" in prefix and "jnv" not in prefix:
        flag = "$overflow"
    elif "jp" in prefix:
        flag = "$positive"
    elif "jnc" in prefix:
        flag = "$not_carry"
    elif "jnz" in prefix:
        flag = "$not_zero"
    elif "jnv" in prefix:
        flag = "$not_overflow"
    reg.register(f"{prefix}.{flag}")
    m = re.search(r'\.bit(\d+)\.', gate)
    if not m:
        # Gate is not a per-bit mux element (e.g. the flag buffer itself).
        return []
    bit = int(m.group(1))
    bit_prefix = f"{prefix}.bit{bit}"
    if '.not_sel' in gate:
        return [reg.get_id(f"{prefix}.{flag}")]
    if '.and_a' in gate:
        # Pass-through of the PC bit when the condition is false.
        return [reg.get_id(f"{prefix}.$pc[{bit}]"), reg.register(f"{bit_prefix}.not_sel")]
    if '.and_b' in gate:
        # Pass-through of the target bit when the condition is true.
        return [reg.get_id(f"{prefix}.$target[{bit}]"), reg.get_id(f"{prefix}.{flag}")]
    if '.or' in gate:
        return [reg.register(f"{bit_prefix}.and_a"), reg.register(f"{bit_prefix}.and_b")]
    return []
373
+
374
+
375
def infer_buffer_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Buffer gates pass one data bit through: '...bitN' reads '<stem>.$data[N]'.

    Gates without a trailing '.bitN' suffix fall back to a single generic
    '$data' signal.
    """
    match = re.search(r'\.bit(\d+)$', gate)
    if match is None:
        return [reg.register("$data")]
    stem = gate.rsplit('.bit', 1)[0]
    return [reg.register(f"{stem}.$data[{int(match.group(1))}]")]
382
+
383
+
384
def infer_memory_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Coarse input signals for the bulk memory tensors.

    The 64KB circuits are too large to wire per-cell, so the decoder gets
    the 16 address lines and the read/write paths get abstract bus signals.
    """
    if 'addr_decode' in gate:
        return [reg.register(f"$addr[{i}]") for i in range(16)]
    if 'read' in gate:
        return [reg.register("$mem"), reg.register("$sel")]
    if 'write' in gate:
        return [reg.register(name) for name in ("$mem", "$data", "$sel", "$we")]
    return []
392
+
393
+
394
def infer_alu_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for ALU gates.

    Registers the $a/$b buses and 4-bit $opcode up front, then dispatches
    on substrings of the gate name. Order matters: the control/flags
    checks must precede the bitwise-op checks, and the combined
    and/or/xor branch precedes '.not'.
    """
    for i in range(8):
        reg.register(f"$a[{i}]")
        reg.register(f"$b[{i}]")
    for i in range(4):
        reg.register(f"$opcode[{i}]")
    if 'alucontrol' in gate:
        return [reg.get_id(f"$opcode[{i}]") for i in range(4)]
    if 'aluflags' in gate:
        return [reg.register("$result"), reg.register("$carry"), reg.register("$overflow")]
    if '.and' in gate or '.or' in gate or '.xor' in gate:
        # Per-bit gates read the matching (a, b) pair; whole-word gates
        # (no 'bitN' in the name) read both full buses.
        m = re.search(r'bit(\d+)', gate)
        if m:
            bit = int(m.group(1))
            return [reg.get_id(f"$a[{bit}]"), reg.get_id(f"$b[{bit}]")]
        return [reg.get_id(f"$a[{i}]") for i in range(8)] + [reg.get_id(f"$b[{i}]") for i in range(8)]
    if '.not' in gate:
        m = re.search(r'bit(\d+)', gate)
        if m:
            return [reg.get_id(f"$a[{int(m.group(1))}]")]
        return [reg.get_id(f"$a[{i}]") for i in range(8)]
    if 'layer1' in gate or 'layer2' in gate:
        m = re.search(r'bit(\d+)', gate)
        if m:
            bit = int(m.group(1))
            if 'layer1' in gate:
                return [reg.get_id(f"$a[{bit}]"), reg.get_id(f"$b[{bit}]")]
            # layer2 (XOR second stage) reads its own layer1 outputs.
            parent = gate.rsplit('.layer2', 1)[0]
            return [reg.register(f"{parent}.layer1.or"), reg.register(f"{parent}.layer1.nand")]
    # Fallback: assume the gate reads the $a bus.
    return [reg.get_id(f"$a[{i}]") for i in range(8)]
424
+
425
+
426
def infer_pattern_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Pattern gates read the $x bus; hammingdistance reads $a and $b instead."""
    xs = [reg.register(f"$x[{i}]") for i in range(8)]
    if 'hammingdistance' not in gate:
        return xs
    # Register a/b interleaved (a0, b0, a1, b1, ...) but return them
    # bus-by-bus: all $a bits, then all $b bits.
    pairs = [(reg.register(f"$a[{i}]"), reg.register(f"$b[{i}]")) for i in range(8)]
    return [a for a, _ in pairs] + [b for _, b in pairs]
435
+
436
+
437
def infer_error_detection_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for error-detection gates (Hamming, CRC, parity).

    Hamming encoders read a 4-bit $d bus, decoders/syndrome gates a 7-bit
    $c bus; parity is a 3-stage XOR reduction tree over $x whose stages
    are wired by the stageN.xorM indices.
    """
    for i in range(8):
        reg.register(f"$x[{i}]")
    if 'hamming' in gate:
        if 'encode' in gate:
            for i in range(4):
                reg.register(f"$d[{i}]")
            return [reg.get_id(f"$d[{i}]") for i in range(4)]
        if 'decode' in gate or 'syndrome' in gate:
            for i in range(7):
                reg.register(f"$c[{i}]")
            return [reg.get_id(f"$c[{i}]") for i in range(7)]
    if 'crc' in gate:
        return [reg.register(f"$data[{i}]") for i in range(8)]
    if 'parity' in gate and 'stage' in gate:
        m = re.search(r'stage(\d+)\.xor(\d+)', gate)
        if m:
            stage = int(m.group(1))
            idx = int(m.group(2))
            if stage == 1:
                # First stage pairs adjacent input bits.
                return [reg.get_id(f"$x[{2*idx}]"), reg.get_id(f"$x[{2*idx+1}]")]
            # Later stages pair the previous stage's XOR outputs.
            parent = gate.rsplit(f'.stage{stage}', 1)[0]
            prev_stage = stage - 1
            return [
                reg.register(f"{parent}.stage{prev_stage}.xor{2*idx}.layer2"),
                reg.register(f"{parent}.stage{prev_stage}.xor{2*idx+1}.layer2")
            ]
    if 'output.not' in gate:
        # Odd-parity output inverts the root of the 3-stage tree.
        parent = gate.rsplit('.output', 1)[0]
        return [reg.register(f"{parent}.stage3.xor0.layer2")]
    return [reg.get_id(f"$x[{i}]") for i in range(8)]
468
+
469
+
470
def infer_combinational_inputs(gate: str, reg: SignalRegistry) -> List[int]:
    """Resolve input ids for combinational building blocks.

    NOTE(review): 'demultiplexer' names also contain the substring
    'multiplexer', so demux gates enter the multiplexer branch first and
    only reach their own branch because none of '2to1'/'4to1'/'8to1'
    matches and the branch falls through — fragile; keep in mind when
    adding new mux variants.
    """
    if 'decoder3to8' in gate:
        for i in range(3):
            reg.register(f"$sel[{i}]")
        return [reg.get_id(f"$sel[{i}]") for i in range(3)]
    if 'encoder8to3' in gate:
        for i in range(8):
            reg.register(f"$x[{i}]")
        return [reg.get_id(f"$x[{i}]") for i in range(8)]
    if 'multiplexer' in gate:
        if '2to1' in gate:
            return [reg.register("$a"), reg.register("$b"), reg.register("$sel")]
        if '4to1' in gate:
            return [reg.register(f"$x[{i}]") for i in range(4)] + [reg.register(f"$sel[{i}]") for i in range(2)]
        if '8to1' in gate:
            return [reg.register(f"$x[{i}]") for i in range(8)] + [reg.register(f"$sel[{i}]") for i in range(3)]
    if 'demultiplexer' in gate:
        return [reg.register("$x"), reg.register("$sel")]
    if 'regmux4to1' in gate:
        # Register all four 8-bit register buses plus the 2-bit select.
        for r in range(4):
            for i in range(8):
                reg.register(f"$r{r}[{i}]")
        for i in range(2):
            reg.register(f"$sel[{i}]")
        if gate == "combinational.regmux4to1.not_s0":
            return [reg.get_id("$sel[0]")]
        if gate == "combinational.regmux4to1.not_s1":
            return [reg.get_id("$sel[1]")]
        m = re.search(r'bit(\d+)', gate)
        if m:
            bit = int(m.group(1))
            if '.not_s' in gate:
                sidx = 0 if 's0' in gate else 1
                return [reg.get_id(f"$sel[{sidx}]")]
            if '.and' in gate:
                and_m = re.search(r'\.and(\d+)', gate)
                if and_m:
                    # andK selects register K: each select bit is taken
                    # inverted or direct depending on K's binary digits.
                    and_idx = int(and_m.group(1))
                    sel0 = "combinational.regmux4to1.not_s0" if (and_idx & 1) == 0 else "$sel[0]"
                    sel1 = "combinational.regmux4to1.not_s1" if (and_idx & 2) == 0 else "$sel[1]"
                    return [reg.get_id(f"$r{and_idx}[{bit}]"), reg.register(sel0), reg.register(sel1)]
            if '.or' in gate:
                return [reg.register(f"combinational.regmux4to1.bit{bit}.and{i}") for i in range(4)]
        return []
    if 'barrelshifter' in gate or 'priorityencoder' in gate:
        for i in range(8):
            reg.register(f"$x[{i}]")
        return [reg.get_id(f"$x[{i}]") for i in range(8)]
    return []
519
+
520
+
521
def infer_inputs_for_gate(gate: str, reg: SignalRegistry, tensors: Dict[str, torch.Tensor]) -> List[int]:
    """Dispatch a gate name to the namespace-specific input inference.

    Returns the list of signal ids the gate reads, or [] when nothing can
    be inferred (e.g. manifest pseudo-tensors). The substring checks inside
    the 'arithmetic.' branch are order-sensitive: e.g. 'sub8bit' must be
    tested after 'sbc8bit' would have matched its own gates.
    """
    if gate.startswith('manifest.'):
        # Manifest entries are metadata, not circuits.
        return []
    if gate.startswith('boolean.'):
        return infer_boolean_inputs(gate, reg)
    if gate.startswith('arithmetic.'):
        if 'halfadder' in gate:
            return infer_halfadder_inputs(gate, "arithmetic.halfadder", reg)
        if 'fulladder' in gate:
            return infer_fulladder_inputs(gate, "arithmetic.fulladder", reg)
        if 'ripplecarry2bit' in gate:
            return infer_ripplecarry_inputs(gate, "arithmetic.ripplecarry2bit", 2, reg)
        if 'ripplecarry4bit' in gate:
            return infer_ripplecarry_inputs(gate, "arithmetic.ripplecarry4bit", 4, reg)
        if 'ripplecarry8bit' in gate:
            return infer_ripplecarry_inputs(gate, "arithmetic.ripplecarry8bit", 8, reg)
        if 'adc8bit' in gate:
            return infer_adcsbc_inputs(gate, "arithmetic.adc8bit", False, reg)
        if 'sbc8bit' in gate:
            return infer_adcsbc_inputs(gate, "arithmetic.sbc8bit", True, reg)
        if 'sub8bit' in gate:
            return infer_sub8bit_inputs(gate, reg)
        # Unrecognized arithmetic gate: fall back to the $a bus.
        for i in range(8):
            reg.register(f"$a[{i}]")
            reg.register(f"$b[{i}]")
        return [reg.get_id(f"$a[{i}]") for i in range(8)]
    if gate.startswith('threshold.'):
        return infer_threshold_inputs(gate, reg)
    if gate.startswith('modular.'):
        return infer_modular_inputs(gate, reg)
    if gate.startswith('control.'):
        if any(j in gate for j in ['jz', 'jc', 'jn', 'jv', 'jp', 'jnz', 'jnc', 'jnv', 'conditionaljump']):
            # Strip the per-bit suffix (or last component) to get the
            # jump's namespace prefix.
            prefix = gate.split('.bit')[0] if '.bit' in gate else gate.rsplit('.', 1)[0]
            return infer_control_jump_inputs(gate, prefix, reg)
        if any(b in gate for b in ['fetch', 'load', 'store', 'mem_addr']):
            return infer_buffer_inputs(gate, reg)
        return [reg.register("$ctrl")]
    if gate.startswith('memory.'):
        return infer_memory_inputs(gate, reg)
    if gate.startswith('alu.'):
        return infer_alu_inputs(gate, reg)
    if gate.startswith('pattern_recognition.'):
        return infer_pattern_inputs(gate, reg)
    if gate.startswith('error_detection.'):
        return infer_error_detection_inputs(gate, reg)
    if gate.startswith('combinational.'):
        return infer_combinational_inputs(gate, reg)
    # Unknown namespace: derive the input count from the weight tensor
    # (fan-in is the length of a 1-D weight, else the last dimension).
    weight_key = f"{gate}.weight"
    if weight_key in tensors:
        w = tensors[weight_key]
        n_inputs = w.shape[0] if w.dim() == 1 else w.shape[-1]
        for i in range(n_inputs):
            reg.register(f"$input[{i}]")
        return [reg.get_id(f"$input[{i}]") for i in range(n_inputs)]
    return []
576
+
577
+
578
def build_inputs(tensors: Dict[str, torch.Tensor]) -> tuple[Dict[str, torch.Tensor], SignalRegistry, dict]:
    """Attach a '<gate>.inputs' int64 tensor to every gate that lacks one.

    Gates are processed in sorted order so signal-id assignment is
    deterministic. Returns the (mutated) tensors dict, the signal
    registry, and counters for added/skipped/empty gates.
    """
    reg = SignalRegistry()
    stats = {"added": 0, "skipped": 0, "empty": 0}
    for gate in sorted(get_all_gates(tensors)):
        inputs_key = f"{gate}.inputs"
        if inputs_key in tensors:
            stats["skipped"] += 1
            continue
        ids = infer_inputs_for_gate(gate, reg, tensors)
        if ids:
            tensors[inputs_key] = torch.tensor(ids, dtype=torch.int64)
            stats["added"] += 1
        else:
            stats["empty"] += 1
    return tensors, reg, stats
594
+
595
+
596
def cmd_memory(args) -> None:
    """'memory' subcommand: regenerate the 64KB memory circuits and buffers.

    Drops any previously generated memory/control tensors, rebuilds them,
    refreshes the manifest entries, and saves only when --apply was given.

    NOTE(review): save_file is called here without metadata=, so any
    'signal_registry' metadata previously written by the inputs pass is
    dropped when memory is re-applied — confirm this is intended.
    """
    print("=" * 60)
    print(" BUILD MEMORY CIRCUITS")
    print("=" * 60)
    print(f"\nLoading: {args.model}")
    tensors = load_tensors(args.model)
    print(f" Loaded {len(tensors)} tensors")
    # Remove stale generated tensors so regeneration is idempotent.
    print("\nDropping existing memory/control tensors...")
    drop_prefixes(tensors, [
        "memory.addr_decode.", "memory.read.", "memory.write.",
        "control.fetch.ir.", "control.load.", "control.store.", "control.mem_addr.",
    ])
    print(f" Now {len(tensors)} tensors")
    print("\nGenerating memory circuits...")
    add_decoder(tensors)
    add_memory_read_mux(tensors)
    add_memory_write_cells(tensors)
    print(" Added decoder, read mux, write cells")
    print("\nGenerating buffer gates...")
    try:
        add_fetch_load_store_buffers(tensors)
        print(" Added fetch/load/store/mem_addr buffers")
    except ValueError as e:
        # add_gate refuses to overwrite; buffers survive across runs.
        print(f" Buffers already exist: {e}")
    print("\nUpdating manifest...")
    update_manifest(tensors)
    print(f" memory_bytes={MEM_BYTES}, pc_width={ADDR_BITS}")
    if args.apply:
        print(f"\nSaving: {args.model}")
        save_file(tensors, str(args.model))
        if args.manifest:
            write_manifest(MANIFEST_PATH, tensors)
            print(f" Wrote manifest: {MANIFEST_PATH}")
        print(" Done.")
    else:
        print("\n[DRY-RUN] Use --apply to save.")
    print(f"\nTotal: {len(tensors)} tensors")
    print("=" * 60)
634
+
635
+
636
def cmd_inputs(args) -> None:
    """'inputs' subcommand: add '.inputs' metadata tensors to every gate.

    Loads the model, runs build_inputs, prints counters, and — with
    --apply — re-saves the file with the signal registry serialized into
    the safetensors metadata under the 'signal_registry' key.
    """
    print("=" * 60)
    print(" BUILD .inputs TENSORS")
    print("=" * 60)
    print(f"\nLoading: {args.model}")
    tensors = load_tensors(args.model)
    print(f" Loaded {len(tensors)} tensors")
    gates = get_all_gates(tensors)
    print(f" Found {len(gates)} gates")
    print("\nBuilding .inputs tensors...")
    tensors, reg, stats = build_inputs(tensors)
    print(f"\nResults:")
    print(f" Added: {stats['added']}")
    print(f" Skipped: {stats['skipped']}")
    print(f" Empty: {stats['empty']}")
    print(f" Signals: {len(reg.name_to_id)}")
    print(f" Total: {len(tensors)}")
    if args.apply:
        print(f"\nSaving: {args.model}")
        # The id -> name table rides along as safetensors file metadata.
        metadata = {"signal_registry": reg.to_metadata()}
        save_file(tensors, str(args.model), metadata=metadata)
        print(" Done.")
    else:
        print("\n[DRY-RUN] Use --apply to save.")
    print("=" * 60)
661
+
662
+
663
def cmd_all(args) -> None:
    """'all' subcommand: run the memory pass, then the inputs pass.

    Both passes share the same parsed args (model path, --apply, --manifest).
    """
    steps = (("Running: memory", cmd_memory), ("\nRunning: inputs", cmd_inputs))
    for banner, step in steps:
        print(banner)
        step(args)
668
+
669
+
670
def main() -> None:
    """CLI entry point: parse arguments and dispatch to the chosen subcommand."""
    parser = argparse.ArgumentParser(description="Build tools for threshold computer safetensors")
    parser.add_argument("--model", type=Path, default=MODEL_PATH, help="Model path")
    parser.add_argument("--apply", action="store_true", help="Apply changes (default: dry-run)")
    parser.add_argument("--manifest", action="store_true", help="Write tensors.txt manifest (memory only)")
    subparsers = parser.add_subparsers(dest="command", help="Subcommands")
    subparsers.add_parser("memory", help="Generate 64KB memory circuits")
    subparsers.add_parser("inputs", help="Add .inputs metadata tensors")
    subparsers.add_parser("all", help="Run memory then inputs")
    args = parser.parse_args()
    # Table dispatch; no subcommand falls through to the help text.
    handlers = {"memory": cmd_memory, "inputs": cmd_inputs, "all": cmd_all}
    handler = handlers.get(args.command)
    if handler is None:
        parser.print_help()
    else:
        handler(args)


if __name__ == "__main__":
    main()
eval/build_memory.py DELETED
@@ -1,148 +0,0 @@
1
- """
2
- Generate 64KB memory circuits and fetch/load/store buffers for the 8-bit threshold computer.
3
- Updates neural_computer.safetensors and tensors.txt in-place.
4
- """
5
-
6
- from __future__ import annotations
7
-
8
- from pathlib import Path
9
- from typing import Dict, Iterable, List
10
-
11
- import torch
12
- from safetensors import safe_open
13
- from safetensors.torch import save_file
14
-
15
-
16
# Repo-root artifacts this script rewrites in place.
MODEL_PATH = Path(__file__).resolve().parent.parent / "neural_computer.safetensors"
MANIFEST_PATH = Path(__file__).resolve().parent.parent / "tensors.txt"

ADDR_BITS = 16  # width of the memory address bus, in bits
MEM_BYTES = 1 << ADDR_BITS  # 64 KiB of byte-addressable memory
22
-
23
def load_tensors(path: Path) -> Dict[str, torch.Tensor]:
    """Read every tensor from a safetensors file as an owned float32 CPU copy."""
    with safe_open(str(path), framework="pt") as handle:
        return {
            key: handle.get_tensor(key).float().cpu().clone()
            for key in handle.keys()
        }
29
-
30
-
31
def add_gate(tensors: Dict[str, torch.Tensor], name: str, weight: Iterable[float], bias: Iterable[float]) -> None:
    """Insert a single threshold gate (weight and bias vectors) under *name*.

    Raises:
        ValueError: if either ``{name}.weight`` or ``{name}.bias`` already exists.
    """
    weight_key, bias_key = f"{name}.weight", f"{name}.bias"
    if weight_key in tensors or bias_key in tensors:
        raise ValueError(f"Gate already exists: {name}")
    for key, values in ((weight_key, weight), (bias_key, bias)):
        tensors[key] = torch.tensor(list(values), dtype=torch.float32)
38
-
39
-
40
def drop_prefixes(tensors: Dict[str, torch.Tensor], prefixes: List[str]) -> None:
    """Delete, in place, every tensor whose key starts with one of *prefixes*."""
    prefix_tuple = tuple(prefixes)
    # str.startswith accepts a tuple of candidates; snapshot keys before deleting.
    doomed = [key for key in tensors if key.startswith(prefix_tuple)]
    for key in doomed:
        del tensors[key]
44
-
45
-
46
def add_decoder(tensors: Dict[str, torch.Tensor]) -> None:
    """Build the one-hot address decoder for the 16-bit memory space.

    Row *addr* reaches threshold exactly when the ADDR_BITS input lines,
    MSB-first, spell out *addr*: weight +1 where the address bit is 1,
    -1 where it is 0, and bias of -popcount(addr).
    """
    # Vectorized replacement for the original 65,536-iteration Python loop;
    # produces bit-identical tensors with a large constant-factor speedup.
    addrs = torch.arange(MEM_BYTES, dtype=torch.int64)
    # MSB-first bit planes: column i holds bit (ADDR_BITS - 1 - i) of each address.
    shifts = torch.tensor([ADDR_BITS - 1 - i for i in range(ADDR_BITS)], dtype=torch.int64)
    bits = (addrs.unsqueeze(1) >> shifts) & 1
    weights = torch.where(bits == 1, 1.0, -1.0).to(torch.float32)
    bias = (-bits.sum(dim=1)).to(torch.float32)
    tensors["memory.addr_decode.weight"] = weights
    tensors["memory.addr_decode.bias"] = bias
55
-
56
-
57
def add_memory_read_mux(tensors: Dict[str, torch.Tensor]) -> None:
    """Install packed AND/OR gate weights for the 8-bit memory read mux."""
    tensors.update(
        {
            "memory.read.and.weight": torch.ones((8, MEM_BYTES, 2), dtype=torch.float32),
            "memory.read.and.bias": torch.full((8, MEM_BYTES), -2.0, dtype=torch.float32),
            "memory.read.or.weight": torch.ones((8, MEM_BYTES), dtype=torch.float32),
            "memory.read.or.bias": torch.full((8,), -1.0, dtype=torch.float32),
        }
    )
67
-
68
-
69
def add_memory_write_cells(tensors: Dict[str, torch.Tensor]) -> None:
    """Install packed write-path gates: select/not-select plus keep-old,
    take-new, and combining OR weights for every memory byte."""
    tensors.update(
        {
            "memory.write.sel.weight": torch.ones((MEM_BYTES, 2), dtype=torch.float32),
            "memory.write.sel.bias": torch.full((MEM_BYTES,), -2.0, dtype=torch.float32),
            "memory.write.nsel.weight": torch.full((MEM_BYTES, 1), -1.0, dtype=torch.float32),
            "memory.write.nsel.bias": torch.zeros((MEM_BYTES,), dtype=torch.float32),
            "memory.write.and_old.weight": torch.ones((MEM_BYTES, 8, 2), dtype=torch.float32),
            "memory.write.and_old.bias": torch.full((MEM_BYTES, 8), -2.0, dtype=torch.float32),
            "memory.write.and_new.weight": torch.ones((MEM_BYTES, 8, 2), dtype=torch.float32),
            "memory.write.and_new.bias": torch.full((MEM_BYTES, 8), -2.0, dtype=torch.float32),
            "memory.write.or.weight": torch.ones((MEM_BYTES, 8, 2), dtype=torch.float32),
            "memory.write.or.bias": torch.full((MEM_BYTES, 8), -1.0, dtype=torch.float32),
        }
    )
93
-
94
-
95
def add_fetch_load_store_buffers(tensors: Dict[str, torch.Tensor]) -> None:
    """Add identity buffer gates (output = input: weight 1, bias -1) for the
    instruction-register, load, store, and memory-address paths."""
    # Build names in the original insertion order (load/store interleaved).
    buffer_names = [f"control.fetch.ir.bit{bit}" for bit in range(16)]
    for bit in range(8):
        buffer_names.append(f"control.load.bit{bit}")
        buffer_names.append(f"control.store.bit{bit}")
    buffer_names.extend(f"control.mem_addr.bit{bit}" for bit in range(ADDR_BITS))
    for gate_name in buffer_names:
        add_gate(tensors, gate_name, [1.0], [-1.0])
104
-
105
-
106
def update_manifest(tensors: Dict[str, torch.Tensor]) -> None:
    """Refresh manifest constants for the 16-bit address space (format version 3)."""
    constants = {
        "manifest.memory_bytes": float(MEM_BYTES),
        "manifest.pc_width": float(ADDR_BITS),
        "manifest.version": 3.0,
    }
    for key, value in constants.items():
        tensors[key] = torch.tensor([value], dtype=torch.float32)
111
-
112
-
113
def write_manifest(path: Path, tensors: Dict[str, torch.Tensor]) -> None:
    """Write a human-readable manifest: one line per tensor (sorted by name)
    with its shape and all values rendered to one decimal place."""
    header = ["# Tensor Manifest", f"# Total: {len(tensors)} tensors"]
    body: List[str] = []
    for name in sorted(tensors):
        tensor = tensors[name]
        rendered = ", ".join(f"{item:.1f}" for item in tensor.flatten().tolist())
        body.append(f"{name}: shape={list(tensor.shape)}, values=[{rendered}]")
    path.write_text("\n".join(header + body) + "\n", encoding="utf-8")
122
-
123
-
124
def main() -> None:
    """Rebuild the memory subsystem tensors in neural_computer.safetensors.

    Drops any previous memory/control-buffer gates, regenerates them for the
    16-bit address space, then rewrites both the model file and tensors.txt
    in place.
    """
    tensors = load_tensors(MODEL_PATH)
    # Remove stale versions of every tensor family this script regenerates.
    drop_prefixes(
        tensors,
        [
            "memory.addr_decode.",
            "memory.read.",
            "memory.write.",
            "control.fetch.ir.",
            "control.load.",
            "control.store.",
            "control.mem_addr.",
        ],
    )
    add_decoder(tensors)
    add_memory_read_mux(tensors)
    add_memory_write_cells(tensors)
    add_fetch_load_store_buffers(tensors)
    update_manifest(tensors)
    # Persist the updated model in place, then regenerate the text manifest.
    save_file(tensors, str(MODEL_PATH))
    write_manifest(MANIFEST_PATH, tensors)
146
-
147
# Script entry point.
if __name__ == "__main__":
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
neural_computer.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ba0c0e7e6286bc5a55d66ecbda8a1d43084a72e6a960d898b268fb6558c473a4
3
- size 33725820
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64bf038473b731ab149cfb74cf0f4aa65617b52d5f81f140c6ab3b763834f256
3
+ size 34268956