quant-iota committed
Commit 72f4dbc · verified · Parent: 7eed62e

Upload 10 files
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+data/MNIST/raw/t10k-images-idx3-ubyte filter=lfs diff=lfs merge=lfs -text
+data/MNIST/raw/train-images-idx3-ubyte filter=lfs diff=lfs merge=lfs -text
+logo.png filter=lfs diff=lfs merge=lfs -text
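The three new patterns route this commit's large binaries through Git LFS. As a rough, non-authoritative illustration (not part of the commit, and `fnmatch` only approximates git's gitignore-style matching rules), here is how the added files line up against those patterns:

```python
# Rough illustration only: gitattributes matching follows gitignore rules,
# which fnmatch only approximates (e.g. around `*` vs `/`).
from fnmatch import fnmatch

patterns = [
    "data/MNIST/raw/t10k-images-idx3-ubyte",
    "data/MNIST/raw/train-images-idx3-ubyte",
    "logo.png",
]
added_files = [
    "data/MNIST/raw/t10k-images-idx3-ubyte",   # matches -> LFS pointer
    "data/MNIST/raw/t10k-labels-idx1-ubyte",   # no match -> ordinary git blob
    "logo.png",                                # matches -> LFS pointer
]
for f in added_files:
    tracked = any(fnmatch(f, p) for p in patterns)
    print(f"{f}: {'LFS' if tracked else 'regular git'}")
```

This matches what the file listing below shows: the raw image files and the logo become LFS pointers, while the small label files stay ordinary git binaries.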
app.py ADDED
@@ -0,0 +1,324 @@
+# SKA Single Digit Entropy State Explorer
+import torch
+import torch.nn as nn
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from torchvision import datasets, transforms
+import gradio as gr
+import io
+from datetime import datetime
+from PIL import Image
+
+# Load MNIST from local data
+transform = transforms.Compose([transforms.ToTensor()])
+mnist_dataset = datasets.MNIST(root='./data', train=True, download=False, transform=transform)
+
+
+class SKAModel(nn.Module):
+    def __init__(self, input_size=784, layer_sizes=[256, 128, 64, 10], K=50):
+        super(SKAModel, self).__init__()
+        self.input_size = input_size
+        self.layer_sizes = layer_sizes
+        self.K = K
+
+        self.weights = nn.ParameterList()
+        self.biases = nn.ParameterList()
+        prev_size = input_size
+        for size in layer_sizes:
+            self.weights.append(nn.Parameter(torch.randn(prev_size, size) * 0.01))
+            self.biases.append(nn.Parameter(torch.zeros(size)))
+            prev_size = size
+
+        # Per-layer state and diagnostics tracked across the K forward passes
+        self.Z = [None] * len(layer_sizes)
+        self.Z_prev = [None] * len(layer_sizes)
+        self.D = [None] * len(layer_sizes)
+        self.D_prev = [None] * len(layer_sizes)
+        self.delta_D = [None] * len(layer_sizes)
+        self.entropy = [None] * len(layer_sizes)
+        self.entropy_history = [[] for _ in range(len(layer_sizes))]
+        self.cosine_history = [[] for _ in range(len(layer_sizes))]
+        self.frobenius_history = [[] for _ in range(len(layer_sizes))]
+        self.weight_frobenius_history = [[] for _ in range(len(layer_sizes))]
+        self.net_history = [[] for _ in range(len(layer_sizes))]
+        self.tensor_net_total = [0.0] * len(layer_sizes)
+        self.output_history = []
+
+    def forward(self, x):
+        batch_size = x.shape[0]
+        x = x.view(batch_size, -1)
+        for l in range(len(self.layer_sizes)):
+            z = torch.mm(x, self.weights[l]) + self.biases[l]
+            self.frobenius_history[l].append(torch.norm(z, p='fro').item())
+            d = torch.sigmoid(z)
+            self.Z[l] = z
+            self.D[l] = d
+            x = d
+        return x
+
+    def calculate_entropy(self):
+        total_entropy = 0
+        for l in range(len(self.layer_sizes)):
+            if self.Z[l] is not None and self.D_prev[l] is not None and self.D[l] is not None and self.Z_prev[l] is not None:
+                self.delta_D[l] = self.D[l] - self.D_prev[l]
+                delta_Z = self.Z[l] - self.Z_prev[l]
+                # Layer entropy: H_l = -(1/ln 2) * sum(Z ⊙ ΔD)
+                H_lk = (-1 / np.log(2)) * (self.Z[l] * self.delta_D[l])
+                layer_entropy = torch.sum(H_lk)
+                self.entropy[l] = layer_entropy.item()
+                self.entropy_history[l].append(layer_entropy.item())
+
+                # Cosine alignment between Z and ΔD
+                dot_product = torch.sum(self.Z[l] * self.delta_D[l])
+                z_norm = torch.norm(self.Z[l])
+                delta_d_norm = torch.norm(self.delta_D[l])
+                if z_norm > 0 and delta_d_norm > 0:
+                    self.cosine_history[l].append((dot_product / (z_norm * delta_d_norm)).item())
+                else:
+                    self.cosine_history[l].append(0.0)
+
+                total_entropy += layer_entropy
+                D_prime = self.D[l] * (1 - self.D[l])
+                nabla_z_H = (1 / np.log(2)) * self.Z[l] * D_prime
+                tensor_net_step = torch.sum(delta_Z * (self.D[l] - nabla_z_H))
+                self.net_history[l].append(tensor_net_step.item())
+                self.tensor_net_total[l] += tensor_net_step.item()
+        return total_entropy
+
+    def ska_update(self, inputs, learning_rate=0.01):
+        # Manual entropy-gradient step; run under no_grad so the hand-rolled
+        # update stays out of autograd (the original reassigned graph-tracking
+        # tensors into the ParameterList).
+        with torch.no_grad():
+            for l in range(len(self.layer_sizes)):
+                if self.delta_D[l] is not None:
+                    prev_output = inputs.view(inputs.shape[0], -1) if l == 0 else self.D_prev[l - 1]
+                    d_prime = self.D[l] * (1 - self.D[l])
+                    gradient = -1 / np.log(2) * (self.Z[l] * d_prime + self.delta_D[l])
+                    dW = torch.matmul(prev_output.t(), gradient) / prev_output.shape[0]
+                    self.weights[l] -= learning_rate * dW
+                    self.biases[l] -= learning_rate * gradient.mean(dim=0)
+
+    def initialize_tensors(self, batch_size):
+        # Reset all per-run state (batch_size is unused; kept for API compatibility)
+        for l in range(len(self.layer_sizes)):
+            self.Z[l] = None
+            self.Z_prev[l] = None
+            self.D[l] = None
+            self.D_prev[l] = None
+            self.delta_D[l] = None
+            self.entropy[l] = None
+            self.entropy_history[l] = []
+            self.cosine_history[l] = []
+            self.frobenius_history[l] = []
+            self.weight_frobenius_history[l] = []
+            self.net_history[l] = []
+            self.tensor_net_total[l] = 0.0
+        self.output_history = []
+
+
+def get_mnist_single_digit(digit, samples, data_seed=0):
+    targets = mnist_dataset.targets.numpy()
+    rng = np.random.RandomState(data_seed)
+    all_indices = np.where(targets == digit)[0]
+    rng.shuffle(all_indices)
+    images_list = [mnist_dataset[idx][0] for idx in all_indices[:samples]]
+    return torch.stack(images_list)
+
+
+def plot_convergence_comparison(history):
+    if not history:
+        fig, ax = plt.subplots()
+        ax.text(0.5, 0.5, "No history yet — run at least one architecture.", ha='center', va='center')
+        buf = io.BytesIO()
+        fig.savefig(buf, format='png', bbox_inches='tight')
+        plt.close(fig)
+        buf.seek(0)
+        return Image.open(buf)
+
+    colors = plt.cm.tab10(np.linspace(0, 1, max(len(history), 1)))
+
+    fig = plt.figure(figsize=(14, 30))
+    ax1 = fig.add_subplot(311, projection='3d')  # L1, L2, L3
+    ax2 = fig.add_subplot(312, projection='3d')  # L1, L2, L4
+    ax3 = fig.add_subplot(313, projection='3d')  # L2, L3, L4
+
+    for i, run in enumerate(history):
+        h = run["entropy_history_norm"]
+        if len(h) < 3:
+            continue
+
+        H1 = np.array(h[0])
+        H2 = np.array(h[1])
+        H3 = np.array(h[2])
+        H4 = np.array(h[3]) if len(h) > 3 else np.zeros_like(H1)
+        color = colors[i % len(colors)]
+        label = f"{run['architecture']} K={run['K']} τ={run['tau']:.2f}"
+
+        ax1.plot(H1, H2, H3, color=color, linewidth=1.5, alpha=0.8, label=label)
+        ax1.scatter(H1[0], H2[0], H3[0], color='green', s=60, zorder=5)
+        ax1.scatter(H1[-1], H2[-1], H3[-1], color='red', s=60, zorder=5)
+        for k in range(0, len(H1), max(1, len(H1) // 5)):
+            ax1.scatter(H1[k], H2[k], H3[k], color='black', s=15, zorder=5)
+
+        ax2.plot(H1, H2, H4, color=color, linewidth=1.5, alpha=0.8, label=label)
+        ax2.scatter(H1[0], H2[0], H4[0], color='green', s=60, zorder=5)
+        ax2.scatter(H1[-1], H2[-1], H4[-1], color='red', s=60, zorder=5)
+        for k in range(0, len(H1), max(1, len(H1) // 5)):
+            ax2.scatter(H1[k], H2[k], H4[k], color='black', s=15, zorder=5)
+
+        ax3.plot(H2, H3, H4, color=color, linewidth=1.5, alpha=0.8, label=label)
+        ax3.scatter(H2[0], H3[0], H4[0], color='green', s=60, zorder=5)
+        ax3.scatter(H2[-1], H3[-1], H4[-1], color='red', s=60, zorder=5)
+        for k in range(0, len(H2), max(1, len(H2) // 5)):
+            ax3.scatter(H2[k], H3[k], H4[k], color='black', s=15, zorder=5)
+
+    digit = history[0]["digit"]
+    ax1.set_xlabel("Layer 1 (h/n)", fontsize=8)
+    ax1.set_ylabel("Layer 2 (h/n)", fontsize=8)
+    ax1.set_zlabel("Layer 3 (h/n)", fontsize=8)
+    ax1.set_title("3D Trajectory (L1, L2, L3)\n● start ● end", fontsize=10)
+    ax1.legend(fontsize=6, loc='upper left')
+
+    ax2.set_xlabel("Layer 1 (h/n)", fontsize=8)
+    ax2.set_ylabel("Layer 2 (h/n)", fontsize=8)
+    ax2.set_zlabel("Layer 4 (h/n)", fontsize=8)
+    ax2.set_title("3D Trajectory (L1, L2, L4)\n● start ● end", fontsize=10)
+    ax2.legend(fontsize=6, loc='upper left')
+
+    ax3.set_xlabel("Layer 2 (h/n)", fontsize=8)
+    ax3.set_ylabel("Layer 3 (h/n)", fontsize=8)
+    ax3.set_zlabel("Layer 4 (h/n)", fontsize=8)
+    ax3.set_title("3D Trajectory (L2, L3, L4)\n● start ● end", fontsize=10)
+    ax3.legend(fontsize=6, loc='upper left')
+
+    fig.suptitle(f"4D Entropy State Trajectories — Digit {digit} — Architecture Comparison", fontsize=12, y=1.01)
+    fig.tight_layout()
+    buf = io.BytesIO()
+    fig.savefig(buf, format='png', dpi=100, bbox_inches='tight')
+    plt.close(fig)
+    buf.seek(0)
+    return Image.open(buf)
+
+
+def run_ska(digit, n1, n2, n3, n4, K, tau, samples, data_seed, history):
+    digit = int(digit)
+    layer_sizes = [int(n1), int(n2), int(n3), int(n4)]
+    neurons_str = ", ".join(str(n) for n in layer_sizes)
+
+    K = int(K)
+    samples = int(samples)
+    data_seed = int(data_seed)
+    learning_rate = tau / K
+
+    inputs = get_mnist_single_digit(digit, samples, data_seed)
+
+    torch.manual_seed(42)
+    np.random.seed(42)
+    model = SKAModel(input_size=784, layer_sizes=layer_sizes, K=K)
+    model.initialize_tensors(inputs.size(0))
+
+    for k in range(K):
+        outputs = model.forward(inputs)
+        model.output_history.append(outputs.mean(dim=0).detach().cpu().numpy())
+        if k > 0:
+            model.calculate_entropy()
+            model.ska_update(inputs, learning_rate)
+        for l in range(len(model.layer_sizes)):
+            model.weight_frobenius_history[l].append(torch.norm(model.weights[l], p='fro').item())
+        model.D_prev = [d.clone().detach() if d is not None else None for d in model.D]
+        model.Z_prev = [z.clone().detach() if z is not None else None for z in model.Z]
+
+    num_layers = len(layer_sizes)
+
+    convergence_state = [
+        model.entropy_history[l][-1] / layer_sizes[l] if model.entropy_history[l] else 0.0
+        for l in range(num_layers)
+    ]
+
+    entropy_history_norm = [
+        [v / layer_sizes[l] for v in model.entropy_history[l]]
+        for l in range(num_layers)
+    ]
+
+    run = {
+        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+        "digit": digit,
+        "architecture": neurons_str,
+        "K": K,
+        "tau": tau,
+        "samples": samples,
+        "seed": data_seed,
+        "convergence_state": convergence_state,
+        "entropy_history_norm": entropy_history_norm,
+    }
+    history = history + [run]
+
+    # Plot 1: normalized entropy trajectory (current run)
+    fig1, axes1 = plt.subplots(num_layers, 1, figsize=(10, 3 * num_layers), sharex=True)
+    if num_layers == 1:
+        axes1 = [axes1]
+    for l in range(num_layers):
+        axes1[l].plot(entropy_history_norm[l])
+        axes1[l].set_title(f"Layer {l+1} ({layer_sizes[l]} neurons): Normalized Entropy", fontsize=11)
+        axes1[l].set_ylabel("h / n_neurons")
+        axes1[l].grid(True)
+    axes1[-1].set_xlabel("Step Index K")
+    fig1.suptitle(f"Digit {digit} | Architecture: [{neurons_str}] | K={K} | τ={tau:.2f}", fontsize=12)
+    fig1.tight_layout()
+
+    fig2 = plot_convergence_comparison(history)
+
+    return fig1, fig2, history
+
+
+def clear_history():
+    return plot_convergence_comparison([]), []
+
+
+with gr.Blocks(title="SKA Single Digit Explorer") as demo:
+    gr.Image("logo.png", show_label=False, height=100, container=False)
+    gr.Markdown("# SKA Single Digit Explorer")
+    gr.Markdown("Explore the 4D entropy state trajectory for a single digit across different architectures.")
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            digit_selector = gr.Radio(
+                choices=[str(d) for d in range(10)],
+                value="0",
+                label="Select Digit"
+            )
+            n1_input = gr.Slider(8, 512, value=256, step=8, label="Layer 1 — neurons")
+            n2_input = gr.Slider(8, 512, value=128, step=8, label="Layer 2 — neurons")
+            n3_input = gr.Slider(8, 256, value=64, step=8, label="Layer 3 — neurons")
+            n4_input = gr.Slider(2, 64, value=10, step=1, label="Layer 4 — neurons")
+            k_slider = gr.Slider(1, 200, value=50, step=1, label="K (forward steps)")
+            tau_slider = gr.Slider(0.1, 0.75, value=0.5, step=0.01, label="Learning budget τ (τ = η·K)")
+            samples_slider = gr.Slider(1, 100, value=100, step=1, label="Samples")
+            seed_slider = gr.Slider(0, 99, value=0, step=1, label="Data seed")
+            run_btn = gr.Button("Run & Archive", variant="primary")
+            clear_btn = gr.Button("Clear History", variant="stop")
+
+            gr.Markdown("---")
+            gr.Markdown("### Reference Paper")
+            gr.HTML('<a href="https://arxiv.org/abs/2503.13942v1" target="_blank">arXiv:2503.13942v1</a>')
+
+            gr.Markdown("---")
+            gr.Markdown("### SKA Explorer Suite")
+            gr.HTML('<a href="https://huggingface.co/quant-iota" target="_blank">⬅ All Apps</a>')
+            gr.Markdown("---")
+            gr.Markdown("### About this App")
+            gr.Markdown("Select a digit and explore how its 4D entropy state trajectory changes with architecture. Each digit has a unique geometric fingerprint — compare architectures for the same digit to probe the entropy manifold.")
+
+        with gr.Column(scale=2):
+            plot_current = gr.Plot(label="Current Run: Normalized Entropy Trajectory")
+            plot_comparison = gr.Image(label="4D Entropy State Trajectory")
+
+    history_state = gr.State([])
+
+    run_btn.click(
+        fn=run_ska,
+        inputs=[digit_selector, n1_input, n2_input, n3_input, n4_input, k_slider, tau_slider, samples_slider, seed_slider, history_state],
+        outputs=[plot_current, plot_comparison, history_state],
+    )
+    clear_btn.click(
+        fn=clear_history,
+        inputs=[],
+        outputs=[plot_comparison, history_state],
+    )
+
+demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
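Stepping back from the diff: the core quantity app.py tracks is the per-layer entropy H_l = -(1/ln 2) · Σ(Z ⊙ ΔD), where Z is a layer's pre-activation and ΔD the change in its sigmoid output between consecutive forward passes. Below is a minimal NumPy sketch (not part of the commit; the toy tensors are invented for illustration) of the entropy and cosine terms that `SKAModel.calculate_entropy` records:

```python
# Minimal sketch of the per-layer SKA entropy step from app.py above,
# on a toy 2-sample, 3-neuron layer; the numbers are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
Z_prev = rng.normal(size=(2, 3))            # pre-activations at step k-1
Z = Z_prev + 0.1 * rng.normal(size=(2, 3))  # pre-activations at step k

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

D_prev, D = sigmoid(Z_prev), sigmoid(Z)
delta_D = D - D_prev                        # ΔD, the knowledge shift

# Layer entropy as in SKAModel.calculate_entropy: H = -(1/ln 2) * sum(Z ⊙ ΔD)
H = (-1.0 / np.log(2)) * np.sum(Z * delta_D)
print(f"layer entropy H = {H:.4f}")

# Cosine alignment between Z and ΔD, as tracked in cosine_history
cos = np.sum(Z * delta_D) / (np.linalg.norm(Z) * np.linalg.norm(delta_D))
print(f"cos(Z, delta_D) = {cos:.4f}")
```

The τ slider in the UI sets a learning budget rather than a raw step size: with K forward passes, run_ska uses η = τ/K per step, which is why the slider label reads τ = η·K.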
data/MNIST/raw/t10k-images-idx3-ubyte ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fa7898d509279e482958e8ce81c8e77db3f2f8254e26661ceb7762c4d494ce7
+size 7840016
data/MNIST/raw/t10k-images-idx3-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d422c7b0a1c1c79245a5bcf07fe86e33eeafee792b84584aec276f5a2dbc4e6
+size 1648877
data/MNIST/raw/t10k-labels-idx1-ubyte ADDED
Binary file (10 kB)
 
data/MNIST/raw/t10k-labels-idx1-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ae60f92e00ec6debd23a6088c31dbd2371eca3ffa0defaefb259924204aec6
+size 4542
data/MNIST/raw/train-images-idx3-ubyte ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba891046e6505d7aadcbbe25680a0738ad16aec93bde7f9b65e87a2fc25776db
+size 47040016
data/MNIST/raw/train-images-idx3-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:440fcabf73cc546fa21475e81ea370265605f56be210a4024d2ca8f203523609
+size 9912422
data/MNIST/raw/train-labels-idx1-ubyte ADDED
Binary file (60 kB)
 
data/MNIST/raw/train-labels-idx1-ubyte.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3552534a0a558bbed6aed32b30c495cca23d567ec52cac8be1a0730e8010255c
+size 28881
logo.png ADDED

Git LFS Details

  • SHA256: a1fbe3d70086c916cd7b844c8e3be454b6d2ecb308cc048a4b719e1dfb0eb381
  • Pointer size: 131 bytes
  • Size of remote file: 268 kB
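Each LFS entry above stores only a small text pointer in git; the actual blob lives in LFS storage, keyed by its sha256 oid. As a hedged sketch (an illustrative helper, not part of the commit or of any library), such a pointer file can be parsed like this:

```python
# Sketch: parse a git-LFS pointer file like the data/MNIST/raw/* entries above
# into its version/oid/size fields. Requires Python 3.9+ for str.removeprefix.
from pathlib import Path

def read_lfs_pointer(path):
    """Return {'version': ..., 'oid': ..., 'size': int} for a text LFS pointer."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")  # e.g. "oid sha256:ba89..."
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

# Example (assumes the repo is checked out without `git lfs pull`, so the
# on-disk file is still a pointer rather than the real 9.9 MB archive):
# info = read_lfs_pointer("data/MNIST/raw/train-images-idx3-ubyte.gz")
# print(info["oid"], info["size"])
```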