# SKA Per-Class Entropy Explorer - Gradio App
import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
import gradio as gr
# Load MNIST from the local ./data directory (download=False assumes the
# dataset is already present; set download=True on first run).
transform = transforms.Compose([transforms.ToTensor()])
mnist_dataset = datasets.MNIST(root='./data', train=True, download=False, transform=transform)
class SKAModel(nn.Module):
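    """Feedforward network trained by Structured Knowledge Accumulation (SKA).

    Learning proceeds through repeated forward passes with layer-local,
    entropy-driven updates rather than backpropagation. Z/D cache the current
    pre-activations and decision probabilities per layer, the *_prev lists hold
    the previous step's snapshots, and the remaining lists record per-step
    diagnostics (entropy, cosine alignment, norms) for plotting.
    """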
def __init__(self, input_size=784, layer_sizes=[256, 128, 64, 10], K=50):
super(SKAModel, self).__init__()
self.input_size = input_size
self.layer_sizes = layer_sizes
self.K = K
self.weights = nn.ParameterList()
self.biases = nn.ParameterList()
prev_size = input_size
for size in layer_sizes:
self.weights.append(nn.Parameter(torch.randn(prev_size, size) * 0.01))
self.biases.append(nn.Parameter(torch.zeros(size)))
prev_size = size
self.Z = [None] * len(layer_sizes)
self.Z_prev = [None] * len(layer_sizes)
self.D = [None] * len(layer_sizes)
self.D_prev = [None] * len(layer_sizes)
self.delta_D = [None] * len(layer_sizes)
self.entropy = [None] * len(layer_sizes)
self.entropy_history = [[] for _ in range(len(layer_sizes))]
self.cosine_history = [[] for _ in range(len(layer_sizes))]
self.output_history = []
self.frobenius_history = [[] for _ in range(len(layer_sizes))]
self.weight_frobenius_history = [[] for _ in range(len(layer_sizes))]
self.net_history = [[] for _ in range(len(layer_sizes))]
self.tensor_net_total = [0.0] * len(layer_sizes)
def forward(self, x):
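        """Forward pass through all layers, caching z and D = sigmoid(z) per layer.

        Called once per SKA step k; entropy is later computed from these cached
        tensors against the previous step's snapshots.
        """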
batch_size = x.shape[0]
x = x.view(batch_size, -1)
for l in range(len(self.layer_sizes)):
z = torch.mm(x, self.weights[l]) + self.biases[l]
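            # Track ||z||_F per step as a growth/stability diagnostic.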
frobenius_norm = torch.norm(z, p='fro')
self.frobenius_history[l].append(frobenius_norm.item())
d = torch.sigmoid(z)
self.Z[l] = z
self.D[l] = d
x = d
return x
def calculate_entropy(self):
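        """Accumulate SKA entropy and alignment diagnostics for each layer.

        As implemented here (cf. arXiv:2503.13942), the layer entropy is
            H^(l) = -(1/ln 2) * sum(z^(l) * ΔD^(l)),
        where ΔD^(l) is the step-to-step change in decision probabilities.
        Also records cos(θ) between z^(l) and ΔD^(l) as an alignment measure.
        """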
total_entropy = 0
for l in range(len(self.layer_sizes)):
if self.Z[l] is not None and self.D_prev[l] is not None and self.D[l] is not None and self.Z_prev[l] is not None:
self.delta_D[l] = self.D[l] - self.D_prev[l]
delta_Z = self.Z[l] - self.Z_prev[l]
H_lk = (-1 / np.log(2)) * (self.Z[l] * self.delta_D[l])
layer_entropy = torch.sum(H_lk)
self.entropy[l] = layer_entropy.item()
self.entropy_history[l].append(layer_entropy.item())
dot_product = torch.sum(self.Z[l] * self.delta_D[l])
z_norm = torch.norm(self.Z[l])
delta_d_norm = torch.norm(self.delta_D[l])
if z_norm > 0 and delta_d_norm > 0:
cos_theta = dot_product / (z_norm * delta_d_norm)
self.cosine_history[l].append(cos_theta.item())
else:
self.cosine_history[l].append(0.0)
total_entropy += layer_entropy
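                # "Tensor net" diagnostic: per-step term ΔZ · (D - ∇zH), with
                # ∇zH = (1/ln 2) * z * D(1 - D), logged and accumulated for plotting.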
D_prime = self.D[l] * (1 - self.D[l])
nabla_z_H = (1 / np.log(2)) * self.Z[l] * D_prime
tensor_net_step = torch.sum(delta_Z * (self.D[l] - nabla_z_H))
self.net_history[l].append(tensor_net_step.item())
self.tensor_net_total[l] += tensor_net_step.item()
return total_entropy
def ska_update(self, inputs, learning_rate=0.01):
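        """Layer-local, forward-only SKA weight update (no backpropagation).

        As implemented here, each layer descends its local entropy gradient
            ∂H/∂z ≈ -(1/ln 2) * (z * D' + ΔD),  with D' = D(1 - D),
        mapped onto the weights through the layer's input and averaged over the
        batch. No error signal crosses layer boundaries.
        """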
        for l in range(len(self.layer_sizes)):
            if self.delta_D[l] is not None:
                prev_output = inputs.view(inputs.shape[0], -1) if l == 0 else self.D_prev[l-1]
                d_prime = self.D[l] * (1 - self.D[l])
                gradient = (-1 / np.log(2)) * (self.Z[l] * d_prime + self.delta_D[l])
                dW = torch.matmul(prev_output.t(), gradient) / prev_output.shape[0]
                # Update through .data: assigning a plain tensor to a ParameterList
                # slot raises a TypeError, and in-place updates keep autograd out.
                self.weights[l].data -= learning_rate * dW
                self.biases[l].data -= learning_rate * gradient.mean(dim=0)
def initialize_tensors(self, batch_size):
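        """Reset all per-run state: activations, snapshots, and history buffers.

        Note: `batch_size` is accepted for call-site symmetry but is not
        currently used; the tensors are (re)created in forward().
        """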
for l in range(len(self.layer_sizes)):
self.Z[l] = None
self.Z_prev[l] = None
self.D[l] = None
self.D_prev[l] = None
self.delta_D[l] = None
self.entropy[l] = None
self.entropy_history[l] = []
self.cosine_history[l] = []
self.frobenius_history[l] = []
self.weight_frobenius_history[l] = []
self.net_history[l] = []
self.tensor_net_total[l] = 0.0
self.output_history = []
def get_mnist_per_class(samples_per_class, data_seed=0):
"""Select N samples per class from MNIST, return dict of {digit: images}."""
targets = mnist_dataset.targets.numpy()
rng = np.random.RandomState(data_seed)
digit_images = {}
for digit in range(10):
all_indices = np.where(targets == digit)[0]
rng.shuffle(all_indices)
indices = all_indices[:samples_per_class]
images_list = []
for idx in indices:
img, _ = mnist_dataset[idx]
images_list.append(img)
digit_images[digit] = torch.stack(images_list)
return digit_images
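# Headless usage sketch (illustrative only, not exercised by the app): run a
# short SKA loop on one digit class without launching the Gradio UI.
#
#   imgs = get_mnist_per_class(samples_per_class=16)[7]   # digit "7"
#   model = SKAModel(layer_sizes=[64, 10], K=10)
#   model.initialize_tensors(imgs.size(0))
#   with torch.no_grad():
#       for k in range(10):
#           model.forward(imgs)
#           if k > 0:
#               model.calculate_entropy()
#               model.ska_update(imgs, learning_rate=0.05)
#           model.D_prev = [d.clone() if d is not None else None for d in model.D]
#           model.Z_prev = [z.clone() if z is not None else None for z in model.Z]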
def run_ska_per_class(neurons_str, K, tau, samples_per_class, data_seed):
    # Parse comma-separated layer sizes; return no figure on malformed input.
    try:
        layer_sizes = [int(x.strip()) for x in neurons_str.split(",")]
    except ValueError:
        return None
K = int(K)
samples_per_class = int(samples_per_class)
data_seed = int(data_seed)
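    # Fixed learning budget: the slider sets τ = η·K, so the per-step rate is η = τ/K.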
learning_rate = tau / K
# Get data per class
digit_images = get_mnist_per_class(samples_per_class, data_seed)
# Run SKA separately for each digit
all_entropy_histories = {}
for digit in range(10):
inputs = digit_images[digit]
# Fresh model with same seed for each digit
torch.manual_seed(42)
np.random.seed(42)
model = SKAModel(input_size=784, layer_sizes=layer_sizes, K=K)
model.initialize_tensors(inputs.size(0))
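        # Step k = 0 only records Z and D; entropy and the SKA update need the
        # previous step's snapshot, so they begin at k = 1.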
        with torch.no_grad():  # forward-only: avoid building autograd graphs
            for k in range(K):
                model.forward(inputs)
                if k > 0:
                    model.calculate_entropy()
                    model.ska_update(inputs, learning_rate)
                # Snapshot this step's tensors for the next step's deltas.
                model.D_prev = [d.clone().detach() if d is not None else None for d in model.D]
                model.Z_prev = [z.clone().detach() if z is not None else None for z in model.Z]
all_entropy_histories[digit] = [list(model.entropy_history[l]) for l in range(len(layer_sizes))]
num_layers = len(layer_sizes)
colors = plt.cm.tab10(np.linspace(0, 1, 10))
# Plot: per-class entropy trajectory per layer
fig, axes = plt.subplots(num_layers, 1, figsize=(12, 4 * num_layers), sharex=True)
if num_layers == 1:
axes = [axes]
for l in range(num_layers):
ax = axes[l]
for digit in range(10):
h = all_entropy_histories[digit][l]
ax.plot(h, color=colors[digit], label=f"Digit {digit}")
ax.set_title(f"Layer {l+1}: Per-Class Entropy Trajectory", fontsize=13)
ax.set_ylabel("Entropy")
ax.grid(True)
ax.legend(ncol=5, fontsize=8)
axes[-1].set_xlabel("Step index k")
fig.tight_layout()
return fig
with gr.Blocks(title="SKA Per-Class Entropy Explorer") as demo:
gr.Image("logo.png", show_label=False, height=100, container=False)
gr.Markdown("# SKA Per-Class Entropy Explorer")
gr.Markdown("Runs SKA independently for each digit class and overlays entropy trajectories. Each digit has its own model and weights — the entropy trajectory is a pure fingerprint of that digit's structure.")
with gr.Row():
with gr.Column(scale=1):
neurons_input = gr.Textbox(label="Layer sizes (comma-separated)", value="256, 128, 64, 10")
k_slider = gr.Slider(1, 200, value=50, step=1, label="K (forward steps)")
            tau_slider = gr.Slider(0.1, 0.75, value=0.5, step=0.01, label="Learning budget τ (τ = η·K)")
samples_slider = gr.Slider(1, 100, value=100, step=1, label="Samples per class")
seed_slider = gr.Slider(0, 99, value=0, step=1, label="Data seed (shuffle samples)")
run_btn = gr.Button("Run SKA Per-Class", variant="primary")
gr.Markdown("---")
gr.Markdown("### Reference Paper")
            gr.HTML('<a href="https://arxiv.org/abs/2503.13942" target="_blank">arXiv:2503.13942v1</a>')
gr.Markdown("""
**Abstract**
We introduce the Structured Knowledge Accumulation (SKA) framework, which reinterprets entropy as a dynamic, layer-wise measure of knowledge alignment in neural networks. Instead of relying on traditional gradient-based optimization, SKA defines entropy in terms of knowledge vectors and their influence on decision probabilities across multiple layers. This formulation naturally leads to the emergence of activation functions such as the sigmoid as a consequence of entropy minimization. Unlike conventional backpropagation, SKA allows each layer to optimize independently by aligning its knowledge representation with changes in decision probabilities. As a result, total network entropy decreases in a hierarchical manner, allowing knowledge structures to evolve progressively. This approach provides a scalable, biologically plausible alternative to gradient-based learning, bridging information theory and artificial intelligence while offering promising applications in resource-constrained and parallel computing environments.
""")
gr.Markdown("---")
gr.Markdown("### SKA Explorer Suite")
gr.HTML('⬅ All Apps')
gr.Markdown("---")
gr.Markdown("### About this App")
gr.Markdown("SKA runs independently for each digit. Each class traces its own entropy trajectory — revealing phase differences, amplitude inversion, and the hierarchical structure of digit recognition. No labels are used.")
with gr.Column(scale=2):
plot_entropy = gr.Plot(label="Per-Class Entropy Trajectories")
run_btn.click(
fn=run_ska_per_class,
inputs=[neurons_input, k_slider, tau_slider, samples_slider, seed_slider],
outputs=[plot_entropy],
)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)