Spaces:
Runtime error
Runtime error
Commit
·
e06de86
1
Parent(s):
9a5fece
Delete demo.py
Browse files
demo.py
DELETED
|
@@ -1,246 +0,0 @@
|
|
| 1 |
-
from init import *
|
| 2 |
-
import random
|
| 3 |
-
import numpy as np
|
| 4 |
-
from autograd.core.engine import Value
|
| 5 |
-
from autograd.core.nn import Neuron, Layer, MLP
|
| 6 |
-
from autograd.core.Graph import draw_dot
|
| 7 |
-
import time
|
| 8 |
-
from graphviz import Digraph
|
| 9 |
-
import imageio
|
| 10 |
-
from functools import partial
|
| 11 |
-
import matplotlib.animation as animation
|
| 12 |
-
import shutil
|
| 13 |
-
from IPython.display import HTML
|
| 14 |
-
import os
|
| 15 |
-
import pandas as pd
|
| 16 |
-
import plotly.subplots as sp
|
| 17 |
-
import plotly.express as px
|
| 18 |
-
import plotly.graph_objects as go
|
| 19 |
-
from plotly.subplots import make_subplots
|
| 20 |
-
import matplotlib.pyplot as plt
|
| 21 |
-
from matplotlib.animation import FuncAnimation ,FFMpegWriter
|
| 22 |
-
import matplotlib.pyplot as plt
|
| 23 |
-
from uilit import *
|
| 24 |
-
# Make the graphviz binaries reachable so computation-graph rendering works.
# NOTE(review): this path looks machine-specific ('./dev/lib/Python 3.11/...')
# — confirm it is valid in the deployment environment.
os.environ["PATH"] += os.pathsep + './dev/lib/Python 3.11/site-packages/graphviz'

# Root directory of the digit-recognizer dataset (read by the "Classification" task).
path_data = 'digit-recognizer/data/'
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
# loss function
|
| 31 |
-
def loss(model, X_train, y_train, batch_size=None):
    """Evaluate the model on a batch and return (total_loss, accuracy, scores).

    total_loss is the SVM "max-margin" hinge loss plus an L2 penalty on the
    model parameters; accuracy is the fraction of samples whose score sign
    matches the label sign; scores are the raw model outputs (Value objects).
    When batch_size is None the whole training set is used, otherwise a
    random subset of that size is drawn.
    """
    # Inline data loader: full batch, or a random permutation prefix.
    if batch_size is None:
        Xb, yb = X_train, y_train
    else:
        pick = np.random.permutation(X_train.shape[0])[:batch_size]
        Xb, yb = X_train[pick], y_train[pick]

    # Wrap each feature in a Value so gradients flow through the model.
    inputs = [[Value(feature) for feature in row] for row in Xb]

    # Forward pass: one score per sample.
    scores = [model(sample) for sample in inputs]

    # Hinge ("max-margin") loss per sample, then the batch mean.
    per_sample = [(1 + -label * score).relu() for label, score in zip(yb, scores)]
    data_loss = sum(per_sample) * (1.0 / len(per_sample))

    # L2 regularization on all parameters.
    alpha = 0.05
    reg_loss = alpha * sum(p * p for p in model.parameters())
    total_loss = data_loss + reg_loss

    # Accuracy: does the sign of the score agree with the sign of the label?
    hits = [(label > 0) == (score.data > 0) for label, score in zip(yb, scores)]
    return total_loss, sum(hits) / len(hits), scores
|
| 56 |
-
|
| 57 |
-
def Optimization_training_progress_realtime(Task,num_epoch, learning_rate ,num_layer,values_wieghts):
    """Train a small MLP with SGD and build animated training visualizations.

    Parameters
    ----------
    Task : str
        Selects the demo: matched against "Sparsity" (2-D toy data with a
        decision-boundary animation) or "Classification" (digit data with a
        prediction-grid animation).
    num_epoch : int-like
        Number of training steps (also the number of animation frames).
    learning_rate : float
        NOTE(review): this argument is never used — it is overwritten each
        step by the decay schedule `1.0 - 0.9 * k / 100` below. Confirm
        whether the parameter should drive the schedule.
    num_layer : int-like
        Input dimension passed to MLP (first constructor argument).
    values_wieghts : int-like
        Hidden-layer width used for both hidden layers.

    Returns
    -------
    (plotly.graph_objects.Figure, str, str)
        The animated loss/accuracy figure plus paths to the two mp4 files.
        NOTE(review): if Task matches neither branch the function falls off
        the end and returns None.
    """
    # Clear out stale frame images from a previous run before regenerating.
    filename = f"assets/plot_res_{num_epoch-1}.png"
    filename_ = f"assets/graph_wights_update_{num_epoch-1}.png"
    if os.path.exists(filename) or os.path.exists(filename_):
        shutil.rmtree('assets/')
        os.makedirs('assets/')
    # Create empty lists for loss and accuracy
    loss_data = []
    accuracy_data = []
    model = MLP(int(num_layer), [int(values_wieghts),int(values_wieghts),1]) # 2-layer neural network
    # Create subplots with shared x-axis
    fig = make_subplots(rows=2, cols=1, shared_xaxes=True, subplot_titles=("Loss", "Accuracy"))

    # Initialize empty lists for loss and accuracy
    # NOTE(review): duplicate of the initialization a few lines above — harmless
    # but redundant.
    loss_data = []
    accuracy_data = []

    # Create initial empty traces
    loss_trace = go.Scatter(x=[], y=[], mode="lines", name="Loss")
    accuracy_trace = go.Scatter(x=[], y=[], mode="lines", name="Accuracy")

    # Add initial traces to the subplots
    fig.add_trace(loss_trace, row=1, col=1)
    fig.add_trace(accuracy_trace, row=2, col=1)
    # Update layout
    fig.update_layout(
        title="Training Progress",
        xaxis_title="Epoch",
        yaxis=dict(title="Loss"),
        yaxis2=dict(title="Accuracy"),
        showlegend=False,
        hovermode='x',
        height=500,
        width=500,
        template='plotly_white'
    )
    # Define animation frames
    frames = []
    # NOTE(review): `Task in "Sparsity"` is a substring test, not equality —
    # e.g. Task="Spar" also matches. Probably intended `Task == "Sparsity"`.
    if Task in "Sparsity":
        X_train , Y_train = initialize_data(n_samples=100 ,noise=0.1)

    elif Task in "Classification":
        FILES , _ = extract_path_df(path_data,10)
        X_train, X_test, Y_train, Y_test = loading_df_to_numpy(FILES[0])
    for k in range(int(num_epoch)):
        # Forward pass
        total_loss, acc,scores = loss(model, X_train, Y_train, batch_size=None)

        # Backward pass
        model.zero_grad()
        total_loss.backward()
        # NOTE(review): calling model(scores) feeds the model's own outputs back
        # into it just to obtain a graph to draw — verify this is intentional.
        draw_dot(model(scores),f'graph_wights_update_{k}')

        # Update (SGD)

        # Linear learning-rate decay; shadows the `learning_rate` parameter.
        learning_rate = 1.0 - 0.9 * k / 100

        for p in model.parameters():
            p.data -= learning_rate * p.grad

        if k % 2 == 0:
            print(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")

        # Append data to lists
        loss_data.append(total_loss.data)
        accuracy_data.append(acc)

        # Update traces
        with fig.batch_update():
            fig.data[0].x = list(range(k+1))
            fig.data[0].y = loss_data
            fig.data[1].x = list(range(k+1))
            fig.data[1].y = accuracy_data

        # Append current frame to frames list
        frames.append(go.Frame(data=[fig.data[0], fig.data[1]]))

        if Task in "Sparsity":
            # Rasterize the current decision boundary on a mesh grid and save
            # it as this epoch's frame image.
            h = 0.25
            x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
            y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
            xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                                 np.arange(y_min, y_max, h))

            Xmesh = np.c_[xx.ravel(), yy.ravel()]
            inputs = [list(map(Value, xrow)) for xrow in Xmesh]
            scores = list(map(model, inputs))
            Z = np.array([s.data > 0 for s in scores])
            Z = Z.reshape(xx.shape)

            plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
            plt.scatter(X_train[:, 0], X_train[:, 1], c=Y_train, s=40, cmap=plt.cm.Spectral)
            plt.xlim(xx.min(), xx.max())
            plt.ylim(yy.min(), yy.max())
            plt.savefig(f'assets/plot_res_{k}.png')
    # Add frames to animation
    fig.frames = frames
    nframes = int(num_epoch)
    interval = int(num_epoch) * 2

    # Create animation
    animation = go.Figure(fig)

    # Set animation settings
    animation.update_layout(
        updatemenus=[
            {
                "buttons": [
                    {
                        "args": [None, {"frame": {"duration": 500, "redraw": True},
                                        "fromcurrent": True, "transition": {"duration": 0}}],
                        "label": "Play",
                        "method": "animate"
                    }
                ],
                "showactive": False,
                "type": "buttons"
            }
        ]
    )

    # Display animation
    # Save animation as GIF

    if Task in "Sparsity":
        # Stitch the per-epoch boundary images into out/training.mp4.
        graph_trace("graph_wights_update", nframes,interval)
        fig_2 = plt.figure()
        def animate_predicion(i):
            # One frame: the saved boundary plot annotated with loss/accuracy.
            im1 = plt.imread(f"assets/plot_res_{i}.png")
            plt.imshow(im1)
            plt.title(f"Epoch: {i+1}\nLoss: {loss_data[i]:.4f} - Accuracy: {accuracy_data[i]:.4f}")
            plt.xlabel("prediction")
            plt.axis('off')

        fig_2.tight_layout()

        lin_ani = FuncAnimation(fig_2, animate_predicion, frames=nframes, interval=200)
        FFwriter = FFMpegWriter(fps=10)

        lin_ani.save('out/training.mp4', writer=FFwriter)
        # Display the animation
        # Show the figure
        return animation ,'out/training.mp4', 'out/Graph.mp4'

    if Task in "Classification":
        # Build a grid animation of random test digits.
        graph_trace("graph_wights_update", nframes,interval)
        inputs_test = [list(map(Value, xrow)) for xrow in X_test]
        # NOTE(review): `predictions` is computed but never used below — the
        # titles show Y_test labels, not model predictions.
        predictions = [scorei.data.argmax() for scorei in list(map(model, inputs_test))]

        # Plot a few examples
        num_examples = 8
        fig_1, axes = plt.subplots(2, 2, figsize=(12, 6))
        fig_1.subplots_adjust(hspace=0.4, wspace=0.4)

        def animate(i):
            # One frame: a fresh random sample of test digits per subplot.
            for j, ax in enumerate(axes.flatten()):
                if j < num_examples:
                    random_index = random.randint(0, X_test.shape[1] - 1)
                    # assumes X_test is (features, samples) with 784-pixel
                    # columns — TODO confirm against loading_df_to_numpy
                    ax.imshow(X_test[:, random_index, None].reshape(28, 28), cmap="gray")
                    ax.set_title(f"Predicted: {Y_test[random_index]}")
                    ax.axis('off')
                else:
                    ax.axis('off')

        fig_1.tight_layout()

        lin_ani = FuncAnimation(fig_1, animate, frames=nframes, interval=200)
        FFwriter = FFMpegWriter(fps=10)

        lin_ani.save('out/Predicted.mp4', writer=FFwriter)
        # fig_1.savefig("reulst.png")
        return animation , 'out/Predicted.mp4', 'out/Graph.mp4'
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
if __name__ == "__main__":
    # Seed both RNG sources so the demo run is reproducible.
    np.random.seed(1337)
    random.seed(1337)

    # Demo configuration: 2-D "Sparsity" toy task, tiny network, short run.
    run_config = dict(
        Task="Sparsity",
        num_epoch=5,
        learning_rate=0.002,
        num_layer=2,
        values_wieghts=4,
    )
    models = Optimization_training_progress_realtime(**run_config)
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|