# dudl / code_challenge_fixed_vs._dynamic_learning_rate.py
# %%
# import all necessary modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib_inline.backend_inline import set_matplotlib_formats
set_matplotlib_formats('svg')  # render notebook figures as SVG
# %%
# define a range for x
x = np.linspace(-2, 2, 2001)
# the function (as a Python function)
def fx(x):
    return 3*x**2 - 3*x + 4

# its derivative
def deriv(x):
    return 6*x - 3
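# %%
# sanity check (an added note): setting the derivative to zero, 6x - 3 = 0,
# puts the analytic minimum at x = 0.5; the gradient descent runs below
# should converge toward this value
print('Analytic minimum: x =', 3/6)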
# %%
# G.D. using a fixed learning rate

# random starting point
localmin = np.random.choice(x, 1)
initial = localmin.copy()  # store the initial value

# learning parameters
learning_rate = 0.01
training_epochs = 50

# run through training and store all the results
modelparamsFixed = np.zeros((training_epochs, 3))
for i in range(training_epochs):

    # compute gradient
    grad = deriv(localmin)

    # non-adaptive learning rate
    lr = learning_rate

    # update the local minimum
    localmin = localmin - lr * grad

    # store the parameters
    modelparamsFixed[i, 0] = localmin[0]
    modelparamsFixed[i, 1] = grad[0]
    modelparamsFixed[i, 2] = lr
# %%
# G.D. using a gradient-based learning rate

# random starting point
localmin = np.random.choice(x, 1)
initval = localmin.copy()  # store the initial value

# learning parameters
learning_rate = 0.01
training_epochs = 50

# run through training and store all the results
modelparamsGrad = np.zeros((training_epochs, 3))
for i in range(training_epochs):

    # compute gradient
    grad = deriv(localmin)

    # adapt the learning rate according to the gradient magnitude
    lr = learning_rate*np.abs(grad)

    # update parameter according to the gradient
    localmin = localmin - lr * grad

    # store the parameters
    modelparamsGrad[i, 0] = localmin[0]
    modelparamsGrad[i, 1] = grad[0]
    modelparamsGrad[i, 2] = lr[0]
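# %%
# side note (an added sketch, not part of the original file): with
# lr = learning_rate*|grad|, the parameter update is learning_rate*|grad|*grad,
# i.e. proportional to grad**2 -- large far from the minimum, tiny near it
g = deriv(np.array([-1.0]))   # far from the minimum at x = 0.5
print('update at x = -1.00:', (learning_rate*np.abs(g)*g).item())
g = deriv(np.array([0.45]))   # close to the minimum
print('update at x =  0.45:', (learning_rate*np.abs(g)*g).item())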
# %%
# G.D. using a time-based learning rate

# redefine parameters
learning_rate = 0.1
localmin = initval.copy()  # start from the same initial value

# run through training and store all the results
modelparamsTime = np.zeros((training_epochs, 3))
for i in range(training_epochs):

    # compute gradient
    grad = deriv(localmin)

    # adapt the learning rate according to the iteration (linear decay to zero)
    lr = learning_rate*(1 - (i+1)/training_epochs)

    # update parameter according to the gradient
    localmin = localmin - lr * grad

    # store the parameters
    modelparamsTime[i, 0] = localmin[0]
    modelparamsTime[i, 1] = grad[0]
    modelparamsTime[i, 2] = lr
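# %%
# (added) compare the final estimates from the three schedules; the analytic
# minimum is x = 0.5, though note the fixed-l.r. run starts from a different
# random point than the other two
print('Fixed l.r.      :', modelparamsFixed[-1, 0])
print('Grad-based l.r. :', modelparamsGrad[-1, 0])
print('Time-based l.r. :', modelparamsTime[-1, 0])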
# %%
# plot the results
fig, ax = plt.subplots(1, 3, figsize=(12, 3))

# generate the plots
for i in range(3):
    ax[i].plot(modelparamsFixed[:, i], 'o-', markerfacecolor='w')
    ax[i].plot(modelparamsGrad[:, i], 'o-', markerfacecolor='w')
    ax[i].plot(modelparamsTime[:, i], 'o-', markerfacecolor='w')
    ax[i].set_xlabel('Iteration')
ax[0].set_ylabel('Local minimum')
ax[1].set_ylabel('Derivative')
ax[2].set_ylabel('Learning rate')
ax[2].legend(['Fixed l.r.', 'Grad-based l.r.', 'Time-based l.r.'])
plt.tight_layout()
plt.show()
# %%
# plot the function and its derivative
# define a range for x
x = np.linspace(-2, 2, 2001)
# plotting
plt.plot(x,fx(x), x, deriv(x))
plt.xlim(x[[0,-1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['y','dy'])
plt.show()
# %%
# random starting point
localmin = np.random.choice(x,1)
print(localmin)
# learning parameters
learning_rate = 0.01
training_epochs = 100
# run through training
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
print(localmin)
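# %%
# (added) convergence check: the derivative at the final estimate should be
# near zero if gradient descent has settled into the minimum
print('Derivative at final estimate:', deriv(localmin))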
# %%
# plot the result
plt.plot(x,fx(x), x, deriv(x))
plt.plot(localmin, deriv(localmin), 'ro')
plt.plot(localmin, fx(localmin), 'ro')
plt.xlim(x[[0,-1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)','df','f(x) min'])
plt.title('Empirical local minimum: %s'%localmin[0])
plt.show()
# %%
# random starting point
localmin = np.random.choice(x,1)
print(localmin)
# learning parameters
learning_rate = 0.0001
training_epochs = 100
# run through training, increasing the learning rate a little on each iteration
for i in range(training_epochs):
    learning_rate += 0.00001
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
print(localmin)
# %%
# plot the result
plt.plot(x,fx(x), x, deriv(x))
plt.plot(localmin, deriv(localmin), 'ro')
plt.plot(localmin, fx(localmin), 'ro')
plt.xlim(x[[0,-1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)','df','f(x) min'])
plt.title('Empirical local minimum: %s'%localmin[0])
plt.show()
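# %%
# (added sketch, not in the original file) another common dynamic schedule is
# exponential decay, lr_t = lr0*gamma**t; gamma = 0.95 is an arbitrary
# illustrative choice here
localmin = np.random.choice(x, 1)
lr0 = 0.1
gamma = 0.95
for i in range(training_epochs):
    lr = lr0*gamma**i
    localmin = localmin - lr*deriv(localmin)
print(localmin)  # should again land near the analytic minimum at x = 0.5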
# %%
# random starting point
localmin = np.random.choice(x,1)
# learning parameters
learning_rate = 0.01
training_epochs = 100
# run through training and store all the results
modelparams = np.zeros((training_epochs,2))
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
    # modelparams[i,:] = localmin, grad
    modelparams[i, 0] = localmin[0]
    modelparams[i, 1] = grad[0]
# %%
# plot the local-minimum estimate and the gradient over iterations
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
for i in range(2):
    ax[i].plot(modelparams[:, i], 'o-')
    ax[i].set_xlabel('Iteration')
    ax[i].set_title(f'Final estimated minimum: {localmin[0]:.5f}')
ax[0].set_ylabel('Local minimum')
ax[1].set_ylabel('Derivative')
plt.show()