# code_challenge_unfortunate_starting_value.py
# %%
# import all necessary modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib_inline
matplotlib_inline.backend_inline.set_matplotlib_formats('svg')
# %%
# Gradient descent in 1D
# the function to minimize: f(x) = cos(2*pi*x) + x^2
def fx(x):
    return np.cos(2*np.pi*x) + x**2

# its derivative, f'(x) = -2*pi*sin(2*pi*x) + 2*x, written in factored form
def deriv(x):
    return -2*(np.pi*np.sin(2*np.pi*x) - x)
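# %%
# sanity check (an added sketch, not part of the original challenge): compare
# the analytic derivative against a central finite difference; agreement means
# deriv() is consistent with fx()
h = 1e-5
xx = np.linspace(-1, 1, 101)
fd = (fx(xx + h) - fx(xx - h)) / (2*h)
print(np.allclose(deriv(xx), fd, atol=1e-6))  # expected: True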
# %%
# plot the function and its derivative
# define a range for x
x = np.linspace(-1, 1, 2001)
# plotting
plt.plot(x,fx(x), x, deriv(x))
plt.xlim(x[[0,-1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)', 'df'])
plt.show()
# %%
# random starting point
localmin = np.random.choice(x,1)
print(localmin)
# learning parameters
learning_rate = 0.01
training_epochs = 100
# run through training
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad

print(localmin)
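# %%
# a quick sketch (my addition, not part of the original exercise): f has more
# than one local minimum, so the endpoint depends on the random start; running
# gradient descent from many starting points shows which minima are reachable
n_starts = 50
finals = np.zeros(n_starts)
for j, s in enumerate(np.random.choice(x, n_starts)):
    lm = s
    for i in range(training_epochs):
        lm = lm - learning_rate * deriv(lm)
    finals[j] = lm
# distinct endpoints (rounded); typically the two minima near x = +/-0.48
print(np.unique(np.round(finals, 3)))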
# %%
# plot the result
plt.plot(x, fx(x), x, deriv(x))
plt.plot(localmin, fx(localmin), 'ro')
plt.plot(localmin, deriv(localmin), 'ro')
plt.xlim(x[[0,-1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)', 'df', 'f(x) min'])
plt.title('Empirical local minimum: %s' % localmin[0])
plt.show()
# %%
# random starting point
localmin = np.random.choice(x,1)
# learning parameters
learning_rate = 0.01
training_epochs = 100
# run through training and store all the results
modelparams = np.zeros((training_epochs, 2))
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
    modelparams[i, 0] = localmin[0]
    modelparams[i, 1] = grad[0]
# %%
# plot the local-minimum estimate and the gradient over iterations
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
for i in range(2):
    ax[i].plot(modelparams[:, i], 'o-')
    ax[i].set_xlabel('Iterations')
    ax[i].set_title(f'Final estimated minimum: {localmin[0]:.5f}')

ax[0].set_ylabel('Local minimum')
ax[1].set_ylabel('Derivative')
plt.show()
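# %%
# aside (added note): x = 0 is an unfortunate starting value because it is a
# critical point: sin(0) = 0, so deriv(0) = -2*(pi*0 - 0) = 0 exactly, and the
# gradient descent update can never move away from it
print(deriv(0))  # exactly zero (prints -0.0)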
# %%
# unfortunate starting point: x = 0, where the derivative is exactly zero
localmin = np.array([0.0])
# learning parameters
learning_rate = 0.01
training_epochs = 100
# run through training and store all the results
modelparams = np.zeros((training_epochs, 2))
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
    modelparams[i, 0] = localmin[0]
    modelparams[i, 1] = grad[0]
# %%
# plot the local-minimum estimate and the gradient over iterations
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
for i in range(2):
    ax[i].plot(modelparams[:, i], 'o-')
    ax[i].set_xlabel('Iterations')
    ax[i].set_title(f'Final estimated minimum: {localmin[0]:.5f}')

ax[0].set_ylabel('Local minimum')
ax[1].set_ylabel('Derivative')
plt.show()
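# %%
# a possible workaround (a sketch under my own assumptions, not necessarily the
# course's intended solution): perturb the starting point by a tiny random
# amount so the gradient is no longer exactly zero; gradient descent then
# slides off the local maximum at x = 0
localmin = np.array([0.0]) + np.random.randn(1)*1e-6
for i in range(training_epochs):
    localmin = localmin - learning_rate * deriv(localmin)
print(localmin)  # reaches one of the two minima near x = +/-0.48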
# %%