# code_challenge_2d_gradient_ascent.py
# (course code challenge: 2D gradient ascent on the "peaks" function)
# %%
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym # sympy to compute partial derivatives
from IPython import display
display.set_matplotlib_formats('svg')
# %%
# Gradient ascent in 2D
# the "peaks" function
def peaks(x, y):
    """Evaluate MATLAB's classic 'peaks' surface over the grid spanned by x and y.

    x, y : 1D coordinate vectors; they are expanded to a 2D mesh internally.
    Returns a 2D array of surface heights (rows follow y, columns follow x).
    """
    xx, yy = np.meshgrid(x, y)
    term1 = 3 * (1 - xx) ** 2 * np.exp(-(xx ** 2) - (yy + 1) ** 2)
    term2 = -10 * (xx / 5 - xx ** 3 - yy ** 5) * np.exp(-xx ** 2 - yy ** 2)
    term3 = -np.exp(-(xx + 1) ** 2 - yy ** 2) / 3
    return term1 + term2 + term3
# %%
# build the 2D landscape over the square [-3, 3] x [-3, 3]
x = np.linspace(-3, 3, 201)
y = np.linspace(-3, 3, 201)
Z = peaks(x, y)

# visualize the surface as a heatmap (clipped to [-5, 5] for contrast)
plt.imshow(Z, extent=[x[0], x[-1], y[0], y[-1]], vmin=-5, vmax=5, origin='lower')
plt.show()
# %%
# symbolic copy of the peaks function, so sympy can differentiate it exactly
sx, sy = sym.symbols('sx sy')
sZ = 3*(1-sx)**2 * sym.exp(-(sx**2) - (sy+1)**2) \
    - 10*(sx/5 - sx**3 - sy**5) * sym.exp(-sx**2 - sy**2) \
    - 1/3*sym.exp(-(sx+1)**2 - sy**2)

# exact partial derivatives, then callable functions built from them
dZ_dx = sym.diff(sZ, sx)
dZ_dy = sym.diff(sZ, sy)
df_x = sym.lambdify((sx, sy), dZ_dx, 'sympy')
df_y = sym.lambdify((sx, sy), dZ_dy, 'sympy')

# sanity check: numeric value of dZ/dx at (1, 1)
df_x(1, 1).evalf()
# %%
# random starting point (uniform between -2 and +2)
localmax = np.random.rand(2)*4 - 2  # also try specifying coordinates
# BUG FIX: ndarray[:] returns a VIEW, not a copy — use .copy() so the start
# point is truly independent of the updated localmax
startpnt = localmax.copy()

# learning parameters
learning_rate = 0.01
training_epochs = 1000

# run through training: repeatedly step UP the gradient (ascent)
trajectory = np.zeros((training_epochs, 2))
for i in range(training_epochs):
    # gradient at the current point.
    # BUG FIX: the original evaluated df_y at an undefined 'localmin'
    # (NameError); both partials must be evaluated at localmax.
    # dtype=float converts the sympy Floats to a plain numeric array.
    grad = np.array([
        df_x(localmax[0], localmax[1]).evalf(),
        df_y(localmax[0], localmax[1]).evalf(),
    ], dtype=float)
    # take a step in the direction of steepest increase
    localmax = localmax + learning_rate*grad
    # store the current point
    trajectory[i, :] = localmax

print(localmax)
print(startpnt)
# %%
# visualize the landscape with the ascent trajectory overlaid
plt.imshow(Z, extent=[x[0], x[-1], y[0], y[-1]], vmin=-5, vmax=5, origin='lower')
plt.plot(startpnt[0], startpnt[1], 'bs')            # blue square: random start
plt.plot(localmax[0], localmax[1], 'ro')            # red dot: final estimate
plt.plot(trajectory[:, 0], trajectory[:, 1], 'r')   # path taken during ascent
plt.legend(['rnd start', 'local max'])
plt.colorbar()
plt.show()
# %%