| | |
| | |
import numpy as np
import matplotlib.pyplot as plt

from IPython import display

# Render inline figures as SVG. ``display.set_matplotlib_formats`` was
# deprecated in IPython 7.23 and removed in later releases, so guard the
# call: on modern IPython the figure format simply stays at the default
# (use ``%config InlineBackend.figure_formats = {'svg'}`` there instead).
try:
    display.set_matplotlib_formats('svg')
except AttributeError:
    pass  # NOTE(review): API removed on newer IPython — best-effort only

# Sampling grid: the function domain and the pool of random starting points.
x = np.linspace(-2, 2, 2001)
| |
|
| | |
def fx(x):
    """Objective function f(x) = 3x^2 - 3x + 4 (works elementwise on arrays)."""
    quadratic_part = 3 * x ** 2
    linear_part = 3 * x
    return quadratic_part - linear_part + 4
| |
|
| | |
def deriv(x):
    """Analytic first derivative of fx: f'(x) = 6x - 3."""
    slope_term = 6 * x
    return slope_term - 3
| | |
| | |
| |
|
| | |
# Experiment 1: gradient descent with a FIXED learning rate.
# Start from a random point on the sampling grid.
localmin = np.random.choice(x, 1)
initial = localmin[:]  # copy of the starting point

# Gradient-descent hyperparameters.
learning_rate = 0.01
training_epochs = 50

# Per-epoch log. Columns: [current estimate, gradient, learning rate used].
modelparamsFixed = np.zeros((training_epochs, 3))
for i in range(training_epochs):

    # Gradient at the current estimate (1-element array).
    grad = deriv(localmin)

    # Fixed schedule: the step size never changes.
    lr = learning_rate

    # Take one descent step.
    localmin = localmin - lr * grad

    # Use .item() to extract the scalar: assigning a 1-element ndarray into
    # a scalar slot is deprecated in NumPy >= 1.25 and fails in NumPy 2.x.
    modelparamsFixed[i, 0] = localmin.item()
    modelparamsFixed[i, 1] = grad.item()
    modelparamsFixed[i, 2] = lr
| | |
| | |
# Experiment 2: learning rate scaled by the gradient magnitude.
# Start from a fresh random point; keep it for the time-based run below.
localmin = np.random.choice(x, 1)
initval = localmin[:]  # reused as the starting point of experiment 3

# Gradient-descent hyperparameters.
learning_rate = 0.01
training_epochs = 50

# Per-epoch log. Columns: [current estimate, gradient, learning rate used].
modelparamsGrad = np.zeros((training_epochs, 3))
for i in range(training_epochs):

    # Gradient at the current estimate (1-element array).
    grad = deriv(localmin)

    # Gradient-scaled schedule: big steps far from the minimum (large |grad|),
    # small steps near it.
    lr = learning_rate * np.abs(grad)

    # Take one descent step.
    localmin = localmin - lr * grad

    # .item() extracts the scalar; implicit ndarray-to-scalar assignment is
    # deprecated in NumPy >= 1.25 and fails in NumPy 2.x.
    modelparamsGrad[i, 0] = localmin.item()
    modelparamsGrad[i, 1] = grad.item()
    modelparamsGrad[i, 2] = lr.item()
| | |
| | |
| |
|
| | |
# Experiment 3: learning rate decaying linearly over time.
# Reuse the same starting point as the gradient-scaled run for comparability.
learning_rate = 0.1
localmin = initval

# Per-epoch log. Columns: [current estimate, gradient, learning rate used].
modelparamsTime = np.zeros((training_epochs, 3))
for i in range(training_epochs):

    # Gradient at the current estimate (1-element array).
    grad = deriv(localmin)

    # Time-based schedule: decays linearly from ~learning_rate down to 0
    # over the course of training.
    lr = learning_rate * (1 - (i + 1) / training_epochs)

    # Take one descent step.
    localmin = localmin - lr * grad

    # .item() extracts the scalar; implicit ndarray-to-scalar assignment is
    # deprecated in NumPy >= 1.25 and fails in NumPy 2.x.
    modelparamsTime[i, 0] = localmin.item()
    modelparamsTime[i, 1] = grad.item()
    modelparamsTime[i, 2] = lr
| | |
| | |
# Compare the three schedules: estimate, gradient, and learning rate per
# iteration, one panel each.
fig, ax = plt.subplots(1, 3, figsize=(12, 3))

panel_ylabels = ('Local minimum', 'Derivative', 'Learning rate')
for col, panel in enumerate(ax):
    panel.plot(modelparamsFixed[:, col], 'o-', markerfacecolor='w')
    panel.plot(modelparamsGrad[:, col], 'o-', markerfacecolor='w')
    panel.plot(modelparamsTime[:, col], 'o-', markerfacecolor='w')
    panel.set_xlabel('Iteration')
    panel.set_ylabel(panel_ylabels[col])

ax[2].legend(['Fixed l.r.', 'Grad-based l.r', 'Time-based l.r.'])

plt.tight_layout()
plt.show()
| | |
| | |
| |
|
| | |
# Visualize the objective and its derivative over the sampling grid.
x = np.linspace(-2, 2, 2001)

for curve in (fx(x), deriv(x)):
    plt.plot(x, curve)
plt.xlim(x[[0, -1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['y', 'dy'])
plt.show()
| | |
| | |
# Plain gradient descent from a random starting guess; print start and end.
localmin = np.random.choice(x, 1)

print(localmin)

# Hyperparameters for this run.
learning_rate = 0.01
training_epochs = 100

# Repeatedly step downhill along the derivative.
for epoch in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad

print(localmin)
| | |
| |
|
| | |
# Mark the found minimum on both the function and its derivative curves.
plt.plot(x, fx(x), x, deriv(x))
plt.plot(localmin, deriv(localmin), 'ro')
plt.plot(localmin, fx(localmin), 'ro')

plt.xlim(x[[0, -1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)', 'df', 'f(x) min'])
# Typo fix in the user-facing title: "Emprical" -> "Empirical".
plt.title('Empirical local minimum: %s' % localmin[0])
plt.show()
| |
|
| | |
| | |
# Gradient descent where the learning rate grows slightly each epoch
# (starting tiny); print the start and end estimates.
localmin = np.random.choice(x, 1)

print(localmin)

# Hyperparameters for this run.
learning_rate = 0.0001
training_epochs = 100

for epoch in range(training_epochs):
    # Increase the step size a little every epoch.
    learning_rate = learning_rate + 0.00001
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad

print(localmin)
| | |
| |
|
| | |
# Mark the found minimum on both the function and its derivative curves.
plt.plot(x, fx(x), x, deriv(x))
plt.plot(localmin, deriv(localmin), 'ro')
plt.plot(localmin, fx(localmin), 'ro')

plt.xlim(x[[0, -1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)', 'df', 'f(x) min'])
# Typo fix in the user-facing title: "Emprical" -> "Empirical".
plt.title('Empirical local minimum: %s' % localmin[0])
plt.show()
| |
|
| | |
| | |
# Gradient descent while logging the estimate and gradient at every epoch.
localmin = np.random.choice(x, 1)

# Hyperparameters for this run.
learning_rate = 0.01
training_epochs = 100

# Per-epoch log. Columns: [current estimate, gradient at that estimate].
modelparams = np.zeros((training_epochs, 2))
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad

    # .item() extracts the scalar: assigning a 1-element ndarray into a
    # scalar slot is deprecated in NumPy >= 1.25 and fails in NumPy 2.x.
    modelparams[i, 0] = localmin.item()
    modelparams[i, 1] = grad.item()
| |
|
| | |
| |
|
| | |
# Show the trajectory of the estimate and its gradient across iterations.
fig, ax = plt.subplots(1, 2, figsize=(12, 4))

panel_ylabels = ('Local minimum', 'Derivative')
for idx, panel in enumerate(ax):
    panel.plot(modelparams[:, idx], 'o-')
    panel.set_xlabel('Iteration')
    panel.set_title(f'Final estimated minimum: {localmin[0]:.5f}')
    panel.set_ylabel(panel_ylabels[idx])

plt.show()