File size: 2,605 Bytes
ec69f9c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
# %%
# import all necessary modules
import numpy as np
import matplotlib.pyplot as plt

# render inline notebook figures as SVG (crisper than the default PNG)
import matplotlib_inline
matplotlib_inline.backend_inline.set_matplotlib_formats('svg')
# %%
# Gradient descent in 1D

def fx(x):
    """Objective function f(x) = cos(2*pi*x) + x**2, vectorized over x."""
    angle = 2 * np.pi * x
    return np.cos(angle) + np.square(x)

# analytic derivative of fx
def deriv(x):
    """Derivative f'(x) = 2*x - 2*pi*sin(2*pi*x), vectorized over x."""
    return 2 * x - 2 * np.pi * np.sin(2 * np.pi * x)

# %%
# plot the function and its derivative on the same axes

# define a dense evaluation grid over [-1, 1]
x = np.linspace(-1, 1, 2001)

# plotting: the function first, then its derivative
plt.plot(x,fx(x), x, deriv(x))
plt.xlim(x[[0,-1]])  # clip the view to the data range (first and last grid points)
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['y','dy'])
plt.show()
# %%
# random starting point, drawn from the plotted grid (shape-(1,) array)
localmin = np.random.choice(x,1)

print(localmin)

# learning parameters
learning_rate = 0.01
training_epochs = 100

# run through training: vanilla gradient descent, x <- x - lr * f'(x)
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad

print(localmin)
# %%

# plot the function, its derivative, and the minimum found by gradient descent
plt.plot(x, fx(x), x, deriv(x))
plt.plot(localmin, deriv(localmin), 'ro')  # marker on the derivative curve
plt.plot(localmin, fx(localmin), 'ro')     # marker on the function curve

plt.xlim(x[[0, -1]])
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(['f(x)', 'df', 'f(x) min'])
# fixed typo in the title: "Emprical" -> "Empirical"
plt.title('Empirical local minimum: %s' % localmin[0])
plt.show()

# %%
# random starting point, drawn from the plotted grid (shape-(1,) array)
localmin = np.random.choice(x, 1)

# learning parameters
learning_rate = 0.01
training_epochs = 100

# run through training, storing the (parameter, gradient) trajectory:
# column 0 = current estimate, column 1 = gradient at the previous estimate
modelparams = np.zeros((training_epochs, 2))
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
    # index the size-1 arrays explicitly: assigning a (1,) array to a
    # scalar element is deprecated since NumPy 1.25 and will become an error
    modelparams[i, 0] = localmin[0]
    modelparams[i, 1] = grad[0]
# %%

# Plot the minimum estimate (left) and the gradient (right) over iterations
fig, ax = plt.subplots(1, 2, figsize=(12, 4))

for i in range(2):
    ax[i].plot(modelparams[:, i], 'o-')
    ax[i].set_xlabel('Iterations')
    ax[i].set_title(f'Final estimated minimum: {localmin[0]:.5f}')

# bug fix: this was set_xlabel, which clobbered the 'Iterations' x-label
# set in the loop above and left the left panel's y-axis unlabeled
ax[0].set_ylabel('Local minimum')
ax[1].set_ylabel('Derivative')

plt.show()
 
# %%
# deterministic starting point: x = 0 is a stationary point of f
# (deriv(0) == 0), so gradient descent never moves from it -- this cell
# demonstrates getting stuck at a critical point
localmin = np.array([0.0])  # float literal for an explicit float dtype

# learning parameters
learning_rate = 0.01
training_epochs = 100

# run through training, storing the (parameter, gradient) trajectory:
# column 0 = current estimate, column 1 = gradient at the previous estimate
modelparams = np.zeros((training_epochs, 2))
for i in range(training_epochs):
    grad = deriv(localmin)
    localmin = localmin - learning_rate * grad
    # index the size-1 arrays explicitly: assigning a (1,) array to a
    # scalar element is deprecated since NumPy 1.25 and will become an error
    modelparams[i, 0] = localmin[0]
    modelparams[i, 1] = grad[0]
# %%

# Plot the minimum estimate (left) and the gradient (right) over iterations
fig, ax = plt.subplots(1, 2, figsize=(12, 4))

for i in range(2):
    ax[i].plot(modelparams[:, i], 'o-')
    ax[i].set_xlabel('Iterations')
    ax[i].set_title(f'Final estimated minimum: {localmin[0]:.5f}')

# bug fix: this was set_xlabel, which clobbered the 'Iterations' x-label
# set in the loop above and left the left panel's y-axis unlabeled
ax[0].set_ylabel('Local minimum')
ax[1].set_ylabel('Derivative')

plt.show()
# %%