code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
from __future__ import division, print_function
import os
import sys
from collections import OrderedDict
# Third-party
import astropy.coordinates as coord
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as pl
import numpy as np
# Use a custom local matplotlib style and render figures inline in the notebook.
pl.style.use('apw-notebook')
%matplotlib inline
# Custom
import gala.dynamics as gd
import gala.integrate as gi
import gala.potential as gp
from gala.units import galactic
from scipy.misc import factorial
# from ophiuchus import barred_mw, static_mw
import ophiuchus.potential as op
# Output directory for the paper figures (created on demand).
plotpath = "/Users/adrian/projects/ophiuchus-paper/figures/"
# makedirs with exist_ok avoids the check-then-create race and also creates
# missing parent directories (os.mkdir would fail if a parent is absent).
os.makedirs(plotpath, exist_ok=True)
# Load the two Milky Way models: one with a rotating bar, one static.
barred_mw = op.load_potential("barred_mw_4")
static_mw = op.load_potential("static_mw")
# Transform from Hernquist & Ostriker (1992) expansion coefficients to the
# Lowing et al. (2011) normalization convention.
# NOTE: scipy.misc.factorial was removed in SciPy >= 1.3 -- use scipy.special.
from scipy.special import factorial

# (n, l, m) index of each supplied expansion coefficient, in the same order
# as the _Snlm values below.
nlms = np.array([[0,0,0],
                 [1,0,0],
                 [2,0,0],
                 [3,0,0],
                 [0,2,0],
                 [1,2,0],
                 [2,2,0],
                 [0,2,2],
                 [1,2,2],
                 [2,2,2],
                 [0,4,0],
                 [1,4,0],
                 [0,4,2],
                 [1,4,2],
                 [0,4,4],
                 [1,4,4],
                 [0,6,0],
                 [0,6,2],
                 [0,6,4],
                 [0,6,6]])
# H&O (1992) coefficient values, one per (n, l, m) row above.
_Snlm = np.array([1.509,-0.086,-0.033,-0.02,-2.606,
                  -0.221,-0.001,0.665,0.129,0.006,6.406,
                  1.295,-0.66,-0.14,0.044,-0.012,-5.859,
                  0.984,-0.03,0.001])

NEW_S = _Snlm.copy()
for i, (n, l, m) in enumerate(nlms):  # enumerate instead of zip(range(len(...)), ...)
    if l != 0:
        # Spherical-harmonic normalization factor between the two conventions.
        fac = np.sqrt(4*np.pi) * np.sqrt((2*l+1) / (4*np.pi) * factorial(l-m) / factorial(l+m))
        NEW_S[i] /= fac

# Pack the sparse coefficient list into a dense (n, l, m) array.
nmax = 3
lmax = 6
Snlm = np.zeros((nmax+1, lmax+1, lmax+1))
for (n, l, m), A in zip(nlms, NEW_S):
    Snlm[n, l, m] = A
# Echo the two loaded potential objects (notebook cell output).
static_mw
barred_mw
# Earlier experiments tweaking the model parameters, kept for reference:
# barpars = barred_mw.parameters.copy()
# barpars['halo']['q_z'] = 1.
# barpars['spheroid']['c'] = 0.2
# barpars['spheroid']['m'] = 5E9
# barpars['disk']['m'] = 4E10
# barpars['bar']['r_s'] = 1.2
# barpars['bar']['m'] = barpars['bar']['m']
# barred_mw = op.OphiuchusPotential(**barpars)
# stapars = static_mw.parameters.copy()
# stapars['halo']['q_z'] = 1.
# stapars['spheroid']['c'] = 0.3
# stapars['spheroid']['m'] = 1.2E10
# stapars['disk']['m'] = 6E10
# static_mw = op.OphiuchusPotential(**stapars)
# Map component name -> potential class, used below to rebuild each component
# separately for per-component circular-velocity curves.
potential_classes = OrderedDict()
potential_classes['disk'] = gp.MiyamotoNagaiPotential
potential_classes['halo'] = gp.FlattenedNFWPotential
potential_classes['bar'] = op.WangZhaoBarPotential
potential_classes['spheroid'] = gp.HernquistPotential
# Sanity check of the unit conversion used throughout (kpc/Myr -> km/s).
(0.19*u.kpc/u.Myr).to(u.km/u.s)
```
---
### Mass profile
```
# Enclosed-mass profiles of both models, sampled along the x-axis.
ix = 0
xyz = np.zeros((3, 128))
xyz[ix] = np.linspace(0., 10., xyz.shape[1])
for model in (static_mw, barred_mw):
    enclosed_mass = model.mass_enclosed(xyz)
    pl.loglog(xyz[ix], enclosed_mass, marker='')
pl.axvline(1)
pl.axhline(1E10)
```
---
```
def density_on_grid(potential, t=0., grid_lim=(-15, 15), ngrid=128):
    """Evaluate `potential`'s density on a cubic grid.

    Returns (gridx, gridy, dens): two (ngrid, ngrid) coordinate planes and
    the flattened density values (length ngrid**3). NaNs are replaced by the
    smallest finite value and negative densities are clamped to 1.
    """
    axis = np.linspace(grid_lim[0], grid_lim[1], ngrid)
    mesh = np.meshgrid(axis, axis, axis)
    xyz = np.vstack([coords.ravel() for coords in mesh])
    dens = potential.density(xyz, t=t).value
    finite_floor = dens[np.isfinite(dens)].min()
    dens[np.isnan(dens)] = finite_floor
    dens[dens < 0] = 1.
    cube = (ngrid, ngrid, ngrid)
    gridx = xyz[0].reshape(cube)[:, :, 0]
    gridy = xyz[1].reshape(cube)[:, :, 0]
    return gridx, gridy, dens
# Evaluate both models on the same 128^3 grid; xx/yy are shared x-y planes.
ngrid = 128
xx,yy,barred_dens = density_on_grid(barred_mw, ngrid=ngrid)
xx,yy,static_dens = density_on_grid(static_mw, ngrid=ngrid)
```
## Surface density plots
```
def side_by_side_surface_dens(xx, yy, dens):
    """Plot two projected surface-density maps of a density cube side by side.

    Left panel: cube summed over z (face-on). Right panel: summed over the
    first grid axis and transposed (edge-on view). Returns the figure.
    """
    ngrid = xx.shape[0]
    fig, axes = pl.subplots(1, 2, figsize=(8,4),
                            sharex=True, sharey=True)
    # BUG FIX: passing vmin/vmax together with a norm raises ValueError in
    # modern matplotlib -- the limits belong on the LogNorm itself.
    axes[0].pcolormesh(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=2),
                       cmap='Greys_r',
                       norm=mpl.colors.LogNorm(vmin=1E7, vmax=5E9))
    # Mark the solar position at x = -8 kpc.
    axes[0].text(-8., 0, r"$\odot$", ha='center', va='center', fontsize=18, color='w')
    axes[1].pcolormesh(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=0).T,
                       cmap='Greys_r',
                       norm=mpl.colors.LogNorm(vmin=1E7, vmax=5E9))
    axes[0].set_xlim(xx.min(), xx.max())
    axes[0].set_ylim(yy.min(), yy.max())
    # TODO: fix the damn aspect ratio
    # for ax in axes:
    #     ax.set_aspect('equal')
    fig.tight_layout()
    return fig
fig = side_by_side_surface_dens(xx, yy, barred_dens)
fig = side_by_side_surface_dens(xx, yy, static_dens)
```
## Contour plots
```
def side_by_side_contour_plots(xx, yy, dens, levels=10**np.arange(7,12,0.25)):
    """Contour plots of two projections of a density cube, side by side.

    Left: x-y projection (summed over axis 2). Right: summed over axis 1 and
    transposed. Returns the figure.

    NOTE(review): the ndarray default for `levels` is evaluated once at
    definition time; it is read-only here, so this is safe.
    """
    ngrid = xx.shape[0]
    fig,axes = pl.subplots(1,2,figsize=(7.8,4),sharex=True,sharey=True)
    im = axes[0].contour(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=2),
                         colors='k',
                         levels=levels,
                         rasterized=True)
    # Solar position marker.
    axes[0].text(-8., 0, r"$\odot$", ha='center', va='center', fontsize=18)
    _ = axes[1].contour(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=1).T,
                        colors='k',
                        levels=levels,
                        rasterized=True)
    # fig.subplots_adjust(bottom=0.2, right=0.85, wspace=0.25)
    for ax in axes:
        ax.xaxis.set_ticks([-10,0,10])
        ax.yaxis.set_ticks([-10,0,10])
    axes[0].set_xlabel("$x$ [kpc]")
    axes[0].set_ylabel("$y$ [kpc]")
    axes[1].set_xlabel("$y$ [kpc]")
    axes[1].set_ylabel("$z$ [kpc]")
    axes[0].set_xlim(xx.min(), xx.max())
    axes[0].set_ylim(yy.min(), yy.max())
    fig.tight_layout()
    return fig
barred_fig = side_by_side_contour_plots(xx, yy, barred_dens)
static_fig = side_by_side_contour_plots(xx, yy, static_dens)
# barred_fig.savefig(os.path.join(plotpath, "barred-surface-density-contour.pdf"), bbox_inches='tight')
# barred_fig.savefig(os.path.join(plotpath, "barred-surface-density-contour.png"), dpi=400, bbox_inches='tight')
# static_fig.savefig(os.path.join(plotpath, "static-surface-density-contour.pdf"), bbox_inches='tight')
# static_fig.savefig(os.path.join(plotpath, "static-surface-density-contour.png"), dpi=400, bbox_inches='tight')
```
## Portail et al. (2015)
```
# Compare the bar component's surface density with Portail et al. (2015).
ngrid = 65
grid = np.linspace(-2,2,ngrid)
xyz = np.vstack(map(np.ravel, np.meshgrid(grid,grid,grid)))
val2 = np.zeros((ngrid*ngrid*ngrid,))
# for k in potentials.keys():
#     val += potentials[k].density(xyz)
# BUG FIX: `potentials` is not defined at this point in the notebook (it is a
# local inside circ_vel_plot below) -- rebuild the bar component explicitly
# from the barred model's parameters instead.
bar_potential = op.WangZhaoBarPotential(**barred_mw.parameters['bar'])
val2 += bar_potential.density(xyz).value
val2[np.isnan(val2)] = val2[np.isfinite(val2)].max()
# Collapse along the y grid axis to a surface density in Msun/pc^2.
surf_dens = (val2.reshape(ngrid,ngrid,ngrid).sum(axis=1).T*u.Msun/(u.kpc**2)/ngrid).to(u.Msun/u.pc**2)
pl.figure(figsize=(6,3))
pl.contourf(xyz[0].reshape(ngrid,ngrid,ngrid)[:,:,0],
            xyz[1].reshape(ngrid,ngrid,ngrid)[:,:,0],
            surf_dens.value,
            norm=mpl.colors.LogNorm(),
            levels=np.logspace(1., 4, 8),
            cmap='Blues')
#             cmap='Greys_r',
#             norm=mpl.colors.LogNorm(),
#             vmin=5E8, vmax=5E10)
pl.xlim(-2,2)
pl.ylim(-1.1,1.1)
pl.colorbar()
pl.tight_layout()
```
## Circular velocity curve
```
def circ_vel_plot(potential, name):
    """Plot circular-velocity curves for a composite potential.

    Parameters
    ----------
    potential : composite potential with .parameters and .G (galactic units)
    name : str
        Either 'barred' or 'static'; controls how components are grouped.

    Returns
    -------
    fig : the matplotlib figure.
    """
    rr = np.linspace(0.1, 20., 1024)
    xyz = np.zeros((3, len(rr)))
    xyz[0] = rr

    # Rebuild each component as a standalone potential so per-component
    # enclosed masses can be computed.
    potentials = OrderedDict()
    for k, P in potential_classes.items():
        potentials[k] = P(units=galactic, **potential.parameters[k])

    def vc_kms(menc_value):
        # v_c = sqrt(G M(<r) / r); G, M in galactic units -> kpc/Myr -> km/s.
        return (np.sqrt(potential.G * menc_value / rr)*u.kpc/u.Myr).to(u.km/u.s).value

    # Total rotation curve from the sum of component enclosed masses.
    total_menc = np.sum([p.mass_enclosed(xyz).value for p in potentials.values()], axis=0)
    fig, ax = pl.subplots(1, 1, figsize=(6,5))
    ax.plot(rr, vc_kms(total_menc), marker='', lw=3.)

    styles = dict(
        halo=dict(lw=2, ls='-.'),
        bar=dict(lw=3., ls=':'),
        spheroid=dict(lw=3., ls=':'),
        disk=dict(lw=2., ls='--')
    )
    for k, p in potentials.items():
        # Skip massless components (the halo has no 'm' parameter).
        if k != 'halo' and potential.parameters[k]['m'] == 0:
            continue
        # The bar is only ever plotted combined with the spheroid below.
        if k == 'bar':
            continue
        if name == 'barred' and k == 'spheroid':
            # Combine bar + spheroid into a single curve.
            menc = (p.mass_enclosed(xyz) + potentials['bar'].mass_enclosed(xyz)).value
            label = 'Bar+Spheroid'
        else:
            menc = p.mass_enclosed(xyz).value
            label = k.capitalize()
        ax.plot(rr, vc_kms(menc), marker='', label=label, **styles[k])

    # Combined "everything but the halo" curve.
    if name == 'barred':
        menc = (potentials['spheroid'].mass_enclosed(xyz)
                + potentials['bar'].mass_enclosed(xyz)
                + potentials['disk'].mass_enclosed(xyz)).value
        ax.plot(rr, vc_kms(menc), marker='', label='Disk+Bar+Spher', lw=2.)
    else:
        menc = (potentials['spheroid'].mass_enclosed(xyz)
                + potentials['disk'].mass_enclosed(xyz)).value
        # BUG FIX: this curve was computed but never plotted for the static model.
        ax.plot(rr, vc_kms(menc), marker='', label='Disk+Spher', lw=2.)

    ax.set_xlabel("$R$ [kpc]")
    ax.set_ylabel(r"$v_c$ [${\rm km}\,{\rm s}^{-1}$]")
    ax.legend(loc='upper right', fontsize=12)
    ax.set_ylim(0,300)
    # ax.set_ylim(150,300)
    # ax.axhline(220, alpha=0.2, lw=1.)
    # ax.axvline(8., color='#cccccc', lw=2., zorder=-100)
    # Shaded bands: v_c ~ 215-235 km/s and the solar radius around 8 kpc.
    rcolor = '#dddddd'
    rect = mpl.patches.Rectangle((0.,215), rr.max(), 20., zorder=-100, color=rcolor)
    ax.add_patch(rect)
    rect2 = mpl.patches.Rectangle((8.,0), 0.3, ax.get_ylim()[1], zorder=-100, color=rcolor)
    ax.add_patch(rect2)
    fig.tight_layout()
    return fig
fig = circ_vel_plot(barred_mw, 'barred')
# fig.savefig(os.path.join(plotpath, "barred-circ-vel.pdf"))
# fig.savefig(os.path.join(plotpath, "barred-circ-vel.png"), dpi=400)
fig = circ_vel_plot(static_mw, name='static')
# fig.savefig(os.path.join(plotpath, "static-circ-vel.pdf"))
# fig.savefig(os.path.join(plotpath, "static-circ-vel.png"), dpi=400)
```
## A new figure with all four panels
```
# Four-panel paper figure: circular-velocity curves (left column) and x-y
# surface-density contours (right column) for the barred (top) and static
# (bottom) models.
fig,axes = pl.subplots(2,2,figsize=(9,8.5),sharex='col')
# Circular velocity
styles = dict(
    halo=dict(lw=2, ls='-.'),
    bar=dict(lw=3., ls=':'),
    spheroid=dict(lw=3., ls=':'),
    disk=dict(lw=2., ls='--')
)
# Contour
levels = 10**np.arange(7,12,0.25)
rr = np.linspace(0.1, 22., 1024)
# G / r factor reused for all v_c curves (G is the same for both models).
fac = static_mw.G / rr
xyz = np.zeros((3, len(rr)))
xyz[0] = rr
for i,(name,pot,dens) in enumerate(zip(['barred','static'], [barred_mw, static_mw],[barred_dens, static_dens])):
    # Circular velocity
    ax = axes[i,0]
    # Rebuild each component as a standalone potential.
    potentials = OrderedDict()
    for k,P in potential_classes.items():
        potentials[k] = P(units=galactic, **pot.parameters[k])
    # vcirc = (np.sqrt(potential.G * potential.mass_enclosed(xyz) / rr)*u.kpc/u.Myr).to(u.km/u.s).value
    # Total rotation curve from summed component enclosed masses.
    vcirc = (np.sqrt(pot.G * np.sum([p.mass_enclosed(xyz) for p in potentials.values()], axis=0) / rr)*u.kpc/u.Myr)\
        .to(u.km/u.s).value
    ax.plot(rr, vcirc, marker='', lw=3.)
    # Per-component enclosed masses.
    menc = dict()
    for k,p in potentials.items():
        menc[k] = p.mass_enclosed(xyz)
    # Halo
    vc = np.sqrt(fac * menc['halo'].value)
    ax.plot(rr, (vc*u.kpc/u.Myr).to(u.km/u.s),
            marker='', label='Halo', **styles['halo'])
    # disk, etc.
    if name == 'static':
        vc = np.sqrt(fac * (menc['disk']+menc['spheroid']).value)
        ax.plot(rr, (vc*u.kpc/u.Myr).to(u.km/u.s),
                marker='', label='Disk+Sph', **styles['disk'])
    elif name == 'barred':
        vc = np.sqrt(fac * (menc['disk']+menc['spheroid']+menc['bar']).value)
        ax.plot(rr, (vc*u.kpc/u.Myr).to(u.km/u.s),
                marker='', label='Disk+Sph+Bar', **styles['disk'])
    ax.legend(loc='upper right', fontsize=12)
    ax.set_ylim(0,300)
    # ax.set_ylim(150,300)
    # ax.axhline(220, alpha=0.2, lw=1.)
    # ax.axvline(8., color='#cccccc', lw=2., zorder=-100)
    # Shaded reference bands: observed v_c range and the solar radius.
    rcolor = '#dddddd'
    rect = mpl.patches.Rectangle((0.,215), rr.max(), 22., zorder=-100, color=rcolor)
    ax.add_patch(rect)
    rect2 = mpl.patches.Rectangle((8.,0), 0.3, ax.get_ylim()[1], zorder=-100, color=rcolor)
    ax.add_patch(rect2)
    # Surface density
    ngrid = xx.shape[0]
    ax = axes[i,1]
    im = ax.contour(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=2),
                    colors='k', levels=levels, rasterized=True)
    # Solar position marker.
    ax.text(-8., 0, r"$\odot$", ha='center', va='center', fontsize=18)
    ax.xaxis.set_ticks([-10,0,10])
    ax.yaxis.set_ticks([-10,0,10])
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    if i == 0:
        # Label the solar-radius band only on the top-left panel.
        ax = axes[0,0]
        ax.text(8.4, 40, r'$R_\odot$', fontsize=18, color='#666666')
        # ax.annotate(r'$R_\odot$', xy=(8.3, 50), xytext=(12, 75.),
        #             fontsize=18,
        #             xycoords='data', textcoords='data',
        #             arrowprops=dict(arrowstyle="fancy",
        #                             fc="0.6", ec="none",
        #                             patchB=rect2,
        #                             connectionstyle="angle3,angleA=0,angleB=90"),
        #             )
# Panel titles, axis labels, and right-hand y-axes for the contour column.
axes[0,0].text(1, 260, "Barred", fontsize=24, fontstyle='italic', ha='left')
axes[1,0].text(1, 260, "Static", fontsize=24, fontstyle='italic', ha='left')
axes[1,0].set_xlabel("$R$ [kpc]")
axes[1,1].set_xlabel("$x$ [kpc]")
axes[0,0].set_ylabel(r"$v_c$ [${\rm km}\,{\rm s}^{-1}$]")
axes[1,0].set_ylabel(r"$v_c$ [${\rm km}\,{\rm s}^{-1}$]")
axes[0,0].set_xlim(0,22)
axes[0,1].set_ylabel("$y$ [kpc]")
axes[1,1].set_ylabel("$y$ [kpc]")
axes[0,1].yaxis.set_label_position('right')
axes[1,1].yaxis.set_label_position('right')
axes[0,1].yaxis.tick_right()
axes[1,1].yaxis.tick_right()
axes[1,1].set_aspect('equal')
fig.tight_layout()
# fig.savefig(os.path.join(plotpath, "potentials-four.pdf"))
# fig.savefig(os.path.join(plotpath, "potentials-four.png"), dpi=400)
```
---
### What direction is it rotating? I hope clockwise...
```
# Rotate the bar through a quarter pattern period and plot each snapshot to
# check the sense of rotation.
pot = op.WangZhaoBarPotential(**barred_mw.parameters['bar'])
# Pattern period in Myr for a pattern speed of 60 km/s/kpc.
T = (2*np.pi/(60*u.km/u.s/u.kpc)).to(u.Myr).value
for snapshot_time in np.linspace(0., T/4, 4):
    xx, yy, _dens = density_on_grid(pot, t=snapshot_time, ngrid=64)
    fig = side_by_side_surface_dens(xx, yy, _dens)
```
---
```
# Inspect the un-rotated (alpha = 0) bar: accelerations along the x and y
# axes, then density contours in the x-z plane.
pars = barred_mw.parameters['bar'].copy()
pars['alpha'] = 0.
pot = op.WangZhaoBarPotential(**pars)
# NOTE(review): positions here are (N, 3), unlike the (3, N) convention used
# earlier in this notebook -- confirm against the potential API.
X = np.linspace(-15,15,256)
_xyz = np.zeros((X.size,3))
_xyz[:,0] = X
along_x = pot.acceleration(_xyz)[:,0]
_xyz = np.zeros((X.size,3))
_xyz[:,1] = X
along_y = pot.acceleration(_xyz)[:,1]
pl.plot(X, np.abs(along_x))
pl.plot(X, np.abs(along_y))
# Density on a 32x32 grid in the x-z plane (y = 0).
engrid = 32
derp = np.linspace(-15,15,engrid)
xy = np.vstack(map(np.ravel, np.meshgrid(derp,derp))).T
xyz = np.zeros((len(xy),3))
xyz[:,[0,2]] = xy
dens = pot.density(xyz, t=0)
dens[np.isnan(dens)] = dens[np.isfinite(dens)].max()
xx = xyz[:,0].reshape(engrid,engrid)
yy = xyz[:,2].reshape(engrid,engrid)
pl.figure(figsize=(5,5))
pl.contour(xx, yy, dens.reshape(engrid,engrid),
           colors='k', rasterized=True)
```
| github_jupyter |
## Basic core
This module contains all the basic functions we need in other modules of the fastai library (split with [`torch_core`](/torch_core.html#torch_core) that contains the ones requiring pytorch). Its documentation can easily be skipped at a first read, unless you want to know what a given function does.
```
from fastai.gen_doc.nbdoc import *
from fastai.core import *
```
## Global constants
`default_cpus = min(16, num_cpus())` <div style="text-align: right"><a href="https://github.com/fastai/fastai/blob/master/fastai/core.py#L45">[source]</a></div>
## Check functions
```
show_doc(has_arg)
```
Examples for two [`fastai.core`](/core.html#core) functions. Docstring shown before calling [`has_arg`](/core.html#has_arg) for reference
```
# has_arg(func, name): does `func` accept a parameter called `name`?
has_arg(download_url,'url')
has_arg(index_row,'x')
has_arg(index_row,'a')
show_doc(ifnone)
# ifnone(a, b): return `a` unless it is None, in which case return `b`.
param,alt_param = None,5
ifnone(param,alt_param)
param,alt_param = None,[1,2,3]
ifnone(param,alt_param)
show_doc(is1d)
# is1d is False for a 2-d array, True once it is flattened.
two_d_array = np.arange(12).reshape(6,2)
print( two_d_array )
print( is1d(two_d_array) )
is1d(two_d_array.flatten())
show_doc(is_listy)
```
Check if `x` is a `Collection`. `Tuple` or `List` qualify
```
# is_listy: True for list/tuple; False for scalars, dicts, and dict views.
some_data = [1,2,3]
is_listy(some_data)
some_data = (1,2,3)
is_listy(some_data)
some_data = 1024
print( is_listy(some_data) )
print( is_listy( [some_data] ) )
some_data = dict([('a',1),('b',2),('c',3)])
print( some_data )
print( some_data.keys() )
print( is_listy(some_data) )
print( is_listy(some_data.keys()) )
print( is_listy(list(some_data.keys())) )
show_doc(is_tuple)
```
Check if `x` is a `tuple`.
```
# Only a real tuple qualifies; a list does not.
print( is_tuple( [1,2,3] ) )
print( is_tuple( (1,2,3) ) )
```
## Collection related functions
```
# arange_of / array collection helpers.
show_doc(arange_of)
arange_of([5,6,7])
type(arange_of([5,6,7]))
show_doc(array)
array([1,2,3])
```
Note that after we call the generator, we do not reset. So the [`array`](/core.html#array) call has five fewer entries than it would if we ran from the start of the generator.
```
def data_gen():
    """Yield 100.01, 101.01, ... stopping once the value reaches 200."""
    current = 100.01
    while current < 200:
        yield current
        current += 1.
# A generator is not reset between uses: after consuming 5 values the array
# call below sees only the remaining entries.
ex_data_gen = data_gen()
for _ in range(5):
    print(next(ex_data_gen))
array(ex_data_gen)
ex_data_gen_int = data_gen()
array(ex_data_gen_int,dtype=int) #Cast output to int array
show_doc(arrays_split)
# arrays_split partitions rows by a boolean mask.
data_a = np.arange(15)
data_b = np.arange(15)[::-1]
mask_a = (data_a > 10)
print(data_a)
print(data_b)
print(mask_a)
arrays_split(mask_a,data_a)
np.vstack([data_a,data_b]).transpose().shape
arrays_split(mask_a,np.vstack([data_a,data_b]).transpose()) #must match on dimension 0
show_doc(chunks)
```
You can transform a `Collection` into an `Iterable` of 'n' sized chunks by calling [`chunks`](/core.html#chunks):
```
# chunks: lazily yield fixed-size slices of a collection.
data = [0,1,2,3,4,5,6,7,8,9]
for chunk in chunks(data, 2):
    print(chunk)
for chunk in chunks(data, 3):
    print(chunk)
show_doc(df_names_to_idx)
# Column names -> positional indices.
ex_df = pd.DataFrame.from_dict({"a":[1,1,1],"b":[2,2,2]})
print(ex_df)
df_names_to_idx('b',ex_df)
show_doc(extract_kwargs)
# Pull selected keys out of a kwargs dict, keeping the remainder.
key_word_args = {"a":2,"some_list":[1,2,3],"param":'mean'}
key_word_args
(extracted_val,remainder) = extract_kwargs(['param'],key_word_args)
print( extracted_val,remainder )
show_doc(idx_dict)
idx_dict(['a','b','c'])
show_doc(index_row)
# index_row works across lists, Series, and DataFrames.
data = [0,1,2,3,4,5,6,7,8,9]
index_row(data,4)
index_row(pd.Series(data),7)
data_df = pd.DataFrame([data[::-1],data]).transpose()
data_df
index_row(data_df,7)
show_doc(listify)
# listify broadcasts a scalar to the length of a reference collection.
to_match = np.arange(12)
listify('a',to_match)
listify('a',5)
listify(77.1,3)
listify( (1,2,3) )
listify((1,2,3),('a','b','c'))
show_doc(random_split)
```
Splitting is done here with `random.uniform()` so you may not get the exact split percentage for small data sets
```
# random_split on a list and on a DataFrame.
data = np.arange(20).reshape(10,2)
data.tolist()
random_split(0.20,data.tolist())
random_split(0.20,pd.DataFrame(data))
show_doc(range_of)
range_of([5,4,3])
range_of(np.arange(10)[::-1])
show_doc(series2cat)
# series2cat converts columns to pandas Categorical dtype in place.
data_df = pd.DataFrame.from_dict({"a":[1,1,1,2,2,2],"b":['f','e','f','g','g','g']})
data_df
data_df['b']
series2cat(data_df,'b')
data_df['b']
series2cat(data_df,'a')
data_df['a']
show_doc(split_kwargs_by_func)
# Split kwargs into (accepted by the function, everything else).
key_word_args = {'url':'http://fast.ai','dest':'./','new_var':[1,2,3],'testvalue':42}
split_kwargs_by_func(key_word_args,download_url)
show_doc(to_int)
to_int(3.1415)
data = [1.2,3.4,7.25]
to_int(data)
show_doc(uniqueify)
uniqueify( pd.Series(data=['a','a','b','b','f','g']) )
```
## Files management and downloads
```
# File-management and download helpers -- docs only.
show_doc(download_url)
show_doc(find_classes)
show_doc(join_path)
show_doc(join_paths)
show_doc(loadtxt_str)
show_doc(save_texts)
```
## Multiprocessing
```
# Multiprocessing helpers -- docs only.
show_doc(num_cpus)
show_doc(parallel)
show_doc(partition)
show_doc(partition_by_cores)
```
## Data block API
```
show_doc(ItemBase, title_level=3)
```
All items used in fastai should subclass this. Must have a [`data`](/tabular.data.html#tabular.data) field that will be used when collating in mini-batches.
```
# ItemBase transform and display hooks.
show_doc(ItemBase.apply_tfms)
show_doc(ItemBase.show)
```
The default behavior is to set the string representation of this object as title of `ax`.
```
show_doc(Category, title_level=3)
```
Create a [`Category`](/core.html#Category) with an `obj` of index [`data`](/tabular.data.html#tabular.data) in a certain classes list.
```
# Empty-label and multi-label item types.
show_doc(EmptyLabel, title_level=3)
show_doc(MultiCategory, title_level=3)
```
Create a [`MultiCategory`](/core.html#MultiCategory) with an `obj` that is a collection of labels. [`data`](/tabular.data.html#tabular.data) corresponds to the one-hot encoded labels and `raw` is a list of associated string.
```
show_doc(FloatItem)
```
## Others
```
show_doc(camel2snake)
# CamelCase class name -> snake_case string.
camel2snake('DeviceDataLoader')
show_doc(even_mults)
```
In linear scales each element is equidistant from its neighbors:
```
# from 1 to 10 in 5 steps
t = np.linspace(1, 10, 5)
t
# On a linear scale, neighboring elements differ by a constant amount.
for i in range(len(t) - 1):
    print(t[i+1] - t[i])
```
In logarithmic scales, each element is a multiple of the previous entry:
```
t = even_mults(1, 10, 5)
t
# notice how each number is a multiple of its predecessor
for i in range(len(t) - 1):
    print(t[i+1] / t[i])
show_doc(func_args)
func_args(download_url)
```
Additionally, [`func_args`](/core.html#func_args) can be used with functions that do not belong to the fastai library
```
# func_args also works on callables outside the fastai library.
func_args(np.linspace)
show_doc(noop)
```
Return `x`.
```
# noop: identity function used as a default callable.
# object is returned as-is
noop([1,2,3])
show_doc(one_hot)
```
One-hot encoding is a standard machine learning technique. Assume we are dealing with a 10-class classification problem and we are supplied a list of labels:
```
# Example labels for a 10-class problem.
y = [1, 4, 4, 5, 7, 9, 2, 4, 0]
jekyll_note("""y is zero-indexed, therefore its first element (1) belongs to class 2, its second element (4) to class 5 and so on.""")
len(y)
```
y can equivalently be expressed as a matrix of 9 rows and 10 columns, where each row represents one element of the original y.
```
# One row of the one-hot matrix per label.
for label in y:
    print(one_hot(label, 10))
show_doc(show_some)
# select 3 elements from a list
some_data = show_some([10, 20, 30, 40, 50], 3)
some_data
type(some_data)
# the separator can be changed
some_data = show_some([10, 20, 30, 40, 50], 3, sep = '---')
some_data
some_data[:-3]
```
[`show_some`](/core.html#show_some) can take as input any class with \_\_len\_\_ and \_\_getitem\_\_
```
class Any(object):
    """Minimal wrapper showing show_some only needs __len__ and __getitem__."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]
# show_some on the custom sequence-like object defined above.
some_other_data = Any('nice')
show_some(some_other_data, 2)
show_doc(subplots)
show_doc(text2html_table)
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
## New Methods - Please document or move to the undocumented section
```
show_doc(is_dict)
```
| github_jupyter |
# 3-tier
Separates presentation, application processing, and data management functions.
reference: https://shunnien.github.io/2017/07/29/3-tier-and-mvc-introduction/
```
class Data(object):
    """Data tier: exposes a product table through the descriptor protocol."""

    products = {
        'milk': {'price': 1.5, 'quantity': 10},
        'eggs': {'price': 0.2, 'quantity': 100},
        'cheese': {'price': 2.0, 'quantity': 50},
    }

    def __get__(self, instance, owner):
        # Every attribute access simulates a fetch from a backing store.
        print("(Fetching from Data Store)")
        return {'products': self.products}
class BusinessLogic(object):
    """Application tier: mediates between the UI and the data store."""
    # `data` is a descriptor -- attribute access triggers Data.__get__.
    data = Data()
    def product_list(self):
        # Keys view of the product table.
        return self.data['products'].keys()
    def product_information(self, product):
        # Product dict, or None when the product is unknown.
        return self.data['products'].get(product, None)
class Ui(object):
    """Presentation tier: prints product data obtained from BusinessLogic."""
    def __init__(self):
        self.business_logic = BusinessLogic()
    def get_product_list(self):
        # Print all known product names, one per line.
        print('PRODUCT LIST:')
        for product in self.business_logic.product_list():
            print(product)
        print('')
    def get_product_information(self, product):
        # Print price/quantity for a product; unknown names get a message.
        product_info = self.business_logic.product_information(product)
        if product_info:
            print('PRODUCT INFORMATION:')
            print(
                "Name: {0}, Price: {1:.2f}, Quantity: {2}".format(
                    product.title(), product_info.get('price', 0), product_info.get('quantity', 0)
                )
            )
        else:
            print('That product "{0}" does not exist in the records'.format(product))
# Exercise the UI tier; the data-store message prints on each descriptor access.
ui = Ui()
ui.get_product_list()
ui.get_product_information('cheese')
ui.get_product_information('eggs')
ui.get_product_information('milk')
ui.get_product_information('arepas')
class Data(object):
    """Compact data tier for the second 3-tier example."""

    products = {
        'milk': {'price': 20, 'quantity': 1},
        'egg': {'price': 30, 'quantity': 2}
    }

    def __get__(self, obj, objtype):
        # Descriptor access returns a fresh wrapper around the table.
        return {'products': self.products}
class BusinessLogic(object):
    """Application tier for the second 3-tier example."""
    # Descriptor: attribute access goes through Data.__get__.
    data = Data()
    def product_list(self):
        return self.data['products'].keys()
    def product_info(self, product):
        # Product dict, or None when the product is unknown.
        return self.data['products'].get(product, None)
class Ui(object):
    """Presentation tier for the compact 3-tier demo."""
    def __init__(self):
        self.business_logic = BusinessLogic()
    def get_product_list(self):
        # Print all product names on a single line.
        for i in self.business_logic.product_list():
            print(i, end=' ')
        print()
    def get_product_info(self, product):
        # Print the product name and its attributes.
        product_info = self.business_logic.product_info(product)
        if product_info is None:
            # BUG FIX: previously an unknown product raised AttributeError
            # (calling .items() on None); report it instead.
            print('That product "{0}" does not exist in the records'.format(product))
            return
        print(product)
        for k, v in product_info.items():
            print(k, v)
# Exercise the second UI tier.
ui = Ui()
ui.get_product_list()
ui.get_product_info('egg')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/denikn/Machine-Learning-MIT-Assignment/blob/main/Week%2002%20-%20Perceptrons/Week02_Homework_02.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#MIT 6.036 Spring 2019: Homework 2#
This colab notebook provides code and a framework for problems 7-10 of [the homework](https://openlearninglibrary.mit.edu/courses/course-v1:MITx+6.036+1T2019/courseware/Week2/week2_homework/1). You can work out your solutions here, then submit your results back on the homework page when ready.
## <section>**Setup**</section>
First, download the code distribution for this homework that contains test cases and helper functions (such as `positive`).
Run the next code block to download and import the code for this lab.
```
# Download the course helper code (test cases + utilities) and import it all.
!rm -f code_for_hw02.py*
!wget --no-check-certificate --quiet https://introml_oll.odl.mit.edu/6.036/static/homework/hw02/code_for_hw02.py
from code_for_hw02 import *
help(tidy_plot)
```
```
def test(a):
    """Global helper: return a + 53."""
    return a + 53

def methodB(a):
    # `test` resolves in the global scope; callers' locals do not leak here.
    return test(a)

def someMethod():
    # The local `test = 7` does NOT shadow the global function inside methodB,
    # so this returns methodB(10) == test(10) == 63.
    test = 7
    return methodB(test + 3)

someMethod()
```
# <section>**7) Implement perceptron**</section>
Implement [the perceptron algorithm](https://lms.mitx.mit.edu/courses/course-v1:MITx+6.036+2019_Spring/courseware/Week2/perceptron/2), where
* `data` is a numpy array of dimension $d$ by $n$
* `labels` is numpy array of dimension $1$ by $n$
* `params` is a dictionary specifying extra parameters to this algorithm; your algorithm should run a number of iterations equal to $T$
* `hook` is either None or a function that takes the tuple `(th, th0)` as an argument and displays the separator graphically. We won't be testing this in the Tutor, but it will help you in debugging on your own machine.
It should return a tuple of $\theta$ (a $d$ by 1 array) and $\theta_0$ (a 1 by 1 array).
We have given you some data sets in the code file for you to test your implementation.
Your function should initialize all parameters to 0, then run through the data, in the order it is given, performing an update to the parameters whenever the current parameters would make a mistake on that data point. Perform $T$ iterations through the data.
```
import numpy as np
import numpy as np
# x is dimension d by 1
# th is dimension d by 1
# th0 is dimension 1 by 1
# return 1 by 1 matrix of +1, 0, -1
def positive(x, th, th0):
    """Sign of the signed distance of x from the hyperplane (th, th0)."""
    return np.sign(th.T@x + th0)

# Perceptron algorithm with offset.
# data is dimension d by n
# labels is dimension 1 by n
# T is a positive integer number of steps to run
def perceptron(data, labels, params=None, hook=None):
    """Run T passes of the perceptron update over the data in order.

    Returns (theta, theta_0): a (d, 1) array and a (1, 1) array.
    `params` may contain 'T' (number of passes, default 100); `hook`, if
    given, is called with (theta, theta_0) after each update.
    """
    if params is None:  # BUG FIX: avoid the mutable default argument `params={}`
        params = {}
    # if T not in params, default to 100
    T = params.get('T', 100)
    (d, n) = data.shape
    theta = np.zeros((d, 1)); theta_0 = np.zeros((1, 1))
    for t in range(T):
        for i in range(n):
            x = data[:,i:i+1]
            y = labels[:,i:i+1]
            # Update only on a mistake (margin <= 0).
            if y * positive(x, theta, theta_0) <= 0.0:
                theta = theta + y * x
                theta_0 = theta_0 + y
                if hook: hook((theta, theta_0))
    return theta, theta_0
test_perceptron(perceptron)
```
# <section>8) Implement averaged perceptron</section>
Regular perceptron can be somewhat sensitive to the most recent examples that it sees. Instead, averaged perceptron produces a more stable output by outputting the average value of `th` and `th0` across all iterations.
Implement averaged perceptron with the same spec as regular perceptron, and using the pseudocode below as a guide.
<pre>
procedure averaged_perceptron({(x^(i), y^(i)), i=1,...n}, T)
th = 0 (d by 1); th0 = 0 (1 by 1)
ths = 0 (d by 1); th0s = 0 (1 by 1)
for t = 1,...,T do:
for i = 1,...,n do:
if y^(i)(th . x^(i) + th0) <= 0 then
th = th + y^(i)x^(i)
th0 = th0 + y^(i)
ths = ths + th
th0s = th0s + th0
return ths/(nT), th0s/(nT)
</pre>
```
import numpy as np
# x is dimension d by 1
# th is dimension d by 1
# th0 is dimension 1 by 1
# return 1 by 1 matrix of +1, 0, -1
def positive(x, th, th0):
    """Sign of the signed distance of x from the hyperplane (th, th0)."""
    return np.sign(th.T@x + th0)

def averaged_perceptron(data, labels, params=None, hook=None):
    """Averaged perceptron: average theta over all n*T inner iterations.

    Returns (theta_avg, theta_0_avg) with shapes (d, 1) and (1, 1). The
    running sums are accumulated after every example (mistake or not), per
    the course pseudocode.
    """
    if params is None:  # BUG FIX: avoid the mutable default argument `params={}`
        params = {}
    T = params.get('T', 100)
    (d, n) = data.shape
    theta = np.zeros((d, 1)); theta_0 = np.zeros((1, 1))
    theta_sum = theta.copy()
    theta_0_sum = theta_0.copy()
    for t in range(T):
        for i in range(n):
            x = data[:,i:i+1]
            y = labels[:,i:i+1]
            if y * positive(x, theta, theta_0) <= 0.0:
                theta = theta + y * x
                theta_0 = theta_0 + y
                if hook: hook((theta, theta_0))
            theta_sum = theta_sum + theta
            theta_0_sum = theta_0_sum + theta_0
    theta_avg = theta_sum / (T*n)
    theta_0_avg = theta_0_sum / (T*n)
    if hook: hook((theta_avg, theta_0_avg))
    return theta_avg, theta_0_avg
test_averaged_perceptron(averaged_perceptron)
```
# 9) Implement evaluation strategies
## 9.1) Evaluating a classifier
To evaluate a classifier, we are interested in how well it performs on data that it wasn't trained on. Construct a testing procedure that uses a training data set, calls a learning algorithm to get a linear separator (a tuple of $\theta, \theta_0$), and then reports the percentage correct on a new testing set as a float between 0. and 1..
The learning algorithm is passed as a function that takes a data array and a labels vector. Your evaluator should be able to interchangeably evaluate `perceptron` or `averaged_perceptron` (or future algorithms with the same spec), depending on what is passed through the `learner` parameter.
The `eval_classifier` function should accept the following parameters:
* <tt>learner</tt> - a function, such as perceptron or averaged_perceptron
* <tt>data_train</tt> - training data
* <tt>labels_train</tt> - training labels
* <tt>data_test</tt> - test data
* <tt>labels_test</tt> - test labels
Assume that you have available the function `score` from HW 1, which takes inputs:
* <tt>data</tt>: a <tt>d</tt> by <tt>n</tt> array of floats (representing <tt>n</tt> data points in <tt>d</tt> dimensions)
* <tt>labels</tt>: a <tt>1</tt> by <tt>n</tt> array of elements in <tt>(+1, -1)</tt>, representing target labels
* <tt>th</tt>: a <tt>d</tt> by <tt>1</tt> array of floats that together with
* <tt>th0</tt>: a single scalar or 1 by 1 array, represents a hyperplane
and returns 1 by 1 matrix with an integer indicating number of data points correct for the separator.
```
import numpy as np
def eval_classifier(learner, data_train, labels_train, data_test, labels_test):
    """Train `learner` on the training split; return test accuracy in [0, 1]."""
    theta, theta_0 = learner(data_train, labels_train)
    n_test = data_test.shape[1]
    return score(data_test, labels_test, theta, theta_0) / n_test
test_eval_classifier(eval_classifier,perceptron)
```
## <subsection>9.2) Evaluating a learning algorithm using a data source</subsection>
Construct a testing procedure that takes a learning algorithm and a data source as input and runs the learning algorithm multiple times, each time evaluating the resulting classifier as above. It should report the overall average classification accuracy.
You can use our implementation of `eval_classifier` as above.
Write the function `eval_learning_alg` that takes:
* <tt>learner</tt> - a function, such as perceptron or averaged_perceptron
* <tt>data_gen</tt> - a data generator, call it with a desired data set size; returns a tuple (data, labels)
* <tt>n_train</tt> - the size of the learning sets
* <tt>n_test</tt> - the size of the test sets
* <tt>it</tt> - the number of iterations to average over
and returns the average classification accuracy as a float between 0. and 1..
**Note:** Be sure to generate your training data and then testing data in that order, to ensure that the pseudorandomly generated data matches that in the test code.
```
import numpy as np
def eval_learning_alg(learner, data_gen, n_train, n_test, it):
    """Average eval_classifier accuracy over `it` freshly generated splits.

    Training data is generated before test data on each trial, so the
    pseudorandom stream matches the course grader.
    """
    total = 0.0
    for _ in range(it):
        data_train, labels_train = data_gen(n_train)
        data_test, labels_test = data_gen(n_test)
        total += eval_classifier(learner, data_train, labels_train,
                                 data_test, labels_test)
    return total / it
test_eval_learning_alg(eval_learning_alg,perceptron)
```
## <subsection>9.3) Evaluating a learning algorithm with a fixed dataset</subsection>
Cross-validation is a strategy for evaluating a learning algorithm, using a single training set of size $n$. Cross-validation takes in a learning algorithm $L$, a fixed data set $\mathcal{D}$, and a parameter $k$. It will run the learning algorithm $k$ different times, then evaluate the accuracy of the resulting classifier, and ultimately return the average of the accuracies over each of the $k$ "runs" of $L$. It is structured like this:
<pre><code>divide D into k parts, as equally as possible; call them D_i for i == 0 .. k-1
# be sure the data is shuffled in case someone put all the positive examples first in the data!
for j from 0 to k-1:
D_minus_j = union of all the datasets D_i, except for D_j
h_j = L(D_minus_j)
score_j = accuracy of h_j measured on D_j
return average(score0, ..., score(k-1))
</code></pre>
So, each time, it trains on $k−1$ of the pieces of the data set and tests the resulting hypothesis on the piece that was not used for training.
When $k=n$, it is called *leave-one-out cross validation*.
Implement cross validation **assuming that the input data is shuffled already** so that the positives and negatives are distributed randomly. If the size of the data does not evenly divide by k, split the data into n % k sub-arrays of size n//k + 1 and the rest of size n//k. (Hint: You can use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.array_split.html">numpy.array_split</a>
and <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html">numpy.concatenate</a> with axis arguments to split and rejoin the data as you desire.)
Note: In Python, n//k indicates integer division, e.g. 2//3 gives 0 and 4//3 gives 1.
```
import numpy as np
def xval_learning_alg(learner, data, labels, k):
    """Estimate a learner's accuracy via k-fold cross-validation.

    data is (d, n) and labels is (1, n); both are split column-wise into k
    nearly equal folds (the columns are assumed to be shuffled already).
    Each fold serves once as the held-out test set while the remaining
    k-1 folds are merged into the training set; the mean accuracy over
    the k runs is returned.
    """
    data_folds = np.array_split(data, k, axis=1)
    label_folds = np.array_split(labels, k, axis=1)
    total = 0
    for fold in range(k):
        # Training set = every fold except the held-out one.
        train_data = np.concatenate(
            [d for j, d in enumerate(data_folds) if j != fold], axis=1)
        train_labels = np.concatenate(
            [l for j, l in enumerate(label_folds) if j != fold], axis=1)
        held_out_data = np.array(data_folds[fold])
        held_out_labels = np.array(label_folds[fold])
        total += eval_classifier(learner, train_data, train_labels,
                                 held_out_data, held_out_labels)
    return total / k
test_xval_learning_alg(xval_learning_alg, perceptron)
```
## 10) Testing
In this section, we compare the effectiveness of perceptron and averaged perceptron on some data that are not necessarily linearly separable.
Use your `eval_learning_alg` and the `gen_flipped_lin_separable` generator in the code file to evaluate the accuracy of `perceptron` vs. `averaged_perceptron`. `gen_flipped_lin_separable` can be called with an integer to return a data set and labels. Note that this generates linearly separable data and then "flips" the labels with some specified probability (the argument pflip); so most of the results will not be linearly separable. You can also specify pflip in the call to the generator. You should use the default values of th and th_0 to retain consistency with the Tutor.
Run enough trials so that you can confidently predict the accuracy of these algorithms on new data from that same generator; assume training/test sets on the order of 20 points. The Tutor will check that your answer is within 0.025 of the answer we got using the same generator.
```
# Average accuracy of plain perceptron on noisy data: 5 trials of
# gen_flipped_lin_separable (10% label flips), 20 train / 20 test points each.
print(eval_learning_alg(perceptron, gen_flipped_lin_separable(pflip=.1), 20, 20, 5))
```
| github_jupyter |
```
# # this just to make sure we are using only on CPU
# import os
# os.environ["CUDA_VISIBLE_DEVICES"]="-1"
%cd ..
import time
import os.path as op
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm_notebook
from cascading import CascadeNet
from data_torch import MaskedUntouched2DDataset, MaskedUntouched2DAllLoadedDataset
from torch_training import fit_torch, torch_psnr
# paths
# NOTE(review): hard-coded local fastMRI single-coil paths — adjust per machine.
train_path = '/media/Zaccharie/UHRes/singlecoil_train/singlecoil_train/'
val_path = '/media/Zaccharie/UHRes/singlecoil_val/'
test_path = '/media/Zaccharie/UHRes/singlecoil_test/'
# Dataset sizes (slices and volumes) — informational only, not used below.
n_samples_train = 34742
n_samples_val = 7135
n_volumes_train = 973
n_volumes_val = 199
# data loader
# generators
AF = 4  # acceleration factor passed to the masked datasets
# train_gen = MaskedUntouched2DAllLoadedDataset(train_path, af=AF, inner_slices=1)
train_gen = MaskedUntouched2DDataset(train_path, af=AF, inner_slices=1)
val_gen = MaskedUntouched2DDataset(val_path, af=AF)
# Hyperparameters of the cascade network.
run_params = {
    'n_cascade': 5,
    'n_convs': 5,
    'n_filters': 48,
}
n_epochs = 500
# Timestamped unique id per run, used for checkpoints and TensorBoard logs.
run_id = f'cascadenet_torch_af{AF}_{int(time.time())}'
# '-{epoch:02d}' is deliberately NOT an f-string field: it is filled in later
# at checkpoint-save time.
chkpt_path = f'checkpoints/{run_id}' + '-{epoch:02d}.hdf5'
log_dir = op.join('logs', run_id)
print(run_id)
model = CascadeNet(**run_params)
optimizer = Adam(model.parameters(), lr=1e-3, weight_decay=1e-7)
writer = SummaryWriter(log_dir=log_dir)
model.cuda();
# Count the trainable parameters of the model.
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
params
# def overfit_epoch(model, data, optimizer, device):
# model.train()
# kspace, mask, image_gt = data
# kspace = kspace[0] * 1e6
# mask = mask[0]
# image_gt = image_gt[0] * 1e6
# kspace = kspace.to(device)
# mask = mask.to(device)
# image_gt = image_gt.to(device)
# image_pred = model(kspace, mask)
# loss = F.l1_loss(image_pred, image_gt)
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# psnr = torch_psnr(image_pred, image_gt)
# print('Training PSNR:', psnr)
# %%time
# i, data = next(enumerate(train_loader))
# for _ in tqdm_notebook(range(500)):
# overfit_epoch(model, data, optimizer, 'cuda')
%%time
# Restrict both generators to a handful of files for a quick smoke-test run.
train_gen.filenames = train_gen.filenames[:10]
val_gen.filenames = val_gen.filenames[:1]
train_loader = DataLoader(
    dataset=train_gen,
    batch_size=1,
    shuffle=False,
    num_workers=10,  # parallel workers for data loading
    pin_memory=True,  # page-locked host memory for faster GPU transfers
)
val_loader = DataLoader(
    dataset=val_gen,
    batch_size=1,
    # num_workers=35,
    pin_memory=True,
    shuffle=False,
)
# Full training loop: logs to TensorBoard via `writer` and checkpoints
# to `chkpt_path` every `save_freq` epochs.
fit_torch(
    model,
    train_loader,
    val_loader,
    n_epochs,
    writer,
    optimizer,
    chkpt_path,
    run_id=run_id,
    device='cuda',
    save_freq=500,
    tqdm_wrapper=tqdm_notebook,
)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
# Load the flight-delays dataset from the local CSV.
path_to_file = 'flight_delays.csv'
df = pd.read_csv(path_to_file, sep=',')
# f = flights whose departure was NOT delayed by 15+ minutes ('N').
# NOTE(review): downstream plots label counts from `f` as "Number of delay" —
# if delayed flights were intended, this filter should be == 'Y'; confirm.
f = df[(df['dep_delayed_15min'] == 'N')]
f
```
1.Доля всех задержек ко всем вылетам
```
# Count flights per delay flag over the WHOLE dataframe (delayed 'Y' vs not 'N').
df.groupby('dep_delayed_15min')['UniqueCarrier'].count()
import plotly.express as px
# Pie chart of the delayed / non-delayed split.
fig = px.pie(df.groupby('dep_delayed_15min')['UniqueCarrier'].count(), values='UniqueCarrier', names='UniqueCarrier', title='Delays')
fig.show()
```
2. зависимость количества задержек от длины пути, который предстоит пролететь самолёту
```
# Count of selected flights per flight distance.
# NOTE(review): `f` holds flights with dep_delayed_15min == 'N' (NOT delayed),
# yet the axis label says "Number of delay" — confirm whether 'Y' was intended.
from_distance = f.groupby('Distance')['dep_delayed_15min'].count()
from_distance = from_distance.plot(kind="bar", rot=5, fontsize=10, color = 'seagreen')
from_distance.set_ylabel("Number of delay")
from_distance.set_xlabel("Distance")
plt.ylim([0, 340])
```
3. Tоп 5 направлений, для которых чаще всего происходят задержки
```
# Five destination airports with the highest flight counts in `f`.
# NOTE(review): `f` was filtered to dep_delayed_15min == 'N' (not delayed), so
# the "delay" wording here may not match the data — confirm.
five_bad_directions = f.groupby('Dest')['dep_delayed_15min'].count().sort_values(ascending=False).head(5)
five_bad_directions = five_bad_directions.plot(kind="bar", rot=5, fontsize=10, color = 'purple')
five_bad_directions.set_ylabel("Number of delay")
five_bad_directions.set_xlabel("Direction")
```
4. В какие времена года чаще всего происходят задержки рейсов
```
# Counts of the selected flights per month.
# NOTE(review): the f'c-{i}' lookup implies Month holds values like 'c-1'..'c-12' —
# confirm against the CSV.
per_month = f.groupby('Month')['dep_delayed_15min'].count()
seasons = ['Winter','Spring', 'Summer', 'Autumn']
# NOTE(review): starts_of_seasons is never used below; the loop simply sums
# consecutive 3-month chunks (1-3, 4-6, 7-9, 10-12) and labels them with
# `seasons` in order — so 'Winter' here means Jan-Mar, not Dec-Feb.
starts_of_seasons = [1, 4, 7, 9]
data = []
per_season = 0
for i in range(1, 13):
    per_season += per_month[f'c-{i}']
    if i%3==0:
        # Close out the current 3-month window.
        data.append(per_season)
        per_season = 0
seasons_data = pd.DataFrame(data, index = seasons, columns = ['delay'])
temp = seasons_data.plot(kind='bar', rot=75, color='maroon');
temp.set_xlabel("Season")
temp.set_ylabel("Number of delays")
plt.ylim([19100, 21000])
```
5. Топ 10 самых хороших перевозчиков, которые реже всего задерживают свои рейсы
```
# Ten carriers with the MOST flights in `f` (sorted ascending, take the tail).
# NOTE(review): since `f` holds NON-delayed flights, these are the carriers with
# the most on-time flights, not necessarily the fewest delays — confirm intent.
tail_ten_comp = f.groupby('UniqueCarrier')['dep_delayed_15min'].count().sort_values(ascending=True).tail(10)
tail_ten_comp = tail_ten_comp.plot(kind="bar", rot=10, fontsize=10, color = 'rebeccapurple')
tail_ten_comp.set_ylabel("Number of delay")
tail_ten_comp.set_xlabel("Company")
```
6. Топ 10 самых безответственных аэропортов, в которых чаще всего происходят задержки
```
# Ten origin airports with the highest counts in `f`.
# NOTE(review): `f` contains non-delayed flights, so the "delay" labels below
# are misleading unless the filter is changed to 'Y' — verify.
bad_airports = f.groupby('Origin')['dep_delayed_15min'].count().sort_values(ascending=False).head(10)
Smth_after_that = bad_airports.plot(x="airport", y="Delay", kind="bar", rot=10, fontsize=10, color = 'darkkhaki')
Smth_after_that.set_ylabel("Number of delay")
Smth_after_that.set_xlabel("Airport")
```
| github_jupyter |
# Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
## <font color='darkblue'>Updates</font>
This notebook has been updated over the past few months. The prior version was named "v5", and the current version is now named '6a'
#### If you were working on a previous version:
* You can find your prior work by looking in the file directory for the older files (named by version name).
* To view the file directory, click on the "Coursera" icon in the top left corner of this notebook.
* Please copy your work from the older versions to the new version, in order to submit your work for grading.
#### List of Updates
* Forward propagation formula, indexing now starts at 1 instead of 0.
* Optimization function comment now says "print cost every 100 training iterations" instead of "examples".
* Fixed grammar in the comments.
* Y_prediction_test variable name is used consistently.
* Plot's axis label now says "iterations (hundred)" instead of "iterations".
* When testing the model, the test image is normalized by dividing by 255.
## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
```
## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
```
# Loading the data (cat/non-cat)
# Returns raw train/test image arrays, their label rows, and the class-name array.
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
```
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
```
# Example of a picture
index = 25  # change this to view a different training example
plt.imshow(train_set_x_orig[index])
# train_set_y is (1, m), hence the [:, index] lookup; classes holds byte strings.
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
```
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
```
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]  # number of training examples
m_test = test_set_x_orig.shape[0]    # number of test examples
num_px = train_set_x_orig.shape[1]   # images are square, so height == width
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
```
**Expected Output for m_train, m_test and num_px**:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```
```
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
# Flatten each (num_px, num_px, 3) image into a single column: reshape to
# (m, num_px*num_px*3), then transpose so each column is one example.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
```
**Expected Output**:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
```
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
```
<font color='blue'>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data
## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """Return the logistic sigmoid of z, elementwise.

    Arguments:
    z -- a scalar or numpy array of any size.

    Return:
    s -- 1 / (1 + exp(-z)), same shape as z.
    """
    ### START CODE HERE ### (≈ 1 line of code)
    s = 1.0 / (1.0 + np.exp(-z))
    ### END CODE HERE ###
    return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
```
**Expected Output**:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
```
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
    """Create the zero-initialized parameters of the model.

    Argument:
    dim -- number of parameters, i.e. the length of the weight vector.

    Returns:
    w -- (dim, 1) numpy array of zeros.
    b -- the scalar bias, initialized to 0.
    """
    ### START CODE HERE ### (≈ 1 line of code)
    w = np.zeros((dim, 1))
    b = 0
    ### END CODE HERE ###
    # Sanity checks on the shape/type the rest of the notebook relies on.
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
```
**Expected Output**:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
```
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
    """Compute the logistic-regression cost and its gradients.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    grads -- dict with "dw" (same shape as w) and "db" (a scalar).
    cost -- negative log-likelihood cost for logistic regression.
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    A = sigmoid(np.dot(w.T, X) + b)  # predicted probabilities, shape (1, m)
    log_likelihood = np.dot(Y, np.log(A).T) + np.dot(1 - Y, np.log(1 - A).T)
    cost = -1 / m * log_likelihood

    # BACKWARD PROPAGATION (TO FIND GRAD)
    residual = A - Y
    dw = 1 / m * np.dot(X, residual.T)
    db = 1 / m * np.sum(residual)

    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)  # collapse the (1, 1) array to a scalar
    assert(cost.shape == ())

    grads = {"dw": dw,
             "db": db}

    return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99845601]
[ 2.39507239]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.00145557813678 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 5.801545319394553 </td>
</tr>
</table>
### 4.4 - Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
```
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """Learn w and b by plain gradient descent on the logistic cost.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dict with the learned weights "w" and bias "b".
    grads -- dict with the last-step gradients "dw" and "db".
    costs -- cost recorded every 100 iterations, for the learning curve.
    """
    costs = []

    for step in range(num_iterations):

        # Cost and gradient at the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]

        # Gradient-descent update (new arrays; the caller's w is not mutated).
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record (and optionally print) the cost every 100 training iterations.
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(step, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}

    return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.19033591]
[ 0.12259159]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.92535983008 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.67752042]
[ 1.41625495]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.219194504541 </td>
</tr>
</table>
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
```
# GRADED FUNCTION: predict
def predict(w, b, X):
    '''
    Predict 0/1 labels using learned logistic regression parameters (w, b).

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a (1, m) numpy array of 0/1 predictions for the examples in X
    '''
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)

    # Predicted probability of a cat for every column of X.
    A = sigmoid(np.dot(w.T, X) + b)

    # Threshold at 0.5: activation > 0.5 -> 1, otherwise 0.
    Y_prediction = np.where(A > 0.5, 1., 0.)

    assert(Y_prediction.shape == (1, m))

    return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
```
**Expected Output**:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1. 0.]]
</td>
</tr>
</table>
<font color='blue'>
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction_test for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    ### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
    w, b = initialize_with_zeros(X_train.shape[0])
    # Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]
    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    ### END CODE HERE ###
    # Print train/test Errors
    # Accuracy = 1 - mean |prediction - label|; valid because both arrays are
    # 0/1, so the absolute difference is exactly the per-example error flag.
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train" : Y_prediction_train,
         "w" : w,
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}
    return d
```
Run the following cell to train your model.
```
# Train for 2000 iterations at learning rate 0.005, printing the cost every 100 steps.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **Cost after iteration 0 ** </td>
<td> 0.693147 </td>
</tr>
<tr>
<td> <center> $\vdots$ </center> </td>
<td> <center> $\vdots$ </center> </td>
</tr>
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
```
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
```
Let's also plot the cost function and the gradients.
```
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])  # costs were recorded every 100 iterations
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
#### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
```
learning_rates = [0.01, 0.001, 0.0001]
models = {}
# Train one model per learning rate; the recorded costs are plotted below.
for i in learning_rates:
    print ("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
    print ('\n' + "-------------------------------------------------------" + '\n')
# Overlay the learning curves of all trained models on one figure.
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
```
**Interpretation**:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg"   # change this to the name of your image file
## END CODE HERE ##

# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
image = image/255.
# Resize to the training resolution and flatten to a (num_px*num_px*3, 1) column vector.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)

plt.imshow(image)
# classes holds byte strings (b'cat' / b'non-cat'), hence the .decode().
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
<font color='blue'>
**What to remember from this assignment:**
1. Preprocessing the dataset is important.
2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
- Play with the learning rate and the number of iterations
- Try different initialization methods and compare the results
- Test other preprocessings (center the data, or divide each row by its standard deviation)
Bibliography:
- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
#reading in an image
image = mpimg.imread('test_images/solidWhiteCurve.jpg')
ysize = image.shape[0] # 540  (frame height in pixels)
xsize = image.shape[1] # 960  (frame width in pixels)
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## My Thought Process
***
In the project report, I will present two methods that I developed.
### Method 1
Image Pre-processing: grayscale -> canny edge detection -> excessive gaussian blur to provide the single-line lane marks.
Methodology: use maximum-brightness detection in each row within the defined regions of interest (left and right). After finding the (row, column) points that indicate the lanes, I used linear regression to interpolate/extrapolate the lanes. I found that Method 1 is more stable than Method 2.
### Method 2
Image Pre-processing: grayscale -> canny edge detection -> gentle gaussian blur to outline the lanes.
Methodology: Use hough transformation to find all the lines in the defined regions of interest, which are left and right. To filter the slope of lines in a reasonable range, and interpolate/extrapolate selected lines to the top and bottom highlights. Average the top and bottom in x-position and link them as two single lines.
Test sample images: this method is much more sensitive to the parameters I used.
Test sample videos: The segmented lane is shaky.
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
    # Use cv2.COLOR_BGR2GRAY instead if the image was read with cv2.imread(),
    # which loads pixels in BGR channel order.
def canny(img, low_threshold, high_threshold):
    """Run the Canny edge detector with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth `img` with a square Gaussian kernel of side `kernel_size`."""
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    return blurred
def region_of_interest(img, vertices):
    """Zero out every pixel of `img` outside the polygon(s) in `vertices`.

    Returns a new image; `img` itself is not modified.
    """
    # The fill value must match the channel count of the input image.
    if img.ndim > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon white on a black mask, then AND it with the image.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw every (x1, y1, x2, y2) segment in `lines` onto `img` in place."""
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Probabilistic Hough transform on an edge image.

    Returns a black RGB image of the same height/width with the detected
    segments drawn in red.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the annotation image onto the original: initial_img*α + img*β + γ."""
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
def plot(img, cmap='gray'):
    """Display `img` in a matplotlib figure using the given colormap."""
    plt.imshow(img, cmap=cmap)
    plt.show()
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os
# Sanity check: list the bundled test images and load each one.
test_image_list= os.listdir("test_images/")
for img_name in test_image_list:
    print (img_name[:-4])   # file name without its ".jpg" extension
    # NOTE(review): test_img is overwritten each pass; only the last image survives the loop.
    test_img = mpimg.imread("test_images/" + img_name[:-4]+'.jpg')
```
## Build a Lane Finding Pipeline - Method 1
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
import os

# Method 1: detect each lane as the brightest ridge (per row) of a heavily
# blurred Canny edge map, then fit a straight line through those points.
test_img_list= os.listdir("test_images/")
for img_name in test_img_list:
    print (img_name)
    test_img = mpimg.imread("test_images/" + img_name)
    # Grab the x and y size and make a copy of the image
    ysize = test_img.shape[0] # 540
    xsize = test_img.shape[1] # 960
    # Applies the Grayscale transform
    gray_img=grayscale(test_img)
    #print("Convert to grayscale:")
    #plot(gray_img)
    #plt.imsave("test_images_output/" + img_name[:-4] + "_gray.jpg", gray_img, cmap='gray')
    # Applies the Canny transform
    low_threshold=200
    high_threshold=250
    #print("Apply Canny transform")
    canny_img=canny(gray_img, low_threshold, high_threshold)
    #plot(canny_img, cmap='gray')
    #plt.imsave("test_images_output/" + img_name[:-4] + "_canny.jpg", canny_img, cmap='gray')
    # Apply a Gaussian Noise kernel. The deliberately large 51px kernel smears
    # a lane mark's two edges into one bright ridge for the argmax below.
    #print("Apply Gaussian Noise")
    blurred_img=gaussian_blur(canny_img, 51)
    #plot(blurred_img)
    #plt.imsave("test_images_output/" + img_name[:-4] + "_blurred.jpg", blurred_img, cmap='gray')
    # Apply an image mask = left and right (trapezoids as fractions of frame size)
    vertices_right= np.array([[[xsize*0.5,ysize*0.6],[xsize*0.55,ysize*0.6],[xsize*0.95,ysize*0.9],[xsize*0.5,ysize*0.9]]], dtype=np.int32)
    vertices_left= np.array([[[xsize*0.1,ysize*0.9],[xsize*0.45,ysize*0.6],[xsize*0.5,ysize*0.6],[xsize*0.5,ysize*0.9]]], dtype=np.int32)
    region_right=region_of_interest(blurred_img,vertices_right)
    region_left=region_of_interest(blurred_img,vertices_left)
    # Find the max brightness after left_mask image
    data_left=np.argmax(region_left,axis=1)   # brightest column per row (0 if the row is dark)
    x_left=np.arange(len(data_left));
    data_left_extracted= data_left[data_left > 10]   # keep rows with a real ridge
    x_left_extracted=x_left[data_left > 10]
    fitplot_left=np.poly1d(np.polyfit(x_left_extracted,data_left_extracted, 1))
    # Find the max brightness after right_mask image
    data_right=np.argmax(region_right,axis=1)
    x_right=np.arange(len(data_right));
    data_right_extracted= data_right[data_right > 10]
    x_right_extracted=x_right[data_right > 10]
    fitplot_right=np.poly1d(np.polyfit(x_right_extracted,data_right_extracted, 1))
    # extrapolate the points of interest
    line_image = np.copy(test_img)*0 # creating a blank to draw lines on
    lines=[[[int(fitplot_left(ysize)),int(ysize),int(fitplot_left(ysize*0.6)),int(ysize*0.6)],[int(fitplot_right(ysize)),int(ysize),int(fitplot_right(ysize*0.6)),int(ysize*0.6)]]]
    draw_lines(line_image, lines, color=[255, 0, 0], thickness=10)
    lines_edges = weighted_img(line_image, test_img, 0.9, 1, 0)
    #plt.imshow(lines_edges, cmap='gray')
    #plt.show()
    plot(lines_edges, cmap='gray')
    #plt.imsave("test_images_output/" + img_name[:-4] + "_final.jpg", lines_edges, cmap='gray')
```
## Build a Lane Finding Pipeline - Method 2
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
import os
test_img_list= os.listdir("test_images/")
for img_name in test_img_list:
print (img_name)
test_img = mpimg.imread("test_images/" + img_name)
test_image = mpimg.imread('test_images/solidWhiteCurve.jpg')
# Grab the x and y size and make a copy of the image
ysize = test_img.shape[0] # 540
xsize = test_img.shape[1] # 960
# Applies the Grayscale transform
gray_img=grayscale(test_img)
#print("Convert to grayscale:")
#plot(gray_img)
#plt.imsave("test_images_output/" + img_name[:-4] + "_gray_2.jpg", gray_img, cmap='gray')
# Applies the Canny transform
low_threshold=200
high_threshold=240
#print("Apply Canny transform")
canny_img=canny(gray_img, low_threshold, high_threshold)
#plot(canny_img, cmap='gray')
#plt.imsave("test_images_output/" + img_name[:-4] + "_canny.jpg_2", canny_img, cmap='gray')
# Apply a Gaussian Noise kernel
print("Apply Gaussian Blur")
blurred_img=gaussian_blur(canny_img, 11)
#plot(blurred_img)
#plt.imsave("test_images_output/" + img_name[:-4] + "_blurred.jpg_2", blurred_img, cmap='gray')
# Applies an image mask.
vertices_right= np.array([[[xsize*0.5,ysize*0.55],[xsize*0.55,ysize*0.55],[xsize*0.9,ysize],[xsize*0.5,ysize]]], dtype=np.int32)
vertices_left= np.array([[[xsize*0.1,ysize],[xsize*0.45,ysize*0.55],[xsize*0.5,ysize*0.55],[xsize*0.5,ysize]]], dtype=np.int32)
region_right=region_of_interest(blurred_img,vertices_right)
region_left=region_of_interest(blurred_img,vertices_left)
# Apply Hough Lines
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 5 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 100 #minimum number of pixels making up a line
max_line_gap = 100 # maximum gap in pixels between connectable line segments
line_image = np.copy(test_img)*0 # creating a blank to draw lines on
# Average the position of each of the lines and extrapolate to the top and bottom of the lane.
lines_right = cv2.HoughLinesP(region_right, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)
lines_left = cv2.HoughLinesP(region_left, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)
line_correct_right=[]
line_correct_left=[]
slope_right=[]
slope_left=[]
x1_right=[]
x2_right=[]
x1_left=[]
x2_left=[]
for line in lines_right:
for x1,y1,x2,y2 in line:
#slope= (y1-y2)/(x1-x2)
[slope,b] = np.polyfit([y1, y2],[x1, x2], 1)
if slope < 2 and slope > 1:
#line_correct_right.append(line)
slope_right.append(slope)
f=np.poly1d([slope,b])
x1_right.append(f(ysize*0.6))
x2_right.append(f(ysize))
for line in lines_left:
for x1,y1,x2,y2 in line:
#slope= (y1-y2)/(x1-x2)
[slope,b] = np.polyfit([y1, y2],[x1, x2],1)
if slope < -1 and slope > -2:
#line_correct_left.append(line)
slope_left.append(slope)
f=np.poly1d([slope,b])
x1_left.append(f(ysize*0.6))
x2_left.append(f(ysize))
line_correct=[[[int(np.mean(x1_right)),int(ysize*0.6),int(np.mean(x2_right)),int(ysize)],[int(np.mean(x1_left)),int(ysize*0.6),int(np.mean(x2_left)),int(ysize)]]]
draw_plot=draw_lines(line_image, line_correct, color=[255, 0, 0], thickness=10)
lines_edges = cv2.addWeighted(test_img, 0.8, line_image, 1, 0)
#print(line_correct)
#print(slope_right)
#print(slope_left)
plot(lines_edges, cmap='gray')
#plt.imsave("test_images_output/" + img_name[:-4] + "_final_2.jpg", lines_edges, cmap='gray')
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
```
## Pipeline 1 based on Method 1
```
# Pipeline 1
def process_image(image):
    """Method 1 video pipeline: brightest-ridge lane detection.

    Takes one RGB video frame and returns a 3-channel image with the
    extrapolated left/right lane lines drawn on top of it.
    """
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    t = 60  # NOTE(review): unused -- looks like a leftover tuning constant
    low_threshold=200 #100
    high_threshold=250
    # make a copy
    test_img = np.copy(image)
    ysize = test_img.shape[0] # 540
    xsize = test_img.shape[1] # 960
    # PIPELINE
    # 0 convert to grayscale
    gray_img=grayscale(test_img)
    # 1 apply the Canny transform
    canny_img=canny(gray_img, low_threshold, high_threshold)
    # 2 apply blur (deliberately large 51px kernel: merges a lane mark's two
    #   edges into a single bright ridge that the per-row argmax can find)
    blurred_img = gaussian_blur(canny_img, 51)
    # 3 apply ROI mask
    # Apply an image mask = left and right (trapezoids expressed as fractions
    # of the frame size; slightly shorter than in the still-image version)
    vertices_right= np.array([[[xsize*0.5,ysize*0.65],[xsize*0.55,ysize*0.65],[xsize*0.95,ysize*0.85],[xsize*0.5,ysize*0.85]]], dtype=np.int32)
    vertices_left= np.array([[[xsize*0.1,ysize*0.85],[xsize*0.45,ysize*0.65],[xsize*0.5,ysize*0.65],[xsize*0.5,ysize*0.85]]], dtype=np.int32)
    region_right=region_of_interest(blurred_img,vertices_right)
    region_left=region_of_interest(blurred_img,vertices_left)
    # Find the max brightness after left_mask image
    data_left=np.argmax(region_left,axis=1)   # brightest column per row
    x_left=np.arange(len(data_left));
    data_left_extracted= data_left[data_left > 10]   # keep rows with a real ridge
    x_left_extracted=x_left[data_left > 10]
    fitplot_left=np.poly1d(np.polyfit(x_left_extracted,data_left_extracted, 1))
    # Find the max brightness after right_mask image
    data_right=np.argmax(region_right,axis=1)
    x_right=np.arange(len(data_right));
    data_right_extracted= data_right[data_right > 10]
    x_right_extracted=x_right[data_right > 10]
    fitplot_right=np.poly1d(np.polyfit(x_right_extracted,data_right_extracted, 1))
    # 4 get lines image
    # extrapolate the points of interest
    line_image = np.copy(test_img)*0 # creating a blank to draw lines on
    lines=[[[int(fitplot_left(ysize)),int(ysize),int(fitplot_left(ysize*0.65)),int(ysize*0.65)],[int(fitplot_right(ysize)),int(ysize),int(fitplot_right(ysize*0.65)),int(ysize*0.65)]]]
    draw_lines(line_image, lines, color=[255, 0, 0], thickness=10)
    # 5 superimpose result on top of original image
    final_img = weighted_img(line_image, test_img, 0.9, 1, 0)
    #final_img = weighted_img(lines_edges, test_img)
    return final_img
```
## Pipeline 2 based on Method 2
```
# Pipeline 2
def process_image(image):
    """Method 2 video pipeline: Canny + Hough-segment slope filtering.

    NOTE(review): this second definition shadows the Method 1
    process_image defined above, so the video cells below run Method 2.
    """
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    t = 60  # NOTE(review): unused -- looks like a leftover tuning constant
    low_threshold=200
    high_threshold=240
    # make a copy
    test_img = np.copy(image)
    ysize = test_img.shape[0] # 540
    xsize = test_img.shape[1] # 960
    # PIPELINE
    # 0 convert to grayscale
    gray_img=grayscale(test_img)
    # 1 apply the Canny transform
    canny_img=canny(gray_img, low_threshold, high_threshold)
    # 2 apply blur (gentle 11px kernel, cf. 51px in Method 1)
    blurred_img = gaussian_blur(canny_img, 11)
    # 3 apply ROI mask
    vertices_right= np.array([[[xsize*0.5,ysize*0.55],[xsize*0.55,ysize*0.55],[xsize*0.9,ysize],[xsize*0.5,ysize]]], dtype=np.int32)
    vertices_left= np.array([[[xsize*0.1,ysize],[xsize*0.45,ysize*0.55],[xsize*0.5,ysize*0.55],[xsize*0.5,ysize]]], dtype=np.int32)
    region_right=region_of_interest(blurred_img,vertices_right)
    region_left=region_of_interest(blurred_img,vertices_left)
    # 4 apply Hough Lines
    rho = 1 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 # angular resolution in radians of the Hough grid
    threshold = 5 # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 100 #minimum number of pixels making up a line
    max_line_gap = 100 # maximum gap in pixels between connectable line segments
    # 5 Average the position of each of the lines and extrapolate to the top and bottom of the lane.
    lines_right = cv2.HoughLinesP(region_right, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)
    lines_left = cv2.HoughLinesP(region_left, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)
    line_correct_right=[]
    line_correct_left=[]
    slope_right=[]
    slope_left=[]
    x1_right=[]
    x2_right=[]
    x1_left=[]
    x2_left=[]
    for line in lines_right:
        for x1,y1,x2,y2 in line:
            #slope= (y1-y2)/(x1-x2)
            # Fit x as a function of y so near-vertical lanes stay well-conditioned.
            [slope,b] = np.polyfit([y1, y2],[x1, x2], 1)
            if slope < 2 and slope > 1:
                #line_correct_right.append(line)
                slope_right.append(slope)
                f=np.poly1d([slope,b])
                x1_right.append(f(ysize*0.6))
                x2_right.append(f(ysize))
    for line in lines_left:
        for x1,y1,x2,y2 in line:
            #slope= (y1-y2)/(x1-x2)
            [slope,b] = np.polyfit([y1, y2],[x1, x2],1)
            if slope < -1 and slope > -2:
                #line_correct_left.append(line)
                slope_left.append(slope)
                f=np.poly1d([slope,b])
                x1_left.append(f(ysize*0.6))
                x2_left.append(f(ysize))
    # 6 get lines image
    line_image = np.copy(image)*0 # creating a blank to draw lines on
    line_correct=[[[int(np.mean(x1_right)),int(ysize*0.6),int(np.mean(x2_right)),int(ysize)],[int(np.mean(x1_left)),int(ysize*0.6),int(np.mean(x2_left)),int(ysize)]]]
    draw_plot=draw_lines(line_image, line_correct, color=[255, 0, 0], thickness=10)
    # 7 superimpose result on top of original image
    final_img = weighted_img(line_image, test_img, 0.9, 1, 0)
    #final_img = weighted_img(lines_edges, test_img)
    return final_img
```
Let's try the one with the solid white lane on the right first ...
```
# Quick visual smoke test of the pipeline on a still frame before running video.
image = mpimg.imread('test_images/solidWhiteCurve.jpg')
abc=process_image(image)
plt.imshow(abc)
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
# Embed the rendered video inline in the notebook.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
```
## Reflection
Please see all 3 videos in the folder incluing the challenge.
Method 1 is more robust than Method 2, as you can see that most of the time, method 1 can find the lanes accurately in both solidWhiteRight.mp4 and solidYellowLeft.mp4.
Handling challenge.mp4 is the best way to validate the robustness of the pipeline. There are a few improvements I made, based on Method 1, to find the lanes in the challenge video. Still, it is not perfect.
Here are some ideas of improvement.
(1) The bottom of images was covered by the body of vehicle that certainly disturb the pipeline process. To avoid that, simply to make the region of interest smaller and avoid that area of image.
(2) I also noticed that the pipeline failed when car was driven under tree shade, so the next step is to sharpen the lanes in such image.
(3) When the road condition changes, from old gray road to new black asphalt road, there are horizontal lines/features that interfere with the original algorithm. Therefore, by combining Method 1 and Method 2, I could have avoided this.
(4) Method 1 can be extended to curved lanes by fitting a trajectory.
Overall, this is a fun project, and I sharpen my basic python skills in a short time.
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
## PIN computation
To compute the PIN of a given day, we need to optimize the product of the likelihood computed on each time interval in the day.
In particular, we fix a time interval of 5 minutes to discretize time, and since we are dealing with the data of a single trading day we only compute the corresponding PIN, without further analysis of its time evolution.
Note that this problem must be approached with particular care about the choice of optimization method. We tested all the methods from scipy.optimize.minimize for bounded problems, both gradient-based and gradient-free, but most of the results exhibited a high dependence on the initial guess for the set of parameters. We then chose to apply the Powell method, which is gradient-free, since it is the only one that actually exhibits an evolution and turns out to be unbiased by the initial point.
```
def likelihood(x, bid, ask, T):
    """
    Mixture likelihood of a single (bid, ask) count pair under the
    Easley et al. PIN model (their eq. (15) notation).

    args:
        x: model parameters [alpha, delta, eps, mu]
        bid: observed number of bid-side trades in the bin
        ask: observed number of ask-side trades in the bin
        T: width of the time bin
    """
    from scipy.stats import poisson
    alpha, delta, eps, mu = x[0], x[1], x[2], x[3]
    # Poisson factors at the uninformed (eps) and informed (eps + mu) rates.
    bid_quiet = poisson.pmf(k=bid, mu=eps * T)
    ask_quiet = poisson.pmf(k=ask, mu=eps * T)
    bid_busy = poisson.pmf(k=bid, mu=(eps + mu) * T)
    ask_busy = poisson.pmf(k=ask, mu=(eps + mu) * T)
    # No news / bad news (extra ask-side arrivals) / good news (extra bid-side).
    no_news = (1 - alpha) * bid_quiet * ask_quiet
    bad_news = alpha * delta * bid_quiet * ask_busy
    good_news = alpha * (1 - delta) * bid_busy * ask_quiet
    return no_news + bad_news + good_news
def loss(x, bid, ask, T):
    """
    Negative product of the per-bin likelihoods (the objective to minimize).

    args:
        x: model parameters (being optimized)
        bid: list of observations of the bid side
        ask: list of observations of the ask side
        T: time bin width (assumed the same for each bin)
    """
    per_bin = (likelihood(x, b, a, T) for b, a in zip(bid, ask))
    # Keep only strictly positive factors so a single underflowed bin does
    # not zero out the whole product.
    factors = [value for value in per_bin if value > 0]
    return -np.prod(factors)
from scipy.optimize import minimize
from tqdm import tqdm   # NOTE(review): imported but never used below
from datetime import timedelta

# Width of one observation bin; total_seconds() converts it for the model.
time_delta = timedelta(minutes=1)
occurrences = pd.read_csv("../data_cleaned/occurrences.csv")

# Powell is gradient-free and, unlike the gradient-based methods tried,
# showed no strong dependence on the (seeded) random initial guess.
np.random.seed(0)
r = minimize(loss, x0=np.random.uniform(size=4),
             args=(occurrences['bid_observations'], occurrences['ask_observations'], time_delta.total_seconds()),
             method='powell', bounds=[(0, 1), (0, 1), (0, None), (0, None)])

# BUG FIX: the original built every entry from r.x[0]; each parameter must
# come from its own component of the optimum x = [alpha, delta, eps, mu].
params = {'alpha': r.x[0], 'delta': r.x[1], 'eps': r.x[2], 'mu': r.x[3]}

# PIN = informed arrival intensity over total expected arrival intensity.
PIN = params['alpha']*params['mu']/(params['alpha']*params['mu']+2*params['eps'])
print('PIN: {:.2f}'.format(PIN))
print('alpha: {:.2f}'.format(params['alpha']))
print('delta: {:.2f}'.format(params['delta']))
```
| github_jupyter |
# Move Function
Now that you know how a robot uses sensor measurements to update its idea of its own location, let's see how we can incorporate motion into this location. In this notebook, let's go over the steps a robot takes to help localize itself from an initial, uniform distribution to sensing, moving and updating that distribution.
We include the `sense` function that you've seen, which updates an initial distribution based on whether a robot senses a grid color: red or green.
Next, you're tasked with writing a function `move` that incorporates motion into the distribution. As seen below, **one motion `U= 1` to the right, causes all values in a distribution to shift one grid cell to the right.**
<img src='images/motion_1.png' width=50% height=50% />
First let's include our usual resource imports and display function.
```
# importing resources
import matplotlib.pyplot as plt
import numpy as np
```
A helper function for visualizing a distribution.
```
def display_map(grid, bar_width=0.9):
    """Plot a 1-D probability distribution as a bar chart.

    grid: list of per-cell probabilities.
    bar_width: width of each bar in the chart.
    Prints a message instead of plotting when `grid` is empty.
    """
    if len(grid) == 0:
        print('Grid is empty')
        return
    positions = range(len(grid))
    plt.bar(positions, height=grid, width=bar_width, color='b')
    plt.xlabel('Grid Cell')
    plt.ylabel('Probability')
    plt.ylim(0, 1)  # probabilities live in [0, 1]
    plt.title('Probability of the robot being at each cell in the grid')
    plt.xticks(np.arange(min(positions), max(positions)+1, 1))
    plt.show()
```
You are given the initial variables and the complete `sense` function, below.
```
# Given initial variables for the 1-D localization example.
p = [0, 1, 0, 0, 0]
# The colour of each grid cell in the 1-D world.
world = ['green', 'red', 'red', 'green', 'green']
# Z, the sensor reading ('red' or 'green').
Z = 'red'
# Sensor model: weight applied when the reading matches / misses a cell.
pHit = 0.6
pMiss = 0.2
def sense(p, Z):
    '''Return the normalized posterior distribution after measurement Z.

    Each cell's prior probability is multiplied by pHit when the sensor
    reading matches the cell colour and by pMiss otherwise, then the
    result is renormalized to sum to 1. Accurate whether Z is 'red' or
    'green'.
    '''
    # Weight every cell by how well the measurement matches its colour.
    weighted = [prob * (pHit if Z == colour else pMiss)
                for prob, colour in zip(p, world)]
    # Renormalize so the posterior sums to one.
    total = sum(weighted)
    return [w / total for w in weighted]
# Commented-out driver for applying a sequence of measurements:
# for k in range(len(measurements)):
#     p = sense(p, measurements)
```
### QUIZ: Program a function that returns a new distribution q, shifted to the right by the motion (U) units.
This function should shift a distribution with the motion, U. Keep in mind that this world is cyclic and that if U=0, q should be the same as the given p. You should see all the values in `p` are moved to the right by 1, for U=1.
```
def move(p, U):
    """Shift the distribution `p` cyclically to the right by `U` cells.

    Generalized: U may be any integer — negative values shift left, and
    shifts larger than len(p) wrap around (the world is cyclic). The
    original only handled 0 <= U < len(p). When the effective shift is
    zero, `p` itself is returned, preserving the original U == 0 contract.
    """
    if not p:
        return p
    U = U % len(p)  # cyclic world: reduce any shift to [0, len(p))
    if U == 0:
        return p
    # The last U entries wrap around to the front.
    return p[-U:] + p[:-U]
# Demonstrate cyclic motion: shifting [0, 1, 0, 0, 0] four cells to the
# right wraps the probability mass around to index 0.
p = move(p,4)
print(p)
display_map(p)
```
| github_jupyter |
```
import cartopy.crs as ccrs
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from itertools import product
import pandas as pd
import os
import time
from datetime import timedelta
import rasterio.warp as rasteriowarp
# Input data locations.
SATELLITE_DATA_PATH = os.path.expanduser('~/data/EUMETSAT/reprojected_subsetted/')
PV_DATA_FILENAME = os.path.expanduser('~/data/pvoutput.org/UK_PV_timeseries_batch.nc')
PV_METADATA_FILENAME = os.path.expanduser('~/data/pvoutput.org/UK_PV_metadata.csv')
# Target coordinate reference system for all geospatial data below.
DST_CRS = {
    'ellps': 'WGS84',
    'proj': 'tmerc',  # Transverse Mercator
    'units': 'm'  # meters
}
# Geospatial boundary in Transverse Mercator projection (meters)
SOUTH = 5513500
NORTH = 6613500
WEST = -889500
EAST = 410500
```
## Load and convert PV metadata
```
# Load per-system metadata; drop systems with no usable location.
pv_metadata = pd.read_csv(PV_METADATA_FILENAME, index_col='system_id')
pv_metadata.dropna(subset=['longitude', 'latitude'], how='any', inplace=True)
# Convert lat lons to Transverse Mercator
# NOTE(review): the {'init': 'EPSG:4326'} form is the legacy proj4 style;
# newer rasterio/pyproj prefer 'EPSG:4326' directly — confirm library version.
pv_metadata['x'], pv_metadata['y'] = rasteriowarp.transform(
    src_crs={'init': 'EPSG:4326'},
    dst_crs=DST_CRS,
    xs=pv_metadata['longitude'].values,
    ys=pv_metadata['latitude'].values)
# Filter 3 PV systems which apparently aren't in the UK!
# Keep only systems inside the SOUTH/NORTH/WEST/EAST bounding box.
pv_metadata = pv_metadata[
    (pv_metadata.x >= WEST) &
    (pv_metadata.x <= EAST) &
    (pv_metadata.y <= NORTH) &
    (pv_metadata.y >= SOUTH)]
len(pv_metadata)
```
## Load and normalise PV power data
```
%%time
# Load the PV time series, restrict to one year, and tidy the columns.
pv_power = xr.load_dataset(PV_DATA_FILENAME)
pv_power_selected = pv_power.loc[dict(datetime=slice('2018-06-01', '2019-07-01'))]
pv_power_df = pv_power_selected.to_dataframe().dropna(axis='columns', how='all')
pv_power_df = pv_power_df.clip(lower=0, upper=5E7)  # remove negative / absurd readings
pv_power_df.columns = [np.int64(col) for col in pv_power_df.columns]
# NOTE(review): tz_localize('Europe/London') can raise on DST transitions
# (ambiguous/nonexistent times) — confirm the source timestamps are local time.
pv_power_df = pv_power_df.tz_localize('Europe/London').tz_convert('UTC')
del pv_power
del pv_power_selected
# A bit of hand-crafted cleaning
# TODO: Is this still relevant?
pv_power_df[30248][:'2019-01-03'] = np.NaN
# Scale to the range [0, 1]
pv_power_df -= pv_power_df.min()
pv_power_df /= pv_power_df.max()
# Drop systems which are producing over night
NIGHT_YIELD_THRESHOLD = 0.4
night_hours = list(range(21, 24)) + list(range(0, 4))
# np.where on the per-column count of night-time exceedances: any non-zero
# count marks the column (system) as bad.
bad_systems = np.where(
    (pv_power_df[pv_power_df.index.hour.isin(night_hours)] > NIGHT_YIELD_THRESHOLD).sum()
)[0]
bad_systems = pv_power_df.columns[bad_systems]
print(len(bad_systems), 'bad systems found.')
#ax = pv_power_df[bad_systems].plot(figsize=(40, 10), alpha=0.5)
#ax.set_title('Bad PV systems');
pv_power_df.drop(bad_systems, axis='columns', inplace=True)
%%time
# Interpolate up to 15 minutes ahead.
pv_power_df = pv_power_df.interpolate(limit=3)
# Sort the columns
pv_power_df = pv_power_df[np.sort(pv_power_df.columns)]
len(pv_power_df.columns)
#pv_power_df.plot(figsize=(40, 10), alpha=0.5, legend=False);
# Sort the metadata in the same order as the PV power data
pv_metadata = pv_metadata.reindex(pv_power_df.columns, axis='index')
pv_power_df.head()
```
## Load satellite data
```
from glob import glob
from torch.utils.data import Dataset
RECTANGLE_WIDTH = 128000  # in meters
RECTANGLE_HEIGHT = RECTANGLE_WIDTH

def get_rectangle(data_array, time, centre_x, centre_y, width=RECTANGLE_WIDTH, height=RECTANGLE_HEIGHT):
    """Crop a width x height rectangle (meters) centred on (centre_x, centre_y)
    from `data_array`, then standardise it with precomputed dataset statistics.

    NOTE: the `time` argument is accepted for interface compatibility but is
    not used — the whole time dimension of the crop is returned.
    """
    half_width = width / 2
    half_height = height / 2
    # y is sliced from north to south — assumes a descending y coordinate
    # (TODO confirm against the reprojected files).
    selection = dict(
        x=slice(centre_x - half_width, centre_x + half_width),
        y=slice(centre_y + half_height, centre_y - half_height))
    cropped = data_array.loc[selection]
    # Standardise to roughly zero mean / unit variance.
    MEAN = 20.444992
    STD = 8.766013
    cropped = cropped - MEAN
    cropped = cropped / STD
    return cropped
class SatelliteLoader(Dataset):
    """Lazily indexes and serves satellite imagery files by UTC datetime.

    Attributes:
        index: pd.Series which maps from UTC datetime to full filename of satellite data.
        _data_array_cache: The last lazily opened xr.DataArray that __getitem__ was asked to open.
            Useful so that we don't have to re-open the DataArray if we're asked to get
            data from the same file on several different calls.
    """
    def __init__(self, file_pattern):
        # Build the datetime -> filename index up front (opens every file once).
        self._load_sat_index(file_pattern)
        self._data_array_cache = None
        self._last_filename_requested = None
    def __getitem__(self, dt):
        # Return the (cached) DataArray for the file covering datetime `dt`.
        sat_filename = self.index[dt]
        if self._data_array_cache is None or sat_filename != self._last_filename_requested:
            self._data_array_cache = xr.open_dataarray(sat_filename)
            self._last_filename_requested = sat_filename
        return self._data_array_cache
    def close(self):
        # Release the currently cached file handle, if any.
        if self._data_array_cache is not None:
            self._data_array_cache.close()
    def __len__(self):
        # Number of indexed timesteps (not files).
        return len(self.index)
    def _load_sat_index(self, file_pattern):
        """Opens all satellite files matching `file_pattern` and loads all their datetime indices into self.index."""
        sat_filenames = glob(file_pattern)
        sat_filenames.sort()
        n_filenames = len(sat_filenames)
        sat_index = []
        for i_filename, sat_filename in enumerate(sat_filenames):
            # Progress indicator every 10 files (and on the last file).
            if i_filename % 10 == 0 or i_filename == (n_filenames - 1):
                print('\r {:5d} of {:5d}'.format(i_filename + 1, n_filenames), end='', flush=True)
            # NOTE(review): opened DataArrays are never closed here — may leak
            # file handles on large file sets; consider a context manager.
            data_array = xr.open_dataarray(sat_filename, drop_variables=['x', 'y'])
            sat_index.extend([(sat_filename, t) for t in data_array.time.values])
        sat_index = pd.DataFrame(sat_index, columns=['filename', 'datetime']).set_index('datetime').squeeze()
        self.index = sat_index.tz_localize('UTC')
    def get_rectangles_for_all_data(self, centre_x, centre_y, width=RECTANGLE_WIDTH, height=RECTANGLE_HEIGHT):
        """Iterate through all satellite filenames and yield a rectangle of imagery from each."""
        sat_filenames = np.sort(np.unique(self.index.values))
        for sat_filename in sat_filenames:
            data_array = xr.open_dataarray(sat_filename)
            # NOTE(review): `time` here is the stdlib `time` MODULE (there is no
            # local `time` variable in this method) — harmless only because
            # get_rectangle() ignores its `time` argument; worth cleaning up.
            yield get_rectangle(data_array, time, centre_x, centre_y, width, height)
    def get_rectangle(self, time, centre_x, centre_y, width=RECTANGLE_WIDTH, height=RECTANGLE_HEIGHT):
        # Crop a rectangle from the file covering datetime `time`.
        data_array = self[time]
        return get_rectangle(data_array, time, centre_x, centre_y, width, height)
%%time
# Build the satellite index over all reprojected NetCDF files.
sat_loader = SatelliteLoader(os.path.join(SATELLITE_DATA_PATH, '*.nc'))
print()
len(sat_loader)
# Test get rectangle
# NOTE(review): `dt` is tz-naive while sat_loader.index is tz-localized UTC —
# confirm the lookup behaves as intended.
dt = pd.Timestamp('2019-02-21 10:15')
pv_system_id = pv_metadata.index[1]
x, y = pv_metadata.loc[pv_system_id][['x', 'y']]
%%time
sat_data = sat_loader.get_rectangle(time=dt, centre_x=x, centre_y=y) #, width=512000, height=512000)
# Plot one timestep of the crop with coastlines, and mark the PV system.
fig = plt.figure(figsize=(10, 10))
crs = ccrs.TransverseMercator()
ax = plt.axes(projection=crs)
ax.coastlines(resolution='10m', alpha=0.5, color='pink')
img = sat_data.isel(time=10).plot.imshow(ax=ax, cmap='gray', origin='upper', add_colorbar=True)
path_collection = ax.scatter(x=x, y=y, alpha=0.7)
import pvlib
from pvlib.location import Location
# pvlib Location for the selected PV system, used for clearsky modelling.
location = Location(
    latitude=pv_metadata['latitude'][pv_system_id],
    longitude=pv_metadata['longitude'][pv_system_id],
    tz='UTC',
    name=pv_metadata['system_name'][pv_system_id])
location
# Plot 4 days of PV yield around `dt`, with modelled clearsky irradiance
# on a twin axis for comparison.
fig, ax = plt.subplots(figsize=(20, 7))
pv_data_to_plot = pv_power_df[pv_system_id][dt - timedelta(hours=48):dt + timedelta(hours=48)]
ax.plot(pv_data_to_plot, label='PV yield')
#ax.plot((dt, dt), (0, 1), linewidth=1, color='black', label='datetime of image above')
ax.set_title(dt)
ax.set_ylim((0, 1))
ax2 = ax.twinx()
clearsky = location.get_clearsky(pv_data_to_plot.index)
lines = ax2.plot(clearsky)
for line, label in zip(lines, clearsky.columns):
    line.set_label(label);
ax2.legend(loc='upper left');
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from collections import OrderedDict
%%time
# Load all satellite data rectangles into RAM
# Pre-allocate a float16 (time, y, x) cube covering every indexed timestep,
# initialised to NaN so missing timesteps can be dropped afterwards.
dims = OrderedDict()
dims['time'] = sat_loader.index.index.values
dims['y'] = sat_data.y
dims['x'] = sat_data.x
shape = [len(values) for values in dims.values()]
print('Creating huge numpy array!', flush=True)
data = np.zeros(shape, dtype=np.float16)
print('Setting to NaN', flush=True)
data[:, :, :] = np.NaN
print('Creating huge DataArray!', flush=True)
sat_data_master = xr.DataArray(
    data,
    coords=dims,
    dims=dims.keys(),
    name='HRV')
del data, dims, shape
# Fill the cube, one file at a time, with crops centred on the PV system.
# NOTE(review): assumes every crop aligns exactly with the pre-allocated
# y/x coordinates taken from `sat_data` — confirm.
for data_array in sat_loader.get_rectangles_for_all_data(centre_x=x, centre_y=y):
    print('\r', data_array.time.values[0], flush=True, end='')
    sat_data_master.loc[data_array.time.values, :, :] = data_array
print()
# Drop timesteps that were never filled (still NaN).
sat_data_master = sat_data_master.dropna(dim='time', how='any')
# Align with PV
pv_data = pv_power_df[pv_system_id].dropna()
sat_data_index = pd.DatetimeIndex(sat_data_master.time.values, tz='UTC')
datetime_index = pv_data.index.intersection(sat_data_index)
len(datetime_index)
datetime_index.tz
sat_data_master = sat_data_master.loc[datetime_index.tz_convert(None)]
pv_data = pv_data[datetime_index]
# Move both aligned datasets onto the GPU as float16 tensors.
pv_data_cuda = torch.cuda.HalfTensor(pv_data.values[:, np.newaxis])
pv_data_cuda.shape
sat_data_master_cuda = torch.cuda.HalfTensor(sat_data_master.values[:, np.newaxis])
sat_data_master_cuda.shape
torch.cuda.get_device_name(0)
print('{:,.0f} MB CUDA memory allocated.'.format(torch.cuda.memory_allocated() / 1E6))
# Split train & test by days
days = np.unique(datetime_index.date)
len(days)
# Use every 5th day for testing
testing_days = days[::5]
len(testing_days)
training_days = np.array(list(set(days) - set(testing_days)))
training_days = np.sort(training_days)
len(training_days)
def get_index_into_datetime_index(training_or_testing_days):
    """Return positional indices of `datetime_index` entries whose calendar
    date is in `training_or_testing_days`."""
    is_member = pd.Series(datetime_index.date).isin(training_or_testing_days)
    return np.where(is_member)[0]
# Positional train/test indices; assert the split is disjoint.
training_index = get_index_into_datetime_index(training_days)
testing_index = get_index_into_datetime_index(testing_days)
assert not set(training_index).intersection(testing_index)
len(training_index), len(testing_index)
# Standardise hour-of-day to zero mean / unit variance.
# NOTE(review): linear scaling ignores the cyclic wrap 23 -> 0; consider
# sin/cos encoding if this matters.
hours_of_day = datetime_index.hour.values.astype(np.float32)
hours_of_day -= hours_of_day.mean()
hours_of_day /= hours_of_day.std()
hours_of_day = torch.cuda.HalfTensor(hours_of_day[:, np.newaxis])
# Standardise the three clearsky-irradiance channels the same way.
clearsky = location.get_clearsky(datetime_index)
clearsky -= clearsky.mean()
clearsky /= clearsky.std()
clearsky = torch.cuda.HalfTensor(clearsky.values)
class Net(nn.Module):
    """Small CNN mapping a 1x128x128 satellite patch plus auxiliary features
    (normalised hour of day, three clearsky channels) to one PV-yield value."""
    def __init__(self, dropout_proportion=0.1):
        super(Net, self).__init__()
        # Layer-construction order is kept stable so weight initialisation
        # is reproducible under a fixed RNG seed.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=12, kernel_size=5)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=16, kernel_size=5)
        HOURS_OF_DAY_CHANNELS = 1
        CLEARSKY_CHANNELS = 3
        self.fc1 = nn.Linear(16 * 29 * 29, 120)
        self.fc2 = nn.Linear(120 + HOURS_OF_DAY_CHANNELS + CLEARSKY_CHANNELS, 84)
        self.fc3 = nn.Linear(84, 1)
        self.dropout_layer = nn.Dropout(p=dropout_proportion)
    def forward(self, x, hour_of_day, clearsky):
        """Two conv/pool stages, flatten, then an FC head with the auxiliary
        features concatenated before fc2."""
        # conv1 (k=5) shrinks 128 -> 124; max-pooling halves it to 62.
        h = self.pool(F.relu(self.conv1(x)))
        h = self.dropout_layer(h)
        # conv2 (k=5) shrinks 62 -> 58; pooling halves it to 29.
        h = self.pool(F.relu(self.conv2(h)))
        # Flatten to (batch, 16*29*29) for the fully-connected head.
        h = h.view(-1, 16 * 29 * 29)
        h = self.dropout_layer(h)
        h = F.relu(self.fc1(h))
        h = self.dropout_layer(h)
        # Append hour-of-day and clearsky features to the image embedding.
        h = torch.cat((h, hour_of_day, clearsky), dim=1)
        h = F.relu(self.fc2(h))
        return self.fc3(h)
# Model on GPU in float16 to halve memory use.
# NOTE(review): plain SGD on an fp16 model has no loss scaling; gradients
# can underflow — confirm training is numerically stable.
net = Net().cuda().half()
optimizer = optim.SGD(net.parameters(), lr=0.01)
loss_func = nn.MSELoss()      # training objective
mae_loss_func = nn.L1Loss()   # companion metric (MAE)
# Per-reporting-period loss history, appended to during training.
train_losses = []
train_mae_losses = []
test_losses = []
test_mae_losses = []
%%time
STATS_PERIOD = 1000
TRAINING_BATCH_SIZE = 128
TESTING_BATCH_SIZE = 256
TESTING_BATCH_INDEX = testing_index[:TESTING_BATCH_SIZE]
TESTING_INPUTS = sat_data_master_cuda[TESTING_BATCH_INDEX]
TESTING_TARGET = pv_data_cuda[TESTING_BATCH_INDEX]
TESTING_HOURS_OF_DAY = hours_of_day[TESTING_BATCH_INDEX]
TESTING_CLEARSKY = clearsky[TESTING_BATCH_INDEX]
running_train_loss = 0.0
running_train_mae = 0.0
t0 = time.time()
training_index_len_minus_1 = len(training_index)-1
for i_batch in range(20000 * 4 * 3):
print('\rBatch: {:4d}'.format(i_batch + 1), end='', flush=True)
# Create batch
batch_index = np.random.randint(low=0, high=training_index_len_minus_1, size=TRAINING_BATCH_SIZE)
batch_index = training_index[batch_index]
inputs = sat_data_master_cuda[batch_index]
hours_of_day_for_batch = hours_of_day[batch_index]
clearsky_for_batch = clearsky[batch_index]
target = pv_data_cuda[batch_index]
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
net.train()
outputs = net(inputs, hours_of_day_for_batch, clearsky_for_batch)
train_loss = loss_func(outputs, target)
train_loss.backward()
optimizer.step()
running_train_loss += train_loss.item()
# MAE
train_mae = mae_loss_func(outputs, target)
running_train_mae += train_mae.item()
# print statistics
if i_batch % STATS_PERIOD == STATS_PERIOD - 1: # print every STATS_PERIOD mini-batches
t1 = time.time()
# Train loss
mean_train_loss = running_train_loss / STATS_PERIOD
train_losses.append(mean_train_loss)
mean_train_mae = running_train_mae / STATS_PERIOD
train_mae_losses.append(mean_train_mae)
# Test loss
net.eval()
test_outputs = net(TESTING_INPUTS, TESTING_HOURS_OF_DAY, TESTING_CLEARSKY)
test_loss = loss_func(test_outputs, TESTING_TARGET).item()
test_losses.append(test_loss)
test_mae = mae_loss_func(test_outputs, TESTING_TARGET).item()
test_mae_losses.append(test_mae)
print(
'\n time = {:.2f} milli seconds per batch.\n'
' train loss = {:8.5f}\n'
' train MAE = {:8.5f}\n'
' test loss = {:8.5f}\n'
' test MAE = {:8.5f}'.format(
((t1 - t0) / STATS_PERIOD) * 1000,
mean_train_loss,
mean_train_mae,
test_loss,
test_mae
))
running_train_loss = 0.0
running_train_mae = 0.0
t0 = time.time()
print('Finished Training')
# Plot MSE (the training objective) and MAE histories; the x-axis is in
# units of STATS_PERIOD batches.
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True, figsize=(20, 10))
ax1.plot(test_losses, label='testing')
ax1.plot(train_losses, label='training')
ax1.set_title('MSE (training objective)')
ax1.set_ylabel('MSE')
ax1.legend()
ax2.plot(test_mae_losses, label='testing')
ax2.plot(train_mae_losses, label='training')
ax2.set_title('MAE')
ax2.set_ylabel('MAE')
ax2.legend();
# Compute the MAE over the entire test set, batch by batch.
# (The original comment said "MAPE", but mae_loss_func is nn.L1Loss, i.e. MAE.)
net.eval()
start_i = 0
mae_on_all_testset = []
with torch.no_grad():  # inference only: skip autograd bookkeeping
    # FIX: was `start_i < len(testing_index) - 1`, which could skip the
    # final sample when the set size is one past a batch boundary.
    while start_i < len(testing_index):
        end_i = start_i + TESTING_BATCH_SIZE
        test_index_batch = testing_index[start_i:end_i]
        start_i = end_i
        inputs = sat_data_master_cuda[test_index_batch]
        testing_hour_of_day = hours_of_day[test_index_batch]
        testing_clearsky = clearsky[test_index_batch]
        target = pv_data_cuda[test_index_batch]
        output = net(inputs, testing_hour_of_day, testing_clearsky)
        mae = mae_loss_func(output, target).item()
        mae_on_all_testset.append(mae)
# NOTE(review): np.mean weights a partial final batch equally with full
# batches; weight by batch size for an exact dataset-level MAE.
np.mean(mae_on_all_testset)
%%time
# Plot some results!
#batch_index = np.random.randint(low=0, high=len(testing_index)-1, size=BATCH_SIZE)
# Take a contiguous slice of the test set so the time series plots read naturally.
START = 500
batch_index = range(START, START+TESTING_BATCH_SIZE + 512)
batch_index = testing_index[batch_index]
inputs = sat_data_master_cuda[batch_index]
testing_hour_of_day = hours_of_day[batch_index]
testing_clearsky = clearsky[batch_index]
target = pv_data_cuda[batch_index]
net.eval()
# NOTE(review): inference without torch.no_grad() builds an autograd graph
# over 768 samples — wrap in no_grad to save GPU memory.
output = net(inputs, testing_hour_of_day, testing_clearsky)
# Show one input satellite frame from the batch.
i = 30
plt.imshow(
    inputs[i, 0].to(device=torch.device('cpu'), dtype=torch.float32),
    origin='upper')
output[i, 0].detach().cpu()
target[i, 0].detach().cpu()
# Predicted vs target PV yield: raw 5-minutely, hourly rolling means, and
# the (standardised) clearsky irradiance for context.
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, figsize=(13,10))
#dt_index = datetime_index[batch_index]
ax1.set_title('5-minutely data')
ax1.plot(output[:, 0].detach().cpu(), label='net output')
ax1.plot(target[:, 0].detach().cpu(), label='target')
ax2.set_title('Hourly rolling means')
ax2.plot(pd.Series(output[:, 0].detach().cpu()).rolling(12, center=True).mean().values, label='net output (hourly rolling mean)')
ax2.plot(pd.Series(target[:, 0].detach().cpu()).rolling(12, center=True).mean().values, label='target (hourly rolling mean)')
ax3.plot(testing_clearsky.detach().cpu())
ax3.set_title('Clearsky irradiance (scaled to have mean=0 and std=1)')
ax1.legend()
ax2.legend()
ax1.set_ylabel('PV yield')
ax2.set_ylabel('PV yield')
ax3.set_xlabel('timestep (5 minutes between timesteps)')
fig.tight_layout();
np.unique(datetime_index[batch_index].date)
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
# Read the example frame and report its type and dimensions.
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)  # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Collapse an RGB image to a single grayscale channel.

    The result has one channel, so display it with
    plt.imshow(gray, cmap='gray'). If the image was instead loaded with
    cv2.imread() (BGR channel order), use cv2.COLOR_BGR2GRAY.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def grayscale_HSV(img):
    """Convert an RGB image to the HSV colour space.

    Despite the name, this returns the full 3-channel HSV image
    (H = [:, :, 0], S = [:, :, 1], V = [:, :, 2]).
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth `img` with a square Gaussian kernel of side `kernel_size`."""
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    return blurred
def region_of_interest(img, vertices):
    """Black out everything outside the polygon defined by `vertices`.

    `vertices` should be a numpy array of integer points. Works for both
    single-channel and multi-channel images.
    """
    # Start from an all-black mask the same shape as the input.
    mask = np.zeros_like(img)
    # Colour images need one 255 per channel as the fill value.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon white on the mask ...
    cv2.fillPoly(mask, vertices, fill_color)
    # ... and keep only the input pixels where the mask is non-zero.
    return cv2.bitwise_and(img, mask)
def single_line_points(img, lines):
    """Convert a (slope, intercept) pair into pixel endpoints of one lane line.

    The line runs from the bottom of the image up to 64% of its height.
    `lines` is a 2-sequence (slope, intercept); returns [[x1, y1, x2, y2]].
    """
    slope, intercept = lines[0], lines[1]
    y_bottom = int(img.shape[0])           # bottom edge of the frame
    y_top = int(0.64 * y_bottom)           # stop drawing near the horizon
    # Invert y = slope*x + intercept to recover x at each y.
    x_bottom = int((y_bottom - intercept) / slope)
    x_top = int((y_top - intercept) / slope)
    return [[x_bottom, y_bottom, x_top, y_top]]
def remove_outliers(lines):
    """Drop (slope, intercept) pairs lying more than 2 standard deviations
    from the column-wise median (strict inequality on both columns)."""
    center = np.median(lines, axis=0)
    spread = np.std(lines, axis=0)
    lower = center - spread * 2
    upper = center + spread * 2
    return [
        [slope, intercept]
        for slope, intercept in lines
        if lower[0] < slope < upper[0] and lower[1] < intercept < upper[1]
    ]
def averaged_line(img,lines):
    """Separate Hough segments into left/right lanes and average each into one line.

    Returns None when `lines` is None; otherwise a list of two
    [[x1, y1, x2, y2]] entries (left line, right line). Segments with
    inclination outside 15-40 degrees are discarded; if either side ends
    up with no segments, hard-coded fallback slope/intercept pairs are
    drawn instead.
    """
    right_lane = []
    left_lane = []
    if lines is None:
        return None
    for line in lines:
        for x1,y1,x2,y2 in line:
            # Skip vertical segments (undefined slope).
            if (x2-x1)!=0:
                slope = (y2-y1)/(x2-x1) # calculate slope
                theta = np.arctan(slope) * 180 / np.pi
                # Keep only segments whose inclination is between 15 and 40
                # degrees in magnitude.
                if (15>abs(theta) or 40<abs(theta)):
                    continue
                # NOTE(review): arctan of a finite slope is never NaN/inf,
                # so this guard appears unreachable.
                if(math.isnan(theta) or math.isinf(theta)):
                    continue
                intercept = y1 - slope*x1 # calculate intercept
                # Negative-slope segments are treated as the left lane
                # (presumably because the image y-axis points down — confirm).
                if theta < 0: ## finds slopes less than zero
                    left_lane.append([slope,intercept])
                    #print(theta)
                else:
                    right_lane.append([slope,intercept])
    ## removing any outlier in left and right lane
    # Fallback: if either side found no segments, draw fixed default lines.
    if left_lane == [] or right_lane == [] :
        left_lane_clean = [-1.4,650]
        left_single_line = single_line_points(img,left_lane_clean)
        right_lane_clean = [1.7,-50]
        right_single_line = single_line_points(img,right_lane_clean)
        combined_new_line = [left_single_line, right_single_line]
    else:
        # Remove statistical outliers, then average slope/intercept per side.
        left_lane_clean = remove_outliers(left_lane)
        left_lane_average = np.mean(left_lane_clean, axis = 0)
        right_lane_clean = remove_outliers(right_lane)
        right_lane_average = np.mean(right_lane_clean, axis = 0)
        left_single_line = single_line_points(img,left_lane_average)
        right_single_line = single_line_points(img,right_lane_average)
        combined_new_line = [left_single_line, right_single_line]
        #print('right_lane_clean = ', right_lane_clean)
    ### taking average of slope and intercept to form one line
    #print('left_lane_average =',left_lane_average)
    #print('right_lane_average =',right_lane_average)
    #print("combined_new_line=",combined_new_line)
    return combined_new_line
def draw_lines(img, lines):
    """Draw `lines` as thick red segments on a black canvas.

    Returns a NEW image the same shape as `img` (the input is not
    modified); blend the result over the original with weighted_img().
    """
    canvas = np.zeros_like(img)
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(canvas, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return canvas
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend a line image over the original frame.

    `img` is the hough_lines() output: a black image with lines drawn on it.
    `initial_img` is the unprocessed frame; both must share the same shape.
    Returns initial_img * α + img * β + γ.
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
# Run the full lane-finding pipeline on every file in test_images/ and save
# the annotated results to test_images_output/.
import os
test_image_dir = "test_images/"
test_images = os.listdir(test_image_dir)
for img in test_images:
    image = mpimg.imread(test_image_dir + img)
    # Resize so the hard-coded region-of-interest vertices below apply.
    image_resize =cv2.resize(image,(960,540))
    lane_image = np.copy(image_resize)
    print(img)
    # grayscale the image
    gray = grayscale(lane_image)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # Define our parameters for Canny and apply
    low_threshold = 50
    high_threshold = 150
    # NOTE(review): this local `canny` shadows the canny() helper function.
    canny= cv2.Canny(blur_gray, low_threshold, high_threshold)
    # Define the region of interest and create a masked image
    imshape = lane_image.shape
    vertices = np.array([[(150,imshape[0]),(950, imshape[0]), (550, 320), (440,320)]], dtype=np.int32)
    masked_image = region_of_interest(canny, vertices)
    # Define the Hough transform parameters
    rho = 1
    theta = np.pi/180
    threshold = 35
    minLineLength = 5
    maxLineGap = 2
    # Run Hough on the edge-detected image
    lines = cv2.HoughLinesP(masked_image, rho, theta, threshold, np.array([]), minLineLength, maxLineGap)
    print("lines =", lines)
    print("line_shape =", lines.shape)
    print("lines_length =",len(lines))
    # Average the segments into one left and one right lane line
    combined_new_line = averaged_line(lane_image,lines)
    # Draw the two lines on a blank canvas and blend over the frame
    line_image = draw_lines(lane_image, combined_new_line)
    weighted_image = weighted_img(line_image, lane_image)
    plt.imshow(weighted_image)
    plt.show()
    plt.imsave('test_images_output/' + img, weighted_image)
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Full lane-finding pipeline for a single RGB video frame.

    Returns a 3-channel image with the averaged left/right lane lines
    drawn over the original frame.
    """
    frame = np.copy(image)
    # 1. Grayscale, then Gaussian smoothing with a 5x5 kernel.
    blurred = gaussian_blur(grayscale(frame), 5)
    # 2. Canny edge detection.
    edges = cv2.Canny(blurred, 50, 150)
    # 3. Keep only the trapezoidal region where the lane is expected.
    imshape = frame.shape
    vertices = np.array([[(150,imshape[0]),(950, imshape[0]), (550, 320), (440,320)]], dtype=np.int32)
    masked = region_of_interest(edges, vertices)
    # 4. Probabilistic Hough transform to find line segments
    #    (rho=1 px, theta=1 deg, threshold=15, minLineLength=20, maxLineGap=10).
    segments = cv2.HoughLinesP(masked, 1, np.pi/180, 15, np.array([]), 20, 10)
    # 5. Average the segments into one left and one right lane line.
    lane_lines = averaged_line(frame, segments)
    # 6. Draw the two lines and blend them onto the original frame.
    overlay = draw_lines(frame, lane_lines)
    return weighted_img(overlay, frame)
```
Let's try the one with the solid white lane on the right first ...
```
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
def process_image_challenge(image):
    """Lane-detection pipeline hardened for the challenge video.

    Adds an HSV color mask (yellow + white lane paint) and darkens the
    rest of the frame before edge detection, so shadows and changes in
    pavement color do not produce spurious edges.  Returns a 3-channel
    color image with the averaged lane lines drawn on the frame.
    """
    frame = np.copy(image)
    hsv = grayscale_HSV(frame)
    # Isolate yellow lane paint in HSV space.
    yellow_mask = cv2.inRange(hsv, np.array([20, 100, 100]), np.array([40, 255, 255]))
    # Isolate white lane paint (any hue, low saturation, high value).
    white_mask = cv2.inRange(hsv, np.array([0, 0, 215]), np.array([180, 40, 255]))
    color_mask = cv2.bitwise_or(yellow_mask, white_mask)
    # Darken the grayscale frame, then brighten only the masked lane colors.
    darkened = (grayscale(frame) / 3).astype(np.uint8)
    highlighted = cv2.bitwise_or(darkened, color_mask)
    # Low-pass (Gaussian) filter to suppress high-frequency noise.
    smoothed = gaussian_blur(highlighted, 7)
    # Canny edge detection with a 1:3 low/high threshold ratio.
    edges = cv2.Canny(smoothed, 50, 150)
    # Restrict to the trapezoidal region of interest.
    height = frame.shape[0]
    roi_vertices = np.array([[(150, height), (950, height), (550, 320), (440, 320)]],
                            dtype=np.int32)
    masked_edges = region_of_interest(edges, roi_vertices)
    # Probabilistic Hough transform: rho=1 px, theta=1 degree,
    # vote threshold 15, min segment length 20 px, max gap 10 px.
    segments = cv2.HoughLinesP(masked_edges, 1, np.pi / 180, 15,
                               np.array([]), 20, 10)
    # Average segments into single left/right lines and blend onto the frame.
    lane_lines = averaged_line(frame, segments)
    overlay = draw_lines(frame, lane_lines)
    return weighted_img(overlay, frame)
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image_challenge)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
# Stochastic Volatility model
## Imports & Settings
```
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
import pymc3 as pm
from pymc3.distributions.timeseries import GaussianRandomWalk
sns.set_style('whitegrid')
# model_path = Path('models')
```
## Model assumptions
Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
$$\begin{align*}
\sigma &\sim \text{Exponential}(50)\\
\nu &\sim \text{Exponential}(.1)\\
s_i &\sim \text{Normal}(s_{i-1}, \sigma^{-2})\\
\log(r_i) &\sim t(\nu, 0, \exp(-2 s_i))
\end{align*}$$
Here, $r$ is the daily return series and $s$ is the latent log volatility process.
## Get Return Data
First we load some daily returns of the S&P 500.
```
prices = pd.read_hdf('../data/assets.h5', key='sp500/stooq').loc['2000':, 'close']
log_returns = np.log(prices).diff().dropna()
ax = log_returns.plot(figsize=(15, 4),
title='S&P 500 | Daily Log Returns',
rot=0)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.0%}'.format(y)))
sns.despine()
plt.tight_layout();
```
As you can see, the volatility seems to change over time quite a bit while clustering around certain time-periods, most notably the 2009 financial crash.
## Specify Model in PyMC3
Specifying the model in `PyMC3` mirrors its statistical specification.
```
# Stochastic volatility model (Hoffman 2011, p. 21):
#   sigma ~ Exponential(50)        -- random-walk step size
#   s_i   ~ Normal(s_{i-1}, sigma) -- latent log-volatility process
#   nu    ~ Exponential(0.1)       -- Student-t degrees of freedom
#   r_i   ~ StudentT(nu, lam=exp(-2 s_i)) -- observed log returns
with pm.Model() as model:
    # Step size of the Gaussian random walk on log-volatility.
    step_size = pm.Exponential('sigma', 50.)
    # Latent log-volatility: one random-walk step per observed return.
    s = GaussianRandomWalk('s', sd=step_size,
                           shape=len(log_returns))
    # Degrees of freedom of the Student-t likelihood (captures fat tails).
    nu = pm.Exponential('nu', .1)
    # Observed returns; lam = exp(-2 s) is the t precision,
    # so the implied volatility (scale) is exp(s).
    r = pm.StudentT('r', nu=nu,
                    lam=pm.math.exp(-2*s),
                    observed=log_returns)
# Render the model's dependency graph (requires graphviz).
pm.model_to_graphviz(model)
```
## Fit Model
For this model, the full maximum a posteriori (MAP) point is degenerate and has infinite density. NUTS, however, gives the correct posterior.
```
with model:
trace = pm.sample(tune=2000,
draws=5000,
chains=4,
cores=1,
target_accept=.9)
```
Optionally, persist result as pickle:
```
# with open('model_vol.pkl', 'wb') as buff:
# pickle.dump({'model': model, 'trace': trace}, buff)
```
## Evaluate results
### Trace Plot
```
pm.traceplot(trace, varnames=['sigma', 'nu']);
```
Looking at the returns over time and overlaying the estimated standard deviation we can see how the model tracks the volatility over time.
### In-Sample Predictions
```
pm.trace_to_dataframe(trace).info()
fig, ax = plt.subplots(figsize=(15, 5))
log_returns.plot(ax=ax, lw=.5, xlim=('2000', '2020'), rot=0,
title='In-Sample Fit of Stochastic Volatility Model')
ax.plot(log_returns.index, np.exp(trace[s]).T, 'r', alpha=.03, lw=.5);
ax.set(xlabel='Time', ylabel='Returns')
ax.legend(['S&P 500 (log returns)', 'Stochastic Volatility Model'])
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.0%}'.format(y)))
sns.despine()
fig.tight_layout();
```
| github_jupyter |
# Fusing graphblas.matrix_multiply with graphblas.apply
This example will go over how to use the `--graphblas-structuralize` and `--graphblas-optimize` passes from `graphblas-opt` to fuse `graphblas.matrix_multiply` ops with `graphblas.apply` ops into `graphblas.matrix_multiply_generic` ops.
Let's first import some necessary libraries.
```
import tempfile
from mlir_graphblas.cli import GRAPHBLAS_OPT_EXE
```
Since [sparse tensor encodings](https://mlir.llvm.org/docs/Dialects/SparseTensorOps/#sparsetensorencodingattr) can be very verbose in MLIR, let's import some helpers to make the MLIR code more readable.
```
from mlir_graphblas.tools import tersify_mlir
```
## Fusion Details
Recall that `graphblas.matrix_multiply` ops can lower into `graphblas.matrix_multiply_generic` ops, which take blocks that specify exact behavior at several points during the matrix multiply. One of those blocks is a "transform_out" block.
Since `graphblas.apply` ops only change tensors in an element-wise fashion, we can perform these element-wise changes in the "transform_out" block of a `graphblas.matrix_multiply_generic` op if the `graphblas.apply` op is run on the result of a `graphblas.matrix_multiply` op.
## Simple Fusion
Here, we'll show the simplest example of how we can fuse a `graphblas.matrix_multiply` op with a `graphblas.apply` op.
```
mlir_text = """
#CSR64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
#CSC64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
func @fuse_adjacent(%A: tensor<?x?xf64, #CSR64>, %B: tensor<?x?xf64, #CSC64>, %thunk: f64) -> tensor<?x?xf64, #CSR64> {
%C = graphblas.matrix_multiply %A, %B { semiring = "plus_plus" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64>
%apply_result = graphblas.apply %C, %thunk { apply_operator = "min" } : (tensor<?x?xf64, #CSR64>, f64) to tensor<?x?xf64, #CSR64>
return %apply_result : tensor<?x?xf64, #CSR64>
}
"""
with tempfile.NamedTemporaryFile() as temp:
temp_file_name = temp.name
with open(temp_file_name, 'w') as f:
f.write(mlir_text)
temp.flush()
output_mlir = ! cat $temp_file_name | $GRAPHBLAS_OPT_EXE --graphblas-structuralize --graphblas-optimize
output_mlir = "\n".join(output_mlir)
output_mlir = tersify_mlir(output_mlir)
print(output_mlir)
```
Note how this function now only has one op from the GraphBLAS dialect. Notice how this one op, i.e. the `graphblas.matrix_multiply_generic`, has a "transform_out" block that performs the exact behavior specified by the `graphblas.apply` op in the original code.
It's noteworthy that this fusion also works if the `graphblas.matrix_multiply` op takes a mask. Rather than demonstrating this explicitly, we'll leave it as an exercise for the reader, as it's fairly straightforward.
If the intermediate result from the `graphblas.matrix_multiply` op is used in other places outside of the `graphblas.apply` op, this fusion cannot apply.
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Explainability-with-Amazon-SageMaker-Debugger" data-toc-modified-id="Explainability-with-Amazon-SageMaker-Debugger-1">Explainability with Amazon SageMaker Debugger</a></span><ul class="toc-item"><li><span><a href="#Introduction-" data-toc-modified-id="Introduction--1.1">Introduction <a id="intro"></a></a></span><ul class="toc-item"><li><span><a href="#Saving-tensors" data-toc-modified-id="Saving-tensors-1.1.1">Saving model parameters</a></span></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-1.1.2">Analysis</a></span></li></ul></li><li><span><a href="#Section-1---Setup-" data-toc-modified-id="Section-1---Setup--1.2">Section 1 - Setup <a id="setup"></a></a></span><ul class="toc-item"><li><span><a href="#1.1-Import-necessary-libraries" data-toc-modified-id="1.1-Import-necessary-libraries-1.2.1">1.1 Import necessary libraries</a></span></li><li><span><a href="#1.2-AWS-region-and--IAM-Role" data-toc-modified-id="1.2-AWS-region-and--IAM-Role-1.2.2">1.2 AWS region and IAM Role</a></span></li><li><span><a href="#1.3-S3-bucket-and-prefix-to-hold-training-data,-debugger-information-and-model-artifact" data-toc-modified-id="1.3-S3-bucket-and-prefix-to-hold-training-data,-debugger-information-and-model-artifact-1.2.3">1.3 S3 bucket and prefix to hold training data, debugger information and model artifact</a></span></li></ul></li><li><span><a href="#Section-2---Data-preparation-" data-toc-modified-id="Section-2---Data-preparation--1.3">Section 2 - Data preparation <a id="prep-data"></a></a></span></li><li><span><a href="#Section-3---Train-XGBoost-model-in-Amazon-SageMaker-with--debugger-enabled.-" data-toc-modified-id="Section-3---Train-XGBoost-model-in-Amazon-SageMaker-with--debugger-enabled.--1.4">Section 3 - Train XGBoost model in Amazon SageMaker with debugger enabled. 
<a id="train"></a></a></span><ul class="toc-item"><li><span><a href="#3.1-Install-the-'smdebug'-open-source-library" data-toc-modified-id="3.1-Install-the-'smdebug'-open-source-library-1.4.1">3.1 Install the 'smdebug' open source library</a></span></li><li><span><a href="#3.2-Build-the-XGBoost-container" data-toc-modified-id="3.2-Build-the-XGBoost-container-1.4.2">3.2 Build the XGBoost container</a></span></li><li><span><a href="#3.3-Enabling-Debugger-in-Estimator-object" data-toc-modified-id="3.3-Enabling-Debugger-in-Estimator-object-1.4.3">3.3 Enabling Debugger in Estimator object</a></span><ul class="toc-item"><li><span><a href="#DebuggerHookConfig" data-toc-modified-id="DebuggerHookConfig-1.4.3.1">DebuggerHookConfig</a></span></li><li><span><a href="#Rules" data-toc-modified-id="Rules-1.4.3.2">Rules</a></span></li></ul></li><li><span><a href="#3.4-Result" data-toc-modified-id="3.4-Result-1.4.4">3.4 Result</a></span></li><li><span><a href="#3.5-Check-the-status-of-the-Rule-Evaluation-Job" data-toc-modified-id="3.5-Check-the-status-of-the-Rule-Evaluation-Job-1.4.5">3.5 Check the status of the Rule Evaluation Job</a></span></li></ul></li><li><span><a href="#Section-4---Analyze-debugger-output-" data-toc-modified-id="Section-4---Analyze-debugger-output--1.5">Section 4 - Analyze debugger output <a id="analyze-debugger-ouput"></a></a></span><ul class="toc-item"><li><span><a href="#Retrieving-and-Analyzing-tensors" data-toc-modified-id="Retrieving-and-Analyzing-tensors-1.5.1">Retrieving and Analyzing model parameters</a></span></li><li><span><a href="#Plot-Performance-metrics" data-toc-modified-id="Plot-Performance-metrics-1.5.2">Plot Performance metrics</a></span></li><li><span><a href="#Feature-importance" data-toc-modified-id="Feature-importance-1.5.3">Feature importance</a></span></li><li><span><a href="#SHAP" data-toc-modified-id="SHAP-1.5.4">SHAP</a></span></li><li><span><a href="#Global-explanations" data-toc-modified-id="Global-explanations-1.5.5">Global 
explanations</a></span></li><li><span><a href="#Local-explanations" data-toc-modified-id="Local-explanations-1.5.6">Local explanations</a></span><ul class="toc-item"><li><span><a href="#Force-plot" data-toc-modified-id="Force-plot-1.5.6.1">Force plot</a></span></li><li><span><a href="#Stacked-force-plot" data-toc-modified-id="Stacked-force-plot-1.5.6.2">Stacked force plot</a></span></li></ul></li><li><span><a href="#Outliers" data-toc-modified-id="Outliers-1.5.7">Outliers</a></span></li></ul></li><li><span><a href="#Conclusion" data-toc-modified-id="Conclusion-1.6">Conclusion</a></span></li></ul></li></ul></div>
# Explainability with Amazon SageMaker Debugger
_**Explain a XGBoost model that predicts an individual's income**_
This notebook demonstrates how to use Amazon SageMaker Debugger to capture the feature importance and SHAP values for a XGBoost model.
*This notebook was created and tested on an ml.t2.medium notebook instance.*
## Introduction <a id='intro'></a>
Amazon SageMaker Debugger is the capability of Amazon SageMaker that allows debugging machine learning training. The capability helps you monitor the training jobs in near real time using rules and alert you once it has detected inconsistency in training.
Using Amazon SageMaker Debugger is a two step process: Saving model parameters and Analysis.
Let's look at each one of them closely.
### Saving model parameters
In machine learning process, model parameters are updated every forward and backward pass and can describe the state of the training job at any particular instant in an ML lifecycle. Amazon SageMaker Debugger allows you to capture the model parameters and save them for analysis. Although XGBoost is not a deep learning algorithm, Amazon SageMaker Debugger is highly customizable and can help you interpret results by saving insightful metrics. For example, performance metrics or the importance of features at different frequencies. Refer to [SageMaker Debugger documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) for details on how to save the metrics you want.
The saved model parameters in this notebook include feature importance and SHAP values for all features in the dataset. The feature importance and SHAP values are what we will use to provide local and global explainability.
### Analysis
After the model parameters are saved, perform automatic analysis by running debugging ***Rules***.
On a very broad level, a rule is Python code used to detect certain conditions during training.
Some of the conditions that a data scientist training an algorithm may care about are monitoring for gradients getting too large or too small, detecting overfitting, and so on.
Amazon SageMaker Debugger comes pre-packaged with certain rules that can be invoked on Amazon SageMaker. Users can also write their own rules using the Amazon SageMaker Debugger APIs.
For more information about automatic analysis using a rule, see the [rules documentation](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md).
## Section 1 - Setup <a id='setup'></a>
In this section, we will import the necessary libraries, set up variables, and examine the dataset that was used to train the XGBoost model to predict an individual's income.
Let's start by specifying:
* The AWS region used to host your model.
* The IAM role associated with this SageMaker notebook instance.
* The S3 bucket used to store the data used to train the model, save debugger information during training and the trained model artifact.
<font color='red'>**Important**</font>: To use the new Debugger features, you need to upgrade the SageMaker Python SDK and the SMDebug libary. In the following cell, change the third line to `install_needed=True` and run to upgrade the libraries.
```
import sys
import IPython
install_needed = False # Set to True to upgrade
if install_needed:
print("installing deps and restarting kernel")
!{sys.executable} -m pip install -U sagemaker
!{sys.executable} -m pip install -U smdebug
IPython.Application.instance().kernel.do_shutdown(True)
```
### 1.1 Import necessary libraries
```
import boto3
import sagemaker
import os
import pandas as pd
from sagemaker import get_execution_role
```
### 1.2 AWS region and IAM Role
```
region = boto3.Session().region_name
print("AWS Region: {}".format(region))
role = get_execution_role()
print("RoleArn: {}".format(role))
```
### 1.3 S3 bucket and prefix to hold training data, debugger information, and model artifact
```
bucket = sagemaker.Session().default_bucket()
prefix = "DEMO-smdebug-xgboost-adult-income-prediction"
```
## Section 2 - Data preparation <a id='prep-data'></a>
We'll be using the [Adult Census dataset](https://archive.ics.uci.edu/ml/datasets/adult) for this exercise.
This data was extracted from the [1994 Census bureau database](http://www.census.gov/en.html) by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics), with the task being to predict if an individual person makes over 50K a year.
We'll be using the [SHAP](https://github.com/slundberg/shap) library to perform visual analysis. The library contains the dataset pre-loaded which we will utilize here.
```
!python -m pip install shap
import shap
X, y = shap.datasets.adult()
X_display, y_display = shap.datasets.adult(display=True)
feature_names = list(X.columns)
feature_names
# create a train/test split
from sklearn.model_selection import train_test_split # For splitting the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7)
X_train_display = X_display.loc[X_train.index]
train = pd.concat(
[pd.Series(y_train, index=X_train.index, name="Income>50K", dtype=int), X_train], axis=1
)
test = pd.concat(
[pd.Series(y_test, index=X_test.index, name="Income>50K", dtype=int), X_test], axis=1
)
# Use 'csv' format to store the data
# The first column is expected to be the output column
train.to_csv("train.csv", index=False, header=False)
test.to_csv("validation.csv", index=False, header=False)
boto3.Session().resource("s3").Bucket(bucket).Object(
os.path.join(prefix, "data/train.csv")
).upload_file("train.csv")
boto3.Session().resource("s3").Bucket(bucket).Object(
os.path.join(prefix, "data/validation.csv")
).upload_file("validation.csv")
```
## Section 3 - Train XGBoost model in Amazon SageMaker with debugger enabled. <a id='train'></a>
Now train an XGBoost model with Amazon SageMaker Debugger enabled and monitor the training jobs. This is done using the Amazon SageMaker Estimator API. While the training job is running, use Amazon SageMaker Debugger API to access saved model parameters in real time and visualize them. You can rely on Amazon SageMaker Debugger to take care of downloading a fresh set of model parameters every time you query for them.
Amazon SageMaker Debugger is available in Amazon SageMaker XGBoost container version 0.90-2 or later. If you want to use XGBoost with Amazon SageMaker Debugger, you have to specify version `0.90-2` (or later) when retrieving the container image with `sagemaker.image_uris.retrieve`.
### 3.2 Build the XGBoost container
Amazon SageMaker Debugger is available in Amazon SageMaker XGBoost container version 0.90-2 or later.
```
container = sagemaker.image_uris.retrieve("xgboost", region, "0.90-2")
base_job_name = "demo-smdebug-xgboost-adult-income-prediction-classification"
bucket_path = "s3://{}".format(bucket)
hyperparameters = {
"max_depth": "5",
"eta": "0.2",
"gamma": "4",
"min_child_weight": "6",
"subsample": "0.7",
"silent": "0",
"objective": "binary:logistic",
"num_round": "51",
}
save_interval = 5
```
### 3.3 Enabling Debugger in Estimator object
#### DebuggerHookConfig
Enabling Amazon SageMaker Debugger in training job can be accomplished by adding its configuration into Estimator object constructor:
```python
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
estimator = Estimator(
...,
debugger_hook_config = DebuggerHookConfig(
s3_output_path="s3://{bucket_name}/{location_in_bucket}", # Required
collection_configs=[
CollectionConfig(
name="metrics",
parameters={
"save_interval": "10"
}
)
]
)
)
```
Here, the `DebuggerHookConfig` object instructs `Estimator` what data we are interested in.
Two parameters are provided in the example:
- `s3_output_path`: Points to an S3 bucket where you intend to store model parameters. Amount of data saved depends on multiple factors, major ones are training job, data set, model, frequency of saving model parameters. This S3 bucket should be in your AWS account so that you have full access to control over the stored data. **Note**: The S3 bucket should be originally created in the same Region where your training job is running, otherwise you might run into problems with cross-Region access.
- `collection_configs`: It enumerates named collections of model parameters to save. Collections are a convenient way to organize relevant model parameters under same umbrella to make it easy to navigate them during analysis. In this particular example, you are interested in a single collection named metrics. You also configured Amazon SageMaker Debugger to save metrics every 10 iterations. See [Collection](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md#collection) documentation for all parameters that are supported by Collections and DebuggerConfig documentation for more details about all parameters DebuggerConfig supports.
#### Rules
Enabling Rules in training job can be accomplished by adding the `rules` configuration into Estimator object constructor.
- `rules`: This parameter will accept a list of rules you want to evaluate against training jobs.
For rules, Amazon SageMaker Debugger supports two types:
- SageMaker Rules: These are rules specially curated by the data science and engineering teams in Amazon SageMaker which you can opt to evaluate against your training job.
- Custom Rules: You can optionally choose to write your own rule as a Python source file and have it evaluated against your training job.
To provide Amazon SageMaker Debugger to evaluate this rule, you would have to provide the S3 location of the rule source and the evaluator image.
In this example, you will use a Amazon SageMaker's LossNotDecreasing rule, which helps you identify if you are running into a situation where the training loss is not going down.
```python
from sagemaker.debugger import rule_configs, Rule
estimator = Estimator(
...,
rules=[
Rule.sagemaker(
rule_configs.loss_not_decreasing(),
rule_parameters={
"collection_names": "metrics",
"num_steps": "10",
},
),
],
)
```
- `rule_parameters`: In this parameter, you provide the runtime values of the parameter in your constructor.
You can still choose to pass in other values which may be necessary for your rule to be evaluated.
In this example, you will use Amazon SageMaker's LossNotDecreasing rule to monitor the `metrics` collection.
The rule will alert you if the loss value in the `metrics` collection has not decreased for more than 10 steps.
```
from sagemaker.debugger import rule_configs, Rule, DebuggerHookConfig, CollectionConfig
from sagemaker.estimator import Estimator
xgboost_estimator = Estimator(
role=role,
base_job_name=base_job_name,
instance_count=1,
instance_type="ml.m5.4xlarge",
image_uri=container,
hyperparameters=hyperparameters,
max_run=1800,
debugger_hook_config=DebuggerHookConfig(
s3_output_path=bucket_path, # Required
collection_configs=[
CollectionConfig(name="metrics", parameters={"save_interval": str(save_interval)}),
CollectionConfig(
name="feature_importance", parameters={"save_interval": str(save_interval)}
),
CollectionConfig(name="full_shap", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="average_shap", parameters={"save_interval": str(save_interval)}),
],
),
rules=[
Rule.sagemaker(
rule_configs.loss_not_decreasing(),
rule_parameters={
"collection_names": "metrics",
"num_steps": str(save_interval * 2),
},
),
],
)
```
With the next step, start a training job by using the Estimator object you created above. This job is started in an asynchronous, non-blocking way. This means that control is passed back to the notebook and further commands can be run while the training job is progressing.
```
from sagemaker.session import TrainingInput
train_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "data/train.csv"), content_type="csv"
)
validation_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "data/validation.csv"), content_type="csv"
)
xgboost_estimator.fit(
{"train": train_input, "validation": validation_input},
# This is a fire and forget event. By setting wait=False, you submit the job to run in the background.
# Amazon SageMaker starts one training job and release control to next cells in the notebook.
# Follow this notebook to see status of the training job.
wait=False,
)
```
### 3.4 Result
As a result of the above command, Amazon SageMaker starts **one training job and one rule job** for you. The first one is the job that produces the model parameters to be analyzed. The second one analyzes the model parameters to check if `train-error` and `validation-error` are not decreasing at any point during training.
Check the status of the training job below.
After your training job is started, Amazon SageMaker starts a rule-execution job to run the LossNotDecreasing rule.
The cell below will block till the training job is complete.
```
import time

# Poll the SageMaker training job and its rule-evaluation job, printing
# both statuses every 10 seconds (at most 36 polls, i.e. ~6 minutes).
for _ in range(36):
    job_name = xgboost_estimator.latest_training_job.name
    client = xgboost_estimator.sagemaker_session.sagemaker_client
    description = client.describe_training_job(TrainingJobName=job_name)
    training_job_status = description["TrainingJobStatus"]
    # First (and only) rule attached to this estimator: LossNotDecreasing.
    rule_job_summary = xgboost_estimator.latest_training_job.rule_job_summary()
    rule_evaluation_status = rule_job_summary[0]["RuleEvaluationStatus"]
    print(
        "Training job status: {}, Rule Evaluation Status: {}".format(
            training_job_status, rule_evaluation_status
        )
    )
    # Stop polling once the training job reaches a terminal state.
    if training_job_status in ["Completed", "Failed"]:
        break
    time.sleep(10)
```
### 3.5 Check the status of the Rule Evaluation Job
To get the rule evaluation job that Amazon SageMaker started for you, run the command below. The results show you the `RuleConfigurationName`, `RuleEvaluationJobArn`, `RuleEvaluationStatus`, `StatusDetails`, and `RuleEvaluationJobArn`.
If the model parameters meet a rule evaluation condition, the rule execution job throws a client error with `RuleEvaluationConditionMet`.
The logs of the rule evaluation job are available in the Cloudwatch Logstream `/aws/sagemaker/ProcessingJobs` with `RuleEvaluationJobArn`.
You can see that once the rule execution job starts, it identifies the loss not decreasing situation in the training job, it raises the `RuleEvaluationConditionMet` exception, and it ends the job.
```
xgboost_estimator.latest_training_job.rule_job_summary()
```
## Section 4 - Analyze debugger output <a id='analyze-debugger-ouput'></a>
Now that you've trained the system, analyze the data. Here, you focus on after-the-fact analysis.
You import a basic analysis library, which defines the concept of trial, which represents a single training run.
### Retrieving and Analyzing tensors
Before getting to analysis, here are some notes on concepts being used in Amazon SageMaker Debugger that help with analysis.
- ***Trial*** - Object that is a centerpiece of the SageMaker Debugger API when it comes to getting access to model parameters. It is a top level abstract that represents a single run of a training job. All model parameters emitted by a training job are associated with its trial.
- ***Tensor*** - Object that represents model parameters, such as weights, gradients, accuracy, and loss, that are saved during training job.
For more details on aforementioned concepts as well as on SageMaker Debugger API in general (including examples) see [SageMaker Debugger Analysis API](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md) documentation.
In the following code cell, use a ***Trial*** to access model parameters. You can do that by inspecting currently running training job and extract necessary parameters from its debug configuration to instruct SageMaker Debugger where the data you are looking for is located. Keep in mind the following:
- model parameters are being stored in your own S3 bucket to which you can navigate and manually inspect its content if desired.
- You might notice a slight delay before trial object is created. This is normal as SageMaker Debugger monitors the corresponding bucket and waits until model parameters to appear. The delay is introduced by less than instantaneous upload of model parameters from a training container to your S3 bucket.
```
from smdebug.trials import create_trial
s3_output_path = xgboost_estimator.latest_job_debugger_artifacts_path()
trial = create_trial(s3_output_path)
```
You can list all model parameters that you want to analyze. Each one of these names is the name of a model parameter. The name is a combination of the feature name, which in these cases, is auto-assigned by XGBoost, and whether it's an evaluation metric, feature importance, or SHAP value.
```
trial.tensor_names()
```
For each model parameter, we can get the values at all saved steps.
```
trial.tensor("average_shap/f1").values()
```
### Plot Performance metrics
You can also create a simple function that visualizes the training and validation errors as the training progresses.
The error should get smaller over time, as the system converges to a good solution.
```
from itertools import islice
import matplotlib.pyplot as plt
import re
MAX_PLOTS = 35
def get_data(trial, tname):
    """Fetch the saved values of one tensor across all recorded steps.

    Parameters
    ----------
    trial : smdebug trial object exposing ``tensor(name)``.
    tname : name of the tensor to read.

    Returns
    -------
    (steps, values) : the recorded step numbers and, aligned with them,
        the tensor value at each step.
    """
    tensor = trial.tensor(tname)
    steps = tensor.steps()
    return steps, [tensor.value(step) for step in steps]
def match_tensor_name_with_feature_name(tensor_name, feature_names=feature_names):
    """Map an XGBoost auto-assigned tensor name (ending in ``f<i>``) to the
    human-readable feature name at index ``i``.

    Returns ``tensor_name`` unchanged when no feature index matches.
    """
    suffix = tensor_name.split("/")[-1]
    for index, name in enumerate(feature_names):
        if suffix == "f{}".format(index):
            return name
    return tensor_name
def plot_collection(trial, collection_name, regex=".*", figsize=(8, 6)):
    """Plot every tensor of ``collection_name`` whose name matches ``regex``.

    At most ``MAX_PLOTS`` matching tensors are drawn; each curve is labeled
    with its mapped feature name and the legend is placed outside the axes.
    """
    fig, ax = plt.subplots(figsize=figsize)
    tensor_names = trial.collection(collection_name).tensor_names
    selected = (name for name in tensor_names if re.match(regex, name))
    for name in islice(selected, MAX_PLOTS):
        steps, values = get_data(trial, name)
        ax.plot(steps, values, label=match_tensor_name_with_feature_name(name))
    ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    ax.set_xlabel("Iteration")
plot_collection(trial, "metrics")
```
### Feature importance
You can also visualize the feature priorities as determined by
[xgboost.get_score()](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.get_score).
If you instructed Estimator to log the `feature_importance` collection, all importance types supported by `xgboost.get_score()` will be available in the collection.
```
def plot_feature_importance(trial, importance_type="weight"):
    """Plot the ``feature_importance`` collection for a single importance type.

    Raises ValueError when ``importance_type`` is not one of the types
    xgboost.get_score() supports.
    """
    supported = ("weight", "gain", "cover", "total_gain", "total_cover")
    if importance_type not in supported:
        raise ValueError(f"{importance_type} is not one of the supported importance types.")
    plot_collection(trial, "feature_importance", regex=f"feature_importance/{importance_type}/.*")
plot_feature_importance(trial, importance_type="cover")
```
### SHAP
[SHAP](https://github.com/slundberg/shap) (SHapley Additive exPlanations) is
another approach to explain the output of machine learning models.
SHAP values represent a feature's contribution to a change in the model output.
You instructed Estimator to log the average SHAP values in this example so the SHAP values (as calculated by [xgboost.predict(pred_contribs=True)](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.predict)) will be available the `average_shap` collection.
```
plot_collection(trial, "average_shap")
```
### Global explanations
Global explanatory methods allow understanding the model and its feature contributions in aggregate over multiple datapoints. Here we show an aggregate bar plot that plots the mean absolute SHAP value for each feature.
Specifically, the below plot indicates that the value of relationship (Wife=5, Husband=4, Own-child=3, Other-relative=2, Unmarried=1, Not-in-family=0) plays the most important role in predicting the income probability being higher than 50K.
```
shap_values = trial.tensor("full_shap/f0").value(trial.last_complete_step)
shap_no_base = shap_values[:, :-1]
shap_base_value = shap_values[0, -1]
shap.summary_plot(shap_no_base, plot_type="bar", feature_names=feature_names)
shap_base_value
```
The detailed summary plot below can provide more context over the above bar chart. It tells which features are most important and, in addition, their range of effects over the dataset. The color allows us to match how changes in the value of a feature effect the change in prediction.
The 'red' indicates higher value of the feature and 'blue' indicates lower (normalized over the features). This allows conclusions such as 'increase in age leads to higher log odds for prediction, eventually leading to `True` predictions more often.
```
shap.summary_plot(shap_no_base, X_train)
```
### Local explanations
Local explainability aims to explain model behavior for a fixed input point. This can be used for either auditing models before deployment or to provide explanations for specific inference predictions.
```
shap.initjs()
```
#### Force plot
A force plot explanation shows how features are contributing to push the model output from the base value (the average model output over the dataset) to the model output. Features pushing the prediction higher are shown in **red**, those pushing the prediction lower are in **blue**.
The plot below indicates that for this particular data point the prediction probability (0.48) is higher than the average (~0.2) primarily because this person is in a relationship (`Relationship = Wife`), and to a smaller degree because of the higher-than-average age. Similarly, the model reduces the probability due to specific `Sex` and `Race` values, indicating the existence of bias in model behavior (possibly due to bias in the data).
```
shap.force_plot(
shap_base_value,
shap_no_base[100, :],
X_train_display.iloc[100, :],
link="logit",
matplotlib=False,
)
```
#### Stacked force plot
SHAP allows stacking multiple force plots after rotating them 90 degrees to understand the explanations for multiple datapoints. If Javascript is enabled, this plot is interactive in the notebook, making it possible to see how the output changes for each feature independently. This stacking of force plots provides a balance between local and global explainability.
```
import numpy as np
N_ROWS = shap_no_base.shape[0]
N_SAMPLES = min(100, N_ROWS)
sampled_indices = np.random.randint(N_ROWS, size=N_SAMPLES)
shap.force_plot(
shap_base_value,
shap_no_base[sampled_indices, :],
X_train_display.iloc[sampled_indices, :],
link="logit",
)
```
### Outliers
Outliers are extreme values that deviate from other observations on data. It's useful to understand the influence of various features for outlier predictions to determine if it's a novelty, an experimental error, or a shortcoming in the model.
Here we show force plot for prediction outliers that are on either side of the baseline value.
```
# top outliers
from scipy import stats
N_OUTLIERS = 3 # number of outliers on each side of the tail
shap_sum = np.sum(shap_no_base, axis=1)
z_scores = stats.zscore(shap_sum)
outlier_indices = (np.argpartition(z_scores, -N_OUTLIERS)[-N_OUTLIERS:]).tolist()
outlier_indices += (np.argpartition(z_scores, N_OUTLIERS)[:N_OUTLIERS]).tolist()
for fig_index, outlier_index in enumerate(outlier_indices, start=1):
shap.force_plot(
shap_base_value,
shap_no_base[outlier_index, :],
X_train_display.iloc[outlier_index, :],
matplotlib=False,
link="logit",
)
```
## Conclusion
This notebook discussed the importance of explainability for improved ML
adoption. We introduced the Amazon SageMaker Debugger capability with built-in
model parameter collections to enable model explainability.
The notebook walked you through training an ML model for a financial services use case
of individual income prediction. We further analyzed the global and local
explanations of the model by visualizing the captured model parameters.
| github_jupyter |
# Programming in Python
## Session 1
### Aim of the Session
Learn/review the basics
- what is ...
- how to ...
### 'Hello World!'
```
# the culturally-expected introductory statement
```
### Literals
Values of a _type_, presented literally
```
# example name type designation
42 # integer int
2.016 # float float*
"Homo sapiens" # string str
```
- int: whole numbers e.g. 1, 1000, 6000000000
- float: 'floating point' non-whole numbers e.g. 1.9, 30.01, 10e3, 1e-3
- string: ordered sequence of characters, enclosed in quotation marks (single, double, _triple_)
```
# type conversions
```
#### Aside - Comments
Comments are preceded by a **#**, and are completely ignored by the python interpreter.
Comments can be on their own line or after a line of code.
Comments are an incredibly useful way to keep track of what you are doing in
your code. Use comments to document what you do as much as possible, it will
pay off in the long run.
### Exercises 1
```
# print some strings
# print some numbers (ints or floats)
# print multiple values of different types all at once
# (hints: use comma to separate values with a space, or + to join strings)
# print a string containing quote marks
```
### Variables
Store values (information) in memory, and (re-)use them. We give variables names (identifiers) so that we have a means of referring to the information on demand.
```
# variable assignment is done with '='
```
#### Variable naming
Rules:
- identifier lookup is case-sensitive
- `myname` & `MyName` are different
- must be unique in your working environment
- existing variable will be __over-written without warning__
- cannot start with a number, or any special symbol (e.g. $, %, @, -, etc...) except for "_" (underscore), which is OK.
- cannot have any spaces or special characters (except for "-" (hyphen) and "_" (underscore))
Conventions/good practice:
- identifiers (usually) begin with a lowercase letter
- followed by letters, numbers, underscores
- use a strategy to make reading easier
- `myName`
- `exciting_variable`
- long, descriptive > short, vague
### String Formatting
Create formatted strings, with variable values substituted in.
```
# two ways to do it in Python
name = 'Florence'
age = 73
print('%s is %d years old' % (name, age)) # common amongst many programming languages
print('{} is {} years old'.format(name, age)) # perhaps more consistent with stardard Python syntax
```
### Operators & Operands
Using Python as a calculator: `+`, `-`, `/`, `*` etc are _operators_, the values/variables that they work on are _operands_.
```
# standard mathematical operations can be performed in Python
# and some less common ones
```
_Note: check out numpy, scipy, stats modules if you want to do a lot of maths_
### Data Structures
Programming generally requires building/working with much larger and more complex sets of data than the single values/words/sentences that we have looked at so far. In fact, finding ways to operate effectively (and efficiently) on complex structures in order to extract/produce information, _is_ (data) programming.
Python has two most commonly-used structures for storing multiple pieces of data - _lists_ and _dictionaries_. Let's look at these, and a few more, now.
#### Lists
```
# sequence of entries, in order and of any type
numbers = [32, 72, 42]
mixed_list = [1, 'b', 3.0, 'd']
empty_list = []
another_empty_list = list()
letters = list('abcdefghi')
```
#### What more can we do with a list?
```
# creating a sensible list
# Sugar per person (g per day) in 2004: ref: https://www.gapminder.org/data/
top_suger_consumers = ['United States', 'Canada', 'Estonia', 'Croatia', 'New Zealand', 'Switzerland']
# adding/removing entries
## the next top country is Denmark -- can we add it to the list?
## how can we keep only countries from the Americas and Europe in this list?
```
#### Objects, Methods, and How To Get Help
In Python, everything is an _object_ - some value(s), packaged up with a set of things that can be done with/to it (___methods___), and pieces of information about it (___attributes___). This makes it very easy to perform the most commonly-needed operations for that/those type of value(s). The language has a standard syntax for accessing methods:
```
string_object = 'data for 2004 based on a rough extrapolation'
# methods - object.something()
print(string_object.upper())
# more...
# help()
# sets
```
### Exercises 2
```
# add 'New Zealand' back to the list of top_suger_consumers
# access the fifth entry of the list
# access the last entry of the list
# join the list with a new list from another 8 countries
next_high_suger_consumers = ['Barbados', 'Costa Rica',
'Saint Kitts and Nevis', 'Trinidad and Tobago',
'Brazil', 'Grenada', 'Iceland', 'Belgium']
# access the last entry of the list now
```
### Range
We can access range of items from the list by defining the start index and stop index, separated by a colon symbol ":".
For example, to access items 3-5 of a list, we use the syntax `list[2:5]`
```
top_suger_consumers[2:5]
```
Please note that such ranges in Python are defined as **left inclusive and right exclusive**, meaning that the item at the stop index is excluded when accessing the items. In this case, that is the 6th item (index 5).
```
# access top 4 items
# access last 4 items
```
#### Dictionaries
```
# collection of paired information - keys and values
student_marks = {'United States': 191.78, 'Costa Rica': 156.16, 'Belgium': 150.69}
empty_dict = {}
another_empty_dict = dict()
# accessing dict entries
# adding/changing/deleting entries
```
#### Mutable?
Object types can be divided into two categories - mutable & immutable. _Mutable_ objects can be changed 'in-place' - their value can be updated, added to, re-ordered etc without the need to create a whole new object every time. _Immutable_ types cannot be changed in place - once they have a value, this value cannot be altered. Though, of course, it can __always__ be overwritten.
```
# lists are mutable
top_consumers_europe = ["Estonia", "Croatia", "Switzerland", "Denmark", "Belgium"]
# fix: the list defined above is top_consumers_europe, not `cities`
top_consumers_europe[4] = 'Iceland'  # in-place replacement works: lists are mutable
# strings are immutable
funfact = "These lessons use: sad cancer examples"
funfact[17] = 'd'  # raises TypeError on purpose: strings cannot be changed in place
print(funfact)
```
### Exercise 3
Below is a set of commands working with a list of common cancers.
- First, the list is extended by adding 'Liver' onto the end.
- Then 'Prostate' - the fourth element in the list - is assigned to the variable fourth_common_cancer.
Some of the code has been removed (replaced with ---). Fill in the blanks in the code block to make it work.
```
# Examples of most common cancers worldwide
# Ref: https://www.wcrf.org/int/cancer-facts-figures/worldwide-data
common_cancers = ['Lung', 'Breast', 'Colorectum', 'Prostate', 'Stomach']
# add 'Liver' onto the end of the list
common_cancers.---('Liver')
# access the fourth entry in the list
fourth_common_cancer = common_cancers[---]
```
### Debugging Exercise
```
coffee_break = ['coffee', 'tea'; 'cookies', 'fruits']
coffee_break.append['water']
print(coffee_break)
```
| github_jupyter |
# Метод сопряжённых градиентов (Conjugate gradient method): гадкий утёнок
## На прошлом занятии...
1. Методы спуска
2. Направление убывания
3. Градиентный метод
4. Правила выбора шага
5. Теоремы сходимости
6. Эксперименты
## Система линейных уравнений vs. задача безусловной минимизации
Рассмотрим задачу
$$
\min_{x \in \mathbb{R}^n} \frac{1}{2}x^{\top}Ax - b^{\top}x,
$$
где $A \in \mathbb{S}^n_{++}$.
Из необходимого условия экстремума имеем
$$
Ax^* = b
$$
Также обозначим $f'(x_k) = Ax_k - b = r_k$
## Как решить систему $Ax = b$?
- Прямые методы основаны на матричных разложениях:
- Плотная матрица $A$: для размерностей не больше нескольких тысяч
- Разреженная (sparse) матрица $A$: для размерностей порядка $10^4 - 10^5$
- Итерационные методы: хороши во многих случаях, единственный подход для задач с размерностью $ > 10^6$
## Немного истории...
M. Hestenes и E. Stiefel предложили *метод сопряжённых градиентов* для решения систем линейных уравнений в 1952 году как **прямой** метод.
Также долгое время считалось, что метод представляет только теоретический интерес поскольку
- метод сопряжённых градиентов не работает на логарифмической линейке
- метод сопряжённых градиентов имеет небольшое преимущество перед исключением Гаусса при вычислениях на калькуляторе
- для вычислений на "human computers" слишком много обменов данными
<img src="./human_computer.jpeg">
Метод сопряжённых градиентов необходимо рассматривать как **итерационный метод**, то есть останавливаться до точной сходимости!
Подробнее [здесь](https://www.siam.org/meetings/la09/talks/oleary.pdf)
## Метод сопряжённых направлений
В градиентном спуске направления убывания - анти-градиенты, но для функций с плохо обусловленным гессианом сходимость **медленная**.
**Идея:** двигаться вдоль направлений, которые гарантируют сходимость за $n$ шагов.
**Определение.** Множество ненулевых векторов $\{p_0, \ldots, p_l\}$ называется *сопряжённым* относительно матрицы $A \in \mathbb{S}^n_{++}$, если
$$
p^{\top}_iAp_j = 0, \qquad i \neq j
$$
**Утверждение.** Для любой $x_0 \in \mathbb{R}^n$ последовательность $\{x_k\}$, генерируемая методом сопряжённых направлений, сходится к решению системы $Ax = b$ максимум за $n$ шагов.
```python
def ConjugateDirections(x0, A, b, p):
    """Solve A x = b by exact minimization along given A-conjugate directions.

    x0 -- starting point; A -- SPD matrix; b -- right-hand side;
    p  -- sequence of mutually A-conjugate direction vectors.
    """
    x = x0
    residual = A.dot(x) - b
    for direction in p:
        step = -residual.dot(direction) / direction.dot(A.dot(direction))
        x = x + step * direction
        residual = A.dot(x) - b
    return x
```
### Примеры сопряжённых направлений
- Собственные векторы матрицы $A$
- Для любого набора из $n$ векторов можно провести аналог ортогонализации Грама-Шмидта и получить сопряжённые направления
**Вопрос:** что такое ортогонализация Грама-Шмидта? :)
### Геометрическая интерпретация (Mathematics Stack Exchange)
<center><img src="./cg.png" ></center>
## Метод сопряжённых градиентов
**Идея:** новое направление $p_k$ ищется в виде $p_k = -r_k + \beta_k p_{k-1}$, где $\beta_k$ выбирается, исходя из требования сопряжённости $p_k$ и $p_{k-1}$:
$$
\beta_k = \dfrac{p^{\top}_{k-1}Ar_k}{p^{\top}_{k-1}Ap_{k-1}}
$$
Таким образом, для получения следующего сопряжённого направления $p_k$ необходимо хранить только сопряжённое направление $p_{k-1}$ и остаток $r_k$ с предыдущей итерации.
**Вопрос:** как находить размер шага $\alpha_k$?
## Сопряжённость сопряжённых градиентов
**Теорема**
Пусть после $k$ итераций $x_k \neq x^*$. Тогда
- $\langle r_k, r_i \rangle = 0, \; i = 1, \ldots k - 1$
- $\mathtt{span}(r_0, \ldots, r_k) = \mathtt{span}(r_0, Ar_0, \ldots, A^kr_0)$
- $\mathtt{span}(p_0, \ldots, p_k) = \mathtt{span}(r_0, Ar_0, \ldots, A^kr_0)$
- $p_k^{\top}Ap_i = 0$, $i = 1,\ldots,k-1$
### Теоремы сходимости
**Теорема 1.** Если матрица $A$ имеет только $r$ различных собственных значений, то метод сопряжённых градиентов cойдётся за $r$ итераций.
**Теорема 2.** Имеет место следующая оценка сходимости
$$
\| x_{k} - x^* \|_A \leq 2\left( \dfrac{\sqrt{\kappa(A)} - 1}{\sqrt{\kappa(A)} + 1} \right)^k \|x_0 - x^*\|_A,
$$
где $\|x\|_A = x^{\top}Ax$ и $\kappa(A) = \frac{\lambda_1(A)}{\lambda_n(A)}$ - число обусловленности матрицы $A$, $\lambda_1(A) \geq ... \geq \lambda_n(A)$ - собственные значения матрицы $A$
**Замечание:** сравните коэффициент геометрической прогрессии с аналогом в градиентном спуске.
### Интерпретации метода сопряжённых градиентов
- Градиентный спуск в пространстве $y = Sx$, где $S = [p_0, \ldots, p_n]$, в котором матрица $A$ становится диагональной (или единичной в случае ортонормированности сопряжённых направлений)
- Поиск оптимального решения в [Крыловском подпространстве](https://stanford.edu/class/ee364b/lectures/conj_grad_slides.pdf) $\mathcal{K}_k(A) = \{b, Ab, A^2b, \ldots A^{k-1}b\}$
$$
x_k = \arg\min_{x \in \mathcal{K}_k} f(x)
$$
- Однако естественный базис Крыловского пространства неортогональный и, более того, **плохо обусловлен**.
**Упражнение** Проверьте численно, насколько быстро растёт обусловленность матрицы из векторов $\{b, Ab, ... \}$
- Поэтому его необходимо ортогонализовать, что и происходит в методе сопряжённых градиентов
### Основное свойство
$$
A^{-1}b \in \mathcal{K}_n(A)
$$
Доказательство
- Теорема Гамильтона-Кэли: $p(A) = 0$, где $p(\lambda) = \det(A - \lambda I)$
- $p(A)b = A^nb + a_1A^{n-1}b + \ldots + a_{n-1}Ab + a_n b = 0$
- $A^{-1}p(A)b = A^{n-1}b + a_1A^{n-2}b + \ldots + a_{n-1}b + a_nA^{-1}b = 0$
- $A^{-1}b = -\frac{1}{a_n}(A^{n-1}b + a_1A^{n-2}b + \ldots + a_{n-1}b)$
### Сходимость по функции и по аргументу
- Решение: $x^* = A^{-1}b$
- Минимум функции:
$$
f^* = \frac{1}{2}b^{\top}A^{-\top}AA^{-1}b - b^{\top}A^{-1}b = -\frac{1}{2}b^{\top}A^{-1}b = -\frac{1}{2}\|x^*\|^2_A
$$
- Оценка сходимости по функции:
$$
f(x) - f^* = \frac{1}{2}x^{\top}Ax - b^{\top}x + \frac{1}{2}\|x^*\|_A^2 =\frac{1}{2}\|x\|_A^2 - x^{\top}Ax^* + \frac{1}{2}\|x^*\|_A^2 = \frac{1}{2}\|x - x^*\|_A^2
$$
### Доказательство сходимости
- $x_k$ лежит в $\mathcal{K}_k$
- $x_k = \sum\limits_{i=1}^k c_i A^{i-1}b = p(A)b$, где $p(x)$ некоторый полином степени не выше $k-1$
- $x_k$ минимизирует $f$ на $\mathcal{K}_k$, отсюда
$$
2(f_k - f^*) = \inf_{x \in \mathcal{K}_k} \|x - x^* \|^2_A = \inf_{\mathrm{deg}(p) < k} \|(p(A) - A^{-1})b\|^2_A
$$
- Спектральное разложение $A = U\Lambda U^*$ даёт
$$
2(f_k - f^*) = \inf_{\mathrm{deg}(p) < k} \|(p(\Lambda) - \Lambda^{-1})d\|^2_{\Lambda} = \inf_{\mathrm{deg}(p) < k} \sum_{i=1}^n\frac{d_i^2 (\lambda_ip(\lambda_i) - 1)^2}{\lambda_i} = \inf_{\mathrm{deg}(q) \leq k, q(0) = 1} \sum_{i=1}^n\frac{d_i^2 q(\lambda_i)^2}{\lambda_i}
$$
- Сведём задачу к поиску некоторого многочлена
$$
f_k - f^* \leq \left(\sum_{i=1}^n \frac{d_i^2}{2\lambda_i}\right) \inf_{\mathrm{deg}(q) \leq k, q(0) = 1}\left(\max_{i=1,\ldots,n} q(\lambda_i)^2 \right) = \frac{1}{2}\|x^*\|^2_A \inf_{\mathrm{deg}(q) \leq k, q(0) = 1}\left(\max_{i=1,\ldots,n} q(\lambda_i)^2 \right)
$$
- Пусть $A$ имеет $m$ различных собственных значений, тогда для
$$
r(y) = \frac{(-1)^m}{\lambda_1 \cdot \ldots \cdot \lambda_m}(y - \lambda_i)\cdot \ldots \cdot (y - \lambda_m)
$$
выполнено $\mathrm{deg}(r) = m$ и $r(0) = 1$
- Значение для оптимального полинома степени не выше $k$ оценим сверху значением для полинома $r$ степени $m$
$$
0 \leq f_k - f^* \leq \frac{1}{2}\|x^*\|_A^2 \max_{i=1,\ldots,m} r(\lambda_i) = 0
$$
- Метод сопряжённых градиентов сошёлся за $m$ итераций
### Улучшенная версия метода сопряжённых градиентов
На практике используются следующие формулы для шага $\alpha_k$ и коэффициента $\beta_{k}$:
$$
\alpha_k = \dfrac{r^{\top}_k r_k}{p^{\top}_{k}Ap_{k}} \qquad \beta_k = \dfrac{r^{\top}_k r_k}{r^{\top}_{k-1} r_{k-1}}
$$
**Вопрос:** чем они лучше базовой версии?
### Псевдокод метода сопряжённых градиентов
```python
def ConjugateGradientQuadratic(x0, A, b, eps):
    """Conjugate gradient method for min 0.5 x'Ax - b'x, i.e. solving Ax = b.

    Fix: the iterate must be initialized from x0 -- the original body updated
    (and returned) an undefined variable ``x``.
    """
    x = x0
    r = A.dot(x0) - b
    p = -r
    while np.linalg.norm(r) > eps:
        Ap = A.dot(p)  # hoisted: reuse the one matrix-vector product per step
        alpha = r.dot(r) / p.dot(Ap)
        x = x + alpha * p
        r_next = r + alpha * Ap
        beta = r_next.dot(r_next) / r.dot(r)
        p = -r_next + beta * p
        r = r_next
    return x
```
## Метод сопряжённых градиентов для неквадратичной функции
**Идея:** использовать градиенты $f'(x_k)$ неквадратичной функции вместо остатков $r_k$ и линейный поиск шага $\alpha_k$ вместо аналитического вычисления. Получим метод Флетчера-Ривса.
```python
def ConjugateGradientFR(f, gradf, x0, eps):
    # Fletcher-Reeves nonlinear CG: gradients of f play the role of the
    # residuals of the quadratic case, and the step size comes from a line
    # search instead of the closed-form formula.
    x = x0
    grad = gradf(x)
    p = -grad
    while np.linalg.norm(gradf(x)) > eps:
        # StepSearch is a pseudocode placeholder for any line-search routine
        # (e.g. backtracking with Wolfe conditions); kwargs are its options.
        alpha = StepSearch(x, f, gradf, **kwargs)
        x = x + alpha * p
        grad_next = gradf(x)
        # Fletcher-Reeves coefficient: ratio of squared gradient norms.
        beta = grad_next.dot(grad_next) / grad.dot(grad)
        p = -grad_next + beta * p
        grad = grad_next
        # Restart: discard accumulated history and fall back to steepest
        # descent; restart_condition is a placeholder (e.g. every n steps).
        if restart_condition:
            p = -gradf(x)
    return x
```
### Теорема сходимости
**Теорема.** Пусть
- множество уровней $\mathcal{L}$ ограничено
- существует $\gamma > 0$: $\| f'(x) \|_2 \leq \gamma$ для $x \in \mathcal{L}$
Тогда
$$
\lim_{j \to \infty} \| f'(x_{k_j}) \|_2 = 0
$$
### Перезапуск (restart)
1. Для ускорения метода сопряжённых градиентов используют технику перезапусков: удаление ранее накопленной истории и перезапуск метода с текущей точки, как будто это точка $x_0$
2. Существуют разные условия, сигнализирующие о том, что надо делать перезапуск, например
- $k = n$
- $\dfrac{|\langle f'(x_k), f'(x_{k-1}) \rangle |}{\| f'(x_k) \|_2^2} \geq \nu \approx 0.1$
3. Можно показать (см. Nocedal, Wright Numerical Optimization, Ch. 5, p. 125), что запуск метода Флетчера-Ривза без использования перезапусков на некоторых итерациях может приводить к крайне медленной сходимости!
4. Метод Полака-Рибьера и его модификации лишены подобного недостатка.
### Комментарии
- Замечательная методичка "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" размещена [тут](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)
- Помимо метода Флетчера-Ривса существуют другие способы вычисления $\beta_k$: метод Полака-Рибьера, метод Хестенса-Штифеля...
- Для метода сопряжённых градиентов требуется 4 вектора: каких?
- Самой дорогой операцией является умножение матрицы на вектор
## Эксперименты
### Квадратичная целевая функция
```
import numpy as np
n = 100
# Random
A = np.random.randn(n, n)
A = A.T.dot(A)
# Clustered eigenvalues
# A = np.diagflat([np.ones(n//4), 10 * np.ones(n//4), 100*np.ones(n//4), 1000* np.ones(n//4)])
# U = np.random.rand(n, n)
# Q, _ = np.linalg.qr(U)
# A = Q.dot(A).dot(Q.T)
# A = (A + A.T) * 0.5
print("A is normal matrix: ||AA* - A*A|| =", np.linalg.norm(A.dot(A.T) - A.T.dot(A)))
b = np.random.randn(n)
# Hilbert matrix
# A = np.array([[1.0 / (i+j - 1) for i in range(1, n+1)] for j in range(1, n+1)]) + 1e-3 * np.eye(n)
# b = np.ones(n)
f = lambda x: 0.5 * x.dot(A.dot(x)) - b.dot(x)
grad_f = lambda x: A.dot(x) - b
x0 = np.zeros(n)
```
#### Распределение собственных значений
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rc("text", usetex=True)
plt.rc("font", family='serif')
import seaborn as sns
sns.set_context("talk")
eigs = np.linalg.eigvalsh(A)
cond_A = np.linalg.cond(A)
print((np.sqrt(cond_A) - 1) / (np.sqrt(cond_A) + 1))
print((cond_A - 1) / (cond_A + 1))
plt.semilogy(np.unique(eigs))
plt.ylabel("Eigenvalues", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
```
#### Правильный ответ
```
import scipy.optimize as scopt
def callback(x, array):
array.append(x)
scopt_cg_array = []
scopt_cg_callback = lambda x: callback(x, scopt_cg_array)
x = scopt.minimize(f, x0, method="CG", jac=grad_f, callback=scopt_cg_callback)
x = x.x
print("||f'(x*)|| =", np.linalg.norm(A.dot(x) - b))
print("f* =", f(x))
```
#### Реализация метода сопряжённых градиентов
```
def ConjugateGradientQuadratic(x0, A, b, tol=1e-8, callback=None):
    """Conjugate gradient solver for Ax = b with A symmetric positive definite.

    Parameters
    ----------
    x0 : starting point.
    A, b : system matrix (assumed SPD) and right-hand side.
    tol : stop once the residual norm ||Ax - b|| drops below this value.
    callback : optional; called with the current iterate after each update.

    Returns the approximate solution x.
    """
    x = x0
    r = A.dot(x0) - b
    p = -r
    while np.linalg.norm(r) > tol:
        # Hoist the matrix-vector product: it is the dominant cost and the
        # original computed A.dot(p) twice per iteration.
        Ap = A.dot(p)
        alpha = r.dot(r) / p.dot(Ap)
        x = x + alpha * p
        if callback is not None:
            callback(x)
        r_next = r + alpha * Ap
        beta = r_next.dot(r_next) / r.dot(r)
        p = -r_next + beta * p
        r = r_next
    return x
import liboptpy.unconstr_solvers as methods
import liboptpy.step_size as ss
print("\t CG quadratic")
cg_quad = methods.fo.ConjugateGradientQuad(A, b)
x_cg = cg_quad.solve(x0, max_iter=1000, tol=1e-7, disp=True)
print("\t Gradient Descent")
gd = methods.fo.GradientDescent(f, grad_f, ss.ExactLineSearch4Quad(A, b))
x_gd = gd.solve(x0, tol=1e-7, disp=True)
print("Condition number of A =", abs(max(eigs)) / abs(min(eigs)))
```
#### График сходимости
```
plt.figure(figsize=(8,6))
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()], label=r"$\|f'(x_k)\|^{CG}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array[:5000]], label=r"$\|f'(x_k)\|^{CG_{PR}}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2)
plt.legend(loc="best", fontsize=20)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Convergence rate", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
print([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()])
plt.figure(figsize=(8,6))
plt.plot([f(x) for x in cg_quad.get_convergence()], label=r"$f(x^{CG}_k)$", linewidth=2)
plt.plot([f(x) for x in scopt_cg_array], label=r"$f(x^{CG_{PR}}_k)$", linewidth=2)
plt.plot([f(x) for x in gd.get_convergence()], label=r"$f(x^{G}_k)$", linewidth=2)
plt.legend(loc="best", fontsize=20)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Function value", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
```
### Неквадратичная функция
```
import numpy as np
import sklearn.datasets as skldata
import scipy.special as scspec
n = 300
m = 1000
X, y = skldata.make_classification(n_classes=2, n_features=n, n_samples=m, n_informative=n//3)
C = 1
def f(w):
return np.linalg.norm(w)**2 / 2 + C * np.mean(np.logaddexp(np.zeros(X.shape[0]), -y * X.dot(w)))
def grad_f(w):
denom = scspec.expit(-y * X.dot(w))
return w - C * X.T.dot(y * denom) / X.shape[0]
# f = lambda x: -np.sum(np.log(1 - A.T.dot(x))) - np.sum(np.log(1 - x*x))
# grad_f = lambda x: np.sum(A.dot(np.diagflat(1 / (1 - A.T.dot(x)))), axis=1) + 2 * x / (1 - np.power(x, 2))
x0 = np.zeros(n)
print("Initial function value = {}".format(f(x0)))
print("Initial gradient norm = {}".format(np.linalg.norm(grad_f(x0))))
```
#### Реализация метода Флетчера-Ривса
```
def ConjugateGradientFR(f, gradf, x0, num_iter=100, tol=1e-8, callback=None, restart=False):
    """Fletcher-Reeves conjugate gradient method for a non-quadratic f.

    Parameters
    ----------
    f, gradf : objective and its gradient.
    x0 : starting point.
    num_iter : iteration cap.
    tol : stop when the gradient norm falls below this value.
    callback : optional; called with the iterate after each update.
    restart : falsy to disable, or an integer period k to restart from the
        steepest-descent direction every k iterations.

    Returns the final iterate.
    """
    x = x0
    grad = gradf(x)
    p = -grad
    it = 0
    # ``grad`` always holds gradf(x) at the top of the loop, so testing its
    # norm saves the extra gradient evaluation the original made per test.
    while np.linalg.norm(grad) > tol and it < num_iter:
        alpha = utils.backtracking(x, p, method="Wolfe", beta1=0.1, beta2=0.4, rho=0.5, f=f, grad_f=gradf)
        if alpha < 1e-18:
            # Line search collapsed; no further progress is possible.
            break
        x = x + alpha * p
        if callback is not None:
            callback(x)
        grad_next = gradf(x)
        # Fletcher-Reeves coefficient: ratio of squared gradient norms.
        beta = grad_next.dot(grad_next) / grad.dot(grad)
        p = -grad_next + beta * p
        grad = grad_next.copy()
        it += 1
        if restart and it % restart == 0:
            grad = gradf(x)
            p = -grad
    return x
```
#### График сходимости
```
import scipy.optimize as scopt
import liboptpy.restarts as restarts
n_restart = 60
tol = 1e-5
max_iter = 600
scopt_cg_array = []
scopt_cg_callback = lambda x: callback(x, scopt_cg_array)
x = scopt.minimize(f, x0, tol=tol, method="CG", jac=grad_f, callback=scopt_cg_callback, options={"maxiter": max_iter})
x = x.x
print("\t CG by Polak-Rebiere")
print("Norm of garient = {}".format(np.linalg.norm(grad_f(x))))
print("Function value = {}".format(f(x)))
print("\t CG by Fletcher-Reeves")
cg_fr = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))
x = cg_fr.solve(x0, tol=tol, max_iter=max_iter, disp=True)
print("\t CG by Fletcher-Reeves with restart n")
cg_fr_rest = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4,
init_alpha=1.), restarts.Restart(n // n_restart))
x = cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter, disp=True)
print("\t Gradient Descent")
gd = methods.fo.GradientDescent(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))
x = gd.solve(x0, max_iter=max_iter, tol=tol, disp=True)
plt.figure(figsize=(8, 6))
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr.get_convergence()], label=r"$\|f'(x_k)\|^{CG_{FR}}_2$ no restart", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr_rest.get_convergence()], label=r"$\|f'(x_k)\|^{CG_{FR}}_2$ restart", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array], label=r"$\|f'(x_k)\|^{CG_{PR}}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2)
plt.legend(loc="best", fontsize=16)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Convergence rate", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
```
#### Время выполнения
```
%timeit scopt.minimize(f, x0, method="CG", tol=tol, jac=grad_f, options={"maxiter": max_iter})
%timeit cg_fr.solve(x0, tol=tol, max_iter=max_iter)
%timeit cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter)
%timeit gd.solve(x0, tol=tol, max_iter=max_iter)
```
## Резюме
1. Сопряжённые направления
2. Метод сопряжённых градиентов
3. Сходимость
4. Эксперименты
| github_jupyter |
# Reading Data
## Connect to store (using sina local file)
First let's create an empty database with you as a single user
In a real application only admin user should have write permission to the file
```
import os
import sys
import shlex
from subprocess import Popen, PIPE
import kosh
kosh_example_sql_file = "kosh_example_read.sql"
# Create a new store (erase if exists)
store = kosh.create_new_db(kosh_example_sql_file)
```
## Adding datasets to the store
Let's add a dataset and associate hdf5 file with it.
```
dataset = store.create()
dataset.associate("../tests/baselines/node_extracts2/node_extracts2.hdf5", mime_type="hdf5", absolute_path=False)
```
## Querying Data
In Kosh, retrievable data are called "features".
Let's see which features are associated with this dataset:
```
features = dataset.list_features()
print(features)
```
Let's get more information on a specific features
```
info = dataset.describe_feature("node/metrics_5")
print(info)
```
## Opening Data
We might want to simply access the URI (to add data to it, for example).
for this we will need the *id* of the associated_uri
```
associated_id = dataset.search(mime_type="hdf5", ids_only=True)[0]
h5_file = dataset.open(associated_id)
h5_file
```
## Getting Data
Let's access this feature by calling the `get_execution_graph()` function.
This returns a Kosh representation of how to get to a feature's data.
Note that is just a representation (a path) to the data, not the data itself.
```
feature = dataset.get_execution_graph("node/metrics_5")
feature
```
This can be shortened as:
```
feature = dataset["node/metrics_5"]
feature
```
This gives us a handle to this feature's data, no data has actually been read yet.
Let's retrieve the data by calling the `traverse` function. This will connect the feature's origin (uri) to the data, applying any *transformer* or *operator* to it (see other notebooks to learn about these)
```
data = feature.traverse()
print(data)
```
Which is equivalent to:
```
data = feature()
print(data)
```
This is equivalent of what versions 1.1 and below used to do:
```
data = dataset.get("node/metrics_5")
print(data)
```
Note that you can also slice the feature directly
```
data = feature[:]
print(data)
# If you know the dims you can select by value and/or indices
print(dataset.describe_feature("node/metrics_1"))
feature2 = dataset["node/metrics_1"]
data2 = feature2(cycles=slice(0,1), elements=[17, 15])
print(data2.shape)
```
## Associating Multiple Sources
Let's add an image file
```
dataset.associate("../share/icons/png/Kosh_Logo_K_blue.png", mime_type="png", absolute_path=False)
dataset.list_features()
img = dataset["image"]
print(img[:].shape)
try:
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(img[...,-1]) # Plot last channel
except ImportError:
print("You will need matplotlib to plot the picture")
```
We can also retrieve the png as the raw binary data
```
raw = img(format="bytes")
len(raw), type(raw)
```
We can associate many image files but this leads to duplicate "image" feature
```
# let's remove hdf5 for clarity
dataset.dissociate("../tests/baselines/node_extracts2/node_extracts2.hdf5", absolute_path=False)
dataset.list_features()
```
Now let's associate a second image file
```
dataset.associate("../share/icons/png/Kosh_Logo_K_orange.png", mime_type="png", absolute_path=False)
dataset.list_features() # URI is now added to feature to disambiguate them
dataset.describe_feature("image_@_../share/icons/png/Kosh_Logo_K_orange.png")
try:
plt.imshow(dataset.get("image_@_../share/icons/png/Kosh_Logo_K_orange.png")) # Plot last channel
except Exception:
print("With matplotlib you would have seen a ")
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
data = fetch_openml(data_id=1590, as_frame=True)
X = pd.get_dummies(data.data)
y_true = (data.target == '>50K') * 1
sex = data.data[['sex', 'race']]
sex.value_counts()
from fairlearn.metrics import group_summary
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(min_samples_leaf=10, max_depth=4)
classifier.fit(X, y_true)
y_pred = classifier.predict(X)
#group_summary(accuracy_score, y_true, y_pred, sensitive_features=sex)
from fairlearn.metrics import selection_rate_group_summary
#selection_rate_group_summary(y_true, y_pred, sensitive_features=sex)
from fairlearn.widget import FairlearnDashboard
FairlearnDashboard(sensitive_features=sex,
sensitive_feature_names=['sex', 'race'],
y_true=y_true,
y_pred={"initial model": y_pred})
```
Can we find intersectional discrimination with Fairlearn?
```
import numpy as np
X = pd.DataFrame(np.random.randint(0, high=2, size=(100, 3), dtype='l'), columns=['sex', 'race', 'Y'])
X['cnt'] = 1
counts = X.groupby(['sex', 'race']).Y.count()
f = lambda x: [np.random.choice([0,1], 17, p=[0.65, 0.35])[0] for _ in range(x)]
X.at[(X['sex'] == 1) & (X['race'] == 1),'result'] = f(counts.loc[1,1])
X.groupby(['sex', 'race']).agg({'result':'sum', 'Y':['sum', 'count']})
# now let's create a biased scoring function
```
Idea: first sample from the biased distribution p_bias, then calculate the expectancy value of the unbiased distribution p_0 and calculate how much you need to bias p_0 to get the expectancy value of the unbiased distribution p_0 -> p_correction
```
X[(X[['sex', 'race']] == 1).all(1)].shape
# Bug fix: len() was called with no argument (TypeError). The intent is an
# all-ones list indexer with one entry per sensitive-attribute index level.
X.groupby(['sex', 'race']).agg({'result': 'sum', 'Y': ['sum', 'count']}).loc[[1] * len(counts.index.levels)]
# Same idea as a tuple indexer: select the group where every attribute == 1
a = tuple([1 for _ in range(len(counts.index.levels))])
a
counts.loc[a]
def biased_score(df, sensitive_cols, biased_prob):
    """Assign a randomly drawn biased binary score to the intersectional group.

    For rows where every column in sensitive_cols equals 1, draw 'result'
    from {0, 1} with P(0) = biased_prob; other rows are left unassigned (NaN).

    Bug fix: the original used chained indexing
    (df[mask]['result'] = ...), which assigns into a temporary copy and
    never writes back to df. Use df.loc[mask, 'result'] instead.

    Parameters
    ----------
    df: pd.DataFrame
    sensitive_cols: list-str
        columns identifying the sensitive attributes (works for any number
        of columns, not just ['sex', 'race'])
    biased_prob: float
        probability of drawing score 0 for the targeted group

    Returns
    -------
    pd.DataFrame (df, mutated in place)
    """
    # Rows belonging to the intersectional group (all sensitive attrs == 1)
    mask = (df[sensitive_cols] == 1).all(axis=1)
    n_group = int(mask.sum())
    df.loc[mask, 'result'] = np.random.choice([0, 1], size=n_group,
                                              p=[biased_prob, 1 - biased_prob])
    return df
type(counts)
biased_score(X, ['sex', 'race'], 0.3)
def shift_prop(counts, expected_distribution):
    # NOTE(review): this function appears unfinished. It computes the
    # expected per-group counts (total * expected_distribution) but never
    # returns them, and the trailing `counts.sum()` expression has no
    # effect. As written the function always returns None.
    expected_values = counts.sum() * expected_distribution
    counts.sum()
counts
counts.loc[1,:].sum()
i = 1000003054
i
i = i + 1
a = i * 3
i = 2
i
i == 2
type(i)
type("adfaserer")
"1" == 1
```
| github_jupyter |
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Note: Ternary Plots are available in version 1.9.10+
Run pip install plotly --upgrade to update your Plotly version
```
import plotly
plotly.__version__
```
### Basic Ternary Plot with Markers
```
import plotly.plotly as py
import plotly.graph_objs as go
rawData = [
{'journalist':75,'developer':25,'designer':0,'label':'point 1'},
{'journalist':70,'developer':10,'designer':20,'label':'point 2'},
{'journalist':75,'developer':20,'designer':5,'label':'point 3'},
{'journalist':5,'developer':60,'designer':35,'label':'point 4'},
{'journalist':10,'developer':80,'designer':10,'label':'point 5'},
{'journalist':10,'developer':90,'designer':0,'label':'point 6'},
{'journalist':20,'developer':70,'designer':10,'label':'point 7'},
{'journalist':10,'developer':20,'designer':70,'label':'point 8'},
{'journalist':15,'developer':5,'designer':80,'label':'point 9'},
{'journalist':10,'developer':10,'designer':80,'label':'point 10'},
{'journalist':20,'developer':10,'designer':70,'label':'point 11'},
];
def makeAxis(title, tickangle):
    """Build a ternary-axis configuration dict for Plotly.

    Parameters
    ----------
    title: str
        axis title
    tickangle: int
        rotation angle for the tick labels
    """
    axis = dict(title=title, tickangle=tickangle)
    axis['titlefont'] = {'size': 20}
    axis['tickfont'] = {'size': 15}
    axis['tickcolor'] = 'rgba(0,0,0,0)'
    axis['ticklen'] = 5
    axis['showline'] = True
    axis['showgrid'] = True
    return axis
data = [{
'type': 'scatterternary',
'mode': 'markers',
'a': [i for i in map(lambda x: x['journalist'], rawData)],
'b': [i for i in map(lambda x: x['developer'], rawData)],
'c': [i for i in map(lambda x: x['designer'], rawData)],
'text': [i for i in map(lambda x: x['label'], rawData)],
'marker': {
'symbol': 100,
'color': '#DB7365',
'size': 14,
'line': { 'width': 2 }
},
}]
layout = {
'ternary': {
'sum': 100,
'aaxis': makeAxis('Journalist', 0),
'baxis': makeAxis('<br>Developer', 45),
'caxis': makeAxis('<br>Designer', -45)
},
'annotations': [{
'showarrow': False,
'text': 'Simple Ternary Plot with Markers',
'x': 0.5,
'y': 1.3,
'font': { 'size': 15 }
}]
}
fig = {'data': data, 'layout': layout}
py.iplot(fig, validate=False)
```
#### Reference
See https://plotly.com/python/reference/#scatterternary for more information and chart attribute options!
```
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'ternary.ipynb', 'python/ternary-plots/', 'Python Ternary Plots | plotly',
'How to make Ternary plots in Python with Plotly.',
name = 'Ternary Plots',
thumbnail='thumbnail/ternary.jpg', language='python',
page_type='example_index', has_thumbnail='true', display_as='scientific', order=9,
ipynb= '~notebook_demo/39')
```
| github_jupyter |
# **PARAMETER FITTING DETAILED EXAMPLE**
This provides a detailed example of parameter fitting using the python-based tool ``SBstoat``.
Details about the tool can be found at in this [github repository](https://github.com/sys-bio/SBstoat).
# Preliminaries
```
IS_COLAB = True
if IS_COLAB:
!pip install -q SBstoat
!pip install -q tellurium
pass
# Python packages used in this chapter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import urllib.request # use this library to download file from GitHub
import tellurium as te
from SBstoat.namedTimeseries import NamedTimeseries, TIME
from SBstoat.modelFitter import ModelFitter
import SBstoat
```
# Constants and Helper Functions
```
def getSharedCodes(moduleName):
    """
    Obtains common codes from the github repository.

    The module's source is downloaded (on Colab) or read from the local
    checkout, printed, and then exec'd into this module's global namespace
    so that its definitions (models, dataframes, helpers) become available
    in the notebook.

    Parameters
    ----------
    moduleName: str
        name of the python module in the src directory
    """
    # IS_COLAB is a notebook-level flag set in an earlier cell.
    if IS_COLAB:
        url = "https://github.com/sys-bio/network-modeling-summer-school-2021/raw/main/src/%s.py" % moduleName
        # NOTE(review): every module is downloaded to the same local file
        # name, so successive calls overwrite each other — presumably fine
        # because the code is exec'd immediately; confirm if caching matters.
        local_python = "python.py"
        _, _ = urllib.request.urlretrieve(url=url, filename=local_python)
    else:
        # Local run: read directly from the repository's src directory.
        local_python = "../../src/%s.py" % moduleName
    with open(local_python, "r") as fd:
        codeStr = "".join(fd.readlines())
    print(codeStr)
    # Execute into globals() so the shared definitions are injected into
    # this notebook's namespace (e.g. LINEAR_PATHWAY_DF used below).
    exec(codeStr, globals())
# Acquire codes
getSharedCodes("util")
# TESTS
assert(isinstance(LINEAR_PATHWAY_DF, pd.DataFrame))
def plotTS(ts, title=""):
    """
    Plot every column of a timeseries against time, with the legend
    placed outside the axes.

    Parameters
    ----------
    ts: NamedTimeseries
    title: str
        optional plot title
    """
    lines = plt.plot(ts[TIME], ts[ts.colnames])
    _ = plt.legend(lines, ts.colnames, bbox_to_anchor=(1.05, 1), loc='upper left')
    _ = plt.title(title)
```
# Running SBstoat
``SBstoat`` is a python package intended to simplify the programmatic aspects of fitting. The package handles the programming details
of the interactions between the optimization codes (``lmfit``) and ``tellurium`` simulations.
The required inputs to ``SBstoat`` are:
- the model for which parameter values are being estimated;
- observational data; and
- specification of the parameters, their value ranges, and initial values.
For the linear pathway model, we ``LINEAR_PATHWAY_MODEL`` and ``LINEAR_PATHWAY_DF`` for the model and data, respectively.
The description of the parameters is done using a python list of ``SBstoat.Parameter`` objects, as shown below.
```
# Name, minimal value, initial value, and maximum value of each parameter
LINEAR_PATHWAY_PARAMETERS = [
SBstoat.Parameter("k1", lower=1, value=50, upper=100),
SBstoat.Parameter("k2", lower=1, value=50, upper=100),
SBstoat.Parameter("k3", lower=1, value=50, upper=100),
SBstoat.Parameter("k4", lower=1, value=50, upper=100),
]
```
The python class ``ModelFitter`` does fitting for ``SBstoat``. Details of using this can be found below.
```
?ModelFitter
fitter = ModelFitter(LINEAR_PATHWAY_MODEL,
NamedTimeseries(dataframe=LINEAR_PATHWAY_DF),
parametersToFit=LINEAR_PATHWAY_PARAMETERS)
fitter.fitModel()
```
``SBstoat`` provides a textual report of the results of the fit.
```
print(fitter.reportFit())
```
The report is in three sections.
The first section contains measures of the fit quality. The most commonly used measures are chi-square and reduced chi-square.
We want both of these to be "small", although small is relative.
These measures are most useful when comparing different fit results.
The "Variables" section gives parameter estimates. We se that the estimates obtained are fairly close to
the true values in the original models.
The final section provides information about the relationships between parameter estimates. This can be useful
in models where the structure of the model makes it difficult to separate one parameter from another.
In these cases, there will be a large correlation between parameter (absolute) parameter values.
``SBstoat`` provides many plots to aid in understanding the fitting results.
You can see these by typing in a Jupyter code cell ``fitter.pl`` and then pressing the tab key.
Arguably the most useful is ``plotFitAll``, which, for each floating species (column in observed data), plots the fitted and observed values.
This is shown below.
```
fitter.plotFitAll(figsize=(20, 5), numCol=5, color=["red", "blue"], titlePosition=(0.5, 1.05))
```
These fits seem to be quite consistent with the observed data, with the possible exception of ``S5``.
In the latter case, there is considerable variability that likely makes a good fit more difficult.
```
# See the options for plotFitAll
?fitter.plotFitAll
```
If you are interested in a more complete analysis of the residuals, use ``plotResidualsAll``.
```
fitter.plotResidualsAll(figsize=(20, 10))
```
# Workflow for fitting the Linear Pathway Model
Although ``SBstoat`` eliminates the burden of programming details, fitting is often complex.
This is because of the complexity of the fitting surface, as illustrated earlier in this chapter.
This section illustrates how to use ``SBstoat`` to explore complex fitting surfaces.
``SBstoat`` allows you to explore fitting along three dimensions.
1. **Fitting surface**. The fitting surface changes based on the following:
a. the selection of float species (columns in the observed data) that we attempt to fit;
b. the time span we fit over
1. **Optimization Algorithms**. As we noted previously, gradient descent is fast, but it only works well for convex fitting surfaces. We might want to try both gradient descent and differential evolution to see which works best for our model. Also, some optimization algorithms are stochastic, and so the search strategy may also choose to run
the same algorithm multiple times. Finally, it may be desirable to do multiple optimizations in succession, using the results of the $n-1$-st to be the starting point for the $n$-th.
1. **Search start & scope**. This refers to the initial values of parameter values and the range of parameter values that are explored.
In the following explorations of the above dimensions of parameter fitting, we use the above workflow that consists of:
1. Select a subset of the observed data based on a specified time span (in this case, just ending time)
1. Construct a fitter for the linear pathway, observed data, columns to consider in fitting, the fitting methods, and parameter ranges/initial values.
1. Fit the model.
1. Print the fitting report.
1. Plot observed and fitted values.
This workflow is encapsulated in the the function ``doFit``.
The arguments of the function have default that reproduce the
results in the previous section.
```
def doFit(selectedColumns=None,
          endTime=10,
          fitterMethods=["differential_evolution", "leastsq"],
          parametersToFit=LINEAR_PATHWAY_PARAMETERS,
          isTest=False):
    """
    Runs the fitting workflow for the linear pathway model: truncate the
    observed data at endTime, construct a fitter, fit, and (unless in
    test mode) print the report and plot fitted vs observed values.

    Parameters
    ----------
    selectedColumns: list-str
    endTime: int
    fitterMethods: list-str
    parametersToFit: list-SBstoat.Parameter
    isTest: bool
        Test mode

    Returns
    -------
    ModelFitter
    """
    roadrunnerModel = te.loada(LINEAR_PATHWAY_MODEL)
    fullTS = NamedTimeseries(dataframe=LINEAR_PATHWAY_DF)
    # Keep only the observations at or before endTime
    numPoints = sum(1 for t in fullTS[TIME] if t <= endTime)
    truncatedTS = fullTS[:numPoints]
    # Build the fitter and estimate parameters
    fitter = ModelFitter(roadrunnerModel, truncatedTS,
                         selectedColumns=selectedColumns,
                         fitterMethods=fitterMethods,
                         parametersToFit=parametersToFit)
    fitter.fitModel()
    if not isTest:
        print(fitter.reportFit())
        fitter.plotFitAll(figsize=(20, 5), numCol=5, color=["red", "blue"],
                          titlePosition=(0.5, 1.05))
    return fitter
# TESTS
result = doFit(isTest=True)
# Bug fix: the original asserted isinstance(fitter, ModelFitter), but
# 'fitter' is local to doFit and undefined here (NameError); the returned
# fitter is bound to 'result'.
assert(isinstance(result, ModelFitter))
doFit()
```
## Fitting Surface
We begin by exploring the effect of the fitting surface.
We can control the fitting surface in two ways. The first is by the selection of columns that are matched with observational data.
For example, suppose that we only consider ``S5`` and so the fitting surface is residuals
from fitting ``S5``.
```
doFit(selectedColumns=["S5"])
```
We see that we get poor estimates for most of the parameters, something that we can check because we know the true values of the parameters (``k1=1``, ``k2=2``, ``k3=3``, ``k4=4``).
Another consideration is to focus on a subset of the dynamics. Below, we only consider through 2 seconds.
```
doFit(endTime=2)
```
This improved the quality of the fit. We see this visually in the above plots and also in the significant reduction in chi-square. A lot of this improvement
is a result of not including regions of high variability in observed values for ``S5``.
## Optimization Algorithms
The main consideration here is the choice of optimization algorithms.
Any valid ``method`` for ``lmfit`` can be used, and multiple methods can be used in combination. We illustrate this below.
```
# Fit with Levenberg-Marquardt
doFit(fitterMethods=["leastsq"])
# Fit with differential evolution
doFit(fitterMethods=["differential_evolution"])
# Fit with differential evolution and then Levenberg-Marquardt
doFit(fitterMethods=["differential_evolution", "leastsq"])
```
For this model, we see that Levenberg-Marquardt works better than differential evolution, and doing the two in combination offers no benefit.
## Search Start & Scope
Where we start the search and how far we search depends on the ranges of parameter values and the specification of initial values.
This is specified by the ``parametersToFit`` argument to ``ModelFitter``. This argument defaults to ``LINEAR_PATHWAY_PARAMETERS``.
If we create a bad parameter range, then we get very poor fits. Below, we start the search with a negative value for each parameter.
Note that the observed values appear to be constant because of the large scale of the fitted values.
```
parametersToFit = [
SBstoat.Parameter("k1", lower=-11, value=-1, upper=1),
SBstoat.Parameter("k2", lower=-11, value=-1, upper=1),
SBstoat.Parameter("k3", lower=-11, value=-1, upper=1),
SBstoat.Parameter("k4", lower=-11, value=-1, upper=1),
]
doFit(parametersToFit=parametersToFit)
```
# Exercise
This exercise is about fitting parameters in the Wolf model for glycolytic oscillations.
The model is ``WOLF_MODEL`` and the observational data for this model are ``WOLF_DF``.
1. Implement a ``doFit`` function that encapsulates the workflow for the Wolf model.
1. Try fitting the model using ``WOLF_PARAMETERS``. First try ``leastsq`` (a gradient descent method) and then ``differential_evolution``. How did the two methods differ as to fitting time and quality? Why? What would you try next to get better fits?
1. Limit the parameter values so that the upper value is twice the true value. Try fits using ``leastsq`` and differential evolution.
| github_jupyter |
## Version
```
System.out.println(System.getProperty("java.version"));
```
## Identifiers
1. Available characters: lowercase letters (a to z), uppercase letters(A to Z), digits (0 to 9), underscore `_`, and dollar sign `$`
2. Cannot start with a digit
3. Case sensitive
## Keywords
- abstract
- continue
- for
- new
- switch
- assert
- default
- package
- synchronized
- boolean
- do
- if
- private
- this
- break
- double
- implements
- protected
- throw
- byte
- else
- import
- public
- throws
- case
- enum
- instanceof
- return
- transient
- catch
- extends
- int
- short
- try
- char
- final
- interface
- static
- void
- class
- finally
- long
- strictfp
- volatile
- float
- native
- super
- while
## Comments
```
// this is a single-line comment
/*
* this is
* a multi-line
* comment
*/
```
## Multi-line statement
```
String total = "line 1 " +
"line 2 " +
"line 3";
System.out.println(total);
```
## Empty statement
```
{}
```
## Basic Data Type
```
int a = 1;
double b = 1;
char c = 97;
boolean d = false;
System.out.printf("%d %f %s %s %s\n", a, b, c, d, null);
Integer.SIZE
Integer.MIN_VALUE;
Integer.MAX_VALUE;
Double.SIZE
Double.MIN_VALUE;
Double.MAX_VALUE;
Character.SIZE
Integer.valueOf(Character.MIN_VALUE);
Integer.valueOf(Character.MAX_VALUE)
Boolean.FALSE;
Boolean.TRUE;
Byte.SIZE
Byte.MIN_VALUE;
Byte.MAX_VALUE;
Short.SIZE
Short.MIN_VALUE;
Short.MAX_VALUE;
Long.SIZE
Long.MIN_VALUE;
Long.MAX_VALUE;
Float.SIZE
Float.MIN_VALUE;
Float.MAX_VALUE;
```
## Variable assignment
```
int a = 1, b = 2, c = 3;
System.out.printf("%d %d %d\n", a, b, c);
a = b = 1;
System.out.printf("%d %d", a, b);
```
## Operations
```
1+2
4.3-2
3*7
2/4
4/2
2.0/4
4/2.0
5 % 3
-5 % 3
5 % -3
-5 % -3
int a = 1;
a++;
a
int a = 1;
a--;
a
1 < 2
1 == 1.0
1 != 2
5 >= 1
3 <= 5
int a = 1;
a += 1;
a -= 1;
a *= 1;
a /= 1;
a %= 1;
a
int a = 0b0011;
int b = 0b0101;
printf("%d %d\n", a, b);
System.out.println(String.format("%4s", Integer.toBinaryString(a)).replace(" ", "0"));
System.out.println(String.format("%4s", Integer.toBinaryString(b)).replace(" ", "0"));
System.out.println(String.format("%4s", Integer.toBinaryString(a & b)).replace(" ", "0"));
System.out.println(String.format("%4s", Integer.toBinaryString(a | b)).replace(" ", "0"));
System.out.println(String.format("%4s", Integer.toBinaryString(a ^ b)).replace(" ", "0"));
~a
a = -1;
System.out.println(String.format("%32s", Integer.toBinaryString(a ^ b)).replace(" ", "0"));
System.out.println(String.format("%32s", Integer.toBinaryString(a >> 1)).replace(" ", "0"));
System.out.println(String.format("%32s", Integer.toBinaryString(a >>> 1)).replace(" ", "0"));
System.out.println(String.format("%32s", Integer.toBinaryString(a << 1)).replace(" ", "0"));
a &= 1;
a |= 1;
a ^= 1;
a >>= 1;
a <<= 1;
a >>>= 1;
a
true && false
true || false
!true
String a = "abc";
String b = "ab";
b += "c";
System.out.println(a);
System.out.println(b);
System.out.println(a == b);
System.out.println(a.equals(b));
true || false && false
true || (false && false)
(true || false) && false
true ? 1 : 0
false ? 1 : 0
Integer.valueOf(1) instanceof Integer
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels as sm
%matplotlib inline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer, Binarizer
```
# Prepare data for Machine Learning
```
#Scripts for preprocessing data for machine learning
#Borrowed generously from https://machinelearningmastery.com/prepare-data-machine-learning-python-scikit-learn/
class Prepare:
    '''
    Prepare data for machine learning in Python using scikit-learn.

    Functions: prepare_data_from_csv, describe_data, rescale_data,
    standardize_data, normalize_data, binarize_data
    Input: file_path_as_string, y_column_name, column_names=None, header=0
    '''

    def __init__(self, file_path_as_string, y_column_name, column_names, header):
        self.file_path_as_string = file_path_as_string
        self.y_column_name = y_column_name
        self.column_names = column_names
        # Bug fix: this was hard-coded to 0, silently ignoring the
        # caller-supplied argument (e.g. header=None for headerless files).
        self.header = header

    def prepare_data_from_csv(self):
        '''Read the csv file and split it into features and target.

        Returns
        -------
        (prepared_df, X, y): the full dataframe, the feature columns, and
        the target column named by y_column_name.
        '''
        dataframe = pd.read_csv(self.file_path_as_string, names=self.column_names,
                                delimiter=',', header=self.header)
        if self.header is not None:
            # Normalize column names read from the file: spaces -> underscores
            dataframe.columns = [x.replace(' ', '_') for x in dataframe.columns]
        print(dataframe.head())
        self.X = dataframe.drop(self.y_column_name, axis=1)
        self.y = dataframe[self.y_column_name]
        prepared_df = dataframe
        return prepared_df, self.X, self.y

    def describe_data(self, prepared_df):
        '''Print shape and descriptive statistics for prepared_df.'''
        print('\nColumns: ', '\n' + '--' * 25 + f'\n{prepared_df.columns}')
        print('--' * 25, '\n' + '--' * 25)
        print('\nInfo: ', '\n' + '--' * 25 + f'\n{prepared_df.info()}')
        print('--' * 25, '\n' + '--' * 25)
        print('\nUnique: ', '\n' + '--' * 25 + f'\n{prepared_df.nunique()}')
        print('--' * 25, '\n' + '--' * 25)
        print('\nNulls: ', '\n' + '--' * 25 + f'\n{prepared_df.isnull().sum()}')
        print('--' * 25, '\n' + '--' * 25)
        print('\nDescribe: ', '\n' + '--' * 25 + f'\n{prepared_df.describe()}')
        print('--' * 25, '\n' + '--' * 25)
        print('\nHead: ', '\n' + '--' * 25 + f'\n{prepared_df.head()}')
        print('--' * 25, '\n' + '--' * 25)

    def rescale_data(self, X):
        '''
        Rescale attributes into the range 0..1 (MinMaxScaler). Useful for
        gradient-descent-based optimizers and for algorithms that weight
        inputs (regression, neural networks) or use distance measures
        (K-Nearest Neighbors).

        Input: dataframe data to be used for features
        Return: scaled data
        '''
        scaler = MinMaxScaler(feature_range=(0, 1))
        # Consistency fix: operate on the X argument (previously used
        # self.X, silently ignoring the parameter).
        rescaledX = scaler.fit_transform(X)
        # summarize transformed data
        np.set_printoptions(precision=3)
        print(rescaledX[0:5, :])
        return rescaledX

    def standardize_data(self, X):
        '''
        Standardize attributes to a Gaussian with mean 0 and standard
        deviation 1. Suitable for techniques that assume Gaussian inputs
        (linear regression, logistic regression, linear discriminant
        analysis).

        Input: dataframe data to be used for features
        Return: standardized data
        '''
        stand_scaler = StandardScaler().fit(X)
        # Consistency fix: transform the same X the scaler was fit on
        # (previously fit on X but transformed self.X).
        stand_rescaledX = stand_scaler.transform(X)
        # summarize transformed data
        np.set_printoptions(precision=3)
        print(stand_rescaledX[0:5, :])
        return stand_rescaledX

    def normalize_data(self, X):
        '''
        Rescale each row to unit norm. Useful for sparse datasets (lots of
        zeros) with attributes of varying scales, with algorithms that
        weight inputs (neural networks) or use distance measures
        (K-Nearest Neighbors).

        Input: dataframe data to be used for features
        Return: normalized data
        '''
        # Consistency fix: use the X argument rather than self.X
        norm_scaler = Normalizer().fit(X)
        normalizedX = norm_scaler.transform(X)
        # summarize transformed data
        np.set_printoptions(precision=3)
        print(normalizedX[0:5, :])
        return normalizedX

    def binarize_data(self, X):
        '''
        Threshold values: above 0.0 become 1, all others 0. Useful to turn
        probabilities into crisp values, or for feature engineering when
        you want indicator features.

        Input: dataframe data to be used for features
        Return: binarized data
        '''
        binarizer = Binarizer(threshold=0.0).fit(X)
        binaryX = binarizer.transform(X)
        # summarize transformed data
        np.set_printoptions(precision=3)
        print(binaryX[0:5, :])
        return binaryX
```
### Tests
```
# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# prep_obj = Prepare('data/pima-indians-diabetes.data copy.csv', 'class', names, 0)
# prepared_df, X, y = prep_obj.prepare_data_from_csv()
#prep_obj.describe_data(prepared_df)
#resc_x = prep_obj.rescale_data(X)
#stand_x = prep_obj.standardize_data(X)
#norm_x = prep_obj.normalize_data(X)
#bin_x = prep_obj.binarize_data(X)
```
| github_jupyter |
```
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
import time
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
print(module.__name__, module.__version__)
class_names = [
'airplane',
'automobile',
'bird',
'cat',
'deer',
'dog',
'frog',
'horse',
'ship',
'truck',
]
train_lables_file = './cifar10/trainLabels.csv'
test_csv_file = './cifar10/sampleSubmission.csv'
train_folder = './cifar10/train/'
test_folder = './cifar10/test'
def parse_csv_file(filepath, folder):
    """Parses csv files into (filename(path), label) format.

    The first line (csv header) is skipped; each remaining line is
    'image_id,label' and is mapped to (folder/image_id.png, label).
    """
    entries = []
    with open(filepath, 'r') as handle:
        for row in handle.readlines()[1:]:
            image_id, label_str = row.strip('\n').split(',')
            entries.append((os.path.join(folder, image_id + '.png'), label_str))
    return entries
train_labels_info = parse_csv_file(train_lables_file, train_folder)
test_csv_info = parse_csv_file(test_csv_file, test_folder)
import pprint
pprint.pprint(train_labels_info[0:5])
pprint.pprint(test_csv_info[0:5])
print(len(train_labels_info), len(test_csv_info))
# train_df = pd.DataFrame(train_labels_info)
train_df = pd.DataFrame(train_labels_info[0:45000])
valid_df = pd.DataFrame(train_labels_info[45000:])
test_df = pd.DataFrame(test_csv_info)
train_df.columns = ['filepath', 'class']
valid_df.columns = ['filepath', 'class']
test_df.columns = ['filepath', 'class']
print(train_df.head())
print(valid_df.head())
print(test_df.head())
height = 32
width = 32
channels = 3
batch_size = 32
num_classes = 10
train_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale = 1./255,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest',
)
train_generator = train_datagen.flow_from_dataframe(
train_df,
directory = './',
x_col = 'filepath',
y_col = 'class',
classes = class_names,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = True,
class_mode = 'sparse',
)
valid_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale = 1./255)
valid_generator = valid_datagen.flow_from_dataframe(
valid_df,
directory = './',
x_col = 'filepath',
y_col = 'class',
classes = class_names,
target_size = (height, width),
batch_size = batch_size,
seed = 7,
shuffle = False,
class_mode = "sparse")
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
for i in range(2):
x, y = train_generator.next()
print(x.shape, y.shape)
print(y)
# CNN for 32x32x3 CIFAR-10 images: three conv stages (128, 256, 512 filters),
# each stage = Conv-BN-Conv-BN-MaxPool, followed by a dense classifier head.
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',
                        activation='relu',
                        input_shape=[width, height, channels]),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',
                        activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
                        activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
                        activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(filters=512, kernel_size=3, padding='same',
                        activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=512, kernel_size=3, padding='same',
                        activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation='relu'),
    # num_classes-way softmax output; labels are integer-encoded
    keras.layers.Dense(num_classes, activation='softmax'),
])
# sparse_categorical_crossentropy matches the generators' class_mode='sparse'.
# NOTE(review): with metrics=['accuracy'] recent TF versions record history
# keys 'accuracy'/'val_accuracy'; the plotting cell below reads 'acc' —
# verify against the installed TF version.
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="adam", metrics=['accuracy'])
model.summary()
epochs = 20
history = model.fit_generator(train_generator,
steps_per_epoch = train_num // batch_size,
epochs = epochs,
validation_data = valid_generator,
validation_steps = valid_num // batch_size)
def plot_learning_curves(history, label, epochs, min_value, max_value):
    """Plot a training metric and its validation counterpart over epochs.

    Args:
        history: Keras History object returned by model.fit*.
        label: metric name in history.history (e.g. 'acc' or 'loss').
        epochs: number of epochs trained (upper bound of the x-axis).
        min_value: lower bound of the y-axis.
        max_value: upper bound of the y-axis.
    """
    # FIX: the parameter was previously misspelled 'epcohs' and never used;
    # the body silently fell back to the global 'epochs'. It now uses the
    # argument (all in-file callers pass it positionally, so they still work).
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
# Plot accuracy and loss curves (training vs. validation).
plot_learning_curves(history, 'acc', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 0, 2)
# Test-time generator: rescale only, deterministic order so that predictions
# line up with test_df rows.
test_datagen = keras.preprocessing.image.ImageDataGenerator(
    rescale = 1./255)
# FIX: this previously called valid_datagen.flow_from_dataframe, leaving
# test_datagen unused; both generators are rescale-only, so results are
# unchanged -- the test generator now actually uses test_datagen.
test_generator = test_datagen.flow_from_dataframe(
    test_df,
    directory = './',
    x_col = 'filepath',
    y_col = 'class',
    classes = class_names,
    target_size = (height, width),
    batch_size = batch_size,
    seed = 7,
    shuffle = False,
    class_mode = "sparse")
test_num = test_generator.samples
print(test_num)
test_predict = model.predict_generator(test_generator,
                                       workers = 10,
                                       use_multiprocessing = True)
print(test_predict.shape)
print(test_predict[0:5])
# Class probabilities -> predicted class indices -> class-name strings.
test_predict_class_indices = np.argmax(test_predict, axis = 1)
print(test_predict_class_indices[0:5])
test_predict_class = [class_names[index]
                      for index in test_predict_class_indices]
print(test_predict_class[0:5])
def generate_submissions(filename, predict_class):
    """Write a Kaggle-style submission CSV with a header and 1-based ids."""
    with open(filename, 'w') as out:
        out.write('id,label\n')
        for row_id, label in enumerate(predict_class, start=1):
            out.write('%d,%s\n' % (row_id, label))
# Write the final submission file (id,label rows).
output_file = "./cifar10/submission.csv"
generate_submissions(output_file, test_predict_class)
```
| github_jupyter |
```
"""
Petrophysically guided inversion of gravity data over TKC.

Notes:
    This version of the script uses data with less noise but still
    inverts with a higher assumed noise level. This is equivalent to
    increasing the chi-factor, which has been needed in order to fit
    both the geophysical and the petrophysical data sets.
"""
# Script of petro-inversion of gravity over TKC
import SimPEG.PF as PF
from SimPEG import *
from SimPEG.Utils import io_utils
import matplotlib
import time as tm
import mpl_toolkits.mplot3d as a3
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import NearestNDInterpolator
from sklearn.mixture import GaussianMixture
import numpy as np
import copy
import pickle
from pymatsolver import PardisoSolver
%matplotlib inline
matplotlib.rcParams['font.size'] = 14
import seaborn
import sys
sys.path.append('../../../')
from DO27_Utils import *
# Nicer plot
seaborn.set()
# Reproducible Science: fix the RNG seed
np.random.seed(518936)
# We first need to create a susceptibility model.
# Based on a set of parametric surfaces representing TKC,
# we use VTK to discretize the 3-D space.
# Reproducible Science
np.random.seed(518936)
# Load the inversion mesh (UBC format)
mesh = Mesh.load_mesh('../../../Forward/mesh_inverse')
# Define no-data-value used to flag inactive (air) cells
ndv = -100
# Load topography file in UBC format and find the active cells
# Import Topo
model_dir = '../../../Geology_Surfaces/'
topofile = model_dir + 'TKCtopo.dat'
topo = np.genfromtxt(topofile, skip_header=1)
# Find the active cells (below topography)
actv = Utils.surface2ind_topo(mesh, topo, gridLoc='N')
# Create active map to go from the reduced set to the full mesh
actvMap = Maps.InjectActiveCells(mesh, actv, ndv)
print("Active cells created from topography!")
# Load the observed gravity data
survey = io_utils.readUBCgravityObservations(
    "../../../Forward/GRAV_noisydata.obs"
)
# Now that we have a survey we can build the linear system ...
nactv = np.int(np.sum(actv))
# Create reduced identity map (one model parameter per active cell)
idenMap = Maps.IdentityMap(nP=nactv)
# Create the forward model operator
prob = PF.Gravity.GravityIntegral(mesh, rhoMap=idenMap, actInd=actv)
# Pair the survey and problem
survey.pair(prob)
# If you formed the sensitivity gravity matrix before, you can load it here
#G = np.load('../../../Forward/G_Grav_Inverse.npy')
#prob._G = G
# Define noise level
std = 0.01
eps = 0.
survey.std = std
survey.eps = eps
# **Inverse problem**
# Petro Inversion
# It is potential fields, so we will need to push the inversion down
# Create distance weights from our linear forward operator
# rxLoc = survey.srcField.rxList[0].locs
# wr = PF.Magnetics.get_dist_wgt(mesh, rxLoc, actv, 3., np.min(mesh.hx)/4.)
# wr = wr**2.
# Sensitivity-based weighting (column norms of G), normalized to a max of 1
wr = np.sum(prob.G**2., axis=0)**0.5
wr = (wr / np.max(wr))
# Initial model: small uniform negative density contrast
m0 = np.ones(idenMap.nP) * -1e-4
# Load ground-truth models for comparison
model_grav = mesh.readModelUBC(
    '../../../Forward/model_grav.den'
)
geomodel = mesh.readModelUBC(
    '../../../Forward/geomodel'
)
model_grav = model_grav[model_grav != -100.]  # keep only active cells
# Load petrophysics: pre-trained Gaussian mixture of density contrasts
clf = pickle.load(open('../../../Petrophysics/gmm_density.p','rb'))
n = clf.n_components
# wires map: a single physical property ('m') over all active cells
wires = Maps.Wires(('m', m0.shape[0]))
# PGI (petrophysically guided) regularization built around the GMM prior
reg = Regularization.MakeSimplePetroRegularization(
    GMmref=clf,
    GMmodel=clf,
    mesh=mesh,
    wiresmap=wires,
    maplist=[idenMap],
    mref=m0,
    indActive=actv,
    alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0,
    alpha_xx=0., alpha_yy=0., alpha_zz=0.,
    cell_weights_list=[wr]
)
reg.mrefInSmooth = False
reg.approx_gradient = True
reg.objfcts[0].evaltype = 'approx'
# Data misfit
dmis = DataMisfit.l2_DataMisfit(survey)
# Assign flat uncertainties of 0.01 mGal
wd = np.ones(len(survey.dobs)) * 0.01
dmis.W = 1 / wd
# Optimization scheme: projected Gauss-Newton CG with density bounds [-1, 0]
opt = Optimization.ProjectedGNCG(
    maxIter=50, lower=-1., upper=0., maxIterLS=20, maxIterCG=100, tolCG=1e-3
)
# Create inverse problem
invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
# Add directives to the inversion
# Smoothness weights: estimate alphas from largest eigenvalues
Alphas = Directives.AlphasSmoothEstimate_ByEig(
    alpha0_ratio=1.,
    ninit=10, verbose=True
)
# Beta (trade-off parameter) initialization by eigenvalue estimation
beta = Directives.BetaEstimate_ByEig(beta0_ratio=1., ninit=10)
# Beta Schedule
betaIt = Directives.PetroBetaReWeighting(
    verbose=True, rateCooling=5., rateWarming=1.,
    tolerance=0.1, UpdateRate=1,
    ratio_in_cooling=False,
    progress=0.2,
    update_prior_confidence=False,
    progress_gamma_cooling=1.,
    ratio_in_gamma_cooling=False,
    alphadir_rateCooling=1.,
    kappa_rateCooling=1.,
    nu_rateCooling=1.,
)
# Target misfits (geophysical and petrophysical)
targets = Directives.PetroTargetMisfit(verbose=True)
# Include mref in the smoothness term once the model is stable
MrefInSmooth = Directives.AddMrefInSmooth(
    wait_till_stable=True,
    verbose=True
)
# GMM, mref and Ws updates; the very large confidences (1e8) effectively
# keep the prior GMM fixed during the updates
petrodir = Directives.GaussianMixtureUpdateModel(
    keep_ref_fixed_in_Smooth=True,
    verbose=False,
    nu=1e8,
    kappa=1e8,
    alphadir=1e8
)
# Pre-conditioner
update_Jacobi = Directives.UpdatePreconditioner()
# Create inversion (directive order matters)
inv = Inversion.BaseInversion(
    invProb,
    directiveList=[
        Alphas, beta,
        petrodir, targets,
        betaIt, MrefInSmooth, update_Jacobi
    ]
)
# Sanity-check the petrophysical GMM over the admissible density-contrast
# range: probability density (blue) and cluster membership (red).
vmin, vmax = -1.2,0
plt.plot(
    np.linspace(vmin, vmax, 100), np.exp(
        clf.score_samples(np.linspace(vmin, vmax, 100)[:, np.newaxis])
    ),
    color='blue'
)
plt.plot(
    np.linspace(vmin, vmax, 100), (
        clf.predict(np.linspace(vmin, vmax, 100)[:, np.newaxis])
    ),
    color='red'
)
plt.show()
# Run inversion...
mcluster = inv.run(m0)
# Get the final model back to full space (inactive cells -> NaN for plotting)
m_petro = actvMap * mcluster
m_petro[m_petro == ndv] = np.nan
# Plot the recovered models
mesh = Mesh.TensorMesh([mesh.hx, mesh.hy, mesh.hz], x0="CCN")
npad = 10  # number of padding cells trimmed from the plotted extent
X, Y = np.meshgrid(mesh.vectorCCx[npad:-npad:2], mesh.vectorCCy[npad:-npad:2])
vmin, vmax = -1.2, 0.1
fig, ax = plt.subplots(3, 1, figsize=(10, 12))
ax = Utils.mkvc(ax)
mesh.plotSlice(
    m_petro, ax=ax[0], normal='Y',
    clim=np.r_[vmin, vmax], pcolorOpts={'cmap':'viridis'}
)
ax[0].set_aspect('equal')
ax[0].set_title('Petro model')
dat_true = mesh.plotSlice(
    actvMap*model_grav, ax=ax[1], normal='Y',
    clim=np.r_[vmin, vmax], pcolorOpts={'cmap':'viridis'}
)
ax[1].set_aspect('equal')
ax[1].set_title('True model')
pos = ax[1].get_position()
cbarax = fig.add_axes(
    [pos.x0 - 0.15, pos.y0, pos.width * 0.1, pos.height * 0.75]
)  # the parameters are the specified position you set
cb = fig.colorbar(
    dat_true[0], cax=cbarax, orientation="vertical",
    ax=ax[1], ticks=np.linspace(vmin, vmax, 4)
)
# Histogram of recovered densities vs. the prior GMM (blue) and the
# GMM held by the regularization (black).
mcluster = m_petro[~np.isnan(m_petro)]
ax[2].hist(mcluster, bins=100, density=True)
ax[2].plot(
    np.linspace(vmin, vmax, 100), np.exp(
        clf.score_samples(np.linspace(vmin, vmax, 100)[:, np.newaxis])
    ),
    color='blue'
)
ax[2].plot(
    np.linspace(vmin, vmax, 100), np.exp(
        reg.objfcts[0].GMmodel.score_samples(np.linspace(vmin, vmax, 100)[:, np.newaxis])
    ),
    color='k'
)
ax[2].set_ylim([0., 5.])
plt.show()
```
| github_jupyter |
```
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
#from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
print(__doc__)
# prints the module docstring (the comment block at the top of the file)
to = time.time()  # start timer for the whole example
train_samples = 5000
```
X, y = fetch_openml('mnist_784', version= 1, return_X_y=True)
```
# Download MNIST (70k 28x28 digit images) from OpenML.
mnist_data = fetch_openml("mnist_784")
X = mnist_data["data"]
y = mnist_data["target"]
type(X)
print(X.shape)
print(y.shape)
# Shuffle samples with a fixed seed so the train/test split is reproducible.
random_state = check_random_state(0)
permutation = random_state.permutation(X.shape[0])
X = X[permutation]
y = y[permutation]
X = X.reshape((X.shape[0], -1))
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size = train_samples, test_size = 10000)
# Standardize features: fit the scaler on the training data only, then apply
# the same transform to the test data (no information leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
```
scikit-learn中fit_transform()与transform()
二者的功能都是对数据进行某种统一处理(比如标准化~N(0,1),将数据缩放(映射)到某个固定区间,归一化,正则化等)
fit_transform(partData)对部分数据先拟合fit,找到该part的整体指标,如均值、方差、最大值最小值等等(根据具体转换的目的),然后对该partData进行转换transform,从而实现数据的标准化、归一化等等。
根据对之前部分fit的整体指标,对剩余的数据(restData)使用同样的均值、方差、最大最小值等指标进行转换transform(restData),从而保证part、rest处理方式相同。
必须先用fit_transform(partData),之后再transform(restData)
如果直接transform(partData),程序会报错
如果fit_transform(partData)后,使用fit_transform(restData)而不用transform(restData),虽然也能归一化,但是两个结果不是在同一个“标准”下的,具有明显差异。
```
# L1-regularized multinomial logistic regression on MNIST.
clf = LogisticRegression(C = 50. / train_samples,
                         multi_class = 'multinomial',
                         penalty = 'l1', solver = 'saga', tol = 0.1)
# C: inverse regularization strength -- smaller values mean stronger regularization.
# penalty: 'newton-cg', 'sag' and 'lbfgs' solvers support only 'l2';
#          'saga' also handles 'l1' and converges linearly on large data.
# multi_class='multinomial': true softmax loss rather than one-vs-rest.
# tol: the optimizer stops when the improvement between iterations is <= tol.
clf.fit(X_train, y_train)
sparsity = np.mean(clf.coef_ == 0) * 100  # % of weights zeroed by the L1 penalty
score = clf.score(X_test, y_test)
coef = clf.coef_.copy()
plt.figure(figsize=(10, 5))
scale = np.abs(coef).max()  # symmetric color range shared by all classes
# FIX: removed a premature plt.show() here -- it displayed an empty figure
# and detached the 10x5 figure created above from the subplots drawn below.
for i in range(10):
    l2_plot = plt.subplot(2, 5, i + 1)
    # 'nearest' interpolation keeps the 28x28 weight image crisp; the
    # diverging RdBu colormap is centered at zero via vmin/vmax.
    l2_plot.imshow(coef[i].reshape(28, 28), interpolation='nearest',
                   cmap=plt.cm.RdBu, vmin=-scale, vmax=scale)
    l2_plot.set_xticks(())
    l2_plot.set_yticks(())
    l2_plot.set_xlabel('Class %i' % i)
plt.suptitle('Classification vector for ...')
run_time = time.time() - to
print('Example run in %.3f s' % run_time)
plt.show()
```
| github_jupyter |
```
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import random
from skimage.filters import (threshold_otsu, threshold_niblack, threshold_sauvola)
random.seed(10)
# for debugging
import numpy as np
# TODO: introduce a variable n (number of images) to increase speed while debugging
def get_raw_data(path):
    """Recursively collect every .jpg under *path*.

    Returns:
        dict mapping the file path (as a string) to the image array
        loaded by cv2.imread (BGR channel order).
    """
    jpg_files = (p for p in Path(path).glob('**/*.jpg') if p.is_file())
    return {str(p): cv2.imread(str(p)) for p in jpg_files}
# Load every raw .jpg under the project's data/raw directory.
path = '/Users/beantown/PycharmProjects/master-thesis/data/raw'
files = get_raw_data(path)
def show_example(files, keep_size=True, n=1, hide_spines=False, gray=False, add_to_title=None):
    """Display *n* randomly chosen images from the *files* dict.

    Args:
        files: dict mapping file path -> image array (BGR color, or a
            single-channel/boolean array when gray=True).
        keep_size: if True, size the figure so the image shows at native
            resolution (based on an 80-dpi figure).
        n: number of random examples to display.
        hide_spines: if True, hide the axes, ticks and spines entirely.
        gray: render with a grayscale colormap instead of converting BGR->RGB.
        add_to_title: optional prefix prepended to the image title.
    """
    dpi = 80  # hoisted out of the loop: constant across iterations
    for _ in range(n):  # loop index was unused; renamed to _
        key, value = random.choice(list(files.items()))
        if not gray:
            # OpenCV loads BGR; convert so matplotlib shows correct colors.
            im_data = cv2.cvtColor(value, cv2.COLOR_BGR2RGB)
        else:
            im_data = value.copy()
        # Use the file stem (name without directories/extension) as the title.
        title = str(key).split('/')[-1].split('.')[0]
        if add_to_title is not None:  # idiom fix: was 'add_to_title != None'
            title = add_to_title + ': ' + title
        if keep_size:
            if gray:
                height, width = im_data.shape
            else:
                height, width, _ = im_data.shape
            # What size does the figure need to be in inches to fit the image?
            figsize = width / float(dpi), height / float(dpi)
            # Create a figure of the right size with one axes filling it.
            fig = plt.figure(figsize=figsize)
            ax = fig.add_axes([0, 0, 1, 1])
            if hide_spines:
                # Hide spines, ticks, etc.
                ax.axis('off')
            else:
                ax.tick_params(axis='both', which='major', labelsize=40)
                ax.tick_params(axis='both', which='minor', labelsize=30)
                ax.set_title(title, pad=30, fontsize=50)
            # Display the image.
            if gray:
                ax.imshow(im_data, cmap='gray')
            else:
                ax.imshow(im_data)
        else:
            plt.title(title, pad=20, fontsize=20)
            if hide_spines:
                plt.axis('off')
            if gray:
                plt.imshow(im_data, cmap='gray')
            else:
                plt.imshow(im_data)
        plt.show()
show_example(files, keep_size=False, n=2, hide_spines=False)
# Pick one random image and keep it as a single-entry dict so that the
# thresholding methods below can be compared on the same image.
key, value = random.choice(list(files.items()))
test_file = {key: value}
show_example(test_file, keep_size=True, n=1, hide_spines=False)
def get_forground(files, method='otsu'):
    """Binarize each image with the chosen thresholding method.

    Args:
        files: dict mapping file path -> BGR image (as from cv2.imread).
        method: 'niblack' or 'sauvola' for local thresholding; any other
            value falls back to the global Otsu threshold (the default).

    Returns:
        dict mapping file path -> boolean foreground mask (True where the
        grayscale pixel is brighter than the threshold).
    """
    # FIX(structure): the original duplicated the whole loop body three
    # times (one copy per method); a single loop with a branch on the
    # threshold choice produces identical results.
    window_size = 25  # local window used by the Niblack/Sauvola methods
    forground_files = {}
    for key, value in files.items():
        # Preprocessing: threshold on the grayscale image.
        image = cv2.cvtColor(value, cv2.COLOR_BGR2GRAY)
        if method == 'niblack':
            thresh = threshold_niblack(image, window_size=window_size, k=0.8)
        elif method == 'sauvola':
            thresh = threshold_sauvola(image, window_size=window_size)
        else:
            thresh = threshold_otsu(image)
        forground_files[key] = image > thresh
    return forground_files
# Binarize every image with each of the three thresholding methods.
otsu_files = get_forground(files,)
sauvola_files = get_forground(files, method = 'sauvola')
niblack_files = get_forground(files, method = 'niblack')
# 'key' is the random image chosen above; compare the three methods on it.
otsu_img = {key: otsu_files[key]}
sauvola_img = {key: sauvola_files[key]}
niblack_img = {key: niblack_files[key]}
show_example(otsu_img, keep_size=True, n=1, hide_spines=False, gray=True, add_to_title='otsu_img')
show_example(sauvola_img, keep_size=True, n=1, hide_spines=False, gray=True, add_to_title='sauvola_img')
show_example(niblack_img, keep_size=True, n=1, hide_spines=False, gray=True, add_to_title='niblack_img')
```
| github_jupyter |
# ML Pipeline Preparation
Follow the instructions below to help you create your ML pipeline.
### 1. Import libraries and load data from database.
- Import Python libraries
- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
- Define feature and target variables X and Y
```
# import libraries
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords'])
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import re
import pickle
# load data from database into DataFrame df
engine = create_engine('sqlite:///DisasterResponse.db')
df = pd.read_sql('DisasterData', con=engine)
# split DataFrame df into features X (raw message text) and
# targets y (the 36 binary category columns after the metadata columns)
X = df['message']
y = df.iloc[:, 4:]
```
### 2. Write a tokenization function to process your text data
```
def tokenize(text):
    """Tokenize, filter and lemmatize a raw message string.

    Steps: word-tokenize, drop English stopwords, drop non-alphabetic
    tokens (punctuation, numbers), then lemmatize, lowercase and strip.

    Args:
        text: raw message string.

    Returns:
        list of clean lowercase tokens.
    """
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()
    # FIX(perf): build the stopword set once -- the original re-evaluated
    # stopwords.words('english') inside the comprehension for every token,
    # making each call O(tokens * stopwords).
    stop_words = set(stopwords.words('english'))
    tokens = [token for token in tokens if token not in stop_words]
    # remove punctuation and other non-alphabetic tokens
    tokens = [token for token in tokens if token.isalpha()]
    # NOTE(review): stopword filtering happens before lowercasing, so
    # capitalized stopwords such as 'The' survive -- confirm this is intended.
    clean_tokens = []
    for tok in tokens:
        clean_tok = lemmatizer.lemmatize(tok).lower().strip()
        clean_tokens.append(clean_tok)
    return clean_tokens
```
### 3. Build a machine learning pipeline
This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
```
# Text-processing + estimator pipeline: bag-of-words counts -> TF-IDF ->
# one RandomForest per target category via MultiOutputClassifier.
pipeline = Pipeline([
    ('vect', CountVectorizer(tokenizer = tokenize)),
    ('tfidf', TfidfTransformer()),
    ('clf', MultiOutputClassifier(RandomForestClassifier()))
])
```
### 4. Train pipeline
- Split data into train and test sets
- Train pipeline
```
# split data into training and test data (sklearn default 75/25 split)
X_train, X_test, y_train, y_test = train_test_split(X, y)
%%time
# train data
pipeline.fit(X_train, y_train)
y_test.columns
```
### 5. Test your model
Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
```
# predict responses for the basic model
Y_pred = pipeline.predict(X_test)
# per-category precision/recall/F1 report
print(classification_report(y_test, Y_pred, target_names = y_test.columns, digits = 2))
```
### 6. Improve your model
Use grid search to find better parameters.
```
# build pipeline for the text transformation and for the estimator
cv_pipeline = Pipeline([
    ('vect', CountVectorizer(tokenizer=tokenize)),
    ('tfidf', TfidfTransformer()),
    ('clf', MultiOutputClassifier(RandomForestClassifier()))
])
# list the tunable hyperparameter names (used to build the grid below)
MultiOutputClassifier(RandomForestClassifier()).get_params()
# reduced hyperparameter grid and 2-fold cross validation due to runtime
parameters = {
    'clf__estimator__n_estimators': [4, 6, 9],
    'clf__estimator__min_samples_split': [2, 3, 5],
}
cv_forest = GridSearchCV(cv_pipeline, param_grid = parameters, cv = 2, verbose = 2, n_jobs = 4)
%%time
# train improved model
cv_forest.fit(X_train, y_train)
# display the best performing parameters
cv_forest.best_params_
```
### 7. Test your model
Show the accuracy, precision, and recall of the tuned model.
Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
```
# predict responses for the tuned (grid-searched) model
Y_pred_cv = cv_forest.predict(X_test)
# per-category precision/recall/F1 report
print(classification_report(y_test, Y_pred_cv, target_names = y_test.columns, digits = 2))
```
### 8. Try improving your model further. Here are a few ideas:
* try other machine learning algorithms
* add other features besides the TF-IDF
I'm going to use the KNeighborsClassifier like in my previous submission and see how well it performs in this case.
```
# Alternative estimator: KNeighborsClassifier in the same text pipeline.
pipeline_knn = Pipeline([
    ('vect', CountVectorizer(tokenizer = tokenize)),
    ('tfidf', TfidfTransformer()),
    ('clf', MultiOutputClassifier(KNeighborsClassifier()))
])
# train KNeighborsClassifier model
pipeline_knn.fit(X_train, y_train)
# predict responses for KNeighborsClassifier model
Y_pred_knn = pipeline_knn.predict(X_test)
# per-category precision/recall/F1 report
print(classification_report(y_test, Y_pred_knn, target_names = y_test.columns, digits = 2))
```
### 9. Export your model as a pickle file
```
pickle_out = open('model.pkl','wb')
pickle.dump(cv_forest, pickle_out)
pickle_out.close()
```
### 10. Use this notebook to complete `train.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
| github_jupyter |
<a href="https://colab.research.google.com/github/mrdbourke/tensorflow-deep-learning/blob/main/07_food_vision_milestone_project_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 07. Milestone Project 1: 🍔👁 Food Vision Big™
In the previous notebook ([transfer learning part 3: scaling up](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/06_transfer_learning_in_tensorflow_part_3_scaling_up.ipynb)) we built Food Vision mini: a transfer learning model which beat the original results of the [Food101 paper](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/) with only 10% of the data.
But you might be wondering, what would happen if we used all the data?
Well, that's what we're going to find out in this notebook!
We're going to be building Food Vision Big™, using all of the data from the Food101 dataset.
Yep. All 75,750 training images and 25,250 testing images.
And guess what...
This time **we've got the goal of beating [DeepFood](https://www.researchgate.net/publication/304163308_DeepFood_Deep_Learning-Based_Food_Image_Recognition_for_Computer-Aided_Dietary_Assessment)**, a 2016 paper which used a Convolutional Neural Network trained for 2-3 days to achieve 77.4% top-1 accuracy.
> 🔑 **Note:** **Top-1 accuracy** means "accuracy for the top softmax activation value output by the model" (because softmax outputs a value for every class, but top-1 means only the highest one is evaluated). **Top-5 accuracy** means "accuracy for the top 5 softmax activation values output by the model", in other words, did the true label appear in the top 5 activation values? Top-5 accuracy scores are usually noticeably higher than top-1.
| | 🍔👁 Food Vision Big™ | 🍔👁 Food Vision mini |
|-----|-----|-----|
| Dataset source | TensorFlow Datasets | Preprocessed download from Kaggle |
| Train data | 75,750 images | 7,575 images |
| Test data | 25,250 images | 25,250 images |
| Mixed precision | Yes | No |
| Data loading | Performant tf.data API | TensorFlow pre-built function |
| Target results | 77.4% top-1 accuracy (beat [DeepFood paper](https://arxiv.org/abs/1606.05675)) | 50.76% top-1 accuracy (beat [Food101 paper](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/static/bossard_eccv14_food-101.pdf)) |
*Table comparing difference between Food Vision Big (this notebook) versus Food Vision mini (previous notebook).*
Alongside attempting to beat the DeepFood paper, we're going to learn about two methods to significantly improve the speed of our model training:
1. Prefetching
2. Mixed precision training
But more on these later.
## What we're going to cover
* Using TensorFlow Datasets to download and explore data
* Creating preprocessing function for our data
* Batching & preparing datasets for modelling (**making our datasets run fast**)
* Creating modelling callbacks
* Setting up **mixed precision training**
* Building a feature extraction model (see [transfer learning part 1: feature extraction](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/04_transfer_learning_in_tensorflow_part_1_feature_extraction.ipynb))
* Fine-tuning the feature extraction model (see [transfer learning part 2: fine-tuning](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/05_transfer_learning_in_tensorflow_part_2_fine_tuning.ipynb))
* Viewing training results on TensorBoard
## How you should approach this notebook
You can read through the descriptions and the code (it should all run, except for the cells which error on purpose), but there's a better option.
Write all of the code yourself.
Yes. I'm serious. Create a new notebook, and rewrite each line by yourself. Investigate it, see if you can break it, why does it break?
You don't have to write the text descriptions but writing the code yourself is a great way to get hands-on experience.
Don't worry if you make mistakes, we all do. The way to get better and make less mistakes is to write more code.
> 📖 **Resource:** See the full set of course materials on GitHub: https://github.com/mrdbourke/tensorflow-deep-learning
## Check GPU
For this notebook, we're going to be doing something different.
We're going to be using mixed precision training.
Mixed precision training was introduced in [TensorFlow 2.4.0](https://blog.tensorflow.org/2020/12/whats-new-in-tensorflow-24.html) (a very new feature at the time of writing).
What does **mixed precision training** do?
Mixed precision training uses a combination of single precision (float32) and half-precision (float16) data types to speed up model training (up to 3x on modern GPUs).
We'll talk about this more later on but in the meantime you can read the [TensorFlow documentation on mixed precision](https://www.tensorflow.org/guide/mixed_precision) for more details.
For now, before we can move forward if we want to use mixed precision training, we need to make sure the GPU powering our Google Colab instance (if you're using Google Colab) is compatible.
For mixed precision training to work, you need access to a GPU with a compute compability score of 7.0+.
Google Colab offers P100, K80 and T4 GPUs, however, **the P100 and K80 aren't compatible with mixed precision training**.
Therefore before we proceed we need to make sure we have **access to a Tesla T4 GPU in our Google Colab instance**.
If you're not using Google Colab, you can find a list of various [Nvidia GPU compute capabilities on Nvidia's developer website](https://developer.nvidia.com/cuda-gpus#compute).
> 🔑 **Note:** If you run the cell below and see a P100 or K80, try going to to Runtime -> Factory Reset Runtime (note: this will remove any saved variables and data from your Colab instance) and then retry to get a T4.
>
> **You can still run the code *without* a GPU capable of mixed precision** (it'll just be a little slower).
```
# If using Google Colab, this should output "Tesla T4" otherwise,
# you won't be able to use mixed precision training
# (mixed precision needs compute capability 7.0+; P100/K80 do not qualify)
!nvidia-smi -L
```
Since mixed precision training was introduced in TensorFlow 2.4.0, make sure you've got at least TensorFlow 2.4.0+.
```
# Hide warning logs (see: https://stackoverflow.com/a/38645250/7900723)
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Check TensorFlow version (should be 2.4.0+ -- mixed precision requires it)
import tensorflow as tf
print(tf.__version__)
```
## Get helper functions
We've created a series of helper functions throughout the previous notebooks in the course. Instead of rewriting them (tedious), we'll import the [`helper_functions.py`](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/extras/helper_functions.py) file from the GitHub repo.
```
# Get helper functions file (download once, reuse on later runs)
if not os.path.exists("helper_functions.py"):
    print("Downloading helper functions...")
    !wget https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/extras/helper_functions.py
else:
    print("Helper functions file already exists, skipping download...")
# Import series of helper functions for the notebook (we've created/used these in previous notebooks)
from helper_functions import create_tensorboard_callback, plot_loss_curves, compare_historys
```
## Use TensorFlow Datasets to Download Data
In previous notebooks, we've downloaded our food images (from the [Food101 dataset](https://www.kaggle.com/dansbecker/food-101/home)) from Google Storage.
And this is a typical workflow you'd use if you're working on your own datasets.
However, there's another way to get datasets ready to use with TensorFlow.
For many of the most popular datasets in the machine learning world (often referred to and used as benchmarks), you can access them through [TensorFlow Datasets (TFDS)](https://www.tensorflow.org/datasets/overview).
What is **TensorFlow Datasets**?
A place for prepared and ready-to-use machine learning datasets.
Why use TensorFlow Datasets?
* Load data already in Tensors
* Practice on well established datasets
* Experiment with different data loading techniques (like we're going to use in this notebook)
* Experiment with new TensorFlow features quickly (such as mixed precision training)
Why *not* use TensorFlow Datasets?
* The datasets are static (they don't change, like your real-world datasets would)
* Might not be suited for your particular problem (but great for experimenting)
To begin using TensorFlow Datasets we can import it under the alias `tfds`.
```
# Get TensorFlow Datasets
import tensorflow_datasets as tfds
```
To find all of the available datasets in TensorFlow Datasets, you can use the `list_builders()` method.
After doing so, we can check to see if the one we're after (`"food101"`) is present.
```
# List available datasets
datasets_list = tfds.list_builders()  # get all available datasets in TFDS
print("food101" in datasets_list)  # is the dataset we're after available?
```
Beautiful! It looks like the dataset we're after is available (note there are plenty more available but we're on Food101).
To get access to the Food101 dataset from the TFDS, we can use the [`tfds.load()`](https://www.tensorflow.org/datasets/api_docs/python/tfds/load) method.
In particular, we'll have to pass it a few parameters to let it know what we're after:
* `name` (str) : the target dataset (e.g. `"food101"`)
* `split` (list, optional) : what splits of the dataset we're after (e.g. `["train", "validation"]`)
* the `split` parameter is quite tricky. See [the documentation for more](https://github.com/tensorflow/datasets/blob/master/docs/splits.md).
* `shuffle_files` (bool) : whether or not to shuffle the files on download, defaults to `False`
* `as_supervised` (bool) : `True` to download data samples in tuple format (`(data, label)`) or `False` for dictionary format
* `with_info` (bool) : `True` to download dataset metadata (labels, number of samples, etc)
> 🔑 **Note:** Calling the `tfds.load()` method will start to download a target dataset to disk if the `download=True` parameter is set (default). This dataset could be 100GB+, so make sure you have space.
```
# Load in the data (takes about 5-6 minutes in Google Colab)
(train_data, test_data), ds_info = tfds.load(name="food101",  # target dataset to get from TFDS
                                             split=["train", "validation"],  # what splits of data should we get? note: not all datasets have train, valid, test
                                             shuffle_files=True,  # shuffle files on download?
                                             as_supervised=True,  # download data in tuple format (sample, label), e.g. (image, label)
                                             with_info=True)  # include dataset metadata? if so, tfds.load() returns tuple (data, ds_info)
```
Wonderful! After a few minutes of downloading, we've now got access to entire Food101 dataset (in tensor format) ready for modelling.
Now let's get a little information from our dataset, starting with the class names.
Getting class names from a TensorFlow Datasets dataset requires downloading the "`dataset_info`" variable (by using the `with_info=True` parameter in the `tfds.load()` method, **note:** this will only work for labelled/supervised datasets in TFDS).
We can access the class names of a particular dataset using the `dataset_info.features` attribute and accessing `names` attribute of the the `"label"` key.
```
# Features of Food101 TFDS
ds_info.features
# Get class names
class_names = ds_info.features["label"].names
class_names[:10]
```
### Exploring the Food101 data from TensorFlow Datasets
Now we've downloaded the Food101 dataset from TensorFlow Datasets, how about we do what any good data explorer should?
In other words, "visualize, visualize, visualize".
Let's find out a few details about our dataset:
* The shape of our input data (image tensors)
* The datatype of our input data
* What the labels of our input data look like (e.g. one-hot encoded versus label-encoded)
* Do the labels match up with the class names?
To do so, let's take one sample off the training data (using the [`.take()` method](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#take)) and explore it.
```
# Take one sample off the training data
train_one_sample = train_data.take(1)  # samples are in format (image_tensor, label)
```
Because we used the `as_supervised=True` parameter in our `tfds.load()` method above, data samples come in the tuple format structure `(data, label)` or in our case `(image_tensor, label)`.
```
# What does one sample of our training data look like?
train_one_sample
```
Let's loop through our single training sample and get some info from the `image_tensor` and `label`.
```
# Output info about our training sample (shape/dtype vary per image)
for image, label in train_one_sample:
    print(f"""
Image shape: {image.shape}
Image dtype: {image.dtype}
Target class from Food101 (tensor form): {label}
Class name (str form): {class_names[label.numpy()]}
""")
```
Because we set the `shuffle_files=True` parameter in our `tfds.load()` method above, running the cell above a few times will give a different result each time.
Checking these you might notice some of the images have different shapes, for example `(512, 342, 3)` and `(512, 512, 3)` (height, width, color_channels).
Let's see what one of the image tensors from TFDS's Food101 dataset looks like.
```
# What does an image tensor from TFDS's Food101 look like?
image
# What are the min and max pixel values?
tf.reduce_min(image), tf.reduce_max(image)
```
Alright looks like our image tensors have values of between 0 & 255 (standard red, green, blue colour values) and the values are of data type `uint8`.
We might have to preprocess these before passing them to a neural network. But we'll handle this later.
In the meantime, let's see if we can plot an image sample.
### Plot an image from TensorFlow Datasets
We've seen our image tensors in tensor format, now let's really adhere to our motto.
"Visualize, visualize, visualize!"
Let's plot one of the image samples using [`matplotlib.pyplot.imshow()`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html) and set the title to target class name.
```
# Plot an image tensor
import matplotlib.pyplot as plt
plt.imshow(image)
plt.title(class_names[label.numpy()])  # add title to image by indexing on class_names list
plt.axis(False);
```
Delicious!
Okay, looks like the Food101 data we've got from TFDS is similar to the datasets we've been using in previous notebooks.
Now let's preprocess it and get it ready for use with a neural network.
## Create preprocessing functions for our data
In previous notebooks, when our images were in folder format we used the method [`tf.keras.preprocessing.image_dataset_from_directory()`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory) to load them in.
Doing this meant our data was loaded into a format ready to be used with our models.
However, since we've downloaded the data from TensorFlow Datasets, there are a couple of preprocessing steps we have to take before it's ready to model.
More specifically, our data is currently:
* In `uint8` data type
* Comprised of all different sized tensors (different sized images)
* Not scaled (the pixel values are between 0 & 255)
Whereas, models like data to be:
* In `float32` data type
* Have all of the same size tensors (batches require all tensors have the same shape, e.g. `(224, 224, 3)`)
* Scaled (values between 0 & 1), also called normalized
To take care of these, we'll create a `preprocess_img()` function which:
* Resizes an input image tensor to a specified size using [`tf.image.resize()`](https://www.tensorflow.org/api_docs/python/tf/image/resize)
* Converts an input image tensor's current datatype to `tf.float32` using [`tf.cast()`](https://www.tensorflow.org/api_docs/python/tf/cast)
> 🔑 **Note:** Pretrained EfficientNetBX models in [`tf.keras.applications.efficientnet`](https://www.tensorflow.org/api_docs/python/tf/keras/applications/efficientnet) (what we're going to be using) have rescaling built-in. But for many other model architectures you'll want to rescale your data (e.g. get its values between 0 & 1). This could be incorporated inside your "`preprocess_img()`" function (like the one below) or within your model as a [`tf.keras.layers.experimental.preprocessing.Rescaling`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Rescaling) layer.
```
# Make a function for preprocessing images
def preprocess_img(image, label, img_shape=224):
    """
    Resize `image` to [img_shape, img_shape] and cast it to float32.

    The `label` is accepted (and returned untouched) only because the
    dataset yields (image, label) tuples, so a mapped function must too.
    """
    resized_image = tf.image.resize(image, size=[img_shape, img_shape])
    float_image = tf.cast(resized_image, dtype=tf.float32)
    return float_image, label  # (float32_image, label) tuple
```
Our `preprocess_img()` function above takes image and label as input (even though it does nothing to the label) because our dataset is currently in the tuple structure `(image, label)`.
Let's try our function out on a target image.
```
# Preprocess a single sample image and check the outputs
preprocessed_img = preprocess_img(image, label)[0]
print(f"Image before preprocessing:\n {image[:2]}...,\nShape: {image.shape},\nDatatype: {image.dtype}\n")
print(f"Image after preprocessing:\n {preprocessed_img[:2]}...,\nShape: {preprocessed_img.shape},\nDatatype: {preprocessed_img.dtype}")
```
Excellent! Looks like our `preprocess_img()` function is working as expected.
The input image gets converted from `uint8` to `float32` and gets reshaped from its current shape to `(224, 224, 3)`.
How does it look?
```
# We can still plot our preprocessed image as long as we
# divide by 255 (for matplotlib compatibility: imshow expects floats in [0, 1])
plt.imshow(preprocessed_img/255.)
plt.title(class_names[label])
plt.axis(False);
```
All this food visualization is making me hungry. How about we start preparing to model it?
## Batch & prepare datasets
Before we can model our data, we have to turn it into batches.
Why?
Because computing on batches is memory efficient.
We turn our data from 101,000 image tensors and labels (train and test combined) into batches of 32 image and label pairs, thus enabling it to fit into the memory of our GPU.
To do this in an effective way, we're going to be leveraging a number of methods from the [`tf.data` API](https://www.tensorflow.org/api_docs/python/tf/data).
> 📖 **Resource:** For loading data in the most performant way possible, see the TensorFlow documentation on [Better performance with the tf.data API](https://www.tensorflow.org/guide/data_performance).
Specifically, we're going to be using:
* [`map()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map) - maps a predefined function to a target dataset (e.g. `preprocess_img()` to our image tensors)
* [`shuffle()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle) - randomly shuffles the elements of a target dataset up `buffer_size` (ideally, the `buffer_size` is equal to the size of the dataset, however, this may have implications on memory)
* [`batch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch) - turns elements of a target dataset into batches (size defined by parameter `batch_size`)
* [`prefetch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) - prepares subsequent batches of data whilst other batches of data are being computed on (improves data loading speed but costs memory)
* Extra: [`cache()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache) - caches (saves them for later) elements in a target dataset, saving loading time (will only work if your dataset is small enough to fit in memory, standard Colab instances only have 12GB of memory)
Things to note:
- Can't batch tensors of different shapes (e.g. different image sizes, need to reshape images first, hence our `preprocess_img()` function)
- `shuffle()` keeps a buffer of the number of images you pass it shuffled, ideally this number would be all of the samples in your training set, however, if your training set is large, this buffer might not fit in memory (a fairly large number like 1000 or 10000 usually suffices for shuffling)
- For methods with the `num_parallel_calls` parameter available (such as `map()`), setting it to `num_parallel_calls=tf.data.AUTOTUNE` will parallelize preprocessing and significantly improve speed
- Can't use `cache()` unless your dataset can fit in memory
Woah, the above is a lot. But once we've coded it below, it'll start to make sense.
We're going to go through things in the following order:
```
Original dataset (e.g. train_data) -> map() -> shuffle() -> batch() -> prefetch() -> PrefetchDataset
```
This is like saying,
> "Hey, map this preprocessing function across our training dataset, then shuffle a number of elements before batching them together and make sure you prepare new batches (prefetch) whilst the model is looking through the current batch".

*What happens when you use prefetching (faster) versus what happens when you don't use prefetching (slower). **Source:** Page 422 of [Hands-On Machine Learning with Scikit-Learn, Keras & TensorFlow Book by Aurélien Géron](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/).*
```
# Prepare the training data: preprocess in parallel, shuffle within a
# 1000-sample buffer, batch into groups of 32, and prefetch upcoming
# batches while the current one is being used (faster loading)
train_data = (train_data
              .map(map_func=preprocess_img, num_parallel_calls=tf.data.AUTOTUNE)
              .shuffle(buffer_size=1000)
              .batch(batch_size=32)
              .prefetch(buffer_size=tf.data.AUTOTUNE))
# Prepare the test data the same way, minus shuffling (evaluation
# order doesn't matter)
test_data = (test_data
             .map(preprocess_img, num_parallel_calls=tf.data.AUTOTUNE)
             .batch(32)
             .prefetch(tf.data.AUTOTUNE))
```
And now let's check out what our prepared datasets look like.
```
train_data, test_data
```
Excellent! Looks like our data is now in tuples of `(image, label)` with datatypes of `(tf.float32, tf.int64)`, just what our model is after.
> 🔑 **Note:** You can get away without calling the `prefetch()` method on the end of your datasets, however, you'd probably see significantly slower data loading speeds when building a model. So most of your dataset input pipelines should end with a call to [`prefetch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch).
Onward.
## Create modelling callbacks
Since we're going to be training on a large amount of data and training could take a long time, it's a good idea to set up some modelling callbacks so we can be sure of things like our model's training logs being tracked and our model being checkpointed (saved) after various training milestones.
To do each of these we'll use the following callbacks:
* [`tf.keras.callbacks.TensorBoard()`](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) - allows us to keep track of our model's training history so we can inspect it later (**note:** we've created this callback before and have imported it from `helper_functions.py` as `create_tensorboard_callback()`)
* [`tf.keras.callbacks.ModelCheckpoint()`](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint) - saves our model's progress at various intervals so we can load it and reuse it later without having to retrain it
* Checkpointing is also helpful so we can start fine-tuning our model at a particular epoch and revert back to a previous state if fine-tuning offers no benefits
```
# Create TensorBoard callback (already have "create_tensorboard_callback()" from a previous notebook)
from helper_functions import create_tensorboard_callback
# Create ModelCheckpoint callback to save model's progress
checkpoint_path = "model_checkpoints/cp.ckpt" # saving weights requires ".ckpt" extension
# Fix: the keyword is "monitor" (was misspelled "montior", so the metric to
# track for best weights was silently ignored)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                      monitor="val_accuracy", # save the model weights with best validation accuracy
                                                      save_best_only=True, # only save the best weights
                                                      save_weights_only=True, # only save model weights (not whole model)
                                                      verbose=1) # print a message whenever model weights are saved
```
## Setup mixed precision training
We touched on mixed precision training above.
However, we didn't quite explain it.
Normally, tensors in TensorFlow default to the float32 datatype (unless otherwise specified).
In computer science, float32 is also known as [single-precision floating-point format](https://en.wikipedia.org/wiki/Single-precision_floating-point_format). The 32 means it usually occupies 32 bits in computer memory.
Your GPU has a limited memory, therefore it can only handle a number of float32 tensors at the same time.
This is where mixed precision training comes in.
Mixed precision training involves using a mix of float16 and float32 tensors to make better use of your GPU's memory.
Can you guess what float16 means?
Well, if you thought since float32 meant single-precision floating-point, you might've guessed float16 means [half-precision floating-point format](https://en.wikipedia.org/wiki/Half-precision_floating-point_format). And if you did, you're right! And if not, no trouble, now you know.
For tensors in float16 format, each element occupies 16 bits in computer memory.
So, where does this leave us?
As mentioned before, when using mixed precision training, your model will make use of float32 and float16 data types to use less memory where possible and in turn run faster (using less memory per tensor means more tensors can be computed on simultaneously).
As a result, using mixed precision training can improve your performance on modern GPUs (those with a compute capability score of 7.0+) by up to 3x.
For a more detailed explanation, I encourage you to read through the [TensorFlow mixed precision guide](https://www.tensorflow.org/guide/mixed_precision) (I'd highly recommend at least checking out the summary).

*Because mixed precision training uses a combination of float32 and float16 data types, you may see up to a 3x speedup on modern GPUs.*
> 🔑 **Note:** If your GPU doesn't have a score of over 7.0+ (e.g. P100 in Colab), mixed precision won't work (see: ["Supported Hardware"](https://www.tensorflow.org/guide/mixed_precision#supported_hardware) in the mixed precision guide for more).
> 📖 **Resource:** If you'd like to learn more about precision in computer science (the detail to which a numerical quantity is expressed by a computer), see the [Wikipedia page](https://en.wikipedia.org/wiki/Precision_(computer_science)) (and accompanying resources).
Okay, enough talk, let's see how we can turn on mixed precision training in TensorFlow.
The beautiful thing is, the [`tensorflow.keras.mixed_precision`](https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/) API has made it very easy for us to get started.
First, we'll import the API and then use the [`set_global_policy()`](https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/set_global_policy) method to set the *dtype policy* to `"mixed_float16"`.
```
# Turn on mixed precision training
from tensorflow.keras import mixed_precision
mixed_precision.set_global_policy(policy="mixed_float16") # set global policy to mixed precision
```
Nice! As long as the GPU you're using has a compute capability of 7.0+ the cell above should run without error.
Now we can check the global dtype policy (the policy which will be used by layers in our model) using the [`mixed_precision.global_policy()`](https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/global_policy) method.
```
mixed_precision.global_policy() # should output "mixed_float16"
```
Great, since the global dtype policy is now `"mixed_float16"` our model will automatically take advantage of float16 variables where possible and in turn speed up training.
## Build feature extraction model
Callbacks: ready to roll.
Mixed precision: turned on.
Let's build a model.
Because our dataset is quite large, we're going to move towards fine-tuning an existing pretrained model (EfficientNetB0).
But before we get into fine-tuning, let's set up a feature-extraction model.
Recall, the typical order for using transfer learning is:
1. Build a feature extraction model (replace the top few layers of a pretrained model)
2. Train for a few epochs with lower layers frozen
3. Fine-tune if necessary with multiple layers unfrozen

*Before fine-tuning, it's best practice to train a feature extraction model with custom top layers.*
To build the feature extraction model (covered in [Transfer Learning in TensorFlow Part 1: Feature extraction](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/04_transfer_learning_in_tensorflow_part_1_feature_extraction.ipynb)), we'll:
* Use `EfficientNetB0` from [`tf.keras.applications`](https://www.tensorflow.org/api_docs/python/tf/keras/applications) pre-trained on ImageNet as our base model
* We'll download this without the top layers using `include_top=False` parameter so we can create our own output layers
* Freeze the base model layers so we can use the pre-learned patterns the base model has found on ImageNet
* Put together the input, base model, pooling and output layers in a [Functional model](https://keras.io/guides/functional_api/)
* Compile the Functional model using the Adam optimizer and [sparse categorical crossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/SparseCategoricalCrossentropy) as the loss function (since our labels **aren't** one-hot encoded)
* Fit the model for 3 epochs using the TensorBoard and ModelCheckpoint callbacks
> 🔑 **Note:** Since we're using mixed precision training, our model needs a separate output layer with a hard-coded `dtype=float32`, for example, `layers.Activation("softmax", dtype=tf.float32)`. This ensures the outputs of our model are returned back to the float32 data type which is more numerically stable than the float16 datatype (important for loss calculations). See the ["Building the model"](https://www.tensorflow.org/guide/mixed_precision#building_the_model) section in the TensorFlow mixed precision guide for more.

*Turning mixed precision on in TensorFlow with 3 lines of code.*
```
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# Create base model
input_shape = (224, 224, 3)
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False # freeze base model layers
# Create Functional model
# Fix: leave the input layer at its default dtype rather than hard-coding
# dtype=tf.float16 — the "mixed_float16" global policy already handles
# float16 casting inside the model (see the TF mixed precision guide)
inputs = layers.Input(shape=input_shape, name="input_layer")
# Note: EfficientNetBX models have rescaling built-in but if your model didn't you could have a layer like below
# x = preprocessing.Rescaling(1./255)(x)
x = base_model(inputs, training=False) # set base_model to inference mode only
x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
x = layers.Dense(len(class_names))(x) # want one output neuron per class
# Separate activation of output layer so we can output float32 activations
# (numerically stable for the loss calculation under mixed precision)
outputs = layers.Activation("softmax", dtype=tf.float32, name="softmax_float32")(x)
model = tf.keras.Model(inputs, outputs)
# Compile the model
model.compile(loss="sparse_categorical_crossentropy", # Use sparse_categorical_crossentropy when labels are *not* one-hot
              optimizer=tf.keras.optimizers.Adam(),
              metrics=["accuracy"])
# Check out our model
model.summary()
```
## Checking layer dtype policies (are we using mixed precision?)
Model ready to go!
Before we said the mixed precision API will automatically change our layers' dtype policy's to whatever the global dtype policy is (in our case it's `"mixed_float16"`).
We can check this by iterating through our model's layers and printing layer attributes such as `dtype` and `dtype_policy`.
```
# Check the dtype_policy attributes of layers in our model
for layer in model.layers:
print(layer.name, layer.trainable, layer.dtype, layer.dtype_policy) # Check the dtype policy of layers
```
Going through the above we see:
* `layer.name` (str) : a layer's human-readable name, can be defined by the `name` parameter on construction
* `layer.trainable` (bool) : whether or not a layer is trainable (all of our layers are trainable except the efficientnetb0 layer since we set its `trainable` attribute to `False`)
* `layer.dtype` : the data type a layer stores its variables in
* `layer.dtype_policy` : the data type a layer computes in
> 🔑 **Note:** A layer can have a dtype of `float32` and a dtype policy of `"mixed_float16"` because it stores its variables (weights & biases) in `float32` (more numerically stable), however it computes in `float16` (faster).
We can also check the same details for our model's base model.
```
# Check the layers in the base model and see what dtype policy they're using
for layer in model.layers[1].layers[:20]: # only check the first 20 layers to save output space
print(layer.name, layer.trainable, layer.dtype, layer.dtype_policy)
```
> 🔑 **Note:** The mixed precision API automatically causes layers which can benefit from using the `"mixed_float16"` dtype policy to use it. It also prevents layers which shouldn't use it from using it (e.g. the normalization layer at the start of the base model).
## Fit the feature extraction model
Now that's one good looking model. Let's fit it to our data shall we?
Three epochs should be enough for our top layers to adjust their weights enough to our food image data.
To save time per epoch, we'll also only validate on 15% of the test data.
```
# Fit the model with callbacks
# Fit for 3 epochs, validating on only 15% of the test batches per epoch
# to keep epochs fast (full evaluation on all of test_data happens later);
# TensorBoard logs training curves, ModelCheckpoint saves the best weights
history_101_food_classes_feature_extract = model.fit(train_data,
epochs=3,
steps_per_epoch=len(train_data),
validation_data=test_data,
validation_steps=int(0.15 * len(test_data)),
callbacks=[create_tensorboard_callback("training_logs",
"efficientnetb0_101_classes_all_data_feature_extract"),
model_checkpoint])
```
Nice, looks like our feature extraction model is performing pretty well. How about we evaluate it on the whole test dataset?
```
# Evaluate model (unsaved version) on whole test dataset
results_feature_extract_model = model.evaluate(test_data)
results_feature_extract_model
```
And since we used the `ModelCheckpoint` callback, we've got a saved version of our model in the `model_checkpoints` directory.
Let's load it in and make sure it performs just as well.
## Load and evaluate checkpoint weights
We can load in and evaluate our model's checkpoints by:
1. Cloning our model using [`tf.keras.models.clone_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/clone_model) to make a copy of our feature extraction model with reset weights.
2. Calling the `load_weights()` method on our cloned model passing it the path to where our checkpointed weights are stored.
3. Calling `evaluate()` on the cloned model with loaded weights.
A reminder, checkpoints are helpful for when you perform an experiment such as fine-tuning your model. In the case you fine-tune your feature extraction model and find it doesn't offer any improvements, you can always revert back to the checkpointed version of your model.
```
# Clone the model we created (this resets all weights)
cloned_model = tf.keras.models.clone_model(model)
cloned_model.summary()
!ls model_checkpoints/
# Where are our checkpoints stored?
checkpoint_path
# Load checkpointed weights into cloned_model
cloned_model.load_weights(checkpoint_path)
```
Each time you make a change to your model (including loading weights), you have to recompile.
```
# Compile cloned_model (with same parameters as original model)
cloned_model.compile(loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
# Evaluate cloned model with loaded weights (should be same score as trained model)
results_cloned_model_with_loaded_weights = cloned_model.evaluate(test_data)
```
Our cloned model with loaded weight's results should be very close to the feature extraction model's results (if the cell below errors, something went wrong).
```
# Loaded checkpoint weights should return very similar results to checkpoint weights prior to saving
import numpy as np
assert np.isclose(results_feature_extract_model, results_cloned_model_with_loaded_weights).all() # check if all elements in array are close
```
Cloning the model preserves `dtype_policy`'s of layers (but doesn't preserve weights) so if we wanted to continue fine-tuning with the cloned model, we could and it would still use the mixed precision dtype policy.
```
# Check the layers in the base model and see what dtype policy they're using
for layer in cloned_model.layers[1].layers[:20]: # check only the first 20 layers to save space
print(layer.name, layer.trainable, layer.dtype, layer.dtype_policy)
```
## Save the whole model to file
We can also save the whole model using the [`save()`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#save) method.
Since our model is quite large, you might want to save it to Google Drive (if you're using Google Colab) so you can load it in for use later.
> 🔑 **Note:** Saving to Google Drive requires mounting Google Drive (go to Files -> Mount Drive).
```
# ## Saving model to Google Drive (optional)
# # Create save path to drive
# save_dir = "drive/MyDrive/tensorflow_course/food_vision/07_efficientnetb0_feature_extract_model_mixed_precision/"
# # os.makedirs(save_dir) # Make directory if it doesn't exist
# # Save model
# model.save(save_dir)
```
We can also save it directly to our Google Colab instance.
> 🔑 **Note:** Google Colab storage is ephemeral and your model will delete itself (along with any other saved files) when the Colab session expires.
```
# Save model locally (if you're using Google Colab, the saved model will be lost when the Colab instance terminates)
save_dir = "07_efficientnetb0_feature_extract_model_mixed_precision"
model.save(save_dir)
```
And again, we can check whether or not our model saved correctly by loading it in and evaluating it.
```
# Load model previously saved above
loaded_saved_model = tf.keras.models.load_model(save_dir)
```
Loading a `SavedModel` also retains all of the underlying layers `dtype_policy` (we want them to be `"mixed_float16"`).
```
# Check the layers in the base model and see what dtype policy they're using
for layer in loaded_saved_model.layers[1].layers[:20]: # check only the first 20 layers to save output space
print(layer.name, layer.trainable, layer.dtype, layer.dtype_policy)
# Check loaded model performance (this should be the same as results_feature_extract_model)
results_loaded_saved_model = loaded_saved_model.evaluate(test_data)
results_loaded_saved_model
# The loaded model's results should equal (or at least be very close) to the model's results prior to saving
# Note: this will only work if you've instantiated the results variables above
import numpy as np
assert np.isclose(results_feature_extract_model, results_loaded_saved_model).all()
```
That's what we want! Our loaded model performing as it should.
> 🔑 **Note:** We spent a fair bit of time making sure our model saved correctly because training on a lot of data can be time-consuming, so we want to make sure we don't have to continually train from scratch.
## Preparing our model's layers for fine-tuning
Our feature-extraction model is showing some great promise after three epochs. But since we've got so much data, it's probably worthwhile that we see what results we can get with fine-tuning (fine-tuning usually works best when you've got quite a large amount of data).
Remember our goal of beating the [DeepFood paper](https://arxiv.org/pdf/1606.05675.pdf)?
They were able to achieve 77.4% top-1 accuracy on Food101 over 2-3 days of training.
Do you think fine-tuning will get us there?
Let's find out.
To start, let's load in our saved model.
> 🔑 **Note:** It's worth remembering a traditional workflow for fine-tuning is to freeze a pre-trained base model and then train only the output layers for a few iterations so their weights can be updated inline with your custom data (feature extraction). And then unfreeze a number or all of the layers in the base model and continue training until the model stops improving.
Like all good cooking shows, I've saved a model I prepared earlier (the feature extraction model from above) to Google Storage.
We can download it to make sure we're using the same model going forward.
```
# Download the saved model from Google Storage
!wget https://storage.googleapis.com/ztm_tf_course/food_vision/07_efficientnetb0_feature_extract_model_mixed_precision.zip
# Unzip the SavedModel downloaded from Google Storage
!mkdir downloaded_gs_model # create new dir to store downloaded feature extraction model
!unzip 07_efficientnetb0_feature_extract_model_mixed_precision.zip -d downloaded_gs_model
# Load and evaluate downloaded GS model
tf.get_logger().setLevel('INFO') # hide warning logs
loaded_gs_model = tf.keras.models.load_model("downloaded_gs_model/07_efficientnetb0_feature_extract_model_mixed_precision")
# Get a summary of our downloaded model
loaded_gs_model.summary()
```
And now let's make sure our loaded model is performing as expected.
```
# How does the loaded model perform?
results_loaded_gs_model = loaded_gs_model.evaluate(test_data)
results_loaded_gs_model
```
Great, our loaded model is performing as expected.
When we first created our model, we froze all of the layers in the base model by setting `base_model.trainable=False` but since we've loaded in our model from file, let's check whether or not the layers are trainable or not.
```
# Are any of the layers in our model frozen?
for layer in loaded_gs_model.layers:
layer.trainable = True # set all layers to trainable
print(layer.name, layer.trainable, layer.dtype, layer.dtype_policy) # make sure loaded model is using mixed precision dtype_policy ("mixed_float16")
```
Alright, it seems like each layer in our loaded model is trainable. But what if we got a little deeper and inspected each of the layers in our base model?
> 🤔 **Question:** *Which layer in the loaded model is our base model?*
Before saving the Functional model to file, we created it with five layers (layers below are 0-indexed):
0. The input layer
1. The pre-trained base model layer (`tf.keras.applications.EfficientNetB0`)
2. The pooling layer
3. The fully-connected (dense) layer
4. The output softmax activation (with float32 dtype)
Therefore to inspect our base model layer, we can access the `layers` attribute of the layer at index 1 in our model.
```
# Check the layers in the base model and see what dtype policy they're using
for layer in loaded_gs_model.layers[1].layers[:20]:
print(layer.name, layer.trainable, layer.dtype, layer.dtype_policy)
```
Wonderful, it looks like each layer in our base model is trainable (unfrozen) and every layer which should be using the dtype policy `"mixed_float16"` is using it.
Since we've got so much data (750 images x 101 training classes = 75750 training images), let's keep all of our base model's layers unfrozen.
> 🔑 **Note:** If you've got a small amount of data (less than 100 images per class), you may want to only unfreeze and fine-tune a small number of layers in the base model at a time. Otherwise, you risk overfitting.
## A couple more callbacks
We're about to start fine-tuning a deep learning model with over 200 layers using over 100,000 (75k+ training, 25K+ testing) images, which means our model's training time is probably going to be much longer than before.
> 🤔 **Question:** *How long does training take?*
It could be a couple of hours or in the case of the [DeepFood paper](https://arxiv.org/pdf/1606.05675.pdf) (the baseline we're trying to beat), their best performing model took 2-3 days of training time.
You will really only know how long it'll take once you start training.
> 🤔 **Question:** *When do you stop training?*
Ideally, when your model stops improving. But again, due to the nature of deep learning, it can be hard to know when exactly a model will stop improving.
Luckily, there's a solution: the [`EarlyStopping` callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping).
The `EarlyStopping` callback monitors a specified model performance metric (e.g. `val_loss`) and when it stops improving for a specified number of epochs, automatically stops training.
Using the `EarlyStopping` callback combined with the `ModelCheckpoint` callback saving the best performing model automatically, we could keep our model training for an unlimited number of epochs until it stops improving.
Let's set both of these up to monitor our model's `val_loss`.
```
# Setup EarlyStopping callback to stop training if model's val_loss doesn't improve for 3 epochs
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="val_loss", # watch the val loss metric
                                                  patience=3) # if val loss doesn't improve for 3 epochs in a row, stop training
# Create ModelCheckpoint callback to save best model during fine-tuning
checkpoint_path = "fine_tune_checkpoints/"
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                      save_best_only=True, # only keep the best (lowest val_loss) model seen so far
                                                      monitor="val_loss")
```
Woohoo! Fine-tuning callbacks ready.
If you're planning on training large models, the `ModelCheckpoint` and `EarlyStopping` are two callbacks you'll want to become very familiar with.
We're almost ready to start fine-tuning our model but there's one more callback we're going to implement: [`ReduceLROnPlateau`](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ReduceLROnPlateau).
Remember how the learning rate is the most important model hyperparameter you can tune? (if not, treat this as a reminder).
Well, the `ReduceLROnPlateau` callback helps to tune the learning rate for you.
Like the `ModelCheckpoint` and `EarlyStopping` callbacks, the `ReduceLROnPlateau` callback monitors a specified metric and when that metric stops improving, it reduces the learning rate by a specified factor (e.g. divides the learning rate by 10).
> 🤔 **Question:** *Why lower the learning rate?*
Imagine having a coin at the back of the couch and you're trying to grab it with your fingers.
Now think of the learning rate as the size of the movements your hand makes towards the coin.
The closer you get, the smaller you want your hand movements to be, otherwise the coin will be lost.
Our model's ideal performance is the equivalent of grabbing the coin. So as training goes on and our model gets closer and closer to its ideal performance (also called **convergence**), we want the amount it learns to be less and less.
To do this we'll create an instance of the `ReduceLROnPlateau` callback to monitor the validation loss just like the `EarlyStopping` callback.
Once the validation loss stops improving for two or more epochs, we'll reduce the learning rate by a factor of 5 (e.g. `0.001` to `0.0002`).
And to make sure the learning rate doesn't get too low (and potentially result in our model learning nothing), we'll set the minimum learning rate to `1e-7`.
```
# Creating learning rate reduction callback
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss",
factor=0.2, # multiply the learning rate by 0.2 (reduce by 5x)
patience=2,
verbose=1, # print out when learning rate goes down
min_lr=1e-7)
```
Learning rate reduction ready to go!
Now before we start training, we've got to recompile our model.
We'll use sparse categorical crossentropy as the loss and since we're fine-tuning, we'll use a 10x lower learning rate than the Adam optimizers default (`1e-4` instead of `1e-3`).
```
# Compile the model
loaded_gs_model.compile(loss="sparse_categorical_crossentropy", # sparse_categorical_crossentropy for labels that are *not* one-hot
optimizer=tf.keras.optimizers.Adam(0.0001), # 10x lower learning rate than the default
metrics=["accuracy"])
```
Okay, model compiled.
Now let's fit it on all of the data.
We'll set it up to run for up to 100 epochs.
Since we're going to be using the `EarlyStopping` callback, it might stop before reaching 100 epochs.
> 🔑 **Note:** Running the cell below will set the model up to fine-tune all of the pre-trained weights in the base model on all of the Food101 data. Doing so with **unoptimized** data pipelines and **without** mixed precision training will take a fairly long time per epoch depending on what type of GPU you're using (about 15-20 minutes on Colab GPUs). But don't worry, **the code we've written above will ensure it runs much faster** (more like 4-5 minutes per epoch).
```
# Start to fine-tune (all layers)
history_101_food_classes_all_data_fine_tune = loaded_gs_model.fit(train_data,
epochs=100, # fine-tune for a maximum of 100 epochs
steps_per_epoch=len(train_data),
validation_data=test_data,
validation_steps=int(0.15 * len(test_data)), # validation during training on 15% of test data
callbacks=[create_tensorboard_callback("training_logs", "efficientb0_101_classes_all_data_fine_tuning"), # track the model training logs
model_checkpoint, # save only the best model during training
early_stopping, # stop model after X epochs of no improvements
reduce_lr]) # reduce the learning rate after X epochs of no improvements
```
> 🔑 **Note:** If you didn't use mixed precision or use techniques such as [`prefetch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) in the *Batch & prepare datasets* section, your model fine-tuning probably takes up to 2.5-3x longer per epoch (see the output below for an example).
| | Prefetch and mixed precision | No prefetch and no mixed precision |
|-----|-----|-----|
| Time per epoch | ~280-300s | ~1127-1397s |
*Results from fine-tuning Food Vision Big™ on the Food101 dataset using an EfficientNetB0 backbone on a Google Colab Tesla T4 GPU.*
```
Saving TensorBoard log files to: training_logs/efficientB0_101_classes_all_data_fine_tuning/20200928-013008
Epoch 1/100
2368/2368 [==============================] - 1397s 590ms/step - loss: 1.2068 - accuracy: 0.6820 - val_loss: 1.1623 - val_accuracy: 0.6894
Epoch 2/100
2368/2368 [==============================] - 1193s 504ms/step - loss: 0.9459 - accuracy: 0.7444 - val_loss: 1.1549 - val_accuracy: 0.6872
Epoch 3/100
2368/2368 [==============================] - 1143s 482ms/step - loss: 0.7848 - accuracy: 0.7838 - val_loss: 1.0402 - val_accuracy: 0.7142
Epoch 4/100
2368/2368 [==============================] - 1127s 476ms/step - loss: 0.6599 - accuracy: 0.8149 - val_loss: 0.9599 - val_accuracy: 0.7373
```
*Example fine-tuning time for non-prefetched data as well as non-mixed precision training (~2.5-3x longer per epoch).*
Let's make sure we save our model before we start evaluating it.
```
# # Save model to Google Drive (optional)
# loaded_gs_model.save("/content/drive/MyDrive/tensorflow_course/food_vision/07_efficientnetb0_fine_tuned_101_classes_mixed_precision/")
# Save model locally (note: if you're using Google Colab and you save your model locally, it will be deleted when your Google Colab session ends)
loaded_gs_model.save("07_efficientnetb0_fine_tuned_101_classes_mixed_precision")
```
Looks like our model has gained a few performance points from fine-tuning, let's evaluate on the whole test dataset and see if we managed to beat the [DeepFood paper's](https://arxiv.org/abs/1606.05675) result of 77.4% accuracy.
```
# Evaluate mixed precision trained loaded model
results_loaded_gs_model_fine_tuned = loaded_gs_model.evaluate(test_data)
results_loaded_gs_model_fine_tuned
```
Woohoo!!!! It looks like our model beat the results mentioned in the DeepFood paper for Food101 (DeepFood's 77.4% top-1 accuracy versus our ~79% top-1 accuracy).
## Download fine-tuned model from Google Storage
As mentioned before, training models can take a significant amount of time.
And again, like any good cooking show, here's something we prepared earlier...
It's a fine-tuned model exactly like the one we trained above but it's saved to Google Storage so it can be accessed, imported and evaluated.
```
# Download and evaluate fine-tuned model from Google Storage
!wget https://storage.googleapis.com/ztm_tf_course/food_vision/07_efficientnetb0_fine_tuned_101_classes_mixed_precision.zip
```
The downloaded model comes in zip format (`.zip`) so we'll unzip it into the Google Colab instance.
```
# Unzip fine-tuned model
!mkdir downloaded_fine_tuned_gs_model # create separate directory for fine-tuned model downloaded from Google Storage
!unzip 07_efficientnetb0_fine_tuned_101_classes_mixed_precision -d downloaded_fine_tuned_gs_model
```
Now we can load it using the [`tf.keras.models.load_model()`](https://www.tensorflow.org/tutorials/keras/save_and_load) method and get a summary (it should be the exact same as the model we created above).
```
# Load in fine-tuned model from Google Storage and evaluate
loaded_fine_tuned_gs_model = tf.keras.models.load_model("downloaded_fine_tuned_gs_model/07_efficientnetb0_fine_tuned_101_classes_mixed_precision")
# Get a model summary (same model architecture as above)
loaded_fine_tuned_gs_model.summary()
```
Finally, we can evaluate our model on the test data (this requires the `test_data` variable to be loaded).
```
# Note: Even if you're loading in the model from Google Storage, you will still need to load the test_data variable for this cell to work
results_downloaded_fine_tuned_gs_model = loaded_fine_tuned_gs_model.evaluate(test_data)
results_downloaded_fine_tuned_gs_model
```
Excellent! Our saved model is performing as expected (better results than the DeepFood paper!).
Congratulations! You should be excited! You just trained a computer vision model with competitive performance to a research paper and in far less time (our model took ~20 minutes to train versus DeepFood's quoted 2-3 days).
In other words, you brought Food Vision to life!
If you really wanted to step things up, you could try using the [`EfficientNetB4`](https://www.tensorflow.org/api_docs/python/tf/keras/applications/EfficientNetB4) model (a larger version of `EfficientNetB0`). At the time of writing, the EfficientNet family has the [state of the art classification results](https://paperswithcode.com/sota/fine-grained-image-classification-on-food-101) on the Food101 dataset.
> 📖 **Resource:** To see which models are currently performing the best on a given dataset or problem type as well as the latest trending machine learning research, be sure to check out [paperswithcode.com](http://paperswithcode.com/) and [sotabench.com](https://sotabench.com/).
## View training results on TensorBoard
Since we tracked our model's fine-tuning training logs using the `TensorBoard` callback, let's upload them and inspect them on TensorBoard.dev.
```
# !tensorboard dev upload --logdir ./training_logs \
# --name "Fine-tuning EfficientNetB0 on all Food101 Data" \
# --description "Training results for fine-tuning EfficientNetB0 on Food101 Data with learning rate 0.0001" \
```
Viewing our [model's training curves on TensorBoard.dev](https://tensorboard.dev/experiment/2KINdYxgSgW2bUg7dIvevw/), it looks like our fine-tuned model gains a boost in performance but starts to overfit as training goes on.
See the training curves on TensorBoard.dev here: https://tensorboard.dev/experiment/2KINdYxgSgW2bUg7dIvevw/
To fix this, in future experiments, we might try things like:
* A different iteration of `EfficientNet` (e.g. `EfficientNetB4` instead of `EfficientNetB0`).
* Unfreezing less layers of the base model and training them rather than unfreezing the whole base model in one go.
You can also view and delete past experiments on TensorBoard.dev with the following commands.
```
# View past TensorBoard experiments
# !tensorboard dev list
# Delete past TensorBoard experiments
# !tensorboard dev delete --experiment_id YOUR_EXPERIMENT_ID
# Example
# !tensorboard dev delete --experiment_id OAE6KXizQZKQxDiqI3cnUQ
```
## Exercises
1. Use the same evaluation techniques on the large-scale Food Vision model as you did in the previous notebook ([Transfer Learning Part 3: Scaling up](https://github.com/mrdbourke/tensorflow-deep-learning/blob/main/06_transfer_learning_in_tensorflow_part_3_scaling_up.ipynb)). More specifically, it would be good to see:
* A confusion matrix between all of the model's predictions and true labels.
* A graph showing the f1-scores of each class.
* A visualization of the model making predictions on various images and comparing the predictions to the ground truth.
* For example, plot a sample image from the test dataset and have the title of the plot show the prediction, the prediction probability and the ground truth label.
2. Take 3 of your own photos of food and use the Food Vision model to make predictions on them. How does it go? Share your images/predictions with the other students.
3. Retrain the model (feature extraction and fine-tuning) we trained in this notebook, except this time use [`EfficientNetB4`](https://www.tensorflow.org/api_docs/python/tf/keras/applications/EfficientNetB4) as the base model instead of `EfficientNetB0`. Do you notice an improvement in performance? Does it take longer to train? Are there any tradeoffs to consider?
4. Name one important benefit of mixed precision training, how does this benefit take place?
## Extra-curriculum
* Read up on learning rate scheduling and the [learning rate scheduler callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LearningRateScheduler). What is it? And how might it be helpful to this project?
* Read up on TensorFlow data loaders ([improving TensorFlow data loading performance](https://www.tensorflow.org/guide/data_performance)). Is there anything we've missed? What methods should you keep in mind whenever loading data in TensorFlow? Hint: check the summary at the bottom of the page for a great round up of ideas.
* Read up on the documentation for [TensorFlow mixed precision training](https://www.tensorflow.org/guide/mixed_precision). What are the important things to keep in mind when using mixed precision training?
| github_jupyter |
# CPSC 330 hw7
```
import numpy as np
import pandas as pd
### BEGIN SOLUTION
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import r2_score
### END SOLUTION
```
## Instructions
rubric={points:5}
Follow the [homework submission instructions](https://github.students.cs.ubc.ca/cpsc330-2019w-t2/home/blob/master/docs/homework_instructions.md).
## Exercise 1: time series prediction
In this exercise we'll be looking at a [dataset of avocado prices](https://www.kaggle.com/neuromusic/avocado-prices). You should start by downloading the dataset. As usual, please do not commit it to your repos.
```
df = pd.read_csv("avocado.csv", parse_dates=["Date"], index_col=0)
df.head()
df.shape
df["Date"].min()
df["Date"].max()
```
It looks like the data ranges from the start of 2015 to March 2018 (~2 years ago), for a total of 3.25 years or so. Let's split the data so that we have a 6 months of test data.
```
split_date = '20170925'
df_train = df[df["Date"] <= split_date]
df_test = df[df["Date"] > split_date]
assert len(df_train) + len(df_test) == len(df)
```
#### 1(a)
rubric={points:3}
In the Rain is Australia dataset from Lecture 16, we had different measurements for each Location. What about this dataset: for which categorical feature(s), if any, do we have separate measurements? Justify your answer by referencing the dataset.
### BEGIN SOLUTION
```
df.sort_values(by="Date").head()
```
From the above, we definitely see measurements on the same day in different regions. Let's now group by region.
```
df.sort_values(by=["region", "Date"]).head()
```
From the above we see that, even in Albany, we have two measurements on the same date. This seems to be due to the type of avocado.
```
df.sort_values(by=["region", "type", "Date"]).head()
```
Great, now we have a sequence of dates with a single row per date. So, the answer is that we have a separate timeseries for each combination of `region` and `type`.
### END SOLUTION
#### 1(b)
rubric={points:3}
In the Rain in Australia dataset, the measurements were generally equally spaced but with some exceptions. How about with this dataset? Justify your answer by referencing the dataset.
### BEGIN SOLUTION
I think it's not unreasonable to do this on `df` rather than `df_train`, but either way is fine.
```
for name, group in df.groupby(['region', 'type']):
print("%-40s %s" % (name, group["Date"].sort_values().diff().min()))
for name, group in df.groupby(['region', 'type']):
print("%-40s %s" % (name, group["Date"].sort_values().diff().max()))
```
It looks almost perfect - just organic avocados in WestTexNewMexico seems to be missing a couple measurements.
```
name
group["Date"].sort_values().diff().value_counts()
```
So, in one case there's a 2-week jump, and in one case there's a 3-week jump.
```
group["Date"].sort_values().reset_index(drop=True).diff().sort_values()
```
We can see the anomalies occur at index 48 and 127. (Note: I had to `reset_index` because the index was not unique to each row.)
```
group["Date"].sort_values().reset_index(drop=True)[45:50]
```
We can spot the first anomaly: a 2-week jump from Nov 29, 2015 to Dec 13, 2015.
```
group["Date"].sort_values().reset_index(drop=True)[125:130]
```
And we can spot the second anomaly: a 3-week jump from June 11, 2017 to July 2, 2017.
### END SOLUTION
#### 1(c)
rubric={points:1}
In the Rain is Australia dataset, each location was a different place in Australia. For this dataset, look at the names of the regions. Do you think the regions are also all distinct, or are there overlapping regions? Justify your answer by referencing the data.
### BEGIN SOLUTION
```
df["region"].unique()
```
There seems to be a hierarchical structure here: `TotalUS` is split into bigger regions like `West`, `Southeast`, `Northeast`, `Midsouth`; and `California` is split into cities like `Sacramento`, `SanDiego`, `LosAngeles`. It's a bit hard to figure out what's going on.
```
df.query("region == 'TotalUS' and type == 'conventional' and Date == '20150104'")["Total Volume"].values[0]
df.query("region != 'TotalUS' and type == 'conventional' and Date == '20150104'")["Total Volume"].sum()
```
Since the individual regions sum up to more than the total US, it seems that some of the other regions are double-counted, which is consistent with a hierarchical structure. For example, Los Angeles is probably double counted because it's within `LosAngeles` but also within `California`. What a mess!
### END SOLUTION
We will use the entire dataset despite any location-based weirdness uncovered in the previous part.
We will be trying to forecast the avocado price, which is the `AveragePrice` column. The function below is adapted from Lecture 16, with some improvements.
```
def create_lag_feature(df, orig_feature, lag, groupby, new_feature_name=None, clip=False):
    """
    Creates a new feature that's a lagged version of an existing one.

    NOTE: assumes df is already sorted by the time columns and has unique indices.

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        The dataset.
    orig_feature : str
        The column name of the feature we're copying
    lag : int
        The lag; negative lag means values from the past, positive lag means
        values from the future. Must be nonzero.
    groupby : list
        Column(s) to group by in case df contains multiple time series
    new_feature_name : str
        Override the default name of the newly created column
    clip : bool
        If True, remove rows with a NaN values for the new feature

    Returns
    -------
    pandas.core.frame.DataFrame
        A new dataframe with the additional column added.

    Raises
    ------
    ValueError
        If lag is 0. (Previously a zero lag silently produced an all-NaN
        "_ahead0" column, because group.index[:-0] is empty.)
    """
    if lag == 0:
        raise ValueError("lag must be nonzero")
    if new_feature_name is None:
        if lag < 0:
            new_feature_name = "%s_lag%d" % (orig_feature, -lag)
        else:
            new_feature_name = "%s_ahead%d" % (orig_feature, lag)
    # Start with an all-NaN column; rows with no lagged counterpart stay NaN.
    new_df = df.assign(**{new_feature_name : np.nan})
    for name, group in new_df.groupby(groupby):
        if lag < 0: # take values from the past
            new_df.loc[group.index[-lag:],new_feature_name] = group.iloc[:lag][orig_feature].values
        else: # take values from the future
            new_df.loc[group.index[:-lag], new_feature_name] = group.iloc[lag:][orig_feature].values
    if clip:
        # Drop the rows where the lagged value could not be filled in.
        new_df = new_df.dropna(subset=[new_feature_name])
    return new_df
```
We first sort our dataframe properly:
```
df_sort = df.sort_values(by=["region", "type", "Date"]).reset_index(drop=True)
df_sort
```
We then call `create_lag_feature`. This creates a new column in the dataset `AveragePriceNextWeek`, which is the following week's `AveragePrice`. We have set `clip=True` which means it will remove rows where the target would be missing.
```
df_hastarget = create_lag_feature(df_sort, "AveragePrice", +1, ["region", "type"], "AveragePriceNextWeek", clip=True)
df_hastarget
```
I will now split the data:
```
df_train = df_hastarget[df_hastarget["Date"] <= split_date]
df_test = df_hastarget[df_hastarget["Date"] > split_date]
```
#### 1(d)
rubric={points:1}
Why was it reasonable for me to do this operation _before_ splitting the data, despite the fact that this usually constitutes a violation of the Golden Rule?
### BEGIN SOLUTION
Because we were only looking at the dates and creating the future feature. The difference is that the very last time point in our training set now contains the average price from the first time point in our test set. This is a realistic scenario if we were actually using this model to forecast, so it's not a major concern.
### END SOLUTION
#### 1(e)
rubric={points:1}
Next we will want to build some models to forecast the average avocado price a week in advance. Before we start with any ML, let's try a baseline: just predicting the previous week's `AveragePrice`. What $R^2$ do you get with this approach?
### BEGIN SOLUTION
```
r2_score(df_train["AveragePriceNextWeek"], df_train["AveragePrice"])
r2_score(df_test["AveragePriceNextWeek"], df_test["AveragePrice"])
```
Interesting that this is a less effective prediction strategy in the later part of the dataset. I guess that means the price was fluctuating more in late 2017 / early 2018?
### END SOLUTION
#### 1(f)
rubric={points:10}
Build some models to forecast the average avocado price. Experiment with a few approaches for encoding the date. Justify the decisions you make. Which approach worked best? Report your test score and briefly discuss your results.
Benchmark: you should be able to achieve $R^2$ of at least 0.79 on the test set. I got to 0.80, but not beyond that. Let me know if you do better!
Note: because we only have 2 splits here, we need to be a bit wary of overfitting on the test set. Try not to test on it a ridiculous number of times. If you are interested in some proper ways of dealing with this, see for example sklearn's [TimeSeriesSplit](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html), which is like cross-validation for time series data.
### BEGIN SOLUTION
```
df_train.head()
(df_train.loc[:, "Small Bags": "XLarge Bags"].sum(axis=1) - df_train["Total Bags"]).abs().max()
```
It seems that `Total Bags` is (approximately) the sum of the other 3 bag features, so I will drop `Total Bags`.
```
(df_train.loc[:, "4046": "4770"].sum(axis=1) - df_train["Total Volume"]).abs().max()
```
It seems that `Total Volume` is _not_ the sum of the 3 avocado types, so I will keep all 4 columns.
```
df_train.info()
```
It seems there are no null values, so I will not do any imputation.
Will plot a single time series for exploration purposes:
```
df_train.query("region == 'TotalUS'").set_index("Date").groupby("type")["AveragePrice"].plot(legend=True);
df_train.query("region == 'TotalUS' and type == 'conventional'").plot(x="Date", y="Total Volume");
```
We see some seasonality in the total volume, but not much in the average price - interesting.
I will not scale the `AveragePrice` because I am not scaling `AveragePriceNextWeek` either, and it may be helpful to keep them the same. Alternatively, it may have been effective to predict the _change_ in price instead of next's week's price.
```
numeric_features = ["Total Volume", "4046", "4225", "4770", "Small Bags", "Large Bags", "XLarge Bags", "year"]
categorical_features = ["type", "region"]
keep_features = ["AveragePrice"]
drop_features = ["Date", "Total Bags"]
target_feature = "AveragePriceNextWeek"
```
Next, I grab the `preprocess_features` function from Lecture 16, with a minor modification to allow un-transformed features via `keep_features`:
```
def preprocess_features(df_train, df_test,
                        numeric_features,
                        categorical_features,
                        keep_features,
                        drop_features,
                        target_feature):
    """
    Encode train/test feature frames with an sklearn ColumnTransformer.

    Transformers are fitted on df_train only, so no test-set statistics leak
    into the preprocessing.

    Parameters
    ----------
    df_train, df_test : pandas.core.frame.DataFrame
        Train and test splits; their columns must exactly equal the union of
        the five feature lists below.
    numeric_features : list
        Columns to median-impute and standardize.
    categorical_features : list
        Columns to most-frequent-impute and one-hot encode (first level dropped).
    keep_features : list
        Columns passed through untransformed and kept in the encoded output.
    drop_features : list
        Columns carried through but dropped from the encoded output.
    target_feature : str
        Name of the target column.

    Returns
    -------
    tuple
        (X_train_enc, y_train, X_test_enc, y_test)
    """
    all_features = numeric_features + categorical_features + keep_features + drop_features + [target_feature]
    if set(df_train.columns) != set(all_features):
        # "Missing" = data columns absent from the feature lists;
        # "Extra" = listed features that don't exist in the data.
        print("Missing columns", set(df_train.columns) - set(all_features))
        print("Extra columns", set(all_features) - set(df_train.columns))
        raise Exception("Columns do not match")
    # Put the columns in the order we want
    df_train = df_train[all_features]
    df_test = df_test[all_features]
    # Numeric pipeline: median imputation then standardization.
    numeric_transformer = Pipeline([
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())
    ])
    # Categorical pipeline: mode imputation then dense one-hot encoding;
    # drop='first' avoids the redundant (collinear) dummy column.
    categorical_transformer = Pipeline([
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('onehot', OneHotEncoder(sparse=False, drop='first'))
    ])
    # remainder='passthrough' forwards keep/drop/target columns unchanged,
    # appended after the transformed numeric and categorical columns.
    preprocessor = ColumnTransformer([
        ('numeric', numeric_transformer, numeric_features),
        ('categorical', categorical_transformer, categorical_features)
    ], remainder='passthrough')
    # Fit on the training split only.
    preprocessor.fit(df_train);
    if len(categorical_features) > 0:
        # Recover expanded one-hot names so the DataFrame columns line up
        # with ColumnTransformer's output order.
        ohe = preprocessor.named_transformers_['categorical'].named_steps['onehot']
        ohe_feature_names = list(ohe.get_feature_names(categorical_features))
        new_columns = numeric_features + ohe_feature_names + keep_features + drop_features + [target_feature]
    else:
        new_columns = all_features
    X_train_enc = pd.DataFrame(preprocessor.transform(df_train), index=df_train.index, columns=new_columns)
    X_test_enc = pd.DataFrame(preprocessor.transform(df_test), index=df_test.index, columns=new_columns)
    # drop/target columns were only carried through for bookkeeping.
    X_train_enc = X_train_enc.drop(columns=drop_features + [target_feature])
    X_test_enc = X_test_enc.drop( columns=drop_features + [target_feature])
    y_train = df_train[target_feature]
    y_test = df_test[ target_feature]
    return X_train_enc, y_train, X_test_enc, y_test
df_train_enc, y_train, df_test_enc, y_test = preprocess_features(df_train, df_test,
numeric_features,
categorical_features,
keep_features,
drop_features,
target_feature)
df_train_enc.head()
lr = Ridge()
lr.fit(df_train_enc, y_train);
lr.score(df_train_enc, y_train)
lr.score(df_test_enc, y_test)
lr_coef = pd.DataFrame(data=np.squeeze(lr.coef_), index=df_train_enc.columns, columns=["Coef"])
lr_coef.sort_values(by="Coef", ascending=False)
```
This is not a very impressive showing. We're doing almost the same as the baseline.
Let's see if encoding the date helps at all. We'll try to OHE the month.
```
df_train_month = df_train.assign(Month=df_train["Date"].apply(lambda x: x.month))
df_test_month = df_test.assign( Month=df_test[ "Date"].apply(lambda x: x.month))
df_train_month_enc, y_train, df_test_month_enc, y_test = preprocess_features(df_train_month, df_test_month,
numeric_features,
categorical_features + ["Month"],
keep_features,
drop_features,
target_feature)
df_train_month_enc.head()
lr = Ridge()
lr.fit(df_train_month_enc, y_train);
lr.score(df_train_month_enc, y_train)
lr.score(df_test_month_enc, y_test)
```
A tiny bit better.
```
pd.DataFrame(data=np.squeeze(lr.coef_), index=df_train_month_enc.columns, columns=["Coef"]).sort_values(by="Coef", ascending=False)
```
Let's add some lag features. I'm arbitrarily deciding on 4 lags for `AveragePrice` (the most important feature).
```
def add_lags(df, n_lags=4):
    """Append lagged copies of ``AveragePrice`` to *df*.

    Parameters
    ----------
    df : pandas.DataFrame
        Avocado data with the columns used by ``create_lag_feature``
        ("AveragePrice", "region", "type").
    n_lags : int, optional
        Number of lag features to create. Defaults to 4, matching the
        original hand-unrolled version, so existing callers are unchanged.

    Returns
    -------
    pandas.DataFrame
        Frame with ``AveragePrice_lag1`` ... ``AveragePrice_lag{n_lags}``
        columns added.
    """
    # The negative shift asks create_lag_feature for past values; looping
    # replaces the four copy-pasted calls of the original implementation.
    for lag in range(1, n_lags + 1):
        df = create_lag_feature(df, "AveragePrice", -lag, ["region", "type"])
    return df
# Build the lagged datasets and encode them; the lags join the numeric
# feature list so they get scaled like the other numeric columns.
df_train_month_lag = add_lags(df_train_month)
df_test_month_lag = add_lags(df_test_month)
df_train_month_lag
df_train_month_lag_enc, y_train, df_test_month_lag_enc, y_test = preprocess_features(df_train_month_lag, df_test_month_lag,
numeric_features + ["AveragePrice_lag1", "AveragePrice_lag2", "AveragePrice_lag3", "AveragePrice_lag4"],
categorical_features + ["Month"],
keep_features,
drop_features,
target_feature)
# Ridge on the month + lag feature set.
lr = Ridge()
lr.fit(df_train_month_lag_enc, y_train);
lr.score(df_train_month_lag_enc, y_train)
lr.score(df_test_month_lag_enc, y_test)
```
This did not seem to help.
```
# Coefficients of the month + lag ridge model, largest first.
pd.DataFrame(data=np.squeeze(lr.coef_), index=df_train_month_lag_enc.columns, columns=["Coef"]).sort_values(by="Coef", ascending=False)
```
We can also try a random forest:
```
# Random forest on the same month + lag feature set, for comparison.
rf = RandomForestRegressor()
rf.fit(df_train_month_lag_enc, y_train);
rf.score(df_train_month_lag_enc, y_train)
rf.score(df_test_month_lag_enc, y_test)
```
For the random forest it may be helpful to model the difference between today and tomorrow. The linear model does not care about this because it just corresponds to changing the coefficient corresponding to `AveragePrice` by 1, but for the random forest it may help:
```
# Model the *change* in price (target minus today's price) instead of the
# price itself; add today's price back before scoring with r2_score.
rf = RandomForestRegressor()
rf.fit(df_train_month_lag_enc, y_train - df_train_month_lag_enc["AveragePrice"]);
r2_score(y_train, rf.predict(df_train_month_lag_enc) + df_train_month_lag_enc["AveragePrice"])
r2_score(y_test, rf.predict(df_test_month_lag_enc) + df_test_month_lag_enc["AveragePrice"])
```
This massively overfits when we do this shifting. Let's try a simpler model...
```
# Same target shift, but with a depth-limited forest to curb overfitting.
rf = RandomForestRegressor(max_depth=8)
rf.fit(df_train_month_lag_enc, y_train - df_train_month_lag_enc["AveragePrice"]);
r2_score(y_train, rf.predict(df_train_month_lag_enc) + df_train_month_lag_enc["AveragePrice"])
r2_score(y_test, rf.predict(df_test_month_lag_enc) + df_test_month_lag_enc["AveragePrice"])
```
Doesn't really help.
Also, we can just confirm that this shifting has no effect on the linear model (well, a small effect because it's `Ridge` instead of `LinearRegression`, but small):
```
# Sanity check: the target shift barely changes a linear model's scores.
lr = Ridge()
lr.fit(df_train_month_lag_enc, y_train - df_train_month_lag_enc["AveragePrice"]);
r2_score(y_train, lr.predict(df_train_month_lag_enc) + df_train_month_lag_enc["AveragePrice"])
r2_score(y_test, lr.predict(df_test_month_lag_enc) + df_test_month_lag_enc["AveragePrice"])
```
Indeed, this is essentially the same score we had before.
Overall, adding the month helped, but adding the lagged price was surprisingly unhelpful. Perhaps lagged version of other features would have been better, or other representations of the time of year, or dealing with the regions and avocado types a bit more carefully.
### END SOLUTION
#### 1(g)
rubric={points:3}
We talked a little bit about _seasonality_, which is the idea of a periodic component to the time series. For example, in Lecture 16 we attempted to capture this by encoding the month. Something we didn't discuss is _trends_, which are long-term variations in the quantity of interest. Aside from the effects of climate change, the amount of rain in Australia is likely to vary during the year but less likely to have long-term trends over the years. Avocado prices, on the other hand, could easily exhibit trends: for example avocados may just cost more in 2020 than they did in 2015.
Briefly discuss in ~1 paragraph: to what extent, if any, was your model above able to account for seasonality? What about trends?
### BEGIN SOLUTION
I tried to take seasonality into account by having the month as an OHE variable. As far as trends are concerned, the year is also a numeric variable in the model, so it could learn that the price in 2017 is higher than in 2015, say. However, there are very few years in the training set (2015, 16, 17), so that is not a lot of data to learn from. Perhaps including the number of months since the start of the dataset, or something like that, would enable the model to do a bit better with trends. Nonetheless, extrapolating is very hard, so we can't necessarily trust our models' handling of trends.
```
# Weight the linear model assigned to the year feature (long-term trend).
pd.DataFrame(data=np.squeeze(lr.coef_), index=df_train_month_lag_enc.columns, columns=["Coef"]).loc["year"]
```
It seems that our linear model learned a small positive trend for the year. It would be cool to use SHAP and see what the random forest is doing.
### END SOLUTION
## Exercise 2: very short answer questions
Each question is worth 2 points.
#### 2(a)
rubric={points:4}
The following questions pertain to Lecture 16 on time series data:
1. Sometimes a time series has missing time points or, worse, time points that are unequally spaced in general. Give an example of a real world situation where the time series data would have unequally spaced time points.
2. In class we discussed two approaches to using temporal information: encoding the date as one or more features, and creating lagged versions of features. Which of these (one/other/both/neither) two approaches would struggle with unequally spaced time points? Briefly justify your answer.
### BEGIN SOLUTION
1. Many many examples: credit card transactions, log files, basically any situation where the frequency of the measurements could not be chosen by the person taking the measurements.
2. Encoding the date as, e.g. OHE month works just fine with unequally spaced points. However, the lag features are more problematic, because the "previous" measurement will be a different length of time away in each case.
### END SOLUTION
#### 2(b)
rubric={points:10}
The following questions pertain to Lecture 17 on survival analysis. We'll consider the use case of customer churn analysis.
1. What is the problem with simply labeling customers as "churned" or "not churned" and using standard supervised learning techniques, as we did in hw4?
2. Consider customer A who just joined last week vs. customer B who has been with the service for a year. Who do you expect will leave the service first: probably customer A, probably customer B, or we don't have enough information to answer? (This is a bit tricky - it's OK if you don't know the answer, but try to argue your case.)
3. One of the true/false questions from class was: "If a customer is censored after 5 months with the service, then all customers are censored after 5 months (i.e. no values of `tenure` above 5)." What is the answer if all customers joined the service at the same time? Briefly explain.
4. One of the true/false questions from class was: "If a customer is censored after 5 months with the service, then all customers are censored after 5 months (i.e. no values of `tenure` above 5)." What is the answer if customers did not necessarily join the service at the same time? Briefly explain.
5. If a customer's survival function is almost flat during a certain period, how do we interpret that?
### BEGIN SOLUTION
1. The "not churned" are censored - we don't know if they will churn shortly or in a long time. These people have the same label and our model will be impacted negatively.
2. Not enough information - it depends! Imagine a subscription service where you have to pay a starter fee after a month and then pay a huge fee after a year. Well, customer B just paid that huge fee and will probably stay a while, whereas customer A may leave before paying the huge fee, so customer A will probably leave first. But imagine a service where people are more and more likely to leave every day, e.g. a movie service with only 100 movies, so you can run out easily. In that case customer B will probably leave first.
3. True. If all started at the same time, and a customer is censored after 5 months, that means they all started 5 months ago and are all censored after 5 months.
4. False. That particular customer started 5 months ago, but you may have another customer who started much longer ago.
5. The customer is very unlikely to leave during that period.
### END SOLUTION
#### 2(c)
rubric={points:10}
The following questions pertain to Lecture 18 on clustering.
1. What's the main difference between unsupervised and supervised learning?
2. When choosing $k$ in $k$-means, why not just choose the $k$ that leads to the smallest inertia (sum of squared distances within clusters)?
3. You decide to use clustering for _outlier detection_; that is, to detect instances that are very atypical compared to all the rest. How might you do this with $k$-means?
4. You decide to use clustering for _outlier detection_; that is, to detect instances that are very atypical compared to all the rest. How might you do this with DBSCAN?
5. For hierarchical clustering, we briefly discussed a few different methods for merging clusters: single linkage, average linkage, etc. Why do we have this added complication here - can't we just minimize distance like we did with $k$-means?
### BEGIN SOLUTION
1. Supervised has target values ($y$), unsupervised does not.
2. Because inertia decreases with $k$, so you'd just choose $k=n$, which is not interesting.
3. Look for examples that are very far away from their cluster mean.
4. Look for examples that were not assigned to any cluster.
5. With $k$-means we had to find the distance between a point and a cluster mean. Here, we need to find the distance between two clusters, and, importantly, we have no cluster means. So it's ambiguous how to define the distance between two clusters.
### END SOLUTION
| github_jupyter |
```
import pandas as pd
# Load the binary dataset and confirm the column dtypes.
df = pd.read_csv('../data/data-binary.csv')
df.dtypes
from sklearn.linear_model import LogisticRegression
def get_model(df, X_cols, y_col, solver='liblinear', penalty='l1', C=0.2):
    """Fit a regularised logistic regression of ``y_col`` on ``X_cols``.

    Defaults give an L1-penalised liblinear model with C=0.2; returns the
    fitted ``LogisticRegression`` instance.
    """
    clf = LogisticRegression(penalty=penalty, solver=solver, C=C)
    clf.fit(df[X_cols], df[y_col])
    return clf
def extract_model_params(y, fields, model):
    """Flatten a fitted model into one table row.

    The row holds the child variable name, the model intercept, and one
    entry per field: the fitted coefficient where one exists, 0.0 for the
    remaining (trailing) fields that were not regressors.
    """
    row = {'child': y, 'intercept': model.intercept_[0]}
    weights = list(model.coef_[0])
    for pos, field in enumerate(fields):
        row[field] = weights[pos] if pos < len(weights) else 0.0
    return row
# For each column (in order), regress it on all columns to its left — a
# fixed variable ordering — then collect the fitted parameters into a table.
args = [(list(df.columns[0:index]), y) for index, y in enumerate(df.columns) if index > 0]
models = [(y_col, get_model(df, X_cols, y_col)) for X_cols, y_col in args]
param_df = pd.DataFrame([extract_model_params(y, df.columns, model) for y, model in models])
param_df
import networkx as nx
from networkx.algorithms.dag import is_directed_acyclic_graph
from itertools import chain
def get_structure(param_df, threshold=0.0):
    """Build a directed acyclic graph from the per-child coefficient table.

    For each row of ``param_df`` (one regression per child variable), an
    edge parent -> child is added whenever the absolute coefficient exceeds
    ``threshold``.  Edges that would create a cycle are dropped greedily.
    """
    def get_edges(r, nodes):
        # Candidate parent -> child edges for one row of param_df.
        edges = []
        ch = r['child']
        for pa in nodes:
            if pa == ch:
                # NOTE(review): relies on column order — only variables that
                # appear *before* the child are considered as parents.
                break
            if abs(r[pa]) > threshold:
                edge = (pa, ch)
                edges.append(edge)
        return edges
    # Every column except the bookkeeping ones is a node.
    nodes = [v for v in param_df.columns if v not in ['child', 'intercept']]
    edges = list(chain(*[get_edges(r, nodes) for _, r in param_df.iterrows()]))
    g = nx.DiGraph()
    for n in nodes:
        g.add_node(n)
    for edge in edges:
        g.add_edge(*edge)
        # Greedy acyclicity check: undo any edge that introduces a cycle.
        if not is_directed_acyclic_graph(g):
            g.remove_edge(*edge)
    return g
# Build the graph from the coefficient table and inspect/plot it.
g = get_structure(param_df)
print(g.nodes())
print(g.edges())
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
nx.draw(g, with_labels=True, font_weight='bold', node_color='r', ax=ax)
# Cast every column to string so values can be embedded in pandas
# ``query`` filter expressions later on.
ddf = df.copy(deep=True)
for col in ddf.columns:
    ddf[col] = ddf[col].astype(str)
ddf.dtypes
from itertools import combinations
import numpy as np
def get_parameters(ddf, g):
    """Estimate conditional probability tables for every node of ``g``
    by counting matching rows of ``ddf`` (all columns must be strings).

    Returns a dict mapping node -> flat list of conditional probabilities,
    grouped by parent configuration.
    """
    def get_filters(ch, parents, domains):
        # Build pandas ``query`` strings covering every combination of
        # parent values and child value for node ``ch``.
        pas = parents[ch]
        if len(pas) == 0:
            ch_domain = domains[ch]
            return [f'{ch}=="{v}"' for v in ch_domain]
        else:
            vals = [[(pa, v) for v in domains[pa]] for pa in pas]
            vals = vals + [[(ch, v) for v in domains[ch]]]
            vals = chain(*vals)
            vals = combinations(vals, len(pas) + 1)
            # NOTE(review): this filter hard-codes tuples of length 3
            # (tups[0..2]), i.e. exactly two parents — confirm that nodes
            # with one or with three-plus parents cannot occur here, or
            # this will raise IndexError / drop combinations.
            vals = filter(lambda tups: tups[0][0] != tups[1][0] and tups[0][0] != tups[2][0] and tups[1][0] != tups[2][0], vals)
            vals = map(lambda tups: ' and '.join([f'{t[0]}=="{t[1]}"' for t in tups]), vals)
            vals = list(vals)
            return vals
    def get_total(filters, n):
        # Count rows per filter, then normalise each consecutive group of
        # ``n`` counts (one group per parent configuration) to probabilities.
        counts = [ddf.query(f).shape[0] for f in filters]
        counts = [counts[i:i + n] for i in range(0, len(counts), n)]
        counts = [list(np.array(arr) / sum(arr)) for arr in counts]
        counts = list(chain(*counts))
        return counts
    nodes = list(g.nodes())
    domains = {n: sorted(list(ddf[n].unique())) for n in nodes}
    parents = {ch: list(g.predecessors(ch)) for ch in nodes}
    return {ch: get_total(get_filters(ch, parents, domains), len(domains[ch])) for ch in nodes}
# Conditional probability tables for the learned structure.
get_parameters(ddf, g)
```
| github_jupyter |
# Candlestick Hanging Man
https://www.investopedia.com/articles/active-trading/040914/understanding-hanging-man-optimistic-candlestick-pattern.asp
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import talib
import warnings
warnings.filterwarnings("ignore")
# yahoo finance is used to fetch data
import yfinance as yf
yf.pdr_override()
# input: ticker symbol and the date range to download
symbol = 'AMD'
start = '2018-01-01'
end = '2021-10-08'
# Read daily OHLCV data from Yahoo Finance
df = yf.download(symbol,start,end)
# View Columns
df.head()
```
## Candlestick with Hanging Man
```
from matplotlib import dates as mdates
import datetime as dt
# Candlestick prep: flag up-days, move Date out of the index, and convert it
# to matplotlib's float date format (required by candlestick_ohlc).
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = pd.to_datetime(dfc['Date'])
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()
from mplfinance.original_flavor import candlestick_ohlc
# Candlestick chart with a volume overlay on a twin y-axis.
fig = plt.figure(figsize=(14,10))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax.grid(True, which='both')
ax.minorticks_on()
axv = ax.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
# CDLHANGINGMAN returns a nonzero flag on days matching the pattern.
hanging_man = talib.CDLHANGINGMAN(df['Open'], df['High'], df['Low'], df['Close'])
hanging_man = hanging_man[hanging_man != 0]
df['hanging_man'] = talib.CDLHANGINGMAN(df['Open'], df['High'], df['Low'], df['Close'])
df.loc[df['hanging_man'] !=0]
df['Adj Close'].loc[df['hanging_man'] !=0]
df['Adj Close'].loc[df['hanging_man'] !=0].index
hanging_man
hanging_man.index
df
# Re-plot the chart with the detected pattern days marked.
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax.grid(True, which='both')
ax.minorticks_on()
axv = ax.twinx()
ax.plot_date(df['Adj Close'].loc[df['hanging_man'] !=0].index, df['Adj Close'].loc[df['hanging_man'] !=0],
             'Dc', # 'D' diamond marker, cyan
             fillstyle='none', # marker is not filled (with color)
             ms=10.0)
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
## Plot Certain dates
```
# Restrict to a one-month window and rebuild the plotting frame.
df = df['2021-07-01':'2021-08-01']
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = pd.to_datetime(dfc['Date'])
dfc['Date'] = dfc['Date'].apply(mdates.date2num)
dfc.head()
# Zoomed-in candlestick chart with pattern days marked.
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
ax.set_facecolor('black')
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='tan', colordown='gold', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
#ax.grid(True, which='both')
#ax.minorticks_on()
axv = ax.twinx()
ax.plot_date(df['Adj Close'].loc[df['hanging_man'] !=0].index, df['Adj Close'].loc[df['hanging_man'] !=0],
             'dr', # 'd' thin-diamond marker, red
             fillstyle='none', # marker is not filled (with color)
             ms=20.0)
colors = dfc.VolumePositive.map({True: 'tan', False: 'gold'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
# Highlight Candlestick
```
from matplotlib.dates import date2num
from datetime import datetime
# Shade the 2021-07-19..21 span to highlight the hanging-man signal.
fig = plt.figure(figsize=(20,16))
ax = plt.subplot(2, 1, 1)
candlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
#ax.grid(True, which='both')
#ax.minorticks_on()
axv = ax.twinx()
ax.axvspan(date2num(datetime(2021,7,19)), date2num(datetime(2021,7,21)),
           label="Hanging Man Bearish",color="red", alpha=0.3)
ax.legend()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
axv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
axv.axes.yaxis.set_ticklabels([])
axv.set_ylim(0, 3*df.Volume.max())
ax.set_title('Stock '+ symbol +' Closing Price')
ax.set_ylabel('Price')
```
| github_jupyter |
# Train an MNIST model with TensorFlow
MNIST is a widely-used dataset for handwritten digit classification. It consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). This tutorial will show how to train a TensorFlow V2 model on MNIST model on SageMaker.
## Runtime
This notebook takes approximately 5 minutes to run.
## Contents
1. [TensorFlow Estimator](#TensorFlow-Estimator)
1. [Implement the training entry point](#Implement-the-training-entry-point)
1. [Set hyperparameters](#Set-hyperparameters)
1. [Set up channels for training and testing data](#Set-up-channels-for-training-and-testing-data)
1. [Run the training script on SageMaker](#Run-the-training-script-on-SageMaker)
1. [Inspect and store model data](#Inspect-and-store-model-data)
1. [Test and debug the entry point before running the training container](#Test-and-debug-the-entry-point-before-running-the-training-container)
```
import os
import json
import sagemaker
from sagemaker.tensorflow import TensorFlow
from sagemaker import get_execution_role
# SageMaker session, execution role, and the S3 prefix used for training
# artifacts produced by this notebook.
sess = sagemaker.Session()
role = get_execution_role()
output_path = "s3://" + sess.default_bucket() + "/DEMO-tensorflow/mnist"
```
## TensorFlow Estimator
The `TensorFlow` class allows you to run your training script on SageMaker
infrastructure in a containerized environment. In this notebook, we
refer to this container as the "training container."
Configure it with the following parameters to set up the environment:
- `entry_point`: A user-defined Python file used by the training container as the instructions for training. We will further discuss this file in the next subsection.
- `role`: An IAM role to make AWS service requests
- `instance_type`: The type of SageMaker instance to run your training script. Set it to `local` if you want to run the training job on the SageMaker instance you are using to run this notebook.
- `model_dir`: S3 bucket URI where the checkpoint data and models can be exported to during training (default: None).
To disable having model_dir passed to your training script, set `model_dir`=False
- `instance_count`: The number of instances to run your training job on. Multiple instances are needed for distributed training.
- `output_path`: the S3 bucket URI to save training output (model artifacts and output files).
- `framework_version`: The TensorFlow version to use.
- `py_version`: The Python version to use.
For more information, see the [EstimatorBase API reference](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html#sagemaker.estimator.EstimatorBase).
## Implement the training entry point
The entry point for training is a Python script that provides all
the code for training a TensorFlow model. It is used by the SageMaker
TensorFlow Estimator (`TensorFlow` class above) as the entry point for running the training job.
Under the hood, SageMaker TensorFlow Estimator downloads a docker image
with runtime environments
specified by the parameters to initiate the
estimator class and it injects the training script into the
docker image as the entry point to run the container.
In the rest of the notebook, we use *training image* to refer to the
docker image specified by the TensorFlow Estimator and *training container*
to refer to the container that runs the training image.
This means your training script is very similar to a training script
you might run outside Amazon SageMaker, but it can access the useful environment
variables provided by the training image. See [the complete list of environment variables](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md) for a complete
description of all environment variables your training script
can access.
In this example, we use the training script `code/train.py`
as the entry point for our TensorFlow Estimator.
```
!pygmentize 'code/train.py'
```
## Set hyperparameters
In addition, the TensorFlow estimator allows you to parse command line arguments
to your training script via `hyperparameters`.
<span style="color:red"> Note: local mode is not supported in SageMaker Studio. </span>
```
# Set local_mode to be True if you want to run the training script on the machine that runs this notebook
local_mode = False
if local_mode:
    instance_type = "local"
else:
    instance_type = "ml.c4.xlarge"
# Configure the SageMaker TensorFlow estimator; hyperparameters are passed
# to code/train.py as command-line arguments.
est = TensorFlow(
    entry_point="train.py",
    source_dir="code",  # directory of your training script
    role=role,
    framework_version="2.3.1",
    model_dir=False,  # don't pass --model_dir to your training script
    py_version="py37",
    instance_type=instance_type,
    instance_count=1,
    volume_size=250,
    output_path=output_path,
    hyperparameters={
        "batch-size": 512,
        "epochs": 1,
        "learning-rate": 1e-3,
        "beta_1": 0.9,
        "beta_2": 0.999,
    },
)
```
The training container runs your training script like:
```
python train.py --batch-size 32 --epochs 1 --learning-rate 0.001 --beta_1 0.9 --beta_2 0.999
```
## Set up channels for training and testing data
Tell `TensorFlow` estimator where to find the training and
testing data. It can be a path to an S3 bucket, or a path
in your local file system if you use local mode. In this example,
we download the MNIST data from a public S3 bucket and upload it
to your default bucket.
```
import logging
import boto3
from botocore.exceptions import ClientError
# Download training and testing data from a public S3 bucket
def download_from_s3(data_dir="./data", train=True):
    """Download the gzipped MNIST IDX files from the public
    ``sagemaker-sample-files`` bucket into ``data_dir``.

    Args:
        data_dir (str): directory to save the data
        train (bool): download the training set if True, else the test set

    Returns:
        None
    """
    # exist_ok avoids the race between an existence check and creation.
    os.makedirs(data_dir, exist_ok=True)

    if train:
        images_file = "train-images-idx3-ubyte.gz"
        labels_file = "train-labels-idx1-ubyte.gz"
    else:
        images_file = "t10k-images-idx3-ubyte.gz"
        labels_file = "t10k-labels-idx1-ubyte.gz"

    # Download each object only if it is not already cached locally.
    s3 = boto3.client("s3")
    bucket = "sagemaker-sample-files"  # was an f-string with no placeholders
    for obj in [images_file, labels_file]:
        key = os.path.join("datasets/image/MNIST", obj)
        dest = os.path.join(data_dir, obj)
        if not os.path.exists(dest):
            s3.download_file(bucket, key, dest)
# Fetch both splits locally, then upload them to the session's default bucket.
download_from_s3("./data", True)
download_from_s3("./data", False)
# Upload to the default bucket
prefix = "DEMO-mnist"
bucket = sess.default_bucket()
loc = sess.upload_data(path="./data", bucket=bucket, key_prefix=prefix)
channels = {"training": loc, "testing": loc}
```
The keys of the `channels` dictionary are passed to the training image,
and it creates the environment variable `SM_CHANNEL_<key name>`.
In this example, `SM_CHANNEL_TRAINING` and `SM_CHANNEL_TESTING` are created in the training image (see
how `code/train.py` accesses these variables). For more information,
see: [SM_CHANNEL_{channel_name}](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md#sm_channel_channel_name).
If you want, you can create a channel for validation:
```
channels = {
'training': train_data_loc,
'validation': val_data_loc,
'test': test_data_loc
}
```
You can then access this channel within your training script via
`SM_CHANNEL_VALIDATION`.
## Run the training script on SageMaker
Now, the training container has everything to run your training
script. Start the container by calling the `fit()` method.
```
# Launch the SageMaker training job; this call blocks until it completes.
est.fit(inputs=channels)
```
## Inspect and store model data
Now, the training is finished, and the model artifact has been saved in
the `output_path`.
```
# S3 URI of the trained model artifact produced by the job.
tf_mnist_model_data = est.model_data
print("Model artifact saved at:\n", tf_mnist_model_data)
```
We store the variable `tf_mnist_model_data` in the current notebook kernel.
```
%store tf_mnist_model_data
```
## Test and debug the entry point before running the training container
The entry point `code/train.py` provided here has been tested, and it can be run in the training container.
When you develop your own training script, it is a good practice to simulate the container environment
in the local shell and test it before sending it to SageMaker, because debugging in a containerized environment
is rather cumbersome. The following script shows how you can test your training script:
```
!pygmentize code/test_train.py
```
## Conclusion
In this notebook, we trained a TensorFlow model on the MNIST dataset by fitting a SageMaker estimator. For next steps on how to deploy the trained model and perform inference, see [Deploy a Trained TensorFlow V2 Model](https://sagemaker-examples.readthedocs.io/en/latest/frameworks/tensorflow/get_started_mnist_deploy.html).
| github_jupyter |
# MASS Algorithm Tutorial
This notebook is a tutorial on how to use STUMPY and the MASS algorithm [1] to compute a **distance profile**, a vector containing all of the distances from a query subsequence to the subsequences of a time series.
In this tutorial we are going to reproduce one of the use case mentioned in this [presentation](https://www.cs.unm.edu/~mueen/Simple_Case_Studies_Using_MASS.pptx).
## Load Libraries and Data
```
%matplotlib inline
import numpy as np
from os import path
import pandas as pd
import matplotlib.pyplot as plt
import urllib.request
def change_plot_size(width, height, plt):
    """Set the default figure size to (width, height) inches and draw
    x-axis tick marks outside the axes."""
    plt.rcParams["figure.figsize"] = [width, height]
    plt.rcParams["xtick.direction"] = "out"
# Default to wide 20x6-inch figures for this notebook.
change_plot_size(20, 6, plt)
```
The use case dataset is the "Sony AIBO robot dog dataset" and comes from an accelerometer inside a Sony AIBO robot dog ([dataset source](https://www.cs.ucr.edu/~eamonn/time_series_data_2018/)). The query comes from a period when the dog was walking on carpet, the time series data we will search comes from a time the robot walked on cement (for 5000 data points), then carpet (for 3000 data points), then back onto cement.
```
#load SampleTarget (time series): whitespace-separated single column of values
st_url = 'https://www.cs.unm.edu/~mueen/robot_dog.txt'
st_txt = urllib.request.urlopen(st_url)
sample_target = pd.read_csv(st_txt, sep="\s+", header = None)[0]
#load Pattern (query subsequence): the known carpet-walk snippet
pattern_url = 'https://www.cs.unm.edu/~mueen/carpet_query.txt'
pattern_txt = urllib.request.urlopen(pattern_url)
pattern = pd.read_csv(pattern_txt, sep="\s+", header = None)[0]
pattern.shape, sample_target.shape
```
## Plot Data
```
# Plot the full accelerometer trace and the query pattern separately.
plt.plot(sample_target)
plt.title('Robot Dog Accelerometer Sample')
plt.show()
plt.plot(pattern)
plt.title('Carpet Walk - Query Subsequence')
plt.show()
```
## Distance Profile
What we want to do is to perform a similarity search for our query subsequence (dog walking on the carpet). Thus, we search for a known pattern (the dog walking on the Carpet) in the entire time series and returns the most similar subsequences. To do so in Python, we can simply use the STUMPY function `core.mass`
```
from stumpy.core import mass
# Distance profile: distance from the query to every subsequence of the
# target series (computed by the MASS algorithm).
distance_profile = mass(pattern, sample_target)
#find k indexes pointing to k most similar subsequences
k = 16
min_idxs = np.argpartition(distance_profile, k)[:k]
min_idxs = min_idxs[np.argsort(distance_profile[min_idxs])]
#quick test: argpartition + sort agrees with a full sort on the minimum
np.sort(distance_profile)[:k][0] == distance_profile[min_idxs][0]
#plot top k patterns over the (faded) full series
m = len(pattern)
plt.plot(sample_target, alpha=0.5)
plt.xlim((2000, 10000)) #limit x axis for better visualization
for i in range(k):
    plt.plot(
        list(range(min_idxs[i], (min_idxs[i] + m))),
        sample_target[min_idxs[i] : (min_idxs[i] + m)],
        c = np.random.rand(3,),
        linewidth=3.0,
    )
```
Note that the best matches all occur during the carpet walking period (5000 - 8000 range). The best match is plotted below together with the query subsequence to show their similarity.
```
#Plot Best Match: the query (top) against its closest subsequence (bottom)
fig, axs = plt.subplots(2)
# pattern
axs[0].plot(pattern)
# best match
axs[1].plot(sample_target[min_idxs[0] : (min_idxs[0] + m)])
plt.show()
```
## References
[1] Abdullah Mueen, Yan Zhu, Michael Yeh, Kaveh Kamgar, Krishnamurthy Viswanathan, Chetan Kumar Gupta and Eamonn Keogh (2015), The Fastest Similarity Search Algorithm for Time Series Subsequences under Euclidean Distance, URL: http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from cemag import entropy, prior_params, find_error_probability, std_eff
```
# Loading expression Data
The included dataset contains log2(TPM) normalized RNA-seq data for various tissues taken from adult zebrafish at different ages.
For details see [Kijima, Y. et al. Age-associated different transcriptome profiling in zebrafish and rat: insight into diversity of vertebrate aging. bioRxiv 478438 (2018)](https://www.biorxiv.org/content/10.1101/478438v1.abstract). The raw data is available here: https://www.ncbi.nlm.nih.gov/bioproject/PRJDB7713
```
# load dataset (genes in rows, samples in columns)
expression_df = pd.read_csv('./data/gene_expression.tab', sep='\t', index_col=0)
data = expression_df.values
expression_df.head()
# make DataFrame for column attributes, parsed from the
# "tissue_age_replicate" column-name convention
ca = pd.DataFrame([c.split('_') for c in expression_df.columns],
                  columns=['tissue', 'age', 'replicate'])
ca.head()
ca.groupby(['tissue', 'age']).count()
```
# Inferring marker genes
## Marker genes between two groups of samples
Here, we simply want to find marker genes that can differentiate between samples taken from the brain at ages 2 months and 39 months.
```
# define groups that are compared against each other
groups = [['brain', '2mo'], ['brain', '39mo']]
groups_idx = [np.flatnonzero((ca.tissue == t)&(ca.age == a)) for t, a in groups]
groups_idx
# relative a priori size of different groups,
# discounting potential differences in no. of replicates
nc = [1,1]
# calculate conditional class entropy
# the parameters k, N determine how accurately the integrals are calculated
# one can start with smaller values and see how much of a difference it makes
H_result = entropy(data, groups_idx, ncond=nc, k=6, N=1e2)
# when there are only two classes, we can invert the entropy function
# H=p*log(p) and infer the probability p of misclassification given gene expression a measurement
# The function find_error_probability is an analytical solution valid only for small p
p_result = find_error_probability(H_result)
# create sorted list of top marker genes (smallest error probability first)
df_result = pd.DataFrame({'H(c|x)':H_result, 'p_e':p_result}, index=expression_df.index)
df_result = df_result.sort_values(by='p_e')
df_result.head(20)
# check expression of top marker genes in both groups
for gene in df_result.index[:10]:
    plot_data = tuple(expression_df.loc[gene][group].values for group in groups_idx)
    plt.figure(figsize=(2,2))
    plt.boxplot(plot_data, labels=['{}_{}'.format(tissue, age) for tissue, age in groups])
    plt.suptitle(gene)
    plt.ylabel('log2(TPM)')
```
## Marker genes between more than two groups of samples
Here, we simply want to find marker genes that can differentiate between samples taken from the brain at all ages (2 months, 7 months, 16 months, and 39 months).
```
# define groups that are compared against each other
groups = [['brain', '2mo'], ['brain', '7mo'], ['brain', '16mo'], ['brain', '39mo']]
groups_idx = [np.flatnonzero((ca.tissue == t)&(ca.age == a)) for t, a in groups]
groups_idx
# relative a priori size of different groups,
# discounting potential differences in no. of replicates
nc = [1,1,1,1]
# calculate conditional class entropy
# the parameters k, N determine how accurately the integrals are calculated
# one can start with smaller values and see how much of a difference it makes
H_result = entropy(data, groups_idx, ncond=nc, k=6, N=1e2)
# create sorted list of top marker genes (lowest conditional entropy first)
df_result = pd.DataFrame({'H(c|x)':H_result}, index=expression_df.index)
df_result = df_result.sort_values(by='H(c|x)')
df_result.head(20)
# For each set of replicates, we want to estimate the effective standard deviation of the measurements
# For this, we need to estimate the prior distribution of variances first in each set, which have two parameters
params = [prior_params(data[group]) for group in groups_idx]
params
# For this kind of data, it is more appropriate to plot time courses
# we can estimate error bars using the std_eff function
plot_color = '#377eb8'
for gene in df_result.index[:10]:
    plot_data = tuple(expression_df.loc[gene][group].values for group in groups_idx)
    plt.figure(figsize=(2,2))
    y_mean = np.array([y.mean() for y in plot_data])
    n = len(y_mean)
    x = np.arange(n)
    plt.plot(x, y_mean, color=plot_color)
    # one error bar + raw replicate points per age group
    for xi, y, (a,b) in zip(x, plot_data, params):
        y_std = std_eff(y, a, b) # estimate effective standard deviation
        plt.errorbar(xi, y=y.mean(), yerr=y_std, color=plot_color)
        plt.plot([xi]*len(y), y, 'x', color=plot_color)
    plt.xticks(ticks=x, labels=['{}_{}'.format(tissue, age) for tissue, age in groups],
               rotation=45)
    plt.suptitle(gene)
    plt.ylabel('log2(TPM)')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
import datetime
import matplotlib.pyplot as plt
from IPython import display
from scipy import stats
import math
import random
from sklearn import preprocessing
from sklearn.model_selection import ShuffleSplit, train_test_split, cross_val_score, StratifiedShuffleSplit
from sklearn.metrics import mean_squared_log_error
from xgboost import XGBRegressor
# Kaggle input paths (switch to the commented local paths when running offline)
train_File = '../input/train.csv'
test_File = '../input/test.csv'
#train_File = 'train.csv'
#test_File = 'test.csv'
# Shorthand for rich notebook display of DataFrames
dd = display.display
```
# 1. Gather
```
def loadData():
    """Read the train and test CSVs and stack them into a single frame.

    Rows are re-indexed 0..n-1 (ignore_index=True) and non-aligned columns are
    sorted alphabetically (sort=True); test rows have NaN SalePrice.
    """
    frames = [pd.read_csv(train_File), pd.read_csv(test_File)]
    return pd.concat(frames, axis=0, sort=True, ignore_index=True)
df_before_clean = loadData()
dd(df_before_clean)
```
# 2. Assess Data : Inspecting Data for Quality and Tidiness Issues
#### 2.1 Quality Issues : Issues with content - missing, duplicate or incorrect data. a.k.a Dirty data
* 2.1.a Completeness : *"Are there any rows, columns or cells missing values?"*
* 35 columns have the missing values:
['Alley', 'BsmtCond', 'BsmtExposure', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtFinType1', 'BsmtFinType2', 'BsmtFullBath', 'BsmtHalfBath', 'BsmtQual', 'BsmtUnfSF', 'Electrical', 'Exterior1st', 'Exterior2nd', 'Fence', 'FireplaceQu', 'Functional', 'GarageArea', 'GarageCars', 'GarageCond', 'GarageFinish', 'GarageQual', 'GarageType', 'GarageYrBlt', 'KitchenQual', 'LotFrontage', 'MSZoning', 'MasVnrArea', 'MasVnrType', 'MiscFeature', 'PoolQC', 'SalePrice', 'SaleType', 'TotalBsmtSF', 'Utilities']
######
* 2.1.b Validity : *"Does the data comply to the data schema like duplicate patient id or zip code being < 5 digits or float data type?"*
######
* Following are Categorical Variables but currently are being considered as integer/float:
* MSSubClass
* OverallQual
* OverallCond
* FireplaceQu
* MoSold
######
* Following variables are supposed to be Integer type but Box-Cox or Scaling will anyway type cast them to float:
* LotFrontage, LotArea, YearBuilt, YearRemodAdd, MasVnrArea, BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, 1stFlrSF, 2ndFlrSF, LowQualFinSF, GrLivArea, BsmtFullBath, BsmtHalfBath, FullBath, HalfBath, BedroomAbvGr, KitchenAbvGr, TotRmsAbvGrd, Fireplaces, GarageCars, GarageArea, WoodDeckSF, OpenPorchSF, EnclosedPorch, 3SsnPorch, ScreenPorch, PoolArea, MiscVal, YrSold, SalePrice
* GarageYrBlt will be NA when garage is not available for the house and hence this variable needs to be dropped as it does not comply to the schema.
######
* 2.1.c Accuracy : *"Wrong data that is valid, like height = 300 inches; it still complies to the standard (i.e. inches) but the data is inaccurate."*
######
* MSZoning has 4 missing entries. Also, we do not have data samples for Agriculture (A), Industrial (I), or Residential Low Density Park (RP). Therefore, there exists a probability that missing entries will be replaced with wrong data which are nevertheless valid for the variable.
######
* Similarly, Utilites has 2 missing entries. Also, we do not have data samples for NoSewr ((Electricity, Gas, and Water (Septic Tank)) and ELO (Electricity only)
######
* Exterior1st, has no missing values in training data set. But has no samples for 'Other' and 'PreCast' Material. Testing data has one missing sample for the variable.
######
* Exterior2nd has no missing values in the training data set. But has no samples for "PreCast" material. Testing data has one missing sample for the variable.
######
* MasVnrType has 8 missing values in training data set. It has total of 5 valid values.But there are no samples for "CBlock" (Cylinder block). One in testing set also has a record with missing value for this variable.
######
* ExterQual has no samples for "Po" but fortunately there are no missing values for it in both training or testing data.
######
* BsmtQual has 37 missing entries. It has no samples for "Po" ((Poor (<70 inches)). Testing data set has 46 missing entries.
######
* BsmtCond has 37 missing entries. It has no samples for "Ex" (Excellent). There are 46 missing entries in the testing dataset.
######
* KitchenQual has no training samples on "Po" but testing sample has a missing entry.
* Functional has no training samples for "Po" but testing sample has a missing entry.
* PoolQC has no samples for "Typ" but testing sample has missing values.
* SaleType has no samples for "VWD" but testing sample has a missing value record.
######
######
* 2.1.d Consistency : *"Both valid and accurate but inconsistent. state = california and CA"*
######
* BsmtExposure has training samples as NA for both No Basement and also for missing values. There are also 2 testing samples with missing values as NA.
######
* BsmtFinType2 has training samples as NA for both No Basement and also for missing values.
######
* TotalBsmtSF has both 0 and NA representing as missing basement.
######
* BsmtExposure has NA for both missing and no basement houses.
* BsmtFinType2 has NA for both missing and no basement for a house.
#### 2.2 Tidiness Issues: Issues with structure - untidy or messy data
* 2.2.a Each observation is a row
* No Issues: Each observation is a unique house (no duplicate records)
######
* 2.2.b Each variable is a column
* No Issues: There are no columns with multiple or concatenated data.
######
* 2.2.c Each observational unit is a table
* No Issues: There are no cross referring keys present in the table. Bsmt* and Garage* variables do form a logical group but there is no unique identities to the group.
######
##### Hypothesis 1: Bsmt__ variables are NA when TotalBsmtSF is 0
##### Proof:
* BsmtFinType1 is NA when TotalBsmtSF is 0
* BsmtUnfSF is 0 whenever TotalBsmtSF is 0; Even in testing set it is NA only when TotalBsmtSF is NA
* BsmtFullBath is 0 whenever TotalBsmtSF is 0; Even in the testing set it is NA only when TotalBsmtSF is NA or 0.
* BsmtHalfBath is 0 whenever TotalBsmtSF is 0; Even in the testing set it is NA only when TotalBsmtSF is NA or 0.
##### Key Observations:
* Dataset has House Prices which were sold in between 2006 - 2010.
* Surprised to see no bathroom and no bedroom but with kitchen Houses !!! Where do they sleep and shit after the heavy meal ?
* NA value in GarageType can be easily misinterpreted as a missing value. However, it is not true. NA in GarageType clearly indicates no garage because in both the train and testing datasets GarageArea = 0 in all those cases. Similarly, GarageYrBlt, GarageFinish, GarageQual, GarageCond are also NA when GarageArea = 0. And GarageCars and GarageArea are 0 in those cases as well.
* How do we verify NA in Fence as missing entry or No Fence ??
* How do we verify NA in MiscFeatures as None or missing value ?? Note that MiscVal is zero for NA, Othr & Shed.
### 2.1.a Completeness : *"Are there any rows, columns or cells missing values?"*
```
def missingValueAssessment(df):
    """Report which columns contain NaNs and the duplicate-row count, both on
    the raw frame and after filling NaNs with the string 'NA'.

    Purely diagnostic: prints its findings and does not mutate the caller's frame.
    """
    cols_with_nan = [c for c in df.columns if df[c].isna().any()]
    print('NaN columns :', cols_with_nan, "\n# :", len(cols_with_nan))
    print("Duplicated rows count: ", df[df.duplicated()].shape)
    filled = df.fillna('NA')
    print("Duplicated rows count: ", filled[filled.duplicated()].shape)
missingValueAssessment(df_before_clean)
```
### 2.2.a Each observation is a row
```
def checkHouseIsRepeated(df):
    """Display houses sharing the same (SalePrice, GrLivArea, YearBuilt,
    YearRemodAdd) — a proxy check for duplicate records."""
    key_cols = ['SalePrice','GrLivArea','YearBuilt','YearRemodAdd']
    counts = df.groupby(key_cols).agg('count').reset_index()[key_cols + ['Id']]
    dd("Samples with same 'SalePrice','GrLivArea','YearBuilt','YearRemodAdd' : ", counts[counts.Id > 1])
checkHouseIsRepeated(df_before_clean)
```
* As there are no time series data: as in there is no variable indicating the time of the reading carried out, It is safe to assume the reading was done at one shot and there would not be any duplicate entries of a house.
* With the above assumption, group by 'SalePrice','GrLivArea','YearBuilt','YearRemodAdd' count indicates that there are no duplicate records.
```
df_before_clean.info()
```
# 3.0 Cleaning Data
##### Let us first do the cleaning activities where we have high confidence of imputing the values as listed in the above Assessment summary.
```
def convertOrdinalToNumber(df):
    """Encode ordinal categorical columns as integer codes (mutates df).

    Each column is given an explicit, ordered category list;
    pd.Categorical(...).codes maps values to 0..n-1 in that order, and maps
    NaN / unlisted values to -1 — that -1 is used as the "missing" sentinel by
    the later imputation steps.  Returns the mutated frame.
    """
    #Convert ordinal categorical values to numeric values
    # Columns sharing the common NA/Po/Fa/TA/Gd/Ex quality scale:
    Ordinal_columns = ['BsmtCond','BsmtQual','ExterCond','ExterQual',
                   'FireplaceQu','GarageCond','GarageQual','HeatingQC',
                   'PoolQC','KitchenQual']
    # .codes encodes NaN (and anything not in `categories`) as -1
    for c in Ordinal_columns:
        df[c] = pd.Categorical(df[c], categories=['NA','Po', 'Fa', 'TA', 'Gd', 'Ex'],
                               ordered=True).codes
        dd(c, df[c].unique())
    # Columns with their own bespoke ordered scales:
    df['BsmtExposure'] = pd.Categorical(df['BsmtExposure'],
                                        categories=['NA', 'No', 'Mn', 'Av', 'Gd'],
                                        ordered=True).codes
    df['GarageFinish'] = pd.Categorical(df['GarageFinish'],
                                        categories=['NA', 'Unf', 'RFn', 'Fin'],
                                        ordered=True).codes
    df['PavedDrive'] = pd.Categorical(df['PavedDrive'],
                                      categories=['N', 'P', 'Y'],
                                      ordered=True).codes
    df['Utilities'] = pd.Categorical(df['Utilities'],
                                     categories=['ELO', 'NoSeWa', 'NoSewr', 'AllPub'],
                                     ordered=True).codes
    df['LotShape'] = pd.Categorical(df['LotShape'],
                                    categories=['Reg', 'IR1', 'IR2', 'IR3'],
                                    ordered=True).codes
    df['LandSlope'] = pd.Categorical(df['LandSlope'],
                                     categories=['Gtl', 'Mod', 'Sev'],
                                     ordered=True).codes
    df['Functional'] = pd.Categorical(df['Functional'],
                                      categories=['Sal', 'Sev', 'Maj2', 'Maj1', 'Mod',
                                                  'Min2', 'Min1', 'Typ'],
                                      ordered=True).codes
    df['BsmtFinType1'] = pd.Categorical(df['BsmtFinType1'],
                                        categories=['NA', 'Unf', 'LwQ', 'Rec', 'BLQ',
                                                    'ALQ', 'GLQ'],
                                        ordered=True).codes
    df['BsmtFinType2'] = pd.Categorical(df['BsmtFinType2'],
                                        categories=['NA', 'Unf', 'LwQ', 'Rec', 'BLQ',
                                                    'ALQ', 'GLQ'],
                                        ordered=True).codes
    # BldgType / HouseStyle / LandContour orderings were justified in the
    # markdown analysis (by total sq ft, rooms and bedrooms per category)
    dd('BldgType',df['BldgType'].unique(),df[df['BldgType'].isna()]) #'1Fam', '2fmCon', 'Duplex', 'TwnhsE', 'Twnhs']
    df['BldgType'] = pd.Categorical(df['BldgType'],
                                    categories=['1Fam', '2fmCon', 'Duplex', 'Twnhs', 'TwnhsE'],
                                    ordered=True).codes
    dd('BldgType',df['BldgType'].unique())
    df['HouseStyle'] = pd.Categorical(df['HouseStyle'],
                                      categories=['1Story', '1.5Unf', '1.5Fin', '2Story', '2.5Unf', '2.5Fin', 'SFoyer', 'SLvl'],
                                      ordered=True).codes
    df['LandContour'] = pd.Categorical(df['LandContour'],
                                       categories=['Low', 'Lvl', 'Bnk', 'HLS'],
                                       ordered=True).codes
    df['Electrical'] = pd.Categorical(df['Electrical'],
                                      categories=['Mix', 'FuseP', 'FuseF', 'FuseA', 'SBrkr'],
                                      ordered=True).codes
    return df
def cleanStage1(df):
    """First-pass cleaning of the combined train+test frame.

    - Derives age features (AgeInMonths, AgeWhenSold, AgeWhenRemod) and drops
      the raw year/month columns.
    - Casts the listed numeric columns to float64.
    - Encodes ordinal categoricals via convertOrdinalToNumber (missing -> -1).
    - Zero-fills basement columns when TotalBsmtSF is 0 and garage columns when
      GarageType is "NA" (i.e. no basement / no garage).
    - Drops Id and GarageYrBlt; fills SalePrice NaN (test rows) with 0.
    Returns the cleaned frame.
    """
    #convert data type
    #we are being a little lenient giving float64 to YearBuilt, YrSold, but those guys are going to be box-coxed
    #so let them at least enjoy the bigger size for now
    #Before changing their data type, let us first convert them to continuous variables wherever applicable,
    #i.e. YearBuilt, YearRemodAdd, YrSold & MoSold
    df['AgeInMonths'] = df.YearBuilt.apply(lambda x: (2019 - x)*12)
    df['AgeWhenSold'] = (df.YrSold - df.YearBuilt) * 12 + df.MoSold
    df['AgeWhenRemod'] = (df.YearRemodAdd - df.YearBuilt) * 12
    df= df.drop(['YearBuilt', 'YrSold', 'MoSold', 'YearRemodAdd'],axis=1)
    float64_variables = ['LotFrontage', 'LotArea', 'AgeInMonths', 'AgeWhenRemod', 'MasVnrArea', \
        'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', \
        'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces',\
        'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', \
        'PoolArea', 'MiscVal', 'AgeWhenSold', 'SalePrice', 'OverallQual', 'OverallCond']
    for c in float64_variables:
        df[c] = df[c].astype(np.float64)
    '''int_to_categorical_variables = ['MSSubClass',]
    for c in int_to_categorical_variables:
        df[c] = df[c].astype(str)'''
    dd(len(df.columns[df.isna().any()].tolist()), df.columns[df.isna().any()].tolist())
    df = convertOrdinalToNumber(df)
    # Columns needing imputation: NaN or the -1 ordinal-missing sentinel
    dd("Number of na columns : ",len([c for c in list(df) if (-1 in df[c].unique()) or (df[df[c].isna()].shape[0] > 0)]))
    dd("To be imputed columns : ", [c for c in list(df) if (-1 in df[c].unique()) or (df[df[c].isna()].shape[0] > 0)])
    dd(df.KitchenQual.unique())
    dd(df.BsmtCond.unique())
    #return
    #As per our data analysis TotalBsmtSF = 0 when TotalBsmtSF is missing.
    df.loc[df.TotalBsmtSF.isna(), 'TotalBsmtSF'] = 0
    #BsmtQual BsmtCond BsmtExposure BsmtFinType1 BsmtFinType2 BsmtFullBath BsmtHalfBath
    #BsmtFinSF1 BsmtFinSF2 BsmtUnfSF
    # zero out every basement column for houses without a basement
    bsmt_cols = [c for c in list(df) if 'Bsmt' in c]
    df.loc[df.TotalBsmtSF == 0, bsmt_cols] = 0
    #verify almighty pandas half line solution to assignment
    dd(df[df.TotalBsmtSF== 0][bsmt_cols])
    dd(df.GarageType.unique())
    #GarageYrBlt GarageFinish GarageQual GarageCond GarageCars GarageArea
    df['GarageType'] = df.GarageType.fillna("NA")
    # zero out garage columns (except GarageType itself) for houses without a garage
    gar_cols = [c for c in list(df) if ('Garage' in c) and ('GarageType' not in c)]
    df.loc[df.GarageType == "NA", gar_cols] = 0
    dd(df[df.GarageType== "NA"][gar_cols+['GarageType']])
    #drop obsolete columns
    df = df.drop(['Id','GarageYrBlt'], axis=1)
    # SalePrice is NaN for test rows; 0 marks "unknown / to be predicted"
    df['SalePrice'] = df.SalePrice.fillna(0)
    dd(df.KitchenQual.unique())
    dd(df.BsmtCond.unique())
    return df
# Run stage-1 cleaning on a copy of the raw combined frame
df_stage_1 = cleanStage1(df_before_clean.copy())
df_stage_1.info()
# Rows still carrying the -1 sentinel / NaN after the basement & garage zero-fill
df_stage_1[df_stage_1.BsmtCond == -1]
df_stage_1[df_stage_1.GarageArea.isna()]
```
##### We mapped the basement columns when TotalBsmtSF = 0, but there are a couple of entries where TotalBsmtSF != 0. We may have to use other Bsmt- attributes to impute values for those, but we will do such analysis after the straightforward missing-value imputation.
##### Similarly, Garage variables are not completely imputed yet.
##### We have now 25 columns to look after for the first round of cleaning.
```
# Summary of how many columns still need imputation (NaN or the -1 sentinel)
dd("Total columns in the input dataset : ",df_stage_1.shape[1])
dd("Number of na columns : ",len([c for c in list(df_stage_1) if (-1 in df_stage_1[c].unique()) or (df_stage_1[df_stage_1[c].isna()].shape[0] > 0)]))
dd("To be imputed columns : ", [c for c in list(df_stage_1) if (-1 in df_stage_1[c].unique()) or (df_stage_1[df_stage_1[c].isna()].shape[0] > 0)])
```
##### Before Imputing let us find if there are further more ordinal variables
```
# BUGFIX: np.object was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `object` selects the same object-dtype columns.
_= [dd(c,df_stage_1[c].unique()) for c in df_stage_1.select_dtypes(include=object) ]
def checkOrdinal(df):
    """Inspect whether BldgType / HouseStyle behave ordinally with respect to
    total square footage, rooms and bedrooms (median and max per category)."""
    df['TotalSqFt'] = df['GrLivArea'] + df['TotalBsmtSF']
    dd(df.groupby('BldgType').agg('median').reset_index().sort_values('TotRmsAbvGrd')[['BldgType', 'TotalSqFt','TotRmsAbvGrd','BedroomAbvGr']])
    dd(df.groupby('BldgType').agg('max').reset_index().sort_values('TotRmsAbvGrd')[['BldgType', 'TotalSqFt','TotRmsAbvGrd','BedroomAbvGr']])
    dd(df.groupby('HouseStyle').agg('median').reset_index().sort_values('TotalSqFt')[['HouseStyle', 'TotalSqFt','TotRmsAbvGrd','BedroomAbvGr']])
    dd(df.groupby('HouseStyle').agg('max').reset_index().sort_values('TotalSqFt')[['HouseStyle', 'TotalSqFt','TotRmsAbvGrd','BedroomAbvGr']])
checkOrdinal(df_stage_1.copy())
```
##### We know from the domain knowledge "type of dwelling" that 1Fam < 2FmCon < Duplx < Twnhs. Now, between the town houses, I am finalizing the sequence through Total sq ft.
* final ordinal variable : 1Fam < 2FmCon < Duplx < Twnhs < TwnhsE
##### Similarly, for HouseStyle we have ,
* 1Story < 1.5Unf < 1.5Fin < 2Story < 2.5Unf < 2.5Fin < SFoyer < SLvl
##### I had confusion between Unf and Fin but I relied on again sq. ft, rooms and bedrooms. I could not get enough material to defend Sfoyer and SLvl, so again relied on those 3 parameters.
##### With domain knowledge(common sense) For LandContour,
* Low < Lvl < Bnk < HLS
```
def checkOrdinal1(df):
    """Show summed TotRmsAbvGrd per MSZoning, ordered by that sum."""
    per_zone = df.groupby('MSZoning').agg('sum').reset_index()
    per_zone = per_zone.sort_values('TotRmsAbvGrd')
    dd(per_zone[['MSZoning','TotRmsAbvGrd']])
checkOrdinal1(df_stage_1.copy())
```
##### Let us first see all the NaN values
```
def plotNAs(df):
    """For every column that contains NaNs, bar-plot its value frequencies with
    missing entries displayed as 'NotAvailable'."""
    nan_columns = df.columns[df.isna().any()].tolist()
    for col in nan_columns:
        counts = df[[col, 'SalePrice']].fillna('NotAvailable').groupby(by=col).agg('count')
        counts.plot.bar(legend=None, title="Frequency Plot for " + col)
        plt.xticks(rotation=45)
        plt.show()
plotNAs(df_stage_1.copy())
```
### 1. Alley
```
def outerLandScape(df_temp):
    """Impute missing Alley values from houses sharing the same
    (Street, Neighborhood, PavedDrive), falling back to "NA".

    Shows before/after frequency plots and prints before/after label counts.
    Returns a new frame; the input is not mutated.
    """
    df = df_temp.copy()
    print("Initial Shape : ", df.shape)
    beyond_house = ['Neighborhood','Street', 'PavedDrive', 'Alley']
    # Grouped count table; 'Alley' is a group key, so its column holds the
    # actual alley labels (SalePrice carries the per-group counts).
    df_temp = df.groupby(beyond_house).agg('count').reset_index()[['Neighborhood','Street', 'PavedDrive', 'Alley',"SalePrice"]]
    dd()
    def getAlley(Street, Neighborhood, PavedDrive):
        # First matching alley label for the given keys; "NA" when no match.
        # NOTE(review): "first" depends on the groupby row order — presumably
        # acceptable for this exploratory imputation; confirm if reused.
        try:
            alley = df_temp[
                    (df_temp['Street'] == Street ) &
                    (df_temp['Neighborhood'] == Neighborhood ) &
                    (df_temp['PavedDrive'] == PavedDrive)
                    ]['Alley'].tolist()[0]
        except:
            alley = 'NA'
        return alley
    df['Alley'] = df.Alley.fillna("NA")
    # "before" frequency plot
    df[['Alley','SalePrice']].fillna(0).\
    groupby(by='Alley').agg('count').\
    plot.bar(legend=None, title="Frequency Plot for "+'Alley')
    plt.xticks(rotation=45)
    plt.show()
    na_alley_count = df[df.Alley == "NA"].shape[0]
    gr_alley_count = df[df.Alley == "Grvl"].shape[0]
    pa_alley_count = df[df.Alley == "Pave"].shape[0]
    # Replace only the "NA" placeholders via the lookup above
    df['Alley'] = df.apply( lambda x: getAlley (x['Street']
                                                ,x['Neighborhood']
                                                ,x['PavedDrive']
                                                ) if x['Alley'] == "NA" else x['Alley']
                           ,axis=1)
    # "after" frequency plot
    df[['Alley','SalePrice']].fillna(0).\
    groupby(by='Alley').agg('count').\
    plot.bar(legend=None, title="Frequency Plot for "+'Alley')
    plt.xticks(rotation=45)
    plt.show()
    print("Alley Snapshot Before : NA -", na_alley_count,"Grvl - ", gr_alley_count, "Pave - ", pa_alley_count)
    print("Alley Snapshot After : NA -", df[df.Alley == "NA"].shape[0],
          "Grvl - ", df[df.Alley == "Grvl"].shape[0], "Pave - ", df[df.Alley == "Pave"].shape[0]
         )
    return df
df_alley = outerLandScape(df_stage_1)
def outerLandScape2():
    """Exploratory helper: reload the raw CSVs and display the
    Neighborhood x Street x Alley count table that motivates the Alley
    imputation keys. Earlier experiments are kept below as dead code."""
    df_train = pd.read_csv(train_File)
    df_test = pd.read_csv(test_File)
    df = pd.concat([df_train, df_test], axis=0,sort=True,ignore_index=True)
    print("Initial Shape : ", df.shape)
    '''beyond_house = [ 'MSZoning','Street', 'LotShape', 'LandContour', 'LotConfig', 'LandSlope', 'Neighborhood',
               'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'PavedDrive','SaleCondition','Fence', 'Alley']
    df_temp = df[beyond_house].copy()
    for c in beyond_house:
        df_temp = df_temp[df_temp[c].notna()]#.reset_index()
        print(c, df_temp.shape)'''
    #display.display(df_temp)
    #beyond_house = ['Neighborhood','Street', 'PavedDrive', 'Alley']
    #df = df.fillna('NNN')
    beyond_house = ['Neighborhood','Street','Alley']
    #beyond_house = ['Street', 'Alley']
    #display.display(df.groupby(beyond_house).agg('count').reset_index()[['Neighborhood','Street', 'PavedDrive', 'Alley','Id']])
    display.display(df.groupby(beyond_house).agg('count').reset_index()[['Neighborhood','Street', 'Alley','Id']])
outerLandScape2()
```
##### We can have the above table mapping missing Alley variable values. I am here assuming that Street and Alley would intersect. Therefore, there is a pattern with respect to location regarding the type of material used.
* However, the variable itself might not have such significance to target variable and hence can we can drop the imputation to it. The model which uses this variable would give lower significance level during training.
### 2. Electrical
```
def electricalWrangling(df_temp):
    """Impute missing Electrical codes (the -1 sentinel) with 4 (= 'SBrkr'),
    displaying the Neighborhood x Electrical tables before and after.

    Returns a new frame; the input is not mutated.
    """
    before = df_temp.copy()
    after = df_temp.copy()
    print("No of missing entries :", before[before.Electrical == -1].shape[0])
    before['Electrical'] = before.Electrical.fillna('NA')
    display.display(before[['Neighborhood','Electrical', 'SalePrice']].groupby(['Neighborhood','Electrical']).agg('count'))
    after.loc[after['Electrical'] == -1,'Electrical'] = 4
    display.display(after[['Neighborhood','Electrical', 'SalePrice']].groupby(['Neighborhood','Electrical']).agg('count'))
    return after
df_electrical = electricalWrangling(df_stage_1.drop('Alley',axis=1))
```
##### At Timber, most of them have SBrkr as electrical system. So, it is a safe bet to have the missing entry replaced with 'SBrkr'
### 3. Exterior1st & Exterior2nd
```
def exteriorWrangle(df_temp):
    """Impute missing Exterior1st/Exterior2nd with the most frequent combination
    among rows with the same (Neighborhood, ExterQual, ExterCond, MSSubClass).

    Returns a new frame; the input is not mutated.
    """
    df = df_temp.copy()
    dd(df.shape)
    dd(df[df.Exterior1st == df.Exterior2nd].shape)
    dd(df[df.Exterior1st.isna()][['Exterior1st','Exterior2nd', 'Neighborhood','ExterQual', 'ExterCond', 'MSSubClass']])
    dd(df[df.Exterior2nd.isna()][['Exterior1st','Exterior2nd', 'Neighborhood','ExterQual', 'ExterCond','MSSubClass']])
    # Frequency table sorted descending so tolist()[0] below picks the most
    # common combination for the matching keys
    dt = df.\
        groupby(['Exterior1st','Exterior2nd', 'Neighborhood','ExterQual', 'ExterCond', 'MSSubClass']).\
        agg('count').reset_index().\
        sort_values(by=['MSSubClass','SalePrice'],ascending=False)[['Exterior1st','Exterior2nd', 'Neighborhood','ExterQual', 'ExterCond','MSSubClass','SalePrice']]
    #dd(dt)
    dd(dt[(dt.MSSubClass == 30) & (dt.Neighborhood == 'Edwards')].head())
    def bestExt1(Neighborhood,ExterQual, ExterCond, MSSubClass):
        # Most frequent Exterior1st for the keys; raises IndexError if no match
        return dt[(dt.Neighborhood == Neighborhood)&
                  (dt.ExterQual == ExterQual)&
                  (dt.ExterCond == ExterCond)&
                  (dt.MSSubClass == MSSubClass)]['Exterior1st'].tolist()[0]
    def bestExt2(Neighborhood,ExterQual, ExterCond, MSSubClass):
        # Same lookup for Exterior2nd
        return dt[(dt.Neighborhood == Neighborhood)&
                  (dt.ExterQual == ExterQual)&
                  (dt.ExterCond == ExterCond)&
                  (dt.MSSubClass == MSSubClass)]['Exterior2nd'].tolist()[0]
    # Mark missing entries with "NA", then replace only those via the lookups
    df['Exterior1st'] = df.Exterior1st.fillna('NA')
    df['Exterior2nd'] = df.Exterior2nd.fillna('NA')
    df['Exterior1st'] = df.apply(lambda x: bestExt1(
                                    x['Neighborhood'], x['ExterQual'], x['ExterCond'], x['MSSubClass']
                                    ) if x['Exterior1st'] =="NA" else x['Exterior1st'], axis=1)
    df['Exterior2nd'] = df.apply(lambda x: bestExt2(
                                    x['Neighborhood'], x['ExterQual'], x['ExterCond'], x['MSSubClass']
                                    ) if x['Exterior2nd'] =="NA" else x['Exterior2nd'], axis=1)
    # Spot-check the row that had the missing values
    dd(df[df.index == 2151][['Exterior1st','Exterior2nd', 'Neighborhood','ExterQual', 'ExterCond', 'MSSubClass']])
    return df
df_ext = exteriorWrangle(df_electrical)
```
##### it is observed that many of the houses have Exterior1st and Exterior2nd same values per neighborhood. Therefore, we will create a matrix of neighborhood and Exterior1st. We will first populate Exterior1st from neighborhood value and then we will populate Exterior2nd from Exterior1st.
### 4. Fence
```
def fenceWrangling(df_temp):
    """Fill missing Fence with 'NA' (interpreted as "no fence") and display the
    Neighborhood x Fence frequency table. Returns a new frame."""
    out = df_temp.copy()
    print("Count of missing Fence : ", out[out.Fence.isna()].shape)
    out['Fence'] = out.Fence.fillna('NA')
    freq = out.groupby(['Neighborhood', 'Fence']).agg('count').reset_index()
    dd(freq[['Neighborhood', 'Fence', 'SalePrice']])
    return out
df_fence = fenceWrangling(df_ext)
```
##### It is too tedious to decide if missing value indicates "NA" - No Fence or the entry was missing. What does actually Fence depend on ?
* For now I am going to rely on NA for the missing entry. Safest assumption.
##### It is too risky to map from other variables. Though it seems like it depends on LotArea or LotFrontage, it is not very clear if it depends solely on one of those variables or on some combination of others. Let us keep it NA for missing values, so that it would mean no fence available.
```
def plotMinusOnes(df):
    """Bar-plot value frequencies for every column still containing the -1
    "missing" sentinel produced by the ordinal encoding."""
    sentinel_cols = [c for c in list(df) if -1 in df[c].unique()]
    for col in sentinel_cols:
        counts = df[[col, 'SalePrice']].groupby(by=col).agg('count')
        counts.plot.bar(legend=None, title="Frequency Plot for " + col)
        plt.xticks(rotation=45)
        plt.show()
plotMinusOnes(df_stage_1.copy())
```
### 5.0 Functional
```
def functionalWrangle(df):
    """Impute missing Functional codes (-1) with 7 (= 'Typ'), after inspecting
    quality/condition columns of the affected rows. Mutates and returns df."""
    print("Unique values in Functional variable Before: ", df.Functional.unique())
    print("How many missing entries are there in Functional variable : ", df[df.Functional == -1].shape[0])
    quality_cols = [c for c in list(df) if ("Co" in c) or ("Qu" in c)]
    quality_cols += ['SalePrice', 'Functional']
    print("Quality and Condition Related columns : ", quality_cols)
    dd("Samples with missing functional feature values : ", df[df.Functional == -1][quality_cols])
    dd("Let us see the samples which have similar Overall Condition and Quality")
    dd(df[(df.OverallCond == 5) & (df.OverallQual == 1)][quality_cols])
    dd(df[(df.OverallCond == 1) & (df.OverallQual == 4)][quality_cols])
    df.loc[df.Functional == -1, "Functional"] = 7
    print("Unique values in Functional variable : ", df.Functional.unique())
    return df
df_funct = functionalWrangle(df_fence.copy())
```
##### the rule itself says, assume typical unless deductions are warranted. However, there is no entry of salvage in our data set. Though it is not mandatory to have all categorical values has to be there in the dataset but it always raises the question why not that variable ? Can *Cond and *Qu variable give us hint of not 'Sal' ?
* I am actually tempted to put 'Sal' but due to lack of samples for Sal, I will be putting it as 'Typ'
### 6. LotFrontage
```
def LotFrontagecheck(df):
    """Impute missing LotFrontage with the most frequent (modal) LotFrontage of
    the same Neighborhood.

    Rationale: neighboring houses tend to share the same distance to the road.
    Mutates and returns df.
    """
    print("Number of samples with missing LotFrontage variable values (Before): ", df[df.LotFrontage.isna()].shape[0])
    # Modal value per Neighborhood: value_counts() sorts by frequency and drops
    # NaN, so index[0] is the most frequent non-missing LotFrontage.
    df_LotFrontage = df[['Neighborhood','LotFrontage']].groupby('Neighborhood').agg(lambda x:x.value_counts().index[0]).reset_index()
    df_dict = dict([tuple(x) for x in df_LotFrontage.values])
    # BUGFIX: the message previously claimed "median" although the aggregation
    # above picks the most frequent value.
    print("\nLotFrontage most frequent value per Neighborhood ", df_dict)
    df['LotFrontage'] = df.LotFrontage.fillna(-1)  # -1 marks rows to impute
    df['LotFrontage'] = df.apply(lambda x: df_dict[x['Neighborhood']] if x['LotFrontage'] == -1 else x['LotFrontage'],axis=1)
    print("\nNumber of samples with missing LotFrontage variable values (After): ", df[df.LotFrontage.isna()].shape[0])
    return df
df_lot = LotFrontagecheck(df_funct.copy())
```
##### LotFrontage: taking the neighborhood as reference, the most occurring distance is used for filling missing values. Inspiration: neighboring houses have the same distance to the road / gate.
* Lot area > 10000 & LotFrontage > 200 seems like outliers
### 7. MSZoning
```
def msZoningWrangle(df):
    """Impute missing MSZoning with the most frequent zoning among rows with the
    same (Neighborhood, MSSubClass) pair. Mutates and returns df."""
    # NOTE(review): zone_related is assembled but never used below — kept as-is.
    zone_related = ['LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1',
                    'Condition2', 'BldgType', 'OverallQual', 'OverallCond', 'MSZoning'
                   ]
    # Count table sorted descending so tolist()[0] picks the most common zoning
    temp = df.groupby(['Neighborhood','MSSubClass','MSZoning']).\
        agg('count').reset_index().\
        sort_values(by=['SalePrice'],ascending=False)[['Neighborhood','MSSubClass','MSZoning','SalePrice']]
    dd(temp[(temp.Neighborhood == 'IDOTRR') |(temp.Neighborhood == 'Mitchel')])
    def returnmsZone(Neighborhood,MSSubClass):
        # Most frequent MSZoning for the given keys
        return temp[(temp.Neighborhood == Neighborhood) &
                    (temp.MSSubClass == MSSubClass)]['MSZoning'].tolist()[0]
    dd(df[df.MSZoning.isna()][['Neighborhood','MSSubClass','MSZoning']])
    # Mark missing entries with "NA", then replace only those via the lookup
    df['MSZoning'] = df.MSZoning.fillna("NA")
    df['MSZoning'] = df.apply(lambda x: returnmsZone(x['Neighborhood'], x['MSSubClass'])
                              if x['MSZoning'] == "NA" else x['MSZoning'], axis=1)
    # Spot-check the four rows that were missing
    dd(df[df.index.isin([1915,2216,2250,2904])][['Neighborhood','MSSubClass','MSZoning']])
    return df
df_ms = msZoningWrangle(df_lot.copy())
```
##### MSZoning is the general zoning classification. Therefore, it must be specific to an area and hence 'Neighborhood' is the variable to our rescue. MSZoning = RL when the neighborhood is 'Mitchel' and RM when the neighborhood is IDOTRR and the value is missing.
### 8.0 MasVnrType & MasVnrArea
```
def MasVnrTypeCheck(df_temp):
    """Fill missing MasVnrType with "None" and MasVnrArea with 0 (no veneer),
    after displaying the joint missing-value patterns. Returns a new frame."""
    out = df_temp.copy()
    print(out[out.MasVnrType.isna()].shape, )
    # Show how the two columns' missing/zero patterns line up
    dd(out[['MasVnrType','MasVnrArea']][out.MasVnrType.isna()].head())
    dd(out[['MasVnrType','MasVnrArea']][out.MasVnrArea.isna()].head())
    dd(out[['MasVnrType','MasVnrArea']][out.MasVnrArea == 0].head())
    dd(out[['MasVnrType','MasVnrArea']][out.MasVnrType == 'None'].head())
    out['MasVnrType'] = out.MasVnrType.fillna("None")
    out['MasVnrArea'] = out.MasVnrArea.fillna(0)
    out['MasVnrArea'] = out['MasVnrArea'].astype(np.float64)
    return out
df_mas = MasVnrTypeCheck(df_ms)
```
* MasVnrArea nan count is equivalent to MasVnrType count.
* MasVnrArea == 0 is already present
* whenever MasVnrArea == 0 MasVnrType is also None
* Therefore, MasVnrArea will be mapped to zero when MasVnrType = None
##### Outlier: area > 1400 is only one sample which has low sale price. Its overall condition and quality is moderate and there are enough sample for those bands.
### 9. MiscFeature
```
def miscFeatureWrangle(df):
    """Set MiscFeature to "NA" when MiscVal is 0, and to "Gar2" for the lone
    high-value (17000) record; show MiscFeature/MiscVal tables before and after.
    Mutates and returns df."""
    def summarize(frame):
        # Joint frequency of MiscFeature x MiscVal with NaN displayed as 'NA'
        return frame.fillna('NA').groupby(['MiscFeature','MiscVal']).agg('count').reset_index()[['MiscFeature','MiscVal','SalePrice']]
    snapshot = summarize(df)
    dd("Before :", snapshot[snapshot.MiscVal == 0])
    dd(snapshot[snapshot.MiscFeature == "NA"])
    df['MiscFeature'] = df.apply(lambda row: "NA" if row['MiscVal'] == 0 else row['MiscFeature'], axis=1)
    df['MiscFeature'] = df.apply(lambda row: "Gar2" if row['MiscVal'] == 17000.0 else row['MiscFeature'], axis=1)
    snapshot = summarize(df)
    dd("After :", snapshot[snapshot.MiscVal == 0])
    dd(snapshot[snapshot.MiscFeature == "NA"])
    return df
df_misc = miscFeatureWrangle(df_mas.copy())
```
* When MiscVal == 0 ; MiscFeature is mostly NA (None). Note that it can be Shed or Other too. Will park it for next level fine tuning.
* High Values are dedicated to 'Gar2'. Therefore, testing set missing value is gar2 for sure.
### 10. PoolQC
```
def poolWrangling(df):
    """Impute PoolQC sentinels: 0 (= 'NA', no pool) when PoolArea is 0,
    otherwise 2 (= 'Fa'). Mutates and returns df."""
    pool_cols = ['PoolArea', 'PoolQC', 'OverallCond', 'OverallQual']
    print("Count of Samples which have PoolQC valid values : ", df[df.PoolQC != -1].shape[0])
    print("Count of missing PoolQC : ", df[df.PoolQC == -1].shape[0])
    dd("Pattern of missing PoolQC vs Overall Cond n Qual", df[df.PoolQC == -1][pool_cols].head())
    dd("Pattern of PoolQC when PoolArea = 0", df[df.PoolArea == 0][pool_cols]['PoolQC'].unique())
    dd("Dataset samples with PoolArea missing ", df[df.PoolArea.isna()])
    dd("PoolQC vs Overall Cond and Qual : ", df.groupby(['OverallCond', 'OverallQual', 'PoolQC']).agg('count').reset_index()[['OverallCond', 'OverallQual', 'PoolQC', 'SalePrice']])
    df.loc[(df['PoolArea'] == 0) & (df['PoolQC'] == -1), 'PoolQC'] = 0
    df.loc[(df['PoolArea'] > 0) & (df['PoolQC'] == -1), 'PoolQC'] = 2
    dd(df[df.PoolQC == -1][pool_cols])
    dd(Counter(df.PoolQC))
    return df
df_pool = poolWrangling(df_misc.copy())
```
##### When PoolArea is 0 PoolQC will be NA (no pool). When PoolArea > 0 it appears to be good candidates for "Fa".
* when PoolQC should be "NA" when PoolArea = 0 [Thumb rule / Common sense]
* Missing values have characteristics matching those of the "Fair" condition. It may be "TA" but we don't have enough evidence — or rather no evidence — for that.
### 11. SaleType
```
def saleTypeWrangling(df):
    """Fill the missing SaleType with the most common SaleType among rows
    sharing the same (MSSubClass, MSZoning, SaleCondition). Mutates and
    returns df."""
    key_cols = ['MSSubClass', 'MSZoning', 'SaleCondition', 'SaleType']
    dd(df[df.SaleType.isna()][key_cols])
    freq = df.groupby(key_cols).agg('count').reset_index().\
        sort_values('SalePrice', ascending=False)[key_cols + ['SalePrice']]
    dd(freq[freq.MSSubClass == 20])
    def popSaleType(MSSubClass, MSZoning, SaleCondition):
        # Most frequent SaleType for the matching key combination
        match = freq[(freq.MSSubClass == MSSubClass) &
                     (freq.MSZoning == MSZoning) &
                     (freq.SaleCondition == SaleCondition)]
        return match['SaleType'].tolist()[0]
    df['SaleType'] = df.SaleType.fillna("NA")
    df['SaleType'] = df.apply(lambda x: popSaleType(x['MSSubClass'], x['MSZoning'], x['SaleCondition'])
                              if x['SaleType'] == "NA" else x['SaleType'], axis=1)
    dd(df[df.index == 2489][key_cols])
    return df
df_sale = saleTypeWrangling(df_pool.copy())
```
### 12. Utilities
```
def utilWrangling(df):
    """Replace -1 Utilities codes with the most common Utilities value of the
    same Neighborhood. Mutates and returns df."""
    dd("Missing entries for Utilities : ", df[df.Utilities == -1][['Neighborhood', 'Utilities']])
    freq = df.groupby(['Neighborhood', 'Utilities']).agg('count').reset_index()
    freq = freq.sort_values('SalePrice', ascending=False)[['Neighborhood', 'Utilities', 'SalePrice']]
    dd("Relation between Neighborhood and Utilities : ", freq)
    def returnUtil(Neighborhood):
        # Most frequent Utilities code for the neighborhood
        return freq[(freq.Neighborhood == Neighborhood)]["Utilities"].tolist()[0]
    df['Utilities'] = df.apply(lambda x: returnUtil(x['Neighborhood']) if x['Utilities'] == -1 else x["Utilities"], axis=1)
    dd("Post imputation :", df[df.index.isin([1915, 1945])][['Neighborhood', 'Utilities']])
    return df
df_util = utilWrangling(df_sale.copy())
```
### 13. KitchenQual
```
def kitchenQual(df):
    """Impute -1 KitchenQual with the most frequent code among rows sharing the
    same (OverallCond, OverallQual, KitchenAbvGr). Mutates and returns df."""
    key_cols = ['OverallCond', 'OverallQual', 'KitchenAbvGr', 'KitchenQual']
    dd("Missing samples for KitchenQual", df[df.KitchenQual == -1][key_cols])
    freq = df.groupby(key_cols).agg('count').\
        reset_index().sort_values('SalePrice', ascending=False)[key_cols]
    dd("Kitchen Table : ", freq)
    def returnkqual(OverallCond, OverallQual, KitchenAbvGr):
        # Most frequent KitchenQual code for the matching keys
        match = freq[(freq.OverallCond == OverallCond) &
                     (freq.OverallQual == OverallQual) &
                     (freq.KitchenAbvGr == KitchenAbvGr)]
        return match['KitchenQual'].tolist()[0]
    df['KitchenQual'] = df.apply(lambda x: returnkqual(x['OverallCond'], x['OverallQual'], x['KitchenAbvGr'])
                                 if x['KitchenQual'] == -1 else x['KitchenQual'], axis=1)
    dd(df[df.index == 1555][key_cols])
    return df
df_kitchen = kitchenQual(df_util.copy())
```
### Checkpoint - 1
```
# Checkpoint: visualize remaining NaN / -1 entries after the imputations above.
plotNAs(df_kitchen.copy())
plotMinusOnes(df_kitchen.copy())
def bsmtInterpolate(df):
    """Impute the five categorical basement columns (missing encoded as -1)
    from one another: a frequency table over all five columns is built once,
    then each missing column is filled with the value from the most frequent
    row matching the other four columns.

    NOTE(review): columns are imputed sequentially (Cond, Exposure, FinType1,
    FinType2, Qual), so later fills see earlier fills' results.
    """
    bsmt_column = ['BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'BsmtQual']
    print(bsmt_column)
    bsmt_column2 = [c for c in list(df) if 'Bsmt' in c]  # every Bsmt* column (not used below)
    dd("BsmtCond missing entries : ", df[df.BsmtCond == -1][bsmt_column])
    # accumulate the indices of every row missing any basement value, for the
    # post-imputation display at the end
    bsmt_index = []
    bsmt_index += list(df[df.BsmtCond == -1].index)
    dd("BsmtExposure missing entries : ", df[df.BsmtExposure == -1][bsmt_column])
    bsmt_index += list(df[df.BsmtExposure == -1].index)
    dd("BsmtFinType1 missing entries :",df[df.BsmtFinType1 == -1][bsmt_column])
    bsmt_index += list(df[df.BsmtFinType1 == -1].index)
    dd("BsmtFinType2 missing entries :",df[df.BsmtFinType2 == -1][bsmt_column])
    bsmt_index += list(df[df.BsmtFinType2 == -1].index)
    dd("BsmtQual missing entries : ", df[df.BsmtQual == -1][bsmt_column])
    bsmt_index += list(df[df.BsmtQual == -1].index)
    # frequency of each 5-column combination; 'SalePrice' is just the row
    # count produced by agg('count')
    df1 = df.groupby(bsmt_column).agg('count').reset_index().sort_values('SalePrice', ascending=False)[(bsmt_column + ['SalePrice'])]
    df1 = df1.sort_values('SalePrice',ascending=False)  # redundant re-sort, kept as-is
    #dd(df1)
    # Each getter returns the target column of the most frequent combination
    # that matches the other four basement columns.
    def getBsmtCond(BsmtExposure, BsmtFinType1, BsmtFinType2, BsmtQual):
        return df1[(df1.BsmtExposure == BsmtExposure)&
                   (df1.BsmtFinType1 == BsmtFinType1)&
                   (df1.BsmtFinType2 == BsmtFinType2)&
                   (df1.BsmtQual == BsmtQual)
                  ]['BsmtCond'].tolist()[0]
    def getBsmtExposure(BsmtCond, BsmtFinType1, BsmtFinType2, BsmtQual):
        return df1[(df1.BsmtCond == BsmtCond)&
                   (df1.BsmtFinType1 == BsmtFinType1)&
                   (df1.BsmtFinType2 == BsmtFinType2)&
                   (df1.BsmtQual == BsmtQual)
                  ]['BsmtExposure'].tolist()[0]
    def getBsmtFinType1(BsmtExposure, BsmtCond, BsmtFinType2, BsmtQual):
        return df1[(df1.BsmtExposure == BsmtExposure)&
                   (df1.BsmtCond == BsmtCond)&
                   (df1.BsmtFinType2 == BsmtFinType2)&
                   (df1.BsmtQual == BsmtQual)
                  ]['BsmtFinType1'].tolist()[0]
    def getBsmtFinType2(BsmtExposure, BsmtFinType1, BsmtCond, BsmtQual):
        return df1[(df1.BsmtExposure == BsmtExposure)&
                   (df1.BsmtFinType1 == BsmtFinType1)&
                   (df1.BsmtCond == BsmtCond)&
                   (df1.BsmtQual == BsmtQual)
                  ]['BsmtFinType2'].tolist()[0]
    def getBsmtQual(BsmtExposure, BsmtFinType1, BsmtFinType2, BsmtCond):
        return df1[(df1.BsmtExposure == BsmtExposure)&
                   (df1.BsmtFinType1 == BsmtFinType1)&
                   (df1.BsmtFinType2 == BsmtFinType2)&
                   (df1.BsmtCond == BsmtCond)
                  ]['BsmtQual'].tolist()[0]
    #['BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'BsmtQual']
    df['BsmtCond'] = df.apply(lambda x: getBsmtCond(x['BsmtExposure'], x['BsmtFinType1'], x['BsmtFinType2'], x['BsmtQual'])
                              if x['BsmtCond'] == -1 else x['BsmtCond']
                              , axis = 1)
    df['BsmtExposure'] = df.apply(lambda x: getBsmtExposure(x['BsmtCond'], x['BsmtFinType1'], x['BsmtFinType2'], x['BsmtQual'])
                                  if x['BsmtExposure'] == -1 else x['BsmtExposure']
                                  , axis = 1)
    df['BsmtFinType1'] = df.apply(lambda x: getBsmtFinType1(x['BsmtExposure'], x['BsmtCond'], x['BsmtFinType2'], x['BsmtQual'])
                                  if x['BsmtFinType1'] == -1 else x['BsmtFinType1']
                                  , axis = 1)
    df['BsmtFinType2'] = df.apply(lambda x: getBsmtFinType2(x['BsmtExposure'], x['BsmtFinType1'], x['BsmtCond'], x['BsmtQual'])
                                  if x['BsmtFinType2'] == -1 else x['BsmtFinType2']
                                  , axis = 1)
    df['BsmtQual'] = df.apply(lambda x: getBsmtQual(x['BsmtExposure'], x['BsmtFinType1'], x['BsmtFinType2'], x['BsmtCond'])
                              if x['BsmtQual'] == -1 else x['BsmtQual']
                              , axis = 1)
    dd("Post imputation :", df[df.index.isin(bsmt_index)][bsmt_column])
    return df
df_bsmt_final = bsmtInterpolate(df_kitchen.copy())
```
### Garage*
```
def garageWrangling(df):
    """Impute missing Garage* columns from a frequency table grouped on
    (GarageType, MiscFeature, OverallQual, OverallCond) plus the other
    garage columns.

    Missing markers differ by column: GarageArea/GarageCars are NaN (filled
    with the sentinel "NA" first), the categorical garage columns use -1.
    """
    garage_columns = [c for c in df.select_dtypes(exclude=np.number) if "Garage" in c]  # categorical only (display)
    print(garage_columns)
    garage_col2 = [c for c in list(df) if "Garage" in c]  # all Garage* columns
    grp_by = ['GarageType','MiscFeature','OverallQual','OverallCond','GarageFinish','GarageQual','GarageCond',\
    'GarageArea','GarageCars']
    new_col = list(set(garage_col2 + grp_by + ['MiscFeature']))
    # collect indices of every row with a missing garage value, for the
    # post-imputation display at the end
    miss_index = []
    for c in garage_col2:
        df_temp = df[df[c] == -1][new_col]
        dd("Missing Entries for "+c+ " : ",df_temp)
        miss_index += list(df_temp.index)
    # frequency of each combination; 'SalePrice' is the count from agg('count')
    df1 = df.groupby(grp_by).agg('count').reset_index().sort_values('SalePrice',ascending=False)[grp_by + ['SalePrice']]
    dd("when garage type == Detchd : ", df1[df1.GarageType == 'Detchd'])
    # Each getter returns its target column from the most frequent combination
    # matching (GarageType, MiscFeature, OverallQual, OverallCond).
    def getGarageArea( GarageType, MiscFeature, OverallQual, OverallCond):
        return df1[
            (df1.GarageType == GarageType) &
            (df1.MiscFeature == MiscFeature) &
            (df1.OverallQual == OverallQual) &
            (df1.OverallCond == OverallCond)
        ]['GarageArea'].tolist()[0]
    def getGarageCars( GarageType, MiscFeature, OverallQual, OverallCond):
        return df1[
            (df1.GarageType == GarageType) &
            (df1.MiscFeature == MiscFeature) &
            (df1.OverallQual == OverallQual) &
            (df1.OverallCond == OverallCond)
        ]['GarageCars'].tolist()[0]
    def getGarageCond(GarageType, MiscFeature, OverallQual, OverallCond):
        return df1[
            (df1.GarageType == GarageType) &
            (df1.MiscFeature == MiscFeature) &
            (df1.OverallQual == OverallQual) &
            (df1.OverallCond == OverallCond)
        ]['GarageCond'].tolist()[0]
    def getGarageFinish(GarageType, MiscFeature, OverallQual, OverallCond):
        return df1[
            (df1.GarageType == GarageType) &
            (df1.MiscFeature == MiscFeature) &
            (df1.OverallQual == OverallQual) &
            (df1.OverallCond == OverallCond)
        ]['GarageFinish'].tolist()[0]
    def getGarageQual(GarageType, MiscFeature, OverallQual, OverallCond):
        return df1[
            (df1.GarageType == GarageType) &
            (df1.MiscFeature == MiscFeature) &
            (df1.OverallQual == OverallQual) &
            (df1.OverallCond == OverallCond)
        ]['GarageQual'].tolist()[0]
    # NaN -> "NA" sentinel so the apply below can detect the missing rows
    df['GarageArea'] = df['GarageArea'].fillna("NA")
    df['GarageArea'] = df.apply(lambda x: getGarageArea( x['GarageType'], x['MiscFeature'], x['OverallQual'], x['OverallCond'])
                                if x['GarageArea'] =="NA" else x['GarageArea']
                                ,axis =1)
    df['GarageCars'] = df['GarageCars'].fillna("NA")
    df['GarageCars'] = df.apply(lambda x: getGarageCars( x['GarageType'], x['MiscFeature'], x['OverallQual'], x['OverallCond'])
                                if x['GarageCars'] =="NA" else x['GarageCars']
                                ,axis =1)
    df['GarageCond'] = df.apply(lambda x: getGarageCond( x['GarageType'], x['MiscFeature'], x['OverallQual'], x['OverallCond'])
                                if x['GarageCond'] == -1 else x['GarageCond']
                                ,axis =1)
    df['GarageFinish'] = df.apply(lambda x: getGarageFinish( x['GarageType'], x['MiscFeature'], x['OverallQual'], x['OverallCond'])
                                  if x['GarageFinish'] == -1 else x['GarageFinish']
                                  ,axis =1)
    df['GarageQual'] = df.apply(lambda x: getGarageQual( x['GarageType'], x['MiscFeature'], x['OverallQual'], x['OverallCond'])
                                if x['GarageQual'] == -1 else x['GarageQual']
                                ,axis =1)
    dd(df[df.index.isin(miss_index)][new_col])
    return df
df_garage_final = garageWrangling(df_bsmt_final.copy())
```
### Checkpoint 2
```
# Checkpoint: confirm no NaN / -1 entries remain after basement & garage imputation.
plotNAs(df_garage_final.copy())
plotMinusOnes(df_garage_final.copy())
# Spot-check a few columns the sweeps above might have missed.
did_we_miss_them = ['MSSubClass', 'OverallQual', 'OverallCond', 'FireplaceQu', ]
for c in did_we_miss_them:
    dd(c, df_garage_final[c].unique())
def FireplaceWrangling(df_temp):
    """Set FireplaceQu to 0 (meaning: no fireplace) wherever Fireplaces == 0."""
    # Diagnostics against the raw training file: missing FireplaceQu lines up
    # exactly with Fireplaces == 0.
    raw = pd.read_csv(train_File)
    pair = raw[['Fireplaces', 'FireplaceQu']]
    print("Count of missing FireplaceQu : ", raw[raw.FireplaceQu.isna()].shape[0])
    dd("Count of Fireplaces == 0", pair[raw.Fireplaces == 0].shape)
    dd(pair[raw.FireplaceQu.isna()].head())
    dd("When FireplaceQu == NA, Fireplaces:", pair[raw.FireplaceQu.isna()]['Fireplaces'].unique())
    dd("When Fireplaces == 0, FireplaceQu:", pair[raw.Fireplaces == 0]['FireplaceQu'].unique())
    # The wrangled dataset keeps NA as nan; encode "no fireplace" as 0 for
    # consistency with the other ordinal columns.
    df = df_temp.copy()
    df['FireplaceQu'] = df.apply(lambda row: 0 if row['Fireplaces'] == 0 else row['FireplaceQu'], axis=1)
    dd("Post imputing :", df.FireplaceQu.unique())
    return df
df_fire = FireplaceWrangling(df_garage_final.copy())
```
##### FireplaceQu will be mapped to NA (no fireplace) whenever Fireplaces = 0
##### TotalBsmtSF = 0 indicates there is no basement.
* Therefore, BsmtQual = BsmtCond = BsmtExposure = BsmtFinType1 = BsmtFinType2 = "NA"; when TotalBsmtSF = 0
* Outlier: the two rows matching (df.BsmtFinSF1 > 2000) & (df.SalePrice < 200000) are outliers because they drag the correlation down, and there are enough remaining samples with the same overall condition and quality to cover them.
##### Let us check whether the datatypes were corrupted by our imputation operations
```
# Verify the imputation did not corrupt column dtypes by comparing the
# object/number column sets against the pre-cleaning frame.
# FIX: np.object was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `object` is the supported equivalent for select_dtypes.
post_imputing_cols = list(df_fire.select_dtypes(include=object))
cols_need_change = [c for c in list(df_before_clean.select_dtypes(include=np.number)) if c in post_imputing_cols]
#dd(df_before_clean[df_before_clean.MasVnrArea == 'BrkFace'])
'''for c in post_imputing_cols:
    if c in cols_need_change:
        print(c)
        df_fire[c] = df_fire[c].astype(np.float64)'''
print()
# Numeric columns present before cleaning but lost afterwards (expect none).
post_imputing_cols = list(df_fire.select_dtypes(include=np.number))
[c for c in list(df_before_clean.select_dtypes(include=np.number)) if c not in post_imputing_cols]
# Object columns present before cleaning but lost afterwards (expect none).
post_imputing_cols = list(df_fire.select_dtypes(include=object))
[c for c in list(df_before_clean.select_dtypes(include=object)) if c not in post_imputing_cols]
post_imputing_cols = list(df_fire.select_dtypes(include=np.number))
# Show unique values of every categorical column, then check that no numeric
# column still carries the -1 missing marker.
_= [dd(c,df_fire[c].unique()) for c in list(df_fire.select_dtypes(exclude=np.number))]
print("Are there any missing entries for numerical variables ?: ", len([dd(df_fire[c].unique() ) for c in post_imputing_cols if -1 in df_fire[c].unique()]) > 0)
```
### Outliers
```
# Outlier analysis runs on training rows only (test rows carry SalePrice == 0).
print("Shape of the imputed dataset : ", df_fire.shape)
df_out = df_fire[df_fire.SalePrice > 0].copy()
print("Shape of the Outlier Analysis dataset : ", df_out.shape)
def computeCorrCols(df):
    """Return pairwise |correlation| values as a long table with columns
    (C1, C2, Corr_val), strongest first.

    The diagonal (corr == 1) is dropped, and rows with duplicate Corr_val are
    collapsed — this removes mirrored (C1, C2)/(C2, C1) pairs, though distinct
    pairs with identical values are also collapsed.
    """
    pairs = df.corr().stack().reset_index()
    pairs = pairs.rename(columns={'level_0': "C1", "level_1": "C2", 0: "Corr_val"})
    pairs['Corr_val'] = pairs['Corr_val'].abs()
    pairs = pairs[pairs['Corr_val'] < 1].sort_values('Corr_val', ascending=False)
    pairs = pairs.drop_duplicates('Corr_val').reset_index(drop=True)
    dd(pairs)
    return pairs
df_corr = computeCorrCols(df_out.copy())
def checkCorr(df_old):
    """Display the 15 variables most |correlated| with SalePrice: once for the
    raw numeric columns, once including the dummy-encoded categoricals."""
    dummies = pd.get_dummies(df_old)
    corr_matrix = dummies.corr()
    ranked = sorted(((c, np.abs(corr_matrix.loc[c, 'SalePrice'])) for c in list(corr_matrix)),
                    key=lambda pair: pair[1], reverse=True)
    # dummy columns contain '_' (from get_dummies naming); exclude them for
    # the numeric-only view. [1:15] skips SalePrice itself (corr == 1).
    dd("Top 15 Numerical Variables with High Corr value for SalePrice :",
       [pair for pair in ranked if '_' not in pair[0]][1:15])
    dd("Top 15 Variables (All Types) with High Corr value for SalePrice :",
       ranked[1:15])
checkCorr(df_out.copy())
```
##### There may be so many outliers but let us target the ones which are not only numerical but also impact the target variables. Let us target top 5 variables for outlier removal. i.e. OverallQual, GrLivArea, ExterQual, KitchenQual, GarageCars & TotalBsmtSF.
* Note that I am not selecting GarageArea for outlier detection, for the following reasons:
* it has high co relation with GarageCars
* GarageCars and GarageArea both gets imputed in our last steps but the error margin is less for GarageCars when compared to GarageArea
```
df_out.GarageCars.unique()
def topCorrWithOthers(df):
    # NOTE(review): the `df` parameter is unused — the correlations shown come
    # from the module-level `df_corr` table (computed from df_out above), even
    # though the call below passes df_fire. Confirm intent.
    top_5_corr = ['OverallQual', 'GrLivArea', 'ExterQual', 'KitchenQual', 'GarageCars', 'TotalBsmtSF']
    print("Co relation of the top 5 columns with others : ")
    for c in top_5_corr:
        dd(c, df_corr[(df_corr.C1 == c) | (df_corr.C2 == c)].head())
topCorrWithOthers(df_fire.copy())
#df_out['SalePrice'] = df_out['SalePrice'] / df_out.GrLivArea
```
### 1. OverallQual Outlier Check
```
def overallQualOutlier(df):
    """Remove hand-picked OverallQual/SalePrice outliers and report the
    correlation with SalePrice before and after.

    Each filter below keeps every row EXCEPT one outlier pocket (quality band
    with an implausibly low or high price). Returns the filtered copy.
    """
    df.plot.scatter("OverallQual", "SalePrice", title="Before Removal of Outlier")
    df_corr = df.corr()
    print("Co relation before removing the outlier : ",df_corr.loc['OverallQual','SalePrice'])
    df1 = df[(df.OverallQual != 10) | (df.SalePrice >200000 )]       # drop cheap quality-10 homes
    df1 = df1[(df1.OverallQual != 4) | (df1.SalePrice <200000 )]     # drop expensive quality-4 homes
    df1 = df1[(df1.SalePrice >125000 ) | (df1.OverallQual != 7)]     # drop cheap quality-7 homes
    df1 = df1[(df1.OverallQual != 8) | (df1.SalePrice >125000 )]     # drop cheap quality-8 homes
    df1 = df1[(df1.OverallQual != 7) | (df1.SalePrice <360000)]      # drop expensive quality-7 homes
    df1 = df1[(df1.OverallQual != 8) | (df1.SalePrice < 460000)]     # drop expensive quality-8 homes
    #.plot.scatter("OverallQual", "SalePrice", title="Zoomed view on a outlier")
    print("Outlier Count: ", df.shape[0]-df1.shape[0])
    df1.plot.scatter("OverallQual", "SalePrice", title="After Removal of Outlier")
    df_corr = df1.corr()
    print("Co relation After removing the outlier : ",df_corr.loc['OverallQual','SalePrice'])
    return df1
df_overallqual = overallQualOutlier(df_out.copy())
```
### 2. GrLivArea Outlier Check
```
def grLivAreaOutlier(df):
    """Remove hand-picked GrLivArea/SalePrice outliers (huge area at low
    price, small area at very high price) and report the correlation with
    SalePrice before and after. Returns the filtered copy."""
    df.plot.scatter("GrLivArea", "SalePrice", title="Before Removal of Outlier")
    print("Co relation before removing the outlier : ", df.corr().loc['GrLivArea','SalePrice'])
    kept = df[(df.GrLivArea < 4000) | (df.SalePrice > 250000)]
    kept = kept[(kept.GrLivArea > 2000) | (kept.SalePrice < 380000)]
    kept = kept[(kept.GrLivArea < 3300) | (kept.SalePrice > 220000)]
    print("Outlier Count: ", df.shape[0] - kept.shape[0])
    #df1[(df1.GrLivArea > 3000)&(df.SalePrice <300000)].plot.scatter("GrLivArea", "SalePrice", title="Zoomed look on a outlier")
    kept.plot.scatter("GrLivArea", "SalePrice", title="After Removal of Outlier")
    print("Co relation After removing the outlier : ", kept.corr().loc['GrLivArea','SalePrice'])
    return kept
df_liv = grLivAreaOutlier(df_overallqual.copy())
```
### 3. ExterQual Outlier Check
```
def exernalQualOutlier(df):
    """Remove the two extreme SalePrice rows at ExterQual == 4 (the min and
    the max) and report the correlation with SalePrice before and after.

    Returns the filtered copy of `df`.
    """
    df.plot.scatter("ExterQual", "SalePrice", title="Before Removal of Outlier")
    df_corr = df.corr()
    print("Co relation before removing the outlier : ",df_corr.loc['ExterQual','SalePrice'])
    # the min/max prices at ExterQual == 4 are exactly the two rows removed below
    print(df['SalePrice'][df.ExterQual == 4].min(), df['SalePrice'][df.ExterQual == 4].max())
    df1 = df[(df.ExterQual != 4) | ((df.SalePrice != 52000.0) & ( df.SalePrice != 745000.0))]
    # FIX: count from the already-filtered frame instead of re-evaluating the
    # identical boolean filter a second time (same result, half the work).
    print("Outlier Count : ", df.shape[0]-df1.shape[0])
    df_corr = df1.corr()
    print("Co relation After removing the outlier : ",df_corr.loc['ExterQual','SalePrice'])
    df1.plot.scatter("ExterQual", "SalePrice", title="After Removal of Outlier")
    return df1
df_external_qual = exernalQualOutlier(df_liv.copy())
```
### 4. KitchenQual Outlier Check
```
def kitchenQualOutlier(df):
    """Remove the single max-price row in each of KitchenQual bands 3 and 4
    and report the correlation with SalePrice before and after."""
    df.plot.scatter("KitchenQual", "SalePrice", title="Before Removal of Outlier")
    print("Co relation before removing the outlier : ", df.corr().loc['KitchenQual','SalePrice'])
    print("Max values when KitchenQual == 3 :", df[df.KitchenQual == 3]['SalePrice'].max())
    print("Max values when KitchenQual == 4 :", df[df.KitchenQual == 4]['SalePrice'].max())
    # drop the one extreme sale in each quality band (values printed above)
    kept = df[(df.KitchenQual != 3) | (df.SalePrice != 359100.0)]
    kept = kept[(kept.KitchenQual != 4) | (kept.SalePrice != 625000.0)]
    print("Outlier Count : ", df.shape[0] - kept.shape[0])
    print("Co relation After removing the outlier : ", kept.corr().loc['KitchenQual','SalePrice'])
    kept.plot.scatter("KitchenQual", "SalePrice", title="After Removal of Outlier")
    return kept
df_kitchen_qual = kitchenQualOutlier(df_external_qual.copy())
```
### 5. GarageCars Outlier Check
```
def garageCarsOutlier(df):
    """Drop rows with GarageCars == 4 (rare band that hurts the correlation)
    and report the correlation with SalePrice before and after."""
    df.plot.scatter("GarageCars", "SalePrice", title="Before Removal of Outlier")
    print("Co relation before removing the outlier : ", df.corr().loc['GarageCars','SalePrice'])
    kept = df[df.GarageCars < 4]
    print("Outlier Count : ", df[(df.GarageCars == 4)].shape[0])
    print("Co relation After removing the outlier : ", kept.corr().loc['GarageCars','SalePrice'])
    kept.plot.scatter("GarageCars", "SalePrice", title="After Removal of Outlier")
    return kept
df_gar_car = garageCarsOutlier(df_kitchen_qual.copy())
```
### 6. TotalBsmtSF Outlier Check
```
def totalBsmtSFOutlier(df):
    """Remove hand-picked TotalBsmtSF/SalePrice outlier pockets (large
    basement at low price) and report the correlation with SalePrice before
    and after. Returns the filtered copy."""
    df.plot.scatter("TotalBsmtSF", "SalePrice", title="Before Removal of Outlier")
    df_corr = df.corr()
    print("Co relation before removing the outlier : ",df_corr.loc['TotalBsmtSF','SalePrice'])
    # zoomed views locating the pockets that the filters below remove
    df[df.TotalBsmtSF >3000].plot.scatter("TotalBsmtSF", "SalePrice", title="Zoomed look on Outlier")
    df[df.TotalBsmtSF >3100].plot.scatter("TotalBsmtSF", "SalePrice", title="Further Zoomed look on Outlier")
    df[(df.TotalBsmtSF >1000) & (df.TotalBsmtSF <1500) & (df.SalePrice <100000)].plot.scatter("TotalBsmtSF", "SalePrice", title="Further Zoomed look Outlier 2")
    df[(df.TotalBsmtSF >2000) & (df.TotalBsmtSF <2500) & (df.SalePrice <210000)].plot.scatter("TotalBsmtSF", "SalePrice", title="Further Zoomed look Outlier 3")
    df1 = df[(df.TotalBsmtSF < 3000) | (df.SalePrice > 300000)]                                  # huge basement, low price
    df1 = df1[(df1.TotalBsmtSF < 1000) | (df1.TotalBsmtSF > 1500) | (df1.SalePrice > 65000)]     # mid basement, very low price
    df1 = df1[(df1.TotalBsmtSF < 2000) | (df1.TotalBsmtSF > 2500) | (df1.SalePrice > 150000)]    # large basement, low price
    print("Outlier Count : ", df.shape[0] - df1.shape[0])
    df_corr = df1.corr()
    print("Co relation After removing the outlier : ",df_corr.loc['TotalBsmtSF','SalePrice'])
    df1.plot.scatter("TotalBsmtSF", "SalePrice", title="After Removal of Outlier")
    return df1
df_tot = totalBsmtSFOutlier(df_gar_car.copy())
checkCorr(df_tot.copy())
```
#### We have so far identified 31 outliers altogether across 6 variables in our input data set. Removing these outliers has increased the correlation with the target variable.
##### Let us check with XGBoost for the score.
```
def preProcessData(df, log=False):
    """One-hot encode every categorical column as float64 dummies.

    Prints the frame shape before and after encoding; with log=True also
    prints the categorical column names and the resulting column list.
    Returns the encoded frame.
    """
    print("Shape of the data set before pre processing : ", df.shape )
    if log:
        print("Categorical columns : ", list(df.select_dtypes(exclude=np.number)))
    encoded = pd.get_dummies(df, dtype=np.float64)
    print("\n\nShape of the data set after pre processing : ", encoded.shape )
    if log:
        print("Columns in the data set are : ", list(encoded))
    return encoded
# Encode the fully-imputed frame and inspect the resulting columns.
df_prep = preProcessData(df_fire.copy())
df_prep.info()
list(df_prep)
def removeOutlier(df):
    """Combine the outlier-cleaned training rows with the test rows.

    NOTE(review): the two active filters keep rows with SalePrice == 0 (test
    rows) unconditionally, and only SalePrice == 0 rows are taken from `df`
    in the concat below — so the filtered training rows are discarded and the
    result is the module-level `df_tot` (train after all six outlier passes)
    plus the test rows. Confirm the SalePrice < 200 filter is intentional.
    """
    print("Shape before removing outlier : ", df.shape)
    '''df = df[(df.GrLivArea < 4000) | (df.SalePrice >250000) | (df.SalePrice == 0) ]
    df = df[(df.GarageCars < 4) | (df.SalePrice == 0)]
    df = df[((df.GarageArea < 1200) | (df.SalePrice > 300000)) | (df.SalePrice ==0)]
    df = df[((df.TotalBsmtSF < 3000) | (df.SalePrice > 300000)) | (df.SalePrice ==0)]'''
    df = df[(df.GrLivArea < 3000)| (df.SalePrice ==0)]
    df = df[(df.SalePrice < 200)| (df.SalePrice ==0)]
    print("Shape after removing outlier : ", df.shape)
    # cleaned training rows (global df_tot) + untouched test rows
    df1 = pd.concat([df_tot,df[df.SalePrice == 0]])
    return df1
df_out_removed = removeOutlier(df_fire.copy())
df_out_removed.shape
from IPython import display
def transformTarget(df_temp, revert=False):
    """Return the regression target: log(SalePrice) as a Series.

    Earlier experiments (removed dead code) divided SalePrice by a
    bedroom/bath-derived factor or by GrLivArea before taking the log; the
    live target is simply log(SalePrice). `revert` is kept for interface
    compatibility and is currently unused.
    """
    return np.log(df_temp['SalePrice'])
def newBoxCoxTranformation(df_temp,target,testFile=False, log=False):
    """Build the standardized model matrix.

    With testFile=False: keep rows with SalePrice > 0 (training rows), return
    (X, y) where y = transformTarget(df) and X is the StandardScaler-scaled
    feature matrix with `target` dropped.
    With testFile=True: keep rows with SalePrice == 0.0 (test rows) and
    return the scaled X only.

    NOTE(review): `preprocessing` is a module-level name — presumably
    sklearn.preprocessing; confirm the import elsewhere in the notebook.
    Despite the name, the Box-Cox step is disabled (kept as dead code below);
    only StandardScaler is applied.
    """
    df = df_temp.copy()
    #assuming that only numerical features are presented
    if log:
        print("Shape of the dataset initial : ", df.shape)
    if not testFile:
        # training branch: rows with a real sale price
        df =df[df.SalePrice >0]
        if log:
            print("Shape of the dataset before transformation : ", df.shape)
        #display.display(df[df['SalePrice'].isna()])
        y = transformTarget(df)
        X= df.drop([target],axis = 1)
        #X=df.drop(target, axis=1)
        x_columns = list(X)
        #print(x_columns)
        #X = preprocessing.RobustScaler().fit_transform(X)
        #X = preprocessing.MinMaxScaler(feature_range=(1, 2)).fit_transform(X)
        #X_testx = preprocessing.MinMaxScaler(feature_range=(1, 2)).fit_transform(X,y)
        #X_testxx = preprocessing.MinMaxScaler(feature_range=(1, 2)).fit_transform(X,df.SalePrice)
        #print(np.unique(X == X_testx))
        #print(np.unique(X == X_testxx))
        #print(np.unique(X_testx == X_testxx))
        '''X = pd.DataFrame(X, columns=x_columns)
        for c in list(X):
            if True:
            #if len(X[c].unique()) in [1,2]:
                if log:
                    print("Skipping Transformation for ", c, "because unique values are :",X[c].unique())
            else:
                if log:
                    print("Boxcoxing : ", c)
                X[c] = stats.boxcox(X[c])[0]'''
        #X = preprocessing.MinMaxScaler(feature_range=(1, 2)).fit_transform(X)
        X = preprocessing.StandardScaler().fit_transform(X)
        #X = preprocessing.RobustScaler().fit_transform(X)
        #X = X.values
        if log:
            print("Shape of the dataset after transformation : ", X.shape, y.shape)
        return X,y
    else:
        # test-file branch: rows whose SalePrice placeholder is 0.0
        df = df[df.SalePrice == 0.0]
        if log:
            print("Shape of the dataset before transformation : ", df.shape)
        X=df.drop(target,axis = 1)
        x_columns = list(X)
        #print(x_columns)
        #X = preprocessing.RobustScaler().fit_transform(X)
        #X = preprocessing.MinMaxScaler(feature_range=(1, 2)).fit_transform(X)
        '''X = pd.DataFrame(X, columns=x_columns)
        for c in list(X):
            if True:
            #if len(X[c].unique()) in [1,2]:
                if log:
                    print("Skipping Transformation for ", c, "because unique values are :",X[c].unique())
            else:
                if log:
                    print("Boxcoxing : ", c)
                X[c] = stats.boxcox(X[c])[0]'''
        #X = preprocessing.power_transform( X, method='box-cox')
        #X = preprocessing.MinMaxScaler(feature_range=(1, 2)).fit_transform(X)
        X = preprocessing.StandardScaler().fit_transform(X)
        #X = preprocessing.RobustScaler().fit_transform(X)
        #X = X.values
        if log:
            print("Shape of the dataset after transformation : ", X.shape)
        return X
# Rebuild the model matrix: drop outliers, one-hot encode, switch the target
# to price-per-square-foot (SalePrice / GrLivArea), standardize, then split
# into random halves for a quick holdout evaluation.
df_out_removed = removeOutlier(df_fire.copy())
df_prep = preProcessData(df_out_removed.copy())
df_prep['SalePrice'] = df_prep['SalePrice'] / df_prep['GrLivArea']
X,y = newBoxCoxTranformation(df_prep.copy(),'SalePrice',False,False)
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.5, random_state=random.randint(1,500))
# CLEANUP: removed a preceding test-branch call whose result was immediately
# overwritten by the line above, and ~20 alternative XGBRegressor
# configurations kept as dead triple-quoted strings (varying depth, column
# subsampling, regularization and n_estimators; holdout RMSLE ~0.126-0.162).
# The configuration below is the one that was kept active.
reg = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.5,
                   colsample_bytree=0.5, gamma=0.05, learning_rate=0.05,
                   max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,
                   n_estimators=3000, n_jobs=2, nthread=None, objective='reg:linear',
                   random_state=0, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
                   seed=None, silent=True, subsample=0.5)
reg.fit(X_train,y_train)
reg.score(X_test,y_test)
# RMSLE on the held-out half, back-transformed out of log space.
np.sqrt(mean_squared_log_error(np.exp(y_test), np.exp(reg.predict(X_test))))
reg
def dummyCrossValidation(loop_count):
    """Crude cross-validation: `loop_count` times, re-split the module-level
    X/y into random halves, refit the module-level `reg`, and print the R2
    and back-transformed RMSLE for each split."""
    for _ in range(loop_count):
        tr_X, te_X, tr_y, te_y = train_test_split(X,y, test_size=0.5, random_state=random.randint(1,500))
        reg.fit(tr_X, tr_y)
        print("R2 Score: ", reg.score(te_X, te_y))
        print("RMSLE Score : ", np.sqrt(mean_squared_log_error(np.exp(te_y), np.exp(reg.predict(te_X)))))
dummyCrossValidation(10)
reg.fit(X,y)
def checkTheTrainFile(reg,df_prep):
    """Score the fitted model *reg* against the labelled rows of *df_prep*.

    Keeps one copy with the true target (``df_prep1``) and feeds a
    target-blanked copy through the same Box-Cox pipeline used at train
    time, then prints the RMSLE between true and predicted prices (both
    re-scaled by GrLivArea, undoing the per-area target transform).

    Relies on the module-level ``newBoxCoxTranformation`` helper; *reg*
    must expose ``predict``.  Prints the score; returns None.
    """
    # (The original also re-read train_File into an unused variable; that
    # dead I/O has been removed.)
    # Copy that keeps the real target for scoring.
    df_prep1 = df_prep.copy()
    df_prep1 = df_prep1[df_prep1['SalePrice'] > 0]
    df_prep1['New_SalePrice'] = 0.0
    # Blank the target on the feature copy so the pipeline cannot leak it.
    df_prep = df_prep[df_prep['SalePrice'] > 0]
    df_prep['SalePrice'] = 0
    X = newBoxCoxTranformation(df_prep.copy(),'SalePrice',True)
    # Predictions are log(price / GrLivArea); exp() and the GrLivArea
    # multiplications below undo both transforms before scoring.
    df_prep1['New_SalePrice'] = list(np.exp(reg.predict(X)))
    df_prep1['New_SalePrice'] = df_prep1['New_SalePrice'] * df_prep1.GrLivArea
    df_prep1['SalePrice'] = df_prep1['SalePrice'] * df_prep1.GrLivArea
    df_train_score = df_prep1[df_prep1.SalePrice > 0]
    print(np.sqrt(mean_squared_log_error(df_train_score['SalePrice'], df_train_score['New_SalePrice'])))
# Score the final model against the prepared training data.
checkTheTrainFile(reg,df_prep.copy())
def checkTheTestFile(reg):
    """Predict SalePrice for the Kaggle test file with the fitted *reg*.

    Returns ``(df_test, X, raw predictions)``.  Relies on the module-level
    ``df_prep`` and ``newBoxCoxTranformation``.

    NOTE(review): the feature matrix X is built from the module-level
    ``df_prep``, not from the ``df_test`` read below.  This only lines up
    if ``df_prep`` already holds the prepared test rows in the same order
    as the test file -- verify against the caller.
    """
    df_test = pd.read_csv(test_File)
    df_test['SalePrice'] = 0.0
    print(df_test.shape)
    #df_train = pd.read_csv(train_File)
    #df_concat = pd.concat([df_train,df_test])
    #print(df_test[df_test.TotalBsmtSF.isna()])
    #return
    #df = giveMeWrangledData(df_concat,True)
    #print(df.info())
    #df = preProcessData(df)
    #print(df.info())
    X = newBoxCoxTranformation(df_prep.copy(),'SalePrice',True)
    #print(np.sqrt(mean_squared_log_error(y, reg.predict(X))))
    #df_test['SalePrice'] = (reg.predict(X))
    # Predictions are log(price / GrLivArea): exp() then re-scale by area.
    df_test['SalePrice'] = np.exp(reg.predict(X))
    df_test['SalePrice'] = df_test['SalePrice'] * df_test.GrLivArea
    #df_test['SalePrice'] = df_test['SalePrice'] * df_test.OverallQual
    return df_test, X, reg.predict(X)
# Generate predictions for the test file and write the submission CSV.
df_test, X_dummy, y_dummy= checkTheTestFile(reg)
# NOTE(review): 'smubmission.csv' looks like a typo for 'submission.csv';
# kept as-is here because the filename is runtime behaviour.
df_test[['Id','SalePrice']].to_csv('smubmission.csv',index=False)
df_test[['Id','SalePrice']]
def forCrossValidationStratifiedShuffleSplit(df):
    """Yield (train_index, test_index) pairs for cross-validation.

    Produces ten 50/50 stratified shuffle splits on ``BldgType`` followed
    by ten stratified on ``OverallQual``, all from the same fixed-seed
    splitter.
    """
    splitter = StratifiedShuffleSplit(n_splits=10, test_size=.5, random_state=1986)
    yield from splitter.split(df, df.BldgType)
    yield from splitter.split(df, df.OverallQual)
# Reload the raw training file for the stratified splitter above.
df_train = pd.read_csv(train_File)
def crossValidationScoring(reg,X,y):
    """Custom sklearn scorer: negative RMSLE on the back-transformed target.

    *y* and the model output are log-prices, so both are exponentiated
    before computing the mean squared log error.  Negated because sklearn
    scorers are maximised.  (Dead commented-out variants removed.)
    """
    return -np.sqrt(mean_squared_log_error(np.exp(y),
                                           np.exp(reg.predict(X))
                                           ))
# Baseline CV score with sklearn's built-in (negated) MSLE on the raw target.
mean_temp_rmsle = np.mean(cross_val_score(reg,X,y,cv= 5,scoring='neg_mean_squared_log_error'))
print("RMSE with without target variable transformation : ", np.sqrt(mean_temp_rmsle * -1))
# Same CV, but scoring with the custom negative-RMSLE scorer (exp-transformed).
mean_temp_rmsle = np.mean(cross_val_score(reg, X, y,
                                          cv= 5,
                                          scoring=crossValidationScoring))
print("RMSE with post target variable transformation : ", mean_temp_rmsle)
```
from sklearn.model_selection import GridSearchCV
def gridSearchCVImp():
    """Exhaustively tune an XGBRegressor on the module-level X/y.

    Searches tree depth, column subsampling, estimator count and
    regularisation over a fixed-seed 5-fold shuffle split, scoring with
    the custom negative-RMSLE scorer.  Returns the fitted GridSearchCV.
    """
    start_time = datetime.datetime.now()
    base_estimator = XGBRegressor(n_jobs=2, subsample=.5, learning_rate=0.05)
    param_grid = {
        'max_depth': list(range(3, 4)),
        'colsample_bylevel': np.arange(0.25, 1.0, 0.25),
        'n_estimators': list(range(200, 600, 100)),
        'colsample_bytree': np.arange(0.25, 1.0, 0.25),
        'reg_alpha': np.arange(0.0, 1.0, 0.25),
        'reg_lambda': np.arange(0.25, 1.0, 0.25),
    }
    splitter = ShuffleSplit(n_splits=5, test_size=.05, random_state=1986)
    searcher = GridSearchCV(
        base_estimator,
        param_grid,
        cv=splitter,
        n_jobs=2,
        scoring=crossValidationScoring,
        verbose=1,
        # A finite error_score keeps the search alive when a single fit fails.
        error_score=5,
    )
    searcher.fit(X, y)
    print("Total time for the gridserach", datetime.datetime.now() - start_time)
    return searcher
# Run the grid search and report the winning estimator and its score.
reg_grid = gridSearchCVImp()
print(reg_grid.best_estimator_)
print(reg_grid.best_score_)
# NOTE(review): with the custom scorer, best_score_ is already a negative
# RMSLE, so taking sqrt here again looks like a leftover from the
# neg_mean_squared_log_error scoring -- confirm which metric is intended.
print(np.sqrt(reg_grid.best_score_ * -1))
This statement is written just to keep the Kaggle kernel alive during the grid search, so the session does not time out.
Conservative GridSearch : testsize : .5, scoring: r2
Total time for the gridserach 0:54:19.899125
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.5, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
0.9153911325136409
Conservative GridSearch : testsize : .5, scoring: neg_msle
Total time for the gridserach 0:55:00.412282
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.5, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-7.906906622834065e-05
Conservative GridSearch : testsize : .5, scoring: negative RMSLE
Total time for the gridserach 0:50:49.492892
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.5, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11425853874531103 ==> Kaggle Score gave 0.131.....
Optimistic GridSearch: testsize: .1 scoring: negative RMSLE
Total time for the gridserach 1:34:16.138500
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1135887234582907
Optimistic GridSearch testsize : .1 , scoring: negative RMSLE, target transformation : per OverallQual
Total time for the gridserach 1:05:27.602182
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.5,
colsample_bytree=0.75, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=4, min_child_weight=1, missing=None, n_estimators=300,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.12313052538188561
Optimistic GridSearch testsize : .1 , scoring: negative RMSLE, target transformation : per GrLivArea
Total time for the gridserach 0:52:42.390855
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.5, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.75, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11300130870279539
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
Total time for the gridserach 0:30:47.246428
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.75, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1140592041230081
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox
Total time for the gridserach 0:18:57.023773
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.75, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11473189632093529
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox
With 300 - 900 estimator
Total time for the gridserach 0:20:53.198464
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.75, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11473189632093529
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox and with minmax scaling instead to standard scaling
Total time for the gridserach 0:12:38.560831
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=600,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11470853780134761
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox and with no scaling post box cox
Total time for the gridserach 0:09:23.388481
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=600,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11470853780134761
Optimistic Grid Search, testsize : .05, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox and with no scaling post box cox
Total time for the gridserach 0:31:17.912847
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.5,
colsample_bytree=0.5, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1032861306860222
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox and with no scaling post box cox & 2 more ordinal variables converted to number.
Total time for the gridserach 0:16:43.250894
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=600,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11436784398304499
Conservative Grid Search, testsize : .5, negative RMSLE, target transformation: per GrLivArea
With all variables to boxcox and with no scaling post box cox & 2 more ordinal variables converted to number.
and robust scaling before boxcox
Total time for the gridserach 0:12:00.972793
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=600,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11436784398304499
How robust is robustscaler ?
Total time for the gridserach 0:12:36.182610
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.5,
colsample_bytree=0.75, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=400,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.25, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.12788816069618766
How much does the standard scaler help post boxcox
Total time for the gridserach 0:11:45.913461
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=600,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11468293397638228
Robustscaler went transperant with standard scaler post box cox
Total time for the gridserach 0:11:52.590051
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=600,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.5, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11468293397638228
Optimistic grid search for estimator between 500- 800 with standard scaler post boxcox
Total time for the gridserach 0:20:25.400043
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1028989248977331 #0.12646
Optimistic grid search for estimator between 200- 600 with standard scaler post boxcox
Total time for the gridserach 0:16:03.879108
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1028989248977331
1. OverallQual target transformation
2. GrLivArea target transformation
3. Less number of outliers removal
4. Imputing based on high co related variables.
5. boxcox for the dataset
6. min max post boxcox
Next Steps to follow are :
1. optimistic grid search with standard scaling post box cox - This would be with the anticipation of the best score which we got post first round of ordinal numerical variation. Expectation is that since standard scaling is done optimistic search i.e. testcase = 0.05 will be equivalent to testing with the full training set and the value should be equivalent to kaggle score.
With no boxcox but standard scaler:
Total time for the gridserach 0:16:46.997090
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1028989248977331 #kaggle score : 0.12646
no box cox but with robust scaler
Total time for the gridserach 0:16:41.065970
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.10308985379081549 # 0.12759
no box cox but robust scaler conservative search
Total time for the gridserach 0:09:37.766766
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.11388498877762311 #0.13298
no box cox and min max
Total time for the gridserach 0:16:47.306173
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.75,
colsample_bytree=0.25, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=500,
n_jobs=2, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0.0, reg_lambda=0.25, scale_pos_weight=1, seed=None,
silent=True, subsample=0.5)
-0.1028989248977331
with learning rate 0.05
Total time for the gridserach 0:16:47.242496
XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=0.25,
colsample_bytree=0.75, gamma=0, learning_rate=0.05,
max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,
n_estimators=500, n_jobs=2, nthread=None, objective='reg:linear',
random_state=0, reg_alpha=0.0, reg_lambda=0.75, scale_pos_weight=1,
seed=None, silent=True, subsample=0.5)
-0.10392091407197643
#1 https://www.kaggle.com/zenstat/simple-linear-regression-example SaleCondition condition mapping to ordinal values as 0/1 after comparing the relation with target variable.
https://www.kaggle.com/jatinmittal0001/housing-price-prediction - discusses ensembling ElasticNet and Lasso in a 30:70 ratio.
Found an interesting kernel on easy implementation of stacking approach: https://www.kaggle.com/aiden98/house-prices-error-0-11433
Found an interesting article on model selection: http://blog.minitab.com/blog/how-to-choose-the-best-regression-model
| github_jupyter |
# Clonamos el repositorio para obtener los dataSet
```
!git clone https://github.com/joanby/ia-course.git
```
# Damos acceso a nuestro Drive
```
from google.colab import drive
drive.mount('/content/drive')
```
# Test it
```
!ls '/content/drive/My Drive'
```
#Google colab tools
```
from google.colab import files # Para manejar los archivos y, por ejemplo, exportar a su navegador
import glob # Para manejar los archivos y, por ejemplo, exportar a su navegador
from google.colab import drive # Montar tu Google drive
```
# Instalar dependencias de Renderizado, tarda alrededor de 45 segundos
```
!apt-get update > /dev/null 2>&1
!apt-get install python-opengl -y > /dev/null 2>&1
!apt install xvfb -y --fix-missing > /dev/null 2>&1
!apt-get install ffmpeg > /dev/null 2>&1
!apt-get install x11-utils > /dev/null 2>&1
!apt-get install pyglet > /dev/null 2>&1
```
# Instalar OpenAi Gym
```
!pip install gym pyvirtualdisplay > /dev/null 2>&1
!pip install piglet > /dev/null 2>&1
!pip install 'gym[box2d]' > /dev/null 2>&1
#por si quieres algun environment en concreto
#!pip install atari_py > /dev/null 2>&1
#!pip install gym[atari] > /dev/null 2>&1
```
# Todos los imports necesarios en google colab y helpers para poder visualizar OpenAi
```
import gym
from gym import logger as gymlogger
from gym.wrappers import Monitor
gymlogger.set_level(40) #error only
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
import math
import glob
import io
import base64
from IPython.display import HTML
from IPython import display as ipythondisplay
```
# Activamos una vista, seria como crear un plot de una grafica en python
```
# Start a virtual X display so gym can render off-screen on a headless runtime.
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1400, 900)) # You can change the width/height of the screen here
display.start()
```
# Este código crea una pantalla virtual para dibujar imágenes del juego.
## Si se ejecuta localmente, simplemente ignóralo
```
import os
# Start xvfb only when no usable DISPLAY is present (i.e. a headless runtime);
# then point subsequent renders at the virtual display :1.
if type(os.environ.get('DISPLAY')) is not str or \
    len(os.environ.get('DISPLAY')) == 0:
    !bash ../xvfb start
%env DISPLAY=:1
```
# Funciones de utilidad para permitir la grabación de video del ambiente del gimnasio y su visualización
## Para habilitar la visualizacion por pantalla , tan solo haz "**environment = wrap_env(environment)**", por ejemplo: **environment = wrap_env(gym.make("MountainCar-v0"))**
```
import io
import glob
import base64
from IPython.display import HTML
from IPython import display as ipythondisplay
def show_video():
    """Embed the first recorded ./video/*.mp4 inline in the notebook.

    Base64-encodes the file into a data URI inside an HTML5 <video> tag and
    displays it via IPython.  Prints a message when no recording exists.
    """
    mp4list = glob.glob('video/*.mp4')
    if len(mp4list) > 0:
        mp4 = mp4list[0]
        # Context manager so the file handle is always released (the
        # original left it open).
        with io.open(mp4, 'r+b') as fh:
            video = fh.read()
        encoded = base64.b64encode(video)
        ipythondisplay.display(HTML(data='''
        <video alt="test" autoplay loop controls style="height: 400px;">
        <source src="data:video/mp4;base64,{0}" type="video/mp4" />
        </video>
        '''.format(encoded.decode('ascii'))))
    else:
        print("Couldn't find video")
def wrap_env(env):
    """Wrap *env* with a Monitor that records episode videos into ./video."""
    return gym.wrappers.Monitor(env, './video', force=True)
```
# Nuestro Script
```
# Create the Qbert environment wrapped with the video recorder.
environment = wrap_env(gym.make("Qbert-v0"))
from gym.spaces import *
import sys
# Box -> R^n (x1,x2,x3,...,xn), xi [low, high]
#gym.spaces.Box(low = -10, high = 10, shape = (2,)) # (x,y), -10<x,y<10
# Discrete -> Números enteros entre 0 y n-1, {0,1,2,3,...,n-1}
#gym.spaces.Discrete(5) # {0,1,2,3,4}
#Dict -> Diccionario de espacios más complejos
#gym.spaces.Dict({
# "position": gym.spaces.Discrete(3), #{0,1,2}
# "velocity": gym.spaces.Discrete(2) #{0,1}
# })
# Multi Binary -> {T,F}^n (x1,x2,x3,...xn), xi {T,F}
# gym.spaces.MultiBinary(3)# (x,y,z), x,y,z = T|F
# Multi Discreto -> {a,a+1,a+2..., b}^m
#gym.spaces.MultiDiscrete([-10,10],[0,1])
# Tuple -> Producto de espacios simples
#gym.spaces.Tuple((gym.spaces.Discrete(3), gym.spaces.Discrete(2)))#{0,1,2}x{0,1}
# prng -> Random Seed
def print_spaces(space):
    """Print a gym space; for Box spaces also print the lower/upper bounds."""
    print(space)
    if not isinstance(space, Box):
        return
    print("\n Cota inferior: ", space.low)
    print("\n Cota superior: ", space.high)
```
# main
```
sys.argv=['self.py','CartPole-v0'] # Change the name here to inspect a different environment
if __name__ == "__main__":
    environment = gym.make(sys.argv[1]) ## The script takes the environment name as its CLI parameter
    print("Espacio de estados:")
    print_spaces(environment.observation_space)
    print("Espacio de acciones: ")
    print_spaces(environment.action_space)
    # Not every environment exposes action meanings; skip quietly if absent.
    try:
        print("Descripción de las acciones: ", environment.unwrapped.get_action_meanings())
    except AttributeError:
        pass
```
| github_jupyter |
<a href="https://colab.research.google.com/github/revendrat/FinancialMathematics/blob/main/Py_Finance_01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#install yahoo finance
%pip install yfinance
# import packages & modules
import pandas as pd
import numpy as np
import yfinance as yf
from pylab import mpl, plt
# suppress scientific notation
pd.options.display.float_format = '{:.5f}'.format
# NOTE(review): the bare 'seaborn' style name was removed in newer
# matplotlib releases (renamed 'seaborn-v0_8') -- confirm the runtime version.
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
%matplotlib inline
# Analyse Infosys stock
#define the ticker symbol
tickerSymbol = 'INFY.NS'
#get data on this ticker
tickerData = yf.Ticker(tickerSymbol)
#get the historical daily prices for this ticker (requires network access)
infy = tickerData.history(period='1d', start='2017-1-1', end='2022-4-3')
#see your data
infy.info()
# verify the top 5 records
infy.head()
# verify the bottom 5 records
infy.tail()
#pandas.Dataframe.shift(# lags)
#Using shift(1), we can get the row just above the present row. Here, # lags is 1.
#log() is a function given in numpy package in python. It calculates the natural log of the value given inside it.
# Daily log return: ln(today's close / yesterday's close); the first row is NaN.
infy['LogReturn'] = np.log(infy['Close']/infy['Close'].shift(1))
#print() function prints the value inside it on the console.
infy['LogReturn'].head()
infy['LogReturn'].head()
# NOTE(review): despite the "sma" name, this is a 180-day rolling *sum* of
# log returns (trailing cumulative return), not a simple moving average.
sma_180=180
infy['sma_180'] = infy['LogReturn'].rolling(sma_180).sum()
# plot the sma_180 against time
infy['sma_180'].plot(figsize=(10,6))
infy.head()
infy.tail()
infy.iloc[1]
# Verify random values of LogReturn and sma_180
infy[['LogReturn', 'sma_180']].iloc[175:185]
# Build the strategy
# Create the 'position' column that has strategy based on sum of last 180 days log returns with following criteria
# sma_180 >0 is Buy, < 0 is Sell & Na is No Position for trade
infy['position'] = np.where(infy['sma_180'].isna(), 'No Position', np.where(infy['sma_180'] > 0, 'Buy', 'Sell'))
# likewise create a column 'position indicator' that stores numeric values of strategy as mentioned below
# 1 is buy, -1 is sell & 0 is no position indicator
infy['position_indicator'] = np.where(infy['sma_180'].isna(), 0, np.where(infy['sma_180'] > 0, 1, -1))
# Verify random values of LogReturn, sma_180, position and position_indicator
infy[['LogReturn', 'sma_180', 'position', 'position_indicator']].iloc[175:185]
# NOTE(review): the assignments below overwrite the NaN-aware columns above,
# dropping the 'No Position' / 0 category (rows with NaN sma_180 become
# 'Sell' / -1) -- confirm which definition is intended downstream.
infy['position'] = np.where(infy['sma_180'] >0, 'Buy', 'Sell')
infy[['LogReturn', 'sma_180', 'position']].iloc[175:185]
infy.head()
infy.tail()
infy['position_indicator'] = np.where(infy['sma_180'] >0, 1, -1)
infy[['LogReturn', 'sma_180', 'position', 'position_indicator']].iloc[175:185]
# rebalance every month: review the strategy on a given date and flip to
# 'Sell' whenever the 180-day signal is not a firm buy.
def rebalance(data, date):
    """Review the position recorded on *date* and return the rebalanced call.

    Parameters
    ----------
    data : pd.DataFrame
        Indexed by datetime; must contain 'position' (str) and
        'position_indicator' (1 buy, -1 sell, 0 no position).
    date : str or datetime-like
        Review date; must be present in the index.

    Returns
    -------
    pd.DataFrame
        One row with review_date, previous_position, rebalanced_position.
        (The original printed the frame but returned None.)
    """
    date = pd.to_datetime(date)
    row = data.loc[date]
    # Anything short of a firm buy (indicator < 1 covers both -1 sell and
    # 0 no-position) is rebalanced to a sell.
    temp_rebalance = 'Sell' if row['position_indicator'] < 1 else 'Buy'
    print(" temp_rebalance ", temp_rebalance)
    # The original used DataFrame.append, which was removed in pandas 2.0;
    # build the one-row frame directly instead.
    rebalanced_strategy = pd.DataFrame({'review_date': [date],
                                        'previous_position': [row['position']],
                                        'rebalanced_position': [temp_rebalance]})
    print(" rebalanced_strategy ", rebalanced_strategy)
    return rebalanced_strategy
# test the rebalance strategy
rebalance(infy[['position', 'position_indicator']], '2017-09-27')
rebalance(infy[['position', 'position_indicator']], '2022-03-28')
# Comparing the results with buy & hold strategy of Infosys NSE
# Multiplies the previous day's position_indicator with today's log return to avoid foresight bias
# Foresight bias implies that trade is placed given today's data, and returns are made on tomorrow
infy['strategy_sma180'] = infy['position_indicator'].shift(1)*infy['LogReturn']
# round the decimal to fourth place and verify the first 5 values
infy.round(4).head()
infy.round(4).head()
# Fetch the returns of buy & hold strategy and 180 days sum of log returns strategy
infy[['LogReturn', 'strategy_sma180']].sum()
# Use anti-log(e) to get the gross return value over the whole period
np.exp(infy[['LogReturn', 'strategy_sma180']].sum())
# Optimising the look back period with multiple iterations of past values of log returns
# import product module from itertools
from itertools import product
# select the random values for finding optimal values
sma1 = range(20, 61, 4)
sma2 = range(180, 201, 10)
# create results data frame
results = pd.DataFrame()
# write a for loop for different values of look back periods
for SMA1, SMA2 in product(sma1, sma2):
    # create a temporary data frame
    data_opt = pd.DataFrame()
    # (dropna on the still-empty frame is a no-op)
    data_opt.dropna(inplace=True)
    # pass log returns of Infosys NSE to data_opt dataframe
    data_opt['Returns'] = infy['LogReturn']
    # Rolling sums of log returns over the two candidate look-back windows
    data_opt['SMA1'] = data_opt['Returns'].rolling(SMA1).sum()
    data_opt['SMA2'] = data_opt['Returns'].rolling(SMA2).sum()
    # Drop all Nan & NaT values
    data_opt.dropna(inplace=True)
    # if sma1 look back period's sum of log returns is greater than sma2 look back period's sum of log returns, then buy, otherwise sell
    data_opt['Position'] = np.where(data_opt['SMA1'] > data_opt['SMA2'], 1, -1)
    # Drop all Nan & NaT values
    data_opt.dropna(inplace=True)
    # previous day's signal times today's return (avoids foresight bias)
    data_opt['Strategy'] = data_opt['Position'].shift(1) * data_opt['Returns']
    # Drop all Nan & NaT values
    data_opt.dropna(inplace=True)
    # calculate the performance of strategy
    perf = np.exp(data_opt[['Returns', 'Strategy']].sum())
    # NOTE(review): DataFrame.append was removed in pandas 2.0; on modern
    # pandas this needs pd.concat([results, ...], ignore_index=True).
    results = results.append(pd.DataFrame(
                {'SMA1': SMA1, 'SMA2': SMA2,
                 'MARKET': perf['Returns'],
                 'STRATEGY': perf['Strategy'],
                 'OUT': perf['Strategy'] - perf['Returns']},
                 index=[0]), ignore_index=True)
# Optimising the look back period with multiple iterations of past values of log returns
# import product module from itertools
from itertools import product
# select the random values for finding optimal values
sma1 = range(20, 61, 4)
# NOTE(review): sma2 is commented out here, so the loop below silently
# reuses the sma2 range defined in the previous cell -- confirm intentional.
#sma2 = range(180, 201, 10)
# create results data frame
results = pd.DataFrame()
# write a for loop for different values of look back periods
for SMA1, SMA2 in product(sma1, sma2):
    # create a temporary data frame
    data_opt = pd.DataFrame()
    # (dropna on the still-empty frame is a no-op)
    data_opt.dropna(inplace=True)
    # pass log returns of Infosys NSE to data_opt dataframe
    data_opt['Returns'] = infy['LogReturn']
    # Rolling sum of log returns over the candidate look-back window
    data_opt['SMA1'] = data_opt['Returns'].rolling(SMA1).sum()
    #data_opt['SMA2'] = data_opt['Returns'].rolling(SMA2).sum()
    # Drop all Nan & NaT values
    data_opt.dropna(inplace=True)
    # if sma1 look back period's sum of log returns is greater than 0 then buy, otherwise sell
    data_opt['Position'] = np.where(data_opt['SMA1'] >0, 1, -1)
    # Drop all Nan & NaT values
    data_opt.dropna(inplace=True)
    # previous day's signal times today's return (avoids foresight bias)
    data_opt['Strategy'] = data_opt['Position'].shift(1) * data_opt['Returns']
    # Drop all Nan & NaT values
    data_opt.dropna(inplace=True)
    # calculate the performance of strategy
    perf = np.exp(data_opt[['Returns', 'Strategy']].sum())
    # NOTE(review): DataFrame.append was removed in pandas 2.0; on modern
    # pandas this needs pd.concat([results, ...], ignore_index=True).
    results = results.append(pd.DataFrame(
                {'SMA1': SMA1,
                 'MARKET': perf['Returns'],
                 'STRATEGY': perf['Strategy'],
                 'OUT': perf['Strategy'] - perf['Returns']},
                 index=[0]), ignore_index=True)
# Inspect the optimisation results: best/worst out-performance vs buy & hold.
results.info()
results.sort_values('OUT', ascending=False).head(7)
results.head()
results.shape
results
data_opt.head()
data_opt.tail()
data_opt['Position'].shift(1) * data_opt['Returns']
results.sort_values('OUT', ascending=True).head(7)
```
| github_jupyter |
## Objectives
- Identify noun phrase chunks using POS tags
- Extract information from noun phrases in the Penn Treebank
## Downloading Lexicons
```
import nltk
nltk.download("punkt")
nltk.download("treebank")
nltk.download("averaged_perceptron_tagger")
nltk.download("wordnet")
```
Code to access relevant modules (you can add to this as needed):
```
from nltk.corpus import treebank
from nltk import word_tokenize, pos_tag, RegexpParser
from nltk.tree import Tree
from nltk.chunk.util import ChunkScore
from nltk.stem import WordNetLemmatizer
```
### Simple NP chunking
We start by building a basic NP chunker. A simple approach to the task of NP chunking is to assume that a sequence of words is an NP if
* it contains only determiners, nouns, pronouns, and adjectives,
* and it contains at least one noun or pronoun.
The first letters of relevant POS tags are provided for you in the sets `NP_POS` and `NP_HEAD_POS`.
We write a function which takes a raw sentence (a string) and
1. tokenizes and POS tags it using NLTK,
1. finds all contiguous sequences of words that fit the above description, and returns them.
```
# Two-letter POS-tag prefixes used by the simple chunker below.
NP_POS = {"DT", "NN", "JJ", "PR"} # these are the first two letters of the POS that you should consider potential parts of NP chunks
NP_HEAD_POS = {"NN", "PR"} # each chunk must have at least one of these (a noun or pronoun head)
def get_chunks(sentence):
'''Extracts noun phrases from a sentence corresponding to the part-of-speech tags in optional_POS,
requiring at least one of the POS tags in required_POS. Returns the chunks as a list of strings'''
# your code here
chunks=[]
list_1=[]
tags= set()
tagged_sent = pos_tag(word_tokenize(sentence))
for word,tag in tagged_sent:
if tag[:2] in NP_POS:
list_1.append(word)
tags.add(tag[:2])
else:
if len(tags & NP_HEAD_POS)>0:
chunks.append(" ".join(list_1))
list_1= []
tags= set()
if len(tags & NP_HEAD_POS)>0:
chunks.append(" ".join(list_1))
return chunks
```
Here are a few examples which show you the input format for `get_chunks` and the intended output format.
```
assert(sorted(get_chunks("the quick brown fox jumped over the lazy dog"))) == sorted(["the quick brown fox", "the lazy dog"])
assert(get_chunks("life is good")) == ["life"]
assert(get_chunks("life is good and chickens are tasty")) == ["life","chickens"]
print("Success!")
```
### Regex chunking
Create three different NLTK regex noun chunkers using the `RegexpParser` class:
1. `simple_chunk` which exactly duplicates the logic from above.
```
tagged_sent = pos_tag(word_tokenize("I gave John my old Globe and Mail"))
simple_chunk = RegexpParser("NP: {(<DT.*|NN.*|JJ.*|PR.*>*)(<NN.*|PR.*>)(<DT.*|NN.*|JJ.*|PR.*>*)}")
print(simple_chunk.parse(tagged_sent))
```
2. `ordered_chunk` which captures the standard English NP word order, defined by the following properties:
* The syntactic head of an NP is either a personal pronoun, common noun or proper noun. Every NP has to contain at least one of these. Note that there can be more.
* If the head is a noun, it can be preceded by a determiner (also called an article) as in _the dog_ or a possessive pronoun as in _my dogs_.
* If the head is a noun, it can be preceded by one or more adjectives as in _beautiful weather_.
* If a determiner or possessive pronoun occurs, it has to be the first token of the NP.
* If the syntactic head is a noun, it can be preceded by an adjective as in _the grey dog_ and _grey dogs_.
```
tagged_sent = pos_tag(word_tokenize("I gave John my old Globe and Mail"))
ordered_chunk = RegexpParser("NP: {(<DT.*|PRP\$>?<JJ.*>*<NN.*>+)|(<PRP>+)}")
print(ordered_chunk.parse(tagged_sent))
```
3. `conj_chunk` which allows for coordination of two NPs matching `ordered_chunk` using a coordinate conjunction `CC`. Note that often there is only one determiner in a coordinated NP as in "the Globe and Mail", however, "the Globe and the Mail" is also grammatical.
```
tagged_sent = pos_tag(word_tokenize("I gave John my old Globe and MAIL and mail"))
conj_chunk = RegexpParser("NP: {((<DT.*|PRP\$>?<JJ.*>*<NN.*>+)|(<PRP>+))(<CC>((<DT.*|PRP\$>?<JJ.*>*<NN.*>+)|(<PRP>+)))*}")
print(conj_chunk.parse(tagged_sent))
sent = "I gave John my old Globe and Mail"
assert (str(simple_chunk.parse(pos_tag(word_tokenize(sent)))) == str(Tree.fromstring("(S (NP I/PRP) gave/VBD (NP John/NNP my/PRP$ old/JJ Globe/NNP) and/CC (NP Mail/NNP))")))
assert (str(ordered_chunk.parse(pos_tag(word_tokenize(sent)))) == str(Tree.fromstring("(S (NP I/PRP) gave/VBD (NP John/NNP) (NP my/PRP$ old/JJ Globe/NNP) and/CC (NP Mail/NNP))")))
assert (str(conj_chunk.parse(pos_tag(word_tokenize(sent)))) == str(Tree.fromstring("(S (NP I/PRP) gave/VBD (NP John/NNP) (NP my/PRP$ old/JJ Globe/NNP and/CC Mail/NNP))")))
print("Success!")
```
### Chunking evaluation and improvement
We evaluate our regular expression chunkers by comparing their output to gold standard chunks extracted from the Penn Treebank.
First, we will create a new test set for our chunkers by pulling out noun phrases from the Penn Treebank. We start by creating a function `convert_to_chunk` which converts standard syntactic trees into shallow chunk trees, where all phrases except `NP` have been flattened.
Your `convert_to_chunk` function should take a list of syntax trees as input and return a list of chunk trees as output. Here is an example of a syntax tree and the corresponding chunk tree:
```
def is_wanted_NP(tree):
'''returns False if the NLTK tree of a NP has either other NPs or traces ("*") within it'''
if tree.label() != "NP":
return False
subtrees = list(tree.subtrees())[1:]
if any([subtree.label().startswith("NP") for subtree in subtrees]):
return False
# your code here
if "*" in tree.leaves():
return False
return True
def convert_to_chunk_(tree,chunks):
'''Recursively finds any shallow NPs in the tree, converting the parse into the NLTK chunk format.
The list of chunks is returned'''
# your code here
for inner_tree in tree:
if is_wanted_NP(inner_tree):
chunks.append(Tree(inner_tree.label(), inner_tree.pos()))
elif inner_tree.height()==2:
chunks.append(inner_tree.pos()[0])
else:
chunks = (convert_to_chunk_(inner_tree, chunks))
return chunks
tree = Tree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD saw) (NP (DT the) (NN cat))))")
assert(convert_to_chunk_(tree,[]) == [Tree('NP', [('the', 'DT'), ('dog', 'NN')]), ('saw', 'VBD'), Tree('NP', [('the', 'DT'),('cat','NN')])])
def convert_to_chunk(tree):
return Tree("S",convert_to_chunk_(tree,[]))
treebank_test = []
for parsed_sent in treebank.parsed_sents():
treebank_test.append(convert_to_chunk(parsed_sent))
```
Now, we evaluate the three regex chunkers from Exercise 2 using the built-in NLTK chunk evaluation system.
```
print("simple chunk")
print(simple_chunk.evaluate(treebank_test))
print("ordered chunk")
print(ordered_chunk.evaluate(treebank_test))
print("conj chunk")
print(conj_chunk.evaluate(treebank_test))
```
#### looking for errors
```
dev_set = treebank.tagged_sents()[:50]
test_set = treebank.tagged_sents()[50:]
for tagged, gold_tree in zip(dev_set,treebank_test):
sys_tree = ordered_chunk.parse(tagged)
print("SYS:",sys_tree)
print("GOLD:",gold_tree)
```
```
from nltk.corpus import brown
chunk = RegexpParser("NP: {((<DT|PRP\$>?<JJ.*>*<CD.*|NN.*>+)|(<PRP>))(<CC>((<DT|PRP\$>?<JJ.*>*<CD.*|NN.*>+)|(<PRP>)))*}")
test_set = []
for parsed_sent in treebank.parsed_sents()[50:]:
test_set.append(convert_to_chunk(parsed_sent))
print(chunk.evaluate(test_set))
```
### Identifying predicates and objects
We now build a function which extracts predicate-object pairs from syntax trees. For example, for the sentence _I bought the toys_ , your function should identify that the predicate of the sentence is _bought_ and its object is _toys_ , the function should then return the pair `("buy", "toy")`.
First, we write a recursive function `get_head` which takes two arguments: `phrase` and `phrase_type` as input. The `phrase` argument is an NLTK tree representing either an NP or a VP, and `phrase_type` is either `"N"` or `"V"` for NPs and VPs, respectively. The function should return the **lemmatized** syntactic head of `phrase`. For example, given the following NLTK syntax tree as input
```
(NP
(DT the)
(JJ grey)
(NN dogs)
)
```
The function should return `dog`.
We assume that the head is either the right-most token with the appropriate POS `V.*` or `N.*`, or the syntactic head of the right-most child phrase having type `NP.*` or `VP.*` depending on `phrase_type`. This means that we may need to call `get_head` recursively. For example, for
```
(NP
(DT the)
(JJ second)
(NN incentive)
(NN plan)
)
```
should return `"plan"` which is the right-most noun. As another example, consider
```
(NP
(DT the)
(JJ blue)
(NN bird)
(CC and)
(NP
(DT the)
(JJ yellow)
(NN butterfly)
)
)
```
Here we return "`butterfly`" which is the head of the right-most child NP.
```
# lemmatizer.lemmatize(word,pos) returns the lemma for word.
# pos should be 'n' for nouns and 'v' for verbs.
lemmatizer = WordNetLemmatizer()
head_list = []
def get_head(phrase, phrase_type):
'''returns the lemmatized lexical head assuming the provided phrase_type ("N","V",etc.)'''
head = None
# your code here
get_head_(phrase, head, phrase_type)
if len(head_list) == 0:
return head
return lemmatizer.lemmatize(head_list[-1],phrase_type.lower())
def get_head_(phrase, head, phrase_type):
'''returns the lemmatized lexical head assuming the provided phrase_type ("N","V",etc.)'''
if phrase.label()[:1] == phrase_type and phrase.height() == 2:
head = phrase.pos()[0][0]
head_list.append(phrase.pos()[0][0])
elif phrase.label()[:1] == phrase_type and phrase.height() > 2:
for sub in phrase:
get_head_(sub, head, phrase_type)
assert (get_head(Tree.fromstring("(NP (DT the) (JJ second) (NN incentive) (NN plan))"), "N") == "plan")
assert (get_head(Tree.fromstring("(NP-SUBJ (NP (DT the) (NNS policies)) (PP (IN of) (NP (NN tomorrow))))"), "N") == "policy")
assert (get_head(Tree.fromstring("(VP (VBN offered) (NP (NNS advertisers)))"),"V") == "offer")
print("Success!")
```
| github_jupyter |
# Importing Required Libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn import metrics
%matplotlib inline
sns.set_style("darkgrid")
sns.set(style="ticks", color_codes=True)
data = pd.read_csv("input_bcell.csv")
data.head()
```
**By checking the value we can see that our class is Unbalanced.**
```
data["target"].value_counts()
data.describe()
columns = []
for cols in data.columns:
if type(data[cols][0]) is not str:
columns.append(cols)
```
**Including features only with integer or float values.**
```
data = data[columns]
data.shape
```
# Standardising the values
**Transforming the features to a scaled version.**
```
from sklearn.preprocessing import StandardScaler
scaled_data = StandardScaler()
scaled_data = scaled_data.fit(data)
scaled_data = scaled_data.transform(data)
scaled_data.shape
```
**Check if any of the features have null values.**
```
sns.heatmap(data.isnull(), cmap="viridis", yticklabels=False, cbar = False, square = False)
```
# Using Logistic Regression
```
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
X = data[data.columns[:-1]]
y = data[data.columns[-1]]
X_train,X_test, y_train, y_test = train_test_split(X, y, random_state = 42, test_size = 0.33,shuffle = True)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, predictions))
print("\n")
print(classification_report(y_test, predictions))
```
## Principal Component Analysis
```
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(scaled_data)
x_pca = pca.transform(scaled_data)
x_pca.shape
def plot_pca(y_sklearn, y, title, x_label='Principal Component 1', y_label = 'Principal Component 2'):
with plt.style.context('seaborn-whitegrid'):
plt.figure(figsize=(9, 4))
for lab, col in zip((0,1), ('blue', 'red')):
plt.scatter(y_sklearn[y==lab, 0],
y_sklearn[y==lab, 1],
label=lab,
c=col)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(loc='lower center')
plt.tight_layout()
plt.title(title)
plt.show()
plot_pca(x_pca, y,"Entire Data")
pca.components_
new_data = pd.DataFrame(pca.components_, columns = data.columns)
new_data.shape
sns.set_style("darkgrid")
plt.figure(figsize=(12,6))
sns.heatmap(new_data,cmap='plasma',)
```
# Using K Nearest Neighbours
```
from sklearn.neighbors import KNeighborsClassifier
error_rate = []
for i in range(1,50):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
knn_predictions = knn.predict(X_test)
error_rate.append(np.mean(knn_predictions != y_test))
sns.set_style("darkgrid")
plt.plot(range(1,50),error_rate,color='blue',linestyle='dashed', marker='o',
markerfacecolor='red', markersize=5, )
knn = KNeighborsClassifier(n_neighbors=2)
knn.fit(X_train,y_train)
knn_predictions = knn.predict(X_test)
print(confusion_matrix(y_test, knn_predictions))
print("\n")
print(classification_report(y_test, knn_predictions))
```
# Using Random Forest Classifier
```
from sklearn.ensemble import RandomForestClassifier
random_model = RandomForestClassifier(n_estimators = 600)
random_model.fit(X_train, y_train)
random_predictions = random_model.predict(X_test)
print(confusion_matrix(y_test, random_predictions))
print("\n")
print(classification_report(y_test, random_predictions))
```
# Using Support Vector Machine
```
from sklearn.svm import SVC
svc_model = SVC(C = 1000 , gamma = 0.001)
param_grid = {"C":[0.1,1,10,100,1000], "gamma":[10,1,0.1,0.001,0.0001]}
from sklearn.model_selection import GridSearchCV
grid_search_model = GridSearchCV(SVC(),param_grid,refit=True,verbose=2)
grid_search_model.fit(X_train,y_train)
grid_search_model.best_estimator_
svc_model.fit(X_train, y_train)
svc_predictions = svc_model.predict(X_test)
```
# Comparing the accuracy of all the above 4 models
```
print("Logistic Regression : \n", confusion_matrix(y_test, predictions))
print("Support Vector Machine : \n",confusion_matrix(y_test, svc_predictions))
print("Random Forest : \n",confusion_matrix(y_test, random_predictions))
print("K Nearest Neigbiour : \n",confusion_matrix(y_test, knn_predictions))
print("\n")
print("Logistic Regression : \n", classification_report(y_test, predictions))
print("Support Vector Machine : \n",classification_report(y_test, svc_predictions))
print("Random Forest : \n",classification_report(y_test, random_predictions))
print("K Nearest Neigbiour : \n",classification_report(y_test, knn_predictions))
```
| github_jupyter |
```
import pandas as pd
import mysql.connector
from IPython.display import display, Markdown
db = mysql.connector.connect(
host="localhost",
user="root",
password="secret",
port="3306",
database="serlo"
)
def read_event_log():
df = pd.read_sql("""
select event_log.id, event_log.date, event_log.actor_id,
user.username, event_log.event_id, event.name
from event_log
join event on event.id = event_log.event_id
join user on user.id = event_log.actor_id
order by event_log.id
""", db)
df.set_index("id", inplace=True)
return df
event_log = read_event_log()
event_log.head()
from IPython.display import display
import matplotlib.pyplot as plt
def becomes_active(x, delta):
diff = x-x.min()
if diff[diff < delta].count() > 10:
return 1
else:
return 0
d = event_log.groupby("actor_id").aggregate({
"date": ["first", lambda x: becomes_active(x, pd.Timedelta("90days")), lambda x: becomes_active(x, pd.Timedelta("3days"))],
"username": "first",
})
d.columns = d.columns.to_flat_index()
d.rename(columns={
("date", "first"): "date",
("date", "<lambda_0>"): "becomes_active_90days",
("date", "<lambda_1>"): "becomes_active_2days"
}, inplace=True)
d.sort_values("date", inplace=True)
d = d[["date", "becomes_active_90days", "becomes_active_2days"]]
d.set_index("date", inplace=True)
d = d.rolling("365d").mean()
display(d.head())
d.plot(figsize=(20,8), ylim=(0,0.4))
plt.show()
d["diff"] = d["becomes_active_90days"] - d["becomes_active_2days"]
d["diff"].plot(figsize=(20,8), ylim=(0,0.2))
plt.show()
from IPython.display import display
import matplotlib.pyplot as plt
def becomes_active(x, delta):
diff = x-x.min()
if diff[diff < delta].count() > 10:
return 1
else:
return 0
d = event_log.groupby("actor_id").aggregate({
"date": ["first", lambda x: becomes_active(x, pd.Timedelta("90days")), lambda x: becomes_active(x, pd.Timedelta("1days"))],
"username": "first",
})
d.columns = d.columns.to_flat_index()
d.rename(columns={
("date", "first"): "date",
("date", "<lambda_0>"): "becomes_active_90days",
("date", "<lambda_1>"): "becomes_active_2days"
}, inplace=True)
d.sort_values("date", inplace=True)
d = d[["date", "becomes_active_90days", "becomes_active_2days"]]
d.set_index("date", inplace=True)
d = d.rolling("90d").sum()
display(d.head())
d.plot(figsize=(20,8))
plt.show()
d["diff"] = d["becomes_active_90days"] - d["becomes_active_2days"]
d["diff"].plot(figsize=(20,8))
plt.show()
pd.set_option('display.max_rows', None)
df = event_log[event_log["event_id"] == 5].groupby("actor_id").aggregate({
"username": "first",
"event_id": "count"
})
df.rename(columns = { "event_id": "edits" }, inplace=True)
df.sort_values("edits", inplace=True, ascending=False)
df = df.head(136)
df.to_csv("/tmp/most_active_user.csv")
df
def read_event_log_all():
df = pd.read_sql("""
select event_log.id, event.name, event_log.actor_id, user.username,
event_log.date, event_log.event_id, event_parameter_uuid.uuid_id
from event_log
join event on event.id = event_log.event_id
join user on user.id = event_log.actor_id
join event_parameter on event_parameter.log_id = event_log.id
join event_parameter_uuid on event_parameter_uuid.event_parameter_id = event_parameter.id
where event_parameter.name_id != 8
""", db)
df.set_index("id", inplace=True)
return df
read_event_log_all().to_csv("/tmp/event_log.csv")
df2 = event_log[event_log["event_id"] == 5].copy()
df2["year"] = df2["date"].map(lambda x: x.year)
df2 = df2[df2["year"] <= 2019].groupby("actor_id").aggregate({
"username": "first",
"event_id": "count"
})
df2.rename(columns = { "event_id": "edits" }, inplace=True)
df2.sort_values("edits", inplace=True, ascending=False)
df2 = df2.head(136)
df2[~df2["username"].isin(df["username"])].to_csv("/tmp/2019.csv")
p = event_log.copy()
p = p[(p["username"] != "Legacy") & (p["event_id"] == 5) & (p["username"] == "kathongi")].copy()
p.set_index("date", inplace=True)
p = p.resample("1d").aggregate({"actor_id": "count" })
p.rename(columns={"actor_id": "edits"}, inplace=True)
f = p.tail(100)
#display(f[f["edits"] <10])
m = int(f.max())
f.plot.hist(bins=m, figsize=(20,8))
f.describe()
```
| github_jupyter |
# Inferential Statistics III - Bayesian
## Introduction
In the last two subunits, you've encountered two schools for performing inference from samples. The Frequentist school calls upon a body of theory established over the past couple of centuries or so. Under certain assumptions and conditions, this allows us to calculate what we would expect to see if an experiment were to be repeated again and again and again. The expected value of the average of a sample is one such statistic we can calculate a result for, even if the originating distribution is far from normal. The bootstrap school, on the other hand, literally does (virtually) run that experiment again and again and again and empirically observes the multitude of outcomes. It then empirically calculates a statistic of interest. While this can be for exactly the same statistics that frequentism calculates (e.g. the mean of a sample) this empirical approach can also perform inference on statistics that do not have well known sampling distributions. Because of the requirement to repeat many, many redraws (with replacement) from the sample, this approach only became feasible with modern computing power.
And thus we come to the Bayesian school of inference. Here we frame our probabilities not so much in terms of "how many times would I expect this event to occur if the experiment were to be rerun many times" but rather in terms of "what is my belief in the likelihood of this event occurring?" In a Bayesian probabilistic programming context, we can build models for systems and then let the data tell us how likely certain values for our model parameters are. This can be a very useful way to incorporate prior knowledge and deal with limited data. It can just be more than a _little_ fiddly to produce a good model!
## Medical charge data set
For the final mini-project of the stats unit, you'll once again return to the medical charge data you've used for the other mini-projects. Previously, we considered whether we believed that the actual average (non-insured) charge had fallen below a certain threshold.
The hospital is now reviewing its financial resiliency plan, which requires a model for revenue under a range of conditions that include the number of patients treated. Its current model is based on a confidence interval for the mean, and scaling that by different numbers of patients for each scenario. This approach has a number of limitations, most acutely the breakdown of the central limit theorem for low patient volumes; the current model does not do a good job of reflecting the variability in revenue you would see as the number of cases drops. A bootstrap approach would return samples of the same size as the original. Taking subsamples would restrict the sampling to the values already present in the original sample and would not do a good job of representing the actual variability you might see. What is needed is a better model of individual charges.
So the problem here is that we want to model the distribution of individual charges and _we also really want to be able to capture our uncertainty about that distribution_ so we can better capture the range of values we might see. This naturally leads us to a powerful, probabilistic approach — we'll use the pymc3 library to perform Bayesian inference.
### Loading the data and performing an initial view
```
import pandas as pd
import numpy as np
import pymc3 as pm
import pymc3 as pm
import pandas as pd
import numpy as np
from numpy.random import seed
import matplotlib.pyplot as plt
from scipy.stats import gamma
# there has been some incompatibilty between theano and numpy, if you encounter
# an error with the latest packages from anaconda, then the included
# package-list-txt should allow you to create a conda environment with compatible
# packages.
medical = pd.read_csv('insurance2.csv')
medical.head()
insurance = medical.charges[medical.insuranceclaim == 1]
no_insurance = medical.charges[medical.insuranceclaim == 0]
n_ins = len(insurance)
n_no_ins = len(no_insurance)
_ = plt.hist(insurance, bins=30, alpha=0.5, label='insurance claim')
_ = plt.hist(no_insurance, bins=30, alpha=0.5, label='not insurance claim')
_ = plt.xlabel('Charge amount')
_ = plt.ylabel('Frequency')
_ = plt.legend()
```
We may suspect from the above that there is some sort of exponential-like distribution at play here. The charges that were not insurance claims seem most like this. The insurance claim charges may possibly be multimodal. The gamma distribution may be applicable and we could test this for the distribution of charges that weren't insurance claims first. Developing our new method for the easiest looking case first is a common and sound approach that can demonstrate a minimum viable solution/product and get, or keep, stakeholders on board.
### Initial parameter estimation
An initial guess for the gamma distribution's $\alpha$ and $\beta$ parameters can be made as described [here](https://wiki.analytica.com/index.php?title=Gamma_distribution).
```
alpha_est = np.mean(no_insurance)**2 / np.var(no_insurance)
beta_est = np.var(no_insurance) / np.mean(no_insurance)
alpha_est, beta_est
```
### Initial simulation
Let's draw the same number of random variates from this distribution and compare to our observed data.
```
seed(47)
no_ins_model_rvs = gamma(alpha_est, scale=beta_est).rvs(n_no_ins)
_ = plt.hist(no_ins_model_rvs, bins=30, alpha=0.5, label='simulated')
_ = plt.hist(no_insurance, bins=30, alpha=0.5, label='observed')
_ = plt.xlabel('Charge amount')
_ = plt.ylabel('Frequency')
_ = plt.legend()
```
Well it doesn't look too bad! We're not a million miles off. But can we do better? We have a plausible form for the distribution of charge amounts and potential values for that distribution's parameters so we can already draw random variates from that distribution to perform simulations. But we don't know if we have a _best_ estimate for the population parameters, and we also only have a single estimate each for $\alpha$ and $\beta$; we aren't capturing our uncertainty in their values. Can we take a Bayesian inference approach to estimate the parameters?
### Creating a PyMC3 model
```
# PyMC3 Gamma seems to use rate = 1/beta
rate_est = 1/beta_est
# Initial parameter estimates we'll use below
alpha_est, rate_est
```
__Q:__ You are now going to create your own PyMC3 model!
1. Use an [exponential](https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Exponential) prior for alpha. Call this stochastic variable `alpha_`.
2. Similarly, use an exponential prior for the rate ([$1/\beta$](https://wiki.analytica.com/index.php?title=Gamma_distribution)) parameter in PyMC3's [Gamma](https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Gamma). Call this stochastic variable `rate_` (but it will be supplied as `pm.Gamma`'s `beta` parameter). Hint: to set up a prior with an exponential distribution for $x$ where you have an initial estimate for $x$ of $x_0$, use a scale parameter of $1/x_0$.
5. Create your Gamma distribution with your `alpha_` and `rate_` stochastic variables and the observed data.
6. Perform 10000 draws.
Hint: you may find it helpful to work backwards. Start with your `pm.Gamma`, and note the required stochastic variables `alpha` and `beta`. Then, before that, you need to create those stochastic variables using `pm.Exponential` and the correct parameters.
__A:__
```
with pm.Model() as model:
alpha_ = pm.Exponential('alpha_', 1/alpha_est)
rate_ = pm.Exponential('rate_', 1/rate_est) #beta = 1/rate
no_insurance_charge = pm.Gamma('no_insurance_charge', alpha=alpha_, beta=rate_, observed = no_insurance)
trace = pm.sample(10000)
trace
```
If you get a warning about acceptance probability not matching the target, and that it's around 0.88 when it should be close to 0.8, don't worry. We encourage you to read up on this and see if you can adjust the parameters and/or arguments to pm.sample, but if your model runs without any additional warnings or errors then you should be doing great!
__Q:__ Explore your posteriors for $\alpha$ and $\beta$ (from the trace).
* Calculate the 95% credible interval for $\alpha$ and $\beta$.
* Plot your posterior values of $\alpha$ and $\beta$ (both line plots and histograms).
* Mark your CIs on the histograms.
* Do they look okay? What would bad plots look like?
__A:__
```
#95% Confidence Interval for Alpha:
alpha_mean = np.mean(trace.alpha_)
print(f"Alpha Mean is {alpha_mean}")
confi_alpha_lower, confi_alpha_upper= np.percentile(trace.alpha_, [2.5, 97.5])
print(f"Confidence Intreval for Alpha is {confi_alpha_lower} & {confi_alpha_upper}")
#95% Confidence Interval for Beta:
beta_mean = 1/np.mean(trace.rate_) #Beta mean = 1/rate mean
print(f"Beta Mean is {beta_mean}")
confi_beta_lower, confi_beta_upper = 1/np.percentile(trace.rate_, [2.5, 97.5])
print(f"Confidence Interval for Beta is {confi_beta_lower} and {confi_beta_upper}")
#Histogram and Line plots for Alpha Posterior Values:
_ = plt.hist(trace.alpha_, bins = 30, edgecolor = 'black')
_ = plt.axvline(alpha_mean, color= 'red')
_ = plt.axvline(confi_alpha_lower, color = 'gold', linestyle= '-.')
_ = plt.axvline(confi_alpha_upper, color = 'gold', linestyle= '-.')
_ = plt.xlabel('Alpha_')
_ = plt.ylabel('Count')
_ = plt.title('Alpha Posterior Values')
#Histogram and Line Plots for Beta Posterior Values:
_ = plt.hist(1/trace.rate_, bins = 30, edgecolor = 'black')
_ = plt.axvline(beta_mean, color= 'red')
_ = plt.axvline(confi_beta_lower, color = 'gold', linestyle= '-.')
_ = plt.axvline(confi_beta_upper, color = 'gold', linestyle= '-.')
_ = plt.xlabel('Beta_')
_ = plt.ylabel('Count')
_ = plt.title('Beta Posterior Values')
```
__Q:__ Play around with some of the built-in diagnostic plots for your model. We suggest at least checking out the traceplot for alpha and beta. How do they look?
__A:__
```
#TracePlot:
_ = pm.traceplot(trace)
```
The traceplots for alpha and beta look similar.
__Q:__ Take your best shot at a new simulated sequence of medical charges using scipy.stat's gamma distribution. Don't forget the difference between functions that take $\beta$ and functions that use $1/\beta$ for the scale parameter. Simulate a data set the same size as the number of observations in the data and overlay the two histograms (simulated and observed).
__A:__
```
#Using Scipy Stats:
seed(47)
best_shot_simulated = gamma(alpha_mean, scale = beta_mean).rvs(n_no_ins)
#Overlay Histogram:
_ = plt.hist(best_shot_simulated, bins=30, alpha = 0.5, label ='Simulated')
_ = plt.hist(no_insurance, bins = 30, alpha = 0.5, label = 'Observed')
_ = plt.legend()
_ = plt.xlabel('Non Insured Charges')
_ = plt.ylabel('Count')
_ = plt.title('Overlay of Simluated and Observed Non Insured Charges')
```
The result is similar to the initial simulation.
## Summary
In this exercise, we have postulated a distribution to describe the individual charge amounts for non-insured cases. This distribution has two required parameters, which we do not know, but we used PyMC3 to perform Bayesian inference to find our level of "belief" in a range of values for them. We then used the average parameter values to create one simulated data set of the same size as the original, but the distribution of our posteriors for these parameters will allow us to perform simulations of any sample size we desire and for a range of scenarios of different $\alpha$ and $\beta$. This could be a powerful tool to model different financial conditions for the hospital.
Well done making it through this tricky subject. Starting think Bayesian _and_ starting to get to grips with something like PyMC3 is no easy task. As a data scientist, the most important thing is to be aware that this statistical approach exists, though you may not actually use this approach as much as you use the other approaches you've learned about. Still, we encourage you to think of ways that this approach could apply to the work that you do in this course and throughout your career.
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from flask import Flask, jsonify
import numpy as np
import pandas as pd
import datetime as dt
from scipy import stats
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine,inspect, func
# create engine to hawaii.sqlite
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
inspector = inspect(engine)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# View all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
inspector.get_table_names()
columns = inspector.get_columns('measurement')
for column in columns:
primarykeystr = ""
if column['primary_key'] == 1:
primarykeystr = "Primary Key"
print(column["name"],column["type"],primarykeystr)
columns = inspector.get_columns('station')
for column in columns:
primarykeystr = ""
if column['primary_key'] == 1:
primarykeystr = "Primary Key"
print(column["name"], column["type"], primarykeystr)
```
# Exploratory Precipitation Analysis
```
#Validate records in the column
session.query(func.count(Measurement.date)).all()
#Find the earliest starting date
earlieststr = session.query(Measurement.date).order_by(Measurement.date).first()
lateststr = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
print(f"Earliest: {earlieststr[0]} , Latest: {lateststr[0]}")
#Find the last/ most recent date record in column
#Find the last year's worth of data
#Save to dataFrame
#Sort values by date
#Set Index by date
latestdate = dt.datetime.strptime(lateststr[0], '%Y-%m-%d')
# Query window starts exactly one year before the most recent observation
querydate = dt.date(latestdate.year -1, latestdate.month, latestdate.day)
querydate
sel = [Measurement.date,Measurement.prcp]
queryresult = session.query(*sel).filter(Measurement.date >= querydate).all()
precipitation = pd.DataFrame(queryresult, columns=['Date','Precipitation'])
precipitation = precipitation.dropna(how='any') # clean up non value entries
precipitation = precipitation.sort_values(["Date"], ascending=True)
precipitation = precipitation.set_index("Date")
precipitation.head()
# Use Pandas Plotting with Matplotlib to plot the data
# Set plot and plot the chart
xx = precipitation.index.tolist()
yy = precipitation['Precipitation'].tolist()
plt.figure(figsize=(10,7))
plt.bar(xx,yy,width = 5 ,color='g', alpha=0.5, align="center",label='Precipitation')
# Hide the per-date x tick labels (there are too many to read)
plt.tick_params(
    axis='x',
    which='both',
    bottom=False,
    top=False,
    labelbottom=False)
major_ticks = np.arange(0,400,80)
plt.xticks(major_ticks)
plt.title(f"Precipitation from {querydate} to {lateststr[0]}")
plt.xlabel("Date")
plt.ylabel("Precipitation")
plt.grid(which='major', axis='both', linestyle='-')
plt.legend()
plt.show()
# Use Pandas to calculate the summary statistics for the precipitation data
precipitation.describe()
```
# Exploratory Station Analysis
```
# Design a query to calculate the total number stations in the dataset
session.query(Station.id).count()
# Design a query to find the most active stations (i.e. what stations have the most rows?)
# List the stations and the counts in descending order.
sel = [Measurement.station,func.count(Measurement.id)]
activestations = session.query(*sel).\
    group_by(Measurement.station).\
    order_by(func.count(Measurement.id).desc()).all()
activestations
# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.
# (first() of the count-descending grouping is the most active station's row)
sel = [func.min(Measurement.tobs),func.max(Measurement.tobs),func.avg(Measurement.tobs)]
mostactivestationdata = session.query(*sel).\
    group_by(Measurement.station).\
    order_by(func.count(Measurement.id).desc()).first()
mostactivestationdata
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
# activestations[0][0] is the station id of the most active station
queryresult = session.query(Measurement.tobs).\
    filter(Measurement.station == activestations[0][0]).\
    filter(Measurement.date >= querydate).all()
temperatures = list(np.ravel(queryresult))
# Look up the human-readable station name for the plot title
sel = [Station.station,Station.name,Station.latitude,Station.longitude,Station.elevation]
queryresult = session.query(*sel).all()
stations_desc = pd.DataFrame(queryresult, columns=['Station','Name','Latitude','Longitude','Elevation'])
stationname = stations_desc.loc[stations_desc["Station"] == activestations[0][0],"Name"].tolist()[0]
plt.hist(temperatures, bins=12,rwidth=1.0,label='tobs')
plt.grid(axis='both', alpha=0.75)
plt.ylabel('Frequency')
plt.title(f"Temperature from {querydate} to {lateststr[0]} \nmeasured at {stationname}")
plt.legend()
# Helper that aggregates temperature observations over an inclusive date range.
def calc_temps(start_date, end_date):
    """Return [(TMIN, TAVG, TMAX)] for observations between the two dates (inclusive)."""
    aggregates = [
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    ]
    query = session.query(*aggregates)
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
startdate = '2017-01-01'
enddate = '2017-01-07'
# tempresult is the single (TMIN, TAVG, TMAX) tuple for the trip window
tempresult = calc_temps(startdate,enddate)[0]
tempresult
#Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as title
# Use the average temperature for y axis
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
x_pos = [0]
y_pos = [tempresult[1]]
error = [(tempresult[2] - tempresult[0])]
w = 3
h = 5
d = 70
plt.figure(figsize=(w, h), dpi=d)
plt.bar(x_pos,y_pos,color='orange', yerr=error)
plt.xlim(-0.75,0.75)
plt.title("Trip Avg Temp")
plt.ylabel("Temp (F)")
plt.ylim(0, 100)
# Single-bar chart: hide x ticks entirely
plt.tick_params(axis='x',which='both',bottom=False,top=False,labelbottom=False)
plt.grid(which='major', axis='x', linestyle='')
plt.grid(which='major', axis='y', linestyle='-')
plt.show()
```
# Close session
```
# Close Session
session.close()
```
| github_jupyter |
# Gated PixelCNN receptive fields
Hi everybody!
In this notebook, we will analyse the Gated PixelCNN's block receptive field. Different from the original PixelCNN, we expect that the blocks of the Gated PixelCNN do not create blind spots that limit the information flow from the previous pixels when modelling the probability density function.
Let's start!
First, we define the masked convolutions involved in the Gated PixelCNN as presented in the post.
*Note: Here we are using float64 to get more precise values of the gradients and avoid false values.
```
import random as rn
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow import nn
from tensorflow.keras import initializers
from tensorflow.keras.utils import Progbar
tf.keras.backend.set_floatx('float64')
class MaskedConv2D(keras.layers.Layer):
    """Convolutional layer with masks extended to work with Gated PixelCNN.

    Convolutional layer with a simple implementation of masks type A and B for
    autoregressive models, extended to also support the vertical ('V') stack
    used by the Gated PixelCNN model.

    Mask semantics (applied over the kernel's spatial positions):
        'V': keeps rows up to and including the centre row (vertical stack).
        'A': keeps strictly-previous pixels; the centre pixel itself is masked.
        'B': like 'A' but the centre pixel is kept (used after the first layer).

    Arguments:
        mask_type: one of `"V"`, `"A"` or `"B"`.
        filters: Integer, the dimensionality of the output space (i.e. the number
            of output filters in the convolution).
        kernel_size: An integer or tuple/list of 2 integers, specifying the height
            and width of the 2D convolution window. A single integer means the
            same value for both spatial dimensions.
        strides: An integer specifying the stride of the convolution along height
            and width.
        padding: one of `"valid"` or `"same"` (case-insensitive).
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
    """
    def __init__(self,
                 mask_type,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros'):
        super(MaskedConv2D, self).__init__()
        assert mask_type in {'A', 'B', 'V'}
        self.mask_type = mask_type
        self.filters = filters
        # Normalize a scalar kernel size to a (height, width) pair
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding.upper()
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
    def build(self, input_shape):
        # Kernel layout: (height, width, in_channels, out_filters)
        kernel_h, kernel_w = self.kernel_size
        self.kernel = self.add_weight('kernel',
                                      shape=(kernel_h,
                                             kernel_w,
                                             int(input_shape[-1]),
                                             self.filters),
                                      initializer=self.kernel_initializer,
                                      trainable=True)
        self.bias = self.add_weight('bias',
                                    shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    trainable=True)
        # Binary mask built once at build time; multiplied into the kernel on
        # every forward pass so masked weights never contribute to the output.
        mask = np.ones(self.kernel.shape, dtype=np.float64)
        # Get centre of the filter for even or odd dimensions
        if kernel_h % 2 != 0:
            center_h = kernel_h // 2
        else:
            center_h = (kernel_h - 1) // 2
        if kernel_w % 2 != 0:
            center_w = kernel_w // 2
        else:
            center_w = (kernel_w - 1) // 2
        if self.mask_type == 'V':
            # Vertical stack: zero all rows strictly below the centre row
            mask[center_h + 1:, :, :, :] = 0.
        else:
            # Horizontal stacks 'A'/'B': zero rows above the centre row,
            # then zero the centre row starting at the centre column
            # ('B' shifts the cut one column right, keeping the centre pixel),
            # then zero everything below the centre row.
            mask[:center_h, :, :] = 0.
            mask[center_h, center_w + (self.mask_type == 'B'):, :, :] = 0.
            mask[center_h + 1:, :, :] = 0.
        self.mask = tf.constant(mask, dtype=tf.float64, name='mask')
    def call(self, input):
        # Apply the mask to the kernel, then run an ordinary conv2d + bias
        masked_kernel = tf.math.multiply(self.mask, self.kernel)
        x = nn.conv2d(input,
                      masked_kernel,
                      strides=[1, self.strides, self.strides, 1],
                      padding=self.padding)
        x = nn.bias_add(x, self.bias)
        return x
```
Then, we define the block implementation.
```
class GatedBlock(tf.keras.Model):
    """Gated block composing the Gated PixelCNN.

    Runs a vertical stack ('V'-masked conv) and a horizontal stack
    (1 x k masked conv) in parallel, feeds the vertical features into the
    horizontal stack through a 1x1 conv (after shifting them down one row to
    preserve causality), and gates both stacks with tanh * sigmoid.
    """
    def __init__(self, mask_type, filters, kernel_size):
        super(GatedBlock, self).__init__(name='')
        self.mask_type = mask_type
        # Both stacks produce 2*filters channels: half for tanh, half for sigmoid
        self.vertical_conv = MaskedConv2D(mask_type='V',
                                          filters=2 * filters,
                                          kernel_size=kernel_size)
        self.horizontal_conv = MaskedConv2D(mask_type=mask_type,
                                            filters=2 * filters,
                                            kernel_size=(1, kernel_size))
        # pad one row on top + crop one row at the bottom == shift down by one row
        self.padding = keras.layers.ZeroPadding2D(padding=((1, 0), 0))
        self.cropping = keras.layers.Cropping2D(cropping=((0, 1), 0))
        self.v_to_h_conv = keras.layers.Conv2D(filters=2 * filters, kernel_size=1)
        self.horizontal_output = keras.layers.Conv2D(filters=filters, kernel_size=1)
    def _gate(self, x):
        # Split channels in half and combine as tanh(a) * sigmoid(b)
        tanh_preactivation, sigmoid_preactivation = tf.split(x, 2, axis=-1)
        return tf.nn.tanh(tanh_preactivation) * tf.nn.sigmoid(sigmoid_preactivation)
    def call(self, input_tensor):
        # input_tensor is a pair: [vertical stack input, horizontal stack input]
        v = input_tensor[0]
        h = input_tensor[1]
        vertical_preactivation = self.vertical_conv(v)
        # Shifting vertical stack feature map down before feed into horizontal stack to
        # ensure causality
        v_to_h = self.padding(vertical_preactivation)
        v_to_h = self.cropping(v_to_h)
        v_to_h = self.v_to_h_conv(v_to_h)
        horizontal_preactivation = self.horizontal_conv(h)
        v_out = self._gate(vertical_preactivation)
        horizontal_preactivation = horizontal_preactivation + v_to_h
        h_activated = self._gate(horizontal_preactivation)
        h_activated = self.horizontal_output(h_activated)
        # Residual connection on the horizontal stack only for 'B' blocks;
        # the first ('A') block has no residual so the current pixel stays masked.
        if self.mask_type == 'A':
            h_out = h_activated
        elif self.mask_type == 'B':
            h_out = h + h_activated
        return v_out, h_out
```
In order to analyse how the receptive field grows along the layers, we will start by analysing 1 block.
```
# Small 10x10 single-channel input used for all receptive-field experiments
height = 10
width = 10
n_channel = 1
data = tf.random.normal((1, height, width, n_channel))
inputs = keras.layers.Input(shape=(height, width, n_channel))
v, h = GatedBlock(mask_type='A', filters=1, kernel_size=3)([inputs, inputs])
model = tf.keras.Model(inputs=inputs, outputs=h)
def plot_receptive_field(model, data):
    """Visualize which input pixels influence the output at position (5, 5).

    Computes the gradient of the model output at pixel (5, 5) with respect to
    the input; any input pixel with a non-zero gradient is inside the
    receptive field and is drawn in the bright colour. The analysed pixel
    itself is drawn at half intensity (0.5).
    """
    with tf.GradientTape() as tape:
        tape.watch(data)
        prediction = model(data)
        loss = prediction[:,5,5,0]
    gradients = tape.gradient(loss, data)
    # Binarize: 1.0 where the gradient is non-zero, 0.0 elsewhere
    gradients = np.abs(gradients.numpy().squeeze())
    gradients = (gradients > 0).astype('float64')
    gradients[5, 5] = 0.5
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    plt.xticks(np.arange(0, 10, step=1))
    plt.yticks(np.arange(0, 10, step=1))
    # Minor ticks at cell boundaries so the grid outlines each pixel
    ax.xaxis.set_minor_locator(FixedLocator(np.arange(0.5, 10.5, step=1)))
    ax.yaxis.set_minor_locator(FixedLocator(np.arange(0.5, 10.5, step=1)))
    plt.grid(which="minor")
    plt.imshow(gradients, vmin=0, vmax=1)
    plt.show()
plot_receptive_field(model, data)
```
Excellent! As we expected, the block considered all the previous pixels in the same row as the analysed pixel, and the two rows above it.
Note that this receptive field is different from the original PixelCNN. In the original PixelCNN, only one row above the analysed pixel influenced its prediction (when using one masked convolution). In the Gated PixelCNN, the authors used a vertical stack with an effective area of 2x3 per vertical convolution. This is not a problem, since the considered pixels are still the ones in past positions. We believe the main reason for this format is to implement an efficient way to apply the masked convolutions without using masking (which we will discuss in future posts).
For the next step, we will verify a model with 2, 3, 4, and 5 layers.
```
# 2 gated blocks: first layer uses mask 'A' (excludes the current pixel),
# subsequent layers use mask 'B'.
inputs = keras.layers.Input(shape=(height, width, n_channel))
v, h = GatedBlock(mask_type='A', filters=1, kernel_size=3)([inputs, inputs])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
model = tf.keras.Model(inputs=inputs, outputs=h)
plot_receptive_field(model, data)
# 3 gated blocks
inputs = keras.layers.Input(shape=(height, width, n_channel))
v, h = GatedBlock(mask_type='A', filters=1, kernel_size=3)([inputs, inputs])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
model = tf.keras.Model(inputs=inputs, outputs=h)
plot_receptive_field(model, data)
# 4 gated blocks
inputs = keras.layers.Input(shape=(height, width, n_channel))
v, h = GatedBlock(mask_type='A', filters=1, kernel_size=3)([inputs, inputs])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
model = tf.keras.Model(inputs=inputs, outputs=h)
plot_receptive_field(model, data)
# 5 gated blocks
inputs = keras.layers.Input(shape=(height, width, n_channel))
v, h = GatedBlock(mask_type='A', filters=1, kernel_size=3)([inputs, inputs])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
v, h = GatedBlock(mask_type='B', filters=1, kernel_size=3)([v, h])
model = tf.keras.Model(inputs=inputs, outputs=h)
plot_receptive_field(model, data)
```
As you can notice, the Gated PixelCNN does not create blind spots when adding more and more layers.
| github_jupyter |
```
%matplotlib notebook
import sys
from pathlib import Path
SRC_ROOT_DIR_0 = '/g/wsl_projs/practical-astronomy'
SRC_ROOT_DIR_1 = '/g/wsl_projs/practical-astronomy/myastro/'
sys.path.insert(0, SRC_ROOT_DIR_0)
sys.path.insert(1, SRC_ROOT_DIR_1)
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
from matplotlib import animation
from myastro.ephem import calc_orbits_data, EphemrisInput
from myastro.data_catalog import DF_BODYS, DF_COMETS, read_body_elms_for, read_comet_elms_for, CometElms, BodyElems
from myastro.data_catalog import APOFIS
import seaborn as sns
from toolz import concat, first
from myastro.graphics_util import OrbitsPlot
# Ephemeris window: 1972-02-01 to 1992-05-01, sampled every 50 days, J2000 equinox
eph = EphemrisInput(from_date="1972.02.01.0",
                    to_date = "1992.05.01.0",
                    step_dd_hh_hhh = "50 00.0",
                    equinox_name = "J2000")
#PLANETS = ['Earth','Mercury','Venus','Mars']
#PLANETS = ['Jupiter','Saturn','Uranus','Neptune', 'Pluto']
PLANETS = ['Jupiter']
#PLANETS = []
#PLANETS = ['Jupiter','Saturn']
#MINOR_BODYS = []
#MINOR_BODYS = ['Ceres','Pallas','Juno','Vesta']
#MINOR_BODYS = ['Ceres',APOFIS]
#MINOR_BODYS = ['Ceres']
#MINOR_BODYS = []
MINOR_BODYS=[]
#COMETS = ['1P/Halley','2P/Encke','10P/Tempel 2','C/1995 O1 (Hale-Bopp)']
COMETS = ['C/2019 Q4 (Borisov)']
#COMETS = ['D/1993 F2-A (Shoemaker-Levy 9)']
COMETS = ['C/1988 L1 (Shoemaker-Holt-Rodriquez)'] #, 'C/1980 E1 (Bowell)','C/2019 Q4 (Borisov)']
# NOTE: successive COMETS assignments override each other; only this last one takes effect
COMETS = ['C/1980 E1 (Bowell)','C/2019 Q4 (Borisov)']
#COMETS = []
orbs, dfs, date_refs = calc_orbits_data(eph, PLANETS, MINOR_BODYS, COMETS)
```
### Other implementation
```
dfs.append(calc_eph_minor_body_perturbed(APOFIS,eph))
CERES = read_body_elms_for("Ceres",DF_BODYS)
dfs.append(calc_eph_minor_body_perturbed(CERES,eph))
#HALLEY = read_comet_elms_for("1P/Halley",DF_COMETS)
#ENCKE = read_comet_elms_for("2P/Encke",DF_COMETS)
HALLEY = CometElms(name="1P/Halley",
epoch_mjd=None ,
q = 0.5870992 ,
e = 0.9672725 ,
i_dg = 162.23932 ,
Node_dg = 58.14397 ,
w_dg = 111.84658 ,
tp_str = "19860209.44",
equinox_name = "B1950")
ENCKE = CometElms(name="2P/Encke",
epoch_mjd=None ,
q = 2.2091404*(1-0.8502196) ,
e = 0.8502196 ,
i_dg = 11.94524 ,
Node_dg = 334.75006 ,
w_dg = 186.23352 ,
tp_str = "19901028.54502",
equinox_name = "J2000")
comets = [HALLEY,ENCKE]
for comet in comets:
dfs.append(calc_eph_comet(comet,eph))
cols=['h_x','h_y','h_z']
# A 3d matrix with shape (a,b,c) where a=n_orbits, b=n_t_samples, c=3
x_t = np.array([df[cols].to_numpy() for df in dfs])
N_trajectories = len(dfs)
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
#ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c) for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c) for c in colors], [])
LIMITS=(-1.3,1.3)
# prepare the axes limits
#ax.set_xlim((-30,30 ))
#ax.set_ylim((-30,30))
#ax.set_zlim((-30, 30))
ax.set_xlim(LIMITS)
ax.set_ylim(LIMITS)
ax.set_zlim(LIMITS)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(15, 0)
# SUN
ax.scatter3D(0,0,0, color='yellow', marker='o', lw=8, label='Sun')
# initialization function: plot the background of each frame
def init():
    """Blank out every trajectory line and marker before the animation starts."""
    for line, pt in zip(lines, pts):
        line.set_data([], [])
        line.set_3d_properties([])
        pt.set_data([], [])
        pt.set_3d_properties([])
    return lines + pts
# animation function. This will be called sequentially with the frame number
def animate(i):
    """Draw frame i: each body's trajectory up to sample 2*i plus its latest point."""
    #print (i)
    # we'll step two time-steps per frame. This leads to nice results.
    i = (2 * i) % x_t.shape[1]
    for line, pt, xi in zip(lines, pts, x_t):
        # xi[:i] is the (i, 3) slice of positions drawn so far; .T yields x, y, z
        x, y, z = xi[:i].T
        #print (x)
        #print (y)
        #print (z)
        line.set_data(x, y)
        line.set_3d_properties(z)
        # marker at the most recent position (empty slice while i == 0)
        pt.set_data(x[-1:], y[-1:])
        pt.set_3d_properties(z[-1:])
    #ax.view_init(30, 0.3 * i)
    fig.canvas.draw()
    return lines + pts
# instantiate the animator.
anim = animation.FuncAnimation(fig, animate, init_func=init,frames=500, interval=500, blit=True, repeat=False)
#anim = animation.FuncAnimation(fig, animate, init_func=init, interval=500, blit=True, repeat=False)
# Save as mp4. This requires mplayer or ffmpeg to be installed
#anim.save('lorentz_attractor.mp4', fps=15, extra_args=['-vcodec', 'libx264'])
plt.show()
animate(2)
#animate (3)
len(orbs)
```
### Next Implementation
```
n_trajectories = len(orbs)
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
#ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, n_trajectories))
lines = []
pts = []
for i, (name, mtx) in enumerate(orbs.items()):
lines.append(ax.plot([], [], [], '--', c=colors[i], label=name,lw=.7))
pts.append(ax.plot([], [], [], 'o', c=colors[i]))
#ax.plot3D(mtx[:,0],mtx[:,1],mtx[:,2], c=colors[i], lw=.75, label=name)
lines = list(concat(lines))
pts = list(concat(pts))
#LIMITS=(-1.3,1.3)
LIMITS=(-30,30)
# prepare the axes limits
ax.set_xlim(LIMITS)
ax.set_ylim(LIMITS)
ax.set_zlim(LIMITS)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
# SUN
ax.scatter3D(0,0,0, color='yellow', marker='o', lw=8, label='Sun')
# set the legend, title and animation encoding type
ax.legend(loc='upper right', prop={'size': 9})
#ax.set_title("Tim-Sitze, Orbits of the Inner Planets")
#animation.writer = animation.writers['ffmpeg']
axtext = fig.add_axes([0.0,0.95,0.1,0.05])
# turn the axis labels/spines/ticks off
axtext.axis("off")
time = axtext.text(0.5,0.5, date_refs[0], ha="left", va="top")
# initialization function: plot the background of each frame
def init():
    """Blank out every trajectory line and marker before the animation starts."""
    for line, pt in zip(lines, pts):
        line.set_data([], [])
        line.set_3d_properties([])
        pt.set_data([], [])
        pt.set_3d_properties([])
    return lines + pts
def animate(i):
    """Draw frame i: trajectories up to sample i, latest-point markers, and the date label."""
    for line, pt, mtx in zip(lines, pts, orbs.values()):
        xs = mtx[0:i,0]
        ys = mtx[0:i,1]
        zs = mtx[0:i,2]
        line.set_data(xs, ys)
        line.set_3d_properties(zs)
        # last computed position for the marker (empty while i == 0)
        x = xs[-1:]
        y = ys[-1:]
        z = zs[-1:]
        pt.set_data(x, y)
        pt.set_3d_properties(z)
        # update the date annotation for this frame
        time.set_text(date_refs[i])
        #ax.view_init(30, 0.3 * i)
    fig.canvas.draw()
    return lines + pts
# instantiate the animator.
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(date_refs), interval=1000, blit=False, repeat=False)
plt.show()
animate(4)
```
### Current Implementation
```
fig = plt.figure()
LIMITS=(-7,7)
orbs_plot = OrbitsPlot(orbs, date_refs, fig, LIMITS)
anim = animation.FuncAnimation(fig, orbs_plot.animate, init_func=orbs_plot.init, frames=len(date_refs), interval=1000, blit=False, repeat=False)
plt.show()
df = DF_COMETS
df.info()
df.sort_values('e',ascending=False)[:3]
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
#ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
colors
lines = []
pts = []
for i, (name, mtx) in enumerate(orbs.items()):
lines.append(ax.plot([], [], [], '-', c=colors[i],label=name))
pts.append(ax.plot([], [], [], 'o', c=colors[i]))
#ax.plot3D(mtx[:,0],mtx[:,1],mtx[:,2], c=colors[i], lw=.75, label=name)
lines = list(concat(lines))
pts = list(concat(pts))
LIMITS=(-1.3,1.3)
# prepare the axes limits
ax.set_xlim(LIMITS)
ax.set_ylim(LIMITS)
ax.set_zlim(LIMITS)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
# initialization function: plot the background of each frame
def init():
    """Blank out every trajectory line and marker before the animation starts."""
    for line, pt in zip(lines, pts):
        line.set_data([], [])
        line.set_3d_properties([])
        pt.set_data([], [])
        pt.set_3d_properties([])
    return lines + pts
def animate(i):
    """Draw frame i: each orbit's trajectory up to sample i plus its latest point."""
    # BUG FIX: the list of point artists built above is named `pts`;
    # `points` was an undefined name and raised NameError on the first frame.
    for line, pt, mtx in zip(lines, pts, orbs.values()):
        xs = mtx[0:i,0]
        ys = mtx[0:i,1]
        zs = mtx[0:i,2]
        line.set_data(xs, ys)
        line.set_3d_properties(zs)
        # marker at the most recent position (empty slice while i == 0)
        pt.set_data(xs[-1:], ys[-1:])
        pt.set_3d_properties(zs[-1:])
    #ax.view_init(30, 0.3 * i)
    fig.canvas.draw()
    return lines + pts
anim = animation.FuncAnimation(fig, animate, init_func=init, interval=500, blit=True, repeat=False)
plt.show()
i=3
xyz[0:i,0] # x
xs = xyz[0:i,0]
xs[-1:]
xs
# first plot the constants to be used in our plots
# plot the sun at the origin
ax.scatter3D(0,0,0, color='yellow', marker='o', lw=8, label='Sun')
# plot the orbit of mercury
ax.plot3D(merc_orb[:,0],merc_orb[:,1],merc_orb[:,2], color='gray', lw=.75, label='Mercury')
# plot the prbit of Venus
ax.plot3D(ven_orb[:,0],ven_orb[:,1],ven_orb[:,2], color='orange', lw=.75, label='Venus')
# plot the orbit of the Earth
ax.plot3D(earth_orb[:,0],earth_orb[:,1],earth_orb[:,2], color='blue', lw=.75, label='Earth')
# plot the orbit of Mars
ax.plot3D(mars_orb[:,0],mars_orb[:,1],mars_orb[:,2], color='red', lw=.75, label='Mars')
# get the particles 3d plots. Initially empty
merc_particle, = plt.plot([],[],[], marker='.', color='gray', lw=2)
ven_particle, = plt.plot([],[],[], marker='.', color='orange', lw=2)
earth_particle, = plt.plot([],[],[], marker='.', color='blue', lw=2)
mars_particle, = plt.plot([],[],[], marker='.', color='red', lw=2)
lines
cols=['h_x','h_y','h_z']
# A 3d matrix with shape (a,b,c) where a=n_orbits, b=n_t_samples, c=3
x_t = np.array([df[cols].to_numpy() for df in dfs])
N_trajectories = len(dfs)
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
#ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c)
for c in colors], [])
LIMITS=(-1.3,1.3)
# prepare the axes limits
#ax.set_xlim((-30,30 ))
#ax.set_ylim((-30,30))
#ax.set_zlim((-30, 30))
ax.set_xlim(LIMITS)
ax.set_ylim(LIMITS)
ax.set_zlim(LIMITS)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(15, 0)
# SUN
ax.scatter3D(0,0,0, color='yellow', marker='o', lw=8, label='Sun')
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function. This will be called sequentially with the frame number
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
#ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
# instantiate the animator.
#anim = animation.FuncAnimation(fig, animate, init_func=init,frames=500, interval=500, blit=True, repeat=False)
anim = animation.FuncAnimation(fig, animate, init_func=init, interval=500, blit=True, repeat=False)
# Save as mp4. This requires mplayer or ffmpeg to be installed
#anim.save('lorentz_attractor.mp4', fps=15, extra_args=['-vcodec', 'libx264'])
plt.show()
list1=['a','b','c']
sum(list1,[''])
```
| github_jupyter |
<img src='https://certificate.tpq.io/quantsdev_banner_color.png' width="250px" align="right">
# Reinforcement Learning
© Dr Yves J Hilpisch | The Python Quants GmbH
[quants@dev Discord Server](https://discord.gg/uJPtp9Awaj) | [@quants_dev](https://twitter.com/quants_dev) | <a href="mailto:qd@tpq.io">qd@tpq.io</a>
<img src="https://hilpisch.com/aiif_cover_shadow.png" width="300px" align="left">
## Imports
```
import os
import math
import random
import numpy as np
import pandas as pd
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
np.set_printoptions(precision=4, suppress=True)
os.environ['PYTHONHASHSEED'] = '0'
%config InlineBackend.figure_format = 'svg'
import warnings as w
w.simplefilter('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
def set_seeds(seed=100):
    """Seed all RNG sources (Python, NumPy, TensorFlow, env) for reproducibility.

    NOTE(review): relies on a module-level `env` existing at call time, and
    seeds the action space with the hard-coded value 100 rather than `seed`
    — confirm both are intentional.
    """
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
    env.seed(seed)
    env.action_space.seed(100)
```
## Improved Finance Environment
```
class observation_space:
    """Minimal stand-in for a gym observation space: records only the shape."""

    def __init__(self, n):
        # a flat vector of n entries per observation
        self.shape = tuple([n])
class action_space:
    """Minimal stand-in for a gym discrete action space with `n` actions."""

    def __init__(self, n):
        self.n = n

    def seed(self, seed):
        # seeding is handled globally (see set_seeds); nothing to do here
        pass

    def sample(self):
        # uniform random action index in {0, ..., n - 1}
        return random.randrange(self.n)
class Finance:
    """Gym-like trading environment over daily EOD price data.

    Each step the agent predicts the direction of the next log return
    (action 0 or 1); the episode ends when the data is exhausted or the
    running gross performance drops below `min_performance`.
    """
    # remote CSV with end-of-day price columns, indexed by date
    url = 'http://hilpisch.com/aiif_eikon_eod_data.csv'
    def __init__(self, symbol, features, window, lags,
                 leverage=1, min_performance=0.85,
                 start=0, end=None, mu=None, std=None):
        self.symbol = symbol
        self.features = features
        self.n_features = len(features)
        self.window = window
        self.lags = lags
        self.leverage = leverage
        self.min_performance = min_performance
        self.start = start
        self.end = end
        # pass mu/std from a training env to normalize a validation env
        # with the *training* statistics (avoids look-ahead bias)
        self.mu = mu
        self.std = std
        self.observation_space = observation_space(self.lags)
        self.action_space = action_space(2)
        self._get_data()
        self._prepare_data()
    def _get_data(self):
        # downloads the full dataset on every construction (network access)
        self.raw = pd.read_csv(self.url, index_col=0,
                               parse_dates=True).dropna()
    def _prepare_data(self):
        """Derive features: log return r, rolling mean price s, mean m and vol v of r."""
        self.data = pd.DataFrame(self.raw[self.symbol])
        self.data = self.data.iloc[self.start:]
        self.data['r'] = np.log(self.data / self.data.shift(1))
        self.data.dropna(inplace=True)
        self.data['s'] = self.data[self.symbol].rolling(
            self.window).mean()
        self.data['m'] = self.data['r'].rolling(self.window).mean()
        self.data['v'] = self.data['r'].rolling(self.window).std()
        self.data.dropna(inplace=True)
        # compute normalization stats only if not supplied by a training env
        if self.mu is None:
            self.mu = self.data.mean()
            self.std = self.data.std()
        self.data_ = (self.data - self.mu) / self.std
        # 'd' is the direction label: 1 if the day's return was positive
        self.data_['d'] = np.where(self.data['r'] > 0, 1, 0)
        self.data_['d'] = self.data_['d'].astype(int)
        if self.end is not None:
            self.data = self.data.iloc[:self.end - self.start]
            self.data_ = self.data_.iloc[:self.end - self.start]
    def _get_state(self):
        # window of the last `lags` normalized feature rows ending before `bar`
        return self.data_[self.features].iloc[self.bar -
                                              self.lags:self.bar]
    def seed(self, seed):
        random.seed(seed)
        np.random.seed(seed)
    def reset(self):
        """Restart the episode at the first usable bar and return the initial state."""
        self.treward = 0
        self.accuracy = 0
        self.performance = 1
        self.bar = self.lags
        state = self.data_[self.features].iloc[self.bar-
                                               self.lags:self.bar]
        return state.values
    def step(self, action):
        """Advance one bar; reward combines a hit flag and the leveraged return."""
        correct = action == self.data_['d'].iloc[self.bar]
        ret = self.data['r'].iloc[self.bar] * self.leverage
        reward_1 = 1 if correct else 0
        # signed leveraged log return: earned when right, lost when wrong
        reward_2 = abs(ret) if correct else -abs(ret)
        self.treward += reward_1
        self.bar += 1
        self.accuracy = self.treward / (self.bar - self.lags)
        # compound gross performance multiplicatively
        self.performance *= math.exp(reward_2)
        if self.bar >= len(self.data):
            done = True
        elif reward_1 == 1:
            done = False
        elif (self.performance < self.min_performance and
              self.bar > self.lags + 15):
            # stop out: performance fell below the threshold after a grace period
            done = True
        else:
            done = False
        state = self._get_state()
        info = {}
        # reward_2 * 252 — presumably an annualization factor (252 trading
        # days); confirm against the original text
        return state.values, reward_1 + reward_2 * 252, done, info
env = Finance('EUR=', ['EUR=', 'r', 'v'], window=10, lags=5)
a = env.action_space.sample()
a
env.reset()
env.step(a)
```
## Improved Financial QL Agent
```
from collections import deque
class FQLAgent:
    """Financial Q-learning agent with epsilon-greedy exploration and replay.

    Trains a small dense network to estimate action values on `learn_env`
    and tracks generalization by running greedy episodes on `valid_env`.
    """
    def __init__(self, hidden_units, learning_rate, learn_env, valid_env, dropout=True):
        self.learn_env = learn_env
        self.valid_env = valid_env
        self.dropout = dropout
        # epsilon-greedy exploration schedule
        self.epsilon = 1.0
        self.epsilon_min = 0.1
        self.epsilon_decay = 0.98
        self.learning_rate = learning_rate
        self.gamma = 0.95
        self.batch_size = 128
        self.max_treward = 0
        # per-episode statistics collected during learn()/validate()
        self.trewards = list()
        self.averages = list()
        self.performances = list()
        self.aperformances = list()
        self.vperformances = list()
        # bounded experience replay buffer
        self.memory = deque(maxlen=2000)
        self.model = self._build_model(hidden_units, learning_rate)
    def _build_model(self, hu, lr):
        """Two hidden ReLU layers (optional dropout) with a linear 2-unit Q head."""
        model = Sequential()
        model.add(Dense(hu, input_shape=(
            self.learn_env.lags, self.learn_env.n_features),
            activation='relu'))
        if self.dropout:
            model.add(Dropout(0.3, seed=100))
        model.add(Dense(hu, activation='relu'))
        if self.dropout:
            model.add(Dropout(0.3, seed=100))
        model.add(Dense(2, activation='linear'))
        model.compile(
            loss='mse',
            optimizer=keras.optimizers.RMSprop(learning_rate=lr)
        )
        return model
    def act(self, state):
        """Epsilon-greedy action: random with prob. epsilon, else argmax Q."""
        if random.random() <= self.epsilon:
            return self.learn_env.action_space.sample()
        # NOTE(review): predict output is indexed [0, 0], i.e. only the first
        # lag step's Q-values are used for the greedy choice — confirm intended
        action = self.model.predict(state)[0, 0]
        return np.argmax(action)
    def replay(self):
        """Fit the model on a random minibatch of remembered transitions."""
        batch = random.sample(self.memory, self.batch_size)
        for state, action, reward, next_state, done in batch:
            if not done:
                # bootstrap the target with the discounted best next Q-value
                reward += self.gamma * np.amax(
                    self.model.predict(next_state)[0, 0])
            target = self.model.predict(state)
            # NOTE(review): writes only position [0, 0, action] of the
            # (1, lags, 2) prediction — confirm this matches the act() indexing
            target[0, 0, action] = reward
            self.model.fit(state, target, epochs=1,
                           verbose=False)
        # decay exploration once per replay call
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
    def learn(self, episodes):
        """Run training episodes, validating after each and replaying when possible."""
        for e in range(1, episodes + 1):
            state = self.learn_env.reset()
            # reshape to the (batch, lags, n_features) layout the model expects
            state = np.reshape(state, [1, self.learn_env.lags,
                                       self.learn_env.n_features])
            for _ in range(10000):
                action = self.act(state)
                next_state, reward, done, info = \
                    self.learn_env.step(action)
                next_state = np.reshape(next_state,
                                        [1, self.learn_env.lags,
                                         self.learn_env.n_features])
                self.memory.append([state, action, reward,
                                    next_state, done])
                state = next_state
                if done:
                    # episode length (in steps) doubles as the total reward proxy
                    treward = _ + 1
                    self.trewards.append(treward)
                    av = sum(self.trewards[-25:]) / 25
                    perf = self.learn_env.performance
                    self.averages.append(av)
                    self.performances.append(perf)
                    self.aperformances.append(
                        sum(self.performances[-25:]) / 25)
                    self.max_treward = max(self.max_treward, treward)
                    templ = 'episode: {:2d}/{} | treward: {:4d} | '
                    templ += 'perf: {:5.3f} | av: {:5.1f} | max: {:4d}'
                    print(templ.format(e, episodes, treward, perf,
                                       av, self.max_treward), end='\r')
                    break
            self.validate(e, episodes)
            if len(self.memory) > self.batch_size:
                self.replay()
        print()
    def validate(self, e, episodes):
        """Run one greedy (no-exploration) episode on the validation environment."""
        state = self.valid_env.reset()
        state = np.reshape(state, [1, self.valid_env.lags,
                                   self.valid_env.n_features])
        for _ in range(10000):
            # always exploit during validation
            action = np.argmax(self.model.predict(state)[0, 0])
            next_state, reward, done, info = self.valid_env.step(action)
            state = np.reshape(next_state, [1, self.valid_env.lags,
                                            self.valid_env.n_features])
            if done:
                treward = _ + 1
                perf = self.valid_env.performance
                self.vperformances.append(perf)
                # print a validation banner every 20 episodes
                if e % 20 == 0:
                    templ = 71 * '='
                    templ += '\nepisode: {:2d}/{} | VALIDATION | '
                    templ += 'treward: {:4d} | perf: {:5.3f} | '
                    templ += 'eps: {:.2f}\n'
                    templ += 71 * '='
                    print(templ.format(e, episodes, treward,
                                       perf, self.epsilon))
                break
symbol = 'EUR='
features = ['r', 's', 'm', 'v']
a = 0
b = 2000
c = 500
learn_env = Finance(symbol, features, window=10, lags=6,
leverage=1, min_performance=0.85,
start=a, end=a + b, mu=None, std=None)
learn_env.data.info()
valid_env = Finance(symbol, features, window=learn_env.window,
lags=learn_env.lags, leverage=learn_env.leverage,
min_performance=learn_env.min_performance,
start=a + b, end=a + b + c,
mu=learn_env.mu, std=learn_env.std)
valid_env.data.info()
set_seeds(100)
agent = FQLAgent(48, 0.0001, learn_env, valid_env, True)
episodes = 61
%time agent.learn(episodes)
agent.epsilon
plt.figure(figsize=(10, 6))
x = range(1, len(agent.averages) + 1)
y = np.polyval(np.polyfit(x, agent.averages, deg=3), x)
plt.plot(agent.averages, label='moving average')
plt.plot(x, y, 'r--', label='regression')
plt.xlabel('episodes')
plt.ylabel('total reward')
plt.legend();
plt.figure(figsize=(10, 6))
x = range(1, len(agent.performances) + 1)
y = np.polyval(np.polyfit(x, agent.performances, deg=3), x)
y_ = np.polyval(np.polyfit(x, agent.vperformances, deg=3), x)
plt.plot(agent.performances[:], label='training')
plt.plot(agent.vperformances[:], label='validation')
plt.plot(x, y, 'r--', label='regression (train)')
plt.plot(x, y_, 'r-.', label='regression (valid)')
plt.xlabel('episodes')
plt.ylabel('gross performance')
plt.legend();
```
<img src="https://certificate.tpq.io/quantsdev_banner_color.png" alt="quants@dev" width="35%" align="right" border="0"><br>
[quants@dev Discord Server](https://discord.gg/uJPtp9Awaj) | [@quants_dev](https://twitter.com/quants_dev) | <a href="mailto:qd@tpq.io">qd@tpq.io</a>
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/SakshiPriya/inverted-visualization/blob/master/m&v_inverted_visualization_.ipynb)
```
# Colab setup: mount Google Drive via google-drive-ocamlfuse (OAuth flow),
# then install the PyTorch stack used below.
!apt-get install -y -qq software-properties-common python-software-properties module-init-tools
!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
!apt-get update -qq 2>&1 > /dev/null
!apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
# First invocation prints the OAuth URL; the verification code is pasted below.
!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
# Mount the drive at ./drive so image files are reachable as drive/app/...
!mkdir -p drive
!google-drive-ocamlfuse drive
!pip install torch
!pip install torchvision
!pip install pillow==4.0.0
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from torchvision import transforms,models
from PIL import Image
from torch import optim
# ImageNet channel statistics used by the pretrained-VGG preprocessing.
mean=torch.Tensor([0.485, 0.456, 0.406])
std=torch.Tensor([0.229, 0.224, 0.225])
def transformimage(image):
    """Convert a PIL image into a normalized 1 x 3 x 224 x 224 batch tensor.

    Resizes to the VGG input size, converts to a tensor and applies the
    ImageNet mean/std normalization defined at module level.
    """
    pipeline = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    # Prepend a batch dimension so the tensor can be fed to the model.
    return pipeline(image).unsqueeze(0)
# Inverse transforms used when turning tensors back into (grayscale) PIL images.
untransform=transforms.ToPILImage()
grayscale=transforms.Grayscale()
def showimage(transformedimage, show, gradient):
    """Undo preprocessing on a (1, C, H, W) tensor and display it.

    Args:
        transformedimage: batch tensor of shape (1, C, H, W).
        show: if True, invert the mean/std normalization (for real images).
        gradient: if True (and show is False), min-max scale into [0, 1]
            (for optimized noise/gradient images).

    Returns:
        The displayed PIL image.
    """
    # clone() so we do not mutate the caller's tensor: squeeze() returns a
    # view of the same storage, and the loop below writes in place.
    untrans_image = transformedimage.squeeze(0).clone()
    if show == True:
        # Invert per-channel normalization: x * std + mean.
        for i in range(untrans_image.shape[0]):
            untrans_image[i] = untrans_image[i] * (std[i].double()) + (mean[i].double())
    elif gradient == True:
        # Min-max scaling: divide by the shifted range (max - min), not the
        # original max, so the result actually spans [0, 1].
        untrans_image = (untrans_image - untrans_image.min()) / (untrans_image.max() - untrans_image.min())
    untrans_image = untrans_image.float()
    untrans_image = untransform(untrans_image)
    plt.figure()
    plt.imshow(untrans_image)
    plt.show()
    return untrans_image
# (image path, ImageNet class id) pairs available on the mounted drive.
listofimages=[['drive/app/goldfish.jpg',1],
              ['drive/app/hamster.jpg',333],
              ['drive/app/jellyfish.jpg',107]]
index=0
# Index into model.features of the layer whose activations are inverted.
layervisual=9
imgpath=listofimages[index][0]
classid=listofimages[index][1]
# Pretrained VGG-19; only its convolutional `features` part is used below.
model=models.vgg19(pretrained=True)
image=Image.open(imgpath)
imagetensor=transformimage(image)
# Optimization starts from unit-Gaussian noise; gradients flow into the image.
initial_img=torch.tensor(np.random.normal(0,1,imagetensor.size()),requires_grad=True,dtype=torch.float)
model=model.eval().float()
model.to('cpu')
def outputoflayer(layerno, inputs):
    """Forward `inputs` through model.features up to and including layer
    index `layerno`, returning that layer's activations."""
    activation = inputs
    for index, layer in enumerate(model.features):
        activation = layer(activation)
        if index == layerno:
            break
    return activation
def vectorization(image, power):
    """Return the sum of all elements of `image` raised elementwise to `power`."""
    flat = image.reshape(-1)
    return torch.sum(flat ** power)
def lossfunc(output, target, layer):
    """Inversion loss: normalized euclidean distance between activations plus
    an alpha-norm and a total-variation (TV) regularizer.

    Args:
        output: activations of the image being optimized, shape (1, C, H, W).
        target: activations of the reference image, same shape.
        layer: layer index; currently unused because the per-layer
            lambda_beta schedule below is commented out.

    Returns:
        Scalar loss tensor.
    """
    lambda_alpha = 2.16e-8
    # Per-layer TV weight schedule, currently disabled:
    #if layer in range(11):
    #lambda_beta=0.05
    #elif layer in range(21):
    #lambda_beta=0.5
    #else:
    lambda_beta = 1e-8
    alpha = 6
    beta = 2
    distance = target - output
    # Squared error normalized by the energy of the current activations.
    euclidean_loss = vectorization(distance, 2) / vectorization(output, 2)
    # Alpha-norm regularizer on the activations.
    regularizer1 = lambda_alpha * vectorization(output, alpha)
    # TV regularizer, vectorized: horizontal (dx) and vertical (dy) finite
    # differences over the shared (H-1, W-1) grid. Computes the same sum as
    # the original O(H*W) Python double loop, but in a few tensor ops.
    dx = output[0, :, :-1, 1:] - output[0, :, :-1, :-1]
    dy = output[0, :, 1:, :-1] - output[0, :, :-1, :-1]
    result = ((dx ** 2 + dy ** 2) ** (beta / 2)).sum()
    regularizer2 = lambda_beta * result
    return euclidean_loss + regularizer1 + regularizer2
epochno=2000
# Target activations of the real image at the chosen layer (held constant).
targetimage=outputoflayer(layervisual,imagetensor).detach()
# Optimize the pixels of the noise image directly with SGD + momentum.
optimizer=optim.SGD([initial_img],lr=0.001,momentum=0.9)
for i in range(epochno):
    optimizer.zero_grad()
    input_image=outputoflayer(layervisual,initial_img)
    loss=lossfunc(input_image,targetimage,layervisual)
    print('loss{}:{}'.format(i,loss))
    loss.backward()
    optimizer.step()
# Display the reconstructed image (min-max scaled) and a grayscale copy.
img=showimage(initial_img,False,True)
grayscale(img)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W1D1_BasicsAndPytorch/W1D1_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Tutorial 1: PyTorch
**Week 1, Day 1: Basics and PyTorch**
**By Neuromatch Academy**
__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording
__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo
__Content editors:__ Anoop Kulkarni, Spiros Chavlis
__Production editors:__ Arush Tagade, Spiros Chavlis
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
---
# Tutorial Objectives
We have a few specific objectives for this tutorial:
* Learn about PyTorch and tensors
* Tensor Manipulations
* Data Loading
* GPUs and Cuda Tensors
* Train NaiveNet
* Get to know your pod
* Start thinking about the course as a whole
```
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
# Embed the OSF-hosted slide deck directly in the notebook.
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
```
---
# Setup
Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy); set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on google colab or kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).
Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.
If you start building your own projects built on this code base we highly recommend looking at them in more detail.
```
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm

# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader

# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")

# @title Helper Functions
# Airtable form used throughout the notebook to log tutorial events
# (video views, exercise starts) for course analytics.
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
    """Verify the four tensors produced in Coding Exercise 2.1.

    Args:
      A: torch.Tensor
      B: torch.Tensor
      C: torch.Tensor
      D: torch.Tensor

    Returns:
      Nothing (prints the verdict).
    """
    # TODO better errors and error handling
    errors = []
    if not torch.equal(A.to(int), torch.ones(20, 21).to(int)):
        errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
    if not np.array_equal(B.numpy(), np.vander([1, 2, 3], 4)):
        errors.append("B is not a tensor containing the elements of Z ")
    if C.shape != (20, 21):
        errors.append("C is not the correct shape ")
    if not torch.equal(D, torch.arange(4, 41, step=2)):
        errors.append("D does not contain the correct elements")
    if not errors:
        print("All correct!")
        return
    for message in errors:
        print(message)
def timeFun(f, dim, iterations, device='cpu'):
    """Call f(dim, device) `iterations` times and print the total wall time."""
    t_total = 0.0
    for _ in range(iterations):
        tic = time.time()
        f(dim, device)
        t_total += time.time() - tic
    print(f"time taken for {iterations} iterations of {f.__name__}({dim}): {t_total:.5f}")
```
**Important note: Google Colab users**
*Scratch Code Cells*
If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.
To open a new scratch cell go to *Insert* → *Scratch code cell*.
# Section 1: Welcome to Neuromatch Deep learning course
*Time estimate: ~25mins*
```
# @title Video 1: Welcome and History
from ipywidgets import widgets

# Bilibili mirror (for viewers without YouTube access) in its own output widget.
out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

# YouTube player in a second output widget.
out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

# Present both players as a tabbed widget.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

# add timing
atform.add_event('Video 1: Welcome and History')

display(out)
```
*This will be an intensive 3 week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our
[Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
```
# @title Video 2: Why DL is cool
# Tabbed YouTube/Bilibili video player (same pattern as the other video cells).
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

# add timing to airtable
atform.add_event('Video 2: Why DL is cool')

display(out)
```
**Describe what you hope to get out of this course in about 100 words.**
---
# Section 2: The Basics of PyTorch
*Time estimate: ~2 hours 05 mins*
PyTorch is a Python-based scientific computing package targeted at two sets of
audiences:
- A replacement for NumPy to use the power of GPUs
- A deep learning platform that provides significant flexibility
and speed
At its core, PyTorch provides a few key features:
- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.
- An optimized **autograd** engine for automatically computing derivatives.
- A clean, modular API for building and deploying **deep learning models**.
You can find more information about PyTorch in the appendix.
## Section 2.1: Creating Tensors
```
# @title Video 3: Making Tensors
# Tabbed YouTube/Bilibili video player (same pattern as the other video cells).
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

# add timing to airtable
atform.add_event('Video 3: Making Tensors')

display(out)
```
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so.
**Construct tensors directly:**
---
```
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense

# tensor from a list
a = torch.tensor([0, 1, 2])

#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)

# tensor from a numpy array (the tensor inherits the array's dtype,
# float64 for np.ones)
c = np.ones([2, 3])
c = torch.tensor(c)

print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
```
**Some common tensor constructors:**
---
```
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
# empty() only allocates memory; the values are whatever was already there
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
```
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor.
**Creating random tensors and tensors like other tensors:**
---
```
# there are also constructors for random numbers

# uniform distribution
a = torch.rand(1, 3)

# normal distribution
b = torch.randn(3, 4)

# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
# (the *_like constructors copy shape, dtype and device from their argument)
c = torch.zeros_like(a)
d = torch.rand_like(c)

print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
```
*Reproducibility*:
- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA)
```python
import torch
torch.manual_seed(0)
```
- For custom operators, you might need to set python seed as well:
```python
import random
random.seed(0)
```
- Random number generators in other libraries
```python
import numpy as np
np.random.seed(0)
```
Here, we define for you a function called `set_seed` that does the job for you!
```
def set_seed(seed=None, seed_torch=True):
    """Control randomness across random, NumPy and (optionally) PyTorch.

    NumPy and random modules must be imported by the caller.

    Args:
      seed : Integer
        A non-negative integer that defines the random state. Default is
        `None`, in which case a seed is drawn at random.
      seed_torch : Boolean
        If `True`, also seeds PyTorch (CPU and all CUDA devices) and makes
        cuDNN deterministic. Default is `True`.

    Returns:
      Nothing.
    """
    chosen = np.random.choice(2 ** 32) if seed is None else seed
    random.seed(chosen)
    np.random.seed(chosen)
    if seed_torch:
        torch.manual_seed(chosen)
        torch.cuda.manual_seed_all(chosen)
        torch.cuda.manual_seed(chosen)
        # Trade speed for reproducibility in cuDNN convolution algorithms.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    print(f'Random seed {chosen} has been set.')
```
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
```
def simplefun(seed=True, my_seed=None):
    # Optionally fix the RNG state first so the samples below repeat exactly.
    if seed:
        set_seed(seed=my_seed)

    # uniform distribution
    a = torch.rand(1, 3)
    # normal distribution
    b = torch.randn(3, 4)

    print("Tensor a: ", a)
    print("Tensor b: ", b)

simplefun(seed=True, my_seed=0)  # Turn `seed` to `False` or change `my_seed`
```
**Numpy-like number ranges:**
---
The ```.arange()``` and ```.linspace()``` behave how you would expect them to if you are familiar with numpy.
```
# torch.arange/linspace mirror their NumPy counterparts
# (arange excludes the end point; linspace includes it)
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
```
### Coding Exercise 2.1: Creating Tensors
Below you will find some incomplete code. Fill in the missing code to construct the specified tensors.
We want the tensors:
$A:$ 20 by 21 tensor consisting of ones
$B:$ a tensor with elements equal to the elements of numpy array $Z$
$C:$ a tensor with the same number of elements as $A$ but with values $
\sim U(0,1)$
$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
```
# Student-facing stub; the worked solution appears further below.
def tensor_creation(Z):
    """A function that creates various tensors.

    Args:
      Z (numpy.ndarray): An array of shape

    Returns:
      A : 20 by 21 tensor consisting of ones
      B : a tensor with elements equal to the elements of numpy array Z
      C : a tensor with the same number of elements as A but with values ∼U(0,1)
      D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
    """
    #################################################
    ## TODO for students: fill in the missing code
    ## from the first expression
    raise NotImplementedError("Student exercise: say what they should have done")
    #################################################
    A = ...
    B = ...
    C = ...
    D = ...
    return A, B, C, D

# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')

# numpy array to copy later
Z = np.vander([1, 2, 3], 4)

# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
    """Create the four tensors requested in Coding Exercise 2.1.

    Args:
      Z (numpy.ndarray): source array for tensor B.

    Returns:
      A : 20 by 21 tensor consisting of ones
      B : a tensor with elements equal to the elements of numpy array Z
      C : a tensor with the same number of elements as A, values ~ U(0,1)
      D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
    """
    ones_tensor = torch.ones(20, 21)
    from_numpy = torch.tensor(Z)
    uniform_tensor = torch.rand_like(ones_tensor)
    evens = torch.arange(start=4, end=41, step=2)
    return ones_tensor, from_numpy, uniform_tensor, evens
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')

# numpy array to copy later
Z = np.vander([1, 2, 3], 4)

# Uncomment below to check your function!
# (solution version: the checker prints "All correct!" when the tensors match)
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
```
```
All correct!
```
## Section 2.2: Operations in PyTorch
**Tensor-Tensor operations**
We can perform operations on tensors using methods under ```torch.```
```
# @title Video 4: Tensor Operators
# Tabbed YouTube/Bilibili video player (same pattern as the other video cells).
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

# add timing to airtable
atform.add_event('Video 4: Tensor Operators')

display(out)
```
**Tensor-Tensor operations**
We can perform operations on tensors using methods under ```torch.```
```
a = torch.ones(5, 3)
b = torch.rand(5, 3)
# Destination tensors for the `out=` keyword; they must be pre-allocated
# with a compatible shape.
c = torch.empty(5, 3)
d = torch.empty(5, 3)

# this only works if c and d already exist
torch.add(a, b, out=c)

#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)

print(c)
print(d)
```
However, in PyTorch most common Python operators are overridden.
The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations
```
# The standard arithmetic operators are overloaded to act elementwise.
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y  # The ** operator is exponentiation
```
**Tensor Methods**
Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!)
All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
```
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
# mean() follows the same axis convention as sum()
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
```
**Matrix Operations**
The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section).
Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.t```. Note the lack of brackets for ```Tensor.t``` - it is an attribute, not a method.
### Coding Exercise 2.2 : Simple tensor operations
Below are two expressions involving operations on matrices.
$$ \textbf{A} =
\begin{bmatrix}2 &4 \\5 & 7
\end{bmatrix}
\begin{bmatrix} 1 &1 \\2 & 3
\end{bmatrix}
+
\begin{bmatrix}10 & 10 \\ 12 & 1
\end{bmatrix}
$$
and
$$ b =
\begin{bmatrix} 3 \\ 5 \\ 7
\end{bmatrix} \cdot
\begin{bmatrix} 2 \\ 4 \\ 8
\end{bmatrix}
$$
The code block below that computes these expressions using PyTorch is incomplete - fill in the missing lines.
```
# Student-facing stub; the worked solution appears further below.
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
    ################################################
    ## TODO for students: complete the first computation using the argument matricies
    raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
    ################################################
    # multiplication of tensor a1 with tensor a2 and then add it with tensor a3
    answer = ...
    return answer

# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')

# Computing expression 1:

# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
    """Return a1 @ a2 + a3 (matrix product, then elementwise addition)."""
    product = torch.matmul(a1, a2)
    return product + a3
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')

# Computing expression 1:

# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# (solution version: expected output is [[20, 24], [31, 27]])
A = simple_operations(a1, a2, a3)
print(A)
```
```
tensor([[20, 24],
[31, 27]])
```
```
# Student-facing stub; the worked solution appears further below.
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
    ###############################################
    ## TODO for students: complete the first computation using the argument matricies
    raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
    ###############################################
    # Use torch.dot() to compute the dot product of two tensors
    product = ...
    return product

# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')

# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
    """Return the scalar dot product of two 1D tensors via torch.dot()."""
    return torch.dot(b1, b2)
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')

# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# (solution version: expected output is tensor(82))
b = dot_product(b1, b2)
print(b)
```
```
tensor(82)
```
## Section 2.3 Manipulating Tensors in Pytorch
```
# @title Video 5: Tensor Indexing
# Tabbed YouTube/Bilibili video player (same pattern as the other video cells).
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')

display(out)
```
**Indexing**
Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0, and ranges include the first element of the range but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing.
For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
```
# 1D indexing and slicing work exactly as in NumPy / Python lists.
x = torch.arange(0, 10)
print(x)
print(x[-1])   # last element
print(x[1:3])  # second and third elements
print(x[:-2])  # everything except the last two elements
```
When we have multidimensional tensors, indexing rules work the same way as numpy.
```
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)

# each successive index peels off the leading dimension
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
```
**Flatten and reshape**
There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
```
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")

# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")

# and back to 2D (the total number of elements must stay the same)
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
```
You will also see the ```.view()``` methods used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix.
**Squeezing tensors**
When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...
In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
```
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
# (x[0] selects the whole first row because of the leading singleton dimension)
print(x.shape)
print(f"x[0]: {x[0]}")
```
Because of that pesky singleton dimension, x[0] gave us the first row instead!
```
# lets get rid of that singleton dimension and see what happens now
# (squeeze(0) removes dimension 0 only if it has size 1)
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")

# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")

# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
```
**Permutation**
Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
```
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)

# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)

# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
# NOTE: permute returns a view over the same data, not a copy.
print(x.shape)
```
You may also see ```.transpose()``` used. This works in a similar way as permute, but can only swap two dimensions at once.
**Concatenation**
In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length ( 6 ) is the sum of the two input tensors’ axis-0 lengths ( 3+3 ); while the second output tensor’s axis-1 length ( 8 ) is the sum of the two input tensors’ axis-1 lengths ( 4+4 ).
```
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])

#concatenate them along rows (dim=0 stacks vertically -> shape 6x4)
cat_rows = torch.cat((x, y), dim=0)

# concatenate along columns (dim=1 places side by side -> shape 3x8)
cat_cols = torch.cat((x, y), dim=1)

# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
```
**Conversion to Other Python Objects**
Converting a tensor to a NumPy array, or vice versa, is easy. A conversion made with `torch.tensor(...)` copies the data, so the converted result does not share memory. This minor inconvenience is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. (Note that `Tensor.numpy()`, by contrast, returns an array that shares its storage with the tensor.)
When converting to a numpy array, the information being tracked by the tensor will be lost i.e. the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
```
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")

# torch -> numpy
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")

# numpy -> torch (torch.tensor copies the array's data)
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
```
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
```
# item()/float()/int() extract a Python scalar from a size-1 tensor.
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
```
### Coding Exercise 2.3: Manipulating Tensors
Using a combination of the methods discussed above, complete the functions below.
**Function A**
This function takes in two 2D tensors $A$ and $B$ and returns the column sum of A multiplied by the sum of all the elements of $B$ i.e. a scalar, e.g.,:
$ A = \begin{bmatrix}
1 & 1 \\
1 & 1
\end{bmatrix} \,$
and
$ B = \begin{bmatrix}
1 & 2 & 3\\
1 & 2 & 3
\end{bmatrix} \,$
so
$ \, Out = \begin{bmatrix} 2 & 2 \\
\end{bmatrix} \cdot 12 = \begin{bmatrix}
24 & 24\\
\end{bmatrix}$
**Function B**
This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:
$ C = \begin{bmatrix}
2 & 3 \\
-1 & 10
\end{bmatrix} \,$
so
$ \, Out = \begin{bmatrix}
0 & 2 \\
1 & 3 \\
2 & -1 \\
3 & 10
\end{bmatrix}$
**Hint:** pay close attention to singleton dimensions
**Function C**
This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:
$ D = \begin{bmatrix}
1 & -1 \\
-1 & 3
\end{bmatrix} \,$
and
$ E = \begin{bmatrix}
2 & 3 & 0 & 2 \\
\end{bmatrix} \, $
so
$ \, Out = \begin{bmatrix}
3 & 2 \\
-1 & 5
\end{bmatrix}$
$ D = \begin{bmatrix}
1 & -1 \\
-1 & 3
\end{bmatrix}$
and
$ \, E = \begin{bmatrix}
2 & 3 & 0 \\
\end{bmatrix} \,$
so
$ \, Out = \begin{bmatrix}
1 & -1 & -1 & 3 & 2 & 3 & 0
\end{bmatrix}$
**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
```
def functionA(my_tensor1, my_tensor2):
    """
    This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
    and returns the column sum of
    `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
    i.e., a scalar.

    Args:
      my_tensor1: torch.Tensor
      my_tensor2: torch.Tensor

    Returns:
      output: torch.Tensor
        The multiplication of the column sum of `my_tensor1` by the sum of
        `my_tensor2`.
    """
    ################################################
    ## TODO for students: complete functionA
    raise NotImplementedError("Student exercise: complete function A")
    ################################################
    # TODO multiplication the sum of the tensors
    output = ...

    return output
def functionB(my_tensor):
  """
  This function takes in a square matrix `my_tensor` and returns a 2D tensor
  consisting of a flattened `my_tensor` with the index of each element
  appended to this tensor in the row dimension.
  Args:
    my_tensor: torch.Tensor
  Returns:
    output: torch.Tensor
      Concatenated tensor; each row is (flat index, element value).
  """
  ################################################
  ## TODO for students: complete functionB
  raise NotImplementedError("Student exercise: complete function B")
  ################################################
  # TODO flatten the tensor `my_tensor`
  my_tensor = ...
  # TODO create the idx tensor (0, 1, 2, ...) to be concatenated to `my_tensor`
  idx_tensor = ...
  # TODO concatenate the two tensors as columns (hint: unsqueeze first)
  output = ...
  return output
def functionC(my_tensor1, my_tensor2):
  """
  This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
  If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
  else this function returns a 1D tensor that is the concatenation of the
  two tensors.
  Args:
    my_tensor1: torch.Tensor
    my_tensor2: torch.Tensor
  Returns:
    output: torch.Tensor
      Elementwise sum (same shape as `my_tensor1`) or 1D concatenation.
  """
  ################################################
  ## TODO for students: complete functionC
  raise NotImplementedError("Student exercise: complete function C")
  ################################################
  # TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
  # (hint: compare element counts)
  if ...:
    # TODO reshape `my_tensor2` into the shape of `my_tensor1`
    my_tensor2 = ...
    # TODO sum the two tensors
    output = ...
  else:
    # TODO flatten both tensors
    my_tensor1 = ...
    my_tensor2 = ...
    # TODO concatenate the two tensors in the correct dimension
    output = ...
  return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')

## Implement the functions above and then uncomment the following lines to test your code
# Expected outputs are shown below the solution cell.
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
  """
  Multiply the column sums of `my_tensor1` by the grand total of
  `my_tensor2`.

  Args:
    my_tensor1: torch.Tensor
    my_tensor2: torch.Tensor
  Returns:
    output: torch.Tensor
      1D tensor holding the column sums of `my_tensor1`, each scaled by
      the sum of all elements of `my_tensor2`.
  """
  # Summing over dim=0 collapses the rows, leaving one value per column.
  column_totals = torch.sum(my_tensor1, dim=0)
  # A no-argument sum reduces over every element to a single scalar.
  grand_total = torch.sum(my_tensor2)
  return column_totals * grand_total
def functionB(my_tensor):
  """
  Flatten a square matrix and pair every element with its flat index.

  Args:
    my_tensor: torch.Tensor
  Returns:
    output: torch.Tensor
      2D tensor whose rows are (index, element) pairs, in row-major order.
  """
  flat = my_tensor.flatten()
  # One index per flattened element: 0, 1, ..., numel - 1.
  indices = torch.arange(flat.numel())
  # Stacking along dim=1 makes the indices the first column and the
  # elements the second.
  return torch.stack((indices, flat), dim=1)
def functionC(my_tensor1, my_tensor2):
  """
  Combine two 2D tensors.

  When the two tensors hold the same number of elements, `my_tensor2` is
  reshaped to `my_tensor1`'s shape and the elementwise sum is returned;
  otherwise both tensors are flattened and concatenated into a 1D tensor.

  Args:
    my_tensor1: torch.Tensor
    my_tensor2: torch.Tensor
  Returns:
    output: torch.Tensor
      Elementwise sum (shape of `my_tensor1`) or 1D concatenation.
  """
  if my_tensor1.numel() == my_tensor2.numel():
    # Equal element counts guarantee the reshape is valid.
    return my_tensor1 + my_tensor2.reshape(my_tensor1.shape)
  # Sizes differ: fall back to a flat, one-dimensional concatenation.
  return torch.cat((my_tensor1.flatten(), my_tensor2.flatten()))
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')

## Implement the functions above and then uncomment the following lines to test your code
# Demo: run the three solved functions on the example inputs from the text.
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
```
```
tensor([24, 24])
tensor([[ 0, 2],
[ 1, 3],
[ 2, -1],
[ 3, 10]])
tensor([[ 3, 2],
[-1, 5]])
tensor([ 1, -1, -1, 3, 2, 3, 0])
```
## Section 2.4: GPUs
```
# @title Video 6: GPU vs CPU
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
```
By default, when we create a tensor it will *not* live on the GPU!
```
# Tensors are created on the CPU unless a device is specified explicitly.
x = torch.randn(10)
print(x.device)
```
When using Colab notebooks by default will not have access to a GPU. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page.
By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs.
Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell.
(For more information on the GPU usage policy you can view in the appendix)
**Now we have a GPU**
The cell below should return True.
```
# True only when a CUDA-capable GPU runtime is attached.
print(torch.cuda.is_available())
```
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. *NOTE I am assuming that GPU stuff might be covered in more detail on another day but there could be a bit more detail here.*
In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!
Here, we define the function `set_device`, which returns the device use in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function on top of every tutorial, and we store the device variable such as
```python
DEVICE = set_device()
```
Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
```
def set_device():
  """
  Return the compute device for this notebook.

  Returns:
    device: str
      "cuda" when a CUDA-capable GPU is available, otherwise "cpu". Also
      prints instructions for toggling the GPU runtime in Colab.
  """
  if torch.cuda.is_available():
    device = "cuda"
    print("GPU is enabled in this notebook. \n"
          "If you want to disable it, in the menu under `Runtime` -> \n"
          "`Hardware accelerator.` and select `None` from the dropdown menu")
  else:
    device = "cpu"
    print("GPU is not enabled in this notebook. \n"
          "If you want to enable it, in the menu under `Runtime` -> \n"
          "`Hardware accelerator.` and select `GPU` from the dropdown menu")
  return device
```
Let's make some CUDA tensors!
```
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# DEVICE is "cuda" when a GPU is available, "cpu" otherwise.

# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)

# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
```
**Operations between cpu tensors and cuda tensors**
Note that the type of the tensor changed after calling ```.to()```. What happens if we try and perform operations on tensors on devices?
```
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")

# Uncomment the following line and run this cell
# When DEVICE is "cuda", adding x (GPU) and y (CPU) raises a RuntimeError.
# z = x + y
```
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using the `.cuda()` will throw an error if CUDA is not enabled in your machine.
Generally in this course all deep learning is done on the GPU, while other computation is done on the CPU, so sometimes we have to pass data back and forth between the two — you'll see us do this in the cell below.
```
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)

# moving to cpu so the operands live on the same device
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)

# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
```
### Coding Exercise 2.4: Just how much faster are GPUs?
Below is a simple function. Complete the second function, such that it performs the same operations as the first function, but entirely on the GPU. We will use the helper function `timeFun(f, dim, iterations, device)`.
```
# Workload size and repeat count used by the timing helper below.
dim = 10000
iterations = 1

def simpleFun(dim, device):
  """
  Args:
    dim: integer
    device: "cpu" or "cuda:0"
  Returns:
    Nothing.
  """
  ###############################################
  ## TODO for students: recreate the above function, but
  ## ensure all computation happens on the GPU
  raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
  ###############################################
  # TODO: create the three tensors directly on `device`
  x = ...
  y = ...
  z = ...
  # TODO: repeat the elementwise and matrix multiplications on `device`
  x = ...
  y = ...

  del x
  del y
  del z

## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
  """
  Run a dim x dim elementwise and matrix multiplication on `device`.

  Args:
    dim: integer
      Side length of the square tensors.
    device: "cpu" or "cuda"
  Returns:
    Nothing.
  """
  # Create the tensors directly on the target device: allocating on the
  # CPU and then calling .to(device) adds a host->device copy to the very
  # GPU timing this exercise is trying to measure.
  x = torch.rand(dim, dim, device=device)
  y = torch.rand_like(x)  # rand_like inherits x's device and dtype
  # torch.full builds the constant tensor in one allocation instead of
  # materializing ones() and then multiplying by 2.
  z = torch.full((dim, dim), 2.0, device=device)
  x = x * y  # elementwise multiplication
  x = x @ z  # matrix multiplication
  # Free the large intermediates promptly.
  del x
  del y
  del z
## TODO: Implement the function above and uncomment the following lines to test your code
# Time the identical workload on the CPU and on the selected device.
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
```
Sample output (depends on your hardware)
```
time taken for 1 iterations of simpleFun(10000): 28.50481
time taken for 1 iterations of simpleFunGPU(10000): 0.91102
```
**Discuss!**
Try and reduce the dimensions of the tensors and increase the iterations. You can get to a point where the cpu only function is faster than the GPU function. Why might this be?
## Section 2.5: Datasets and Dataloaders
```
# @title Video 7: Getting Data
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
```
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
```
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
```
**Datasets**
The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.
Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
```
# Download and load the images from the CIFAR10 dataset
# (skips the download if the files are already under ./data)
cifar10_data = datasets.CIFAR10(
    root="data",  # path where the images will be stored
    download=True,  # all images should be downloaded
    transform=ToTensor()  # transform the images to tensors
    )

# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
```
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
```
# Choose a random sample
random.seed(2021)
# random.randint's upper bound is INCLUSIVE, so the valid index range is
# 0 .. len(cifar10_data) - 1; passing len(...) could raise an IndexError.
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
```
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W.
### Coding Exercise 2.5: Display an image from the dataset
Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$.
You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new multidimensional rotated tensor. The size of the returned tensor remains the same as that of the original.
**Code hint:**
```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
```
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)

# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()

# to_remove solution

# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)

# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# imshow expects H x W x C, so move the channel dimension (dim 0) to the end.
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
```
**Training and Test Datasets**
When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the next days.
```
# Load the training samples
training_data = datasets.CIFAR10(
    root="data",
    train=True,  # select the training split
    download=True,
    transform=ToTensor()
    )

# Load the test samples
test_data = datasets.CIFAR10(
    root="data",
    train=False,  # select the held-out test split
    download=True,
    transform=ToTensor()
    )
# @title Video 9: Data Augmentation - Transformations
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
```
**Dataloader**
Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
```
# Create dataloaders with
# batch_size=64: each iteration yields a minibatch of 64 samples
# shuffle=True: sample order is reshuffled every epoch
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
```
*Reproducibility:* DataLoader will reseed workers following Randomness in multi-process data loading algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:
```python
def seed_worker(worker_id):
  """Seed NumPy and `random` inside a DataLoader worker process.

  Derives the seed from torch's per-worker initial seed so every worker
  gets a distinct but reproducible random state.
  """
  # torch.initial_seed() can exceed 32 bits; fold it into the range
  # accepted by numpy's seeding.
  derived_seed = torch.initial_seed() % 2**32
  numpy.random.seed(derived_seed)
  random.seed(derived_seed)
g_seed = torch.Generator()
g_seed.manual_seed(my_seed)
DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=num_workers,
worker_init_fn=seed_worker,
generator=g_seed
)
```
**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more.
We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`.
We can now see that we have a 4D tensor. This is because we have a 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
```
# Load the next batch (shape: batch x channels x height x width)
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)

# Display the first image from the batch; permute to H x W x C for imshow
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
```
**Transformations**
Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Checkout the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details.
### Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale images
The goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
```
def my_data_load():
  """Load CIFAR10 as grayscale tensors, display one random image, and
  return the dataset."""
  ###############################################
  ## TODO for students: recreate the above function, but
  ## ensure all computation happens on the GPU
  raise NotImplementedError("Student exercise: fill in the missing code to load the data")
  ###############################################
  ## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
  data = datasets.CIFAR10(...,
                          transform=...)
  # Display a random grayscale image
  # NOTE(review): randint's upper bound is inclusive, so len(data) can
  # index one past the end — confirm/fix in the solution.
  image, label = data[random.randint(0, len(data))]
  plt.imshow(image.squeeze(), cmap="gray")
  plt.show()
  return data

set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
  """Load CIFAR10 as grayscale tensors and display one random image.

  Returns:
    data: the loaded torchvision CIFAR10 dataset (grayscale tensors).
  """
  ## Load the CIFAR10 data using a transform that converts the images to grayscale tensors
  data = datasets.CIFAR10(root="data", download=True,
                          transform=Compose([ToTensor(), Grayscale()]))
  # Display a random grayscale image.
  # random.randint's upper bound is INCLUSIVE, so use len(data) - 1 to
  # avoid a possible IndexError when the last value is drawn.
  image, label = data[random.randint(0, len(data) - 1)]
  plt.imshow(image.squeeze(), cmap="gray")
  plt.show()
  return data
# Reseed so the randomly chosen image is reproducible
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
```
---
# Section 3: Neural Networks
*Time estimate: ~1 hour 30 mins (excluding movie)*
Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:
- Creating a simple neural network model
- Training the network
- Visualizing the results of the network
- Tweaking the network
```
# @title Video 10: CSV Files
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
```
## Section 3.1: Data Loading
First we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
```
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons

# Create a dataset of 256 points with a little noise
# (two interleaving half circles, labels 0/1)
X, y = make_moons(256, noise=0.1)

# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
```
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
```
# Load the data from the CSV file in a Pandas DataFrame
# NOTE(review): to_csv above wrote the index as an extra unnamed column;
# it is simply ignored here since columns are selected by name.
data = pd.read_csv("sample_data.csv")

# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()

# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()

# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")

# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
```
**Prepare Data for PyTorch**
Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
```
# Initialize the device variable
DEVICE = set_device()

# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)

# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")

# Convert the labels to a long integer tensor (required by CrossEntropyLoss)
y = torch.from_numpy(y_orig).type(torch.LongTensor)

# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
```
## Section 3.2: Create a Simple Neural Network
```
# @title Video 11: Generating the Neural Network
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
```
For this example we want to have a simple neural network consisting of 3 layers:
- 1 input layer of size 2 (our points have 2 coordinates)
- 1 hidden layer of size 16 (you can play with different numbers here)
- 1 output layer of size 2 (we want the have the scores for the two classes)
During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.
**Programing the Network**
PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:
`__init__`
In the `__init__` method you need to define the structure of your network. Here you will specify what layers will the network consist of, what activation functions will be used etc.
`forward`
All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.
`predict`
This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.
`train`
This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.
> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
```
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
  """A minimal fully connected classifier for 2D points.

  Architecture: 2 inputs -> 16 hidden units (ReLU) -> 2 class scores.
  """

  def __init__(self):
    super(NaiveNet, self).__init__()
    # The network is a fixed sequence of operations. ReLU is a cheap
    # non-linearity: it returns 0 for any negative input and passes
    # positive values through unchanged.
    self.layers = nn.Sequential(
        nn.Linear(2, 16),  # input -> hidden
        nn.ReLU(),
        nn.Linear(16, 2),  # hidden -> class scores
    )

  def forward(self, x):
    """Compute raw class scores for a batch of points."""
    return self.layers(x)

  def predict(self, x):
    """Return the index of the highest-scoring class for each point."""
    scores = self.forward(x)
    return scores.argmax(dim=1)

  def train(self, X, y):
    """Placeholder; training is implemented later in the notebook."""
    # NOTE(review): this shadows nn.Module.train(mode=True), so calls like
    # model.train() elsewhere would hit this stub — confirm this is intended.
    pass
```
**Check that your network works**
Create an instance of your model and visualize it
```
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)

# Print the structure of the network (its layers, in order)
print(model)
```
### Coding Exercise 3.2: Classify some samples
Now let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet.
The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
```
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)

## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)

## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)

# to_remove solution

## Get the samples (the first five points of the dataset)
X_samples = X[0:5]
print("Sample input:\n", X_samples)

# Do a forward pass of the network to get raw (untrained) class scores
output = model.forward(X_samples)
print("\nNetwork output:\n", output)

# Predict the label of each point (argmax over the two class scores)
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
```
```
Sample input:
tensor([[ 0.9066, 0.5052],
[-0.2024, 1.1226],
[ 1.0685, 0.2809],
[ 0.6720, 0.5097],
[ 0.8548, 0.5122]], device='cuda:0')
Network output:
tensor([[ 0.1543, -0.8018],
[ 2.2077, -2.9859],
[-0.5745, -0.0195],
[ 0.1924, -0.8367],
[ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)
Predicted labels:
tensor([0, 0, 1, 0, 0], device='cuda:0')
```
## Section 3.3: Train Your Neural Network
```
# @title Video 12: Train the Network
# Display the lecture video with YouTube and Bilibili players in tabs.
from ipywidgets import widgets

# Bilibili player (for viewers without YouTube access).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

# YouTube player.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

# Show both players side by side as tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
```
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!
You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
```
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
    """Draw the model's decision regions with the training points on top.

    The data is moved to the CPU, a dense grid covering the data (padded by
    half a unit on each side) is classified with ``model.predict``, and the
    predicted label field is rendered as a filled contour plot with the
    samples scattered over it. Also ensures the ``frames/`` folder exists so
    callers can save snapshots of the figure.

    Args:
        model: network exposing a ``predict`` method returning class labels.
        X: 2-D input tensor of shape (n_samples, 2) — assumed two features.
        y: 1-D tensor of integer class labels.
        device: torch device on which the grid is evaluated.
    """
    # Transfer the data to the CPU for numpy/matplotlib use.
    X, y = X.cpu().numpy(), y.cpu().numpy()

    # Make sure the output folder for animation frames exists.
    Path("frames").mkdir(exist_ok=True)

    # Bounding box of the data, padded by half a unit on every side.
    pad = .5
    x_lo, x_hi = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_lo, y_hi = X[:, 1].min() - pad, X[:, 1].max() + pad

    # Dense grid with spacing `step` between neighbouring points.
    step = 0.01
    xx, yy = np.meshgrid(np.arange(x_lo, x_hi, step),
                         np.arange(y_lo, y_hi, step))

    # Classify every grid point, then reshape the labels back onto the grid.
    flat = np.c_[xx.ravel(), yy.ravel()]
    grid = torch.from_numpy(flat).type(torch.FloatTensor)
    Z = model.predict(grid.to(device)).cpu().numpy().reshape(xx.shape)

    # Filled contours for the decision regions, scatter for the samples.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
    """Train `model` on the full dataset (X, y) with SGD and cross-entropy loss.

    Every 1000 epochs the current loss is printed and a snapshot of the
    decision boundary is saved to ``frames/`` (used later to build a GIF).

    Args:
        model: the network to optimize; its output is interpreted as raw logits.
        X: input tensor, one row per sample.
        y: tensor of integer class labels.

    Returns:
        List of per-epoch loss values, for plotting the training curve.
    """
    # The Cross Entropy Loss is suitable for classification problems
    loss_function = nn.CrossEntropyLoss()

    # Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
    learning_rate = 1e-2
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Number of epochs
    epochs = 15000

    # List of losses for visualization
    losses = []

    for i in range(epochs):
        # Pass the data through the network and compute the loss.
        # We'll use the whole dataset during the training instead of using batches
        # in order to keep the code simple for now.
        # Fix: call the model directly rather than model.forward(X) — invoking
        # forward() bypasses the hooks that nn.Module.__call__ runs.
        y_logits = model(X)
        loss = loss_function(y_logits, y)

        # Clear the previous gradients and compute the new ones
        optimizer.zero_grad()
        loss.backward()

        # Adapt the weights of the network
        optimizer.step()

        # Store the loss
        losses.append(loss.item())

        # Print the results at every 1000th epoch
        if i % 1000 == 0:
            print(f"Epoch {i} loss is {loss.item()}")
            plot_decision_boundary(model, X, y, DEVICE)
            plt.savefig('frames/{:05d}.png'.format(i))

    return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
```
**Plot the loss during training**
Plot the loss during the training to see how it reduces and converges.
```
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
```
### Exercise 3.3: Tweak your Network
You can now play around with the network a little bit to get a feeling of what different parameters are doing. Here are some ideas what you could try:
- Increase or decrease the number of epochs for training
- Increase or decrease the size of the hidden layer
- Add one additional hidden layer
Can you get the network to better fit the data?
```
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
```
The Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true, a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.
In case of two inputs ($X$ and $Y$) the following truth table is applied:
\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}
Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms.
### Interactive Demo 3.3: Solving XOR
Here we use an open source and famous visualization widget developed by Tensorflow team available [here](https://github.com/tensorflow/playground).
* Play with the widget and observe that you can not solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.
For the second part, you should set the weights by clicking on the connections and either type the value or use the up and down keys to change it by one increment. You could also do the same for the biases by clicking on the tiny square to each neuron's bottom left.
Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:
\begin{equation}
y = f(x_1)+f(x_2)-f(x_1+x_2)
\end{equation}
Try to set the weights and biases to implement this function after you played enough :)
```
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
```
---
# Section 4: Ethics And Course Info
```
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
Meet our lecturers:
Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)
Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)
Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lamonaco](https://www.vincenzolomonaco.com/)
Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map?
---
# Submit to Airtable
```
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to airtable.
<br>
<img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/DapperLion.png" alt="Darryl">
<br><br>
At the end of each tutorial there will be an <b>Airtable Submission</b> Cell. Run the cell to generate the airtable submission button and click on it to submit your information to airtable.
<br><br>
if it is the last tutorial of the day your button will look like this and take you to the end of day survey:
<br>
<img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/SurveyButton.png?raw=1" alt="Survey Button">
otherwise it will look like this:
<br>
<img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/AirtableSubmissionButton.png?raw=1" alt="Submission Button">
<br><br>
It is critical that you push the submit button for every tutorial you run. <b><u> even if you don't finish the tutorial, still submit!</b></u>
Submitting is the only way we can verify that you attempted each tutorial, which is critical for the award of your completion certificate at the end of the course.
<br><br><br>
Finally, we try to keep the airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving airtable information only.<b> It will not affect the code that is being run around it in any way</b> , so please do not modify, comment out, or worry about any of those lines of code.
<br><br><br>
Now, lets try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
```
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
```
---
# Bonus - 60 years of Machine Learning Research in one Plot
by [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.
In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.
*The selection is very biased on the keywords and methodology we used to filter. Please see the details section to learn about what we did.
```
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import altair as alt # altair is defining data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
    """Download paper positions and metadata and join them into one DataFrame.

    Returns one row per paper: the 2-D embedding coordinates ('x', 'y')
    unpacked from POS_FILE, merged with the title/abstract/author columns
    from META_FILE on the shared paper id.
    """
    # Unpack each two-element 'pos' vector into separate x / y columns.
    pos_df = pd.read_json(POS_FILE)
    pos_df[['x', 'y']] = pos_df['pos'].to_list()
    # Metadata maps the same ids to title, abstract, authors, ...
    meta_df = pd.read_csv(META_FILE)
    merged = pos_df.merge(meta_df, left_on='id', right_on='paper_id')
    return merged
# load data
data = load_data()
# @title Define Visualization using ALtair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
```
Lets look at the Visualization. Each dot represents one paper. Close dots mean that the respective papers are closer related than distant ones. The color indicates the 5-year period of when the paper was published. The dot size indicates the citation count (within S2ORC corpus) as of July 2020.
The view is **interactive** and allows for three main interactions. Try them and play around.
1. hover over a dot to see a tooltip (title, author)
2. select a year in the legend (right) to filter dots
2. zoom in/out with scroll -- double click resets view
```
chart
```
## Questions
By playing around, can you find some answers to the following questions?
1. Can you find topical clusters? What cluster might occur because of a filtering error?
2. Can you see a temporal trend in the data and clusters?
2. Can you determine when deep learning methods started booming ?
3. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color)
## Methods
Here is what we did:
1. Filtering of all papers that fulfilled the criteria:
- are categorized as `Computer Science` or `Mathematics`
- one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`
2. per year, remove all papers that are below the 99 percentile of citation count in that year
3. embed each paper by using abstract+title in SPECTER model
4. project based on embedding using UMAP
5. visualize using Altair
### Find Authors
```
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment do ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
```
---
# Appendix
## Official PyTorch resources:
### Tutorials
https://pytorch.org/tutorials/
### Documentation
https://pytorch.org/docs/stable/tensors.html (tensor methods)
https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view (The view method in particular)
https://pytorch.org/vision/stable/datasets.html (pre-loaded image datasets)
## Google Colab Resources:
https://research.google.com/colaboratory/faq.html (FAQ including guidance on GPU usage)
## Books for reference:
https://www.deeplearningbook.org/ (Deep Learning by Ian Goodfellow, Yoshua Bengio and Aaron Courville)
| github_jupyter |
# Logistic Regression
## Resources:
[Logistic Regression Tutorial for Machine Learning](http://machinelearningmastery.com/logistic-regression-tutorial-for-machine-learning/)
[Logistic Regression for Machine Learning](http://machinelearningmastery.com/logistic-regression-for-machine-learning/)
[How To Implement Logistic Regression With Stochastic Gradient Descent From Scratch With Python](http://machinelearningmastery.com/implement-logistic-regression-stochastic-gradient-descent-scratch-python/)
Logistic regression is the go-to linear classification algorithm for two-class problems. It is easy to implement, easy to understand and gets great results on a wide variety of problems, even when the expectations the method has for your data are violated.
### Description
#### Logistic Regression
Logistic regression is named for the function used at the core of the method, the [logistic function](https://en.wikipedia.org/wiki/Logistic_function).
The logistic function, also called the **Sigmoid function** was developed by statisticians to describe properties of population growth in ecology, rising quickly and maxing out at the carrying capacity of the environment. It’s an S-shaped curve that can take any real-valued number and map it into a value between 0 and 1, but never exactly at those limits.
$$\frac{1}{1 + e^{-x}}$$
$e$ is the base of the natural logarithms and $x$ is value that you want to transform via the logistic function.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn
%matplotlib inline
x = np.linspace(-6, 6, num = 1000)
plt.figure(figsize = (12,8))
plt.plot(x, 1 / (1 + np.exp(-x))); # Sigmoid Function
plt.title("Sigmoid Function");
```
***
The logistic regression equation has a very similar representation to linear regression. The difference is that the output value being modelled is binary in nature.
$$\hat{y}=\frac{e^{\beta_0+\beta_1x_1}}{1+e^{\beta_0+\beta_1x_1}}$$
or
$$\hat{y}=\frac{1.0}{1.0+e^{-\beta_0-\beta_1x_1}}$$
$\beta_0$ is the intercept term
$\beta_1$ is the coefficient for $x_1$
$\hat{y}$ is the predicted output with real value between 0 and 1. To convert this to binary output of 0 or 1, this would either need to be rounded to an integer value or a cutoff point be provided to specify the class segregation point.
```
tmp = [0, 0.4, 0.6, 0.8, 1.0]
tmp
np.round(tmp)
np.array(tmp) > 0.7
```
***
# Making Predictions with Logistic Regression
$$\hat{y}=\frac{1.0}{1.0+e^{-\beta_0-\beta_1x_i}}$$
$\beta_0$ is the intercept term
$\beta_1$ is the coefficient for $x_i$
$\hat{y}$ is the predicted output with real value between 0 and 1. To convert this to binary output of 0 or 1, this would either need to be rounded to an integer value or a cutoff point be provided to specify the class segregation point.
```
dataset = [[-2.0011, 0],
[-1.4654, 0],
[0.0965, 0],
[1.3881, 0],
[3.0641, 0],
[7.6275, 1],
[5.3324, 1],
[6.9225, 1],
[8.6754, 1],
[7.6737, 1]]
```
Let's say you have been provided with the coefficient
```
coef = [-0.806605464, 0.2573316]
for row in dataset:
yhat = 1.0 / (1.0 + np.exp(- coef[0] - coef[1] * row[0]))
print("yhat {0:.4f}, yhat {1}".format(yhat, round(yhat)))
```
***
# Learning the Logistic Regression Model
The coefficients (Beta values b) of the logistic regression algorithm must be estimated from your training data. This is done using [maximum-likelihood estimation](https://en.wikipedia.org/wiki/Maximum_likelihood_estimation).
Maximum-likelihood estimation is a common learning algorithm used by a variety of machine learning algorithms, although it does make assumptions about the distribution of your data (more on this when we talk about preparing your data).
The best coefficients would result in a model that would predict a value very close to 1 (e.g. male) for the default class and a value very close to 0 (e.g. female) for the other class. The intuition for maximum-likelihood for logistic regression is that a search procedure seeks values for the coefficients (Beta values) that minimize the error in the probabilities predicted by the model to those in the data (e.g. probability of 1 if the data is the primary class).
We are not going to go into the math of maximum likelihood. It is enough to say that a minimization algorithm is used to optimize the best values for the coefficients for your training data. This is often implemented in practice using efficient numerical optimization algorithm (like the Quasi-newton method).
When you are learning logistic, you can implement it yourself from scratch using the much simpler gradient descent algorithm.
# Learning with Stochastic Gradient Descent
Logistic Regression uses gradient descent to update the coefficients.
Each gradient descent iteration, the coefficients are updated using the equation:
$$\beta=\beta+\textrm{learning rate}\times (y-\hat{y}) \times \hat{y} \times (1-\hat{y}) \times x $$
***
# Using Scikit Learn to Estimate Coefficients
```
from sklearn.linear_model import LogisticRegression
dataset
X = np.array(dataset)[:, 0:1]
y = np.array(dataset)[:, 1]
X
y
clf_LR = LogisticRegression(C=1.0, penalty='l2', tol=0.0001)
clf_LR.fit(X,y)
clf_LR.predict(X)
clf_LR.predict_proba(X)
```
# Further Resources
[A comparison of numerical optimizers for logistic regression](https://tminka.github.io/papers/logreg/)
[PDF: A comparison of numerical optimizers for logistic regression](https://tminka.github.io/papers/logreg/minka-logreg.pdf)
***
# Classification Exercise
```
dataset2 = [[ 0.2, 0. ],
[ 0.2, 0. ],
[ 0.2, 0. ],
[ 0.2, 0. ],
[ 0.2, 0. ],
[ 0.4, 0. ],
[ 0.3, 0. ],
[ 0.2, 0. ],
[ 0.2, 0. ],
[ 0.1, 0. ],
[ 1.4, 1. ],
[ 1.5, 1. ],
[ 1.5, 1. ],
[ 1.3, 1. ],
[ 1.5, 1. ],
[ 1.3, 1. ],
[ 1.6, 1. ],
[ 1. , 1. ],
[ 1.3, 1. ],
[ 1.4, 1. ]]
X = np.array(dataset2)[:, 0:1]
y = np.array(dataset2)[:, 1]
clf_LR = LogisticRegression(C=1.0, penalty='l2', tol=0.0001)
clf_LR.fit(X,y)
y_pred = clf_LR.predict(X)
clf_LR.predict(X)
np.column_stack((y_pred, y))
```
***
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
movies = pd.read_csv("ml-latest-small/movies.csv")
movies_rating = pd.read_csv("ml-latest-small/ratings.csv")
sum_movies_genres = movies["genres"].str.get_dummies('|').sum()
movies_by_genre = sum_movies_genres.sort_values(ascending=False)
movies_by_genre
```
# Desafio 1 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
## Rotacionar os thicks (os nomes dos generos) do gráfico de barras verdes (o último), de forma a deixar as legendas mais legíveis.
```
sns.set_style("whitegrid")
movies_by_genre = sum_movies_genres.sort_values(ascending=False)
plt.figure(figsize=(16,8))
gr = sns.barplot(x=movies_by_genre.index,
y=movies_by_genre.values,
palette=sns.color_palette("BuGn_r", n_colors=len(movies_by_genre) + 4))
gr.set_xticklabels(movies_by_genre.index.array, rotation=90, ha='right')
plt.show()
```
# Desafio 2 do [Paulo Silveira](https://twitter.com/paulo_caelum)
## Encontar vários filmes com médias próximas e distribuições diferentes, use a função **plot_filmes(n)** para plotar.
```
def plot_movie(id):
    """Plot a rating histogram for one movie and return its summary statistics.

    Looks the movie up by id in the module-level `movies_rating` frame.
    """
    # All individual ratings given to this movieId.
    ratings = movies_rating.query(f"movieId=={id}")['rating']
    ratings.plot(kind='hist')
    # count / mean / std / quartiles of the rating distribution.
    return ratings.describe()
rating = movies_rating.groupby("movieId")['rating'].mean()
movies_with_mean = movies.join(rating, on="movieId")
count_votes_by_movieId = movies_rating.groupby('movieId')['rating'].count()
count_votes_by_movieId.rename('votes', inplace=True)
movies_with_rating_and_votes = movies_with_mean.join(count_votes_by_movieId, on="movieId").round({'rating':1})
movies_clean = movies_with_rating_and_votes.query("votes > 100").sort_values(by="votes", ascending=False)
movies_clean
movies_clean.query("rating == 3")
plot_movie(344)
plot_movie(586)
plot_movie(185)
plot_movie(434)
```
## Desafio 3 do [Paulo Silveira](https://twitter.com/paulo_caelum)
### Criar o boxplot dos 10 filmes com mais votos (não é com maior média, é com mais votos!). Não apenas plot mas também analise e tente tirar conclusões.
```
movies_id = movies_clean.head(10)["movieId"].tolist()
plt.figure(figsize=(16, 8))
sns.boxplot(data = movies_rating.query(f"movieId in {movies_id}"), x ="movieId", y="rating")
```
### Dentre os 10, o filme de id 318 possui a melhor distribuição de notas.
## Desafio 4 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)
### Configurar a visualização do boxplot gerado pelo seaborn (último boxplot plotado na aula). Configurar o tamanho e colocar o nome dos filmes nos thicks.
```
plt.figure(figsize=(12, 8))
boxplot_data = movies_rating.query("movieId in [1,2,919,46578]").merge(movies[["movieId", "title"]], on="movieId")
sns.boxplot(data = boxplot_data, x = "title", y="rating")
```
## Desafio 5 do [Allan Spadini](https://twitter.com/allanspadini)
### Calcular moda, média e mediana dos filmes. Explore filmes com notas mais próximas de 0.5, 3 e 5.
```
def movies_3m(id):
    """Print the title and the three central-tendency measures of a movie's ratings.

    Output labels are in Portuguese: Média (mean), Mediana (median), Moda (mode).
    Reads the module-level `movies_rating` and `movies` frames.
    """
    ratings = movies_rating.loc[movies_rating["movieId"] == id, "rating"]
    title = movies.loc[movies["movieId"] == id, "title"]
    print(title.values[0])
    print("Média = ", ratings.mean())
    print("Mediana = ", ratings.median())
    # mode() can return several values; report the first, as before.
    print("Moda = ", ratings.mode()[0])
    print('\n')
for n in movies_with_mean.query("rating > 0.1 and rating < 1")["movieId"].head(5):
movies_3m(n)
for n in movies_with_mean.query("rating > 2.4 and rating < 3.6")["movieId"].head(5):
movies_3m(n)
for n in movies_with_mean.query("rating > 4 and rating <= 5")["movieId"].head(5):
movies_3m(n)
```
## Desafio 6 da [Thais André](https://twitter.com/thais_tandre)
### Plotar o boxplot e o histograma um do lado do outro (na mesma figura ou em figuras distintas, mas um do lado do outro).
```
def plot_movie_box(id):
    """Show a histogram and a boxplot of one movie's ratings side by side.

    Args:
        id: movieId to look up in the module-level `movies_rating` frame.

    Returns:
        Summary statistics (count/mean/std/quartiles) of the ratings.
    """
    rating_movie = movies_rating.query(f"movieId=={id}")['rating']
    # Two panels in one figure: histogram on the left, boxplot on the right.
    fig, (ax, ax2) = plt.subplots(ncols=2, sharey=False, figsize=(16, 8))
    rating_movie.plot(ax=ax, kind='hist')
    # Bug fix: draw the boxplot explicitly on the second axes. The original
    # `rating_movie.plot.box().plot()` relied on the implicit current axes
    # and ended with a no-op Axes.plot() call.
    rating_movie.plot(ax=ax2, kind='box')
    plt.show()
    return rating_movie.describe()
plot_movie_box(919)
```
## Desafio 7 do [Thiago Gonçalves](https://twitter.com/tgcsantos)
### Criar um gráfico de notas médias por ano (média geral considerando todos os filmes lançados naquele ano).
```
movies_with_mean['release_date'] = movies['title'].str.extract('.*\((.*)\).*', expand = False)
movies_with_mean
movies_with_mean.sort_values(by='release_date')
mean_by_date = movies_with_mean.dropna().groupby('release_date').mean().reset_index()[['release_date', 'rating']]
mean_by_date.sort_values(by="release_date", ascending=False, inplace=True)
mean_by_date
plt.figure(figsize=(16,8))
gr = sns.barplot(
x=mean_by_date['release_date'].head(10),
y=mean_by_date['rating'].head(10)
)
plt.show()
```
| github_jupyter |
# The importance of constraints
Constraints determine which potential adversarial examples are valid inputs to the model. When determining the efficacy of an attack, constraints are everything. After all, an attack that looks very powerful may just be generating nonsense. Or, perhaps more nefariously, an attack may generate a real-looking example that changes the original label of the input. That's why you should always clearly define the *constraints* your adversarial examples must meet.
[](https://colab.research.google.com/github/QData/TextAttack/blob/master/docs/2notebook/2_Constraints.ipynb)
[](https://github.com/QData/TextAttack/blob/master/docs/2notebook/2_Constraints.ipynb)
### Classes of constraints
TextAttack evaluates constraints using methods from three groups:
- **Overlap constraints** determine if a perturbation is valid based on character-level analysis. For example, some attacks are constrained by edit distance: a perturbation is only valid if it perturbs some small number of characters (or fewer).
- **Grammaticality constraints** filter inputs based on syntactical information. For example, an attack may require that adversarial perturbations do not introduce grammatical errors.
- **Semantic constraints** try to ensure that the perturbation is semantically similar to the original input. For example, we may design a constraint that uses a sentence encoder to encode the original and perturbed inputs, and enforce that the sentence encodings be within some fixed distance of one another. (This is what happens in subclasses of `textattack.constraints.semantics.sentence_encoders`.)
### A new constraint
To add our own constraint, we need to create a subclass of `textattack.constraints.Constraint`. We can implement one of two functions, either `_check_constraint` or `_check_constraint_many`:
- `_check_constraint` determines whether candidate `TokenizedText` `transformed_text`, transformed from `current_text`, fulfills a desired constraint. It returns either `True` or `False`.
- `_check_constraint_many` determines whether each of a list of candidates `transformed_texts` fulfill the constraint relative to `current_text`. This is here in case your constraint can be vectorized. If not, just implement `_check_constraint`, and `_check_constraint` will be executed for each `(transformed_text, current_text)` pair.
### A custom constraint
For fun, we're going to see what happens when we constrain an attack to only allow perturbations that substitute out a named entity for another. In linguistics, a **named entity** is a proper noun, the name of a person, organization, location, product, etc. Named Entity Recognition is a popular NLP task (and one that state-of-the-art models can perform quite well).
### NLTK and Named Entity Recognition
**NLTK**, the Natural Language Toolkit, is a Python package that helps developers write programs that process natural language. NLTK comes with predefined algorithms for lots of linguistic tasks– including Named Entity Recognition.
First, we're going to write a constraint class. In the `_check_constraints` method, we're going to use NLTK to find the named entities in both `current_text` and `transformed_text`. We will only return `True` (that is, our constraint is met) if `transformed_text` has substituted one named entity in `current_text` for another.
Let's import NLTK and download the required modules:
```
import nltk
nltk.download('punkt') # The NLTK tokenizer
nltk.download('maxent_ne_chunker') # NLTK named-entity chunker
nltk.download('words') # NLTK list of words
```
### NLTK NER Example
Here's an example of using NLTK to find the named entities in a sentence:
```
sentence = ('In 2017, star quarterback Tom Brady led the Patriots to the Super Bowl, '
'but lost to the Philadelphia Eagles.')
# 1. Tokenize using the NLTK tokenizer.
tokens = nltk.word_tokenize(sentence)
# 2. Tag parts of speech using the NLTK part-of-speech tagger.
tagged = nltk.pos_tag(tokens)
# 3. Extract entities from tagged sentence.
entities = nltk.chunk.ne_chunk(tagged)
print(entities)
```
It looks like `nltk.chunk.ne_chunk` gives us an `nltk.tree.Tree` object where named entities are also `nltk.tree.Tree` objects within that tree. We can take this a step further and grab the named entities from the tree of entities:
```
# 4. Filter entities to just named entities.
named_entities = [entity for entity in entities if isinstance(entity, nltk.tree.Tree)]
print(named_entities)
```
### Caching with `@functools.lru_cache`
A little-known feature of Python 3 is `functools.lru_cache`, a decorator that allows users to easily cache the results of a function in an LRU cache. We're going to be using the NLTK library quite a bit to tokenize, parse, and detect named entities in sentences. These sentences might repeat themselves. As such, we'll use this decorator to cache named entities so that we don't have to perform this expensive computation multiple times.
### Putting it all together: getting a list of Named Entity Labels from a sentence
Now that we know how to tokenize, parse, and detect named entities using NLTK, let's put it all together into a single helper function. Later, when we implement our constraint, we can query this function to easily get the entity labels from a sentence. We can even use `@functools.lru_cache` to try and speed this process up.
```
import functools
@functools.lru_cache(maxsize=2**14)
def get_entities(sentence):
    """Return the flat list of ``(word, tag)`` leaves for ``sentence``.

    Tokenizes and POS-tags the sentence, then runs NLTK's named-entity
    chunker. Results are memoized in an LRU cache, since the attack may
    analyze the same sentence many times.
    """
    pos_tags = nltk.pos_tag(nltk.word_tokenize(sentence))
    # `binary=True` collapses the detailed entity labels ('Organization',
    # 'Geo-Political Entity', ...) into a single generic 'NE' tag.
    chunk_tree = nltk.chunk.ne_chunk(pos_tags, binary=True)
    return chunk_tree.leaves()
```
And let's test our function to make sure it works:
```
sentence = 'Jack Black starred in the 2003 film classic "School of Rock".'
get_entities(sentence)
```
We flattened the tree of entities, so the return format is a list of `(word, entity type)` tuples. For non-entities, the `entity_type` is just the part of speech of the word. `'NNP'` is the indicator of a named entity (a proper noun, according to NLTK). Looks like we identified three named entities here: 'Jack' and 'Black', 'School', and 'Rock'. (Seems that the labeler thinks 'Rock' is the name of a place — a city or something.) Whatever technique NLTK uses for named entity recognition may be a bit rough, but it did a pretty decent job here!
### Creating our NamedEntityConstraint
Now that we know how to detect named entities using NLTK, let's create our custom constraint.
```
from textattack.constraints import Constraint
class NamedEntityConstraint(Constraint):
    """A constraint that ensures `transformed_text` only substitutes named
    entities from `current_text` with other named entities.
    """
    def _check_constraint(self, transformed_text, current_text):
        """Return True iff every word swapped between the two texts is a
        named entity ('NNP' POS tag or binary 'NE' entity tag) in both."""
        transformed_entities = get_entities(transformed_text.text)
        current_entities = get_entities(current_text.text)
        # If there aren't named entities, let's return False (the attack
        # will eventually fail).
        if len(current_entities) == 0:
            return False
        if len(current_entities) != len(transformed_entities):
            # If the two sentences have a different number of entities, then
            # they definitely don't have the same labels. In this case, the
            # constraint is violated, and we return False.
            return False
        # Compare all of the words, in order. If two words differ, a word was
        # swapped between `current_text` and `transformed_text`; that word
        # must be tagged as a named entity on both sides to fulfill our
        # constraint. (The unused label-tracking variables from the original
        # implementation have been removed.)
        for (word_1, label_1), (word_2, label_2) in zip(current_entities, transformed_entities):
            if word_1 != word_2:
                if (label_1 not in ['NNP', 'NE']) or (label_2 not in ['NNP', 'NE']):
                    return False
        # If we get here, every swapped word was a named entity. Return True!
        return True
```
### Testing our constraint
We need to create an attack and a dataset to test our constraint on. We went over all of this in the transformations tutorial, so let's gloss over this part for now.
```
# Import the victim model: an ALBERT classifier fine-tuned on Yelp polarity.
import transformers
from textattack.models.tokenizers import AutoTokenizer
from textattack.models.wrappers import HuggingFaceModelWrapper
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/albert-base-v2-yelp-polarity")
tokenizer = AutoTokenizer("textattack/albert-base-v2-yelp-polarity")
model_wrapper = HuggingFaceModelWrapper(model, tokenizer)
# Create the goal function using the model: untargeted classification only
# requires the prediction to flip away from the original label.
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model_wrapper)
# Import the dataset
from textattack.datasets import HuggingFaceDataset
dataset = HuggingFaceDataset("yelp_polarity", None, "test")
from textattack.transformations import WordSwapEmbedding
from textattack.search_methods import GreedySearch
from textattack.shared import Attack
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
# We're going to use the `WordSwapEmbedding` transformation. Using the default settings, this
# will try substituting words with their neighbors in the counter-fitted embedding space.
transformation = WordSwapEmbedding(max_candidates=15)
# We'll use the greedy search method again
search_method = GreedySearch()
# Our constraints will be the same as Tutorial 1, plus the named entity constraint.
# NOTE(review): the positional `False` presumably sets `compare_against_original`
# on the Constraint base class — confirm against its __init__ signature.
constraints = [RepeatModification(),
StopwordModification(),
NamedEntityConstraint(False)]
# Now, let's make the attack using these parameters.
attack = Attack(goal_function, constraints, transformation, search_method)
print(attack)
```
Now, let's use our attack. We're going to attack samples until we achieve 5 successes. (There's a lot to check here, and since we're using a greedy search over all potential word swap positions, each sample will take a few minutes. This will take a few hours to run on a single core.)
```
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
results_iterable = attack.attack_dataset(dataset)
logger = CSVLogger(color_method='html')
num_successes = 0
# Keep pulling results from the (lazy) attack iterator until we have logged
# 5 successful attacks; failed and skipped results are simply discarded.
while num_successes < 5:
    result = next(results_iterable)
    if isinstance(result, SuccessfulAttackResult):
        logger.log_attack_result(result)
        num_successes += 1
        print(f'{num_successes} of 5 successes complete.')
```
Now let's visualize our 5 successes in color:
```
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
```
### Conclusion
Our constraint seems to have done its job: it filtered out attacks that did not swap out a named entity for another, according to the NLTK named entity detector. However, we can see some problems inherent in the detector: it often thinks the first word of a given sentence is a named entity, probably due to capitalization.
We did manage to produce some nice adversarial examples! "Sigh" became "Inahles" and the prediction shifted from negative to positive.
| github_jupyter |
```
import os
import glob
import pandas as pd
import numpy as np
from tqdm import tqdm
import pickle
from copy import copy
# Text file listing data sources; only lines mentioning a CVE ID are kept below.
sources_with_data_text = os.path.join('data', 'sources_with_data.txt')
with open (sources_with_data_text, mode='r') as f:
    lines = f.readlines()
# Sanity check: the context manager should have closed the file already.
assert f.closed
# Strip trailing whitespace (including newlines) from each line.
lines = [l.strip() for l in lines]
# Keep only lines that reference a CVE and drop the rest.
lines = [l for l in lines if 'CVE' in l]
unique_cve = (set(lines))
print("Found {} unique CVEs in {}".format(len(unique_cve), sources_with_data_text))
def load_obj(path):
    """Deserialize and return the pickled object stored in the file at `path`."""
    with open(path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
# Collect CVE lists from each pickled Broadcom dictionary.
broadcom_arr = []
for file in tqdm(glob.glob('broadcom_dicts/*.pkl')):
    obj = load_obj(file)
    # Only extend when the 'CVE' list is non-empty.
    if obj['CVE']:
        broadcom_arr.extend(obj['CVE'])
broadcom_cve = (set(broadcom_arr))
print("Found {} unique CVEs in {}".format(len(broadcom_cve), 'broadcom dicts'))
# Union of the Broadcom CVEs with the CVEs found in sources_with_data.txt.
cve_in_wild = copy(broadcom_cve)
cve_in_wild.update(unique_cve)
#cve_in_wild = list(cve_in_wild)
# Fixed typo in output message: "overll" -> "overall".
print("Found {} unique CVEs overall".format(len(cve_in_wild)))
# Fix inconsistencies introduced during data collection.
# Pass 1: strip stray tokens that were glued onto the CVE IDs.
for _junk in ('1)', 'service', '3)', '_3'):
    cve_in_wild = [cve.replace(_junk, '') for cve in cve_in_wild]
# Drop anything too short to be a plausible CVE ID after cleanup.
cve_in_wild = [cve for cve in cve_in_wild if len(cve)>=11]
# Pass 2: remove leftover parentheses.
for _junk in ('(', ')'):
    cve_in_wild = [cve.replace(_junk, '') for cve in cve_in_wild]
# Pass 3: repair individually corrupted CVE IDs (typos in the raw sources).
# Dict insertion order matters: replacements are applied sequentially,
# exactly as the original hand-written chain did.
_cve_typo_fixes = {
    'CVE2019-7278': 'CVE-2019-7278',
    '2CVE-2006-3643': 'CVE-2006-3643',
    'CVE2019-7279': 'CVE-2019-7279',
    'CVE-2018_16858': 'CVE-2018-16858',
    'CVE 2014-6278': 'CVE-2014-6278',
    'CVE-209-18935': 'CVE-2019-18935',
    'CVE_2009-3729': 'CVE-2009-3729',
    'CVE-20190-11539': 'CVE-2019-11539',
    'CVE-2190-11539': 'CVE-2019-11539',
}
for _bad, _good in _cve_typo_fixes.items():
    cve_in_wild = [cve.replace(_bad, _good) for cve in cve_in_wild]
# Extract the year component ('CVE-YYYY-NNNN' -> 'YYYY') from each ID.
dates = set([x.split('-')[1] for x in cve_in_wild])
# Sanity check: print any IDs that still contain the '2190' year typo
# (should be empty after the manual fixes above).
for x in cve_in_wild:
    if '2190' in x:
        print(x)
dates
print("First exploit was recorded in {}".format(min(dates)))
print("Last exploit was recorded in {}".format(max(dates)))
# Label every CVE in the NVD dataset: 1 if it was observed exploited in the wild.
df_nvd = pd.read_csv(os.path.join('data', 'nvdcve_combined.csv'))
# `cve_in_wild` is a list at this point; convert to a set once so each
# membership test below is O(1) instead of an O(n) scan per NVD row.
cve_in_wild_set = set(cve_in_wild)
target_cve_dict = {cve: (1 if cve in cve_in_wild_set else 0) for cve in df_nvd['ID']}
df_target = pd.DataFrame.from_dict(target_cve_dict, orient='index', columns=['in_the_wild'])
df_target['ID'] = df_target.index
df_target = df_target.reset_index(drop=True)
# Rearrange columns: ID first, label second.
df_target = df_target[['ID', 'in_the_wild']]
df_target.head()
# Fraction of NVD CVEs seen in the wild (class balance of the target variable).
df_target['in_the_wild'].mean()
dates = []
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# `GiRaFFE_NRPy`: Main Driver
## Author: Patrick Nelson
<a id='intro'></a>
**Notebook Status:** <font color=Red><b> Validation in progress </b></font>
**Validation Notes:** This code assembles the various parts needed for GRFFE evolution in order.
### NRPy+ Source Code for this module:
* [GiRaFFE_NRPy/GiRaFFE_NRPy_Main_Driver.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Main_Driver.py)
### Other critical files (in alphabetical order):
* [GiRaFFE_NRPy/Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Generates code to reconstruct primitive variables on cell faces.
* [GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb) Generates code to compute the $\tilde{S}_i$ source term.
* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.
* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.
* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.
## Introduction:
Having written all the various algorithms that will go into evolving the GRFFE equations forward through time, we are ready to write a start-to-finish module to do so. However, to help keep things more organized, we will first create a dedicated module to assemble the various functions we need to run, in order, to perform the evolution. This will reduce the length of the standalone C code, improving that notebook's readability.
<a id='prelim'></a>
# Table of Contents
$$\label{prelim}$$
During a given RK substep, we will perform the following steps in this order, based on the order used in the original `GiRaFFE`:
0. [Step 0](#prelim): Preliminaries
1. [Step 1](#rhs): Calculate the right-hand sides
1. [Step 1.a](#parenthetical): Calculate the portion of the gauge terms for $A_k$, $(\alpha \Phi - \beta^j A_j)$ and $\Phi$, $(\alpha\sqrt{\gamma}A^j - \beta^j [\sqrt{\gamma} \Phi])$ *inside* the parentheses to be finite-differenced.
* **GRFFE/equations.py**, **GRHD/equations.py**
1. [Step 1.b](#source): Calculate the source terms of $\partial_t A_i$, $\partial_t \tilde{S}_i$, and $\partial_t [\sqrt{\gamma} \Phi]$ right-hand sides
* **GRFFE/equations.py**, **GRHD/equations.py**, **GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms**
1. [Step 1.c](#flux): Calculate the Flux terms
* In each direction:
* Interpolate the metric gridfunctions to cell faces
* **GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py**
* Reconstruct primitives $\bar{v}^i$ and $B^i$ on cell faces with the piecewise-parabolic method
* **GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py**
* Compute the fluxes of $\tilde{S}_i$ and $A_i$ and add the appropriate combinations to the evolution equation right-hand sides
* **GiRaFFE_NRPy/Stilde_flux.py**, **GiRaFFE_NRPy/Afield_flux.py**
1. [Step 2](#poststep): Recover the primitive variables and apply boundary conditions (post-step)
1. [Step 2.a](#potential_bc): Apply boundary conditions to $A_i$ and $\sqrt{\gamma} \Phi$
* **GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py**
1. [Step 2.b](#a2b): Compute $B^i$ from $A_i$
* **GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py**
1. [Step 2.c](#c2p): Run the Conservative-to-Primitive solver
* This applies fixes to $\tilde{S}_i$, then computes $\bar{v}^i$. A current sheet prescription is then applied to $\bar{v}^i$, and $\tilde{S}_i$ is recomputed to be consistent.
* **GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py**
1. [Step 2.d](#velocity_bc): Apply outflow boundary conditions to $\bar{v}^i$
* **GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py**
1. [Step 3](#write_out): Write out the C code function
1. [Step 4](#code_validation): Self-Validation against `GiRaFFE_NRPy_Main_Driver.py`
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='prelim'></a>
# Step 0: Preliminaries \[Back to [top](#toc)\]
$$\label{prelim}$$
We begin by importing the NRPy+ core functionality. We also import the Levi-Civita symbol, the GRHD module, and the GRFFE module.
```
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)
from outputC import * # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import loop as lp # NRPy+: Generate C code loops
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
thismodule = "GiRaFFE_NRPy_Main_Driver"
# Use Einstein-Toolkit-style gridfunction memory access and 2nd-order centered FD.
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2)
# All generated C code is written under this directory.
out_dir = os.path.join("GiRaFFE_standalone_Ccodes")
cmd.mkdir(out_dir)
CoordSystem = "Cartesian"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
# Default Kreiss-Oliger dissipation strength
default_KO_strength = 0.1
diss_strength = par.Cparameters("REAL", thismodule, "diss_strength", default_KO_strength)
# Options for the C-code generator: quiet output, no CSE sorting.
outCparams = "outCverbose=False,CSE_sorting=none"
```
<a id='rhs'></a>
# Step 1: Calculate the right-hand sides \[Back to [top](#toc)\]
$$\label{rhs}$$
<a id='parenthetical'></a>
In the method of lines using Runge-Kutta methods, each timestep involves several "RK substeps" during which we will run the same set of function calls. These can be divided into two groups: one in which the RHSs themselves are calculated, and a second in which boundary conditions are applied and auxiliary variables updated (the post-step). Here, we focus on that first group.
## Step 1.a: Calculate the portion of the gauge terms for $A_k$, $(\alpha \Phi - \beta^j A_j)$ and $\Phi$, $(\alpha\sqrt{\gamma}A^j - \beta^j [\sqrt{\gamma} \Phi])$ *inside* the parentheses to be finite-differenced. \[Back to [top](#toc)\]
$$\label{parenthetical}$$
The source terms of our evolution equations consist of two terms that are derivatives of some parenthetical quantity. We can save some effort and execution time (at the cost of memory needed) by computing these parentheticals, storing them, and then finite-differencing that stored variable. For more information, see the notebook for the [implementation](Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb) and the [validation](Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Source_Terms.ipynb), as well as [Tutorial-GRFFE_Equations-Cartesian](../Tutorial-GRFFE_Equations-Cartesian.ipynb) and [Tutorial-GRHD_Equations-Cartesian](../Tutorial-GRHD_Equations-Cartesian.ipynb) for the terms themselves.
```
import GRHD.equations as GRHD # NRPy+: Generate general relativistic hydrodynamics equations
import GRFFE.equations as GRFFE # NRPy+: Generate general relativisitic force-free electrodynamics equations
# ADM metric quantities (auxiliary, not evolved): 3-metric, shift, lapse.
gammaDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gammaDD","sym01",DIM=3)
betaU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","betaU",DIM=3)
alpha = gri.register_gridfunctions("AUXEVOL","alpha")
# Evolved variables: vector potential A_i, scalar potential [sqrt(gamma) Phi],
# and Stilde_i; B^i and the Valencia 3-velocity are auxiliary.
AD = ixp.register_gridfunctions_for_single_rank1("EVOL","AD")
BU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","BU")
ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","ValenciavU")
psi6Phi = gri.register_gridfunctions("EVOL","psi6Phi")
StildeD = ixp.register_gridfunctions_for_single_rank1("EVOL","StildeD")
# Storage for the parenthetical quantities that are finite-differenced later.
PhievolParenU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","PhievolParenU",DIM=3)
AevolParen = gri.register_gridfunctions("AUXEVOL","AevolParen")
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_AD_source_term_parenthetical_for_FD(GRHD.sqrtgammaDET,betaU,alpha,psi6Phi,AD)
GRFFE.compute_psi6Phi_rhs_parenthetical(gammaDD,GRHD.sqrtgammaDET,betaU,alpha,AD,psi6Phi)
parens_to_print = [\
lhrh(lhs=gri.gfaccess("auxevol_gfs","AevolParen"),rhs=GRFFE.AevolParen),\
lhrh(lhs=gri.gfaccess("auxevol_gfs","PhievolParenU0"),rhs=GRFFE.PhievolParenU[0]),\
lhrh(lhs=gri.gfaccess("auxevol_gfs","PhievolParenU1"),rhs=GRFFE.PhievolParenU[1]),\
lhrh(lhs=gri.gfaccess("auxevol_gfs","PhievolParenU2"),rhs=GRFFE.PhievolParenU[2]),\
]
subdir = "RHSs"
cmd.mkdir(os.path.join(out_dir, subdir))
desc = "Calculate quantities to be finite-differenced for the GRFFE RHSs"
name = "calculate_parentheticals_for_RHSs"
# Write the C function; "AllPoints" loops over the whole grid, ghost zones included.
outCfunction(
outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name,
params ="const paramstruct *restrict params,const REAL *restrict in_gfs,REAL *restrict auxevol_gfs",
body = fin.FD_outputC("returnstring",parens_to_print,params=outCparams).replace("IDX4","IDX4S"),
loopopts ="AllPoints",
rel_path_for_Cparams=os.path.join("../"))
```
<a id='source'></a>
## Step 1.b: Calculate the source terms of $\partial_t A_i$, $\partial_t \tilde{S}_i$, and $\partial_t [\sqrt{\gamma} \Phi]$ right-hand sides \[Back to [top](#toc)\]
$$\label{source}$$
With the parentheticals stored in memory from the previous step, we can now now calculate the terms on the RHS of $A_i$ and $[\sqrt{\gamma} \Phi]$ that involve the derivatives of those terms. We also compute the other term in the RHS of $[\sqrt{\gamma} \Phi]$, which is a straightforward damping term.
```
# Damping parameter xi for the generalized-Lorenz-gauge damping term.
xi_damping = par.Cparameters("REAL",thismodule,"xi_damping",0.1)
GRFFE.compute_psi6Phi_rhs_damping_term(alpha,psi6Phi,xi_damping)
# Symbolic finite-difference derivatives of the stored parentheticals.
AevolParen_dD = ixp.declarerank1("AevolParen_dD",DIM=3)
PhievolParenU_dD = ixp.declarerank2("PhievolParenU_dD","nosym",DIM=3)
A_rhsD = ixp.zerorank1()
psi6Phi_rhs = GRFFE.psi6Phi_damping
# d_t A_i picks up -d_i(AevolParen); d_t [sqrt(gamma) Phi] picks up the
# (negative) divergence of PhievolParenU.
for i in range(3):
    A_rhsD[i] += -AevolParen_dD[i]
    psi6Phi_rhs += -PhievolParenU_dD[i][i]
# Add Kreiss-Oliger dissipation to the GRFFE RHSs:
# psi6Phi_dKOD = ixp.declarerank1("psi6Phi_dKOD")
# AD_dKOD = ixp.declarerank2("AD_dKOD","nosym")
# for i in range(3):
# psi6Phi_rhs += diss_strength*psi6Phi_dKOD[i]*rfm.ReU[i] # ReU[i] = 1/scalefactor_orthog_funcform[i]
# for j in range(3):
# A_rhsD[j] += diss_strength*AD_dKOD[j][i]*rfm.ReU[i] # ReU[i] = 1/scalefactor_orthog_funcform[i]
RHSs_to_print = [\
lhrh(lhs=gri.gfaccess("rhs_gfs","AD0"),rhs=A_rhsD[0]),\
lhrh(lhs=gri.gfaccess("rhs_gfs","AD1"),rhs=A_rhsD[1]),\
lhrh(lhs=gri.gfaccess("rhs_gfs","AD2"),rhs=A_rhsD[2]),\
lhrh(lhs=gri.gfaccess("rhs_gfs","psi6Phi"),rhs=psi6Phi_rhs),\
]
desc = "Calculate AD gauge term and psi6Phi RHSs"
name = "calculate_AD_gauge_psi6Phi_RHSs"
source_Ccode = outCfunction(
outfile = "returnstring", desc=desc, name=name,
params ="const paramstruct *params,const REAL *in_gfs,const REAL *auxevol_gfs,REAL *rhs_gfs",
body = fin.FD_outputC("returnstring",RHSs_to_print,params=outCparams).replace("IDX4","IDX4S"),
loopopts ="InteriorPoints",
rel_path_for_Cparams=os.path.join("../")).replace("= NGHOSTS","= NGHOSTS_A2B").replace("NGHOSTS+Nxx0","Nxx_plus_2NGHOSTS0-NGHOSTS_A2B").replace("NGHOSTS+Nxx1","Nxx_plus_2NGHOSTS1-NGHOSTS_A2B").replace("NGHOSTS+Nxx2","Nxx_plus_2NGHOSTS2-NGHOSTS_A2B")
# Note the above .replace() calls: they expand the loop range into the ghost
# zones, since the second-order finite differencing used here needs fewer
# ghost-zone points than some of the other algorithms in this code.
with open(os.path.join(out_dir,subdir,name+".h"),"w") as file:
    file.write(source_Ccode)
```
We also need to compute the source term of the $\tilde{S}_i$ evolution equation. This term involves derivatives of the four metric, so we can save some effort here by taking advantage of the interpolations done of the metric gridfunctions to the cell faces, which will allow us to take a finite-difference derivative with the accuracy of a higher order and the computational cost of a lower order. However, it will require some more complicated coding, detailed in [Tutorial-GiRaFFE_NRPy-Source_Terms](Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb)
```
import GiRaFFE_NRPy.GiRaFFE_NRPy_Source_Terms as source
# Declare sqrt(4*pi) once as a C parameter so the generated code can reference it.
sqrt4pi = par.Cparameters("REAL",thismodule,"sqrt4pi","sqrt(4.0*M_PI)")
source.write_out_functions_for_StildeD_source_term(os.path.join(out_dir,subdir),outCparams,gammaDD,betaU,alpha,
ValenciavU,BU,sqrt4pi)
```
<a id='flux'></a>
## Step 1.c: Calculate the Flux terms \[Back to [top](#toc)\]
$$\label{flux}$$
Now, we will compute the flux terms of $\partial_t A_i$ and $\partial_t \tilde{S}_i$. To do so, we will first need to interpolate the metric gridfunctions to cell faces and to reconstruct the primitives on the cell faces using the code detailed in [Tutorial-GiRaFFE_NRPy-Metric_Face_Values](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) and in [Tutorial-GiRaFFE_NRPy-PPM](Tutorial-GiRaFFE_NRPy-PPM.ipynb).
```
subdir = "FCVAL"
cmd.mkdir(os.path.join(out_dir, subdir))
import GiRaFFE_NRPy.GiRaFFE_NRPy_Metric_Face_Values as FCVAL
FCVAL.GiRaFFE_NRPy_FCVAL(os.path.join(out_dir,subdir))
subdir = "PPM"
cmd.mkdir(os.path.join(out_dir, subdir))
import GiRaFFE_NRPy.GiRaFFE_NRPy_PPM as PPM
PPM.GiRaFFE_NRPy_PPM(os.path.join(out_dir,subdir))
```
Here, we will write the function to compute the electric field contribution to the induction equation RHS. This is coded with documentation in [Tutorial-GiRaFFE_NRPy-Afield_flux](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb). The electric field computed on cell faces in the $i^{\rm th}$ direction contributes to the $j^{\rm th}$ and $k^{\rm th}$ components of the induction equation RHS. That is, in Cartesian coordinates, the $x$ component of the electric field will be the average of the values computed on the cell faces in the $\pm y$- and $\pm z$-directions, and so forth for the other components. This ultimately results in the six functions we create below.
```
import GiRaFFE_NRPy.Afield_flux as Af
# We will pass values of the gridfunction on the cell faces into the function. This requires us
# to declare them as C parameters in NRPy+. We will denote this with the _face infix/suffix.
alpha_face = gri.register_gridfunctions("AUXEVOL","alpha_face")
gamma_faceDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceDD","sym01")
beta_faceU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","beta_faceU")
# We'll need some more gridfunctions, now, to represent the reconstructions of BU and ValenciavU
# on the right and left faces
Valenciav_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_rU",DIM=3)
B_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_rU",DIM=3)
Valenciav_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_lU",DIM=3)
B_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_lU",DIM=3)
subdir = "RHSs"
# Generate the electric-field (A-field flux) C functions in the RHSs directory.
Af.generate_Afield_flux_function_files(out_dir,subdir,alpha_face,gamma_faceDD,beta_faceU,\
Valenciav_rU,B_rU,Valenciav_lU,B_lU,True)
```
We must do something similar here, albeit a bit simpler. For instance, the $x$ component of $\partial_t \tilde{S}_i$ will be a finite difference of the flux throught the faces in the $\pm x$ direction; for further detail, see [Tutorial-GiRaFFE_NRPy-Stilde_flux](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb).
```
import GiRaFFE_NRPy.Stilde_flux as Sf
subdir = "RHSs"
# Generate the C functions computing the Stilde_i flux term in each direction.
Sf.generate_C_code_for_Stilde_flux(os.path.join(out_dir,subdir), True, alpha_face,gamma_faceDD,beta_faceU,
Valenciav_rU,B_rU,Valenciav_lU,B_lU,sqrt4pi)
```
<a id='poststep'></a>
# Step 2: Recover the primitive variables and apply boundary conditions \[Back to [top](#toc)\]
$$\label{poststep}$$
With the RHSs computed, we can now recover the primitive variables, which are the Valencia three-velocity $\bar{v}^i$ and the magnetic field $B^i$. We can also apply boundary conditions to the vector potential and velocity. By doing this at each RK substep, we can help ensure the accuracy of the following substeps.
<a id='potential_bc'></a>
## Step 2.a: Apply boundary conditions to $A_i$ and $\sqrt{\gamma} \Phi$ \[Back to [top](#toc)\]
$$\label{potential_bc}$$
First, we will apply boundary conditions to the vector potential, $A_i$, and the scalar potential $\sqrt{\gamma} \Phi$. The file we generate here contains both functions we need for BCs, as documented in [Tutorial-GiRaFFE_NRPy-BCs](Tutorial-GiRaFFE_NRPy-BCs.ipynb).
```
subdir = "boundary_conditions"
cmd.mkdir(os.path.join(out_dir,subdir))
import GiRaFFE_NRPy.GiRaFFE_NRPy_BCs as BC
BC.GiRaFFE_NRPy_BCs(os.path.join(out_dir,subdir))
```
<a id='a2b'></a>
## Step 2.b: Compute $B^i$ from $A_i$ \[Back to [top](#toc)\]
$$\label{a2b}$$
Now, we will calculate the magnetic field as the curl of the vector potential at all points in our domain; this requires care to be taken in the ghost zones, which is detailed in [Tutorial-GiRaFFE_NRPy-A2B](Tutorial-GiRaFFE_NRPy-A2B.ipynb).
```
subdir = "A2B"
cmd.mkdir(os.path.join(out_dir,subdir))
import GiRaFFE_NRPy.GiRaFFE_NRPy_A2B as A2B
A2B.GiRaFFE_NRPy_A2B(os.path.join(out_dir,subdir),gammaDD,AD,BU)
```
<a id='c2p'></a>
## Step 2.c: Run the Conservative-to-Primitive solver \[Back to [top](#toc)\]
$$\label{c2p}$$
With these functions, we apply fixes to the Poynting flux, and use that to update the three-velocity. Then, we apply our current sheet prescription to the velocity, and recompute the Poynting flux to agree with the now-fixed velocity. More detail can be found in [Tutorial-GiRaFFE_NRPy-C2P_P2C](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb).
```
import GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C as C2P_P2C
# Build the symbolic expressions for the conservative-to-primitive solve:
# fixes to Stilde_i plus the recomputed Valencia 3-velocity.
C2P_P2C.GiRaFFE_NRPy_C2P(StildeD,BU,gammaDD,betaU,alpha)
values_to_print = [\
lhrh(lhs=gri.gfaccess("in_gfs","StildeD0"),rhs=C2P_P2C.outStildeD[0]),\
lhrh(lhs=gri.gfaccess("in_gfs","StildeD1"),rhs=C2P_P2C.outStildeD[1]),\
lhrh(lhs=gri.gfaccess("in_gfs","StildeD2"),rhs=C2P_P2C.outStildeD[2]),\
lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=C2P_P2C.ValenciavU[0]),\
lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=C2P_P2C.ValenciavU[1]),\
lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=C2P_P2C.ValenciavU[2])\
]
subdir = "C2P"
cmd.mkdir(os.path.join(out_dir,subdir))
desc = "Apply fixes to \tilde{S}_i and recompute the velocity to match with current sheet prescription."
name = "GiRaFFE_NRPy_cons_to_prims"
# Write the C2P function: runs at all points and reads the coordinate arrays.
outCfunction(
outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs,REAL *in_gfs",
body = fin.FD_outputC("returnstring",values_to_print,params=outCparams).replace("IDX4","IDX4S"),
loopopts ="AllPoints,Read_xxs",
rel_path_for_Cparams=os.path.join("../"))
# TINYDOUBLE = par.Cparameters("REAL",thismodule,"TINYDOUBLE",1e-100)
# Primitive-to-conservative: recompute Stilde_i from the (fixed) velocity.
C2P_P2C.GiRaFFE_NRPy_P2C(gammaDD,betaU,alpha, ValenciavU,BU, sqrt4pi)
values_to_print = [\
lhrh(lhs=gri.gfaccess("in_gfs","StildeD0"),rhs=C2P_P2C.StildeD[0]),\
lhrh(lhs=gri.gfaccess("in_gfs","StildeD1"),rhs=C2P_P2C.StildeD[1]),\
lhrh(lhs=gri.gfaccess("in_gfs","StildeD2"),rhs=C2P_P2C.StildeD[2]),\
]
desc = "Recompute StildeD after current sheet fix to Valencia 3-velocity to ensure consistency between conservative & primitive variables."
name = "GiRaFFE_NRPy_prims_to_cons"
outCfunction(
outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *auxevol_gfs,REAL *in_gfs",
body = fin.FD_outputC("returnstring",values_to_print,params=outCparams).replace("IDX4","IDX4S"),
loopopts ="AllPoints",
rel_path_for_Cparams=os.path.join("../"))
```
<a id='velocity_bc'></a>
## Step 2.d: Apply outflow boundary conditions to $\bar{v}^i$ \[Back to [top](#toc)\]
$$\label{velocity_bc}$$
Now, we can apply outflow boundary conditions to the Valencia three-velocity. This specific type of boundary condition helps avoid numerical error "flowing" into our grid.
This function has already been generated [above](#potential_bc).
<a id='write_out'></a>
# Step 3: Write out the C code function \[Back to [top](#toc)\]
$$\label{write_out}$$
Now, we have generated all the functions we will need for the `GiRaFFE` evolution. So, we will now assemble our evolution driver. This file will first `#include` all of the files we just generated for easy access. Then, we will write a function that calls these functions in the correct order, iterating over the flux directions as necessary.
```
%%writefile $out_dir/GiRaFFE_NRPy_Main_Driver.h
// Structure to track ghostzones for PPM:
// Each reconstructed gridfunction carries a pointer to its data plus the
// number of valid ghostzones at the low/high end of each direction
// (index 0 of gz_lo/gz_hi is unused; directions are 1..3).
typedef struct __gf_and_gz_struct__ {
REAL *gf;
int gz_lo[4],gz_hi[4];
} gf_and_gz_struct;
// Some additional constants needed for PPM:
// Slot indices into the reconstruction list: 3 velocity + 3 B-field components.
const int VX=0,VY=1,VZ=2,BX=3,BY=4,BZ=5;
const int NUM_RECONSTRUCT_GFS = 6;
// Include ALL functions needed for evolution
#include "RHSs/calculate_parentheticals_for_RHSs.h"
#include "RHSs/calculate_AD_gauge_psi6Phi_RHSs.h"
#include "PPM/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c"
#include "FCVAL/interpolate_metric_gfs_to_cell_faces.h"
#include "RHSs/calculate_StildeD0_source_term.h"
#include "RHSs/calculate_StildeD1_source_term.h"
#include "RHSs/calculate_StildeD2_source_term.h"
#include "../calculate_E_field_flat_all_in_one.h"
#include "RHSs/calculate_Stilde_flux_D0.h"
#include "RHSs/calculate_Stilde_flux_D1.h"
#include "RHSs/calculate_Stilde_flux_D2.h"
#include "boundary_conditions/GiRaFFE_boundary_conditions.h"
#include "A2B/driver_AtoB.h"
#include "C2P/GiRaFFE_NRPy_cons_to_prims.h"
#include "C2P/GiRaFFE_NRPy_prims_to_cons.h"
// Debugging aid: overwrite the B-field gridfunctions BU0..BU2 with binary
// dumps ("BU<i>_override-<n>.bin") produced by the original GiRaFFE code,
// so the two codes can be compared mid-run at iteration n.
void override_BU_with_old_GiRaFFE(const paramstruct *restrict params,REAL *restrict auxevol_gfs,const int n) {
#include "set_Cparameters.h"
  const int Ntot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
  // The three B-field gridfunction offsets, in the same order as the files.
  const int BU_gf[3] = {BU0GF, BU1GF, BU2GF};
  char filename[100];
  for(int which_gf=0; which_gf<3; which_gf++) {
    sprintf(filename,"BU%d_override-%08d.bin",which_gf,n);
    FILE *infile = fopen(filename, "rb");
    if(infile == NULL) {
      // Missing dump: leave BU untouched rather than dereferencing NULL.
      fprintf(stderr,"override_BU_with_old_GiRaFFE: could not open %s; skipping override.\n",filename);
      return;
    }
    const size_t n_read = fread(auxevol_gfs + BU_gf[which_gf]*Ntot, sizeof(double), Ntot, infile);
    if(n_read != (size_t)Ntot)
      fprintf(stderr,"override_BU_with_old_GiRaFFE: short read from %s (%zu of %d doubles).\n",
              filename,n_read,Ntot);
    fclose(infile);
  }
}
// Evaluate the right-hand sides of the GRFFE evolution equations for one
// timestep: gauge/vector-potential RHSs, StildeD source terms, and the
// flux contributions in each of the three directions.
void GiRaFFE_NRPy_RHSs(const paramstruct *restrict params,REAL *restrict auxevol_gfs,const REAL *restrict in_gfs,REAL *restrict rhs_gfs) {
#include "set_Cparameters.h"
    // First thing's first: initialize the RHSs to zero!
    #pragma omp parallel for
    for(int ii=0;ii<Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2*NUM_EVOL_GFS;ii++) {
        rhs_gfs[ii] = 0.0;
    }
    // Next calculate the easier source terms that don't require flux directions
    // This will also reset the RHSs for each gf at each new timestep.
    calculate_parentheticals_for_RHSs(params,in_gfs,auxevol_gfs);
    calculate_AD_gauge_psi6Phi_RHSs(params,in_gfs,auxevol_gfs,rhs_gfs);
    // Now, we set up a bunch of structs of pointers to properly guide the PPM algorithm.
    // They also count the number of ghostzones available.
    gf_and_gz_struct in_prims[NUM_RECONSTRUCT_GFS], out_prims_r[NUM_RECONSTRUCT_GFS], out_prims_l[NUM_RECONSTRUCT_GFS];
    int which_prims_to_reconstruct[NUM_RECONSTRUCT_GFS],num_prims_to_reconstruct;
    const int Nxxp2NG012 = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
    REAL *temporary = auxevol_gfs + Nxxp2NG012*AEVOLPARENGF; //We're not using this anymore
    // This sets pointers to the portion of auxevol_gfs containing the relevant gridfunction.
    // NOTE: slot order must match the VX..BZ constants above: velocity components
    // in slots 0-2, B-field components in slots 3-5.
    int ww=0;
    in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAVU0GF;
    out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU0GF;
    out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU0GF;
    ww++;
    in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAVU1GF;
    out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU1GF;
    out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU1GF;
    ww++;
    in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAVU2GF;
    out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU2GF;
    out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU2GF;
    ww++;
    in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*BU0GF;
    out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*B_RU0GF;
    out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*B_LU0GF;
    ww++;
    in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*BU1GF;
    out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*B_RU1GF;
    out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*B_LU1GF;
    ww++;
    in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*BU2GF;
    out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*B_RU2GF;
    out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*B_LU2GF;
    ww++;
    // Prims are defined AT ALL GRIDPOINTS, so we set the # of ghostzones to zero:
    for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { in_prims[i].gz_lo[j]=0; in_prims[i].gz_hi[j]=0; }
    // Left/right variables are not yet defined, yet we set the # of gz's to zero by default:
    for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { out_prims_r[i].gz_lo[j]=0; out_prims_r[i].gz_hi[j]=0; }
    for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { out_prims_l[i].gz_lo[j]=0; out_prims_l[i].gz_hi[j]=0; }
    ww=0;
    which_prims_to_reconstruct[ww]=VX; ww++;
    which_prims_to_reconstruct[ww]=VY; ww++;
    which_prims_to_reconstruct[ww]=VZ; ww++;
    which_prims_to_reconstruct[ww]=BX; ww++;
    which_prims_to_reconstruct[ww]=BY; ww++;
    which_prims_to_reconstruct[ww]=BZ; ww++;
    num_prims_to_reconstruct=ww;
    // In each direction, perform the PPM reconstruction procedure.
    // Then, add the fluxes to the RHS as appropriate.
    for(int flux_dirn=0;flux_dirn<3;flux_dirn++) {
        // In each direction, interpolate the metric gfs (gamma,beta,alpha) to cell faces.
        // (flux_dirn+1 converts the 0-based loop index to the 1-based convention
        // used by the interpolation/reconstruction routines.)
        interpolate_metric_gfs_to_cell_faces(params,auxevol_gfs,flux_dirn+1);
        // Then, reconstruct the primitive variables on the cell faces.
        // This function is housed in the file: "reconstruct_set_of_prims_PPM_GRFFE_NRPy.c"
        reconstruct_set_of_prims_PPM_GRFFE_NRPy(params, auxevol_gfs, flux_dirn+1, num_prims_to_reconstruct,
                                                which_prims_to_reconstruct, in_prims, out_prims_r, out_prims_l, temporary);
        // For example, if flux_dirn==0, then at gamma_faceDD00(i,j,k) represents gamma_{xx}
        // at (i-1/2,j,k), Valenciav_lU0(i,j,k) is the x-component of the velocity at (i-1/2-epsilon,j,k),
        // and Valenciav_rU0(i,j,k) is the x-component of the velocity at (i-1/2+epsilon,j,k).
        if(flux_dirn==0) {
            // Next, we calculate the source term for StildeD. Again, this also resets the rhs_gfs array at
            // each new timestep.
            calculate_StildeD0_source_term(params,auxevol_gfs,rhs_gfs);
            // Now, compute the electric field on each face of a cell and add it to the RHSs as appropriate
            //calculate_E_field_D0_right(params,auxevol_gfs,rhs_gfs);
            //calculate_E_field_D0_left(params,auxevol_gfs,rhs_gfs);
            // Finally, we calculate the flux of StildeD and add the appropriate finite-differences
            // to the RHSs.
            calculate_Stilde_flux_D0(params,auxevol_gfs,rhs_gfs);
        }
        else if(flux_dirn==1) {
            calculate_StildeD1_source_term(params,auxevol_gfs,rhs_gfs);
            //calculate_E_field_D1_right(params,auxevol_gfs,rhs_gfs);
            //calculate_E_field_D1_left(params,auxevol_gfs,rhs_gfs);
            calculate_Stilde_flux_D1(params,auxevol_gfs,rhs_gfs);
        }
        else {
            calculate_StildeD2_source_term(params,auxevol_gfs,rhs_gfs);
            //calculate_E_field_D2_right(params,auxevol_gfs,rhs_gfs);
            //calculate_E_field_D2_left(params,auxevol_gfs,rhs_gfs);
            calculate_Stilde_flux_D2(params,auxevol_gfs,rhs_gfs);
        }
        // Each flux direction contributes to two components of A_i (count=0,1).
        for(int count=0;count<=1;count++) {
            // This function is written to be general, using notation that matches the forward permutation added to AD2,
            // i.e., [F_HLL^x(B^y)]_z corresponding to flux_dirn=0, count=1.
            // The SIGN parameter is necessary because
            // -E_z(x_i,y_j,z_k) = 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k)
            //                           -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k) )
            // Note the negative signs on the reversed permutation terms!
            // By cyclically permuting with flux_dirn, we
            // get contributions to the other components, and by incrementing count, we get the backward permutations:
            // Let's suppose flux_dirn = 0. Then we will need to update Ay (count=0) and Az (count=1):
            //     flux_dirn=count=0 -> AD0GF+(flux_dirn+1+count)%3 = AD0GF + (0+1+0)%3=AD1GF <- Updating Ay!
            //        (flux_dirn)%3 = (0)%3 = 0               Vx
            //        (flux_dirn-count+2)%3 = (0-0+2)%3 = 2   Vz .  Inputs Vx, Vz -> SIGN = -1 ; 2.0*((REAL)count)-1.0=-1 check!
            //     flux_dirn=0,count=1 -> AD0GF+(flux_dirn+1+count)%3 = AD0GF + (0+1+1)%3=AD2GF <- Updating Az!
            //        (flux_dirn)%3 = (0)%3 = 0               Vx
            //        (flux_dirn-count+2)%3 = (0-1+2)%3 = 1   Vy .  Inputs Vx, Vy -> SIGN = +1 ; 2.0*((REAL)count)-1.0=2-1=+1 check!
            // Let's suppose flux_dirn = 1. Then we will need to update Az (count=0) and Ax (count=1):
            //     flux_dirn=1,count=0 -> AD0GF+(flux_dirn+1+count)%3 = AD0GF + (1+1+0)%3=AD2GF <- Updating Az!
            //        (flux_dirn)%3 = (1)%3 = 1               Vy
            //        (flux_dirn-count+2)%3 = (1-0+2)%3 = 0   Vx .  Inputs Vy, Vx -> SIGN = -1 ; 2.0*((REAL)count)-1.0=-1 check!
            //     flux_dirn=count=1 -> AD0GF+(flux_dirn+1+count)%3 = AD0GF + (1+1+1)%3=AD0GF <- Updating Ax!
            //        (flux_dirn)%3 = (1)%3 = 1               Vy
            //        (flux_dirn-count+2)%3 = (1-1+2)%3 = 2   Vz .  Inputs Vy, Vz -> SIGN = +1 ; 2.0*((REAL)count)-1.0=2-1=+1 check!
            // Let's suppose flux_dirn = 2. Then we will need to update Ax (count=0) and Ay (count=1):
            //     flux_dirn=2,count=0 -> AD0GF+(flux_dirn+1+count)%3 = AD0GF + (2+1+0)%3=AD0GF <- Updating Ax!
            //        (flux_dirn)%3 = (2)%3 = 2               Vz
            //        (flux_dirn-count+2)%3 = (2-0+2)%3 = 1   Vy .  Inputs Vz, Vy -> SIGN = -1 ; 2.0*((REAL)count)-1.0=-1 check!
            //     flux_dirn=2,count=1 -> AD0GF+(flux_dirn+1+count)%3 = AD0GF + (2+1+1)%3=AD1GF <- Updating Ay!
            //        (flux_dirn)%3 = (2)%3 = 2               Vz
            //        (flux_dirn-count+2)%3 = (2-1+2)%3 = 0   Vx .  Inputs Vz, Vx -> SIGN = +1 ; 2.0*((REAL)count)-1.0=2-1=+1 check!
            calculate_E_field_flat_all_in_one(params,
              &auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF+(flux_dirn)%3, 0)],&auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF+(flux_dirn-count+2)%3, 0)],
              &auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF+(flux_dirn)%3, 0)],&auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF+(flux_dirn-count+2)%3, 0)],
              &auxevol_gfs[IDX4ptS(B_RU0GF        +(flux_dirn)%3, 0)],&auxevol_gfs[IDX4ptS(B_RU0GF        +(flux_dirn-count+2)%3, 0)],
              &auxevol_gfs[IDX4ptS(B_LU0GF        +(flux_dirn)%3, 0)],&auxevol_gfs[IDX4ptS(B_LU0GF        +(flux_dirn-count+2)%3, 0)],
              &auxevol_gfs[IDX4ptS(B_RU0GF        +(flux_dirn-count+2)%3, 0)],
              &auxevol_gfs[IDX4ptS(B_LU0GF        +(flux_dirn-count+2)%3, 0)],
              &rhs_gfs[IDX4ptS(AD0GF+(flux_dirn+1+count)%3,0)], 2.0*((REAL)count)-1.0, flux_dirn);
        }
    }
}
// Post-timestep cleanup: apply boundary conditions, rebuild B from A, and
// enforce consistency between conservative and primitive variables.
void GiRaFFE_NRPy_post_step(const paramstruct *restrict params,REAL *xx[3],REAL *restrict auxevol_gfs,REAL *restrict evol_gfs,const int n) {
    // First, apply BCs to AD and psi6Phi. Then calculate BU from AD
    apply_bcs_potential(params,evol_gfs);
    driver_A_to_B(params,evol_gfs,auxevol_gfs);
    //override_BU_with_old_GiRaFFE(params,auxevol_gfs,n);
    // Apply fixes to StildeD, then recompute the velocity at the new timestep.
    // Apply the current sheet prescription to the velocities
    GiRaFFE_NRPy_cons_to_prims(params,xx,auxevol_gfs,evol_gfs);
    // Then, recompute StildeD to be consistent with the new velocities
    //GiRaFFE_NRPy_prims_to_cons(params,auxevol_gfs,evol_gfs);
    // Finally, apply outflow boundary conditions to the velocities.
    apply_bcs_velocity(params,auxevol_gfs);
}
```
<a id='code_validation'></a>
# Step 4: Self-Validation against `GiRaFFE_NRPy_Main_Driver.py` \[Back to [top](#toc)\]
$$\label{code_validation}$$
To validate the code in this tutorial we check for agreement between the files
1. that were generated in this tutorial and
1. those that are generated in the module `GiRaFFE_NRPy_Main_Driver.py`
```
# Reset the registered-gridfunction list so the validation module can
# re-register its gridfunctions from scratch without name collisions.
gri.glb_gridfcs_list = []
# Define the directory that we wish to validate against:
valdir = os.path.join("GiRaFFE_validation_Ccodes")
cmd.mkdir(valdir)
# Generate the reference C codes using the standalone Python module.
import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver as md
md.GiRaFFE_NRPy_Main_Driver_generate_all(valdir)
```
With both sets of codes generated, we can now compare them against each other.
```
import difflib
import sys
print("Printing difference between original C code and this code...")
# Every file generated by this notebook must byte-match the corresponding file
# generated by GiRaFFE_NRPy_Main_Driver.py; any difference fails the test.
files = ["GiRaFFE_NRPy_Main_Driver.h",
         "RHSs/calculate_parentheticals_for_RHSs.h",
         "RHSs/calculate_AD_gauge_psi6Phi_RHSs.h",
         "PPM/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c",
         "PPM/loop_defines_reconstruction_NRPy.h",
         "FCVAL/interpolate_metric_gfs_to_cell_faces.h",
         "RHSs/calculate_StildeD0_source_term.h",
         "RHSs/calculate_StildeD1_source_term.h",
         "RHSs/calculate_StildeD2_source_term.h",
         "RHSs/calculate_E_field_D0_right.h",
         "RHSs/calculate_E_field_D0_left.h",
         "RHSs/calculate_E_field_D1_right.h",
         "RHSs/calculate_E_field_D1_left.h",
         "RHSs/calculate_E_field_D2_right.h",
         "RHSs/calculate_E_field_D2_left.h",
         "RHSs/calculate_Stilde_flux_D0.h",
         "RHSs/calculate_Stilde_flux_D1.h",
         "RHSs/calculate_Stilde_flux_D2.h",
         "boundary_conditions/GiRaFFE_boundary_conditions.h",
         "A2B/driver_AtoB.h",
         "C2P/GiRaFFE_NRPy_cons_to_prims.h",
         "C2P/GiRaFFE_NRPy_prims_to_cons.h"]
# 'fname' rather than 'file' -- avoids shadowing the builtin name.
for fname in files:
    print("Checking file " + fname)
    with open(os.path.join(valdir, fname)) as file1, open(os.path.join(out_dir, fname)) as file2:
        # Read the lines of each file
        file1_lines = file1.readlines()
        file2_lines = file2.readlines()
    num_diffs = 0
    for line in difflib.unified_diff(file1_lines, file2_lines,
                                     fromfile=os.path.join(valdir, fname),
                                     tofile=os.path.join(out_dir, fname)):
        sys.stdout.writelines(line)
        num_diffs += 1
    if num_diffs == 0:
        print("No difference. TEST PASSED!")
    else:
        print("ERROR: Disagreement found with .py file. See differences above.")
        sys.exit(1)
```
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-GiRaFFE_NRPy_Main_Driver](Tutorial-GiRaFFE_NRPy_Main_Driver.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd    # NRPy+: Multi-platform Python command-line interface
# Convert this notebook into a LaTeX'd PDF in the root tutorial directory.
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GiRaFFE_NRPy_Main_Driver")
```
| github_jupyter |
## 1. Introduction
Notebook for generating lyrics using LSTM network. The dataset contains all the songs recorded by Bob Dylan. Stages:
1. EDA
- Summary statistics on dataset: distribution of no. of characters, words, sentences in collection
- Histograms & wordclouds
2. Preprocessing
- Create corpus of all words from lyrics
- Cleaning: remove special characters, convert to lowercase
- Create mapping of unique chars to indices
- Create features and targets (categorical)
3. Model
- Train LSTM model, one character at a time
- Visualize learning and loss
4. Generation
- Generate lyrics from seed phrase, one character at a time, using model predictions
```
# Imports
# Core
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import string, os, sys, warnings, random, io
warnings.filterwarnings("ignore")
# NLP
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout
import nltk
import re
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
# TPU config (for LSTM)
# Connect to the TPU cluster and build a distribution strategy; the model
# below must be constructed inside tpu_strategy.scope().
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
```
<a id="2"></a>
## 2. EDA
```
# Load song lyrics dataset
# Assumes one row per song with (at least) a 'lyrics' column -- TODO confirm
# against the CSV schema.
df_songs = pd.read_csv("../input/bob-dylan-songs/clear.csv")
df_songs.head()
df_songs.info()
```
### 2.1 Summary stats
```
# Get numbers of characters,words and sentences in each song
df_songs["n_chars"] = df_songs["lyrics"].apply(len)
# Word count via nltk tokenization (note: punctuation counts as tokens).
df_songs["n_words"]=df_songs.apply(lambda row: nltk.word_tokenize(row["lyrics"]), axis=1).apply(len)
# Line count: lyrics are newline-separated.
df_songs["n_lines"] = df_songs["lyrics"].str.split('\n').apply(len)
df_songs.describe()
# Plot distribution of chars, words, sentences in lyrics
fig, axs = plt.subplots(3, 1, figsize=(15, 10))
fig.suptitle('Distribution of characters, words and lines')
sns.histplot(data=df_songs, x='n_chars', ax=axs[0])
sns.histplot(data=df_songs, x='n_words', ax=axs[1])
sns.histplot(data=df_songs, x='n_lines', ax=axs[2])
```
### 2.2 Wordcloud
```
# Generate a wordcloud
stopwords = set(STOPWORDS)
wc = WordCloud(stopwords=stopwords, background_color="#007399",colormap="cividis", max_words=100)
# Fit the cloud on the concatenation of all songs' lyrics.
wc.generate(" ".join(df_songs.lyrics))
plt.figure(figsize=(12,12))
plt.imshow(wc, interpolation="bilinear")
plt.show()
# Sample lyrics
# Inspect one song (row 100) and show its first 30 lines.
song_idx = 100
print(df_songs.loc[song_idx, :])
df_songs.loc[song_idx, 'lyrics'].split('\n')[:30]
```
## 3. Preprocessing
```
df_songs.shape
```
### 3.1 Create corpus
```
# Create corpus from lyrics: concatenate every song into one lowercase string.
# str.join is linear in total length; the previous += loop was quadratic.
corpus = "".join(df_songs.lyrics).lower()
print("Number of unique characters:", len(set(corpus)))
```
### 3.2 Cleaning
```
print(sorted(set(corpus)))
# Characters to strip from the corpus before building the vocabulary.
special_chars = ['\x0b', '"', '(', ')', '`', '¥', '©', 'é', 'ñ', 'ó', 'ü', '—', '“', '”', '…', '\u2028']
# Delete all special characters in a single pass with str.translate,
# instead of one full-string .replace() scan per character.
corpus = corpus.translate(str.maketrans('', '', ''.join(special_chars)))
# corpus = re.sub("[^A-Za-z0-9'.,?!\n\w]","",corpus)
print(sorted(set(corpus)))
# Sample section
corpus[:1000]
```
<a id="3.3"></a>
### 3.3 Create inputs from mapping
```
# Map characters in corpus to indices
symb = sorted(list(set(corpus)))    # vocabulary: unique characters, sorted for determinism
len_corpus = len(corpus)
len_symb = len(symb)
# Forward (char -> index) and reverse (index -> char) lookup tables.
mapping = dict((c, i) for i, c in enumerate(symb))
reverse_mapping = dict((i, c) for i, c in enumerate(symb))
print("Total number of characters:", len_corpus)
print("Number of unique characters:", len_symb)
#Splitting the corpus in equal length of strings and output target
# Sliding window: each sequence of `length` characters predicts the next one.
length = 50
features = []
targets = []
for i in range(0, len_corpus - length, 1):
    feature = corpus[i:i + length]
    target = corpus[i + length]
    features.append([mapping[j] for j in feature])
    targets.append(mapping[target])
# len_datapoints = len(targets)
# print("Total number of sequences in the Corpus:", len_datapoints)
# reshape X and normalize
# X has shape (n_samples, length, 1); indices scaled into [0, 1) for the LSTM.
X = (np.reshape(features, (len(targets), length, 1)))/ float(len_symb)
# one hot encode the output variable
y = to_categorical(targets)
```
## 4. Model
### 4.1 Training
```
#Initialising the Model
# Build inside the TPU strategy scope so variables are placed on TPU devices.
with tpu_strategy.scope():
    model = Sequential()
    # Single LSTM layer over (sequence_length, 1) normalized character indices.
    model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))
    # Softmax over the character vocabulary.
    model.add(Dense(y.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
%%time
#Training the Model
# Global batch size scales with the replica count: 256 samples per TPU core.
history = model.fit(X, y, batch_size=256*tpu_strategy.num_replicas_in_sync, epochs=50)
```
model.save("generator_v2.h5")
### 4.2 Evaluation
```
# Evaluation
# Keras History -> DataFrame, one row per epoch.
history_df = pd.DataFrame(history.history)
#Plotting the learning curve
fig = plt.figure(figsize=(12,6))
fig.suptitle("Learning plot for model Loss")
pl = sns.lineplot(data=history_df["loss"])
pl.set(ylabel ="Training Loss")
pl.set(xlabel ="Epochs")
```
## 5. Generation
```
# The function to generate text from model
def generate(seed,char_count, temperature=1.0):
generated= ""
seed = seed
seed_idx=[mapping[char] for char in seed]
generated += seed
# Generating new text of given length
for i in range(char_count):
seed_idx=[mapping[char] for char in seed]
x_pred = np.reshape(seed_idx, (1, len(seed_idx), 1))/len_symb
prediction = model.predict(x_pred, verbose=0)[0]
# Getting the index of the next most probable index
prediction = np.asarray(prediction).astype('float64')
prediction = np.log(prediction) / temperature
# exp_preds = np.exp(prediction)
# prediction = exp_preds / np.sum(exp_preds)
# probas = np.random.multinomial(1, prediction, 1)
index = np.argmax(prediction)
next_char = reverse_mapping[index]
# Generating new text
generated += next_char
seed = seed[1:] + next_char
return generated
# Greedy generation from two seed phrases. NOTE(review): the temperature
# argument is inert because generate() decodes with argmax.
song = generate("the answer my friend, is blowi", 50, 2.0)
song.split('\n')
song = generate("the times they ar", 200)
song.split('\n')
```
| github_jupyter |
```
import os
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import scipy.signal as scisig
import scipy.stats
import cvxEDA
# dir(cvxEDA)
# Sampling frequencies (Hz) per signal: wrist E4 channels (ACC/BVP/EDA/TEMP)
# plus chest RespiBAN channels and the label stream at 700 Hz.
fs_dict = {'ACC': 32, 'BVP': 64, 'EDA': 4, 'TEMP': 4, 'label': 700, 'Resp': 700, 'ECG':700}
# Feature-extraction window length, in seconds.
WINDOW_IN_SECONDS = 1
# Mapping between condition names and the integer class labels used here.
label_dict = {'baseline': 0, 'stress': 1, 'amusement': 2, 'meditation':3}
int_to_label = {0: 'baseline', 1: 'stress', 2: 'amusement', 3:'meditation'}
# Filled lazily on the first window in get_samples().
feat_names = None
# Output locations for per-subject and combined feature CSVs.
savePath = 'fresh_start'
subject_feature_path = '/subject_feats'
if not os.path.exists(savePath):
    os.makedirs(savePath)
if not os.path.exists(savePath + subject_feature_path):
    os.makedirs(savePath + subject_feature_path)
# cvxEDA
def eda_stats(y):
Fs = fs_dict['EDA']
yn = (y - y.mean()) / y.std()
[r, p, t, l, d, e, obj] = cvxEDA.cvxEDA(yn, 1. / Fs)
return [r, p, t, l, d, e, obj]
class SubjectData:
    """Loader for one WESAD subject's pickled recording (wrist + chest)."""

    def __init__(self, main_path, subject_number):
        # Subject folder/file layout: <main_path>/S<k>/S<k>.pkl
        self.name = f'S{subject_number}'
        self.subject_keys = ['signal', 'label', 'subject']
        self.signal_keys = ['chest', 'wrist']
        self.chest_keys = ['ACC', 'ECG', 'EMG', 'EDA', 'Temp', 'Resp']
        self.wrist_keys = ['ACC', 'BVP', 'EDA', 'TEMP']
        # latin1 encoding is needed to read the Python-2-era pickles.
        with open(os.path.join(main_path, self.name) + '/' + self.name + '.pkl', 'rb') as file:
            self.data = pickle.load(file, encoding='latin1')
        self.labels = self.data['label']

    def get_wrist_data(self):
        """Return the wrist (E4) signal dict, augmented with several chest channels."""
        data = self.data['signal']['wrist']
        data.update({'Resp': self.data['signal']['chest']['Resp']})
        data.update({'ECG': self.data['signal']['chest']['ECG']})
        data.update({'EMG': self.data['signal']['chest']['EMG']})
        data.update({'c_EDA': self.data['signal']['chest']['EDA']})
        data.update({'c_Temp': self.data['signal']['chest']['Temp']})
        data.update({'c_ACC': self.data['signal']['chest']['ACC']})
        return data

    def get_chest_data(self):
        """Return the raw chest (RespiBAN) signal dict."""
        return self.data['signal']['chest']

    def extract_features(self):  # only wrist
        # NOTE(review): get_statistics is not defined in this file's visible
        # scope -- confirm it exists elsewhere before calling this method.
        results = \
            {
                key: get_statistics(self.get_wrist_data()[key].flatten(), self.labels, key)
                for key in self.wrist_keys
            }
        return results
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital low-pass Butterworth filter.

    Returns the (b, a) transfer-function coefficients for an `order`-th
    order filter with `cutoff` in Hz at sampling rate `fs` Hz.
    """
    nyquist = 0.5 * fs
    return scisig.butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Apply a causal (forward-only) low-pass Butterworth filter to `data`."""
    coeff_b, coeff_a = butter_lowpass(cutoff, fs, order=order)
    return scisig.lfilter(coeff_b, coeff_a, data)
def get_slope(series):
    """Return the least-squares linear trend (slope) of `series`.

    The x-axis is the integer sample index 0..len(series)-1, so the slope
    is in signal units per sample.
    """
    result = scipy.stats.linregress(np.arange(len(series)), series)
    # Named attribute is clearer and less fragile than indexing the tuple.
    return result.slope
def get_window_stats(data, label=-1):
    """Summary statistics (mean/std/min/max) for one window, plus its label."""
    return {
        'mean': np.mean(data),
        'std': np.std(data),
        'min': np.amin(data),
        'max': np.amax(data),
        'label': label,
    }
def get_net_accel(data):
    """Magnitude of the 3-axis accelerometer signal: sqrt(x^2 + y^2 + z^2)."""
    squared_sum = data['ACC_x'] ** 2 + data['ACC_y'] ** 2 + data['ACC_z'] ** 2
    return squared_sum.apply(np.sqrt)
def get_peak_freq(x):
    """Frequency (Hz) of the strongest periodogram peak of `x`.

    The sampling rate is hard-coded to 8 Hz, matching the resampled
    wrist-signal rate used in this pipeline.
    """
    f, Pxx = scisig.periodogram(x, fs=8)
    # np.argmax replaces the previous {power: freq} dict, which silently
    # dropped bins whenever two bins had identical power.
    return f[np.argmax(Pxx)]
# Adapted from the MIT Media Lab eda-explorer accelerometer feature script:
# https://github.com/MITMediaLabAffectiveComputing/eda-explorer/blob/master/AccelerometerFeatureExtractionScript.py
def filterSignalFIR(eda, cutoff=0.4, numtaps=64):
    """Low-pass FIR-filter a signal sampled at the wrist ACC rate (fs_dict['ACC'])."""
    normalized_cutoff = cutoff / (fs_dict['ACC'] / 2.0)
    taps = scisig.firwin(numtaps, normalized_cutoff)
    return scisig.lfilter(taps, 1, eda)
def compute_features(e4_data_dict, labels, norm_type=None):
    """Time-align all signals into one DataFrame and split it by condition.

    Parameters
    ----------
    e4_data_dict : dict mapping signal name -> raw sample array, as returned
        by SubjectData.get_wrist_data() (wrist E4 plus selected chest channels).
    labels : per-sample condition labels at 700 Hz. Grouping below uses the
        raw WESAD protocol labels (1=baseline, 2=stress, 3=amusement,
        4=meditation), not the 0-based label_dict codes.
    norm_type : None, 'std', or 'minmax' -- optional whole-frame normalization.

    Returns
    -------
    (grouped, baseline, stress, amusement, meditation)
    """
    # Dataframes for each sensor type
    eda_df = pd.DataFrame(e4_data_dict['EDA'], columns=['EDA'])
    bvp_df = pd.DataFrame(e4_data_dict['BVP'], columns=['BVP'])
    acc_df = pd.DataFrame(e4_data_dict['ACC'], columns=['ACC_x', 'ACC_y', 'ACC_z'])
    temp_df = pd.DataFrame(e4_data_dict['TEMP'], columns=['TEMP'])
    label_df = pd.DataFrame(labels, columns=['label'])
    resp_df = pd.DataFrame(e4_data_dict['Resp'], columns=['Resp'])
    ecg_df = pd.DataFrame(e4_data_dict['ECG'], columns=['ECG'])
    emg_df = pd.DataFrame(e4_data_dict['EMG'], columns=['EMG'])
    c_temp_df = pd.DataFrame(e4_data_dict['c_Temp'], columns=['c_Temp'])
    c_acc_df = pd.DataFrame(e4_data_dict['c_ACC'], columns=['c_ACC_x', 'c_ACC_y', 'c_ACC_z'])
    # Filter EDA (1 Hz low-pass at the 4 Hz EDA rate)
    eda_df['EDA'] = butter_lowpass_filter(eda_df['EDA'], 1.0, fs_dict['EDA'], 6)
    # Filter ACM
    # NOTE(review): each column is assigned from filterSignalFIR(acc_df.values)
    # on the full 2-D array -- looks like acc_df[_] was intended; verify.
    for _ in acc_df.columns:
        acc_df[_] = filterSignalFIR(acc_df.values)
    # Adding indices for combination due to differing sampling frequencies
    eda_df.index = [(1 / fs_dict['EDA']) * i for i in range(len(eda_df))]
    bvp_df.index = [(1 / fs_dict['BVP']) * i for i in range(len(bvp_df))]
    acc_df.index = [(1 / fs_dict['ACC']) * i for i in range(len(acc_df))]
    temp_df.index = [(1 / fs_dict['TEMP']) * i for i in range(len(temp_df))]
    label_df.index = [(1 / fs_dict['label']) * i for i in range(len(label_df))]
    resp_df.index = [(1 / fs_dict['Resp']) * i for i in range(len(resp_df))]
    ecg_df.index = [(1 / fs_dict['ECG']) * i for i in range(len(ecg_df))]
    emg_df.index = [(1 / fs_dict['ECG']) * i for i in range(len(emg_df))]  # ECG fz = EMG fz
    c_temp_df.index = [(1 / fs_dict['ECG']) * i for i in range(len(c_temp_df))]
    c_acc_df.index = [(1 / fs_dict['ECG']) * i for i in range(len(c_acc_df))]
    # Change indices to datetime so the outer joins align on timestamps
    eda_df.index = pd.to_datetime(eda_df.index, unit='s')
    bvp_df.index = pd.to_datetime(bvp_df.index, unit='s')
    temp_df.index = pd.to_datetime(temp_df.index, unit='s')
    acc_df.index = pd.to_datetime(acc_df.index, unit='s')
    label_df.index = pd.to_datetime(label_df.index, unit='s')
    resp_df.index = pd.to_datetime(resp_df.index, unit='s')
    ecg_df.index = pd.to_datetime(ecg_df.index, unit='s')
    emg_df.index = pd.to_datetime(emg_df.index, unit='s')
    c_temp_df.index = pd.to_datetime(c_temp_df.index, unit='s')
    c_acc_df.index = pd.to_datetime(c_acc_df.index, unit='s')
    # New EDA features: cvxEDA decomposition into phasic/SMNA/tonic components
    r, p, t, l, d, e, obj = eda_stats(eda_df['EDA'])
    eda_df['EDA_phasic'] = r
    eda_df['EDA_smna'] = p
    eda_df['EDA_tonic'] = t
    # Combined dataframe - outer joins keep every timestamp from every rate
    df = eda_df.join(bvp_df, how='outer')
    df = df.join(temp_df, how='outer')
    df = df.join(acc_df, how='outer')
    df = df.join(resp_df, how='outer')
    df = df.join(label_df, how='outer')
    df = df.join(ecg_df, how='outer')
    df = df.join(emg_df, how='outer')
    df = df.join(c_temp_df, how='outer')
    df = df.join(c_acc_df, how='outer')
    # Labels only exist at 700 Hz timestamps; backfill them onto the rest.
    df['label'] = df['label'].fillna(method='bfill')
    df.reset_index(drop=True, inplace=True)
    # BUGFIX: was `norm_type is 'std'` / `is 'minmax'` -- identity comparison
    # with a string literal only works by interning accident; use equality.
    if norm_type == 'std':
        # std norm
        df = (df - df.mean()) / df.std()
    elif norm_type == 'minmax':
        # minmax norm
        df = (df - df.min()) / (df.max() - df.min())
    # Group by the raw WESAD protocol labels (1..4).
    grouped = df.groupby('label')
    baseline = grouped.get_group(1)
    stress = grouped.get_group(2)
    amusement = grouped.get_group(3)
    meditation = grouped.get_group(4)
    return grouped, baseline, stress, amusement, meditation
def get_samples(data, n_windows, label):
    """Slice `data` into fixed-length windows and compute per-window features.

    Returns one DataFrame with a row per window: flattened summary stats
    ('<signal>_<stat>') for every column, the window label, plus the BVP
    peak frequency and TEMP slope.
    """
    global feat_names
    global WINDOW_IN_SECONDS
    samples = []
    # Using label freq (700 Hz) as our reference frequency due to it being the largest
    # and thus encompassing the lesser ones in its resolution.
    window_len = fs_dict['label'] * WINDOW_IN_SECONDS
    for i in range(n_windows):
        # Get window of data
        w = data[window_len * i: window_len * (i + 1)]
        # Add/Calc rms acc
        # w['net_acc'] = get_net_accel(w)
        # NOTE(review): pd.concat with the default axis=0 appends the net-acc
        # values as extra ROWS, not as a new column; axis=1 looks like the
        # intent -- confirm against the original pipeline before changing.
        w = pd.concat([w, get_net_accel(w)])
        # Rename the first column produced by the concat to 'net_acc'.
        cols = list(w.columns)
        cols[0] = 'net_acc'
        w.columns = cols
        # Calculate stats for window
        wstats = get_window_stats(data=w, label=label)
        # Seperating sample and label
        x = pd.DataFrame(wstats).drop('label', axis=0)
        # NOTE(review): positional [0] on a labeled index relies on legacy
        # pandas fallback behavior -- verify on the pandas version in use.
        y = x['label'][0]
        x.drop('label', axis=1, inplace=True)
        # Build the flattened feature names once, on the first window.
        if feat_names is None:
            feat_names = []
            for row in x.index:
                for col in x.columns:
                    feat_names.append('_'.join([row, col]))
        # sample df: one row of flattened stats plus the label
        wdf = pd.DataFrame(x.values.flatten()).T
        wdf.columns = feat_names
        wdf = pd.concat([wdf, pd.DataFrame({'label': y}, index=[0])], axis=1)
        # More feats
        wdf['BVP_peak_freq'] = get_peak_freq(w['BVP'].dropna())
        wdf['TEMP_slope'] = get_slope(w['TEMP'].dropna())
        samples.append(wdf)
    return pd.concat(samples)
def make_patient_data(subject_id):
    """Extract windowed features for one subject and write them to a CSV.

    Output: {savePath}{subject_feature_path}/S{subject_id}_feats_4.csv with
    one-hot label columns (via pd.get_dummies) for the four conditions.
    """
    global savePath
    global WINDOW_IN_SECONDS
    # Make subject data object for Sx
    subject = SubjectData(main_path='WESAD', subject_number=subject_id)
    # Empatica E4 data - now with resp
    e4_data_dict = subject.get_wrist_data()
    # e4_data_dict = subject.get_chest_data()
    # norm type
    norm_type = None
    # Split the aligned signals by experimental condition.
    grouped, baseline, stress, amusement, meditation = compute_features(e4_data_dict, subject.labels, norm_type)
    # Number of whole windows per condition; the label stream (700 Hz) sets
    # the row count of each condition frame.
    n_baseline_wdws = int(len(baseline) / (fs_dict['label'] * WINDOW_IN_SECONDS))
    n_stress_wdws = int(len(stress) / (fs_dict['label'] * WINDOW_IN_SECONDS))
    n_amusement_wdws = int(len(amusement) / (fs_dict['label'] * WINDOW_IN_SECONDS))
    n_meditation_wdws = int(len(meditation) / (fs_dict['label'] * WINDOW_IN_SECONDS))
    # Window each condition with its 0-based class label (label_dict codes).
    baseline_samples = get_samples(baseline, n_baseline_wdws, 0)
    # Downsampling
    # baseline_samples = baseline_samples[::2]
    stress_samples = get_samples(stress, n_stress_wdws, 1)
    amusement_samples = get_samples(amusement, n_amusement_wdws, 2)
    meditation_samples = get_samples(meditation, n_meditation_wdws, 3)
    all_samples = pd.concat([baseline_samples, stress_samples, amusement_samples,meditation_samples])
    # Replace the integer label column with one-hot columns 0..3.
    all_samples = pd.concat([all_samples.drop('label', axis=1), pd.get_dummies(all_samples['label'])], axis=1)
    # Save file as csv (for now)
    all_samples.to_csv(f'{savePath}{subject_feature_path}/S{subject_id}_feats_4.csv')
    # Drop the reference so the (large) pickle payload can be collected.
    subject = None
def combine_files(subjects):
    """Concatenate the per-subject feature CSVs into one labelled dataset.

    Reads S{k}_feats_4.csv for each subject, recovers the integer label from
    the one-hot columns, writes the combined CSV, and prints class counts.
    """
    df_list = []
    for s in subjects:
        df = pd.read_csv(f'{savePath}{subject_feature_path}/S{s}_feats_4.csv', index_col=0)
        df['subject'] = s
        df_list.append(df)
    df = pd.concat(df_list)
    # Recover the integer label from the four one-hot columns '0'..'3':
    # concatenating "0"/"1" strings and finding the position of the '1'.
    df['label'] = (df['0'].astype(str) + df['1'].astype(str) + df['2'].astype(str)+ df['3'].astype(str)).apply(lambda x: x.index('1'))
    df.drop(['0', '1', '2','3'], axis=1, inplace=True)
    df.reset_index(drop=True, inplace=True)
    df.to_csv(f'{savePath}/may14_feats4_chest.csv')
    counts = df['label'].value_counts()
    print('Number of samples per class:')
    print (counts)
    for label, number in zip(counts.index, counts.values):
        print(f'{int_to_label[label]}: {number}')
if __name__ == '__main__':
    # WESAD subject IDs; 1 and 12 are skipped -- presumably absent from the
    # released dataset (TODO confirm).
    subject_ids = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17]
    # subject_ids = [2]
    for patient in subject_ids:
        print(f'Processing data for S{patient}...')
        make_patient_data(patient)
    combine_files(subject_ids)
    print('Processing complete.')

# Sanity-check the combined feature file and re-save it under a final name.
pre = pd.read_csv("fresh_start/may14_feats4_chest.csv")
pre['subject'].value_counts()
pre['label'].value_counts()
pre.info()
pre.to_csv('fresh_start/60s_window_wrist_chest.csv',index=False)
```
| github_jupyter |
```
import numpy as np
import zarr
import pandas as pd
import dask.array as da
import allel
import matplotlib.pyplot as plt
%matplotlib inline
```
## Cluster setup
```
from dask_kubernetes import KubeCluster
#cluster = KubeCluster(n_workers=30)
#cluster
# Start a Kubernetes-backed Dask cluster: scale to 6 workers now, then let
# it adapt between 0 and 6 workers based on load.
cluster= KubeCluster()
cluster.scale_up(6)
cluster.adapt(minimum=0, maximum=6)
from dask.distributed import Client
# Attach a distributed client so subsequent dask computations run on the cluster.
client = Client(cluster)
client
# client.get_versions(check=True)
```
## Data setup
```
# Path within the public GCS bucket of the zarr-formatted Ag1000G phase 2 callset.
storage_path = 'ag1000g-release/phase2.AR1/variation/main/zarr/all/ag1000g.phase2.ar1'
# GCS configuration
import gcsfs
# Anonymous, read-only access to the MalariaGEN bucket.
gcs_bucket_fs = gcsfs.GCSFileSystem(project='malariagen-jupyterhub', token='anon', access='read_only')
#store = gcsfs.mapping.GCSMap(storage_path, gcs=gcs_bucket_fs, check=False, create=False)
store = gcs_bucket_fs.get_mapper(storage_path)
# S3 or compatible object storage configuration
#import s3fs
#s3 = s3fs.S3FileSystem(anon=True, client_kwargs=dict(region_name='eu-west-2'))
#store = s3.get_mapper(root=storage_path)
# Consolidated metadata avoids one storage round-trip per zarr array.
callset = zarr.open_consolidated(store)
callset
# Work on chromosome arm 3R; wrap the genotype zarr in a dask-backed array.
chrom = '3R'
gtz = callset[chrom]['calldata/GT']
gtz
# gtz.info
gt = allel.GenotypeDaskArray(gtz)
gt
# Sample metadata (incl. population assignments) from the public FTP site.
df_samples = pd.read_csv('ftp://ngs.sanger.ac.uk/production/ag1000g/phase2/AR1/samples/samples.meta.txt',
                         sep='\t')
df_samples.head()
```
## Subset data
```
# Target population code (matches the `population` column in df_samples).
pop = 'GHcol'
# Boolean mask over variants: True where the site passed QC filters.
loc_pass_variants = callset[chrom]['variants/FILTER_PASS'][:]
loc_pass_variants
len(loc_pass_variants), np.count_nonzero(loc_pass_variants)
# Integer row indices of the samples belonging to the chosen population.
loc_pop_samples = df_samples[df_samples.population == pop].index.values
loc_pop_samples
len(loc_pop_samples)
# Subset genotypes to PASS variants and the population's samples (still lazy).
gt_pass_pop = gt.subset(loc_pass_variants, loc_pop_samples)
gt_pass_pop
```
## Allele count computation
```
# watch the dask dashboard while this is computing
# Evaluate the lazy allele-count computation: one row per variant, with counts
# for the REF allele plus up to three ALT alleles (max_allele=3).
ac_pass_pop = gt_pass_pop.count_alleles(max_allele=3).compute()
ac_pass_pop
# Number of variants still segregating within this single population.
ac_pass_pop.count_segregating()
```
## Multi-population test for selection
```
def population_allele_counts(chrom, pop):
    """Lazily compute per-variant allele counts for one population.

    Subsets the global ``callset`` genotypes on chromosome *chrom* to the
    PASS-filtered variants and to the samples whose ``population`` column in
    the global ``df_samples`` equals *pop*, then counts alleles (REF plus up
    to three ALTs).  Returns an unevaluated dask-backed allele-counts array.
    """
    genotypes = allel.GenotypeDaskArray(callset[chrom]['calldata/GT'])
    pass_mask = callset[chrom]['variants/FILTER_PASS'][:]
    sample_idx = df_samples[df_samples.population == pop].index.values
    subset = genotypes.subset(pass_mask, sample_idx)
    return subset.count_alleles(max_allele=3)
def pbs(chrom, pop1, pop2, pop3, window_size=100, min_maf=0.02, normed=True):
    """Windowed population branch statistic (PBS) scan along *chrom*.

    PBS highlights regions where *pop1* has diverged from *pop2* relative to
    the outgroup *pop3*.  Returns ``(starts, ends, res)``: window start/end
    coordinates in base pairs and one PBS value per window of *window_size*
    segregating variants.
    """
    # load variant positions
    loc_pass_variants = callset[chrom]['variants/FILTER_PASS'][:]
    pos = callset[chrom]['variants/POS'][:][loc_pass_variants]
    # load allele counts
    ac1 = population_allele_counts(chrom, pop1)
    ac2 = population_allele_counts(chrom, pop2)
    ac3 = population_allele_counts(chrom, pop3)
    # Evaluate the three lazy dask computations in a single pass over the data.
    ac1, ac2, ac3 = da.compute(ac1, ac2, ac3)
    ac1 = allel.AlleleCountsArray(ac1)
    ac2 = allel.AlleleCountsArray(ac2)
    ac3 = allel.AlleleCountsArray(ac3)
    # locate segregating variants at sufficient frequency
    # Pool counts across the three populations, then keep biallelic (REF/ALT1)
    # sites whose minor-allele frequency across the pool exceeds min_maf.
    ac = ac1 + ac2 + ac3
    loc_seg = ac.is_biallelic_01() & (ac.to_frequencies()[:, :2].min(axis=1) > min_maf)
    pos = pos[loc_seg]
    ac1 = ac1[loc_seg]
    ac2 = ac2[loc_seg]
    ac3 = ac3[loc_seg]
    # setup windows
    # Window boundaries are built to mirror allel.pbs's internal windowing:
    # one window per non-overlapping block of window_size variants
    # (moving_statistic defaults to step == size) — confirm counts match.
    starts = allel.moving_statistic(pos, statistic=lambda v: v[0], size=window_size)
    starts[0] = 1 # fix to start of sequence
    ends = np.append(starts[1:] - 1, [np.max(pos)])
    # compute pbs
    res = allel.pbs(ac1, ac2, ac3, window_size=window_size, normed=normed)
    return starts, ends, res
# watch the dask dashboard
# PBS scan: BFgam as focal population vs UGgam, with GW as outgroup, on 3R.
starts, ends, y = pbs('3R', 'BFgam', 'UGgam', 'GW')
fig, ax = plt.subplots(figsize=(14, 3))
# Plot each window's PBS value at the window midpoint.
x = (starts + ends) / 2
ax.plot(x, y, marker='o', linestyle=' ', mfc='none', mec='k', markersize=2)
ax.set_xlabel('Genome position (bp)')
ax.set_ylabel('PBS');
# watch the dask dashboard
# Same scan with BFcol as the focal population.
starts, ends, y = pbs('3R', 'BFcol', 'UGgam', 'GW')
fig, ax = plt.subplots(figsize=(14, 3))
x = (starts + ends) / 2
ax.plot(x, y, marker='o', linestyle=' ', mfc='none', mec='k', markersize=2)
ax.set_xlabel('Genome position (bp)')
ax.set_ylabel('PBS');
```
| github_jupyter |
```
# Visualization of the KO Gold Standard from:
# Miraldi et al. (2018) "Leveraging chromatin accessibility data for transcriptional regulatory network inference in Th17 Cells"
# TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load
# NOTE: Default limits networks to TF-TF edges in top 1 TF / gene model (.93 quantile), to see the full
# network hit "restore" (in the drop-down menu in cell below) and set threshold to 0 and hit "threshold"
# You can search for gene names in the search box below the network (hit "Match"), and find regulators ("targeted by")
# Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels
# Change "SVG" to "canvas" to speed up layout operations
# More info about jp_gene_viz and user interface instructions are available on Github:
# https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb
# directory containing gene expression data and network folder
directory = "."
# folder containing networks
netPath = 'Networks'
# network file name
networkFile = 'KO75_KOrk_1norm_sp.tsv'
# title for network figure
netTitle = 'KO Gold Standard'
# name of gene expression file
expressionFile = 'Th0_Th17_48hTh.txt'
# column of gene expression file to color network nodes
rnaSampleOfInt = 'Th17(48h)'
# edge cutoff, TF KO edges from Yosef et al. (2013) Nature. and Ciofani et al. (2012) Cell. are not on comparable scales
edgeCutoff = 0
import sys
# Make the parent directory importable so the bundled jp_gene_viz package resolves.
if ".." not in sys.path:
    sys.path.append("..")
from jp_gene_viz import dNetwork
# Inject the widget's javascript into the notebook front-end before first use.
dNetwork.load_javascript_support()
# from jp_gene_viz import multiple_network
from jp_gene_viz import LExpression
LExpression.load_javascript_support()
# Load network linked to gene expression data
L = LExpression.LinkedExpressionNetwork()
L.show()
# Load Network and Heatmap
L.load_network(directory + '/' + netPath + '/' + networkFile)
L.load_heatmap(directory + '/' + expressionFile)
N = L.network
N.set_title(netTitle)
# Apply the configured edge cutoff, then redraw the network.
N.threshhold_slider.value = edgeCutoff
N.apply_click(None)
N.draw()
# Add labels to nodes
N.labels_button.value=True
N.restore_click()
# Limit to TFs only, remove unconnected TFs, choose and set network layout
N.tf_only_click()
N.connected_only_click()
N.layout_dropdown.value = 'fruchterman_reingold'
N.layout_click()
# Interact with Heatmap
# Limit genes in heatmap to network genes
L.gene_click(None)
# Z-score heatmap values
L.expression.transform_dropdown.value = 'Z score'
L.expression.apply_transform()
# Choose a column in the heatmap (e.g., 48h Th17) to color nodes
L.expression.col = rnaSampleOfInt
L.condition_click(None)
# Switch SVG layout to get line colors, then switch back to faster canvas mode
N.force_svg(None)
```
| github_jupyter |
# Model development using MIMIC-IV EMR data only (Strategies 0-3)
1. Summary statistics
2. Feature selection (to add)
3. Model development
4. Hyperparameter tuning (to add)
5. Evaluation of the final model and error analysis (to add)
<img src="../results/class distribution.jpeg" alt="Groups" style="width: 400px;"/>
```
import numpy as np
import pandas as pd
import utils
from time import time
# NOTE(review): the next line re-imports the `time` module and shadows the
# `time` function imported just above — confirm which one later cells use.
import copy, math, os, pickle, time
import scipy.stats as ss
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB, ComplementNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score, f1_score, precision_recall_curve
# To show all columns in a dataframe
pd.options.display.max_info_columns=250
pd.options.display.max_columns=500
# To make pretty plots
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE(review): 'seaborn-ticks' was renamed 'seaborn-v0_8-ticks' in
# matplotlib >= 3.6 — confirm the pinned matplotlib version before re-running.
plt.style.use('seaborn-ticks')
sns.set_style('ticks')
plt.rcParams['figure.figsize'] = (6, 4)
plt.rcParams['axes.titlesize'] = 22
plt.rcParams['axes.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
%matplotlib inline
```
### Load and prepare the data
* For a simple model predicting PMV add "S0" to filename and set label to "over72h"
* For strategy S1 add "S1" to filename and set label to "over72h"
* For strategy S2 add "S2" to filename and set label to "over72h"
* For strategy S3 add "S3" to filename and set label to "good_outcome"
```
df_train = pd.read_csv("../data/mimic-emr-ft98-train-S0.csv")
df_train.drop(columns=["starttime", "endtime"], inplace=True)
label = "over72h"
print(df_train.shape)
df_train.head()
```
**Summary statistics**
```
df_train.describe()
```
**Drop constant variables**
```
df_train = df_train.loc[:, df_train.apply(pd.Series.nunique) != 1]
df_train.shape
```
### Assign cluster numbers based on severity scores
```
df_train = utils.cluster_by_severity(df_train)
```
### Feature selection
```
features=None
# features = df_train.select_dtypes(np.number).columns[1:-2].tolist()
# features = ["apsiii",
# "peep_min",
# "resp_rate_min",
# "paraplegia",
# "neuroblocker",
# "vasopressin",
# "chronic_pulmonary_disease",
# "cerebrovascular_disease",
# "congestive_heart_failure",
# "diabetes_with_cc",
# "ph_max"]
# features = ["apsiii",
# "peep_min",
# "resp_rate_min",
# "paraplegia",
# "neuroblocker",
# "vasopressin",
# "height",
# "chronic_pulmonary_disease",
# "cerebrovascular_disease",
# "congestive_heart_failure",
# "diabetes_with_cc"]
# features = ["heart_rate_max", "heart_rate_min",
# "peep_max", "ph_max",
# "resp_rate_max", "resp_rate_min",
# "spo2_min", "temp_max", "temp_min"]
# features = ["resp_rate_max",
# "resp_rate_min",
# "temp_max",
# "temp_min",
# "spo2_min",
# "glucose_max",
# "mbp_arterial_max",
# "apsiii",
# "glucose_min",
# "heart_rate_min",
# "heart_rate_max",
# "ph_max",
# "co2_total_min",
# "co2_total_max",
# "mbp_ni_min",
# "peep_min"]
# features = ['ph_max', 'spo2_min',
# 'heart_rate_min', 'heart_rate_max',
# 'resp_rate_min', 'resp_rate_max',
# 'temp_min', 'temp_max',
# 'glucose_max', 'glucose_min',
# 'co2_total_max', 'co2_total_min',
# 'mbp_max', 'mbp_ni_min',
# 'apsiii',
# 'peep_max', 'peep_min']
X_train, y_train = utils.get_X_and_y(df_train, features=features, label=label)
print(X_train.shape, y_train.shape)
preprocessor = utils.define_preprocessor(X_train.columns)
```
### Model development
```
# class_names = ("MV <= 72 hours", "MV > 72 hours")
# class_names = ("Bad outcome", "Good outcome")
# Candidate classifiers; commented entries were tried in earlier experiments.
clfs = (
    LogisticRegression(max_iter=1000),
    # KNeighborsClassifier(),
    # SVC(),
    # DecisionTreeClassifier(),
    # RandomForestClassifier(),
    GradientBoostingClassifier(),
    # CalibratedClassifierCV(GradientBoostingClassifier(), method='isotonic', cv=3)
)
# Cross-validate each candidate inside a preprocessing pipeline so scaling /
# encoding are re-fit per fold (no leakage).  NOTE: `scores` is overwritten
# each iteration, so only the last classifier's scores survive the loop.
for clf in clfs:
    pipe = Pipeline(steps=[('preprocessor', preprocessor),
                           ('classifier', clf)])
    scores = utils.benchmark_cv_score(pipe, X_train, y_train)
# Compare per-fold ROC AUC between two training strategies.
# NOTE(review): scores_S0 and scores_S2 are not defined in this cell — they
# presumably come from earlier manual runs with different input files;
# confirm before executing the notebook top-to-bottom.
from scipy.stats import mannwhitneyu, ttest_ind
print(mannwhitneyu(scores_S0['test_roc'], scores_S2['test_roc'], alternative="two-sided"))
print(ttest_ind(scores_S0['test_roc'], scores_S2['test_roc']))
```
### Compare full and reduced models
```
# Full model: all features (features=None).
X_train, y_train = utils.get_X_and_y(df_train, features=None, label=label)
print(X_train.shape, y_train.shape)
preprocessor = utils.define_preprocessor(X_train.columns)
clf = GradientBoostingClassifier()
pipe = Pipeline(steps=[('preprocessor', preprocessor),
                       ('classifier', clf)])
# Cross-validated out-of-fold predicted probabilities for the full model.
y_proba_full = utils.benchmark_cv(pipe, X_train, y_train)
# Reduced model: the `features` subset selected in an earlier cell.
X_train, y_train = utils.get_X_and_y(df_train, features=features, label=label)
print(X_train.shape, y_train.shape)
preprocessor = utils.define_preprocessor(X_train.columns)
clf = GradientBoostingClassifier()
pipe = Pipeline(steps=[('preprocessor', preprocessor),
                       ('classifier', clf)])
y_proba_small = utils.benchmark_cv(pipe, X_train, y_train)
from sklearn.metrics import roc_auc_score, roc_curve
plt.figure();
# Diagonal = chance performance.
sns.lineplot(x=[0, 1], y=[0, 1], color=sns.color_palette()[0], lw=2, linestyle='--', label="Chance")
# ROC for the full model (last column = positive-class probability).
fpr, tpr, _ = roc_curve(y_train, y_proba_full[:,-1])
roc_auc = roc_auc_score(y_train, y_proba_full[:,-1])
sns.lineplot(x=fpr, y=tpr, lw=3, color=sns.color_palette()[1],
             label="All features: AUC = %0.2f" % roc_auc)
# ROC for the reduced model.
fpr, tpr, _ = roc_curve(y_train, y_proba_small[:,-1])
roc_auc = roc_auc_score(y_train, y_proba_small[:,-1])
sns.lineplot(x=fpr, y=tpr, lw=3, color=sns.color_palette()[2],
             label="15 features: AUC = %0.2f" % roc_auc)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC curve")
plt.legend(loc="lower right", fontsize=14);
plt.savefig("../results/Feature selection ROC CV", bbox_inches='tight', dpi=300, transparent=False, pad_inches=0);
```
### Model calibration
```
preprocessor = utils.define_preprocessor(X_train.columns)
clf = GradientBoostingClassifier()
calibrated_clf = CalibratedClassifierCV(clf, method='isotonic', cv=3)
pipe = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', clf)])
calibrated_pipe = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', calibrated_clf)])
```
**Run cross validation to calibrate the model**
```
y_proba = utils.benchmark_cv(pipe, X_train, y_train)
y_proba_c = utils.benchmark_cv(calibrated_pipe, X_train, y_train)
```
**Diagnostic plots**
```
# Reliability (calibration) curve: fraction of positives vs mean predicted value.
sns.lineplot(x=[0, 1], y=[0, 1],
             color=sns.color_palette()[0],
             lw=2, linestyle='--',
             label="Perfectly calibrated")
fop, mpv = calibration_curve(y_train, y_proba[:,1], n_bins=30, normalize=False)
sns.lineplot(x=mpv, y=fop,
             lw=3, marker='.', markersize=15,
             color=sns.color_palette()[1],
             label="Uncalibrated");
fop, mpv = calibration_curve(y_train, y_proba_c[:,1], n_bins=30, normalize=False)
sns.lineplot(x=mpv, y=fop,
             lw=3, marker='.', markersize=15,
             color=sns.color_palette()[2],
             label="Calibrated");
plt.legend(fontsize=16, loc="upper left");
plt.xlabel("Mean predicted value");
plt.ylabel("Fraction of positives");
plt.savefig("../results/15ft_calibration.png", bbox_inches='tight', dpi=300, pad_inches=0);
# Histograms of predicted positive-class probability before/after calibration.
sns.histplot(y_proba[:,1], bins=10, stat="count",
             color=sns.color_palette()[1], lw=3, fill=False,
             label="Uncalibrated");
sns.histplot(y_proba_c[:,1], bins=10, stat="count",
             color=sns.color_palette()[2], lw=3, fill=False,
             label="Calibrated");
plt.ylim([0, 3800]);
plt.legend(fontsize=16, loc="upper right");
plt.xlabel("Mean predicted value");
plt.savefig("../results/15ft_probabilities.png", bbox_inches='tight', dpi=300, pad_inches=0);
```
### Threshold selection
```
def select_threshold(y_train, y_proba):
    """Pick the decision threshold that maximizes F1 on the PR curve.

    Parameters
    ----------
    y_train : array-like of binary ground-truth labels.
    y_proba : array-like of predicted probabilities for the positive class.

    Returns
    -------
    float
        The probability threshold whose precision/recall pair yields the
        highest F1 score.
    """
    precision, recall, thresholds = precision_recall_curve(y_train, y_proba)
    # Guard against 0/0 where precision == recall == 0: the naive formula
    # produces NaN, and np.argmax treats NaN as the maximum, corrupting the
    # threshold choice.  Undefined F1 is treated as 0 instead.
    denom = precision + recall
    fscore = np.divide(2 * precision * recall, denom,
                       out=np.zeros_like(denom), where=denom > 0)
    # precision/recall have one more entry than thresholds (the final PR point
    # has no associated threshold), so clamp the index to stay in bounds.
    idx = min(int(np.argmax(fscore)), len(thresholds) - 1)
    thresh = thresholds[idx]
    print('Best threshold is %.3f, F1 score=%.3f' % (thresh, fscore[idx]))
    return thresh
preprocessor = utils.define_preprocessor(X_train.columns)
clf = GradientBoostingClassifier()
pipe = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', clf)])
y_proba = utils.benchmark_cv(pipe, X_train, y_train)
thresh = select_threshold(y_train, y_proba)
df_train["y_proba"] = y_proba[:,1]
select_threshold(df_train[df_train.cluster==3].over72h, df_train[df_train.cluster==3].y_proba)
```
### Evaluation using CV
```
# Out-of-fold predicted labels at the tuned threshold; evaluate_model also
# prints the evaluation report.
df_train["y_pred"] = utils.evaluate_model(y_train, y_proba, ("MV < 72h", "MV >= 72h"),
                                          "CV, cluster 3", thresh=thresh, digits=3)
# Tag each row with its confusion-matrix cell (TN/FN/FP/TP).
df_train["outcome"] = 0
df_train.loc[(df_train.over72h == 0) & (df_train.y_pred == 0), "outcome"] = "TN"
df_train.loc[(df_train.over72h == 1) & (df_train.y_pred == 0), "outcome"] = "FN"
df_train.loc[(df_train.over72h == 0) & (df_train.y_pred == 1), "outcome"] = "FP"
df_train.loc[(df_train.over72h == 1) & (df_train.y_pred == 1), "outcome"] = "TP"
df_train.outcome.value_counts()
# Percentage breakdown of confusion-matrix cells within each severity cluster.
tmp = pd.DataFrame((df_train.groupby("cluster").outcome.value_counts() /
                    df_train.groupby('cluster').size() * 100).unstack())
tmp
color = sns.color_palette("Set1")
tmp.plot(kind="bar", stacked=True, color=color, alpha=0.8);
plt.legend(bbox_to_anchor=(1, 0.5), fontsize=16);
from sklearn.metrics import classification_report
# Cluster 1
# Per-cluster classification reports (clusters 0-3 assigned by severity score).
print(classification_report(df_train[df_train.cluster==0].over72h, df_train[df_train.cluster==0].y_pred, digits=3))
print(classification_report(df_train[df_train.cluster==1].over72h, df_train[df_train.cluster==1].y_pred, digits=3))
print(classification_report(df_train[df_train.cluster==2].over72h, df_train[df_train.cluster==2].y_pred, digits=3))
print(classification_report(df_train[df_train.cluster==3].over72h, df_train[df_train.cluster==3].y_pred, digits=3))
```
### Model evaluation on MIMIC data
```
preprocessor = utils.define_preprocessor(X_train.columns)
clf = GradientBoostingClassifier()
pipe = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', clf)])
pipe.fit(X_train, y_train)
```
**Feature importance**
```
feature_weights = pd.DataFrame(zip(X_train.columns, pipe['classifier'].feature_importances_),
columns=["feature", "weight"]).sort_values(by="weight", ascending=False)
plt.rcParams['figure.figsize'] = (4, 6)
ax = sns.barplot(y="feature", x="weight", data=feature_weights, orient="h");
plt.ylabel("Feature");
plt.xlabel("Relative importance");
plt.xlim([0, 0.35]);
utils.show_values_on_bars(ax, orient="h", space=0.01)
plt.savefig("../results/Feature importance", bbox_inches='tight', dpi=300, transparent=False, pad_inches=0);
feature_weights.feature.tolist()
```
**Test set**
```
df_test = pd.read_csv("../data/mimic-emr-test-S0.csv")
df_test.drop(columns=["starttime", "endtime"], inplace=True)
print(df_test.shape)
df_test.head()
df_test = df_test.loc[:, df_test.apply(pd.Series.nunique) != 1]
df_test.shape
X_test, y_test = utils.get_X_and_y(df_test, features=features, label=label)
print(X_test.shape, y_test.shape)
y_proba_test = pipe.predict_proba(X_test)
utils.evaluate_model(y_test, y_proba_test, ("MV < 72h", "MV >= 72h"), "test", digits=3,
save_figures=False, filename="../results/mimic-test")
```
### External validation on eICU data
```
# External validation cohort: eICU with the 17 matched features.
df_eicu = pd.read_csv("../data/eicu-ft17.csv")
print(df_eicu.shape)
df_eicu.head()
df_eicu.over72h.value_counts()
# Align eICU column naming with the MIMIC training columns.
df_eicu.rename({"mbp_arterial_max": "mbp_max"}, axis=1, inplace=True)
X_eicu, y_eicu = utils.get_X_and_y(df_eicu, features=features, label=label)
print(X_eicu.shape, y_eicu.shape)
# Score the eICU cohort with the pipeline fitted on MIMIC (no re-fitting).
y_proba_eicu = pipe.predict_proba(X_eicu)
utils.evaluate_model(y_eicu, y_proba_eicu, ("MV < 72h", "MV >= 72h"), "eICU", digits=3,
                     save_figures=False, filename="../results/eicu")
from sklearn.metrics import f1_score, auc, roc_auc_score
roc_auc = roc_auc_score(y_eicu, y_proba_eicu[:,-1])
roc_auc
from sklearn.metrics import roc_auc_score, roc_curve
plt.figure();
# Diagonal = chance performance.
sns.lineplot(x=[0, 1], y=[0, 1], color=sns.color_palette()[0], lw=2, linestyle='--', label="Chance")
# fpr, tpr, _ = roc_curve(y_test, y_proba_test[:,-1])
# roc_auc = roc_auc_score(y_test, y_proba_test[:,-1])
# sns.lineplot(x=fpr, y=tpr, lw=3, color=sns.color_palette()[1],
#              label="MIMIC-IV: AUC = %0.2f" % roc_auc)
fpr, tpr, _ = roc_curve(y_eicu, y_proba_eicu[:,-1])
roc_auc = roc_auc_score(y_eicu, y_proba_eicu[:,-1])
sns.lineplot(x=fpr, y=tpr, lw=3, color=sns.color_palette()[2],
             label="eICU: AUC = %0.2f" % roc_auc)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC curve")
plt.legend(loc="lower right", fontsize=14);
# plt.savefig("../results/ROC mimic vs eicu", bbox_inches='tight', dpi=300, pad_inches=0);
```
| github_jupyter |
```
# import dependencies
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#SETUP SQLALCHEMY ENVIRONMENT
#create_engine prepares the db to be connected to later
engine = create_engine("sqlite:///hawaii.sqlite")
#add automap base: helps your code to function properly
Base = automap_base()
# reflect the schema from the tables to our code:
Base.prepare(engine, reflect=True)
# NOTE(review): the engine creation / automap / reflection below repeats the
# three steps above verbatim — harmless, but one copy could be removed.
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
#This code references the classes that were mapped in each table.
#Base.classes: gives access to all classes
#keys(): references all the names of the classes
# Save references to each table
# syntax: Base.classes.<class name>
# assign variables
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# session essentially allows us to query for data.
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results.
#Starting from the last data point in the database.
# prev_year = dt.date(2017, 8, 23)
# Calculate the date one year from the last date in data set.
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
# Perform a query to retrieve the data and precipitation scores
# NOTE(review): the empty-list assignment below is immediately overwritten.
results = []
results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()
# print(results)
# Save the query results as a Pandas DataFrame and set the index to the date column
# this saves our query results in two columns, date and precipitation.
df = pd.DataFrame(results, columns=['date','precipitation'])
df.set_index(df['date'], inplace=True)
# Use Pandas Plotting with Matplotlib to plot the data
print(df)
# print the DataFrame without the index so we can see just the date and precipitation.
# convert the DataFrame to strings, and then we'll set our index to "False."
# This will allow us to print the DataFrame without the index
print(df.to_string(index=False))
```
### 9.2.3 Sort the DataFrame
```
# Sort the dataframe by date
df = df.sort_index()
# code to print the sorted list without the index
print(df.to_string(index=False))
```
### 9.2.4 Plot the Data
```
# Use Pandas Plotting with Matplotlib to plot the data
df.plot()
# Use Pandas to calculate the summary statistics for the precipitation data
df.describe()
```
#### This data gives us a summary of different statistics for the amount of precipitation in a year. The count is the number of times precipitation was observed. The other statistics are the precipitation amounts for each station for each day
### 9.3.1 Find the Number of Stations
```
# How many stations are available in this dataset?
session.query(func.count(Station.station)).all()
```
### 9.3.2 Determine the Most Active Stations
```
# What are the most active stations? (will need to add parameters to the query)
session.query(Measurement.station, func.count(Measurement.station)).\
group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
# List the stations and the counts in descending order.
```
#### In the left column is the station ID, and on the right are the counts for each station. The counts indicate which stations are most active. We can also see which stations are the least active.
### 9.3.3 Find Low, High, and Average Temperatures
```
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature for the most active station
session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
    filter(Measurement.station == 'USC00519281').all()
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
results = session.query(Measurement.tobs).\
    filter(Measurement.station == 'USC00519281').\
    filter(Measurement.date >= prev_year).all()
print(results)
# insert into a DataFrame
df = pd.DataFrame(results, columns=['tobs'])
print(df)
# Create histogram
# We'll be creating a histogram from the temperature observations.
# This will allow us to quickly count how many temperature observations we have.
df.plot.hist(bins=12)
plt.tight_layout()
```
#### Looking at this plot, we can infer that a vast majority of the observations were over 67 degrees. If you count up the bins to the right of 67 degrees, you will get about 325 days where it was over 67 degrees when the temperature was observed.
```
# Write a function called `calc_temps` that will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
```
### 9.4.3 Set Up Flask and Create a Route
```
pip install flask
```
# Challenge
| github_jupyter |
# MCIS6273 Data Mining (Prof. Maull) / Fall 2021 / HW0
**This assignment is worth up to 20 POINTS to your grade total if you complete it on time.**
| Points <br/>Possible | Due Date | Time Commitment <br/>(estimated) |
|:---------------:|:--------:|:---------------:|
| 20 | Wednesday, Sep 1 @ Midnight | _up to_ 20 hours |
* **GRADING:** Grading will be aligned with the completeness of the objectives.
* **INDEPENDENT WORK:** Copying, cheating, plagiarism and academic dishonesty _are not tolerated_ by University or course policy. Please see the syllabus for the full departmental and University statement on the academic code of honor.
## OBJECTIVES
* Familiarize yourself with the JupyterLab environment, Markdown and Python
* Familiarize yourself with Github and basic git
* Explore JupyterHub Linux console integrating what you learned in the prior parts of this homework
* Listen to the Talk Python['Podcast'] from June 25, 2021: A Path to Data Science Interview with Sanyam Bhutani
* Explore Python for data munging and analysis, with an introduction to CSV and Pandas
## WHAT TO TURN IN
You are being encouraged to turn the assignment in using the provided
Jupyter Notebook. To do so, make a directory in your Lab environment called
`homework/hw0`. Put all of your files in that directory. Then zip that directory,
rename it with your name as the first part of the filename (e.g. `maull_hw0_files.zip`), then
download it to your local machine, then upload the `.zip` to Blackboard.
If you do not know how to do this, please ask, or visit one of the many tutorials out there
on the basics of using zip in Linux.
If you choose not to use the provided notebook, you will still need to turn in a
`.ipynb` Jupyter Notebook and corresponding files according to the instructions in
this homework.
## ASSIGNMENT TASKS
### (0%) Familiarize yourself with the JupyterLab environment, Markdown and Python
As stated in the course announcement [Jupyter (https://jupyter.org)](https://jupyter.org) is the
core platform we will be using in this course and
is a popular platform for data scientists around the world. We have a JupyterLab
setup for this course so that we can operate in a cloud-hosted environment, free from
some of the resource constraints of running Jupyter on your local machine (though you are free to set
it up on your own and seek my advice if you desire).
You have been given the information about the Jupyter environment we have setup for our course, and
the underlying Python environment will be using is the [Anaconda (https://anaconda.com)](https://anaconda.com)
distribution. It is not necessary for this assignment, but you are free to look at the multitude
of packages installed with Anaconda, though we will not use the majority of them explicitly.
As you will soon find out, Notebooks are an incredibly effective way to mix code with narrative
and you can create cells that are entirely code or entirely Markdown. Markdown (MD or `md`) is
a highly readable text format that allows for easy documentation of text files, while allowing
for HTML-based rendering of the text in a way that is style-independent.
We will be using Markdown frequently in this course, and you will learn that there are many different
"flavors" or Markdown. We will only be using the basic flavor, but you will benefit from exploring
the "Github flavored" Markdown, though you will not be responsible for using it in this course -- only the
"basic" flavor. Please refer to the original course announcement about Markdown.
§ **THERE IS NOTHING TO TURN IN FOR THIS PART.** Play with and become familiar with the basic functions of
the Lab environment given to you online in the course Blackboard.
§ **PLEASE _CREATE A MARKDOWN DOCUMENT_ CALLED `semester_goals.md` WITH 3 SENTENCES/FRAGMENTS THAT
ANSWER THE FOLLOWING QUESTION:**
* **What do you wish to accomplish this semester in Data Mining?**
Read the documentation for basic Markdown [here](https://www.markdownguide.org/basic-syntax).
Turn in the text `.md` file *not* the processed `.html`. In whatever you turn in,
you must show the use of *ALL* the following:
* headings (one level is fine),
* bullets,
* bold and italics
Again, the content of your document needs to address the question above and it should live
in the top level directory of your assignment submission. This part will be graded but no
points are awarded for your answer.
### (0%) Familiarize yourself with Github and basic git
[Github (https://github.com)](https://github.com) is the _de facto_ platform for open source software in the world based
on the very popular [git (https://git-scm.org)](https://git-scm.org) version control system. Git has a sophisticated set
of tools for version control based on the concept of local repositories for fast commits and remote
repositories only when collaboration and remote synchronization is necessary. Github enhances git by providing
tools and online hosting of public and private repositories to encourage and promote sharing and collaboration.
Github hosts some of the world's most widely used open source software.
**If you are already familiar with git and Github, then this part will be very easy!**
§ **CREATE A PUBLIC GITHUB REPO NAMED `"mcis6273-F21-datamining"` AND PLACE A README.MD FILE IN IT.**
Create your first file called
`README.md` at the top level of the repository. You can put whatever text you like in the file
(If you like, use something like [lorem ipsum](https://lipsum.com/)
to generate random sentences to place in the file.).
Please include the link to **your** Github repository that now includes the minimal `README.md`.
You don't have to have anything elaborate in that file or the repo.
### (0%) Explore JupyterHub Linux console integrating what you learned in the prior parts of this homework
The Linux console in JupyterLab is a great way to perform command-line tasks and is an essential tool
for basic scripting that is part of a data scientist's toolkit. Open a console in the lab environment
and familiarize yourself with your files and basic commands using git as indicated below.
1. In a new JupyterLab command line console, run the `git clone` command to clone the new
repository you created in the prior part.
You will want to read the documentation on this
command (try here [https://www.git-scm.com/docs/git-clone](https://www.git-scm.com/docs/git-clone) to get a good
start).
2. Within the same console, modify your `README.md` file, check it in and push it back to your repository, using
`git push`. Read the [documentation about `git push`](https://git-scm.com/docs/git-push).
3. The commands `wget` and `curl` are useful for grabbing data and files from remote resources off the web.
Read the documentation on each of these commands by typing `man wget` or `man curl` in the terminal.
Make sure you pipe the output to a file or use the proper flags to do so.
§ **THERE IS NOTHING TO TURN IN FOR THIS PART.**
### (30%) Listen to the Talk Python['Podcast'] from June 25, 2021: A Path to Data Science Interview with Sanyam Bhutani
Data science is one of the most important and "hot" disciplines today
and there is a lot going on from data engineering to modeling and
analysis.
Bhutani is one of the top [Kaggle]() leaders and in this interview
shares his experience from computer science to data science,
documenting some of the lessons he learned along the way.
Please listen to this one hour podcast and answer some of the questions below.
You can listen to it from one of the two links below:
* [Talk Python['Podcast'] landing page](https://talkpython.fm/episodes/transcript/322/a-path-into-data-science)
* [direct link to mp3 file](https://downloads.talkpython.fm/podcasts/talkpython/322-starting-in-data-sci.mp3)
§ **PLEASE ANSWER THE FOLLOWING QUESTIONS AFTER LISTENING TO THE PODCAST:**
1. List 3 things that you learned from this podcast?
**ANSWER**
(a). Computer science has become more practical these days.
(b). To gain programming knowledge, we need more practice with coding techniques and should try to solve concrete problems.
(c). He became a top performer in Kaggle competitions and tends to set new goals every year.
2. What is your reaction to the podcast? Pick at least one point Sanyam brought up in the interview that you agree with and list your reason why.
**ANSWER**
One point Sanyam brought up in this interview that I agree with is that you gain a lot of knowledge by learning from forums and discussion boards; when it comes to practical knowledge, many members have problems with coding techniques and with solving real issues.
3. After listening to the podcast, do you think you are more interested or less interested in a career in Data Science?
**ANSWER**
Yes, I am now more interested in a career in the Data Science field and plan to develop my career in it.
### (70%) Explore Python for data munging and analysis, with an introduction to CSV and Pandas
Python's strengths shine when tasked with data munging and analysis. As we will learn throughout
the course, there are a number of excellent data sources for open data of all kinds now
available for the public. These open data sources are heralding the new era of transparency
from all levels from small municipal data to big government data, from transportation, to science,
to education.
To warm up to such datasets, we will be working with an interesting
dataset from the US Fish and Wildlife Service (FWS). This is a
water quality data set taken from a managed national refuge in
Virginia called Back Bay National Wildlife Refuge, which was
established in 1938. As a function of being managed by the FWS,
water quality samples are taken regularly from the marshes within
the refuge.
You can (and should) learn a little more about Back Bay from
this link, since it has an interesting history, features and wildlife.
* [https://www.fws.gov/refuge/Back_Bay/about.html](https://www.fws.gov/refuge/Back_Bay/about.html)
The data we will be looking at can be found as a direct download
from data.gov, the US data repository where many datasets from
a variety of sources can be found -- mostly related to the
multitude of US government agencies.
The dataset is a small water quality dataset with several decades
of water quality data from Back Bay. We will be warming up
to this dataset with a basic investigation into the shape, content
and context of the data contained therein.
In this part of the assignment, we will make use of Python libraries to pull the data from the
endpoint and use [Pandas](https://pandas.pydata.org) to plot the data. The raw CSV data is
readily imported into Pandas from the following URL:
* [FWS Water Quality Data 12/20/2020](https://catalog.data.gov/dataset/water-quality-data/resource/f4d736fd-ade9-4e3f-b8e0-ae7fd98b2f87)
Please take a look at the page, on it you will notice a link
to the raw CSV file:
* [https://ecos.fws.gov/ServCat/DownloadFile/173741?Reference=117348](https://ecos.fws.gov/ServCat/DownloadFile/173741?Reference=117348)
We are going to explore this dataset to learn a bit more about the
water quality characteristics of Back Bay over the past couple decades
or so.
§ **WRITE THE CODE IN YOUR NOTEBOOK TO LOAD AND RESHAPE THE COMPLETE CSV WATER QUALITY DATASET**:
You will need to perform the following steps:
1. **use [`pandas.read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) method to load the dataset** into a Pandas DataFrame;
**ANSWER:**
import pandas as pd
df=pd.read_csv('https://ecos.fws.gov/ServCat/DownloadFile/173741?Reference=117348')
2. **clean the data so that the range of years is restricted to the 20 year period from 1999 to 2018**
**ANSWER:**
#filter_range=pd.DataFrame({'Year': range(1999, 2018)})
df = df[(df['Year'] >= 1999) & (df['Year'] <= 2018)]
3. **store the entire dataset back into a new CSV** file called `back_bay_1998-2018_clean.csv`.
**ANSWER:**
df.to_csv('back_bay_1998-2018_clean.csv')
**HINTS:** _Here are some code hints you might like to study and use to craft a solution:_
* study [`pandas.DataFrame.query()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html?highlight=query#pandas.DataFrame.query) to learn how to filter and query year ranges
* study [`pandas.DataFrame.groupby()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html?highlight=groupby#pandas.DataFrame.groupby) to understand how to group data
§ **USE PANDAS TO LOAD THE CSV DATA TO A DATAFRAME AND ANSWER THE FOLLOWING QUESTIONS:**
1. How many and what are the names of the columns in this dataset?
**ANSWER:**
There are 17 columns in the dataset and below are the column names in dataset.
Site_Id
Unit_Id
Read_Date
Salinity (ppt)
Dissolved Oxygen (mg/L)
pH (standard units)
Secchi Depth (m)
Water Depth (m)
Water Temp (?C)
Air Temp-Celsius
Air Temp (?F)
Time (24:00)
Field_Tech
DateVerified
WhoVerified
AirTemp (C)
Year
2. What is the mean `Dissolved Oxygen (mg/L)` over the entire dataset?
>This is to load the data from csv file into data frame for performing the required operations
dataframe=pd.read_csv('back_bay_1998-2018_clean.csv')
**ANSWER:**
mean_dissolved_oxygen = dataframe['Dissolved Oxygen (mg/L)'].mean()
print(mean_dissolved_oxygen)
**Result**
5.8677
>This value is the result obtained from filtered dataset
In order to find the mean value for the entire dataset
import pandas as pd
df=pd.read_csv('https://ecos.fws.gov/ServCat/DownloadFile/173741?Reference=117348')
mean_dissolved_oxygen = df['Dissolved Oxygen (mg/L)'].mean()
print(mean_dissolved_oxygen)
**Result**
6.646263157894744
3. Which year were the highest number of `AirTemp (C)` data points collected?
**ANSWER:**
highest_data_points = dataframe.groupby('Year').size()
print(highest_data_points)
**Result**
Year
1999 45
2000 109
2001 115
2002 108
2003 98
2004 119
2005 107
2006 118
2007 114
2008 117
2009 120
2010 109
2011 97
2012 88
2013 91
2014 87
2015 88
2016 59
2017 85
2018 82
dtype: int64
> From the above result 2009 has the highest data points(120)
4. Which year were the least number of `AirTemp (C)` data points collected?
**ANSWER:**
lowest_data_points = dataframe.groupby('Year').size()
print(lowest_data_points)
**Result**
Year
1999 45
2000 109
2001 115
2002 108
2003 98
2004 119
2005 107
2006 118
2007 114
2008 117
2009 120
2010 109
2011 97
2012 88
2013 91
2014 87
2015 88
2016 59
2017 85
2018 82
dtype: int64
>From the above result 1999 has the lowest data points i.e (45)
To answer these questions, you'll need to dive further into Pandas, which is
the standard tool in the Python data science stack for loading, manipulating,
transforming, analyzing and preparing data as input to other tools such as
[Numpy (http://www.numpy.org/)](http://www.numpy.org/),
[SciKitLearn (http://scikit-learn.org/stable/index.html)](http://scikit-learn.org/stable/index.html),
[NLTK (http://www.nltk.org/)](http://www.nltk.org/) and others.
For this assignment, you will only need to learn how to load and select data using Pandas.
* **LOADING DATA**
The core data structure in Pandas is the `DataFrame`. You will need to visit
the Pandas documentation [(https://pandas.pydata.org/pandas-docs/stable/reference/)](https://pandas.pydata.org/pandas-docs/stable/reference/)
to learn more about the library, but to help you along with a hint, read the
documentation on the [`pandas.read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) method.
* **SELECTING DATA**
The [tutorial here on indexing and selecting](http://pandas.pydata.org/pandas-docs/stable/indexing.html)
should be of great use in understanding how to index and select subsets of
the data to answer the questions.
* **GROUPING DATA** You may use [`DataFrame.value_counts()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.value_counts.html?highlight=value_counts#pandas.DataFrame.value_counts) or [`DataFrame.groupby()`](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html) to group
the data you need for these questions. You will also find [`DataFrame.groupby()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html?highlight=groupby#pandas.DataFrame.groupby) and [`DataFrame.describe()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.describe.html?highlight=describe#pandas.DataFrame.describe) very useful.
**CODE HINTS**
Here is example code that should give you clues about the structure
of your code for this part.
```python
import pandas as pd
df = pd.read_csv('your_json_file.csv')
# code for question 1 ... and so on
```
§ **EXPLORING WATER SALINITY IN THE DATA**
The Back Bay refuge is on the eastern coast of Virginia and to
the east is the Atlantic Ocean. Salinity is a measure of
the salt concentration of water, and you can learn a little more
about salinity in water [here](https://www.usgs.gov/special-topic/water-science-school/science/saline-water-and-salinity?qt-science_center_objects=0#qt-science_center_objects).
You will notice that there is a `Site_Id` variable in the data, which
we will find refers to the five sampling locations (see the [documentation here](https://ecos.fws.gov/ServCat/Reference/Profile/117348))
of (1) the Bay, (2) D-Pool (fishing pond), (3) C-Pool, (4) B-Pool and (5) A-Pool.
The ppt in Salinity is the percent salinity, and so 1 ppt is equivalent to 10000 ppm salinity. Use this information to answer
the following questions.
1. Which sampling location has the highest mean ppt? What is the equivalent ppm?
**Answer**
import pandas as pd
df=pd.read_csv('https://ecos.fws.gov/ServCat/DownloadFile/173741')
mean_salinity = df.groupby('Site_Id')['Salinity (ppt)'].mean()
print(mean_salinity)
**Result**
Site_Id
A 0.329517
B 0.433765
Bay 1.483154
C 0.596397
D 0.099619
>Looking at the result, the Bay has the highest mean ppt of 1.483154. Its equivalent ppm is 14831.54
2. When looking at the mean ppt, which location would you infer is furthest from the influence of ocean water inflows?
(Assume that higher salinity correlates to closer proximity to the ocean.)
**Answer**
Looking at the mean ppt values of all sampling locations, I would infer D-Pool (fishing pond) is furthest from the influence of ocean water.
3. Dig a little deeper into #2, and write why there may be some uncertainty in your answer? (hint: certainty is improved by consistency in data)
**Answer**
There might be some uncertainty in the answer that was provided because we can notice some inconsistencies in the data. For example, there are values for Site_Id which are not documented, like 'd', and also some blanks. We have to sanitize the data before performing any analysis/operations.
4. Use the data to determine the correlation between `Salinity (ppt)` and `pH (standard units)`. Use the [DataFrame.corr()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.core.groupby.DataFrameGroupBy.corr.html?highlight=correlate). You just need to report the correlation value.
**Answer**
correlation_value = df['Salinity (ppt)'].corr(df['pH (standard units)'])
print(correlation_value)
**Result**
0.29607528012371914
| github_jupyter |
# SMIB system as in Milano's book example 8.1
```
%matplotlib widget
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as sopt
import ipywidgets
from pydae import ssa
import json
```
## Import system module
```
from smib_milano_ex8p1_4ord_avr import smib_milano_ex8p1_4ord_avr_class
```
## Instantiate system
```
syst = smib_milano_ex8p1_4ord_avr_class()
```
## Initialize the system (backward and forward)
```
events=[{'p_t':0.8, 'v_t':1.1, 'K_a':500, 'T_e':0.1}]
syst.initialize(events,xy0=1)
syst.save_0()
syst.report_u()
syst.report_x()
syst.report_y()
```
## Simulation
```
syst = smib_milano_ex8p1_4ord_avr_class()
events=[{'p_t':0.8, 'v_t':1.0, 'K_a':400, 'T_e':0.5, 'H':6}]
syst.initialize(events,xy0=1)
events=[{'t_end':1.0},
{'t_end':15.0, 'p_m':0.8, 'v_ref':1.05}
]
syst.simulate(events,xy0='prev');
plt.close('all')
fig, axes = plt.subplots(nrows=2,ncols=2, figsize=(10, 5), frameon=False, dpi=50)
axes[0,0].plot(syst.T, syst.get_values('omega'), label=f'$\omega$')
axes[0,1].plot(syst.T, syst.get_values('v_t'), label=f'$v_t$')
axes[1,0].plot(syst.T, syst.get_values('p_t'), label=f'$p_t$')
axes[1,1].plot(syst.T, syst.get_values('q_t'), label=f'$q_t$')
```
## Run in two time intervals
```
events=[{'t_end':1.0}]
syst.run(events)
events=[{'t_end':2.0}]
syst.run(events)
syst.get_value('omega')
events=[{'p_t':0.8, 'v_t':1.0, 'K_a':400, 'T_e':0.5}]
syst.initialize(events,xy0=1)
ssa.eval_A(syst)
ssa.damp_report(syst)
syst.get_value('p_m')
Ts_control = 0.010
times = np.arange(0.0,10,Ts_control)
# Calculate second references
events=[{'P_t':0.9, 'Q_t':0.0}]
syst.initialize(events,xy0=1.0)
x_ref = np.copy(syst.struct[0].x)
v_f_ref = syst.struct[0]['v_f']
p_m_ref = syst.struct[0]['p_m']
# Calculate initial references
events=[{'P_t':0.0, 'Q_t':0.0}]
syst.initialize(events,xy0=1.0)
x_0 = np.copy(syst.struct[0].x)
v_f_0 = syst.get_value('v_f')
p_m_0 = syst.get_value('p_m')
# Control design
ssa.eval_ss(syst)
Q = np.eye(syst.N_x)*100
R = np.eye(syst.N_u)
K = ctrl.place(syst.A,syst.B,[-2.0+1j*6,-2.0-1j*6,-100,-101])
K,S,E = ctrl.lqr(syst.A,syst.B,Q,R)
Ad,Bd = ssa.discretise_time(syst.A,syst.B,Ts_control)
Kd,S,E = ssa.dlqr(Ad,Bd,Q,R)
# Closed-loop simulation: step the plant every Ts_control seconds and,
# after t = 1 s, apply the state-feedback law designed above.
for t in times:
    # snapshot of the current plant state vector
    x = np.copy(syst.struct[0].x)
    # default actuation: hold the initial field voltage and mechanical power
    v_f = v_f_0
    p_m = p_m_0
    if t>1.0:
        # feedback action on the deviation from the reference state
        # NOTE(review): assumes K is a 2xN matrix so * is a matrix product
        # and rows map to (p_m, v_f) -- confirm against the design cells above
        u_ctrl = K*(x_ref - x)
        p_m = p_m_ref + u_ctrl[0]
        v_f = v_f_ref + u_ctrl[1]
    # advance the simulation up to time t with the chosen inputs
    events=[{'t_end':t,'v_f':v_f,'p_m':p_m}]
    syst.run(events)
# post-process the run (populates syst.T and the recorded output values)
syst.post();
plt.close('all')
fig, axes = plt.subplots(nrows=2,ncols=2, figsize=(10, 5), frameon=False, dpi=50)
axes[0,0].plot(syst.T, syst.get_values('omega'), label=f'$\omega$')
axes[0,1].plot(syst.T, syst.get_values('v_1'), label=f'$v_1$')
axes[1,0].plot(syst.T, syst.get_values('P_t'), label=f'$P_t$')
axes[1,1].plot(syst.T, syst.get_values('Q_t'), label=f'$Q_t$')
ssa.eval_ss(syst)
from scipy.signal import ss2tf,lti,bode
num,den =ss2tf(syst.A,syst.B,syst.C,syst.D,input=0)
G = lti(num[1],den)
w, mag, phase = G.bode()
plt.figure()
plt.semilogx(w, mag) # Bode magnitude plot
plt.figure()
plt.semilogx(w, phase) # Bode phase plot
plt.show()
events=[{'t_end':1.0,'P_t':0.8, 'Q_t':0.5},
{'t_end':10.0, 'p_m':0.9}
]
syst.simulate(events,xy0=1.0);
syst.inputs_run_list
0.01/6
syst.B
syst.struct[0]['Fu']
```
| github_jupyter |
# Simrandeep Brar
## Research question/interests
My research question is to determine what show type (movie/tv show & pg/pg-13/mature) is the most popular. I'm researching this question because I want to see what kind of content people seem to enjoy the most on Netflix. I'll be using multiple supplemental graphs to support my question. The graphs will be 1: TV shows vs movies released during a time period. 2: The ratings of TV shows during that time. 3: The ratings of movies during that time. 4: Comparing the ratings of the most popular TV shows vs the most popular ratings of movies.
```
#Importing libraries
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pylab as plt
netflixds = pd.read_csv("../data/raw/netflix_titles.csv")
```
## Data Analysis Pipeline
##### The data analysis pipeline is used to clean up the dataset by removing all null values and also removing the Description column of the dataset as it wasn't needed for my research question.
```
# Tidy up the raw Netflix listing: drop rows with missing values,
# re-index from zero, rename every column to a human-readable label,
# drop the unused Description column and order titles by release year.
readable_names = {
    "show_id": "Show ID",
    "type": "Type",
    "title": "Title",
    "director": "Director",
    "cast": "Cast",
    "country": "Country",
    "date_added": "Date Added",
    "release_year": "Release Year",
    "rating": "Rating",
    "duration": "Duration",
    "listed_in": "Genre",
    "description": "Description",
}
netflix_updated = (
    netflixds.dropna()
             .reset_index(drop=True)
             .rename(columns=readable_names)
             .drop(["Description"], axis=1)
             .sort_values(by="Release Year", ascending=True)
)
netflix_updated
```
## Method Chaining
##### I'm using method chaining to help streamline the process and make it universal to most databases. Doing so is much more efficient and easier to read compared to creating a new method each time for each new database.
```
from methods import project_functions
df = project_functions.load_and_process("../data/raw/netflix_titles.csv")
df
```
## Exploratory Data Analysis
##### This EDA is to help me get a better understanding on my data set by getting some information and creating some graphs to visualize the data.
### Plot 1: The Quantity of Movies vs TV Shows on Netflix
```
plt.figure(figsize=(16,10))
graph1 = sns.countplot(x='Type',data=netflix_updated)
sns.set(font_scale = 2)
graph1.set(title = 'The Quantity of Movies vs TV Shows on Netflix')
```
#### Comments
##### After viewing the plot above, it's quite obvious that there's a much larger volume of movies available for consumers on Netflix compared to TV shows. This isn't really indicative of too much at the moment as the lower number of TV shows available can be slightly misleading. Although there may be only a few TV shows available on Netflix, that doesn't account for all the seasons those shows may have. If each season were to be counted as its own TV show, of course the number of TV shows would go up drastically.
### Plot 2: Overall Movie Ratings on Netflix
```
test = netflix_updated.loc[(netflix_updated['Type'] == 'Movie')]
plt.figure(figsize=(25,10))
plt.ylim(0, 1800)
graph3=sns.countplot(x='Rating', data=test)
sns.set(font_scale = 2)
graph3.set(title = 'Overall Movie Ratings on Netflix')
```
### Plot 3: Overall TV Show Ratings on Netflix
```
test2 = netflix_updated.loc[(netflix_updated['Type'] == 'TV Show')]
plt.figure(figsize=(25,10))
plt.ylim(0, 1800)
graph4 = sns.countplot(x='Rating', data=test2)
sns.set(font_scale = 2)
graph4.set(title = 'Overall TV Show Ratings on Netflix')
```
#### Comments
##### After examining both plots above, we can determine that there's a much larger volume of TV-MA rated content available on Netflix in terms of both films and TV shows. Although this data doesn't conclusively prove that TV-MA content is the most popular on Netflix, it could be inferred that this content is more widely available due to its popularity. As I stated earlier, it's also possible that the amount of TV show content available to watch is close to the movie content available but just isn't shown in the data as additional seasons aren't accounted for.
### Plot 4: TV-MA TV Show Quantities vs TV-MA Movie Quantities on Netflix
```
filter1 = netflix_updated.loc[(netflix_updated['Rating'] == 'TV-MA')]
graph2=filter1.groupby(['Type','Rating']).size()
graph2=graph2.unstack()
graph2.plot(figsize=(25,10), kind='bar', title = 'TV-MA Movie Quantities on Netflix vs TV-MA TV Show Quantities ')
```
#### Comments
##### As can be seen from the prior plots, as well as the graph above, TV-MA rated movies are significantly more common on Netflix compared to TV-MA rated TV shows. I do believe this to be a bit more telling as to what the most popular content on Netflix is, although I can't say for certain without knowing how much TV show content is truly available.
## Conclusion
After taking a look at all of our data and the supplemental graphs I believe I've come to a conclusion regarding my research question. The question was to determine what kind of content was the most popular on Netflix and I believe that is TV-MA rated movies. I believe this to be true because as we can see in the first graph, movies significantly outnumber the available tv shows on the platform. However, this obviously isn't a great indicator as tv shows may have multiple seasons and those seasons don't get counted as a new addition on Netflix.
Taking a look at the next 2 graphs, we can see that the most common type of content on Netflix is TV-MA rated for both movies and tv shows. Taking this into consideration and the sheer difference in available content between the two content types, I believe that TV-MA rated movies are the most popular content on Netflix.
My reasoning for this is simple. Movie producers are interested in making the most profit possible, it only makes sense for them to create content that helps them achieve this goal. In Netflix's case, they want to add content to the site that helps them bring in new customers and help retain existing customers. So taking this into consideration, the fact that the most commonly available content on Netflix is TV-MA rated movies, I believe it's fair to say that is because it is the most popular and enjoyed content on the platform.
| github_jupyter |
# Week 2 - Data handling
The Python modules `pandas` and `numpy` are useful libraries to handle datasets and apply basic operations on them.
Some of the things we learnt in week 1 using native Python (e.g. accessing, working with and writing data files, and performing operations on them) can be easily achieved using `pandas` instead. `pandas` offers data structures and operations for manipulating different types of datasets - see [documentation](https://pandas.pydata.org/).
We will only cover `pandas` today, however feel free to explore `numpy` in parallel at your own pace e.g. following [this tutorial](https://numpy.org/devdocs/user/quickstart.html) and combining it with continuing to learn `pandas`.
### Aims
- Gain familiarity using `pandas` to handle datasets
- Create, read and write data
- Select a subset of variables (columns)
- Filter rows based on their values
- Sort datasets
- Create new columns or modify existing ones
- Summarise and collapse values in one or more columns to a single summary value
- Handle missing data
- Merge datasets
### Installing pandas
The module `pandas` does not come by default as part of the default Anaconda installation. In order to install it in your system, launch the "Anaconda Prompt (Anaconda3)" program and run the following command: `conda install pandas`. Once the command finishes execution, `pandas` will be installed in your system
<img src="../img/az_conda_prompt.png" width="400">
**Note:** if you have any issues installing `pandas`, please get in touch with one of the trainers after the lecture
### Loading pandas
Once installed, you can import it e.g. using the alias `pd` as follows:
```
import pandas as pd
```
### Reading datasets with `pandas`
We are going to use the METABRIC dataset `metabric_clinical_and_expression_data.csv` containing information about breast cancer patients as we did in week 1.
Pandas allows importing data from various file formats such as csv, xls, json, sql ...
To read a csv file, use the method `.read_csv()`:
```
metabric = pd.read_csv("../data/metabric_clinical_and_expression_data.csv")
metabric
print(metabric)
```
If you forget to include `../data/` above, or if you include it but your copy of the file is saved somewhere else, you will get an error that ends with a line like this: `FileNotFoundError: File b'metabric_clinical_and_expression_data.csv' does not exist`
Generally, rows in a `DataFrame` are the **observations** (patients in the case of METABRIC) whereas columns are known as the observed **variables** (Cohort, Age_at_diagnosis ...).
Looking at the column on the far left, you can see the row names of the DataFrame `metabric` assigned using the known 0-based indexing used in Python.
Note that the `.read_csv()` method is not limited to reading csv files. For example, you can also read Tab Separated Value (TSV) files by adding the argument `sep='\t'`.
### Exploring data
The pandas DataFrame object borrows features from the well-known R's `data.frame` or SQL's `table`. They are 2-dimensional tables whose columns can contain different data types (e.g. boolean, integer, float, categorical/factor). Both the rows and columns are indexed, and can be referred to by number or name.
An index in a DataFrame refers to the position of an element in the data structure. Using the `.info()` method, we can view basic information about our DataFrame object:
```
metabric.info()
```
As expected, our object is a `DataFrame` (or, to use the full name that Python uses to refer to it internally, a `pandas.core.frame.DataFrame`).
```
type(metabric)
```
It has 1904 rows (the patients) and 32 columns. The columns consist of integer, floats and strings (object). It uses almost 500 KB of memory.
As mentioned, a DataFrame is a Python object or data structure, which means it can have **Attributes** and **Methods**.
**Attributes** contain information about the object. You can access them to learn more about the contents of your DataFrame. To do this, use the object variable name `metabric` followed by the attribute name, separated by a `.`. Do not use any () to access attributes.
For example, the types of data contained in the columns are stored in the `.dtypes` attribute:
```
metabric.dtypes
```
You can access the dimensions of your DataFrame using the `.shape` attribute. The first value is the number of rows, and the second the number of columns:
```
metabric.shape
```
The row and column names can be accessed using the attributes `.index` and `.columns` respectively:
```
metabric.index
metabric.columns
```
If you'd like to transpose `metabric` use the attribute `T`:
```
metabric.T
```
**Methods** are functions that are associated with a DataFrame. Because they are functions, you do use () to call them, and can have arguments added inside the parentheses to control their behaviour. For example, the `.info()` command we executed previously was a method.
The `.head()` method prints the first few rows of the table, while the `.tail()` method prints the last few rows:
```
metabric.head()
metabric.head(3)
metabric.tail()
```
The `.describe()` method computes summary statistics for the columns (including the count, mean, median, and std):
```
metabric.describe()
```
In general you can find which **Attributes** and **Methods** are available for your DataFrame using the function `dir()`:
```
dir(metabric)
```
We often want to calculate summary statistics grouped by subsets or attributes within fields of our data. For example, we might want to calculate the average survival time for patients with an advanced tumour stage.
There are two ways to access columns in a DataFrame. The first is using the name of the DataFrame `metabric` followed by a `.` and then followed by the name of the column. The second is using square brackets:
```
metabric.Survival_time
metabric['Survival_time']
```
We can also compute metrics on specific columns or on the entire DataFrame:
```
metabric['Survival_time'].mean()
metabric['Survival_time'].std()
metabric.mean()
```
### Selecting columns and rows
The [pandas cheat sheet](https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf) can be very helpful for recalling basic pandas operations.
To select rows and columns in a DataFrame, we use square brackets `[ ]`. There are two ways to do this: with **positional** indexing, which uses index numbers, and **label-based** indexing which uses column or row names.
To select the first three rows using their numeric index:
```
metabric[:3]
```
The colon `:` defines a range as we saw with slicing lists in week 1.
To select one column using its name:
```
metabric['Mutation_count']
```
And we can combine the two like this:
```
metabric[:3]['Mutation_count']
```
However the following does not work:
```
metabric[:3,'Mutation_count']
```
To do **positional** indexing for both rows and columns, use `.iloc[]`. The first argument is the numeric index of the rows, and the second the numeric index of the columns:
```
metabric.iloc[:3,2]
```
For **label-based** indexing, use `.loc[]` with the column and row names:
```
metabric.loc[:3,"Age_at_diagnosis"]
```
**Note**: because the rows have numeric indices in this DataFrame, we may think that selecting rows with `.iloc[]` and `.loc[]` is same. As observed above, this is not the case.
If you'd like to select more than one row:
```
metabric.loc[:3, ['Cohort', 'Chemotherapy']]
metabric.loc[:3, 'Cohort':'Chemotherapy']
```
### Filtering rows
You can choose rows from a DataFrame that match some specified criteria. The criteria are based on values of variables and can make use of comparison operators such as `==`, `>`, `<` and `!=`.
For example, to filter `metabric` so that it only contains observations for those patients who died of breast cancer:
```
metabric[metabric.Vital_status=="Died of Disease"]
```
To filter based on more than one condition, you can use the operators `&` (and), `|` (or).
```
metabric[(metabric.Vital_status=="Died of Disease") & (metabric.Age_at_diagnosis>70)]
```
For categorical variables e.g. `Vital_status` or `Cohort`, it may be useful to count how many occurrences there is for each category:
```
metabric['Vital_status'].unique()
metabric['Vital_status'].value_counts()
```
To filter by more than one category, use the `.isin()` method.
```
metabric[metabric.Vital_status.isin(['Died of Disease', 'Died of Other Causes'])]
metabric['Cohort'].value_counts()
```
To tabulate two categorical variables just like `table` in R, use the function `.crosstab()`:
```
pd.crosstab(metabric['Vital_status'], metabric['Cohort'])
```
### Define new columns
To obtain the age of the patient today `Age_today` (new column) based on the `Age_at_diagnosis` (years) and the `Survival_time` (days), you can do the following:
```
metabric['Age_today'] = metabric['Age_at_diagnosis'] + metabric['Survival_time']/365
metabric
```
### Sort data
To sort the entire DataFrame according to one of the columns, we can use the `.sort_values()` method. We can store the sorted DataFrame using a new variable name such as `metabric_sorted`:
```
metabric_sorted = metabric.sort_values('Tumour_size')
metabric_sorted
metabric_sorted.iloc[0]
metabric_sorted.loc[0]
```
We can also sort the DataFrame in descending order:
```
metabric_sorted = metabric.sort_values('Tumour_size', ascending=False)
metabric_sorted
```
### Missing data
Pandas primarily uses `NaN` to represent missing data, which are by default not included in computations.
The `.info()` method shown above already gave us a way to find columns containing missing data:
```
metabric.info()
```
To get the locations where values are missing:
```
pd.isna(metabric)
metabric.isnull()
```
To drop any rows containing at least one column with missing data:
```
metabric.dropna()
```
Conversely, to remove columns that contain at least one missing value instead, use the 'axis' argument:
```
metabric.dropna(axis=1)
```
Define in which columns to look for missing values before dropping the row:
```
metabric.dropna(subset = ["Tumour_size"])
metabric.dropna(subset = ["Tumour_size", "Tumour_stage"])
```
Filling missing data:
```
metabric.fillna(value=0)
metabric.fillna(value={'Tumour_size':0, 'Tumour_stage':5})
```
### Grouping
Grouping patients by Cohort and then applying the `.mean()` function to the resulting groups:
```
metabric.groupby('Cohort')
metabric.groupby('Cohort').mean()
```
Grouping by multiple columns forms a hierarchical index, and again we can apply the `.mean()` function:
```
metabric.groupby(['Cohort', 'Vital_status']).mean()
```
### Pivoting
In some cases, you may want to re-structure your existing DataFrame. The function `.pivot_table()` is useful for this:
```
import numpy as np
df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 3, 'B': ['A', 'B', 'C'] * 4, 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2, 'D': np.random.randn(12), 'E': np.random.randn(12)})
df
pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C'])
```
### Merge datasets
You can concatenate DataFrames using the function `concat()`:
```
metabric_cohort1 = metabric[metabric["Cohort"]==1]
metabric_cohort1
metabric_cohort2 = metabric[metabric["Cohort"]==2]
metabric_cohort2
pd.concat([metabric_cohort1,metabric_cohort2])
```
Or join datasets using the function `.merge()`:
```
left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
left
right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
right
pd.merge(left, right, on='key')
```
A final example:
```
left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]})
left
right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]})
right
pd.merge(left, right, on='key')
```
## Assignment
1. Write python commands using pandas to learn how to output tables as follows:
- Read the dataset `metabric_clinical_and_expression_data.csv` and store its summary statistics into a new variable called `metabric_summary`.
- Just like the `.read_csv()` method allows reading data from a file, `pandas` provides a `.to_csv()` method to write `DataFrames` to files. Write your summary statistics object into a file called `metabric_summary.csv`. You can use `help(metabric.to_csv)` to get information on how to use this function.
- Use the help information to modify the previous step so that you can generate a Tab Separated Value (TSV) file instead
- Similarly, explore the method `to_excel()` to output an excel spreadsheet containing summary statistics
2. Write python commands to perform basic statistics in the metabric dataset and answer the following questions:
- Read the dataset `metabric_clinical_and_expression_data.csv` into a variable e.g. `metabric`.
- Calculate mean tumour size of patients grouped by vital status and tumour stage
- Find the cohort of patients and tumour stage where the average expression of genes TP53 and FOXA1 is the highest
- Do patients with greater tumour size live longer? How about patients with greater tumour stage? How about greater Nottingham_prognostic_index?
3. Review the section on missing data presented in the lecture. Consulting the [user's guide section dedicated to missing data](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html) and any other materials as necessary use the functionality provided by pandas to answer the following questions:
- Which variables (columns) of the metabric dataset have missing data?
- Find the patients ids who have missing tumour size and/or missing mutation count data. Which cohorts do they belong to?
- For the patients identified to have missing tumour size data for each cohort, calculate the average tumour size of the patients with tumour size data available within the same cohort to fill in the missing data
| github_jupyter |
```
# Core numerics / data handling
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
# fastai v1 vision API plus this project's custom MRNet item list
from fastai.vision import *
import torch
#from mrnet_orig import *
from mrnet_itemlist import *
#from ipywidgets import interact, Dropdown, IntSlider
%matplotlib notebook
plt.style.use('grayscale')
# run tree on my data to see its data structure
! tree -d ..
data_path = Path('../data24') # /data24 contains interpolated data where each case-plane has 24 slices
# Build the case list from the folder layout (custom ItemList subclass from mrnet_itemlist).
caselist = MRNetCaseList.from_folder(path=data_path)
type(caselist)
caselist.items[:5] # items are Case numbers as 4-character strings
caselist.inner_df # at this point, inner_df is an empty attribute, returning None, since caselist was
                  # built straight from folders and no label DataFrame has been linked yet
```
Construct a DataFrame with labels linked to cases. First, do just the "Abnormal" label.
```
# Load the train/validation "abnormal" labels (headerless CSVs: Case, Abnormal).
# Case is kept as a string so leading zeros in the 4-character ids survive.
train_abnl = pd.read_csv(data_path/'train-abnormal.csv', header=None,
                         names=['Case', 'Abnormal'],
                         dtype={'Case': str, 'Abnormal': np.int64})
valid_abnl = pd.read_csv(data_path/'valid-abnormal.csv', header=None,
                         names=['Case', 'Abnormal'],
                         dtype={'Case': str, 'Abnormal': np.int64})
# pd.concat replaces DataFrame.append, which was deprecated and removed in pandas 2.0.
abnl = pd.concat([train_abnl, valid_abnl], ignore_index=True)
# Attach the combined labels to the case list; this populates caselist.inner_df.
caselist.link_label_df(df=abnl)
caselist.inner_df
```
Now can label from inner_df associated to CaseList.
```
# Split cases into train/valid by folder, then label them from the linked inner_df.
casesplit = caselist.split_by_folder()
ll = casesplit.label_from_df()
len(ll.train), len(ll.valid)
casesplit.valid
# Fetch one training case and inspect its tensor shape.
case = casesplit.train.get(0)
case.data.shape
```
At this point we have a correctly labeled dataset. It would be possible to do various types of transformations and augmentation on the data, or could convert into a data bunch. Will implement custom transformations/augmentations later.
Can just call `.databunch()` on the labeled list to create a `DataBunch`.
```
# Wrap the labeled lists into DataLoaders (batch size 8) and preview a batch.
data = ll.databunch(bs=8)
data.show_batch(4)
# Pull one training batch; the two positional True flags are presumably
# detach/denorm — TODO confirm against the fastai one_batch signature.
x,y = data.one_batch(DatasetType.Train, True, True)
x.shape, y.shape
# Grab sample index 2 out of the batch tensor.
smpl = grab_idx(x, 2)
smpl.shape, type(smpl)
```
Calling `.reconstruct` on the PyTorch Tensor returns the same kind of thing as the `.get` method, which in this context is an `MRNetCase`, which we can then display.
```
# Reconstruct an MRNetCase from the raw tensor (inverse of .data / .get).
tst = data.train_ds.x.reconstruct(smpl)
type(tst)
tst
tst.data.shape
# Show a single 2D slice; indices presumably select [plane, slice] — confirm
# the axis order against MRNetCase.data.
fig, ax = plt.subplots(1, 1, figsize=(10,10))
ax.imshow(tst.data[2, 11, :, :])
plt.show()
```
### Minimal training example
- [x] import necessary libraries (fastai, mrnet_itemlist)
- [x] https://docs.fast.ai/data_block.html
- [x] 1 provide inputs
- [x] 2 split data into training and validation sets
- [x] 3 label the inputs
- [ ] 4 what transforms to apply (none for now)
- [ ] 5 how to add test set (none for now)
- [x] 6 how to wrap in dataloaders and create the databunch
| github_jupyter |
```
%matplotlib inline
# Plotting style and the usual analysis stack.
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# SQLite file with the Hawaii climate data.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database
# Perform a query to retrieve the data and precipitation scores
# Save the query results as a Pandas DataFrame and set the index to the date column
# Sort the dataframe by date
# Use Pandas Plotting with Matplotlib to plot the data
```

```
# Use Pandas to calcualte the summary statistics for the precipitation data
```

```
# Design a query to show how many stations are available in this dataset?
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature most active station?
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
```

```
# `calc_temps` accepts a start and an end date ('%Y-%m-%d') and returns the
# minimum, average, and maximum temperature observed over that range.
def calc_temps(start_date, end_date):
    """Return [(TMIN, TAVG, TMAX)] for dates between start_date and end_date.

    Args:
        start_date (str): inclusive lower bound, format %Y-%m-%d
        end_date (str): inclusive upper bound, format %Y-%m-%d
    """
    aggregates = [
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    ]
    query = session.query(*aggregates)
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()

# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
```
## Optional Challenge Assignment
```
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    # Aggregate over every year's observations matching the month-day string.
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()

daily_normals("01-01")

# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
# Use the start and end date to create a range of dates
# Strip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
# Plot the daily normals as an area plot with `stacked=False`
```
| github_jupyter |
Sascha Spors,
Professorship Signal Theory and Digital Signal Processing,
Institute of Communications Engineering (INT),
Faculty of Computer Science and Electrical Engineering (IEF),
University of Rostock, Germany
# Tutorial Digital Signal Processing
**Correlation**,
Winter Semester 2021/22 (Course #24505)
- lecture: https://github.com/spatialaudio/digital-signal-processing-lecture
- tutorial: https://github.com/spatialaudio/digital-signal-processing-exercises
Feel free to contact lecturer frank.schultz@uni-rostock.de
WIP...
```
# most common used packages for DSP, have a look into other scipy submodules
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import signal
def my_xcorr2(x, y, scaleopt='none'):
    """Cross-correlation of 1D signals x and y with optional normalization.

    Parameters
    ----------
    x, y : array_like
        Input sequences of length N and M.
    scaleopt : str
        'none'/'raw'        -> raw CCF
        'biased'/'bias'     -> divide by N
        'unbiased'/'unbias' -> divide by N - |kappa| (lag-dependent)
        'coeff'/'normalized'-> normalize by sqrt(energy_x * energy_y)

    Returns
    -------
    kappa : ndarray
        Lag axis, running from -(M-1) to N-1.
    ccf : ndarray of float
        (Normalized) cross-correlation function.

    Notes
    -----
    Normalization is only applied when N == M, as in the original lecture
    code; for unequal lengths the raw CCF is returned.
    """
    N = len(x)
    M = len(y)
    kappa = np.arange(0, N+M-1) - (M-1)
    # Cast to float: the in-place divisions below would raise a TypeError on
    # the integer arrays that signal.correlate returns for integer inputs.
    ccf = signal.correlate(x, y, mode='full', method='auto').astype(np.float64)
    if N == M:
        if scaleopt == 'none' or scaleopt == 'raw':
            pass  # leave raw (the former `ccf /= 1` was a no-op)
        elif scaleopt == 'biased' or scaleopt == 'bias':
            ccf /= N
        elif scaleopt == 'unbiased' or scaleopt == 'unbias':
            ccf /= (N - np.abs(kappa))
        elif scaleopt == 'coeff' or scaleopt == 'normalized':
            ccf /= np.sqrt(np.sum(x**2) * np.sum(y**2))
        else:
            print('scaleopt unknown: we leave output unnormalized')
    return kappa, ccf
if True: # test my_xcorr with simple example
    x = np.array([0, 1, 0, 0, 0])
    y = np.array([1, 0, 0])
    # plot my_xcorr2(x, y) vs. my_xcorr2(y, x)
    plt.figure(figsize=(9, 2))
    plt.subplot(1, 2, 1)
    kappa_xy, ccf_xy = my_xcorr2(x, y)
    # NOTE(review): use_line_collection was deprecated in Matplotlib 3.6 and
    # later removed — drop the kwarg when upgrading Matplotlib.
    plt.stem(kappa_xy, ccf_xy, basefmt='C0:', use_line_collection=True)
    plt.xlabel(r'$\kappa$')
    plt.ylabel(r'$\varphi_{xy}[\kappa]$')
    plt.title('cross correlation between x and y')
    plt.grid(True)
    plt.subplot(1, 2, 2)
    # Swapped order: CCF(y, x) is the time-reversed version of CCF(x, y).
    kappa_yx, ccf_yx = my_xcorr2(y, x)
    plt.stem(kappa_yx, ccf_yx, basefmt='C0:', use_line_collection=True)
    plt.xlabel(r'$\kappa$')
    plt.ylabel(r'$\varphi_{yx}[\kappa]$')
    plt.title('cross correlation between y and x')
    plt.grid(True)
```
## Normalization schemes for cross correlation of finite length signals
check cross correlation
- of a cosine and a sine signal
- of a normal pdf process that exhibits some repetition
```
# Choose the test case; the second assignment overrides the first, so comment
# one of these out to switch examples.
case_str = 'cos_sin'
case_str = 'normal_pdf'
if case_str == 'cos_sin':
    Nt = 200  # number of samples for a full period
    x = np.cos(2*np.pi/Nt * 1 * np.arange(0, Nt)) * 2
    y = np.sin(2*np.pi/Nt * 1 * np.arange(0, Nt)) * 2
elif case_str == 'normal_pdf':
    Nt = 20000
    loc, scale = 2, np.sqrt(2)  # mu, sigma
    x = scale * np.random.randn(Nt) + loc
    y = np.roll(x,-7500)  # process similarity for offset of 7500 samples
# Compare all four normalization schemes side by side.
plt.figure(figsize=(8,6))
plt.subplot(2,2,1)
kappa, ccf = my_xcorr2(x, y, scaleopt='none')
plt.plot(kappa, ccf)
plt.ylabel(r'$\varphi_{xy}[\kappa]$')
plt.title('raw CCF(x,y)')
plt.grid(True)
plt.subplot(2,2,2)
kappa, ccf = my_xcorr2(x, y, scaleopt='biased')
plt.plot(kappa, ccf)
plt.title('biased CCF(x,y)')
plt.grid(True)
plt.subplot(2,2,3)
kappa, ccf = my_xcorr2(x, y, scaleopt='unbiased')
plt.plot(kappa, ccf)
plt.xlabel(r'$\kappa$')
plt.ylabel(r'$\varphi_{xy}[\kappa]$')
plt.title('unbiased CCF(x,y)')
plt.grid(True)
plt.subplot(2,2,4)
kappa, ccf = my_xcorr2(x, y, scaleopt='coeff')
plt.plot(kappa, ccf)
plt.xlabel(r'$\kappa$')
plt.title('normalized CCF(x,y)')
plt.grid(True)
# check that the unbiased estimate of the CCF represents the theoretical
# result best in comparison to the other normalization schemes, at least
# for the chosen examples
```
# **Copyright**
The notebooks are provided as [Open Educational Resources](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebooks for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Frank Schultz, Digital Signal Processing - A Tutorial Featuring Computational Examples* with the URL https://github.com/spatialaudio/digital-signal-processing-exercises
| github_jupyter |
Template untuk mengupdate database bangwas di sinkronkan dengan data aset milik pt kai
- load data yang akan di sinkronkan di line 1 (dataset dr bangwas.web.id belum ada perubahan, jenis dan pemilik masih berupa kode)
- load data yg jd bencmarking yg sudah diolah dari data aset milik pt kai (relate to data_aset_kai.ipynb, jika ada perubahan - jalankan kembali aplikasinya dan ekpor ulang data outputnya)
- pastikan format nomor identitas sudah sama (tanpa spasi dan nomor urut dimulai dari dua digit, 01 bukan 1 atau 001)
- program bisa digunakan untuk mengupdate status, daop, depo dan jenis sarana jika ada perubahan
```
import pandas as pd

# Column names for the bangwas `tblsarana` export (semicolon-separated, no header row).
colnames=['id', 'kode_sarana', 'kode_sarana_lama', 'id_kartu', 'id_jenis', 'id_sub_jenis', 'seri', 'tahun_dinas', 'id_daops', 'id_depo', 'status', 'negara_asal', 'tgl_uji_pertama', 'tgl_masa_uji', 'tgl_masa_perawatan', 'pabrikan', 'id_pemilik', 'created_at', 'last_update' ]
df_sarana = pd.read_csv('../data_source/datsar/tblsarana_nov.csv', sep=";", names=colnames)
df_sarana.head()

# Keep only rolling stock owned by PT KAI: id_pemilik == 2.
# .copy() makes this an independent frame, avoiding SettingWithCopyWarning
# (and silently-lost writes) on the column assignments further below.
df_kai = df_sarana.loc[(df_sarana['id_pemilik'] == 2)].copy()
df_kai

# Benchmark data produced by data_aset_kai.ipynb; make sure it is up to date.
df_kai_baru = pd.read_csv('../data_source/datsar/data_kai_after(nov).csv', index_col=0)
df_kai_baru

# drop unwanted columns (disabled)
#df_kai_baru.drop(columns='Unnamed: 0', inplace=True)
#df_kai_baru

# Strip whitespace from the identity numbers so both sides join cleanly.
df_kai['kode_sarana'] = df_kai['kode_sarana'].str.strip()
df_kai_baru['NOMOR_IDENTITAS'] = df_kai_baru['NOMOR_IDENTITAS'].str.strip()
#fungsi digunakan untuk mengubah jenis sarana ke kode_sarana sesuai database (sesuaikan apabila ada perubahan)
def kode_sarana(jenis_sarana):
    """Map a rolling-stock type name to its database `kode_sarana` id.

    Unknown names map to the string 'undefined'. Adjust the table below if
    the database coding ever changes.
    """
    mapping = {
        'LOKOMOTIF': 1,
        'KRL': 2,
        'LRT': 2,
        'KRD': 3,
        'GERBONG': 4,
        'PERALATAN KHUSUS': 9,
        'KERETA': 11,
        'RAILBUS': 19,
    }
    return mapping.get(jenis_sarana, 'undefined')
# Convert the benchmark's type names to database codes and inspect the counts.
df_kai_baru['JENIS'] = df_kai_baru['JENIS'].apply(lambda x: kode_sarana(x))
df_kai_baru['JENIS'].value_counts()
# fungsi untuk ubah nama daop pada kolom DAOP menjadi kode daop(sesuaikan dengan database)
def kode_daop(daop):
    """Map an operating-region (DAOP/DIVRE) name to its database id.

    Several spellings of the same region resolve to one id; unknown names
    map to the string 'undefined'. Keep the table in sync with the database.
    """
    mapping = {}
    for names, code in [
        (('DAOP 1', 'DAOP 1 JAKARTA'), 7),
        (('DAOP 2', 'DAOP 2 BANDUNG'), 8),
        (('DAOP 3', 'DAOP 3 CIREBON'), 9),
        (('DAOP 4', 'DAOP 4 SEMARANG'), 10),
        (('DAOP 5', 'DAOP 5 PURWOKERTO'), 11),
        (('DAOP 6', 'DAOP 6 YOGYAKARTA'), 12),
        (('DAOP 7', 'DAOP 7 MADIUN'), 13),
        (('DAOP 8', 'DAOP 8 SURABAYA'), 14),
        (('DAOP 9', 'DAOP 9 JEMBER'), 15),
        (('DIVRE 1', 'DIVRE I SUMATERA UTARA', 'DIVRE I'), 16),
        (('DIVRE 2', 'DIVRE II SUMATERA BARAT', 'DIVRE II'), 17),
        (('DIVRE 3', 'DIVRE III SUMATERA SELATAN', 'DIVRE III'), 18),
        (('DIVRE 4', 'DIVRE IV LAMPUNG', 'DIVRE IV'), 26),
        (('BY LT',), 18),
        (('BY MRI',), 26),
        (('PT KAI COMMUTER JABOTABEK',), 24),
    ]:
        for name in names:
            mapping[name] = code
    return mapping.get(daop, 'undefined')
# Convert region names to codes, check counts, and list any unmapped rows.
df_kai_baru['NAMA_DAOP2'] = df_kai_baru['NAMA_DAOP2'].apply(lambda x: kode_daop(x))
df_kai_baru['NAMA_DAOP2'].value_counts(dropna=False)
df_kai_baru[df_kai_baru.NAMA_DAOP2 == "undefined"]
df_kai_baru
# Left-merge the original data with the benchmark: keeps every original row
# and drops benchmark-only rows (use when no new units / renumbering exist).
merge_df = pd.merge(left=df_kai, right=df_kai_baru, how='left', left_on='kode_sarana', right_on='NOMOR_IDENTITAS')
merge_df.shape

# Outer merge additionally keeps benchmark-only rows (new or renumbered
# units). A larger row count than above means new identity numbers exist.
merge_df2 = pd.merge(left=df_kai, right=df_kai_baru, how='outer', left_on='kode_sarana', right_on='NOMOR_IDENTITAS')
merge_df2.shape

# Slice the benchmark-only rows for further verification.
data_baru = merge_df2.loc[(merge_df2['kode_sarana'].isna()) & (merge_df2['NOMOR_IDENTITAS'].notna())]
data_baru.shape

# Count the new identity numbers per type.
data_baru['JENIS'].value_counts()
data_baru
data_baru.to_csv('../data_source/datsar/kroscek_data.csv', sep=',')

# Replace missing values with the string "NA" as an explicit placeholder.
# Plain assignment instead of chained `inplace=True` on a column avoids
# pandas chained-assignment pitfalls (warnings / lost writes under CoW).
merge_df['NOMOR_IDENTITAS'] = merge_df['NOMOR_IDENTITAS'].fillna("NA")
merge_df['NAMA_DAOP2'] = merge_df['NAMA_DAOP2'].fillna("NA")
merge_df['JENIS'] = merge_df['JENIS'].fillna("NA")
# Update the status column from the benchmark data.
# CAUTION: a unit whose number changed but was not yet updated in the
# benchmark will automatically be flagged "Tidak Aktiv".
def ubah_status(x):
    """Return 'Tidak Aktiv' for the "NA" placeholder, otherwise 'Aktiv'."""
    return "Tidak Aktiv" if x == "NA" else "Aktiv"
# Check the current values of the status column.
merge_df['status'].value_counts()
# Update the status according to the benchmark data.
merge_df['status']= merge_df['NOMOR_IDENTITAS'].apply(lambda x : ubah_status(x))
merge_df['status'].value_counts()
# Compare with the counts above: a difference means units moved between
# serviceable and conservation status (or vice versa).
# Handle type changes/modifications (e.g. a baggage car converted to SN)
# where the old number is still in use but the function already changed:
# prefer the benchmark type unless it is missing ("NA") or already equal.
def ubah_jenis(x, y):
    """Return the updated type: keep x when y is 'NA' or equals x, else y."""
    return x if x == y or y == 'NA' else y
merge_df['id_jenis'].value_counts()
# Apply the type update (only changes rows where the benchmark differs).
merge_df['id_jenis']= list(map(lambda x, y : ubah_jenis(x,y), merge_df['id_jenis'], merge_df['JENIS']))
merge_df['id_jenis'].value_counts()
jumlah_before = df_kai.groupby(['id_jenis','status'])['kode_sarana'].count().reset_index()
# Recap of counts before synchronisation.
jumlah_before = jumlah_before.pivot_table(index=['id_jenis'], columns='status')
jumlah_before
jumlah_after = merge_df.groupby(['id_jenis','status'])['kode_sarana'].count().reset_index()
# Recap of counts after synchronisation.
jumlah_after = jumlah_after.pivot_table(index=['id_jenis'], columns='status')
jumlah_after
# Replace an "NA" region value with the fallback taken from the original
# id_daops column.
def ubah_daop(x, y):
    """Return y when x is the 'NA' placeholder, otherwise x."""
    return y if x == 'NA' else x
merge_df['NAMA_DAOP2'].value_counts(dropna=False)
# Replace "NA" region values with the original id_daops and re-check counts.
# This column later becomes the updated id_daops.
merge_df['NAMA_DAOP2']= list(map(lambda x, y : ubah_daop(x,y), merge_df['NAMA_DAOP2'], merge_df['id_daops']))
merge_df['NAMA_DAOP2'].value_counts(dropna=False)
```
UPDATE KODE DEPO
```
# Fill missing depot codes with 0, then force an integer dtype.
# Plain assignment instead of chained `inplace=True` on a column avoids
# pandas chained-assignment pitfalls (warnings / lost writes under CoW).
merge_df['KODE_DEPO'] = merge_df['KODE_DEPO'].fillna(0)
merge_df['KODE_DEPO'] = merge_df['KODE_DEPO'].astype('int64')
merge_df
# Replace a 0 (missing) depot code with the fallback taken from the
# original id_depo column.
def ubah_depo(x, y):
    """Return y when x is 0 (missing), otherwise x."""
    return y if x == 0 else x
merge_df['KODE_DEPO'].value_counts(dropna=False)
# Replace 0 depot codes with the original id_depo.
merge_df['KODE_DEPO']= list(map(lambda x, y : ubah_depo(x,y), merge_df['KODE_DEPO'], merge_df['id_depo']))
merge_df['KODE_DEPO'].value_counts(dropna=False)
merge_df['id_depo'].value_counts(dropna=False)
merge_df[merge_df.KODE_DEPO == 0]  # should be empty after the replacement
# Recap: active units per type and per region; cross-check the totals
# against PT KAI's own fleet data.
data_aktiv2 = merge_df.loc[(merge_df['status'] == "Aktiv")]
rekap = data_aktiv2.groupby(['id_jenis', 'NAMA_DAOP2'])['kode_sarana'].count().reset_index()
rekap = rekap.pivot_table(index=['id_jenis'], columns='NAMA_DAOP2')
rekap.fillna(0, inplace=True)
rekap.columns = rekap.columns.droplevel(0)
rekap = rekap.astype('int64')
rekap
merge_df['id_jenis'].astype('int64')  # NOTE(review): result discarded; the next line performs the actual cast
merge_df['id_jenis'] = merge_df['id_jenis'].astype('int64')
merge_df['NAMA_DAOP2'] = merge_df['NAMA_DAOP2'].astype('int64')
merge_df.dtypes
# Drop columns that are no longer needed after the merge.
merge_df.drop(columns=['id_daops','id_depo','NOMOR_IDENTITAS', 'DAOP','DEPO', 'JENIS', 'TAHUN','NAMA_DEPO' ], inplace=True)
merge_df.columns

# Rename the benchmark columns back to the database schema names.
merge_df.rename(columns={'NAMA_DAOP2' : 'id_daops', 'KODE_DEPO' : 'id_depo'}, inplace=True)
merge_df.columns

# Restore the original column order of the source table.
final_df = merge_df[['id', 'kode_sarana', 'kode_sarana_lama', 'id_kartu', 'id_jenis', 'id_sub_jenis', 'seri', 'tahun_dinas', 'id_daops', 'id_depo', 'status',
                     'negara_asal', 'tgl_uji_pertama', 'tgl_masa_uji', 'tgl_masa_perawatan',
                     'pabrikan', 'id_pemilik', 'created_at', 'last_update']]
final_df

df_sarana.shape
final_df.shape

# Rows not owned by PT KAI are appended back unchanged.
df_nonkai = df_sarana.loc[(df_sarana['id_pemilik'] != 2)]
df_nonkai.shape

x = 14038 - 12269  # expected non-KAI row count (sanity check)
print(x)

# pd.concat replaces DataFrame.append, which was deprecated and removed in pandas 2.0.
df_sarana2 = pd.concat([final_df, df_nonkai], ignore_index=True)
df_sarana2.shape
df_sarana2.head()
```
#df_sarana2 = df_sarana2[['id', 'kode_sarana', 'kode_sarana_lama', 'id_kartu', 'id_jenis', 'id_sub_jenis', 'seri', 'tahun_dinas', 'id_daops', 'id_depo', 'status',
'negara_asal', 'tgl_uji_pertama', 'tgl_masa_uji', 'tgl_masa_perawatan',
'pabrikan', 'id_pemilik', 'created_at', 'last_update']]
#df_sarana2
```
# Recap: active units per type and per owner; cross-check the totals.
data_aktiv2 = df_sarana2.loc[(df_sarana2['status'] == "Aktiv")]
rekap = data_aktiv2.groupby(['id_jenis', 'id_pemilik'])['kode_sarana'].count().reset_index()
rekap = rekap.pivot_table(index=['id_jenis'], columns='id_pemilik')
rekap.fillna(0, inplace=True)
rekap.columns = rekap.columns.droplevel(0)
rekap = rekap.astype('int64')
rekap
# Export the synchronised table for upload back to bangwas.
df_sarana2.to_csv('../data_source/datsar/tblsarana_update_nov.csv', sep=',')
#merge_df.to_csv('data_source/datsar/kai(update_depo).csv', sep=',')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Pdugovich/DS-Unit-1-Sprint-3-Statistical-Tests-and-Experiments/blob/master/module2-sampling-confidence-intervals-and-hypothesis-testing/LS_DS_132_Sampling_Confidence_Intervals_and_Hypothesis_Testing_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Assignment - Build a confidence interval
A confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.
52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.
In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.
But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.
How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."
For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.
Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.
Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)):
### Confidence Intervals:
1. Generate and numerically represent a confidence interval
2. Graphically (with a plot) represent the confidence interval
3. Interpret the confidence interval - what does it tell you about the data and its distribution?
### Chi-squared tests:
4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data
- By hand using Numpy
- In a single line using Scipy
## Confidence Intervals
### 1) Generate and numerically represent a confidence interval
```
# Download the UCI congressional voting records dataset.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Column names: party affiliation plus the 16 roll-call votes.
names=['party','handicapped-infants','water-project',
       'budget','physician-fee-freeze', 'el-salvador-aid',
       'religious-groups','anti-satellite-ban',
       'aid-to-contras','mx-missile','immigration',
       'synfuels', 'education', 'right-to-sue','crime','duty-free',
       'south-africa']
df = pd.read_csv('house-votes-84.data',
                 header=None,
                 names=names)
print(df.shape)
df.head()
# Encode votes numerically: '?' -> NaN, 'y' -> 1, 'n' -> 0.
df = df.replace({'?': np.NaN, 'y':1, 'n': 0})
# Split by party.
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem['aid-to-contras'].mean()
#Dropping nas to use with function
dem_contras = dem['aid-to-contras'].dropna()
def sample_confidence_interval(data, confidence_level=0.95):
    """Return (mean, lower, upper) of a t-based confidence interval.

    `data` is any 1-D sequence of numbers; `confidence_level` defaults to
    a 95% two-sided interval using the Student-t critical value.
    """
    sample = np.array(data)
    n = len(sample)
    center = sample.mean()
    standard_error = np.std(sample, ddof=1) / np.sqrt(n)
    critical_t = stats.t.ppf((1 + confidence_level) / 2.0, n - 1)
    half_width = critical_t * standard_error
    return (center, center - half_width, center + half_width)
#Checking to make sure the code works
sample_confidence_interval(dem_contras)
# I tried a few different styles, but I liked Ryan's graphical
#representation best
dem_contras.plot(kind='density', figsize=(10,8))
#zooming in to get a better view, the margin of error is pretty small
plt.xlim(left = -0.1, right=1.1)
plt.grid()
# Overlay the CI bounds (red) and the point estimate (black).
CI = sample_confidence_interval(dem_contras)
plt.axvline(x=CI[1], color='red', lw=1)
plt.axvline(x=CI[2], color='red', lw=1)
plt.axvline(x=CI[0], color='black', lw=3);
# Same plot for the south-africa vote.
dem['south-africa'].dropna().plot(kind='density', figsize=(10,8))
CI = sample_confidence_interval(dem['south-africa'].dropna())
plt.xlim(left=-.2, right=1.2)
plt.grid()
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='black');
#This graph serves no purpose, and should be ignored. But it looks cool.
# (All 16 issues drawn with their CIs on one figure.)
for issue in df.columns[range(1,17)]:
    dem[issue].dropna().plot(kind='density', figsize=(10,8))
    CI = sample_confidence_interval(dem[issue].dropna())
    plt.axvline(x=CI[1], color='red')
    plt.axvline(x=CI[2], color='red')
    plt.axvline(x=CI[0], color='black');
```
##Chi-squared Test
```
# Loading in a dataset from a previous lecture (UCI adult census income).
dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
column_headers = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                  'marital-status', 'occupation', 'relationship', 'race', 'sex',
                  'capital-gain', 'capital-loss', 'hours-per-week',
                  'native-country', 'income']
#Note that having the incorrect number of column headers makes the far left the "index",
df_chi = pd.read_csv(dataset_url, names=column_headers)
print(df.shape)  # NOTE(review): prints the voting df's shape — presumably df_chi.shape was intended
df_chi.head(5)
df_chi['race'].value_counts()
df_chi['marital-status'].value_counts()
#Putting the two categorical variables into a crosstab
crosstab_table = pd.crosstab(df_chi['sex'], df_chi['race'], margins=True)
crosstab_table
# Marginal totals needed for the expected-frequency computation
# (5 race columns + 'All' at position 5; 2 sex rows + 'All').
row_sums = crosstab_table.iloc[0:2, 5].values
col_sums = crosstab_table.iloc[2, 0:5].values
total = crosstab_table.loc['All','All']
print(row_sums)
print(col_sums)
print(total)
# Build the expected-frequency matrix by hand:
# expected[i, j] = row_total[i] * col_total[j] / grand_total
# (equivalently: np.outer(row_sums, col_sums) / total)
#Creating an empty list to fill with expected values
expected = []
for num in range(len(row_sums)):
    expected_row = []
    for col in col_sums:
        expected_val = col*row_sums[num]/total
        expected_row.append(expected_val)
    expected.append(expected_row)
expected = np.array(expected)
```
## Stretch goals:
1. Write a summary of your findings, mixing prose and math/code/results. *Note* - yes, this is by definition a political topic. It is challenging but important to keep your writing voice *neutral* and stick to the facts of the data. Data science often involves considering controversial issues, so it's important to be sensitive about them (especially if you want to publish).
2. Apply the techniques you learned today to your project data or other data of your choice, and write/discuss your findings here.
3. Refactor your code so it is elegant, readable, and can be easily run for all issues.
## Resources
- [Interactive visualize the Chi-Squared test](https://homepage.divms.uiowa.edu/~mbognar/applets/chisq.html)
- [Calculation of Chi-Squared test statistic](https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test)
- [Visualization of a confidence interval generated by R code](https://commons.wikimedia.org/wiki/File:Confidence-interval.svg)
- [Expected value of a squared standard normal](https://math.stackexchange.com/questions/264061/expected-value-calculation-for-squared-normal-distribution) (it's 1 - which is why the expected value of a Chi-Squared with $n$ degrees of freedom is $n$, as it's the sum of $n$ squared standard normals)
| github_jupyter |
# Let's kill off `Runner`
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
#export
from exp.nb_09 import *
AvgStats
```
## Imagenette data
[Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=6571)
```
# Download/extract Imagenette-160 and build the data pipeline.
path = datasets.untar_data(datasets.URLs.IMAGENETTE_160)
tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]
bs=64
il = ImageList.from_files(path, tfms=tfms)
# Split by folder name ('val' holds the validation set), label by parent dir.
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))
ll = label_by_func(sd, parent_labeler, proc_y=CategoryProcessor())
data = ll.to_databunch(bs, c_in=3, c_out=10, num_workers=4)
# Callback factories: metric tracking, GPU transfer, input normalization.
cbfs = [partial(AvgStatsCallback,accuracy),
        CudaCallback,
        partial(BatchTransformXCallback, norm_imagenette)]
nfs = [32]*4  # channel widths for the CNN body
```
Having a Runner is great but not essential when the `Learner` already has everything needed in its state. We implement everything inside it directly instead of building a second object.
##### In Lesson 12 Jeremy Howard revisited material in the cell below [Jump_to lesson 12 video](https://course.fast.ai/videos/?lesson=12&t=65)
```
#export
def param_getter(m):
    "Default parameter splitter: a single group holding all of the model's parameters."
    return m.parameters()
class Learner():
    """Train `model` on `data` with `loss_func`, driven entirely by callbacks.

    Replaces the earlier separate `Runner` object: the training loop and all
    callback dispatch live directly on the Learner. Callbacks can interrupt
    training at several granularities via the Cancel*Exception classes.
    """
    def __init__(self, model, data, loss_func, opt_func=sgd_opt, lr=1e-2, splitter=param_getter,
                 cbs=None, cb_funcs=None):
        self.model,self.data,self.loss_func,self.opt_func,self.lr,self.splitter = model,data,loss_func,opt_func,lr,splitter
        self.in_train,self.logger,self.opt = False,print,None
        # NB: Things marked "NEW" are covered in lesson 12
        # NEW: avoid need for set_runner
        self.cbs = []
        self.add_cb(TrainEvalCallback())
        self.add_cbs(cbs)
        self.add_cbs(cbf() for cbf in listify(cb_funcs))

    def add_cbs(self, cbs):
        # Register a single callback or any iterable of callbacks.
        for cb in listify(cbs): self.add_cb(cb)

    def add_cb(self, cb):
        # Point the callback at this Learner, expose it as an attribute
        # (e.g. self.avg_stats), and register it for dispatch.
        cb.set_runner(self)
        setattr(self, cb.name, cb)
        self.cbs.append(cb)

    def remove_cbs(self, cbs):
        for cb in listify(cbs): self.cbs.remove(cb)

    def one_batch(self, i, xb, yb):
        # Forward/backward/step for one batch; a callback may cancel the
        # batch by raising CancelBatchException.
        try:
            self.iter = i
            self.xb,self.yb = xb,yb; self('begin_batch')
            self.pred = self.model(self.xb); self('after_pred')
            self.loss = self.loss_func(self.pred, self.yb); self('after_loss')
            if not self.in_train: return  # no backward pass during validation
            self.loss.backward(); self('after_backward')
            self.opt.step(); self('after_step')
            self.opt.zero_grad()
        except CancelBatchException: self('after_cancel_batch')
        finally: self('after_batch')

    def all_batches(self):
        # Iterate whichever dataloader self.dl currently points at; an epoch
        # may be cancelled via CancelEpochException.
        self.iters = len(self.dl)
        try:
            for i,(xb,yb) in enumerate(self.dl): self.one_batch(i, xb, yb)
        except CancelEpochException: self('after_cancel_epoch')

    def do_begin_fit(self, epochs):
        self.epochs,self.loss = epochs,tensor(0.)
        self('begin_fit')

    def do_begin_epoch(self, epoch):
        # Point self.dl at the training set and fire 'begin_epoch'.
        self.epoch,self.dl = epoch,self.data.train_dl
        return self('begin_epoch')

    def fit(self, epochs, cbs=None, reset_opt=False):
        # NEW: pass callbacks to fit() and have them removed when done
        self.add_cbs(cbs)
        # NEW: create optimizer on fit(), optionally replacing existing
        if reset_opt or not self.opt: self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
        try:
            self.do_begin_fit(epochs)
            for epoch in range(epochs):
                self.do_begin_epoch(epoch)
                # NOTE(review): do_begin_epoch already fires 'begin_epoch',
                # so the call below invokes those callbacks a second time —
                # confirm the double invocation is intended.
                if not self('begin_epoch'): self.all_batches()
                # Validation pass: no gradients, iterate the valid dataloader.
                with torch.no_grad():
                    self.dl = self.data.valid_dl
                    if not self('begin_validate'): self.all_batches()
                self('after_epoch')
        except CancelTrainException: self('after_cancel_train')
        finally:
            self('after_fit')
            self.remove_cbs(cbs)

    # Names a callback is allowed to be dispatched under.
    ALL_CBS = {'begin_batch', 'after_pred', 'after_loss', 'after_backward', 'after_step',
        'after_cancel_batch', 'after_batch', 'after_cancel_epoch', 'begin_fit',
        'begin_epoch', 'begin_validate', 'after_epoch',
        'after_cancel_train', 'after_fit'}

    def __call__(self, cb_name):
        # Dispatch cb_name to every callback in _order.
        # NOTE(review): with res initialised to False, `cb(cb_name) and res`
        # is always False, so __call__ always returns False — confirm whether
        # `or res` was intended here.
        res = False
        assert cb_name in self.ALL_CBS
        for cb in sorted(self.cbs, key=lambda x: x._order): res = cb(cb_name) and res
        return res
#export
class AvgStatsCallback(Callback):
    """Track averaged metrics separately for the training and validation phases."""
    def __init__(self, metrics):
        self.train_stats = AvgStats(metrics, True)
        self.valid_stats = AvgStats(metrics, False)

    def begin_epoch(self):
        # Start each epoch from a clean slate in both phases.
        for stats in (self.train_stats, self.valid_stats):
            stats.reset()

    def after_loss(self):
        # Accumulate into whichever phase is currently active.
        stats = self.train_stats if self.in_train else self.valid_stats
        with torch.no_grad():
            stats.accumulate(self.run)

    def after_epoch(self):
        # `logger` is the Learner's logger (print by default); it can be
        # redirected to a file or a progress bar.
        self.logger(self.train_stats)
        self.logger(self.valid_stats)
cbfs = [partial(AvgStatsCallback,accuracy),
CudaCallback,
partial(BatchTransformXCallback, norm_imagenette)]
#export
def get_learner(nfs, data, lr, layer, loss_func=F.cross_entropy,
                cb_funcs=None, opt_func=sgd_opt, **kwargs):
    """Build a CNN from `nfs`/`layer`, initialize it, and wrap it in a `Learner`."""
    model = get_cnn_model(data, nfs, layer, **kwargs)
    init_cnn(model)
    learner = Learner(model, data, loss_func, lr=lr, cb_funcs=cb_funcs, opt_func=opt_func)
    return learner
learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs)
%time learn.fit(1)
```
## Check everything works
Let's check our previous callbacks still work.
```
cbfs += [Recorder]
learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs)
phases = combine_scheds([0.3, 0.7], cos_1cycle_anneal(0.2, 0.6, 0.2))
sched = ParamScheduler('lr', phases)
learn.fit(1, sched)
learn.recorder.plot_lr()
learn.recorder.plot_loss()
```
## Export
```
!./notebook2script.py 09b_learner.ipynb
```
| github_jupyter |

<h2 align='center'>Data Literacy through Sports Analytics</h2>
<h3 align='center'>Southern Alberta Teachers' Convention 2021</h3>
<h3 align='center'>Tina Leard (Cybera)<br>
Michael Lamoureux (University of Calgary)</h3><br>
<h4 align='center'> Slides at: https://tinyurl.com/callysto-data </h4>

<center><img src='./images/ccby.png' alt="CC BY logo" width='300' /></center>
<p><center><a href='https://creativecommons.org/licenses/by/4.0/' target='_blank'>CC BY</a>:<br>
This license allows reusers to distribute, remix, adapt, and build upon the material in any medium or format,<br>
so long as attribution is given to the creator.
</center></p>
```
import numpy as np
import pandas as pd
from pandas import read_csv
import plotly.graph_objects as go
import plotly.express as px
from plotly import subplots
from plotly.offline import download_plotlyjs, plot,iplot
import cufflinks as cf
cf.go_offline()
from IPython.display import YouTubeVideo
from ipysheet import sheet, cell, cell_range
%matplotlib inline
```
# Overview
- Data literacy via sports
- The learning progression
- Examples of learning and data analysis
- Professional soccer
- Ice hockey
- Field hockey
- Python, Jupyter, and Callysto
<center><img src='./images/data_literacy.png' alt='data literacy' width='85%' /></center>
#### Content and context
(Alberta Education, 2000, 2007, updated 2016, 2017)
## Example: professional soccer event data
```
# Metrica Sports sample event data: one row per on-ball event; trailing expression displays the frame.
df_soccer = pd.read_csv("https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/Sample_Game_1_RawEventsData.csv"); df_soccer
```
**Home team passes, second half**
```
# Filter to Home-team passes in the second half, then scatter-plot the pass origin points.
df_soccer.loc[lambda df: (df['Team'] == 'Home') & (df['Period'] == 2) & (df['Type'] == 'PASS'), :] \
    .iplot(kind="scatter",x = "Start X", y = "Start Y", mode = "markers")
```
## Bridging expert to novice
## Data visualization learning progression
<img src='./images/creating_scaffolding.png' alt='scaffolding' width='95%' />
## Data visualization learning progression
<img src='./images/creating_adapting.png' alt='adapting' width='95%' />
Communicating mathematical reasoning (Alberta Education, 2007, updated 2016)
## Data gathering learning progression
<br>
<center><img src='./images/data_gathering.png' alt='data gathering' width='85%' /></center>
<br><br><br>Source: <a href='http://oceansofdata.org/sites/oceansofdata.org/files/pervasive-and-persistent-understandings-01-14.pdf' target='_blank'>Pervasive and Persistent Understandings about Data</a>, Kastens (2014)
## Authentic learning approach
- Learning design based on interdisciplinary<br>
connections and real-world examples
- Industry-aligned data science analysis process
- Python, an all-purpose programming language
- Jupyter notebook, a free industry-standard tool for data scientists
- CallystoHub, free cloud computing
## Athlete development
### U15 training to train
- Promotes tactical strategies for in-game decision making, reading the situation and inferring
- Focuses on the team and the process
- Situates personal goals within a team approach
### U18 training to compete
- Emphasizes individual technical and position-specific training
## Youth sports analytics
Online communication,<br>
sometimes through shared video analysis spaces
Video replay during games and training
Post–game video analysis, limited statistics
## Learning design and flexibility
<br>
<img src='./images/flexibility.png' alt='adapting' width='90%' />
## Two data examples
1. Import a csv file and use a Python spreadsheet<br>to create shot maps (ice hockey)
2. Gather data from video to analyze and make decisions (field hockey)
## Data example 1:
## Using IIHF World Junior Championship data to create graphs and a shot map
## Defining ice hockey net zones:<br> What factors can lead to scoring?
<!--USA Hockey Goaltender Basics https://www.usahockeygoaltending.com/page/show/890039-stance-->
||
|-|-|
|<img src='./images/hockey_net_zones.png' width='100%'/>|<img src='https://cdn.hockeycanada.ca/hockey-canada/Team-Canada/Men/Under-18/2014-15/2014-15_goalie_camp.jpg?q=60' />|
||<a href='https://www.hockeycanada.ca/en-ca/news/34-goaltenders-invited-to-2014-poe-camp' target='_blank'>Image source: Hockey Canada</a>|
```
%%html
<h2>Data source IIHF: Shot charts</h2><br>
<iframe width="1200" height="600" src="https://www.iihf.com/pdf/503/ihm503a13_77a_3_0" frameborder="0" ></iframe>
```
## Tally chart
<img src='./images/hockey_tally.png' alt='tally chart' width='85%' />
## Generating a csv file
Zone,Austria,Canada,Czech_Republic,Finland,Germany,Russia,Switzerland,Slovakia,Sweden,USA,Total<br>
one,0,7,0,3,2,0,0,0,3,3,18<br>
two,0,1,1,0,1,0,0,0,0,0,3<br>
three,0,5,0,2,2,4,1,0,3,6,23<br>
four,0,4,3,2,1,1,0,1,0,3,15<br>
five,0,1,0,2,1,0,0,0,0,0,4<br>
six,1,1,2,4,0,2,0,1,0,2,13<br>
seven,0,6,0,1,3,3,1,1,0,9,24<br>
eight,0,5,1,2,2,3,1,2,3,2,21<br>
nine,0,3,3,0,2,3,2,0,5,0,18<br>
## Exploring scoring on net zones
```
hockey_goals_df = pd.read_csv('./data/hockey_goals.csv')
hockey_goals_df.head(9)
```
### What do measures of central tendency<br>tell us about the total goals per net zone?
```
hockey_goals_df['Total'].sum()
hockey_goals_df['Total'].min()
hockey_goals_df['Total'].max()
scatter_hockey_goals_df = px.scatter(hockey_goals_df,x="Zone",y="Total",title="Total goals per net zone")
scatter_hockey_goals_df.show()
hockey_goals_df['Total'].mean()
hockey_goals_df['Total'].median()
hockey_goals_df['Total'].mode()
```
### Which net zones score above the median?
```
hockey_goals_df = hockey_goals_df.sort_values('Total', ascending=False)
hockey_goals_df
bar_hockey_goals_df = px.bar(hockey_goals_df,
x="Zone", y="Total")
bar_hockey_goals_df.update_layout(title_text='Total goals by net zone')
```
### What connections exist between<br>goalie position and scoring?
```
# Reload the goals-per-net-zone table (also read in an earlier cell).
hockey_goals_df = pd.read_csv('./data/hockey_goals.csv')
hockey_goals_df.Total
# Editable 3x3 ipysheet grid seeded with the goal totals per net zone.
spread_sheet_hockey_net = sheet(rows=3, columns=3)
my_cells_net = cell_range([[18,3,23],[15,4,13],[24,21,18]],row_start=0,col_start=0,numeric_format="int")
# Heatmap of the grid values; rows are reversed because Plotly's y axis
# points up, so the sheet's top row stays at the top of the plot.
figure_hockey_net = go.Figure(data=go.Heatmap(
    z =list(reversed(my_cells_net.value)),
    type = 'heatmap',
    colorscale = 'greys',opacity = 1.0))
# Hide all axis decorations so only the net image and shading show.
axis_template = dict(range = [0,5], autorange = True,
    showgrid = False, zeroline = False,
    showticklabels = False,
    ticks = '' )
figure_hockey_net.update_layout(margin = dict(t=50,r=200,b=200,l=200),
    xaxis = axis_template,
    yaxis = axis_template,
    showlegend = False,
    width = 800, height = 500, title="Ice hockey net zones",
    autosize = True )
# Add image in the background
nLanes = 3
nZones = 3
figure_hockey_net.add_layout_image(
    dict(
        source="images/hockey_net.png",
        xref="x",
        yref="y",
        x=-0.5,
        y=-.5 + nLanes, #this adjusts the placement of the image
        sizex=nZones,
        sizey=nLanes,
        sizing="fill",
        opacity=1.0,
        layer="above")
)
# changes in my_cells should trigger this function
def calculate(change):
    # Re-read the (possibly edited) sheet values into the heatmap trace.
    figure_hockey_net.update_traces(z=list(reversed(my_cells_net.value)))
my_cells_net.observe(calculate, 'value')
spread_sheet_hockey_net
# NOTE(review): the bare `139` below looks like a stray cell-output artifact
# from the notebook export rather than intentional code — confirm and remove.
139
figure_hockey_net.update() # Click the keys "Shift-return" to update the figure
```
## Data example 2:
## Analyzing youth field hockey data to make decisions
<center><img src='./images/learning_cycle1.png' alt="Learning design and context" width='90%' /></center>
#### Learning design and context notes
The context is physical education, and the content is statistics. Within physical education, in-game skills, fair play, teamwork, and goal setting are integrated. Those outcomes can be applied to in-game decision making. The goal setting can also be part of the communication resulting from the data analysis. When considering in-game decision making, we can define an action as the result of a decision. Decision making is part of a learning cycle that incorporates a technological feedback loop.
(Field Hockey Alberta, 2020; Field Hockey Canada, 2020; Alberta Education, 2000)
<center><img src='./images/learning_cycle5.png' alt="Learning cycle" width='90%' /></center>
#### Learning cycle notes
The real situation occurs on the field where a decision is made and an action is executed. Then, the athlete forms a mental representation, processing occurs, and a real model is formed. The real model is integrated into the computational model, which results in a technological feedback, then a connection is made back into game play.
(Butler & Winne, 1995; Cleary & Zimmerman, 2001; Hadwin et al., 2017; Leard & Hadwin, 2001)
<center><img src='./images/computational_thinking.png' alt="Computationl thinking" width='90%' /></center>
<a href="https://app.lucidchart.com/documents/view/8e3186f7-bdfe-46af-9c7f-9c426b80d083">Connecting data literacy and sports</a>
#### Computational modelling and data literacy notes
The definition of computational thinking can vary.
Computational thinking is math reasoning combined with critical thinking plus the power of computers. We can use computers to do work more efficiently for us, like compute thousands of lines of data.
Under that definition of computational thinking, we can apply computational thinking strategies. The foundational process is decomposing to look for patterns. We can use computer programming to design algorithms to look for patterns. With these algorithms, we can infer through abstractions.
The abstractions can be in the form of computational models: data visualizations (including graphs from the curriculum), data analyses, and simulations of probability models. The data visualizations can extend beyond the curriculum to support math reasoning.
(Berikan & Özdemir, 2019; Gadanidis, 2020; Guadalupe & Gómez-Blancarte, 2019; Leard & Hadwin, 2001)
<center><img src='./images/analysis_process.png' alt="Data science analysis process" width='90%' /></center>
#### Data science analysis process notes
This data science analysis process was modified from how expert data scientists analyze data and aligned to several provincial curricula.
There are six steps:
1. Understand the problem. What questions are we trying to answer?
2. Gather the data. Find the data sources, with the extension of big data sets.
3. Organize the data so we can explore it, usually in the form of a table.
4. Explore the data to create computational models. Usually, there is more than one model. Look for evidence to answer our questions.
5. Interpret the data through inferences. Explain how the evidence answers our questions.
6. Communicate the results. In the context of sports analytics, the communication might be within a team to decide tactical strategies for game play.
(Alberta Education, 2007, updated 2016; Ferri, 2006; Leard & Hadwin, 2001; Manitoba Education and Training, 2020; Ontario Ministry of Education, 2020)
<center><img src='./images/collective.png' alt="Collective decision making" width='90%' /></center>
#### Learning cycle notes
How the individual makes decisions within the collective responsibilities and actions of the team can be considered. In-game decision making involves in-game communication with team members, with each athlete referring to their own real model.
While in-game decision making will always produce a real model, athletes also need to decide when it is appropriate to connect the real model to the computational model and integrate that connection back into game play.
(BC Ministry of Education, 2020; Hadwin et al., 2017; Leard & Hadwin, 2001)
<center><img src='./images/models.png' alt="Models" width='90%' /></center>
#### Real model and computational model notes
How the individual makes decisions within the collective responsibilities and actions of the team can be considered. In-game decision making involves in-game communication with team members, with each athlete referring to their own real model.
While in-game decision making will always produce a real model, athletes also need to decide when it is appropriate to connect the real model to the computational model and integrate that connection back into game play.
(Field Hockey Canada, 2020)
<center><img src='./images/data_literacy_sports.png' alt="Connecting data literacy and sports" width='90%' /></center>
<center><img src='./images/field_hockey_game.png' alt="Field hockey" width='90%' /></center>
<center><img src='./images/understand1.png' alt="Understand actions" width='90%' /></center>
(Field Hockey Alberta, 2020; Field Hockey Canada, 2020)
<center><img src='./images/actions.png' alt="Understand viewpoints" width='90%' /></center>
```
print ('Passes received')
YouTubeVideo('mIwiiJO7Rk4?start=2893&end=2915', width='600', height='355')
```
<center><img src='./images/gather4.png' alt="Gather" width='90%' /></center>
<center><img src='./images/collection_passing.png' alt="Passing" width='90%' /></center>
## 3. Organize
```
possession_time_df = read_csv('data/field_hockey_possession_time.csv')
possession_time_df.head(8)
```
## 4. Explore
How does ball possession affect outcomes?
```
bar_possession_time_df = px.bar(possession_time_df,x="Possession Time (seconds)",y="Quarter",title="Possession per quarter<br>Home 2 shots on net (Q3); Away 1 shot on net (Q1)",color="Team")
bar_possession_time_df.update_layout(autosize=False, width=600, height=400)
lanes_home_passes_df = read_csv('data/field_hockey_lanes_home_passes.csv')
lanes_home_passes_df.head()
circle_lanes_home_passes_df = px.pie(lanes_home_passes_df,values="Count",names="Action",title="Passes received, intercepted, and missed for Home team")
circle_lanes_home_passes_df.show()
bar_lanes_home_passes_df = px.bar(lanes_home_passes_df,
x="Quarter", y="Count", color="Action", title="Passes per quarter for Home team")
bar_lanes_home_passes_df.update_layout(barmode='stack', xaxis={'categoryorder':'array', 'categoryarray':['first','second','third','fourth']})
```
## 4. Explore passes received
What stays the same and what changes?
```
lanes_home_passes_received_df = lanes_home_passes_df[lanes_home_passes_df['Action']=='pass received']
lanes_home_passes_received_df.head()
bar_lanes_home_passes_received_df = px.bar(lanes_home_passes_received_df,
x="Quarter", y="Count", color="Lane", text="Lane", title="Passes received in lanes per quarter for Home team")
bar_lanes_home_passes_received_df.update_layout(barmode='stack', xaxis={'categoryorder':'array', 'categoryarray':['first','second','third','fourth']})
df_passes_home = pd.read_csv('data/field_hockey_home_passes.csv'); df_passes_home
df_temp_1 = df_passes_home.loc[lambda df: (df['Phase of Play'] == 'attack') &(df['Quarter'] == 'first') ];
df_temp_2 = df_passes_home.loc[lambda df: (df['Phase of Play'] == 'attack') &(df['Quarter'] == 'second') ];
df_temp_3 = df_passes_home.loc[lambda df: (df['Phase of Play'] == 'attack') &(df['Quarter'] == 'third') ];
df_temp_4 = df_passes_home.loc[lambda df: (df['Phase of Play'] == 'attack') &(df['Quarter'] == 'fourth') ];
#import plotly.tools as tls
fig_all = subplots.make_subplots(rows=1, cols=4)
fig_1 = df_temp_1.iplot(kind='heatmap', colorscale='blues', x='Lane', y='Zone', z='Count' , asFigure=True)
fig_2 = df_temp_2.iplot(kind='heatmap', colorscale='blues', x='Lane', y='Zone', z='Count' , asFigure=True)
fig_3 = df_temp_3.iplot(kind='heatmap', colorscale='blues', x='Lane', y='Zone', z='Count' , asFigure=True)
fig_4 = df_temp_4.iplot(kind='heatmap', colorscale='blues', x='Lane', y='Zone', z='Count' , asFigure=True)
fig_all.append_trace(fig_1['data'][0], 1, 1)
fig_all.append_trace(fig_2['data'][0], 1, 2)
fig_all.append_trace(fig_3['data'][0], 1, 3)
fig_all.append_trace(fig_4['data'][0], 1, 4)
fig_all.update_xaxes(showticklabels = False, linecolor='black')
fig_all.update_yaxes(showticklabels = False, linecolor='black')
iplot(fig_all)
```
#### Passes in left outside lane of the opponent's net
|||||
|---|---|---|---|
|**Q1: 29%** (14/49)|**Q2: 41%** (13/32)|**Q3: 38%** (16/42)|**Q4: 28%** (8/29)|
```
df_passes_home.loc[lambda df: (df['Lane'] == 1) &(df['Phase of Play'] == 'attack') &(df['Quarter']== 'first') ].sum()
14/49
```
## 5. Interpret<br> How can the data exploration inform decision making?
> - Considering the role of passing versus carrying the ball
> - Keeping the ball out of the zone near the net
> - Attacking on the outer lanes, especially toward the left side of the opponent's net
# The technology in this talk
- **Jupyter** notebooks, **Python** programming, **Pandas** for data
- Free to teachers and students
- **Callysto.ca** project (CanCode, Cybera, PIMS)
- This slideshow **IS** a Jupyter notebook! (take a tour)
## Callysto resources
- <a href="https://www.callysto.ca/starter-kit/">Callysto starter kit</a> Getting started
- <a href="https://courses.callysto.ca">courses.callysto.ca</a> Online courses
- <a href="https://www.callysto.ca/weekly-data-visualization/">Weekly data visualizations</a> Quick activities
<center><a href='https://www.callysto.ca/learning-modules/'><img src='./images/learning_modules.png' target='_blank' alt="Callysto learning modules" width='90%' /></a></center>
<center>All free, all open source, aimed at teachers and students</center>
<p><center>Contact us at <a href="mailto:contact@callysto.ca">contact@callysto.ca</a><br>
for in-class workshops, virtual hackathons...<br>
<a href="https://twitter.com/callysto_canada">@callysto_canada</a><br>
<a href="https://callysto.ca">callysto.ca</a><br>
<a href="https://www.youtube.com/channel/UCPdq1SYKA42EZBvUlNQUAng">YouTube</a>
</center></p>
## Thank you for your attention!
<center><img src='./images/callysto_logo.png' alt="Callysto logo" width='80%' /></center>
<center><img src='./images/callysto_partners2.png' alt='Callysto partners' width='80%' /></center>
### References
Alberta Education. (2000). *Physical education* [Program of Studies]. https://education.alberta.ca/media/160191/phys2000.pdf
Alberta Education. (2007, updated 2016). *Mathematics kindergarten to grade 9* [Program of Studies]. https://education.alberta.ca/media/3115252/2016_k_to_9_math_pos.pdf
Alberta Education. (2017). *Career and Technology Foundations* [Program of Studies]. https://education.alberta.ca/media/3795641/ctf-program-of-studies-jan-4-2019.pdf
BC Ministry of Education. (2020). *BC's digital literacy framework*. https://www2.gov.bc.ca/assets/gov/education/kindergarten-to-grade-12/teach/teaching-tools/digital-literacy-framework.pdf
Berikan, B., & Özdemir, S. (2019). Investigating “problem-solving with datasets” as an implementation of computational thinking: A literature review. *Journal of Educational Computing Research, 58*(2), 502–534. https://doi.org/10.1177/0735633119845694
Butler, D. L., & Winne, P. H. (1995). Feedback and self-regulated learning: A theoretical synthesis. *Review of Educational Research, 65*(3), 245–281. https://doi.org/10.3102/00346543065003245
Cleary, T. J., & Zimmerman, B. J. (2001). Self-regulation differences during athletic practice by experts, non-experts, and novices. *Journal of Applied Sport Psychology, 13*(2), 185–206. https://doi.org/10.1080/104132001753149883
Ferri, R. B. (2006). Theoretical and empirical differentiations of phases in the modelling process. *ZDM, 38*(2), 86–95. https://doi.org/10.1007/bf02655883
Field Hockey Alberta (2020). *Tactical Seminars*. http://www.fieldhockey.ab.ca/content/tactical-seminars
Field Hockey Canada (2020). *Ahead of the Game*. http://www.fieldhockey.ca/ahead-of-the-game-field-hockey-canada-webinar-series/
Gadanidis, G. (2020, September 2). *Shifting from computational thinking to computational modelling in math education* [Online plenary talk]. Changing the Culture 2020, Pacific Institute for the Mathematical Sciences.
Guadalupe, T. & Gómez-Blancarte, A. (2019). Assessment of informal and formal inferential reasoning: A critical research review. *Statistics Education Research Journal, 18*, 8-25. https://www.researchgate.net/publication/335057564_ASSESSMENT_OF_INFORMAL_AND_FORMAL_INFERENTIAL_REASONING_A_CRITICAL_RESEARCH_REVIEW
Hadwin, A., Järvelä, S., & Miller, M. (2017). Self-Regulation, Co-Regulation, and Shared Regulation in Collaborative Learning Environments. *Handbook of Self-Regulation of Learning and Performance*, 83–106. https://doi.org/10.4324/9781315697048-6
Kastens, K. (2014). *Pervasive and Persistent Understandings about Data*. Oceans of Data Institute. http://oceansofdata.org/sites/oceansofdata.org/files/pervasive-and-persistent-understandings-01-14.pdf
Leard, T., & Hadwin, A. F. (2001, May). *Analyzing logfile data to produce navigation profiles of studying as self-regulated learning* [Paper presentation]. Canadian Society for the Study of Education, Quebec City, Quebec, Canada.
Manitoba Education and Training (2020). *Literacy with ICT across the curriculum: A model for 21st century learning from K-12*. https://www.edu.gov.mb.ca/k12/tech/lict/index.html
Ontario Ministry of Education. (2020). *The Ontario curriculum grades 1‐8: Mathematics* [Program of Studies]. https://www.dcp.edu.gov.on.ca/en/curriculum/elementary-mathematics
| github_jupyter |
```
#import libraries
import numpy.ma as MA
import datetime as dt
from datetime import datetime, timedelta
import xarray as xr
import numpy as np
import pandas as pd
import os
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
#subroutine to check for bad values
def checkValue(value):
    """Convert a raw text field to float, flagging sentinel values as missing.

    The buoy files use the strings "999.0" and "99.0" to mark missing data;
    those become the masked-array missing marker, everything else is parsed
    as a float.
    """
    if value in ("999.0", "99.0"):
        return MA.masked
    return float(value)
#subroutine to read .txt buoy data files
def readData(fname):
    """Read a whitespace-delimited NDBC buoy .txt file into a dict of masked arrays.

    The first line of the file gives the column names and the second the
    units (discarded).  Every remaining line becomes one entry per column;
    sentinel values are masked via `checkValue`.  A combined `'dtime'`
    column of datetimes is built from the #YY/MM/DD/hh/mm columns.
    """
    with open(fname) as f:
        col_names = f.readline().split()
        print(col_names)
        f.readline()  # units line, discarded
        data_block = f.readlines()

    data = {}
    for col_name in col_names:
        data[col_name] = MA.zeros(len(data_block), 'f', fill_value=999.999)

    # Loop through each value: append to each column
    for (line_count, line) in enumerate(data_block):
        items = line.split()
        for (col_count, col_name) in enumerate(col_names):
            data[col_name][line_count] = checkValue(items[col_count])

    ilen = len(data["#YY"])
    data['dtime'] = MA.zeros(len(data_block), 'datetime64[s]')
    for i in range(ilen):
        # BUG FIX: the column arrays hold float32 values, which
        # datetime.datetime() rejects ("integer argument expected");
        # cast each component to int explicitly.
        data["dtime"][i] = dt.datetime(int(data["#YY"][i]), int(data["MM"][i]),
                                       int(data["DD"][i]), int(data["hh"][i]),
                                       int(data["mm"][i]))
    return data
#subroutine to read the high resolution CMAN data files
def readData_highres(fname):
    """Read the hourly sensor data from an NDBC CMAN netCDF file.

    Walks the `payload_1` and `payload_2` groups and flattens the hourly
    variables (SST, winds, air temperature, pressure, GPS) plus their QC
    flags into one time-indexed table, returned as an `xarray.Dataset`.

    NOTE(review): a single `gattrs` dict is mutated for each sensor and
    assigned by reference to several columns' `.attrs`, so all of those
    columns end up sharing the same (last-written) install_date/height
    values — confirm whether per-sensor metadata was intended.  pandas
    also does not guarantee that `.attrs` set on a column selection
    survives later operations — verify downstream use.
    """
    print('reading:',fname)
    ds_buoy=Dataset(fname)
    #hourly data
    buoy_time=ds_buoy.variables['time'][:]
    # Numeric times interpreted as seconds since the Unix epoch.
    time_index=np.array(buoy_time).astype('datetime64[s]')
    #payload 1
    tempgrp = ds_buoy.groups['payload_1']
    # SST sensor: the first variable seeds the DataFrame so all later
    # columns share its time index.
    tempgrp2 = tempgrp.groups['ocean_temperature_sensor_1']
    data=tempgrp2.variables['sea_surface_temperature'][:]
    df_buoy = pd.DataFrame({'sst1' : data,'time' : time_index}).set_index(['time'])
    df_buoy['sst1qc']=tempgrp2.variables['sea_surface_temperature_qc'][:]
    gattrs={}
    gattrs['install_date'] = tempgrp2.install_date
    gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    df_buoy['sst1'].attrs=gattrs
    # Anemometer 1: hourly wind speed/direction plus QC flags.
    tempgrp2 = tempgrp.groups['anemometer_1']
    df_buoy['wnd1']=tempgrp2.variables['wind_speed'][:]
    df_buoy['wnd1qc']=tempgrp2.variables['wind_speed_qc'][:]
    df_buoy['wdir1']=tempgrp2.variables['wind_direction'][:]
    df_buoy['wdir1qc']=tempgrp2.variables['wind_direction_qc'][:]
    # data=tempgrp2.variables['continuous_wind_speed'][:]
    gattrs['install_date'] = tempgrp2.install_date
    gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    df_buoy['wnd1'].attrs=gattrs
    # Anemometer 2 (second wind sensor on the same payload).
    tempgrp2 = tempgrp.groups['anemometer_2']
    df_buoy['wnd2']=tempgrp2.variables['wind_speed'][:]
    df_buoy['wnd2qc']=tempgrp2.variables['wind_speed_qc'][:]
    df_buoy['wdir2']=tempgrp2.variables['wind_direction'][:]
    df_buoy['wdir2qc']=tempgrp2.variables['wind_direction_qc'][:]
    gattrs['install_date'] = tempgrp2.install_date
    gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    df_buoy['wnd2'].attrs=gattrs
    # Air temperature sensor.
    tempgrp2 = tempgrp.groups['air_temperature_sensor_1']
    df_buoy['air1']=tempgrp2.variables['air_temperature'][:]
    df_buoy['air1qc']=tempgrp2.variables['air_temperature_qc'][:]
    gattrs['install_date'] = tempgrp2.install_date
    gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    df_buoy['air1'].attrs=gattrs
    # Barometer 1: station pressure and sea-level-adjusted pressure.
    tempgrp2 = tempgrp.groups['barometer_1']
    df_buoy['air_pres1']=tempgrp2.variables['air_pressure'][:]
    df_buoy['air_pres1qc']=tempgrp2.variables['air_pressure_qc'][:]
    df_buoy['air_pres_sea_level1']=tempgrp2.variables['air_pressure_at_sea_level'][:]
    df_buoy['air_pres_sea_level1qc']=tempgrp2.variables['air_pressure_at_sea_level_qc'][:]
    gattrs['install_date'] = tempgrp2.install_date
    gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    df_buoy['air_pres1'].attrs=gattrs
    # Barometer 2.
    tempgrp2 = tempgrp.groups['barometer_2']
    df_buoy['air_pres2']=tempgrp2.variables['air_pressure'][:]
    df_buoy['air_pres2qc']=tempgrp2.variables['air_pressure_qc'][:]
    df_buoy['air_pres_sea_level2']=tempgrp2.variables['air_pressure_at_sea_level'][:]
    df_buoy['air_pres_sea_level2qc']=tempgrp2.variables['air_pressure_at_sea_level_qc'][:]
    gattrs['install_date'] = tempgrp2.install_date
    gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    df_buoy['air_pres2'].attrs=gattrs
    # GPS position (hourly) with QC flags.
    tempgrp2 = tempgrp.groups['gps_1']
    df_buoy['lat']=tempgrp2.variables['latitude'][:]
    df_buoy['latqc']=tempgrp2.variables['latitude_qc'][:]
    df_buoy['lon']=tempgrp2.variables['longitude'][:]
    df_buoy['lonqc']=tempgrp2.variables['longitude_qc'][:]
    #payload 2
    # Same sensor layout as payload 1, stored with suffixes 2/3/4;
    # metadata assignment for these is commented out.
    tempgrp = ds_buoy.groups['payload_2']
    tempgrp2 = tempgrp.groups['ocean_temperature_sensor_1']
    test=tempgrp2.variables['sea_surface_temperature'][:]
    df_buoy['sst2']=tempgrp2.variables['sea_surface_temperature'][:]
    df_buoy['sst2qc']=tempgrp2.variables['sea_surface_temperature_qc'][:]
    # gattrs['install_date'] = tempgrp2.install_date
    # gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    # df_buoy['sst2'].attrs=gattrs
    tempgrp2 = tempgrp.groups['anemometer_1']
    df_buoy['wnd3']=tempgrp2.variables['wind_speed'][:]
    df_buoy['wnd3qc']=tempgrp2.variables['wind_speed_qc'][:]
    df_buoy['wdir3']=tempgrp2.variables['wind_direction'][:]
    df_buoy['wdir3qc']=tempgrp2.variables['wind_direction_qc'][:]
    # gattrs['install_date'] = tempgrp2.install_date
    # gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    # df_buoy['wnd3'].attrs=gattrs
    tempgrp2 = tempgrp.groups['anemometer_2']
    df_buoy['wnd4']=tempgrp2.variables['wind_speed'][:]
    df_buoy['wnd4qc']=tempgrp2.variables['wind_speed_qc'][:]
    df_buoy['wdir4']=tempgrp2.variables['wind_direction'][:]
    df_buoy['wdir4qc']=tempgrp2.variables['wind_direction_qc'][:]
    # gattrs['install_date'] = tempgrp2.install_date
    # gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    # df_buoy['wnd4'].attrs=gattrs
    tempgrp2 = tempgrp.groups['air_temperature_sensor_1']
    df_buoy['air2']=tempgrp2.variables['air_temperature'][:]
    df_buoy['air2qc']=tempgrp2.variables['air_temperature_qc'][:]
    # gattrs['install_date'] = tempgrp2.install_date
    # gattrs['height_of_instrument'] = tempgrp2.height_of_instrument
    # df_buoy['air2'].attrs=gattrs
    ds_buoy.close() # close the new file
    xr_buoy=xr.Dataset(df_buoy)
    return xr_buoy
#subroutine to read the 10 min CMAN data
def readData_highres10(fname):
    """Read the 10-minute ("continuous") wind data from an NDBC CMAN netCDF file.

    Continuous wind speed/direction live on the 10-minute `time10` axis of
    both payloads; GPS position is only available hourly, so it is
    interpolated (xarray's default linear interpolation) onto the 10-minute
    timestamps before the combined `xarray.Dataset` is returned.
    """
    ds_buoy=Dataset(fname)
    #hourly data
    buoy_time=ds_buoy.variables['time'][:]
    time_index=np.array(buoy_time).astype('datetime64[s]')
    #10min data
    buoy_time10=ds_buoy.variables['time10'][:]
    tem=np.array(buoy_time10).astype('datetime64[s]')
    time10_index=tem
    #payload 1
    tempgrp = ds_buoy.groups['payload_1']
    tempgrp2 = tempgrp.groups['anemometer_1']
    data=tempgrp2.variables['continuous_wind_speed'][:]
    # First continuous-wind variable seeds the 10-minute DataFrame.
    df_buoy10 = pd.DataFrame({'wnd1' : data,'time' : time10_index}).set_index(['time'])
    df_buoy10['wnd1qc']=tempgrp2.variables['continuous_wind_speed_qc'][:]
    df_buoy10['wdir1']=tempgrp2.variables['continuous_wind_direction'][:]
    df_buoy10['wdir1qc']=tempgrp2.variables['continuous_wind_direction_qc'][:]
    tempgrp2 = tempgrp.groups['anemometer_2']
    df_buoy10['wnd2']=tempgrp2.variables['continuous_wind_speed'][:]
    df_buoy10['wnd2qc']=tempgrp2.variables['continuous_wind_speed_qc'][:]
    df_buoy10['wdir2']=tempgrp2.variables['continuous_wind_direction'][:]
    df_buoy10['wdir2qc']=tempgrp2.variables['continuous_wind_direction_qc'][:]
    #get gps data at lower resolution and map onto highresolution data
    tempgrp2 = tempgrp.groups['gps_1']
    # df_buoy['lat']=tempgrp2.variables['latitude'][:]
    data=tempgrp2.variables['latitude'][:]
    # GPS goes into a separate hourly-indexed frame; it is interpolated
    # onto the 10-minute axis at the end.
    df_buoy = pd.DataFrame({'lat' : data,'time' : time_index}).set_index(['time'])
    df_buoy['latqc']=tempgrp2.variables['latitude_qc'][:]
    df_buoy['lon']=tempgrp2.variables['longitude'][:]
    df_buoy['lonqc']=tempgrp2.variables['longitude_qc'][:]
    #payload 2
    tempgrp = ds_buoy.groups['payload_2']
    tempgrp2 = tempgrp.groups['anemometer_1']
    df_buoy10['wnd3']=tempgrp2.variables['continuous_wind_speed'][:]
    df_buoy10['wnd3qc']=tempgrp2.variables['continuous_wind_speed_qc'][:]
    df_buoy10['wdir3']=tempgrp2.variables['continuous_wind_direction'][:]
    df_buoy10['wdir3qc']=tempgrp2.variables['continuous_wind_direction_qc'][:]
    tempgrp2 = tempgrp.groups['anemometer_2']
    df_buoy10['wnd4']=tempgrp2.variables['continuous_wind_speed'][:]
    df_buoy10['wnd4qc']=tempgrp2.variables['continuous_wind_speed_qc'][:]
    df_buoy10['wdir4']=tempgrp2.variables['continuous_wind_direction'][:]
    df_buoy10['wdir4qc']=tempgrp2.variables['continuous_wind_direction_qc'][:]
    ds_buoy.close() # close the new file
    xr_buoy=xr.Dataset(df_buoy)
    xr_buoy10=xr.Dataset(df_buoy10)
    time10=xr_buoy10.time
    # Interpolate the hourly GPS track onto the 10-minute time axis.
    # NOTE(review): interpolating the QC flag columns produces values
    # between flag levels — confirm this is acceptable downstream.
    xr_buoy10['lat']=xr_buoy.lat.interp(time=time10)
    xr_buoy10['latqc']=xr_buoy.latqc.interp(time=time10)
    xr_buoy10['lon']=xr_buoy.lon.interp(time=time10)
    xr_buoy10['lonqc']=xr_buoy.lonqc.interp(time=time10)
    return xr_buoy10
# Walk the input tree for NDBC 45005 v00 netCDF files and convert each one
# to hourly and 10-minute xarray-format netCDF files.
# NOTE(review): indentation below is reconstructed from a flattened notebook
# export — confirm the per-file processing really sits inside the loop.
# NOTE(review): `os.walk` expects a local filesystem path; called on an
# https:// THREDDS catalog URL it silently yields nothing, so as written
# this loop body likely never executes.
dir_in = 'https://www.ncei.noaa.gov/thredds-ocean/catalog/ndbc/cmanwx/'
files = []
# NOTE(review): the loop target `files` rebinds the accumulator list above,
# and `files.append(fname_in)` then appends into the directory listing
# currently being iterated — rename one of the two.
for root, dirs, files in os.walk(dir_in, topdown=False):
    for name in files:
        if name.startswith("NDBC_45005") and name.endswith("v00.nc"):
            #continue
            fname_in=os.path.join(root, name)
            #print(fname)
            #fname='F:/data/cruise_data/saildrone/baja-2018/buoy_data/NDBC_46011_201804_D5_v00.nc'
            #in this data time = hourly time10 = 10 min data
            files.append(fname_in)
            print(files)
            # Convert and save the hourly data ('…hrly_xrformat.nc')...
            xr_buoy=readData_highres(fname_in)
            #print(xr_buoy)
            fname_out=fname_in[:-3]+'hrly_xrformat.nc'
            xr_buoy.to_netcdf(fname_out)
            # ...and the 10-minute data ('…10min_xrformat.nc').
            xr_buoy10=readData_highres10(fname_in)
            fname_out=fname_in[:-3]+'10min_xrformat.nc'
            xr_buoy10.to_netcdf(fname_out)
```
| github_jupyter |
**Introduction**
In this post, you will discover the Keras Python library that provides a clean and convenient way to create a range of deep learning models on top of Theano or TensorFlow.
All credits to -- "http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/"
Let’s get started.
**Dependencies**
All important libraries and data set are imported below
**Python**
Please run this script in Python 2
```
# Standard library, pickling, Keras building blocks, and data-handling
# imports for the loan-classification example.
import os, sys, re
import cPickle as pickle  # NOTE: cPickle exists only in Python 2 (use `pickle` on Python 3)
from keras.models import Sequential
from keras.layers import Dense
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
print (time.time())  # timestamp the start of the run
dataset = pd.read_csv("loan.csv")  # Lending Club loan data
```
Replace all the missing entries with zeros
```
dataset = dataset.fillna(0) ## filling missing values with zeros
```
**Data Modification**
Convert all kinds of categorical data into integer codes, and the date columns into real-valued day offsets.
```
# --- Encode non-numeric columns so the network can consume them. ---
# The original repeated the same two transforms 23 times; the columns are
# listed once here and transformed in loops (identical results per column).

# Categorical text columns -> integer category codes.
_CATEGORICAL_COLS = [
    'application_type', 'addr_state', 'emp_length', 'grade',
    'home_ownership', 'initial_list_status', 'loan_status', 'purpose',
    'pymnt_plan', 'sub_grade', 'term', 'verification_status',
    'verification_status_joint',
]
for col in _CATEGORICAL_COLS:
    dataset[col] = dataset[col].astype('category').cat.codes

# Date columns -> fractional days elapsed since each column's earliest date.
_DATE_COLS = ['earliest_cr_line', 'issue_d', 'last_credit_pull_d',
              'last_pymnt_d', 'next_pymnt_d']
for col in _DATE_COLS:
    dataset[col] = pd.to_datetime(dataset[col])
    dataset[col] = (dataset[col] - dataset[col].min()) / np.timedelta64(1, 'D')
```
Storing non numeric or non real columns name in non_numerics array
```
# Collect the columns whose dtype is still non-numeric (anything other than
# float64, int8 or int64) -- these cannot be fed to the network directly.
_numeric_dtypes = (np.float64, np.int8, np.int64)
non_numerics = [col for col in dataset.columns
                if dataset[col].dtype not in _numeric_dtypes]
```
Dropping the non_numerics columns for easy modeling
```
# Work on the full dataset with the non-numeric columns dropped,
# leaving a purely numeric frame for modelling.
df = dataset
# Keyword form: the positional `axis` argument was deprecated and then
# removed in pandas 2.0.
df = df.drop(columns=non_numerics)
```
Converting 'loan result status' into two categories 0 and 1. 0 means loan failed or that type of person should not be given loan in future and 1 means loan passed i.e. they are good for extending the loan.
```
def LoanResult(status):
    """Collapse an encoded loan_status code into a binary label.

    Codes 1, 5 and 7 are the "good" outcomes (loan passed -> 1);
    every other code is treated as a failed loan (-> 0).
    """
    return 1 if status in (1, 5, 7) else 0
df['loan_status'] = df['loan_status'].apply(LoanResult)
```
Splitting data into train data and test data with the help of scikit library in the ratio of 3:1
```
# 75/25 train/test split, then trim to manageable sizes: the full data set
# is slow to train on, so only the first 50k training rows and 1k test rows
# are used; X_pred holds a further unlabelled slice for final predictions.
train, test = train_test_split(df, test_size = 0.25)
# Keyword form of drop: positional `axis` was removed in pandas 2.0.
X_train = train.drop(columns='loan_status').values[0:50000, :]
Y_train = train['loan_status'].values[0:50000]
X_test = test.drop(columns='loan_status').values[0:1000, :]
Y_test = test['loan_status'].values[0:1000]
X_pred = test.drop(columns='loan_status').values[1001:2000, :]
```
Setting the seed for pseudo random numbers generation
```
# Fix the NumPy RNG seed so shuffling/initialisation is reproducible.
seed = 8
np.random.seed(seed)
```
Now we will define a three layered neural network model. We create a Sequential model and add layers one at a time until we are happy with our network topology. After that we will set activation function and number of nets in each layer. These are done by heuristics and training the model several times.
```
# Create the model
model = Sequential()
# Define the three layered model: the input layer expects 68 features,
# two hidden layers of 110 ReLU units each, and a single sigmoid output
# giving the probability that the loan passes.
model.add(Dense(110, input_dim = 68, kernel_initializer = "uniform", activation = "relu"))
model.add(Dense(110, kernel_initializer = "uniform", activation = "relu"))
model.add(Dense(1, kernel_initializer = "uniform", activation = "sigmoid"))
```
Now we will compile the model. In this we have to input three parameters viz. loss function, optimizer function and an evaluation metrics. These choices are again by heuristics. Here we are using "binary_crossentropy" as loss func, "adam" as optimizer func and "accuracy" as evaluation metrics.
```
#
# Compile the model: binary cross-entropy loss for the 0/1 target, the Adam
# optimiser, and accuracy reported during training/evaluation.
model.compile(loss="binary_crossentropy", optimizer= "adam", metrics=['accuracy'])
#
```
Now we have to fit the data into our model.
We can train or fit our model on our loaded data by calling the fit() function on the model.
The training process will run for a fixed number of iterations through the dataset called epochs, that we must specify using the **epochs** argument. We can also set the number of instances that are evaluated before a weight update in the network is performed, called the batch size and set using the **batch_size** argument.
```
# Fit the model.
# NOTE(review): 22000 epochs over 50k rows is an extremely long run and is
# likely to overfit badly -- confirm this epoch count is intentional.
model.fit(X_train, Y_train, epochs= 22000, batch_size=200)
```
**Evaluate Model**
We have trained our neural network on the entire dataset and we can evaluate the performance of the network on the test dataset.
```
# Evaluate on the held-out test set; performance[1] is the accuracy metric
# declared at compile time (performance[0] is the loss).
performance = model.evaluate(X_test, Y_test)
print("%s: %.2f%%" % (model.metrics_names[1], performance[1]*100))
#
```
**Final Prediction**
Predicting using the trained model
```
# Predict using the trained model.
# model.predict returns a 2-D array of shape (n, 1), so each `x` below is a
# length-1 array. The builtin round() needs a scalar -- convert with
# float() first (round(ndarray) raises TypeError on modern NumPy).
prediction = model.predict(X_pred)
rounded_predictions = [round(float(x)) for x in prediction]
print(rounded_predictions)
```
| github_jupyter |
# Prepare Superresolution Training Data with eo-learn
There are many examples and resources for training superresolution networks on (satellite) imagery:
- [MDL4EO](https://mdl4eo.irstea.fr/2019/03/29/enhancement-of-sentinel-2-images-at-1-5m/)
- [ElementAI HighRes-Net](https://github.com/ElementAI/HighRes-net)
- [Fast.ai superresolution](https://github.com/fastai/course-v3/blob/master/nbs/dl1/lesson7-superres.ipynb)
We'll show you how to use `eo-learn` to prepare data for these tasks (and an example of training the network with `fastai`)
First you'll need to download the [Spacenet Challenge: Paris Data](https://spacenetchallenge.github.io/AOI_Lists/AOI_3_Paris.html). We're using this to get high resolution image chips.
```
from os import path as op
from glob import glob
import datetime
from eolearn.io import ImportFromTiff, SentinelHubInputTask
from eolearn.core import FeatureType, LinearWorkflow, EOTask
from sentinelhub import BBox, CRS, DataSource
from PIL import Image
import numpy as np
from tqdm import tqdm
spacenet_images = glob('AOI_3_Paris_Train/RGB-PanSharpen/*.tif')  # high-res Spacenet chips
# Import the Spacenet chips into EOPatches, as a feature called "spacenet"
input_task = ImportFromTiff((FeatureType.DATA_TIMELESS, 'spacenet'))
# Add Sentinel 2 L2A to our EOPatches covering the same area
time_interval = ('2017-02-28', '2017-04-01') # roughly matching the spacenet dates
add_l2a = SentinelHubInputTask(
    data_source=DataSource.SENTINEL2_L2A,
    bands=['B04','B03','B02'],  # true-colour RGB band order
    bands_feature=(FeatureType.DATA, 'TRUE-COLOR-S2-L2A'),
    additional_data=[(FeatureType.MASK, 'dataMask', 'IS_VALID'), (FeatureType.DATA, 'SCL')],
    maxcc=.1,  # at most 10% cloud cover
    time_difference=datetime.timedelta(hours=2),
    max_threads=3,
    resolution=(10,10)  # metres per pixel
)
# Save the Spacenet and Sentinel images in separate folders. Resize our images when saving
BIG_SIZE = (256, 256)    # target (high-resolution) chip size
SMALL_SIZE = (64, 64)    # input (low-resolution) chip size
INPUT_FOLDER = 'input'
TARGET_FOLDER = 'target'
class CustomSave(EOTask):
    """EOTask that writes a (low-res Sentinel, high-res Spacenet) image pair
    to INPUT_FOLDER / TARGET_FOLDER as 8-bit PNGs, skipping patches with
    too little valid Spacenet coverage."""

    def execute(self, eopatch, image_name=None):
        # Skip patches where less than 90% of the Spacenet chip is non-zero.
        spacenet_array = eopatch.data_timeless['spacenet']
        data_pct = (np.count_nonzero(spacenet_array) / spacenet_array.size)
        if data_pct < 0.9:
            return eopatch
        # Bug fix: the original indexed eopatch.data with an undefined name
        # `layer`; the Sentinel feature added by SentinelHubInputTask above
        # is named 'TRUE-COLOR-S2-L2A'.
        sentinel_array = eopatch.data['TRUE-COLOR-S2-L2A'][0]
        # Rescale reflectance (assumed in [0, 1] -- TODO confirm) to 8-bit
        # and downsample to the low-resolution input size.
        sentinel_array_8bit = (sentinel_array * 255.).astype(np.uint8)
        sentinel_img = Image.fromarray(sentinel_array_8bit).resize(SMALL_SIZE, resample=Image.BILINEAR)
        sentinel_img.save(op.join(INPUT_FOLDER, f'{image_name}.png'))
        # Min-max normalise the Spacenet chip per band, then resize to the
        # high-resolution target size.
        band_min = np.min(spacenet_array, axis=(0, 1))
        band_max = np.max(spacenet_array, axis=(0, 1))
        spacenet_array_8bit = ((spacenet_array - band_min) /
                               (band_max - band_min) * 255).astype(np.uint8)
        spacenet_image = Image.fromarray(spacenet_array_8bit).resize(BIG_SIZE, resample=Image.BILINEAR)
        spacenet_image.save(op.join(TARGET_FOLDER, f'{image_name}.png'))
        return eopatch
custom_save = CustomSave()
# Chain the tasks into a single linear EOWorkflow:
# import Spacenet tiff -> add Sentinel-2 L2A imagery -> save the image pair.
prepare_data = LinearWorkflow(
    input_task,
    add_l2a,
    custom_save
)
# Execute the workflow once per Spacenet chip, with a progress bar.
pbar = tqdm(total=len(spacenet_images))
for image in spacenet_images:
    # e.g. 'RGB-PanSharpen_AOI_3_Paris_img100.tif' -> 'img100'
    image_name = op.splitext(op.basename(image))[0].replace('RGB-PanSharpen_AOI_3_Paris_', '')
    workflow_input = {
        input_task: dict(filename=image),
        # Bug fix: the original keyed this entry on the undefined name
        # `add_l1c` (NameError); the Sentinel task defined above is `add_l2a`.
        add_l2a: dict(time_interval=time_interval),
        custom_save: dict(image_name=image_name)
    }
    prepare_data.execute(workflow_input)
    pbar.update(1)
```
| github_jupyter |
```
import cv2
import keras
from keras import backend as K
from tensorflow.python.keras.applications import imagenet_utils
from keras.models import Model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
#from scipy.misc import imread
from imageio import imread
import tensorflow as tf
import os
from ssd_model import SSD512
from ssd_utils import PriorUtil
input_shape = (512, 512, 3)  # SSD512 input resolution (H, W, C)
batch_size = 32
image_size = input_shape[:2]

from data_voc import GTUtility
gt_util = GTUtility('data/VOC2007/')
gt_util_train, gt_util_val = gt_util.split(0.8)  # 80/20 train/val split
num_classes = gt_util.num_classes

# Build the SSD512 detector and its prior-box utility, then load weights
# converted from the reference Caffe model.
model = SSD512(input_shape, num_classes=num_classes)
prior_util = PriorUtil(model)
model.load_weights('./models/ssd512_voc_weights_fixed.hdf5', by_name=True)

from utils.caffe2keras import compare_output_shape
compare_output_shape(model, './models/ssd512_voc_shape.pkl')
from ssd_data import preprocess_image

# Print arrays in scientific notation while comparing activations;
# `po` stores the original print options so they can be restored later.
po = np.get_printoptions()
np.set_printoptions(formatter={'all': '{:+.6e}'.format})
# Compare each Keras layer's activations against the dumped Caffe
# activations for the fish-bike test image.
# The preprocessed input image is the same for every layer, so it is
# computed once here (the original recomputed it on each iteration).
input_image = np.round(preprocess_image('pics/fish-bike.jpg', (512,512)), 4)
for layer in model.layers:
    layer_name = layer.name
    file_name = os.path.join('ssd512_voc_activations_fish-bike', layer_name+'.npy')
    try:
        caffe_output = np.load(file_name)
    except FileNotFoundError:
        # No dump exists for this layer -- just note it and move on.
        print(layer_name)
        continue
    if len(caffe_output.shape) == 4:
        # Caffe stores NCHW; Keras uses NHWC.
        caffe_output = caffe_output.transpose(0,2,3,1)
    f = K.function(model.inputs, [model.get_layer(layer_name).output])
    keras_output = f([[input_image]])[0]
    try:
        diff = np.round(caffe_output[0]-keras_output[0], 9)
    except ValueError:
        # Shape mismatch between the two frameworks' outputs.
        print("    %s %s" % (caffe_output.shape, keras_output.shape))
        print(layer_name)
        continue
    min_diff = np.min(diff)
    max_diff = np.max(diff)
    mean_diff = np.mean(diff)
    max_abs_diff = np.max(np.abs(diff))
    # Fraction of elements that differ by more than ~1e-4.
    diff_ratio = np.nonzero(np.round(diff,4))[0].shape[0] / np.prod(diff.shape)
    print('%-28s %10.6f %10.6f %3i%%' % (layer_name, mean_diff, max_abs_diff, diff_ratio*100))
np.set_printoptions(**po)
# Sanity-check the preprocessing: print the top-left 5x5 corner of each
# channel (transposed to channel-first to match the Caffe dumps).
img_path = './data/images/fish-bike.jpg'
img = preprocess_image(img_path, size=image_size, lib='skimage')
print(img.transpose((2, 0, 1))[:,:5,:5])
```
```
# Run detection on a handful of test images.
inputs = []   # preprocessed network inputs
images = []   # original images, kept for plotting
for img_path in ['./data/images/fish-bike.jpg',
                 './data/images/cat.jpg',
                 './data/images/boys.jpg',
                 './data/images/car_cat.jpg',
                 './data/images/car_cat2.jpg',
                 ]:
    images.append(imread(img_path))
    inputs.append(preprocess_image(img_path, image_size))
inputs = np.array(inputs)
preds = model.predict(inputs, batch_size=1, verbose=1)
# Decode raw predictions into boxes/scores/classes per image.
results = [prior_util.decode(p) for p in preds]
plt.figure()
plt.imshow(images[0])
res = prior_util.decode(preds[0])
# NOTE(review): plot_results is called without the decoded results argument;
# presumably it plots the utility's last decode -- confirm against ssd_utils.
prior_util.plot_results(classes=gt_util.classes, confidence_threshold=0.5) # 0.1 !!!
plt.show()
print(res)
```
```
# Layer-activation comparison, printing a per-layer summary table:
# max activation, mean/max absolute difference, ratio of differing cells.
po = np.get_printoptions()
np.set_printoptions(formatter={'all': '{:+.8f}'.format})
print('%-28s %14s %10s %10s %4s' % ('', 'max activation', 'mean diff', 'max diff', 'ratio'))
# The input image is the same for every layer -- computed once, outside
# the loop (the original recomputed it per layer).
input_image = preprocess_image('./data/images/fish-bike.jpg', input_shape[:2])
for layer in model.layers:
    layer_name = layer.name
    file_name = os.path.join('./caffe_activation_dumps/ssd512_voc_activations_fish-bike', layer_name+'.npy')
    try:
        caffe_output = np.load(file_name)
    except FileNotFoundError:
        print(layer_name)
        continue
    if len(caffe_output.shape) == 4:
        caffe_output = caffe_output.transpose(0,2,3,1)  # NCHW -> NHWC
    f = K.function(model.inputs, [model.get_layer(layer_name).output])
    keras_output = f([[input_image]])[0]
    try:
        diff = np.round(caffe_output[0]-keras_output[0], 9)
    except ValueError:
        print(layer_name)
        continue
    max_value = np.max(keras_output)
    min_diff = np.abs(np.min(diff))
    max_diff = np.abs(np.max(diff))
    mean_diff = np.abs(np.mean(diff))
    diff_ratio = np.nonzero(np.round(diff,5))[0].shape[0] / np.prod(diff.shape)
    print('%-28s %14.6f %10.6f %10.6f %3i%%' % (layer_name, max_value, mean_diff, max_diff, diff_ratio*100))
# Bug fix: restore the saved options as keyword arguments. The original
# called np.set_printoptions(po), which binds the dict positionally to
# `precision` and raises a TypeError.
np.set_printoptions(**po)
# Print a summary table of every PriorBox layer's configuration.
fstr = '%-28s %-20s %-20s %-12s %-9s %-9s %-28s %-22s %-8s %-8s %-8s'
print(fstr % ('', 'input_shape', 'output_shape', 'img_size', 'min_size', 'max_size', 'aspect_ratios', 'variances', 'clip', 'flip', 'step'))
for l in model.layers:
    if l.__class__.__name__ == 'PriorBox':
        aspect_ratios = [ round(e, 2) for e in l.aspect_ratios ]
        print(fstr % (l.name, l.input_shape, l.output_shape, l.img_size, l.min_size, l.max_size, aspect_ratios, l.variances, l.clip, l.flip, l.step))
from utils.vis import plot_priorboxes
from ssd_utils import calc_priorboxes

# Visualise the prior boxes of every PriorBox layer over the test image.
img_path = 'pics/fish-bike.jpg'
image = imread(img_path)
input_image = preprocess_image(img_path)
for layer in model.layers:
    if layer.__class__.__name__ == 'PriorBox':
        prior_boxes = calc_priorboxes(model, layer.name)
        nratios = len(layer.aspect_ratios)
        # Anchor locations = total boxes / aspect ratios per location
        # (true division; the value is integral by construction).
        nlocs = len(prior_boxes)/nratios
        print('%-28s locations: %-6i aspect_ratios: %s' % (layer.name, nlocs, layer.aspect_ratios))
        plot_priorboxes(prior_boxes, image, nratios)
#np.save('ssd512_prior_boxes.npy', calc_priorboxes(model))
#pb = np.load('ssd512_prior_boxes.npy')
# Compare the prior boxes produced by the Keras model against the boxes
# dumped from the reference Caffe model.
mbox_priorbox = model.get_layer('mbox_priorbox')
f = K.function(model.inputs, [mbox_priorbox.output])
pb_keras = f([[input_image]])[0][0][:,:4]  # keep only (xmin, ymin, xmax, ymax)
pb_caffe = np.load('ssd512_prior_boxes_caffe.npy')
print(pb_keras[:10])
print(np.array([np.min(pb_keras, 0), np.max(pb_keras, 0), np.mean(pb_keras, 0)]))
print()
print(pb_caffe[:10])
print(np.array([np.min(pb_caffe, 0), np.max(pb_caffe, 0), np.mean(pb_caffe, 0)]))
print()
pb_diff = pb_keras - pb_caffe
print(pb_diff[:10])
print(np.mean(pb_diff, 0))
print(np.nonzero(np.round(pb_diff, 6))[0])  # indices of boxes differing beyond 1e-6
# Compare the box centre positions (midpoint of min/max per axis).
pos_keras = np.array([pb_keras[:,0]+pb_keras[:,2], pb_keras[:,1]+pb_keras[:,3]]).T/2
pos_caffe = np.array([pb_caffe[:,0]+pb_caffe[:,2], pb_caffe[:,1]+pb_caffe[:,3]]).T/2
pos_diff = pos_keras - pos_caffe
print(np.array([np.min(pos_keras, 0), np.max(pos_keras, 0), np.mean(pos_keras, 0)]))
print()
print(np.array([np.min(pos_caffe, 0), np.max(pos_caffe, 0), np.mean(pos_caffe, 0)]))
print()
print(pos_diff[:20])
print(np.nonzero(np.round(pos_diff, 6))[0])
# Compute box-centre positions, comparing the Keras and Caffe formulas
# for the per-cell step size of each source layer.
steps = [8, 16, 32, 64, 128, 256, 512]
img_width, img_height = 512, 512
layer_sizes = (64, 32, 16, 8, 4, 2, 1)
aspect_ratios = ([1.0, 1.0, 2, 0.5], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5], [1.0, 1.0, 2, 0.5])
idx = 0
for i in range(len(layer_sizes)):
    nboxes = len(aspect_ratios[i]) * layer_sizes[i]**2
    print("%i %s %i" % (layer_sizes[i], aspect_ratios[i], nboxes))
    print(np.max(pos_keras[idx:idx+nboxes,0]))
    print(np.max(pos_caffe[idx:idx+nboxes,0]))
    #print(np.min(pos_keras[idx:idx+nboxes,0]))
    #print(np.min(pos_caffe[idx:idx+nboxes,0]))
    layer_width = layer_sizes[i]
    # keras way: centres spaced symmetrically inside the image
    step_x = img_width / layer_width
    centers_x = np.linspace(step_x / 2., img_width - step_x / 2., layer_width) / img_width
    #centers_x = np.array([(0.5 + i) for i in range(layer_width)]) * step_x / img_width
    print('%-20s %-20s' % (np.max(centers_x), step_x))
    # caffe way: fixed step from the `steps` table. Note the comprehension's
    # `i` shadows the outer loop variable -- harmless in Python 3, where
    # comprehensions have their own scope.
    step_x = steps[i]
    centers_x = np.array([(0.5 + i) for i in range(layer_width)]) * step_x / img_width
    print('%-20s %-20s' % (np.max(centers_x), step_x))
    print()
    idx += nboxes
# Calculation of the per-layer min/max prior-box sizes (in pixels).
# Source-layer feature-map resolutions:
#   conv4_3 64x64, fc7 32x32, conv6_2 16x16, conv7_2 8x8,
#   conv8_2 4x4, conv9_2 2x2, conv10_2 1x1
min_dim = 512
mbox_source_layers = ['conv4_3', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2', 'conv10_2']
# scale ratios in percent of min_dim
min_ratio = 15
max_ratio = 90
step = int(np.floor((max_ratio - min_ratio) / (len(mbox_source_layers) - 2)))
# Sizes for fc7..conv10_2 come from evenly spaced ratios; conv4_3 gets a
# special 7%..15% pair prepended afterwards.
min_sizes = [min_dim * r / 100. for r in range(min_ratio, max_ratio + 1, step)]
max_sizes = [min_dim * (r + step) / 100. for r in range(min_ratio, max_ratio + 1, step)]
min_sizes.insert(0, min_dim * 7 / 100.)
max_sizes.insert(0, min_dim * 15 / 100.)
print(min_sizes)
print(max_sizes)
np.set_printoptions(precision=4)
import math
# Calculation of min_sizes / max_sizes values -- and why the prototxt holds
# floats that the PriorBox layer casts to int, which is confusing.
min_dim = 512
mbox_source_layers = ['conv4_3', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2', 'conv10_2']
min_ratio = 15
max_ratio = 90
n = len(mbox_source_layers)
step = int(math.floor((max_ratio - min_ratio) / (len(mbox_source_layers) - 2)))
min_sizes = []
max_sizes = []
for ratio in range(min_ratio, max_ratio + 1, step):
    min_sizes.append(min_dim * ratio / 100.)
    max_sizes.append(min_dim * (ratio + step) / 100.)
# conv4_3 gets the special 7% / 15% pair prepended.
min_sizes = [min_dim * 7 / 100.] + min_sizes
max_sizes = [min_dim * 15 / 100.] + max_sizes
print(min_sizes)
print(max_sizes)
# Naive approach according to the paper: evenly spaced scales.
np.linspace(min_ratio, max_ratio, n+1) * min_dim / 100.
# Why is it done in the following way?
a = np.linspace(min_ratio, max_ratio, n-1)
a = np.concatenate((np.array([7]), a ,np.array([a[-1]+min_ratio])))
a * min_dim / 100.
# As in the paper's equation 2 ("pytb" variant).
import numpy as np
np.set_printoptions(precision=1)
s_min = 0.15
s_max = 0.90
m = 8
s = []
for k in range(1,m+1):
    s_k = s_min + (s_max - s_min) * (k - 1.0) / (m - 1.0) # equation 2
    s.append(s_k)
np.array(s) * 512
# Compute the Keras prior-box widths/heights per source layer.
steps = [8, 16, 32, 64, 128, 256, 512]
img_width, img_height = 512, 512
layer_sizes = (64, 32, 16, 8, 4, 2, 1)
aspect_ratios = ([1.0, 1.0, 2, 0.5], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5, 3, 0.3333333333333333], [1.0, 1.0, 2, 0.5], [1.0, 1.0, 2, 0.5])
nboxes = []                    # total boxes per source layer
unique_size_values_keras = []  # unique normalised box widths per layer
for i in range(len(layer_sizes)):
    layer_width = layer_height = layer_sizes[i]
    nboxes.append( layer_width * layer_height * len(aspect_ratios[i]) )
    min_size = int(min_sizes[i])
    max_size = int(max_sizes[i])
    box_widths = []
    box_heights = []
    for ar in aspect_ratios[i]:
        # The second ar == 1 entry uses the geometric mean of min/max size.
        if ar == 1 and len(box_widths) > 0:
            box_widths.append(np.sqrt(min_size * max_size))
            box_heights.append(np.sqrt(min_size * max_size))
        else:
            box_widths.append(min_size * np.sqrt(ar))
            box_heights.append(min_size / np.sqrt(ar))
    #print(aspect_ratios[i])
    #print(np.array(box_widths)/img_width)
    #print(np.array(box_heights)/img_height)
    values = np.unique(np.round(np.array(box_widths)/img_width, 4))
    print(values)
    unique_size_values_keras.append(values)
    #print()
# NOTE(review): the sub-arrays have different lengths, so this builds a
# ragged object array -- newer NumPy requires dtype=object here.
unique_size_values_keras = np.array(unique_size_values_keras)
print()
# Caffe prior-box sizes: widths/heights derived from the dumped corner
# coordinates, grouped per source layer via the box counts computed above.
size_caffe = np.array([pb_caffe[:,2]-pb_caffe[:,0], pb_caffe[:,3]-pb_caffe[:,1]]).T
offset = 0
unique_size_values_caffe = []
for n in nboxes:
    values = np.unique(np.round(size_caffe[offset:offset+n],4))
    print(values)
    unique_size_values_caffe.append(values)
    offset += n
unique_size_values_caffe = np.array(unique_size_values_caffe)
print()
for i in range(len(nboxes)):
    try:
        print(unique_size_values_caffe[i] - unique_size_values_keras[i])
    except ValueError:
        # The per-layer arrays can have different lengths, making the
        # element-wise difference undefined -- skip that layer.
        # (Bug fix: was a bare `except:`, which also swallowed unrelated
        # errors such as NameError.)
        pass
```
```
# Re-print the PriorBox configuration table for reference.
fstr = '%-28s %-20s %-20s %-12s %-9s %-9s %-28s %-22s %-8s %-8s %-8s'
print(fstr % ('', 'input_shape', 'output_shape', 'img_size', 'min_size', 'max_size', 'aspect_ratios', 'variances', 'clip', 'flip', 'step'))
for l in model.layers:
    if l.__class__.__name__ == 'PriorBox':
        aspect_ratios = [ round(r, 2) for r in l.aspect_ratios ]
        print(fstr % (l.name, l.input_shape, l.output_shape, l.img_size, l.min_size, l.max_size, aspect_ratios, l.variances, l.clip, l.flip, l.step))
# Collect the feature-map width of every PriorBox layer.
# (Bug fix: the original first hard-coded layer_widths = [64, 32, 16, 8, 4,
# 2, 1] and immediately overwrote it with [] -- the dead assignment is
# removed.)
layer_widths = []
for l in model.layers:
    if l.__class__.__name__ == 'PriorBox':
        layer_widths.append(l.input_shape[1])
gamma = 1.5
image_width = image_size[0]
# Candidate box size per layer: pixels per feature-map cell, scaled by gamma.
a = image_width / np.array(layer_widths) * gamma
```
| github_jupyter |
**Chapter 6 – Decision Trees**
_This notebook contains all the sample code and solutions to the exercises in chapter 6._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml/blob/master/06_decision_trees.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
**Warning**: this is the code for the 1st edition of the book. Please visit https://github.com/ageron/handson-ml2 for the 2nd edition code, with up-to-date notebooks using the latest library versions.
# Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
```
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
np.random.seed(42)

# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "decision_trees"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)  # idempotent: ok if the dir exists
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<ext>."""
    target = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
```
# Training and visualizing
```
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

# Train a shallow (depth-2) decision tree on two iris features.
iris = load_iris()
X = iris.data[:, 2:] # petal length and width
y = iris.target

tree_clf = DecisionTreeClassifier(max_depth=2, random_state=42)
tree_clf.fit(X, y)
from sklearn.tree import export_graphviz
def image_path(fig_id):
    """Return the path of *fig_id* inside this chapter's images directory."""
    parts = (IMAGES_PATH, fig_id)
    return os.path.join(*parts)
# Render the fitted tree to a Graphviz .dot file
# (convert with e.g.: dot -Tpng iris_tree.dot -o iris_tree.png).
export_graphviz(
    tree_clf,
    out_file=image_path("iris_tree.dot"),
    feature_names=iris.feature_names[2:],
    class_names=iris.target_names,
    rounded=True,
    filled=True
)
from matplotlib.colors import ListedColormap

def plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3], iris=True, legend=False, plot_training=True):
    """Draw clf's decision regions over a 2-D feature grid, optionally with
    the training points, axis labels and legend.

    NOTE: the mutable default `axes=[...]` is safe here only because the
    body never modifies it.
    """
    # Evaluate the classifier on a 100x100 grid spanning `axes`.
    x1s = np.linspace(axes[0], axes[1], 100)
    x2s = np.linspace(axes[2], axes[3], 100)
    x1, x2 = np.meshgrid(x1s, x2s)
    X_new = np.c_[x1.ravel(), x2.ravel()]
    y_pred = clf.predict(X_new).reshape(x1.shape)
    # Filled regions, one pastel colour per class.
    custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
    plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
    if not iris:
        # Darker contour lines between regions for the non-iris plots.
        custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
        plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
    if plot_training:
        plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris-Setosa")
        plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris-Versicolor")
        plt.plot(X[:, 0][y==2], X[:, 1][y==2], "g^", label="Iris-Virginica")
        plt.axis(axes)
    if iris:
        plt.xlabel("Petal length", fontsize=14)
        plt.ylabel("Petal width", fontsize=14)
    else:
        plt.xlabel(r"$x_1$", fontsize=18)
        plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
    if legend:
        plt.legend(loc="lower right", fontsize=14)
# Plot the depth-2 tree's decision boundaries with the split lines and
# depth annotations, then save the figure.
plt.figure(figsize=(8, 4))
plot_decision_boundary(tree_clf, X, y)
plt.plot([2.45, 2.45], [0, 3], "k-", linewidth=2)        # depth-0 split (petal length)
plt.plot([2.45, 7.5], [1.75, 1.75], "k--", linewidth=2)  # depth-1 split (petal width)
plt.plot([4.95, 4.95], [0, 1.75], "k:", linewidth=2)     # hypothetical depth-2 splits
plt.plot([4.85, 4.85], [1.75, 3], "k:", linewidth=2)
plt.text(1.40, 1.0, "Depth=0", fontsize=15)
plt.text(3.2, 1.80, "Depth=1", fontsize=13)
plt.text(4.05, 0.5, "(Depth=2)", fontsize=11)
save_fig("decision_tree_decision_boundaries_plot")
plt.show()
```
# Predicting classes and class probabilities
```
# Class probabilities and predicted class for a flower with 5 cm long,
# 1.5 cm wide petals.
tree_clf.predict_proba([[5, 1.5]])
tree_clf.predict([[5, 1.5]])
```
# Sensitivity to training set details
```
# Show how sensitive the tree is to small training-set changes: remove the
# widest Iris-Versicolor and retrain with a different random seed.
X[(X[:, 1]==X[:, 1][y==1].max()) & (y==1)] # widest Iris-Versicolor flower

not_widest_versicolor = (X[:, 1]!=1.8) | (y==2)
X_tweaked = X[not_widest_versicolor]
y_tweaked = y[not_widest_versicolor]

tree_clf_tweaked = DecisionTreeClassifier(max_depth=2, random_state=40)
tree_clf_tweaked.fit(X_tweaked, y_tweaked)

plt.figure(figsize=(8, 4))
plot_decision_boundary(tree_clf_tweaked, X_tweaked, y_tweaked, legend=False)
plt.plot([0, 7.5], [0.8, 0.8], "k-", linewidth=2)
plt.plot([0, 7.5], [1.75, 1.75], "k--", linewidth=2)
plt.text(1.0, 0.9, "Depth=0", fontsize=15)
plt.text(1.0, 1.80, "Depth=1", fontsize=13)
save_fig("decision_tree_instability_plot")
plt.show()
# Regularisation on the moons data set: an unrestricted tree vs one with
# min_samples_leaf=4, plotted side by side.
from sklearn.datasets import make_moons
Xm, ym = make_moons(n_samples=100, noise=0.25, random_state=53)

deep_tree_clf1 = DecisionTreeClassifier(random_state=42)
deep_tree_clf2 = DecisionTreeClassifier(min_samples_leaf=4, random_state=42)
deep_tree_clf1.fit(Xm, ym)
deep_tree_clf2.fit(Xm, ym)

plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_decision_boundary(deep_tree_clf1, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False)
plt.title("No restrictions", fontsize=16)
plt.subplot(122)
plot_decision_boundary(deep_tree_clf2, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False)
plt.title("min_samples_leaf = {}".format(deep_tree_clf2.min_samples_leaf), fontsize=14)
save_fig("min_samples_leaf_plot")
plt.show()
# Rotate the iris features by 20 degrees: axis-aligned splits make trees
# sensitive to rotation of the feature space.
angle = np.pi / 180 * 20
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
Xr = X.dot(rotation_matrix)

tree_clf_r = DecisionTreeClassifier(random_state=42)
tree_clf_r.fit(Xr, y)

plt.figure(figsize=(8, 3))
plot_decision_boundary(tree_clf_r, Xr, y, axes=[0.5, 7.5, -1.0, 1], iris=False)
plt.show()
# Same rotation-sensitivity demo on a synthetic linearly separable set:
# train on the original data and on a 45-degree rotated copy.
np.random.seed(6)
Xs = np.random.rand(100, 2) - 0.5
ys = (Xs[:, 0] > 0).astype(np.float32) * 2

angle = np.pi / 4
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
Xsr = Xs.dot(rotation_matrix)

tree_clf_s = DecisionTreeClassifier(random_state=42)
tree_clf_s.fit(Xs, ys)
tree_clf_sr = DecisionTreeClassifier(random_state=42)
tree_clf_sr.fit(Xsr, ys)

plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_decision_boundary(tree_clf_s, Xs, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False)
plt.subplot(122)
plot_decision_boundary(tree_clf_sr, Xsr, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False)
save_fig("sensitivity_to_rotation_plot")
plt.show()
```
# Regression trees
```
# Quadratic training set + noise
np.random.seed(42)
m = 200
X = np.random.rand(m, 1)
y = 4 * (X - 0.5) ** 2
y = y + np.random.randn(m, 1) / 10

from sklearn.tree import DecisionTreeRegressor

# Depth-2 regression tree on the noisy quadratic data.
tree_reg = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg.fit(X, y)

# Two trees of different depths for the side-by-side comparison figure.
from sklearn.tree import DecisionTreeRegressor

tree_reg1 = DecisionTreeRegressor(random_state=42, max_depth=2)
tree_reg2 = DecisionTreeRegressor(random_state=42, max_depth=3)
tree_reg1.fit(X, y)
tree_reg2.fit(X, y)
def plot_regression_predictions(tree_reg, X, y, axes=[0, 1, -0.2, 1], ylabel="$y$"):
    """Plot the training points and the tree's piecewise-constant
    predictions over the x1 range given by `axes`.

    NOTE: the mutable default `axes=[...]` is safe only because the body
    never modifies it.
    """
    x1 = np.linspace(axes[0], axes[1], 500).reshape(-1, 1)
    y_pred = tree_reg.predict(x1)
    plt.axis(axes)
    plt.xlabel("$x_1$", fontsize=18)
    if ylabel:
        plt.ylabel(ylabel, fontsize=18, rotation=0)
    plt.plot(X, y, "b.")
    plt.plot(x1, y_pred, "r.-", linewidth=2, label=r"$\hat{y}$")
# Left: max_depth=2 predictions with split annotations; right: max_depth=3.
plt.figure(figsize=(11, 4))
plt.subplot(121)

plot_regression_predictions(tree_reg1, X, y)
for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")):
    plt.plot([split, split], [-0.2, 1], style, linewidth=2)
plt.text(0.21, 0.65, "Depth=0", fontsize=15)
plt.text(0.01, 0.2, "Depth=1", fontsize=13)
plt.text(0.65, 0.8, "Depth=1", fontsize=13)
plt.legend(loc="upper center", fontsize=18)
plt.title("max_depth=2", fontsize=14)

plt.subplot(122)

plot_regression_predictions(tree_reg2, X, y, ylabel=None)
for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")):
    plt.plot([split, split], [-0.2, 1], style, linewidth=2)
for split in (0.0458, 0.1298, 0.2873, 0.9040):
    plt.plot([split, split], [-0.2, 1], "k:", linewidth=1)
plt.text(0.3, 0.5, "Depth=2", fontsize=13)
plt.title("max_depth=3", fontsize=14)

save_fig("tree_regression_plot")
plt.show()

# Export the depth-2 regression tree to Graphviz.
export_graphviz(
    tree_reg1,
    out_file=image_path("regression_tree.dot"),
    feature_names=["x1"],
    rounded=True,
    filled=True
)
# Regularisation for regression trees: unrestricted vs min_samples_leaf=10.
tree_reg1 = DecisionTreeRegressor(random_state=42)
tree_reg2 = DecisionTreeRegressor(random_state=42, min_samples_leaf=10)
tree_reg1.fit(X, y)
tree_reg2.fit(X, y)

x1 = np.linspace(0, 1, 500).reshape(-1, 1)
y_pred1 = tree_reg1.predict(x1)
y_pred2 = tree_reg2.predict(x1)

plt.figure(figsize=(11, 4))

plt.subplot(121)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred1, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.axis([0, 1, -0.2, 1.1])
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", fontsize=18, rotation=0)
plt.legend(loc="upper center", fontsize=18)
plt.title("No restrictions", fontsize=14)

plt.subplot(122)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred2, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.axis([0, 1, -0.2, 1.1])
plt.xlabel("$x_1$", fontsize=18)
plt.title("min_samples_leaf={}".format(tree_reg2.min_samples_leaf), fontsize=14)

save_fig("tree_regression_regularization_plot")
plt.show()
```
# Exercise solutions
## 1. to 6.
See appendix A.
## 7.
_Exercise: train and fine-tune a Decision Tree for the moons dataset._
a. Generate a moons dataset using `make_moons(n_samples=10000, noise=0.4)`.
Adding `random_state=42` to make this notebook's output constant:
```
from sklearn.datasets import make_moons

# 10k-instance moons data set; random_state keeps the notebook deterministic.
X, y = make_moons(n_samples=10000, noise=0.4, random_state=42)
```
b. Split it into a training set and a test set using `train_test_split()`.
```
from sklearn.model_selection import train_test_split

# 80/20 train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
c. Use grid search with cross-validation (with the help of the `GridSearchCV` class) to find good hyperparameter values for a `DecisionTreeClassifier`. Hint: try various values for `max_leaf_nodes`.
```
from sklearn.model_selection import GridSearchCV

# 3-fold grid search over tree size (max_leaf_nodes) and split threshold.
params = {'max_leaf_nodes': list(range(2, 100)), 'min_samples_split': [2, 3, 4]}
grid_search_cv = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1, verbose=1, cv=3)

grid_search_cv.fit(X_train, y_train)

grid_search_cv.best_estimator_
```
d. Train it on the full training set using these hyperparameters, and measure your model's performance on the test set. You should get roughly 85% to 87% accuracy.
By default, `GridSearchCV` trains the best model found on the whole training set (you can change this by setting `refit=False`), so we don't need to do it again. We can simply evaluate the model's accuracy:
```
from sklearn.metrics import accuracy_score

# GridSearchCV already refit the best model on the whole training set,
# so we can predict on the test set directly.
y_pred = grid_search_cv.predict(X_test)
accuracy_score(y_test, y_pred)
```
## 8.
_Exercise: Grow a forest._
a. Continuing the previous exercise, generate 1,000 subsets of the training set, each containing 100 instances selected randomly. Hint: you can use Scikit-Learn's `ShuffleSplit` class for this.
```
from sklearn.model_selection import ShuffleSplit

# Draw 1,000 random subsets of 100 training instances each.  ShuffleSplit's
# "train" side is the 100-instance sample; the (unused) "test" side absorbs
# the rest, hence test_size = len(X_train) - n_instances.
n_trees = 1000
n_instances = 100
mini_sets = []
rs = ShuffleSplit(n_splits=n_trees, test_size=len(X_train) - n_instances, random_state=42)
for mini_train_index, mini_test_index in rs.split(X_train):
    X_mini_train = X_train[mini_train_index]
    y_mini_train = y_train[mini_train_index]
    mini_sets.append((X_mini_train, y_mini_train))
```
b. Train one Decision Tree on each subset, using the best hyperparameter values found above. Evaluate these 1,000 Decision Trees on the test set. Since they were trained on smaller sets, these Decision Trees will likely perform worse than the first Decision Tree, achieving only about 80% accuracy.
```
from sklearn.base import clone

# One fresh copy of the best tree per subset (clone copies hyperparameters,
# not the fitted state), each trained on its own 100-instance sample.
forest = [clone(grid_search_cv.best_estimator_) for _ in range(n_trees)]

accuracy_scores = []
for tree, (X_mini_train, y_mini_train) in zip(forest, mini_sets):
    tree.fit(X_mini_train, y_mini_train)
    y_pred = tree.predict(X_test)
    accuracy_scores.append(accuracy_score(y_test, y_pred))
# Average accuracy of the individual small trees (expected ~80%).
np.mean(accuracy_scores)
```
c. Now comes the magic. For each test set instance, generate the predictions of the 1,000 Decision Trees, and keep only the most frequent prediction (you can use SciPy's `mode()` function for this). This gives you _majority-vote predictions_ over the test set.
```
# Collect every tree's test-set predictions in one (n_trees, n_test) array;
# uint8 is enough since the labels are 0/1.
Y_pred = np.empty([n_trees, len(X_test)], dtype=np.uint8)
for tree_index, tree in enumerate(forest):
    Y_pred[tree_index] = tree.predict(X_test)
from scipy.stats import mode
# Majority vote per test instance: the most frequent class across the trees.
y_pred_majority_votes, n_votes = mode(Y_pred, axis=0)
```
d. Evaluate these predictions on the test set: you should obtain a slightly higher accuracy than your first model (about 0.5 to 1.5% higher). Congratulations, you have trained a Random Forest classifier!
```
accuracy_score(y_test, y_pred_majority_votes.reshape([-1]))
```
| github_jupyter |
# Train DynUNet on Decathlon datasets
This tutorial shows how to train 3D segmentation tasks on all the 10 decathlon datasets with `DynUNet`.
Refer to papers:
`Automated Design of Deep Learning Methods for Biomedical Image Segmentation <https://arxiv.org/abs/1904.08128>`
`nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation <https://arxiv.org/abs/1809.10486>`
[](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/modules/dynunet_tutorial.ipynb)
## Setup environment
```
# Install MONAI (with itk/ignite/tqdm extras) and matplotlib into the kernel.
%pip install -q "monai[itk, ignite, tqdm]"
%pip install -q matplotlib
%matplotlib inline
```
## Setup imports
```
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import ignite
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
from monai.apps import DecathlonDataset
from monai.config import print_config
from monai.data import DataLoader
from monai.engines import SupervisedTrainer
from monai.handlers import MeanDice, StatsHandler
from monai.inferers import SimpleInferer
from monai.losses import DiceLoss
from monai.networks.nets import DynUNet
from monai.transforms import (
AsDiscreted,
Compose,
LoadNiftid,
AddChanneld,
CropForegroundd,
Spacingd,
Orientationd,
SpatialPadd,
NormalizeIntensityd,
RandCropByPosNegLabeld,
RandZoomd,
CastToTyped,
RandGaussianNoised,
RandGaussianSmoothd,
RandScaleIntensityd,
RandFlipd,
ToTensord,
)
print_config()
```
## Select Decathlon task
The Decathlon dataset contains 10 tasks, this dynUNet tutorial can support all of them.
You just need to select the task ID; the other parameters will be selected automatically.
(Tested task 04 locally, epoch time is 8 secs on V100 GPU and best metrics is 0.8828 at epoch: 70)
```
# Decathlon task to train; change this ID to run a different dataset.
task_id = "04"
# task ID -> dataset folder name on the Decathlon server
task_name = {
    "01": "Task01_BrainTumour",
    "02": "Task02_Heart",
    "03": "Task03_Liver",
    "04": "Task04_Hippocampus",
    "05": "Task05_Prostate",
    "06": "Task06_Lung",
    "07": "Task07_Pancreas",
    "08": "Task08_HepaticVessel",
    "09": "Task09_Spleen",
    "10": "Task10_Colon",
}
# task ID -> training patch size (voxels, per spatial axis)
patch_size = {
    "01": [128, 128, 128],
    "02": [160, 192, 80],
    "03": [128, 128, 128],
    "04": [40, 56, 40],
    "05": [320, 256, 20],
    "06": [192, 160, 80],
    "07": [224, 224, 40],
    "08": [192, 192, 64],
    "09": [192, 160, 64],
    "10": [192, 160, 56],
}
# task ID -> voxel spacing used for resampling (per spatial axis)
spacing = {
    "01": [1.0, 1.0, 1.0],
    "02": [1.25, 1.25, 1.37],
    "03": [0.77, 0.77, 1],
    "04": [1.0, 1.0, 1.0],
    "05": [0.62, 0.62, 3.6],
    "06": [0.79, 0.79, 1.24],
    "07": [0.8, 0.8, 2.5],
    "08": [0.8, 0.8, 1.5],
    "09": [0.79, 0.79, 1.6],
    "10": [0.78, 0.78, 3],
}
```
## Setup data directory
You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable.
This allows you to save results and reuse downloads.
If not specified a temporary directory will be used.
```
import tempfile

# Use MONAI_DATA_DIRECTORY when set so downloads are cached between runs;
# otherwise fall back to a fresh temporary directory.
# Fix: tempfile was used below but never imported anywhere in this notebook,
# so this cell raised NameError whenever the env variable was unset.
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir)
```
## Define train and validation transforms
```
# Training pre-processing / augmentation pipeline (dictionary transforms).
train_transform = Compose(
    [
        LoadNiftid(keys=["image", "label"]),
        AddChanneld(keys=["image", "label"]),
        # crop to the bounding box of the non-zero image region
        CropForegroundd(keys=["image", "label"], source_key="image"),
        # resample to the task-specific voxel spacing
        Spacingd(
            keys=["image", "label"],
            pixdim=spacing[task_id],
            mode=("bilinear", "nearest"),
        ),
        Orientationd(keys=["image", "label"], axcodes="RAS"),
        # pad so every volume is at least one patch in size
        SpatialPadd(keys=["image", "label"], spatial_size=patch_size[task_id]),
        NormalizeIntensityd(keys=["image"], nonzero=False, channel_wise=True),
        # sample patches with centers balanced between foreground and background
        RandCropByPosNegLabeld(
            keys=["image", "label"],
            label_key="label",
            spatial_size=patch_size[task_id],
            pos=1,
            neg=1,
            num_samples=1,
            image_key="image",
            image_threshold=0,
        ),
        RandZoomd(
            keys=["image", "label"],
            min_zoom=0.9,
            max_zoom=1.2,
            mode=("trilinear", "nearest"),
            align_corners=(True, None),
            prob=0.16,
        ),
        CastToTyped(keys=["image", "label"], dtype=(np.float32, np.uint8)),
        # intensity augmentations are applied to the image only
        RandGaussianNoised(keys=["image"], std=0.01, prob=0.15),
        RandGaussianSmoothd(
            keys=["image"],
            sigma_x=(0.5, 1.15),
            sigma_y=(0.5, 1.15),
            sigma_z=(0.5, 1.15),
            prob=0.15,
        ),
        RandScaleIntensityd(keys=["image"], factors=0.3, prob=0.15),
        RandFlipd(["image", "label"], spatial_axis=[0, 1, 2], prob=0.5),
        ToTensord(keys=["image", "label"]),
    ]
)
# Validation pipeline: same deterministic pre-processing as training,
# but no random augmentation and no patch sampling (whole volumes).
val_transform = Compose(
    [
        LoadNiftid(keys=["image", "label"]),
        AddChanneld(keys=["image", "label"]),
        CropForegroundd(keys=["image", "label"], source_key="image"),
        Spacingd(
            keys=["image", "label"],
            pixdim=spacing[task_id],
            mode=("bilinear", "nearest"),
        ),
        Orientationd(keys=["image", "label"], axcodes="RAS"),
        SpatialPadd(keys=["image", "label"], spatial_size=patch_size[task_id]),
        NormalizeIntensityd(keys=["image"], nonzero=False, channel_wise=True),
        CastToTyped(keys=["image", "label"], dtype=(np.float32, np.uint8)),
        ToTensord(keys=["image", "label"]),
    ]
)
```
## Load data by MONAI DecathlonDataset
```
# download=False assumes the dataset is already present under root_dir;
# set it to True for the first run.
train_ds = DecathlonDataset(
    root_dir=root_dir,
    task=task_name[task_id],
    transform=train_transform,
    section="training",
    download=False,
    num_workers=4,
)
train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=1)
val_ds = DecathlonDataset(
    root_dir=root_dir,
    task=task_name[task_id],
    transform=val_transform,
    section="validation",
    download=False,
    num_workers=4,
)
# batch_size=1 for validation: whole volumes vary in size after cropping
val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=1)
```
## Visualize batch of data to check images and labels
```
# Sanity-check the pipeline: show slice 10 (last axis) of the first two
# validation samples, image side by side with its segmentation label.
for sample_idx in range(2):
    img = val_ds[sample_idx]["image"]
    seg = val_ds[sample_idx]["label"]
    plt.figure("check", (12, 8))
    plt.subplot(1, 2, 1)
    plt.title("image")
    plt.imshow(img[0, :, :, 10].detach().cpu(), cmap="gray")
    plt.subplot(1, 2, 2)
    plt.title("label")
    plt.imshow(seg[0, :, :, 10].detach().cpu())
    plt.show()
```
## Customize loss function
Here we combine Dice loss and Cross Entropy loss.
```
class CrossEntropyLoss(nn.Module):
    """Cross entropy that accepts a channel-first target of shape (B, 1, D, H, W)."""

    def __init__(self):
        super().__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, y_pred, y_true):
        # nn.CrossEntropyLoss expects class indices shaped (B, D, H, W);
        # drop the singleton channel axis and cast to long before scoring.
        target = y_true.squeeze(dim=1).long()
        return self.loss(y_pred, target)
class DiceCELoss(nn.Module):
    """Equal-weight sum of soft Dice loss and cross entropy loss."""

    def __init__(self):
        super().__init__()
        self.dice = DiceLoss(to_onehot_y=True, softmax=True)
        self.cross_entropy = CrossEntropyLoss()

    def forward(self, y_pred, y_true):
        # Both terms see the same raw logits / channel-first target.
        return self.dice(y_pred, y_true) + self.cross_entropy(y_pred, y_true)
```
## Initialize training components
```
device = torch.device("cuda:0")
loss = DiceCELoss()
learning_rate = 0.01
max_epochs = 200
# Derive DynUNet kernel sizes and strides from the patch size and spacing,
# following the nnU-Net heuristic: at each level, down-sample (stride 2)
# every axis whose spacing is within 2x of the finest axis and whose
# remaining size is >= 8; coarser axes keep stride/kernel 1 until the
# spacings even out.
sizes, spacings = patch_size[task_id], spacing[task_id]
properties = val_ds.get_properties(keys=["labels", "modality"])
# number of output classes / input channels taken from the dataset metadata
n_class, in_channels = len(properties["labels"]), len(properties["modality"])
best_dice, best_epoch = (n_class - 1) * [0], (n_class - 1) * [0]
strides, kernels = [], []
while True:
    spacing_ratio = [sp / min(spacings) for sp in spacings]
    stride = [2 if ratio <= 2 and size >= 8 else 1 for (ratio, size) in zip(spacing_ratio, sizes)]
    kernel = [3 if ratio <= 2 else 1 for ratio in spacing_ratio]
    if all(s == 1 for s in stride):
        break  # no axis can be down-sampled any further
    sizes = [i / j for i, j in zip(sizes, stride)]
    spacings = [i * j for i, j in zip(spacings, stride)]
    kernels.append(kernel)
    strides.append(stride)
strides.insert(0, len(spacings) * [1])  # the input block does not down-sample
kernels.append(len(spacings) * [3])  # bottleneck uses full 3x3x3 kernels
net = DynUNet(
    spatial_dims=3,
    in_channels=in_channels,
    out_channels=n_class,
    kernel_size=kernels,
    strides=strides,
    upsample_kernel_size=strides[1:],
    norm_name="instance",
    deep_supervision=True,
    deep_supr_num=2,
    res_block=False,
).to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.95)
# polynomial ("poly") learning-rate decay over max_epochs
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda epoch: (1 - epoch / max_epochs) ** 0.9
)
```
## MONAI evaluator
Here we customized the forward computation, so need to define `_iteration` function.
```
from monai.engines import SupervisedEvaluator
from monai.handlers import StatsHandler, CheckpointSaver, MeanDice
from monai.inferers import SlidingWindowInferer
val_handlers = [
    # print validation stats; no loss is computed during evaluation
    StatsHandler(output_transform=lambda x: None),
    # checkpoint the network whenever the key validation metric improves
    CheckpointSaver(save_dir="./runs/", save_dict={"net": net}, save_key_metric=True),
]
# argmax the prediction (not the label) and one-hot both so MeanDice can compare them
val_post_transform = Compose(
    [AsDiscreted(keys=("pred", "label"), argmax=(True, False), to_onehot=True, n_classes=n_class)]
)
# Define customized evaluator
class DynUNetEvaluator(SupervisedEvaluator):
def _iteration(self, engine, batchdata):
inputs, targets = self.prepare_batch(batchdata)
inputs, targets = inputs.to(engine.state.device), targets.to(engine.state.device)
flip_inputs = torch.flip(inputs, dims=(2, 3, 4))
def _compute_pred():
pred = self.inferer(inputs, self.network)
flip_pred = torch.flip(self.inferer(flip_inputs, self.network), dims=(2, 3, 4))
return (pred + flip_pred) / 2
# execute forward computation
self.network.eval()
with torch.no_grad():
if self.amp:
with torch.cuda.amp.autocast():
predictions = _compute_pred()
else:
predictions = _compute_pred()
return {"image": inputs, "label": targets, "pred": predictions}
evaluator = DynUNetEvaluator(
    device=device,
    val_data_loader=val_loader,
    network=net,
    # sliding-window inference over whole volumes, windows of one patch size
    inferer=SlidingWindowInferer(roi_size=patch_size[task_id], sw_batch_size=4, overlap=0.5),
    post_transform=val_post_transform,
    key_val_metric={
        # mean Dice over foreground classes only
        "val_mean_dice": MeanDice(
            include_background=False,
            output_transform=lambda x: (x["pred"], x["label"]),
        )
    },
    val_handlers=val_handlers,
    amp=True,
)
```
## MONAI trainer
Here we customized loss computation progress, so need to define `_iteration` function.
```
from torch.nn.functional import interpolate
from monai.engines import SupervisedTrainer
from monai.inferers import SimpleInferer
from monai.handlers import LrScheduleHandler, ValidationHandler, StatsHandler
train_handlers = [
    # step the poly LR schedule once per epoch and print the current LR
    LrScheduleHandler(lr_scheduler=scheduler, print_lr=True),
    # run the evaluator every 2 epochs
    ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
    StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
]
# define customized trainer
class DynUNetTrainer(SupervisedTrainer):
def _iteration(self, engine, batchdata):
inputs, targets = self.prepare_batch(batchdata)
inputs, targets = inputs.to(engine.state.device), targets.to(engine.state.device)
def _compute_loss(preds, label):
labels = [label] + [interpolate(label, pred.shape[2:]) for pred in preds[1:]]
return sum([0.5 ** i * self.loss_function(p, l) for i, (p, l) in enumerate(zip(preds, labels))])
self.network.train()
self.optimizer.zero_grad()
if self.amp and self.scaler is not None:
with torch.cuda.amp.autocast():
predictions = self.inferer(inputs, self.network)
loss = _compute_loss(predictions, targets)
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
predictions = self.inferer(inputs, self.network)
loss = _compute_loss(predictions, targets).mean()
loss.backward()
self.optimizer.step()
return {"image": inputs, "label": targets, "pred": predictions, "loss": loss.item()}
trainer = DynUNetTrainer(
    device=device,
    max_epochs=max_epochs,
    train_data_loader=train_loader,
    network=net,
    optimizer=optimizer,
    loss_function=loss,
    # plain forward pass on the sampled training patches
    inferer=SimpleInferer(),
    post_transform=None,
    key_train_metric=None,
    train_handlers=train_handlers,
    amp=True,
)
```
## Execute training with workflows
```
import logging
import sys
# INFO-level logging so the engines' progress is printed to stdout
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
trainer.run()
```
## Cleanup data directory
Remove directory if a temporary was used.
```
import shutil

# Remove the data directory only if a temporary one was created above.
# Fix: shutil was used here but never imported anywhere in this notebook.
if directory is None:
    shutil.rmtree(root_dir)
```
| github_jupyter |
## Recommendations with MovieTweetings: Collaborative Filtering
One of the most popular methods for making recommendations is **collaborative filtering**. In collaborative filtering, you are using the collaboration of user-item recommendations to assist in making new recommendations.
There are two main methods of performing collaborative filtering:
1. **Neighborhood-Based Collaborative Filtering**, which is based on the idea that we can either correlate items that are similar to provide recommendations or we can correlate users to one another to provide recommendations.
2. **Model Based Collaborative Filtering**, which is based on the idea that we can use machine learning and other mathematical models to understand the relationships that exist amongst items and users to predict ratings and provide ratings.
In this notebook, you will be working on performing **neighborhood-based collaborative filtering**. There are two main methods for performing collaborative filtering:
1. **User-based collaborative filtering:** In this type of recommendation, users related to the user you would like to make recommendations for are used to create a recommendation.
2. **Item-based collaborative filtering:** In this type of recommendation, first you need to find the items that are most related to each other item (based on similar ratings). Then you can use the ratings of an individual on those similar items to understand if a user will like the new item.
In this notebook you will be implementing **user-based collaborative filtering**. However, it is easy to extend this approach to make recommendations using **item-based collaborative filtering**. First, let's read in our data and necessary libraries.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tests as t
import progressbar
from scipy.sparse import csr_matrix
from IPython.display import HTML
%matplotlib inline
# Read in the datasets
movies = pd.read_csv('movies_clean.csv')
reviews = pd.read_csv('reviews_clean.csv')
# drop the stray index column that a previous to_csv wrote out
del movies['Unnamed: 0']
del reviews['Unnamed: 0']
print(reviews.head())
```
### Measures of Similarity
When using **neighborhood** based collaborative filtering, it is important to understand how to measure the similarity of users or items to one another.
There are a number of ways in which we might measure the similarity between two vectors (which might be two users or two items). In this notebook, we will look specifically at two measures used to compare vectors:
* **Pearson's correlation coefficient**
Pearson's correlation coefficient is a measure of the strength and direction of a linear relationship. The value for this coefficient is a value between -1 and 1 where -1 indicates a strong, negative linear relationship and 1 indicates a strong, positive linear relationship.
If we have two vectors **x** and **y**, we can define the correlation between the vectors as:
$$CORR(x, y) = \frac{\text{COV}(x, y)}{\text{STDEV}(x)\text{ }\text{STDEV}(y)}$$
where
$$\text{STDEV}(x) = \sqrt{\frac{1}{n-1}\sum_{i=1}^{n}(x_i - \bar{x})^2}$$
and
$$\text{COV}(x, y) = \frac{1}{n-1}\sum_{i=1}^{n}(x_i - \bar{x})(y_i - \bar{y})$$
where n is the length of the vector, which must be the same for both x and y and $\bar{x}$ is the mean of the observations in the vector.
We can use the correlation coefficient to indicate how alike two vectors are to one another, where the closer to 1 the coefficient, the more alike the vectors are to one another. There are some potential downsides to using this metric as a measure of similarity. You will see some of these throughout this workbook.
* **Euclidean distance**
Euclidean distance is a measure of the straightline distance from one vector to another. Because this is a measure of distance, larger values are an indication that two vectors are different from one another (which is different than Pearson's correlation coefficient).
Specifically, the euclidean distance between two vectors **x** and **y** is measured as:
$$ \text{EUCL}(x, y) = \sqrt{\sum_{i=1}^{n}(x_i - y_i)^2}$$
Different from the correlation coefficient, no scaling is performed in the denominator. Therefore, you need to make sure all of your data are on the same scale when using this metric.
**Note:** Because measuring similarity is often based on looking at the distance between vectors, it is important in these cases to scale your data or to have all data be in the same scale. If some measures are on a 5 point scale, while others are on a 100 point scale, you are likely to have non-optimal results due to the difference in variability of your features. Measures like Pearson's and Spearman's correlation coefficients are unit agnostic, which means it is not necessary to scale for these measures. However, many measures used to measure similarity (like Euclidean or Manhattan distances) are not unit agnostic.
In this case, we will not need to scale data because they are all on a 10 point scale, but it is always something to keep in mind!
------------
### User-Item Matrix
In order to calculate the similarities, it is common to put values in a matrix. In this matrix, users are identified by each row, and items are represented by columns.

In the above matrix, you can see that **User 1** and **User 2** both used **Item 1**, and **User 2**, **User 3**, and **User 4** all used **Item 2**. However, there are also a large number of missing values in the matrix for users who haven't used a particular item. A matrix with many missing values (like the one above) is considered **sparse**.
Our first goal for this notebook is to create the above matrix with the **reviews** dataset. However, instead of 1 values in each cell, you should have the actual rating.
The users will indicate the rows, and the movies will exist across the columns. To create the user-item matrix, we only need the first three columns of the **reviews** dataframe, which you can see by running the cell below.
```
# Only user id, movie id and rating are needed to build the user-item matrix.
user_items = reviews[['user_id', 'movie_id', 'rating']]
user_items.head()
```
### Creating the User-Item Matrix
In order to create the user-items matrix (like the one above), I personally started by using a [pivot table](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html).
However, I quickly ran into a memory error (a common theme throughout this notebook). I will help you navigate around many of the errors I had, and achieve useful collaborative filtering results!
_____
`1.` Create a matrix where the users are the rows, the movies are the columns, and the ratings exist in each cell, or a NaN exists in cells where a user hasn't rated a particular movie. If you get a memory error (like I did), [this link here](https://stackoverflow.com/questions/39648991/pandas-dataframe-pivot-memory-error) might help you!
```
# Create user-by-item matrix
```
Check your results below to make sure your matrix is ready for the upcoming sections.
```
# Sanity checks: one column per movie, one row per distinct reviewer.
assert movies.shape[0] == user_by_movie.shape[1], "Oh no! Your matrix should have {} columns, and yours has {}!".format(movies.shape[0], user_by_movie.shape[1])
assert reviews.user_id.nunique() == user_by_movie.shape[0], "Oh no! Your matrix should have {} rows, and yours has {}!".format(reviews.user_id.nunique(), user_by_movie.shape[0])
print("Looks like you are all set! Proceed!")
HTML('<img src="images/greatjob.webp">')
```
`2.` Now that you have a matrix of users by movies, use this matrix to create a dictionary where the key is each user and the value is an array of the movies each user has rated.
```
# Create a dictionary with users and corresponding movies seen
def movies_watched(user_id):
'''
INPUT:
user_id - the user_id of an individual as int
OUTPUT:
movies - an array of movies the user has watched
'''
return movies
def create_user_movie_dict():
    '''
    INPUT: None
    OUTPUT: movies_seen - a dictionary where each key is a user_id and the value is an array of movie_ids
    Creates the movies_seen dictionary
    '''
    # Fix: the stub returned an undefined name (NameError).
    # NOTE(review): assumes user_by_movie (question 1) is indexed by user_id.
    # This loops over every user and can take a while; wrap the iterator in
    # a progress bar if you want to watch it move.
    movies_seen = dict()
    for user_id in user_by_movie.index:
        movies_seen[user_id] = movies_watched(user_id)
    return movies_seen

# Use your function to return dictionary
movies_seen = create_user_movie_dict()
```
`3.` If a user hasn't rated more than 2 movies, we consider these users "too new". Create a new dictionary that only contains users who have rated more than 2 movies. This dictionary will be used for all the final steps of this workbook.
```
# Remove individuals who have watched 2 or fewer movies - don't have enough data to make recs
def create_movies_to_analyze(movies_seen, lower_bound=2):
    '''
    INPUT:
    movies_seen - a dictionary where each key is a user_id and the value is an array of movie_ids
    lower_bound - (an int) a user must have more movies seen than the lower bound to be added to the movies_to_analyze dictionary
    OUTPUT:
    movies_to_analyze - a dictionary where each key is a user_id and the value is an array of movie_ids
    The movies_seen and movies_to_analyze dictionaries should be the same except that the output dictionary has removed
    users with lower_bound or fewer rated movies
    '''
    # Fix: the stub returned an undefined name (NameError).
    # Keep only users with strictly more than lower_bound rated movies.
    movies_to_analyze = {user: movies for user, movies in movies_seen.items() if len(movies) > lower_bound}
    return movies_to_analyze
# Use your function to return your updated dictionary
movies_to_analyze = create_movies_to_analyze(movies_seen)
# Run the tests below to check that your movies_to_analyze matches the solution
# (expected counts come from the MovieTweetings data used by this workbook)
assert len(movies_to_analyze) == 23512, "Oops! It doesn't look like your dictionary has the right number of individuals."
assert len(movies_to_analyze[2]) == 23, "Oops! User 2 didn't match the number of movies we thought they would have."
assert len(movies_to_analyze[7]) == 3, "Oops! User 7 didn't match the number of movies we thought they would have."
print("If this is all you see, you are good to go!")
```
### Calculating User Similarities
Now that you have set up the **movies_to_analyze** dictionary, it is time to take a closer look at the similarities between users. Below is the pseudocode for how I thought about determining the similarity between users:
```
for user1 in movies_to_analyze
for user2 in movies_to_analyze
see how many movies match between the two users
if more than two movies in common
pull the overlapping movies
compute the distance/similarity metric between ratings on the same movies for the two users
store the users and the distance metric
```
However, this took a very long time to run, and other methods of performing these operations did not fit on the workspace memory!
Therefore, your task for this question is to look at a few specific examples of the correlation between ratings given by two users. For this question consider you want to compute the [correlation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.corr.html) between users.
`4.` Using the **movies_to_analyze** dictionary and **user_by_movie** dataframe, create a function that computes the correlation between the ratings of similar movies for two users. Then use your function to compare your results to ours using the tests below.
```
def compute_correlation(user1, user2):
    '''
    INPUT
    user1 - int user_id
    user2 - int user_id
    OUTPUT
    the correlation between the matching ratings between the two users
    '''
    # Fix: the stub returned an undefined name (NameError).
    # Movies both users have rated (movies_to_analyze only holds rated ids).
    movies1 = movies_to_analyze[user1]
    movies2 = movies_to_analyze[user2]
    shared_movies = np.intersect1d(movies1, movies2, assume_unique=True)
    # Pull both users' ratings on the shared movies and correlate the two
    # columns; corr() yields NaN when either user's shared ratings have
    # zero spread (see question 5).
    ratings = user_by_movie.loc[(user1, user2), shared_movies].transpose()
    corr = ratings.corr().iloc[0, 1]
    return corr  # return the correlation
# Read in solution correlations - this will take some time to read in
import pickle
corrs_import = pickle.load(open("corrs.p", "rb"))
df_corrs = pd.DataFrame(corrs_import)
df_corrs.columns = ['user1', 'user2', 'movie_corr']

# Test your function against the solution
assert compute_correlation(2,2) == df_corrs.query("user1 == 2 and user2 == 2")['movie_corr'][0], "Oops! The correlation between a user and itself should be 1.0."
assert round(compute_correlation(2,66), 2) == round(df_corrs.query("user1 == 2 and user2 == 66")['movie_corr'][1], 2), "Oops! The correlation between user 2 and 66 should be about 0.76."
assert np.isnan(compute_correlation(2,104)) == np.isnan(df_corrs.query("user1 == 2 and user2 == 104")['movie_corr'][4]), "Oops! The correlation between user 2 and 104 should be a NaN."
print("If this is all you see, then it looks like your function passed all of our tests!")
```
### Why the NaN's?
If the function you wrote passed all of the tests, then you have correctly set up your function to calculate the correlation between any two users. The **df_corrs** dataframe created in the cell leading up to the tests holds combinations of users along with their corresponding correlation.
`5.` But one question is why are we still obtaining **NaN** values. Look at the header below for users 2 and 104, they have a correlation of **NaN**, why?
```
df_corrs.head()
```
Leave your thoughts here about why the NaN exists, and use the cells below to validate your thoughts. These Nan's ultimately make the correlation coefficient a less than optimal measure of similarity between two users.
```
# Which movies did both user 2 and user 4 see?
# What were the ratings for each user on those movies?
```
`6.` Because the correlation coefficient proved to be less than optimal for relating user ratings to one another, we could instead calculate the euclidean distance between the ratings. I found [this post](https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy) particularly helpful when I was setting up my function. This function should be very similar to your previous function. When you feel confident with your function, test it against our results.
```
def compute_euclidean_dist(user1, user2):
    '''
    INPUT
    user1 - int user_id
    user2 - int user_id
    OUTPUT
    the euclidean distance between user1 and user2
    '''
    # Fix: the stub returned an undefined name (NameError).
    # Same shared-movie lookup as compute_correlation above.
    movies1 = movies_to_analyze[user1]
    movies2 = movies_to_analyze[user2]
    shared_movies = np.intersect1d(movies1, movies2, assume_unique=True)
    ratings = user_by_movie.loc[(user1, user2), shared_movies]
    # Straight-line distance between the two rating vectors; NaN ratings
    # propagate into the result.
    dist = np.linalg.norm(ratings.loc[user1] - ratings.loc[user2])
    return dist  # return the euclidean distance
# Read in solution euclidean distances - this will take some time to read in
df_dists = pickle.load(open("dists.p", "rb"))

# Test your function against the solution
assert compute_euclidean_dist(2,2) == df_dists.query("user1 == 2 and user2 == 2")['eucl_dist'][0], "Oops! The distance between a user and itself should be 0.0."
assert round(compute_euclidean_dist(2,66), 2) == round(df_dists.query("user1 == 2 and user2 == 66")['eucl_dist'][1], 2), "Oops! The distance between user 2 and 66 should be about 2.24."
assert np.isnan(compute_euclidean_dist(2,104)) == np.isnan(df_dists.query("user1 == 2 and user2 == 104")['eucl_dist'][4]), "Oops! The distance between user 2 and 104 should be 2."
print("If this is all you see, then it looks like your function passed all of our tests!")
```
### Using the Nearest Neighbors to Make Recommendations
In the previous questions, you read in **df_corrs** and **df_dists**. Therefore, you have a measure of distance and similarity for each user to every other user. These dataframes hold every possible combination of users, as well as the corresponding correlation or euclidean distance, respectively.
Because of the **NaN** values that exist within **df_corrs**, we will proceed using **df_dists**. You will want to find the users that are 'nearest' to each user. Then you will want to find the movies the closest neighbors have liked to recommend to each user.
I made use of the following objects:
* df_dists (to obtain the neighbors)
* user_items (to obtain the movies the neighbors and users have rated)
* movies (to obtain the names of the movies)
`7.` Complete the functions below, which allow you to find the recommendations for any user. There are three functions which you will need:
* **find_closest_neighbors** - this returns a list of user_ids from closest neighbor to farthest neighbor using euclidean distance
* **movies_liked** - returns an array of movie_ids
* **movie_names** - takes the output of movies_liked and returns a list of movie names associated with the movie_ids
* **make_recommendations** - takes a user id and goes through closest neighbors to return a list of movie names as recommendations
* **all_recommendations** = loops through every user and returns a dictionary of with the key as a user_id and the value as a list of movie recommendations
```
def find_closest_neighbors(user):
    '''
    INPUT:
    user - (int) the user_id of the individual you want to find the closest users
    OUTPUT:
    closest_neighbors - an array of the id's of the users sorted from closest to farthest away
    '''
    # Fix: the stub returned an undefined name (NameError).
    # NOTE(review): assumes df_dists has columns user1, user2, eucl_dist.
    # Sort every pair involving `user` by distance, then drop the first row,
    # which is the user paired with itself (distance 0).
    # I treated ties as arbitrary and just kept whichever order sort_values
    # happened to leave them in.
    ordered = df_dists[df_dists['user1'] == user].sort_values(by='eucl_dist')
    closest_neighbors = np.array(ordered.iloc[1:]['user2'])
    return closest_neighbors
def movies_liked(user_id, min_rating=7):
    '''
    INPUT:
    user_id - the user_id of an individual as int
    min_rating - the minimum rating considered while still a movie is still a "like" and not a "dislike"
    OUTPUT:
    movies_liked - an array of movies the user has watched and liked
    '''
    # Fix: the stub returned an undefined name (NameError).
    # Ratings at or above min_rating count as a "like" on the 10-point scale.
    movies_liked = np.array(user_items.query('user_id == @user_id and rating >= @min_rating')['movie_id'])
    return movies_liked
def movie_names(movie_ids):
    '''
    INPUT
    movie_ids - a list of movie_ids
    OUTPUT
    movies - a list of movie names associated with the movie_ids
    '''
    # Fix: the stub returned an undefined name (NameError).
    # NOTE(review): assumes the movies dataframe has 'movie_id' and 'movie'
    # columns - confirm against movies_clean.csv.
    movie_lst = list(movies[movies['movie_id'].isin(movie_ids)]['movie'])
    return movie_lst
def make_recommendations(user, num_recs=10):
    '''
    INPUT:
    user - (int) a user_id of the individual you want to make recommendations for
    num_recs - (int) number of movies to return
    OUTPUT:
    recommendations - a list of movies - if there are "num_recs" recommendations return this many
                      otherwise return the total number of recommendations available for the "user"
                      which may just be an empty list
    '''
    # Fix: the stub returned an undefined name (NameError).
    # Never recommend a movie the user has already seen.
    seen = movies_watched(user)
    closest_neighbors = find_closest_neighbors(user)
    # Walk neighbors from nearest to farthest, collecting movies they liked
    # that the user has not seen, until we have at least num_recs candidates.
    recs = np.array([])
    for neighbor in closest_neighbors:
        neighbor_likes = movies_liked(neighbor)
        new_recs = np.setdiff1d(neighbor_likes, seen, assume_unique=True)
        recs = np.unique(np.concatenate([new_recs, recs], axis=0))
        if len(recs) > num_recs - 1:
            break
    # Translate movie ids into human-readable titles.
    recommendations = movie_names(recs)
    return recommendations
def all_recommendations(num_recs=10):
'''
INPUT
num_recs (int) the (max) number of recommendations for each user
OUTPUT
all_recs - a dictionary where each key is a user_id and the value is an array of recommended movie titles
'''
# Apply make recs for each user -
# hint this may take some time, so you might want to set up a progress bar to watch things progress
# TODO(exercise): call make_recommendations(user, num_recs) for every user id and
# store the results in the dict `all_recs` before returning.
return all_recs
all_recs = all_recommendations(10)
# This may take some time - it loads our solution dictionary so you can compare results
# NOTE(review): assumes `pickle` and IPython's `HTML` were imported in an earlier cell - confirm
all_recs_sol = pickle.load(open("all_recs.p", "rb"))
assert all_recs[2] == make_recommendations(2), "Oops! Your recommendations for user 2 didn't match ours."
assert all_recs[26] == make_recommendations(26), "Oops! It actually wasn't possible to make any recommendations for user 26."
assert all_recs[1503] == make_recommendations(1503), "Oops! Looks like your solution for user 1503 didn't match ours."
print("If you made it here, you now have recommendations for many users using collaborative filtering!")
HTML('<img src="images/greatjob.webp">')
```
### Now What?
If you made it this far, you have successfully implemented a solution to making recommendations using collaborative filtering.
`8.` Let's do a quick recap of the steps taken to obtain recommendations using collaborative filtering.
```
# Check your understanding of the results by correctly filling in the dictionary below
# NOTE: this cell is intentionally incomplete (quiz template) - replace each
# "# letter here" placeholder with one of the lettered variables before running.
a = "pearson's correlation and spearman's correlation"
b = 'item based collaborative filtering'
c = "there were too many ratings to get a stable metric"
d = 'user based collaborative filtering'
e = "euclidean distance and pearson's correlation coefficient"
f = "manhatten distance and euclidean distance"
g = "spearman's correlation and euclidean distance"
h = "the spread in some ratings was zero"
i = 'content based recommendation'
sol_dict = {
'The type of recommendation system implemented here was a ...': # letter here,
'The two methods used to estimate user similarity were: ': # letter here,
'There was an issue with using the correlation coefficient. What was it?': # letter here
}
t.test_recs(sol_dict)
```
Additionally, let's take a closer look at some of the results. There are three objects that you read in to check your results against the solution:
* **df_corrs** - a dataframe of user1, user2, pearson correlation between the two users
* **df_dists** - a dataframe of user1, user2, euclidean distance between the two users
* **all_recs_sol** - a dictionary of all recommendations (key = user, value = list of recommendations)
Looping your results from the correlation and euclidean distance functions through every pair of users could have been used to create the first two objects (I don't recommend doing this given how long it will take).
`9.`Use these three objects along with the cells below to correctly fill in the dictionary below and complete this notebook!
```
# NOTE: quiz template - fill in each "# letter here" placeholder before running.
a = 567
b = 1503
c = 1319
d = 1325
e = 2526710
f = 0
g = 'Use another method to make recommendations - content based, knowledge based, or model based collaborative filtering'
sol_dict2 = {
'For how many pairs of users were we not able to obtain a measure of similarity using correlation?': # letter here,
'For how many pairs of users were we not able to obtain a measure of similarity using euclidean distance?': # letter here,
'For how many users were we unable to make any recommendations for using collaborative filtering?': # letter here,
'For how many users were we unable to make 10 recommendations for using collaborative filtering?': # letter here,
'What might be a way for us to get 10 recommendations for every user?': # letter here
}
t.test_recs2(sol_dict2)
#Use the below cells for any work you need to do!
# Users without recs
# NaN correlation values
# NaN euclidean distance values
# Users with less than 10 recs
```
| github_jupyter |
# L5 Closed-loop Gym-compatible Environment
This notebook demonstrates some of the aspects of our gym-compatible closed-loop environment.
You will understand the inner workings of our L5Kit environment and how an RL policy can be used to roll out the environment.
Note: The training of different RL policies in our environment will be shown in a separate notebook.

```
#@title Download L5 Sample Dataset and install L5Kit
import os
# Detect Colab by checking the running IPython kernel's repr (requires IPython).
RunningInCOLAB = 'google.colab' in str(get_ipython())
if RunningInCOLAB:
# IPython shell magics: fetch and run the L5Kit Colab bootstrap script,
# which writes the dataset location into dataset_dir.txt.
!wget https://raw.githubusercontent.com/lyft/l5kit/master/examples/setup_notebook_colab.sh -q
!sh ./setup_notebook_colab.sh
os.environ["L5KIT_DATA_FOLDER"] = open("./dataset_dir.txt", "r").read().strip()
else:
# Locally, point L5Kit at a fixed data folder instead.
os.environ["L5KIT_DATA_FOLDER"] = "/tmp/level5_data"
print("Not running in Google Colab.")
import gym
import matplotlib.pyplot as plt
import torch
import numpy as np
import l5kit.environment
from l5kit.configs import load_config_data
from l5kit.environment.envs.l5_env import EpisodeOutputGym, SimulationConfigGym
from l5kit.environment.gym_metric_set import L2DisplacementYawMetricSet
from l5kit.visualization.visualizer.zarr_utils import episode_out_to_visualizer_scene_gym_cle
from l5kit.visualization.visualizer.visualizer import visualize
from bokeh.io import output_notebook, show
from prettytable import PrettyTable
```
### First, let's configure where our data lives!
The data is expected to live in a folder that can be configured using the `L5KIT_DATA_FOLDER` env variable. Your data folder is expected to contain subfolders for the aerial and semantic maps as well as the scenes (`.zarr` files).
In this example, the env variable is set to the local data folder. You should make sure the path points to the correct location for you.
We built our code to work with a human-readable `yaml` config. This config file holds much useful information, however, we will only focus on a few functionalities concerning the creation of our gym environment here
```
# Dataset is assumed to be in the folder specified
# in the L5KIT_DATA_FOLDER environment variable
# get environment config
env_config_path = '../gym_config.yaml'
cfg = load_config_data(env_config_path)
print(cfg)
```
### We can look into our current configuration for interesting fields
\- when loaded in python, the `yaml`file is converted into a python `dict`.
`raster_params` contains all the information related to the transformation of the 3D world onto an image plane:
- `raster_size`: the image plane size
- `pixel_size`: how many meters correspond to a pixel
- `ego_center`: our raster is centered around an agent, we can move the agent in the image plane with this param
- `map_type`: the rasterizer to be employed. We currently support a satellite-based and a semantic-based one. We will look at the differences further down in this script
The `raster_params` are used to determine the observation provided by our gym environment to the RL policy.
```
# Dump every rasterizer setting from the loaded config, one per line.
print(f'current raster_param:\n')
for k,v in cfg["raster_params"].items():
print(f"{k}:{v}")
```
## Create L5 Closed-loop Environment
We will now create an instance of the L5Kit gym-compatible environment. As you can see, we need to provide the path to the configuration file of the environment.
1. The `rescale_action` flag rescales the policy action based on dataset statistics. This argument helps for faster convergence during policy training.
2. The `return_info` flag informs the environment to return the episode output everytime an episode is rolled out.
Note: The environment has already been registered with gym during initialization of L5Kit.
```
# Build the closed-loop environment; return_info=True makes step() surface
# the full episode output ("sim_outs") once an episode finishes.
env = gym.make("L5-CLE-v0", env_config_path=env_config_path, rescale_action=False, return_info=True)
```
## Visualize an observation from the environment
Let us visualize the observation from the environment. We will reset the environment and visualize an observation which is provided by the environment.
```
obs = env.reset()
# Move channels last (transpose axis 0 to the end - presumably CHW -> HWC) so the
# rasterizer can convert the raster to an RGB image for display.
im = obs["image"].transpose(1, 2, 0)
im = env.dataset.rasterizer.to_rgb(im)
plt.imshow(im)
plt.show()
```
## Rollout an episode from the environment
### The rollout of an episode in our environment takes place in three steps:
### Gym Environment Update:
1. Reward Calculation (CLE): Given an action from the policy, the environment will calculate the reward received as a consequence of the action.
2. Internal State Update: Since we are rolling out the environment in closed-loop, the internal state of the ego is updated based on the action.
3. Raster rendering: A new raster image is rendered based on the predicted ego position and returned as the observation of next time-step.
### Policy Forward Pass
The policy takes as input the observation provided by the environment and outputs the action via a forward pass.
### Inter-process communication
Usually, we deploy different subprocesses to roll out parallel environments to speed up rollout time during training. Each subprocess rolls out one environment. In such scenarios, there is an additional component called inter-process communication: the subprocess outputs (observations) are aggregated and passed to the main process and vice versa (for the actions).

### Dummy Policy
For this notebook, we will not train the policy but use a dummy policy. Our dummy policy will move the ego by 10 m/s along the direction of orientation.
```
class DummyPolicy(torch.nn.Module):
    """A policy that advances the ego by a constant distance along the x-direction.

    :param advance_x: the distance to advance per time-step
    """

    def __init__(self, advance_x: float = 0.0):
        super(DummyPolicy, self).__init__()
        self.advance_x = advance_x

    def forward(self, x):
        # Action layout is [dx, dy, dyaw]; only the x-displacement is non-zero.
        action = torch.tensor([self.advance_x, 0.0, 0.0], dtype=torch.float32)
        return action.cpu().numpy()
# We multiply the desired speed by the step-time (inverse of frequency) of data
# collection, so the per-step displacement corresponds to the desired speed.
desired_speed = 10.0
dummy_policy = DummyPolicy(cfg["model_params"]["step_time"] * desired_speed)
```
Let us now rollout the environment using the dummy policy.
```
def rollout_episode(env, idx=0):
    """Roll out one scene in closed loop and return its simulation output.

    :param env: the gym environment
    :param idx: the scene index to be rolled out
    :return: the episode output of the rolled-out scene
    """
    # Tell the environment which scene to load on the next reset.
    env.reset_scene_id = idx

    # Step until the episode terminates, feeding each observation to the policy.
    observation = env.reset()
    done = False
    while not done:
        action = dummy_policy(observation)
        observation, _, done, info = env.step(action)

    # The episode outputs are present in the key "sim_outs".
    return info["sim_outs"][0]

# Rollout one episode
sim_out = rollout_episode(env)
```
## Visualize the episode from the environment
We can easily visualize the outputs obtained by rolling out episodes in the L5Kit using the Bokeh visualizer.
```
# might change with different rasterizer
# NOTE(review): this reaches into the semantic rasterizer (`sem_rast`), so it
# assumes the configured map_type is semantic - confirm for other rasterizers.
map_API = env.dataset.rasterizer.sem_rast.mapAPI
def visualize_outputs(sim_outs, map_API):
"""Render each rolled-out scene with the Bokeh visualizer."""
for sim_out in sim_outs: # for each scene
vis_in = episode_out_to_visualizer_scene_gym_cle(sim_out, map_API)
show(visualize(sim_out.scene_id, vis_in))
output_notebook()
visualize_outputs([sim_out], map_API)
```
## Calculate the performance metrics from the episode outputs
We can also calculate the various quantitative metrics on the rolled out episode output.
```
def quantify_outputs(sim_outs, metric_set=None):
    """Print a per-scene and overall table of FDE/ADE for the given episode outputs.

    :param sim_outs: list of episode outputs to evaluate
    :param metric_set: metric set to evaluate with; defaults to L2DisplacementYawMetricSet
    """
    if metric_set is None:
        metric_set = L2DisplacementYawMetricSet()
    metric_set.evaluate(sim_outs)
    scene_results = metric_set.evaluator.scene_metric_results

    table = PrettyTable(field_names=["scene_id", "FDE", "ADE"])
    fde_sum = 0.0
    ade_sum = 0.0
    for scene_id, scene_metrics in scene_results.items():
        displacement = scene_metrics["displacement_error_l2"]
        # ADE averages the per-frame L2 error, skipping index 0;
        # FDE is the error at the final frame.
        ade_error = displacement[1:].mean()
        fde_error = displacement[-1]
        table.add_row([scene_id, round(fde_error.item(), 4), round(ade_error.item(), 4)])
        fde_sum += fde_error.item()
        ade_sum += ade_error.item()

    n_scenes = len(scene_results)
    table.add_row(["Overall", round(fde_sum / n_scenes, 4), round(ade_sum / n_scenes, 4)])
    print(table)

quantify_outputs([sim_out])
```
| github_jupyter |
##### Function "print" for prints the specified message to the screen, or other standard output device
```
# print() writes its argument to the console
# NOTE(review): the trailing dashes below look like a visual cell separator,
# not valid R - confirm before running these cells as plain scripts.
print(5+5)
print("Hello World")
print(TRUE)
----------------------
```
##### R is case sensitive
```
print("Me")
#Not same with
print("ME")
print("01")
#Not same with
print("1")
----------------------
```
###### "c" Function for makes continues number
```
# c() combines its arguments into a vector; 1:10 builds the same sequence
c(1,2,3,4,5,6,7,8,9,10)
#Same with
c(1:10)
----------------------
```
##### Variable in R
```
a <- "Hello World"
b <- 57
print(a)
print(b)
----------------------
```
##### Making Vector in R
```
# Build a numeric vector with c()
Vector <- c(51,45,67)
print(Vector)
----------------------
```
##### Sequence Vector and Manual Vector
```
# A colon range (10:20) produces the same vector as listing each value by hand
manual <- c(10,11,12,13,14,15,16,17,18,19,20)
#Same with
vector <- c(10:20)
print(manual)
print(vector)
----------------------
```
##### Vector can contains text
```
vector <- c("I","Love","You")
print(vector)
----------------------
```
##### Vector Indexing
```
# R vectors are 1-indexed; [[i]] and [i] both select elements, [a:b] slices
vector <- c(80,85,87,83,82,98,93,100)
print(vector[[2]]) #Print index number 2 from left
print(vector[6]) #Print index number 6 from left
print(vector[5:8]) #Print index number 5 to 8 from left
----------------------
```
##### Named Vector
```
named <- c(language="R",machine_learning="Yes",data_mining="Yes")
print(named)
print(named["machine_learning"]) #Print object named "machine_learning"
----------------------
```
##### List
```
alist <- list("I","am",100,"%","Human")
print(alist)
----------------------
```
##### List Indexing
```
alist <- list("I","am",100,"%","Human")
print(alist[3]) #Print index number 3
print(alist[1:4]) #Print index number 1 to 4
----------------------
```
##### Data Frame in R
```
# data.frame() binds equal-length vectors into columns of a table
jumlah_mahasiswa <- c(450,670,490,421,577)
fakultas <- c("Teknik Pertanian","Sistem Informasi","Statistika","Matematika","Ilmu Komputer")
dataframe <- data.frame(fakultas,jumlah_mahasiswa)
print(dataframe)
----------------------
```
##### Taking column from data frame
```
# `$` extracts a single column from a data frame by name
jumlah_mahasiswa <- c(450,670,490,421,577)
fakultas <- c("Teknik Pertanian","Sistem Informasi","Statistika","Matematika","Ilmu Komputer")
dataframe <- data.frame(fakultas,jumlah_mahasiswa)
dataframe$fakultas
dataframe$jumlah_mahasiswa
----------------------
```
##### Making chart with ggplot2
```
library(ggplot2) #Importing ggplot2 library
jumlah_mahasiswa <- c(450,670,490,421,577)
fakultas <- c("Teknik Pertanian","Sistem Informasi","Statistika","Matematika","Ilmu Komputer")
# Fix: the original passed the undefined variable `prodi` to data.frame();
# the vector defined above is `fakultas`.
dataframe <- data.frame(fakultas,jumlah_mahasiswa)
# Bar chart of student counts per faculty, one fill colour per faculty.
plot <- ggplot(dataframe,aes(x=fakultas,y=jumlah_mahasiswa,fill=fakultas))
plot <- plot + geom_bar(width=0.9,stat="identity")
plot
----------------------
```
##### Adding title and other informations
```
library(ggplot2) #Importing ggplot2 library
jumlah_mahasiswa <- c(450,670,490,421,577)
fakultas <- c("Teknik Pertanian","Sistem Informasi","Statistika","Matematika","Ilmu Komputer")
# Fix: the original passed the undefined variable `prodi` to data.frame();
# the vector defined above is `fakultas`.
dataframe <- data.frame(fakultas,jumlah_mahasiswa)
plot <- ggplot(dataframe,aes(x=fakultas,y=jumlah_mahasiswa,fill=fakultas))
plot <- plot + geom_bar(width=0.9,stat="identity")
plot <- plot + ggtitle("Grafik Jumlah Mahasiswa Terhadap Fakultas") #Adding title
plot <- plot + xlab("Fakultas") #Adding bottom information
plot <- plot + ylab("Jumlah Mahasiswa") # Adding side information
plot
----------------------
```
##### Reading .xlsx file in R
```
library(openxlsx) #Importing library for reading .xlsx file
# read.xlsx can fetch a workbook directly from a URL; select the sheet by name
read <- read.xlsx("https://academy.dqlab.id/dataset/mahasiswa.xlsx",sheet = "Sheet 1")
read
----------------------
```
##### Making graph from .xlsx file
```
library(openxlsx) #Importing library for reading .xlsx file
# Fix: ggplot() is used below, so ggplot2 must be loaded in this cell too
# (the original relied on it having been attached by an earlier cell).
library(ggplot2)
read <- read.xlsx("https://academy.dqlab.id/dataset/mahasiswa.xlsx",sheet = "Sheet 1")
# Bar chart of the JUMLAH column per Fakultas from the downloaded sheet.
plot <- ggplot(read,aes(x=Fakultas,y=JUMLAH,fill=Fakultas))
plot <- plot + geom_bar(width=0.9,stat="identity")
plot
```
Learning Source : www.dqlab.id
| github_jupyter |
## Instructions to reproduce the WaMDaM / WEAP paper use case results
### Open Source Python Software To Manage, Populate, Compare, And Analyze Weap Models And Scenarios
#### By Adel M. Abdallah, Feb 2022
### Abstract
The Water Evaluation and Planning system (WEAP) is a proprietary systems simulation software that is used globally for water management modeling studies. WEAP has a simple and powerful Application Programming Interface (API), however most WEAP modelers manually populate data into their WEAP area (model), which is error-prone and time-consuming. The remaining modelers use the WEAP API to programmatically load data and manage scenarios.
We contribute open-source Python software that automates and generalizes the processes for WEAP modelers to prepare and load data and run sensitivity analysis for multiple WEAP areas and their scenarios without writing code. The software also allows others to export and store model data and run independent analyses. We demonstrate the software with existing WEAP areas for the 1) Bear River Basin in Idaho and Utah and 2) Weber River Basin in Utah. Results for changes in reservoir capacity, demand, evaporation, and river headflows show estimated demand reliability across all simulation years and scenarios. Demand sites reliability in both the Bear and Weber Rivers models varied from 50% to 100%.
WaMDaM software developments continues at the WaMDaM project on GitHub at https://github.com/WamdamProject
### The instructions here will help you to reproduce:
* Use Case 1: Estimate and compare how regulated two river basins are based on two existing WEAP models
* Use case 2: estimate sensitivity of different water systems’ reliability to meet demand
### Required Software
* Windows 7 or 10 64-bit operating systems, both up to date.
* Internet connection and Google Chrome v. 69 or FireFox Quantum v. 62.0 browsers
* Microsoft Excel (versions after 2007)
* Python 3.7.
* Water Evaluation and Planning System (WEAP) (WEAP requires a license to run the model: Version 2019.2 or newer. If you don't have access to a WEAP license, you will still be able to replicate all the results of the first use case.)
### Required online accounts (if you don't have them)
* Free: create a user account at https://openagua.org. After you create the account, email Adel Abdalla @ amabdallah@aggiemail.usu.edu to let him know your username (email) to share a project with you as part of replicating the use case.
* Free: create a user account at https://www.hydroshare.org/
### Difficulty level
Very little coding or database experience are needed to reproduce the work but some knowledge of coding like in Python and awareness of Structure Query Language (SQL) are a plus.
**Options and required time**
Please expect to spend a couple of of hours to complete these directions. This work is 6 years in the making so spending a a couple of hours to learn and reproduce the results is quite amazing!
### How to use the Notebook
Execute the Notebook cells that contain Python code by pressing `Shift-Enter`, or by pressing the play button <img style='display:inline;padding-bottom:15px' src='play-button.png'> on the toolbar above.
**Note:** Any changes you make to the live Notebooks are temporary and will be lost once you close it. You always can start a new live Notebook that has the original content.
# Preparations (steps 1-2)
-----------------------------------------------------------
### Step 1: Setup_local_Jupyter
[01_Step 1_Setup_local_Jupyter](01_Step1_Setup_local_Jupyter.ipynb)
<br>
### Step 2: Install_WaMDaM_Wizard_Connect to SQLite
[02_Step 2_Step2_Install_WaMDaM_Wizard_Connect](02_Step2_Install_WaMDaM_Wizard_Connect.ipynb)
<br>
# Use Case 1: Estimate and compare how regulated two river basins are based on two existing WEAP models
-----------------------------------------------------------
### Step 3: Use WaMDaM Wizard to Extract WEAP models into WaMDaM
[03_Step 3_Step3_Visualize_OpenAgua](03_Step3_Visualize_OpenAgua.ipynb)
<br>
# Use Case 2
--------------------------------------------------------------
### Step 4: Create_New_Scenarios_Edit_OpenAgua
[Step4_Create_New_Scenarios_Edit_OpenAgua](https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/blob/master/2_VisualizePublish/04_Step4_Create_New_Scenarios_Edit_OpenAgua.ipynb)
<br>
### Step 5: Download_to_WaMDaM
[Step5_EditOpenAgua_download_to_WaMDaM](https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/blob/master/2_VisualizePublish/05_Step5_download_to_WaMDaM.ipynb)
<br>
### Step 6: Serve_NewScenarios_WASH
[Step6_Serve_NewScenarios_WASH](https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/blob/master/2_VisualizePublish/06_Step6_Serve_NewScenarios_WASH.ipynb)
<br>
### Step 7: Serve_NewScenarios_WEAP
[Step7_Serve_NewScenarios_WEAP](https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/blob/master/2_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb)
<br>
# Use Case 3
### Step 8: Publish the Bear River and Monterrey models into HydroShare
[Step8_Publish_HydroShare](https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/blob/master/2_VisualizePublish/08_Step8_Publish_HydroShare.ipynb)
<br>
### Step 9: Query_Analyze_HydroShare_Monterrey_Mexico
[Step9_Query_Analyze_HydroShare_Monterrey_Mexico](https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/blob/master/2_VisualizePublish/09_Step9_Query_Analyze_HydroShare_Monterrey_Mexico.ipynb)
<br>
# Info
### Sponsors and Credit
This material is based upon work [supported](http://docs.wamdam.org/SponsorsCredit/) by the National Science Foundation (NSF) under Grants 1135482 (CI-Water) and 1208732 (iUtah). Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the NSF.
### License
WaMDaM and its products are disturbed under a BSD 3-Clause [license](http://docs.wamdam.org/License/)
### Authors
[Adel M. Abdallah](http://adelmabdallah.com/) has been the lead in WaMDaM development as part of his PhD dissertation at Utah State University under the advising of Dr. David Rosenberg.
If you have questions, feel free to email me at: [amabdallah@aggiemail.usu.edu](mailto:amabdallah@aggiemail.usu.edu)
### Citation
Adel M. Abdallah, David Rheinheimer, David E. Rosenberg, Steve Knox, Julien J Harou, in review. A Software Ecosystem to Store, Visualize, and Publish Modelling Data for Water Resources Systems. Journal of Hydroinformatics.
# The End :) Congratulations!
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Pix2Pix
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/generative/pix2pix"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/pix2pix.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This notebook demonstrates image to image translation using conditional GAN's, as described in [Image-to-Image Translation with Conditional Adversarial Networks](https://arxiv.org/abs/1611.07004). Using this technique we can colorize black and white photos, convert google maps to google earth, etc. Here, we convert building facades to real buildings.
In this example, we will use the [CMP Facade Database](http://cmp.felk.cvut.cz/~tylecr1/facade/), helpfully provided by the [Center for Machine Perception](http://cmp.felk.cvut.cz/) at the [Czech Technical University in Prague](https://www.cvut.cz/). To keep our example short, we will use a preprocessed [copy](https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/) of this dataset, created by the authors of the [paper](https://arxiv.org/abs/1611.07004) above.
Each epoch takes around 15 seconds on a single V100 GPU.
Below is the output generated after training the model for 200 epochs.


## Import TensorFlow and other libraries
```
import tensorflow as tf
import os
import time
from matplotlib import pyplot as plt
from IPython import display
!pip install -U tensorboard
```
## Load the dataset
You can download this dataset and similar datasets from [here](https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets). As mentioned in the [paper](https://arxiv.org/abs/1611.07004) we apply random jittering and mirroring to the training dataset.
* In random jittering, the image is resized to `286 x 286` and then randomly cropped to `256 x 256`
* In random mirroring, the image is randomly flipped horizontally i.e left to right.
```
_URL = 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facades.tar.gz'
# Download and extract the facades dataset (cached by Keras between runs).
path_to_zip = tf.keras.utils.get_file('facades.tar.gz',
origin=_URL,
extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'facades/')
BUFFER_SIZE = 400  # shuffle buffer size for the training pipeline
BATCH_SIZE = 1  # one image pair per batch
IMG_WIDTH = 256
IMG_HEIGHT = 256
def load(image_file):
    """Read one combined dataset jpeg and split it into (input, real) halves.

    Each file stores a side-by-side pair: the `real` target is taken from the
    left half and the `input` image from the right half. Both are returned
    as float32 tensors.
    """
    raw = tf.io.read_file(image_file)
    image = tf.image.decode_jpeg(raw)

    half = tf.shape(image)[1] // 2
    real_image = tf.cast(image[:, :half, :], tf.float32)
    input_image = tf.cast(image[:, half:, :], tf.float32)

    return input_image, real_image
inp, re = load(PATH+'train/100.jpg')
# scale the float images to [0, 1] so matplotlib can display them
plt.figure()
plt.imshow(inp/255.0)
plt.figure()
plt.imshow(re/255.0)
def resize(input_image, real_image, height, width):
    """Resize both images of a pair to (height, width) using nearest-neighbor."""
    method = tf.image.ResizeMethod.NEAREST_NEIGHBOR
    resized_input = tf.image.resize(input_image, [height, width], method=method)
    resized_real = tf.image.resize(real_image, [height, width], method=method)
    return resized_input, resized_real
def random_crop(input_image, real_image):
    """Take the same random IMG_HEIGHT x IMG_WIDTH crop from both images."""
    # Stacking first guarantees an identical crop window for the pair.
    pair = tf.stack([input_image, real_image], axis=0)
    cropped = tf.image.random_crop(pair, size=[2, IMG_HEIGHT, IMG_WIDTH, 3])
    return cropped[0], cropped[1]
# normalizing the images to [-1, 1]
def normalize(input_image, real_image):
    """Map pixel values from [0, 255] to [-1, 1] for both images."""
    def rescale(image):
        return image / 127.5 - 1
    return rescale(input_image), rescale(real_image)
@tf.function()
def random_jitter(input_image, real_image):
"""Augment a pair: upscale to 286x286, random-crop back to 256x256, and randomly mirror both images together."""
# resizing to 286 x 286 x 3
input_image, real_image = resize(input_image, real_image, 286, 286)
# randomly cropping to 256 x 256 x 3
input_image, real_image = random_crop(input_image, real_image)
# flip both images (or neither) so the pair stays aligned
if tf.random.uniform(()) > 0.5:
# random mirroring
input_image = tf.image.flip_left_right(input_image)
real_image = tf.image.flip_left_right(real_image)
return input_image, real_image
```
As you can see in the images below
that they are going through random jittering
Random jittering as described in the paper is to
1. Resize an image to bigger height and width
2. Randomly crop to the target size
3. Randomly flip the image horizontally
```
# Show four independent draws of the random jitter on the same example pair.
plt.figure(figsize=(6, 6))
for i in range(4):
rj_inp, rj_re = random_jitter(inp, re)
plt.subplot(2, 2, i+1)
plt.imshow(rj_inp/255.0)
plt.axis('off')
plt.show()
def load_image_train(image_file):
    """Load a training example: decode, apply random jitter, then normalize."""
    inp, real = load(image_file)
    inp, real = random_jitter(inp, real)
    return normalize(inp, real)
def load_image_test(image_file):
    """Load a test example: decode, resize to the model input size, normalize."""
    inp, real = load(image_file)
    inp, real = resize(inp, real, IMG_HEIGHT, IMG_WIDTH)
    return normalize(inp, real)
```
## Input Pipeline
```
# Training pipeline: decode + augment in parallel, shuffle, then batch.
train_dataset = tf.data.Dataset.list_files(PATH+'train/*.jpg')
train_dataset = train_dataset.map(load_image_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.batch(BATCH_SIZE)
# Test pipeline: no augmentation and no shuffling.
test_dataset = tf.data.Dataset.list_files(PATH+'test/*.jpg')
test_dataset = test_dataset.map(load_image_test)
test_dataset = test_dataset.batch(BATCH_SIZE)
```
## Build the Generator
* The architecture of generator is a modified U-Net.
* Each block in the encoder is (Conv -> Batchnorm -> Leaky ReLU)
* Each block in the decoder is (Transposed Conv -> Batchnorm -> Dropout(applied to the first 3 blocks) -> ReLU)
* There are skip connections between the encoder and decoder (as in U-Net).
```
OUTPUT_CHANNELS = 3

def downsample(filters, size, apply_batchnorm=True):
    """Encoder block: Conv2D (stride 2) -> optional BatchNorm -> LeakyReLU."""
    init = tf.random_normal_initializer(0., 0.02)

    block = tf.keras.Sequential()
    block.add(tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                                     kernel_initializer=init, use_bias=False))
    if apply_batchnorm:
        block.add(tf.keras.layers.BatchNormalization())
    block.add(tf.keras.layers.LeakyReLU())
    return block
# Sanity check: one downsample block should halve the spatial size.
down_model = downsample(3, 4)
down_result = down_model(tf.expand_dims(inp, 0))
print (down_result.shape)
def upsample(filters, size, apply_dropout=False):
    """Decoder block: Conv2DTranspose (stride 2) -> BatchNorm -> optional Dropout -> ReLU."""
    init = tf.random_normal_initializer(0., 0.02)

    block = tf.keras.Sequential()
    block.add(tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                              padding='same',
                                              kernel_initializer=init,
                                              use_bias=False))
    block.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        block.add(tf.keras.layers.Dropout(0.5))
    block.add(tf.keras.layers.ReLU())
    return block
# Sanity check: one upsample block should undo the downsample's size change.
up_model = upsample(3, 4)
up_result = up_model(down_result)
print (up_result.shape)
def Generator():
    """Build the pix2pix generator: a modified U-Net with skip connections."""
    inputs = tf.keras.layers.Input(shape=[256, 256, 3])

    # Encoder: each block halves the spatial resolution.
    down_stack = [
        downsample(64, 4, apply_batchnorm=False),  # (bs, 128, 128, 64)
        downsample(128, 4),  # (bs, 64, 64, 128)
        downsample(256, 4),  # (bs, 32, 32, 256)
        downsample(512, 4),  # (bs, 16, 16, 512)
        downsample(512, 4),  # (bs, 8, 8, 512)
        downsample(512, 4),  # (bs, 4, 4, 512)
        downsample(512, 4),  # (bs, 2, 2, 512)
        downsample(512, 4),  # (bs, 1, 1, 512)
    ]

    # Decoder: each block doubles the spatial resolution; dropout is applied
    # to the first three blocks.
    up_stack = [
        upsample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
        upsample(512, 4, apply_dropout=True),  # (bs, 4, 4, 1024)
        upsample(512, 4, apply_dropout=True),  # (bs, 8, 8, 1024)
        upsample(512, 4),  # (bs, 16, 16, 1024)
        upsample(256, 4),  # (bs, 32, 32, 512)
        upsample(128, 4),  # (bs, 64, 64, 256)
        upsample(64, 4),  # (bs, 128, 128, 128)
    ]

    initializer = tf.random_normal_initializer(0., 0.02)
    # Final layer maps back to RGB; tanh keeps outputs in [-1, 1], matching
    # the normalized images.
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                           strides=2,
                                           padding='same',
                                           kernel_initializer=initializer,
                                           activation='tanh')  # (bs, 256, 256, 3)

    # Run the encoder, remembering every intermediate activation for the skips.
    x = inputs
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)

    # Run the decoder, concatenating the mirrored encoder activation at each
    # resolution; the bottleneck activation itself is not used as a skip.
    for up, skip in zip(up_stack, reversed(skips[:-1])):
        x = tf.keras.layers.Concatenate()([up(x), skip])

    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x)
generator = Generator()
tf.keras.utils.plot_model(generator, show_shapes=True, dpi=64)
# Run the untrained generator once on the example input and show the output.
gen_output = generator(inp[tf.newaxis,...], training=False)
plt.imshow(gen_output[0,...])
```
* **Generator loss**
* It is a sigmoid cross entropy loss of the generated images and an **array of ones**.
* The [paper](https://arxiv.org/abs/1611.07004) also includes L1 loss which is MAE (mean absolute error) between the generated image and the target image.
* This allows the generated image to become structurally similar to the target image.
* The formula to calculate the total generator loss = gan_loss + LAMBDA * l1_loss, where LAMBDA = 100. This value was decided by the authors of the [paper](https://arxiv.org/abs/1611.07004).
The training procedure for the generator is shown below:
```
LAMBDA = 100

def generator_loss(disc_generated_output, gen_output, target):
    """Total generator loss: GAN loss plus LAMBDA-weighted L1 reconstruction loss.

    Returns (total_gen_loss, gan_loss, l1_loss).
    """
    # GAN term: compare the discriminator's verdict on fakes against all-ones.
    # NOTE(review): `loss_object` is defined in a later notebook cell - confirm.
    gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)

    # L1 term: mean absolute error between generated and target images.
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))

    total = gan_loss + LAMBDA * l1_loss
    return total, gan_loss, l1_loss
```

## Build the Discriminator
* The Discriminator is a PatchGAN.
* Each block in the discriminator is (Conv -> BatchNorm -> Leaky ReLU)
* The shape of the output after the last layer is (batch_size, 30, 30, 1)
* Each 30x30 patch of the output classifies a 70x70 portion of the input image (such an architecture is called a PatchGAN).
* Discriminator receives 2 inputs.
* Input image and the target image, which it should classify as real.
* Input image and the generated image (output of generator), which it should classify as fake.
* We concatenate these 2 inputs together in the code (`tf.concat([inp, tar], axis=-1)`)
```
def Discriminator():
    """Build the PatchGAN discriminator.

    Takes an (input image, candidate image) pair and returns a
    (bs, 30, 30, 1) grid of logits, one per ~70x70 input patch.
    """
    init = tf.random_normal_initializer(0., 0.02)

    inp = tf.keras.layers.Input(shape=[256, 256, 3], name='input_image')
    tar = tf.keras.layers.Input(shape=[256, 256, 3], name='target_image')

    # Stack the conditioning image and candidate image channel-wise.
    net = tf.keras.layers.concatenate([inp, tar])     # (bs, 256, 256, channels*2)

    net = downsample(64, 4, False)(net)               # (bs, 128, 128, 64)
    net = downsample(128, 4)(net)                     # (bs, 64, 64, 128)
    net = downsample(256, 4)(net)                     # (bs, 32, 32, 256)

    net = tf.keras.layers.ZeroPadding2D()(net)        # (bs, 34, 34, 256)
    net = tf.keras.layers.Conv2D(512, 4, strides=1,
                                 kernel_initializer=init,
                                 use_bias=False)(net)  # (bs, 31, 31, 512)
    net = tf.keras.layers.BatchNormalization()(net)
    net = tf.keras.layers.LeakyReLU()(net)

    net = tf.keras.layers.ZeroPadding2D()(net)        # (bs, 33, 33, 512)
    logits = tf.keras.layers.Conv2D(1, 4, strides=1,
                                    kernel_initializer=init)(net)  # (bs, 30, 30, 1)

    return tf.keras.Model(inputs=[inp, tar], outputs=logits)
# Build the discriminator and render its architecture diagram.
discriminator = Discriminator()
tf.keras.utils.plot_model(discriminator, show_shapes=True, dpi=64)
# Logit map for (input, untrained generator output); diverging colormap
# centered at 0 so red/blue indicate the real/fake decision per patch.
disc_out = discriminator([inp[tf.newaxis,...], gen_output], training=False)
plt.imshow(disc_out[0,...,-1], vmin=-20, vmax=20, cmap='RdBu_r')
plt.colorbar()
```
**Discriminator loss**
* The discriminator loss function takes 2 inputs; **real images, generated images**
* real_loss is a sigmoid cross entropy loss of the **real images** and an **array of ones(since these are the real images)**
* generated_loss is a sigmoid cross entropy loss of the **generated images** and an **array of zeros(since these are the fake images)**
* Then the total_loss is the sum of real_loss and the generated_loss
```
# Sigmoid cross-entropy on raw logits, shared by both loss functions.
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(disc_real_output, disc_generated_output):
    """Sum of the losses on real pairs (label 1) and generated pairs (label 0)."""
    loss_on_real = loss_object(tf.ones_like(disc_real_output), disc_real_output)
    loss_on_fake = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
    return loss_on_real + loss_on_fake
```
The training procedure for the discriminator is shown below.
To learn more about the architecture and the hyperparameters you can refer the [paper](https://arxiv.org/abs/1611.07004).

## Define the Optimizers and Checkpoint-saver
```
# Adam with lr=2e-4 and beta_1=0.5, the settings from the pix2pix paper.
generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

# Checkpoint both models AND both optimizer states so training can resume.
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)
```
## Generate Images
Write a function to plot some images during training.
* We pass images from the test dataset to the generator.
* The generator will then translate the input image into the output.
* Last step is to plot the predictions and **voila!**
Note: The `training=True` is intentional here since
we want the batch statistics while running the model
on the test dataset. If we use training=False, we will get
the accumulated statistics learned from the training dataset
(which we don't want)
```
def generate_images(model, test_input, tar):
    """Run `model` on `test_input` and plot input / ground truth / prediction.

    training=True is deliberate: we want per-batch statistics from the
    batch-norm layers rather than the accumulated training statistics.
    """
    prediction = model(test_input, training=True)
    plt.figure(figsize=(15,15))

    panels = [test_input[0], tar[0], prediction[0]]
    captions = ['Input Image', 'Ground Truth', 'Predicted Image']

    for idx, (image, caption) in enumerate(zip(panels, captions)):
        plt.subplot(1, 3, idx + 1)
        plt.title(caption)
        # Map pixel values from [-1, 1] back into [0, 1] for display.
        plt.imshow(image * 0.5 + 0.5)
        plt.axis('off')
    plt.show()
# Preview the (still untrained) generator on one held-out example.
for example_input, example_target in test_dataset.take(1):
    generate_images(generator, example_input, example_target)
```
## Training
* For each example input generate an output.
* The discriminator receives the input_image and the generated image as the first input. The second input is the input_image and the target_image.
* Next, we calculate the generator and the discriminator loss.
* Then, we calculate the gradients of loss with respect to both the generator and the discriminator variables(inputs) and apply those to the optimizer.
* Then log the losses to TensorBoard.
```
EPOCHS = 150

import datetime
log_dir="logs/"

# One TensorBoard run per training session, keyed by timestamp.
summary_writer = tf.summary.create_file_writer(
    log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

@tf.function
def train_step(input_image, target, epoch):
    """One optimization step for the generator and the discriminator.

    Uses two tapes so each loss is differentiated w.r.t. its own model's
    variables only; logs scalar summaries with step=epoch (not per batch).
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = generator(input_image, training=True)

        # Discriminator judges (input, real target) vs (input, generated).
        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output], training=True)

        gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)

    generator_gradients = gen_tape.gradient(gen_total_loss,
                                            generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss,
                                                 discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(generator_gradients,
                                            generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
                                                discriminator.trainable_variables))

    with summary_writer.as_default():
        tf.summary.scalar('gen_total_loss', gen_total_loss, step=epoch)
        tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=epoch)
        tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=epoch)
        tf.summary.scalar('disc_loss', disc_loss, step=epoch)
```
The actual training loop:
* Iterates over the number of epochs.
* On each epoch it clears the display, and runs `generate_images` to show its progress.
* On each epoch it iterates over the training dataset, printing a '.' for each example.
* It saves a checkpoint every 20 epochs.
```
def fit(train_ds, epochs, test_ds):
    """Full training loop: per-epoch preview, batch updates, periodic checkpoints."""
    for epoch in range(epochs):
        start = time.time()

        display.clear_output(wait=True)

        # Visual progress check on one held-out example each epoch.
        for example_input, example_target in test_ds.take(1):
            generate_images(generator, example_input, example_target)
        print("Epoch: ", epoch)

        # Train: one dot per batch, newline every 100 batches.
        for n, (input_image, target) in train_ds.enumerate():
            print('.', end='')
            if (n+1) % 100 == 0:
                print()
            train_step(input_image, target, epoch)
        print()

        # saving (checkpoint) the model every 20 epochs
        if (epoch + 1) % 20 == 0:
            checkpoint.save(file_prefix = checkpoint_prefix)

        print ('Time taken for epoch {} is {} sec\n'.format(epoch + 1,
                                                            time.time()-start))
    # Final checkpoint after the last epoch.
    checkpoint.save(file_prefix = checkpoint_prefix)
```
This training loop saves logs you can easily view in TensorBoard to monitor the training progress. Working locally you would launch a separate tensorboard process. In a notebook, if you want to monitor with TensorBoard it's easiest to launch the viewer before starting the training.
To launch the viewer paste the following into a code-cell:
```
#docs_infra: no_execute
%load_ext tensorboard
%tensorboard --logdir {log_dir}
```
Now run the training loop:
```
fit(train_dataset, EPOCHS, test_dataset)
```
If you want to share the TensorBoard results _publicly_ you can upload the logs to [TensorBoard.dev](https://tensorboard.dev/) by copying the following into a code-cell.
Note: This requires a Google account.
```
!tensorboard dev upload --logdir {log_dir}
```
Caution: This command does not terminate. It's designed to continuously upload the results of long-running experiments. Once your data is uploaded you need to stop it using the "interrupt execution" option in your notebook tool.
You can view the [results of a previous run](https://tensorboard.dev/experiment/lZ0C6FONROaUMfjYkVyJqw) of this notebook on [TensorBoard.dev](https://tensorboard.dev/).
TensorBoard.dev is a managed experience for hosting, tracking, and sharing ML experiments with everyone.
It can also be included inline using an `<iframe>`:
```
display.IFrame(
src="https://tensorboard.dev/experiment/lZ0C6FONROaUMfjYkVyJqw",
width="100%",
height="1000px")
```
Interpreting the logs from a GAN is more subtle than a simple classification or regression model. Things to look for:
* Check that neither model has "won". If either the `gen_gan_loss` or the `disc_loss` gets very low it's an indicator that this model is dominating the other, and you are not successfully training the combined model.
* The value `log(2) = 0.69` is a good reference point for these losses, as it indicates a perplexity of 2: That the discriminator is on average equally uncertain about the two options.
* For the `disc_loss` a value below `0.69` means the discriminator is doing better than random, on the combined set of real+generated images.
* For the `gen_gan_loss` a value below `0.69` means the generator is doing better than random at fooling the discriminator.
* As training progresses the `gen_l1_loss` should go down.
## Restore the latest checkpoint and test
```
!ls {checkpoint_dir}
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
```
## Generate using test dataset
```
# Run the trained model on a few examples from the test dataset
# (generate_images uses training=True internally for batch-norm stats).
for inp, tar in test_dataset.take(5):
    generate_images(generator, inp, tar)
```
| github_jupyter |
# Managing Throwing and Catching and Exceptions
In this workbook, we're going to work with a sample that describes a cashier's till at a store. We'll look at what happens when the cashier makes change for orders, the exceptions thrown and the danger they create.
First, let's describe the `Till` class
```
// Models a cash-register drawer holding US bills in four denominations.
public class Till
{
    private int OneDollarBills;
    private int FiveDollarBills;
    private int TenDollarBills;
    private int TwentyDollarBills;

    // Seed the drawer with an initial count of each bill.
    public Till(int ones, int fives, int tens = 0, int twenties = 0) =>
        (OneDollarBills, FiveDollarBills, TenDollarBills, TwentyDollarBills) =
        (ones, fives, tens, twenties);

    // Accepts the customer's bills and dispenses change for `cost`.
    // NOTE(review): the drawer is mutated BEFORE the payment is validated,
    // and partial change may already be dispensed before the second throw,
    // so both exceptions below leave the till in an inconsistent state.
    // That hazard is exactly what this workbook is designed to demonstrate.
    public void MakeChange(int cost, int twenties, int tens = 0, int fives = 0, int ones = 0)
    {
        // Deposit the customer's payment into the drawer first.
        TwentyDollarBills += twenties;
        TenDollarBills += tens;
        FiveDollarBills += fives;
        OneDollarBills += ones;

        int amountPaid = twenties * 20 + tens * 10 + fives * 5 + ones;
        int changeNeeded = amountPaid - cost;

        if (changeNeeded < 0)
            throw new InvalidOperationException("Not enough money provided");

        Console.WriteLine("Cashier Returns:");

        // Greedy change-making: largest denominations first, limited by
        // what the drawer actually holds.
        while ((changeNeeded > 19) && (TwentyDollarBills > 0))
        {
            TwentyDollarBills--;
            changeNeeded -= 20;
            Console.WriteLine("\t A twenty");
        }

        while ((changeNeeded > 9) && (TenDollarBills > 0))
        {
            TenDollarBills--;
            changeNeeded -= 10;
            Console.WriteLine("\t A tenner");
        }

        while ((changeNeeded > 4) && (FiveDollarBills > 0))
        {
            FiveDollarBills--;
            changeNeeded -= 5;
            Console.WriteLine("\t A fiver");
        }

        while ((changeNeeded > 0) && (OneDollarBills > 0))
        {
            OneDollarBills--;
            changeNeeded--;
            Console.WriteLine("\t A one");
        }

        // Drawer ran out of suitable bills; payment was already accepted.
        if (changeNeeded > 0)
            throw new InvalidOperationException("Can't make change. Do you have anything smaller?");
    }

    // Prints a per-denomination breakdown of the drawer contents.
    public void LogTillStatus()
    {
        Console.WriteLine("The till currently has:");
        Console.WriteLine($"{TwentyDollarBills * 20} in twenties");
        Console.WriteLine($"{TenDollarBills * 10} in tens");
        Console.WriteLine($"{FiveDollarBills * 5} in fives");
        Console.WriteLine($"{OneDollarBills} in ones");
        Console.WriteLine();
    }

    // Total dollar value of the drawer.
    public override string ToString() =>
        $"The till has {TwentyDollarBills * 20 + TenDollarBills * 10 + FiveDollarBills * 5 + OneDollarBills} dollars";
}
```
Now that we have our `Till`, let's set up our scenario to experiment with.
```
// TheBank is our cashier's till we are working with and we'll give it some cash to start with
var theBank = new Till(ones: 50, fives: 20, tens: 10, twenties: 5);

// Independent tally of what the till SHOULD contain; used below to
// detect when exceptions have left the till inconsistent.
var expectedTotal = 50 * 1 + 20 * 5 + 10 * 10 + 5 * 20;

theBank.LogTillStatus();
Console.WriteLine(theBank);
Console.WriteLine($"Expected till value: {expectedTotal}");
```
Now that we have set an initial value for the contents of `TheBank`, let's start working with customers and making change.
We'll define a number of transactions to run through `TheBank` and also setup a random number generator to give us the feeling of random items being purchased and we'll make change for those customers
```
// Simulate a fixed number of checkout transactions.
int transactions = 2;
// Seeded from the clock so each run sees different purchase amounts.
var valueGenerator = new Random((int)DateTime.Now.Ticks);

while (transactions-- > 0)
{
    int itemCost = valueGenerator.Next(2, 50);

    // Crude model of the bills a customer hands over for this price.
    int numOnes = itemCost % 2;
    int numFives = (itemCost % 10 > 7) ? 1 : 0;
    int numTens = (itemCost % 20 > 13) ? 1 : 0;
    int numTwenties = (itemCost < 20) ? 1 : 2;

    try
    {
        Console.WriteLine($"Customer making a ${itemCost} purchase");
        Console.WriteLine($"\t Using {numTwenties} twenties");
        Console.WriteLine($"\t Using {numTens} tenners");
        Console.WriteLine($"\t Using {numFives} fivers");
        Console.WriteLine($"\t Using {numOnes} silver dollar coins");

        theBank.MakeChange(itemCost, numTwenties, numTens, numFives, numOnes);
        // Only counted when MakeChange completed without throwing.
        expectedTotal += itemCost;
    }
    catch (InvalidOperationException e)
    {
        // NOTE(review): MakeChange may have already mutated the till before
        // throwing, so the till and expectedTotal can now disagree.
        Console.WriteLine($"Could not make transaction: {e.Message}");
    }

    Console.WriteLine(theBank);
    Console.WriteLine($"Expected till value: {expectedTotal}");
    Console.WriteLine(" ------------------------------------------");
}
```
| github_jupyter |
# Consume deployed webservice via REST
Demonstrates the usage of a deployed model via plain REST.
REST is language-agnostic, so you should be able to query from any REST-capable programming language.
## Configuration
```
from environs import Env

# Environment-driven configuration; later .env files can override earlier ones.
env = Env(expand_vars=True)
env.read_env("foundation.env")
env.read_env("service-principals.env")

# image to test
IMAGE_TO_TEST = "mnist_fashion/04_consumption/random_test_images/random-test-image-9629.png"

# endpoint of the scoring webservice
SCORING_URI = "<add your own scoring REST endpoint here>"

# auth method, either "Token", "Keys" or "None".
# also specify additional values depending on auth method
AUTH_METHOD = "Keys"
if AUTH_METHOD == "Keys":
    AUTH_KEY = "<add your own key here>"
elif AUTH_METHOD == "Token":
    # Service-principal + workspace details needed to mint a service token.
    REGION = "eastus"
    SUBSCRIPTION_ID = env("SUBSCRIPTION_ID")
    RESOURCE_GROUP = env("RESOURCE_GROUP")
    WORKSPACE_NAME = env("WORKSPACE_NAME")
    SERVICE_NAME = "mnist-fashion-service"
    CONSUME_MODEL_SP_TENANT_ID = env("CONSUME_MODEL_SP_TENANT_ID")
    CONSUME_MODEL_SP_CLIENT_ID = env("CONSUME_MODEL_SP_CLIENT_ID")
    CONSUME_MODEL_SP_CLIENT_SECRET = env("CONSUME_MODEL_SP_CLIENT_SECRET")
elif AUTH_METHOD == "None":
    # Anonymous endpoint: no extra configuration required.
    pass
```
## Load a random image and plot it
```
import matplotlib.pyplot as plt
from PIL import Image

# Display the test image that will be sent to the scoring endpoint.
image = Image.open(IMAGE_TO_TEST)
plt.figure()
plt.imshow(image)
plt.colorbar()
plt.grid(False)
plt.show()
```
## Invoke the webservice and show result
```
import requests
import json

# --- get input data
# Read the test image as raw bytes. A context manager guarantees the file
# handle is closed (the original `open(...).read()` leaked it).
with open(IMAGE_TO_TEST, "rb") as image_file:
    input_data = image_file.read()
# alternatively for JSON input
#input_data = json.dumps({"x": 4711})
# --- get headers
# Content-Type
# for binary data
headers = {"Content-Type": "application/octet-stream"}
# alternatively for JSON data
#headers = {"Content-Type": "application/json"}

# Authorization
if AUTH_METHOD == "Token":
    # get an access token for the service principal to access Azure
    azure_access_token = requests.post(
        f"https://login.microsoftonline.com/{CONSUME_MODEL_SP_TENANT_ID}/oauth2/token",
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        data="grant_type=client_credentials"
        + "&resource=https%3A%2F%2Fmanagement.azure.com%2F"
        + f"&client_id={CONSUME_MODEL_SP_CLIENT_ID}"
        + f"&client_secret={CONSUME_MODEL_SP_CLIENT_SECRET}",
    ).json()["access_token"]

    # use that token to get another token for accessing the webservice
    # note: the token is only valid for a certain period of time.
    # after that time, a new token has to be used. the logic
    # to do this, is not implemented here yet. you can check
    # the current time against the refresh after time to know
    # if a new token is required. refreshAfter and expiryOn
    # are UNIX timestamps. use time.time() to get the current
    # timestamp.
    token_response = requests.post(
        f"https://{REGION}.modelmanagement.azureml.net/modelmanagement/v1.0/subscriptions/{SUBSCRIPTION_ID}/resourceGroups/{RESOURCE_GROUP}/providers/Microsoft.MachineLearningServices/workspaces/{WORKSPACE_NAME}/services/{SERVICE_NAME}/token",
        headers={"Authorization": f"Bearer {azure_access_token}"}
    ).json()
    access_token = token_response["accessToken"]
    access_token_refresh_after = int(token_response["refreshAfter"])
    access_token_expiry_on = int(token_response["expiryOn"])

    # finally, use that token to access the webservice
    headers["Authorization"] = f"Bearer {access_token}"

if AUTH_METHOD == "Keys":
    # Key-based auth also travels as a Bearer header.
    headers["Authorization"] = f"Bearer {AUTH_KEY}"

if AUTH_METHOD == "None":
    # do nothing
    pass

# --- make request and display response
response = requests.post(SCORING_URI, input_data, headers=headers, verify=True)
print(response.json())
```
| github_jupyter |
# Advanced Certification in AIML
## A Program by IIIT-H and TalentSprint
## Not for grades
## Learning Objective
The objective of this experiment is to understand Decision Tree classifier.
## Dataset
#### History
This is a multivariate dataset introduced by R.A.Fisher (Father of Modern Statistics) for showcasing linear discriminant analysis. This is arguably the best known dataset in Feature Selection literature.
The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other.
#### Description
The Iris dataset consists of 150 data instances. There are 3 classes (Iris Versicolor, Iris Setosa and Iris Virginica) each have 50 instances.
For each flower we have the below data attributes
- sepal length in cm
- sepal width in cm
- petal length in cm
- petal width in cm
To make our experiment easy we rename the classes with numbers :
"0": setosa
"1": versicolor
"2": virginica
### Challenges
When we use the data with large number of features or dimensionality, models usually choke because
1. Training time increases exponentially with number of features.
2. Models have increasing risk of overfitting with increasing number of features.
To avoid the above mentioned problems while learning about data analysis, we use simple, well behaved, data that reduces the cognitive load, and makes it easier to debug as we are able to better comprehend the data we are working with.
Hence, this is a good dataset to work on.
## Domain Information
Iris Plants are flowering plants with showy flowers. They are very popular among movie directors as it gives excellent background.
They are predominantly found in dry, semi-desert, or colder rocky mountainous areas in Europe and Asia. They have long, erect flowering stems and can produce white, yellow, orange, pink, purple, lavender, blue or brown colored flowers. There are 260 to 300 types of iris.

As you could see, flowers have 3 sepals and 3 petals. The sepals are usually spreading or drop downwards and the petals stand upright, partly behind the sepal bases. However, the length and width of the sepals and petals vary for each type.
### Setup Steps
#### Importing Required Packages
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
```
#### Loading the data
```
# Load data
iris = load_iris()

# Checking for the type of iris (a sklearn Bunch: dict-like container)
print(type(iris))

# Checking the keys
print(iris.keys())

# Checking for the type of data
print(type(iris.data))

# Checking for unique target or class values
print(set(iris.target))

# Let us see how the iris data looks (every 15th row)
print(iris.data[::15])

# Let us see how the labels look (every 15th label)
print(iris.target[::15])

# Storing the data and labels into "X" and "y" variables
X = iris.data
y = iris.target
```
### Splitting the data into train and test sets
```
from sklearn.model_selection import train_test_split

# Train/test split; test_size=0.33 gives a 67 : 33 ratio.
# (An earlier comment claimed 70 : 30, which does not match the code.)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

# Let us see the size of train and test sets
X_train.shape, X_test.shape

# Let us see first five rows of the training data
X_train[:5]
```
### Training a Decision Tree Classifier
```
# Shallow tree (max_depth=2) keeps the model small and interpretable.
decision_tree = DecisionTreeClassifier(max_depth=2)

# Training or fitting the model with the train data
decision_tree.fit(X_train,y_train)

# Testing the trained model
decision_tree.predict(X_test)

# Calculating the score (mean accuracy on the held-out test set)
decision_tree.score(X_test,y_test)
```
| github_jupyter |
# Calculate China-Z Index (CZI) with Python
China Z-Index (CZI) is extensively used by National Climate Centre (NCC) of China to monitor drought conditions throughout
the country (Wu et al., 2001; Dogan et al., 2012). CZI assumes that precipitation data follow the Pearson Type III distribution and is related to Wilson–Hilferty cube-root transformation (Wilson and Hilferty, 1931) from chi-square variable to the Z-scale (Kendall and Stuart, 1977).
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Prepare data
```
# Whitespace-delimited monthly rainfall file: use column 1 (date) as the
# index and column 2 as the monthly rainfall total.
data = pd.read_csv('data/prcphq.046037.month.txt', sep=r"\s+",
                   skiprows=1, usecols=[1, 2],
                   parse_dates=True,
                   index_col = 0,
                   names=['Date', 'Rain'])
```
## Calculate six-monthly CZI
Here we use all years as a reference period to calculate monthly long-term normals. ZSI = (p-pm)/s
```
# Rolling six-month precipitation totals.
data['Rain_6'] = data['Rain'].rolling(6).sum()
df_6mon = data[['Rain_6']].dropna()
df_6mon['CZI'] = np.nan

for imon in np.arange(1, 13):
    sinds = df_6mon.index.month == imon
    # Work on the rainfall series only. (The original selected the whole
    # frame, which dragged the all-NaN 'CZI' column through the math and
    # then discarded it again via `.values[:, 0]`.)
    x = df_6mon.loc[sinds, 'Rain_6']
    # Z-Score Index for this calendar month across all years.
    zsi = (x - x.mean()) / x.std()
    # Skewness coefficient cs = sum(zsi^3) / n.
    # BUG FIX: the original computed `np.power(zsi, 3) / len(x)` element-wise
    # (no sum), which is not the CZI skewness coefficient of Wu et al. (2001).
    cs = np.power(zsi, 3).sum() / len(x)
    # Wilson-Hilferty cube-root transformation to the Z scale.
    czi = 6.0/cs*np.power((cs/2.0*zsi + 1.0), 1.0/3.0) - 6.0/cs + cs/6.0
    df_6mon.loc[sinds, 'CZI'] = czi.values

data['CZI'] = df_6mon['CZI']
del df_6mon
data.head(7)
```
## Visualize
```
# Time series of the six-monthly CZI with +/-1 thresholds marking
# unusually wet (green) and dry (red) conditions.
ax = data['CZI'].plot(figsize=(15, 7), )
ax.axhline(1, linestyle='--', color='g')
ax.axhline(-1, linestyle='--', color='r')
ax.set_title('Six-Monthly China-Z Index', fontsize=16)
ax.set_xlim(data.index.min(), data.index.max())
ax.set_ylim(-3, 3)
data.head(12)
# Raw six-month rainfall totals for comparison.
data.Rain_6.plot(figsize=(15, 7),)
```
## Summary and discussion
NCC computes CZI only for 1-month time step. However, CZI could be computed for five time steps i.e. 1-, 3-, 6-, 9- and 12-month time step.
Many studies comparing the CZI with that of SPI and Z-score reported similar results (Wu et al., 2001; Morid et al., 2006).
Further, Wu et al. (2001) suggested that because of simplicity in calculating drought severity at monthly time step using CZI, it can be preferred over SPI, where rainfall data are often incomplete.
## References
Dogan, S., Berktay, A., Singh, V.P., 2012. Comparison of multi-monthly rainfall-based drought severity indices, with application to semi-arid Konya closed basin, Turkey. J. Hydrol. 470–471, 255–268.
Kendall, M.G.; Stuart, A. The Advanced Theory of Statistics; Charles Griffin & Company-High Wycombe: London, UK, 1977; pp. 400–401.
Morid, S., Smakhtin, V., Moghaddasi, M., 2006. Comparison of seven meteorological indices for drought monitoring in Iran. Int. J. Climatol. 26, 971–985.
Wilson, E.B., Hilferty, M.M., 1931. The Distribution of Chi-Square. Proc. Natl. Acad. Sci. USA 17, 684–688.
Wu, H., Hayes, M.J., Weiss, A., Hu, Q.I., 2001. An evaluation of the standardized precipitation index, the china-Zindex and the statistical Z-Score. Int. J. Climatol.21, 745–758. http://dx.doi.org/10.1002/joc.658.
| github_jupyter |
```
import os
import sys
sys.path.append(f'{os.environ["HOME"]}/Projects/planckClusters/catalogs')
from load_catalogs import load_PSZcatalog
from tqdm import tqdm_notebook
data = load_PSZcatalog()

# Source image directories, one per imaging survey.
PS1_dir = f'{os.environ["HOME"]}/Projects/planckClusters/data/extern/PS1'
SDSS_dir = f'{os.environ["HOME"]}/Projects/planckClusters/data/extern/SDSS'
DECaLS_dir = f'{os.environ["HOME"]}/Projects/planckClusters/data/extern/DECaLS'
DES_dir = f'{os.environ["HOME"]}/Projects/planckClusters/data/extern/DES'

# Destination tree where per-object symlinks are created.
outpath = './data_full_new'
# Each survey: (image directory, per-object file-name suffixes to link).
# Factoring the four near-identical copy-pasted blocks into one loop;
# the unused `relpath` computations in the original are dropped.
_surveys = [
    (PS1_dir, ['_PS1stack_g.fits', '_PS1stack_r.fits', '_PS1stack_i.fits',
               '_PS1stack_z.fits', '_PS1stack_y.fits', '_PS1stack_irg.tiff']),
    (SDSS_dir, ['_SDSSstack_g.fits', '_SDSSstack_r.fits', '_SDSSstack_i.fits',
                '_SDSSstack_z.fits', '_SDSSstack_irg.tiff']),
    (DECaLS_dir, ['_DECaLSstack_r.fits', '_DECaLSstack.jpg']),
    (DES_dir, ['_DESstack_r.fits', '_DESstack.jpg']),
]

for name in tqdm_notebook(data['NAME'], total=len(data['NAME'])):
    name = name.replace(' ', '_')

    # Only link into objects that already have an output directory.
    if not os.path.exists(f'{outpath}/{name}'):
        continue

    for survey_dir, suffixes in _surveys:
        if not os.path.isdir(f'{survey_dir}/{name}'):
            continue
        for suffix in suffixes:
            try:
                os.symlink(f'{survey_dir}/{name}/{name}{suffix}',
                           f'{outpath}/{name}/{name}{suffix}')
            except FileExistsError:
                # Link already present from a previous run -- fine.
                pass
```
| github_jupyter |
# Keras mnist LeNet-5 v2
**此项目为测试修改版的LeNet-5**
- 目前达到$0.9929$的准确率
```
%matplotlib inline
import os
import PIL
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from IPython import display
from functools import partial
from sklearn.preprocessing import normalize
from keras import backend
from keras.utils import np_utils, plot_model
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, MaxPool2D, Input, AveragePooling2D
from keras.layers import Activation, Dropout, Flatten, BatchNormalization
import warnings
warnings.filterwarnings('ignore')
np.random.seed(42)
```
## 准备数据
```
# Full MNIST as one CSV (70k rows); column '0' is the label, rest are pixels.
file_path = r"I:\Dataset\mnist\all_mnist_data.csv"
mnist_data = pd.read_csv(file_path)

# Random 60k/10k train/test split.
idx = np.random.permutation(len(mnist_data))
train_data = mnist_data.iloc[idx[: 60000]]
test_data = mnist_data.iloc[idx[60000: ]]

# Reshape flat pixel rows into (N, 28, 28, 1) float images.
X_train = np.array(train_data.drop('0', axis=1)).reshape(-1, 28, 28, 1).astype("float32")
X_test = np.array(test_data.drop('0', axis=1)).reshape(-1, 28, 28, 1).astype("float32")
y_train = np.array(train_data['0'])
y_test = np.array(test_data['0'])

# One-hot encode the labels.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

# Carve a 10k validation set off the front of the training data.
x_train = X_train[10000:]
t_train = y_train[10000:]
x_val = X_train[:10000]
t_val = y_train[:10000]

print("\nimgs of trainset : ", x_train.shape)
print("labels of trainset : ", t_train.shape)
print("imgs of valset : ", x_val.shape)
print("labels of valset : ", t_val.shape)
print("imgs of testset : ", X_test.shape)
print("labels of testset : ", y_test.shape)
```
## 搭建模型
```
def myCNN():
    """Build the modified LeNet-5: two conv/pool stages plus a
    dropout-regularized dense head for 10-class MNIST output."""
    model = Sequential()
    # Stage 1: 16 5x5 filters over the 28x28x1 input, then 2x2 max-pooling.
    model.add(Conv2D(filters=16,
                     kernel_size=(5, 5),
                     padding='same',
                     input_shape=(28, 28, 1),
                     activation='relu',
                     name='conv2d_1'))
    model.add(MaxPool2D(pool_size=(2, 2), name='max_pool2d_1'))
    # Stage 2: 36 5x5 filters over the pooled 14x14x16 feature maps.
    # FIX: the original passed input_shape=(14, 14, 1) here; Keras only
    # honors input_shape on the first layer and the stated shape was wrong
    # anyway (channels should be 16), so the misleading argument is removed.
    model.add(Conv2D(filters=36,
                     kernel_size=(5, 5),
                     padding='same',
                     activation='relu',
                     name='conv2d_2'))
    model.add(MaxPool2D(pool_size=(2, 2), name='max_pool2d_2'))
    model.add(Dropout(0.25, name='dropout_1'))
    model.add(Flatten(name='flatten_1'))
    model.add(Dense(128, activation='relu', name='dense_1'))
    model.add(Dropout(0.5, name='dropout_2'))
    # Softmax over the 10 digit classes.
    model.add(Dense(10, activation='softmax', name='dense_2'))
    return model

model = myCNN()
model.summary()
```
### 计算资源的分配
```
# Cap this process at 20% of GPU memory so other jobs can share the card.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
sess = tf.Session(config=config)
# Make Keras use the constrained session.
backend.set_session(sess)
```
### 训练
```
"""Train the model(s) and persist both the checkpoints and the history.

Models are saved into the sub-directory `modeldir`; the complete training
history is pickled into the single file `hisfile`."""
models_name = "Keras_mnist_LeNet-5_v2" # common prefix for model file names
factor_list = [""] # hyper-parameter variants to sweep in this run
model_list = [] # resulting model file names
for i in range(len(factor_list)):
    modelname = models_name + factor_list[i] + ".h5"
    model_list.append(modelname)

# Create the model-checkpoint sub-directory.
if not os.path.isdir("saved_models"):
    os.mkdir("saved_models")
modeldir = r"saved_models"
# Create the training-history directory.
if not os.path.isdir("train_history"):
    os.mkdir("train_history")
# Path of the pickled training-history file.
hisfile = r"train_history\Keras_mnist_LeNet-5_v2.train_history"

# One entry per model: {modelname: train_history}, where train_history is
# a dict with four keys covering train/val loss and accuracy.
model_train_history = dict()

# Start training.
epochs=100
batch_size = 32
steps_per_epoch=1250
for i in range(len(model_list)):
    model = myCNN()
    modelname = model_list[i]
    modelpath = os.path.join(modeldir, modelname)
    train_his = np.array([]).reshape(-1, 2)
    val_his = np.array([]).reshape(-1, 2)
    # Plain generator (no augmentation configured).
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    model.compile(loss="categorical_crossentropy",
                  optimizer=keras.optimizers.Adam(),
                  metrics=["accuracy"])
    print("\ntraining model : ", modelname)
    ck_epoch, max_val_acc = 0, 0.0
    # NOTE(review): range(epochs+1) runs 101 passes for epochs=100.
    for epoch in range(epochs+1):
        # NOTE(review): this inner `i` shadows the outer model index;
        # harmless here because the outer loop re-reads `i` from range().
        i = 0
        tr_his = []
        for X, y in datagen.flow(x_train, t_train, batch_size=batch_size):
            his = model.train_on_batch(X, y)
            tr_his.append(his)
            i += 1
            # datagen.flow loops forever; stop after steps_per_epoch batches.
            if i >= steps_per_epoch: break
        tr = np.mean(tr_his, axis=0)
        val = model.evaluate(x_val, t_val, verbose=0)
        train_his = np.vstack((train_his, tr))
        val_his = np.vstack((val_his, val))
        if epoch<10 or epoch%5==0:
            print("%4d epoch: train acc: %8f loss: %8f val acc: %8f loss: %8f"%(epoch, tr[1], tr[0], val[1], val[0]))
        # Checkpoint whenever validation accuracy improves.
        if val[1] > max_val_acc:
            model.save(modelpath)
            print("val acc improved from %6f to %6f"%(max_val_acc, val[1]))
            max_val_acc = val[1]
            ck_epoch = epoch
    model_train_history[modelname] = {"acc": train_his[:, 1], "val_acc": val_his[:, 1],
                                      "loss": train_his[:, 0], "val_loss": val_his[:, 0]}

"""Persist the training history."""
fo = open(hisfile, 'wb')
pickle.dump(model_train_history, fo)
fo.close()
```
### 可视化训练过程
```
def show_train_history(saved_history, his_img_file):
    """Plot accuracy and loss curves for every saved model and save the figure.

    saved_history maps model name -> dict with keys acc/val_acc/loss/val_loss.
    """
    modelnames = sorted(list(saved_history.keys()))
    train = ["acc", "loss"]
    val = ["val_acc", "val_loss"]
    """Two panels: accuracy (left) and loss (right)."""
    fig, ax = plt.subplots(1, 2, figsize=(16, 5))
    ax = ax.flatten()
    # Spread line colors evenly across the models.
    color_add = 0.9/len(saved_history)
    for i in range(2):
        c = 0.05
        for j in range(len(saved_history)):
            modelname = modelnames[j]
            train_history = saved_history[modelname]
            # modelname[21:-3] strips the shared prefix and ".h5" suffix.
            ax[i].plot(train_history[train[i]],
                       color=(0, 1-c, 0),
                       linestyle="-",
                       label="train_"+modelname[21:-3])
            ax[i].plot(train_history[val[i]],
                       color=(c, 0, 1-c),
                       linestyle="-",
                       label="val_"+modelname[21:-3])
            c += color_add
        ax[i].set_title('Train History')
        ax[i].set_ylabel(train[i])
        ax[i].set_xlabel('Epoch')
    ax[0].legend(loc="lower right")
    ax[1].legend(loc="upper right")
    ax[0].set_ylim(0.9, 1.0)
    ax[1].set_ylim(0, 0.2)
    plt.suptitle("LeNet-5_v2")
    print("saved img: ", his_img_file)
    plt.savefig(his_img_file)
    plt.show()

"""Load the training history, visualize it, and save the image."""
if not os.path.isdir("his_img"):
    os.mkdir("his_img")
his_img_file = r"his_img\LeNet-5_v2.png"

# NOTE(review): fo2 is never closed after loading.
fo2 = open(hisfile, "rb")
saved_history1 = pickle.load(fo2)
show_train_history(saved_history1, his_img_file)
```
## 在测试集上测试
```
# Reload the best checkpoint (highest validation accuracy) and score it
# on the held-out test set.
smodel = load_model(modelpath)
print("test model: ", os.path.basename(modelpath))
loss, acc = smodel.evaluate(X_test, y_test)
print("test :acc: %.4f"%(acc))
```
| github_jupyter |
```
class Chain:
    """A protein chain: a chain identifier plus the residues that belong to it."""

    def __init__(self, cid):
        self.cid = cid   # chain identifier (single character from the PDB file)
        self.res = []    # Residue objects, appended in file order

    def addres(self, res):
        """Attach a residue to this chain."""
        self.res.append(res)

    def printchain(self):
        """Print a one-line summary of the chain."""
        print("chain: %s" % str(self.cid))
class Residue:
    """One residue: its identifying tuple, derived chain id, and its atoms."""

    def __init__(self, resid):
        # resid is the (resName, seq, cid) tuple built by Atom.
        self.resid = resid
        self.cid = self.resid[2]  # chain id is the third element of the tuple
        self.atoms = []           # Atom objects, appended in file order

    def addatom(self, atom):
        """Attach an atom to this residue."""
        self.atoms.append(atom)

    def printme(self):
        """Print a one-line summary of the residue."""
        print("residue: %s" % str(self.resid))
# return
class Atom:
    """One ATOM/HETATM record parsed from a fixed-column PDB line."""

    def __init__(self, line):
        # Defaults are placeholders; loadline() overwrites them immediately.
        self.resName = ""
        self.xyz = (0, 0, 0)
        self.name = ""
        self.cid = ""
        self.seq = 0
        self.serial = 0
        self.loadline(line)
        # Residue key used to group atoms: (residue name, sequence number, chain id).
        self.resid = (self.resName, self.seq, self.cid)

    def loadline(self, line):
        """Parse the fixed PDB columns of *line* into attributes."""
        self.serial = int(line[6:11])
        self.name = line[12:16]        # atom name field, padding spaces kept
        self.resName = line[17:20]
        self.cid = line[21]
        self.seq = int(line[22:26])
        raw_coords = (line[30:38], line[38:46], line[46:54])
        self.xyz = tuple(float(c) for c in raw_coords)

    def printline(self):
        """Re-emit the atom in an approximate PDB ATOM layout."""
        fields = (self.serial, self.name, self.resName, self.cid,
                  self.seq, self.xyz[0], self.xyz[1], self.xyz[2])
        print("ATOM %5d %4s %3s %1s%4d %8.3f%8.3f%8.3f" % fields)
fname = '1fn3.pdb'
# Keep only coordinate records: "ATOM  " (standard residues, 6-column record
# name per the PDB format) and "HETATM" (heteroatoms). Use a context manager
# so the file handle is closed promptly (the original open() leaked it).
with open(fname) as pdb_file:
    atomlines = [line for line in pdb_file
                 if line[:6] == "ATOM  " or line[:6] == "HETATM"]
atoms = [Atom(line) for line in atomlines]
# Group atoms into Residue objects, keyed by their (resName, seq, cid) tuple.
# A dict index gives O(1) residue lookup per atom instead of the original
# O(n^2) scan over the residues list; insertion order -- and therefore the
# order of the residues list -- is unchanged.
residues = []
residue_index = {}
for atom in atoms:
    res = residue_index.get(atom.resid)
    if res is None:
        # First atom of a new residue: create it and register it.
        res = Residue(atom.resid)
        residue_index[atom.resid] = res
        residues.append(res)
    res.addatom(atom)
# Group residues into Chain objects, again using a dict for O(1) chain lookup
# (the original scanned the chains list for every residue). Chain order in
# the chains list still follows first appearance in the file.
chains = []
chain_index = {}
for res in residues:
    chain = chain_index.get(res.cid)
    if chain is None:
        chain = Chain(res.cid)
        chain_index[res.cid] = chain
        chains.append(chain)
    chain.addres(res)
# Peptide-bond lengths: for every residue, find backbone carbonyl C atoms and
# the next residue's backbone N atoms, and record their Euclidean distance.
distances = []
for chain in chains:
    for res in chain.res:
        seq_skipped = False
        # Residue sequence number from the resid tuple (resName, seq, cid).
        current_seq = res.resid[1]
        # Assumes seq numbering starts at 1 and is dense up to len(chain.res),
        # so chain.res[current_seq] is the *next* residue -- TODO confirm this
        # holds for chains with gaps or non-1-based numbering.
        if current_seq != len(chain.res):
            next_res = chain.res[current_seq]
            if next_res.resid[1] != current_seq + 1:
                # Numbering gap: the residue at this list position is not seq+1.
                seq_skipped = True
                print('sequence skipped after', res.resid)
            for atom1 in res.atoms:
                # Substring test: matches atom names containing "C " (e.g. the
                # backbone carbonyl " C  "); presumably intended for backbone C
                # only -- verify it cannot match other names in this file.
                if 'C ' in atom1.name:
                    C_coord = atom1.xyz
                    for atom2 in next_res.atoms:
                        if 'N ' in atom2.name:
                            N_coord = atom2.xyz
                            # Euclidean C-N distance in Angstroms.
                            distance = ((C_coord[0] - N_coord[0])**2 + (C_coord[1] - N_coord[1])**2 + (C_coord[2] - N_coord[2])**2)**.5
                            distances.append(distance)
        # NOTE(review): seq_skipped is reset for every residue, so this prints
        # once per gap-free residue; it looks intended to run once per chain
        # after the residue loop -- confirm before changing behavior.
        if seq_skipped == False:
            print('no sequence skipped')
# Histogram of all C-N peptide-bond distances collected above.
# (matplotlib/numpy are imported here because this was a separate notebook
# cell; a repeated import is a harmless no-op.)
import matplotlib.pyplot as plt
import numpy as np
plt.hist(distances)
plt.title("Histogram")
plt.xlabel("distances")
plt.ylabel("frequency")
plt.show()
# Recompute the C-N peptide-bond distances, restricted to the first chain.
distances = []
first_chain = chains[0].res
for res in first_chain:
    seq_no = res.resid[1]
    if seq_no == len(first_chain):
        continue  # last residue in the chain: no successor to bond to
    follower = first_chain[seq_no]
    for donor in res.atoms:
        if 'C ' not in donor.name:
            continue
        c_pos = tuple(float(v) for v in donor.xyz)
        for acceptor in follower.atoms:
            if 'N ' in acceptor.name:
                n_pos = tuple(float(v) for v in acceptor.xyz)
                # Euclidean distance between the carbonyl C and the next N.
                gap = sum((a - b) ** 2 for a, b in zip(c_pos, n_pos)) ** .5
                distances.append(gap)
print(len(chains[0].res))
print(len(distances))
# Histogram of the chain-0 C-N distances (the counts printed above should be
# close: roughly one distance per residue pair, fewer where atoms are missing).
# Duplicate cell-level imports are harmless no-ops.
import matplotlib.pyplot as plt
import numpy as np
plt.hist(distances)
plt.title("Histogram")
plt.xlabel("distances")
plt.ylabel("frequency")
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/krmiddlebrook/intro_to_deep_learning/blob/master/machine_learning/lesson%203%20-%20Neural%20Networks/intro-to-neural-networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Intro to Neural Networks
<figure><img src='https://mk0analyticsindf35n9.kinstacdn.com/wp-content/uploads/2018/12/nural-network-banner.gif' width='70%'></img><figcaption>A Feed Forward Neural Network</figcaption>
</figure>
In the previous lesson, we introduced the softmax regression method to solve multi-class classification tasks, implementing a classifer to recognize 10 handwritten digits from the MNIST digits dataset.
We've come a long way and covered many concepts throughout this series, with each lesson building on the previous material. We've learned how to clean data, create linear models (via linear regression), coerce model outputs into a valid probability distribution (via logistic and softmax regression), train models using Sklearn and Tensorflow, apply the appropriate loss function, and to minimize it with respect to our model's parameters (via optimization algorithms). Now that we have a healthy understanding of these concepts in the context of simple linear models, we are ready to explore neural networks--one of the most exciting and successful methods in modern machine learning!
In this lesson, we describe deep linear neural networks at a high level, focusing on their structure, and demonstrate how to build one using Tensorflow.
To make this lesson more approachable, we don't cover every detail about neural networks here, but we aim to provide enough information for you to create your own neural networks and to inspire you to explore deep learning in more detail.
Lesson roadmap:
- High level introduction to *neural networks*.
- Building neural networks in Python - recreating the feed forward neural network model in 3Blue1Brown's excellent video [But what is a Neural Network? | Deep learning, chapter 1](https://www.youtube.com/watch?v=aircAruvnKk&t=436s) and training it to classify handwritten digits. As you will see, this simple feed forward neural network achieves impressive results.
## Neural Networks
Although neural networks only recently became popular, they've been around for quite some time. In fact, they first appeared in machine learning research way back in the late 1950s! But they didn't become popular until after 2012, when researchers built a neural network to classify different kinds of labeled images, achieving groundbreaking results (see [ImageNet Classification with Deep Convolutional
Neural Networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)). Since then, neural networks have become widely used in machine learning. Neural networks are successful, in part, because they can effectively learn representations of complex data (e.g., images, text, sound, tabular, etc.), especially given enough data and computing power.
At a high level, there are three fundamental types of neural networks: 1) encoders, 2) decoders, or 3) a combination of both. We will focus on *encoders*.
Encoder networks take in some input data (i.e., images, texts, sounds, etc.) and output *predictions*, just like the linear models we've been working with. The simplest type of neural networks are called feed forward neural networks (FFNNs), and they consist of many *layers* of *neurons* each *fully-connected* to those in the layer below (from which they receive input) and those above (which they, in turn, influence).
FFNNs may sound complex right now, but hang in there. In many ways FFNNs are the superpowered version of the linear models we already know about. Like the linear models we've discussed (linear/logistic/softmax regression), neural networks can be configured to solve different kinds of tasks: either *regression* or *classification*.
Here are some quick facts about neural networks:
- They are effective models for learning to represent complex data (like images, text, sound, tabular, etc.).
- Encoder-based networks, which take input data and output predictions, are probably the most common neural networks - they are useful for classification and regression tasks
- Feed forward neural networks (FFNNs) are the simplest type of neural network. They consist of many *layers* of *neurons* each *fully-connected* to those in the layer below (from which they receive input) and those above (which they, in turn, influence).
- FFNNs are like linear models on steroids. They have many more parameters than simple linear models, which enables them to learn more complex relationships from the input data.
- Even though FFNNs are the simplest kind of neural network, they can be very effective.
**Challenge:** What are two tasks that you think encoder networks might be at good at solving?
### Feed Forward Neural Networks
<figure><img src='https://thumbs.gfycat.com/WeepyConcreteGemsbok-size_restricted.gif' width='100%'></img><figcaption>A Feed Forward Neural Network | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
Did you watch the [3Blue1Brown video on neural networks](https://www.youtube.com/watch?v=aircAruvnKk&t=436s)? If you haven't yet, I highly recommend checking it out (feel free to rewatch it too, it's a great overview of neural networks). I'll frequently be referencing important concepts that the video talks about.
In the following sections, we will summarize the key concepts behind neural networks. First, we describe the motivation and inspiration behind neural networks. Then, we dive into the structure of neural networks, outlining a few critical pieces that make them work.
*Note, we describe these concepts from the perspective of a feed forward neural network. That said, the fundamental ideas discussed generalize to almost every type of neural network.*
#### Neural Networks: Neural Network $=$ Brain?
<figure><img src='https://github.com/BreakoutMentors/Data-Science-and-Machine-Learning/blob/master/images/neural-network-brain-pizza-yoda-analogy.png?raw=true' width='75%'></img><figcaption>"Pizza, I like" - Yoda</figcaption>
</figure>
No, neural networks $\neq$ brains.
While neural networks don't actually operate like brains, they were inspired by them.
Let's consider an extremely oversimplified version of the brain. The brain is an organ that uses neurons to process information and make decisions. The neurons are what the brain uses to process data (i.e., information about the world). When some piece of data is sent to a neuron it activates (or doesn't). The magnitude/strength (i.e., positive or negative) of the activation triggers other groups of neurons to activate (or not). Eventually, this process outputs a decision--based on a combination of the prior triggers and activations--as a response to the input data. As an example, let's say there is a pizza in the kitchen and my nose picks up the scent. The smell of freshly baked dough and melted cheese activates my "I'm hungry neurons". Eventually, I can't ignore these neurons any longer, so I run to the kitchen and eat some pizza.
#### Neural Networks: Neurons
<figure><img src='https://github.com/BreakoutMentors/Data-Science-and-Machine-Learning/blob/master/images/neuron-3blue1brown.png?raw=true' width='75%'></img><figcaption>Neural Networks: Neuron | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
**Neurons** are at the core of neural networks (after all, they are practically in the name). At a high level, a neuron holds a corresponding value (i.e., number) called an **activation**. The activation can be represented by a tiny value, a large value, or a value somewhere in between. A neuron is "lit up" (i.e., activated) when its corresponding activation is large, and it is "dim" (i.e., not very activated) when its activation is small. Connecting this to the pizza example, my "I'm hungry neurons" lit up after I smelled the pizza in the kitchen.
#### Neural Networks: Layers
<figure><img src='https://miro.medium.com/max/1280/1*_nTmA2RowzQBCqI9BVtmEQ.gif' width='75%'></img><figcaption>The Neural Network's Secret Sauce: Stacking Layers | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
The secret sauce driving neural networks is the technique of *stacking layers*. At a high level, this method enables the neural network to learn an effective representation of the data. The layers that are in between the input layer and the output layer are called *hidden layers*.
A **layer** is composed of a set of **neurons**. We can manually configure the number of neurons we want to have in each layer, except for in the first and last ones. When we add more neurons and layers to the model, we add more parameters (weights and biases) to it. As a result, larger models (models with many parameters) can be computationally expensive, but very effective. This creates a trade-off between computation effeciency and model representation ability (making smaller models as effective as bigger ones is an active area of research).
For classification tasks, the number of neurons in the last layer is determined by the number of categories/classes in the dataset. In regression tasks, by contrast, there is generally only one neuron in the final layer, since we are predicting a continuous value (e.g., the happiness score for a particular country).
**Challenge:** In the above figure (from previous cell), how many layers are in the neural network? How many are hidden layers? How many neurons are in the first layer? How many are in the last layer?
#### Neural Networks: Weights & Activation Functions
<figure><img src='https://thumbs.gfycat.com/BabyishGeneralFruitfly-size_restricted.gif' width='65%'></img><figcaption>Calculating a Neuron's Activation: Connections and Weights (1) | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
<figure><img src='https://thumbs.gfycat.com/GlitteringCavernousGoosefish-small.gif' width='65%'></img><figcaption>Calculating a Neuron's Activation: Connections and Weights (2)| <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
Neural networks pass information through the network using connections between pairs of neurons in adjacent layers. Each connection has a corresponding **weight** parameter that is learned during the model training phase. As shown in the figure above, the activation of a neuron in a subsequent layer is determined by the *weighted sum* of the weights and activations of the neurons in the previous layer (i.e., connections). A **bias** term is added at the end of the weighted sum to control how large/small a neuron's weighted sum must be to activate. Before the neuron receives a final activation value, the weighted sum is *squeezed* by an **activation function**.
Activation functions and parameters (weights and biases) may sound intimidating. Fortunately, you already know a lot about these concepts: 1) the *sigmoid* and *softmax* logit functions are examples of activation functions, 2) linear models (linear/logistic/softmax) use the same *weighted sum* method to activate neurons in subsequent layers, the difference is these networks only have one layer after the input.
As you may remember from the logistic and softmax lessons, these logit functions convert the inputs to a valid probability space. An activation function, more generally, can be defined as any function that transforms the neuron output. It is common to choose an activation function that normalizes the input between 0 and 1 or -1 and 1.
Activation functions play a critical role in building effective deep neural networks. They can help the network converge quickly (find the right parameters) and improve the model's overall performance.
In the diagrams above, the second layer has one neuron. This neuron is connected to every other neuron in the previous layer. Consequently, it has 784 connections plus one bias term. That's a lot of number crunching! For this reason, we generally select activation functions that can be computed efficiently (quickly).
<figure><img src='https://github.com/BreakoutMentors/Data-Science-and-Machine-Learning/blob/master/images/sigmoid-activation-3Blue1Brown.png?raw=true' width='65%'></img><figcaption>Calculating a Neuron's Activation: Sigmoid Activation Function (2)| <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
So far we've only discussed connections and activations in the context of one neuron in a subsequent layer. But, most layers have many neurons. The good news is, we calculate neuron activations in the same way as before. The bad news is, we have to repeat the calculation process many times over. For example, in the diagrams below we see that all 16 neurons in the 2nd layer are connected to every other neuron in the 1st layer (i.e., 784 neurons). Thus, we need to perform $784\times16$ weights $ + 16$ biases calculations to get the activations for the 16 neurons in the 2nd layer. Doing this by hand would be way too difficult, but luckily, we can make computers do most of the heavy lifting.
<figure><img src='https://github.com/BreakoutMentors/Data-Science-and-Machine-Learning/blob/master/images/2-layer-weights-biases-connections-3Blue1Brown.png?raw=true' width='65%'></img><figcaption>Calculating a Neuron's Activation: Sigmoid Activation Function (2) | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
**Challenge:** In the below neural network diagram, how many weights and biases are there between the 2nd layer and the 3rd layer? How many total weights and biases are there in the entire network? Hint: all neurons are connected to every other neuron in the previous layer.
<figure><img src='https://thumbs.gfycat.com/DeadlyDeafeningAtlanticblackgoby-poster.jpg' width='65%'></img><figcaption>A Neural Network: Total Weights & Biases | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
## Building a Neural Network: Summary
Now that we know a little about neural networks, it's time to make our own! In this section, we demonstrate how to build a neural network in Python using Tensorflow. Specifically, we implement the neural network from 3Blue1Brown's video [But what is a Neural Network? Deep Learning Part 1](https://www.youtube.com/watch?v=aircAruvnKk&t=436s) to classify 10 types of handwritten digits from the MNIST dataset. Before we start, let's summarize what we know so far about neural networks:
- *Stacking layers* is their secret sauce - enabling the model to learn an effective representations of the data (most of the time).
- Layers are comprised of *neurons*. We configure the number of neurons in *hidden layers*.
- Neurons hold a corresponding *activation* - large activations "light up" neurons.
- The activations of neurons are determined by the weighted sum of their *connections* with the previous layer's neurons - quantified by *weights* and a *bias* term. The resulting output is then squeezed by an *activation function* such as the *sigmoid* function.
- For classification tasks, the number of neurons in the last layer corresponds to the number of classes/categories in the dataset.
Now, it's time to make our first neural network!
### Classification of Handwritten Digits with a Feed Forward Neural Network
<figure><img src='https://thumbs.gfycat.com/ViciousUnnaturalAmethystsunbird-max-1mb.gif' width='75%'></img><figcaption>A Neural Network: Total Weights & Biases | <em>Source: <a href='https://www.youtube.com/watch?v=aircAruvnKk&t=436s'>3Blue1Brown - But what is a Neural Network? Deep Learning Part 1</a></em></figcaption>
</figure>
In this section, we will recreate the feed forward neural network (FFNN) from 3Blue1Brown's video [But what is a Neural Network? Deep Learning Part 1](https://www.youtube.com/watch?v=aircAruvnKk&t=436s) and use it to classify handwritten digits from the MNIST dataset. This process involves several steps: 1) [loading the dataset](#-Step-1:-Loading-the-Dataset), 2) [building the model](#-Step-2:-Building-the-Model), 3) [training the model](#-Step-3:-Training-the-Model), 4) [testing the model](#-Step-4:-Testing-the-Model).
### Prerequisites: Google Colab + building neural networks in python
We recommend that you run this notebook in the cloud on Google Colab, if you're not already doing so. It's the simplest way to get started. Google Colab gives you free access to specialized compute resources called [GPUs](https://en.wikipedia.org/wiki/Graphics_processing_unit) and [TPUs](https://en.wikipedia.org/wiki/Tensor_processing_unit). In modern machine learning these resources are frequently used because they significantly speed up model training compared to using [CPUs](https://en.wikipedia.org/wiki/Central_processing_unit) (your computer is probably using CPUs). At a high level, GPUs and TPUs are special types of computer chips that excel at performing computations on large matrices. They perform mathematical matrix operations like multiplication, addition, subtraction, etc. at a much higher rate (i.e., speed) than CPUs.
Native Python code won't run on GPUs and TPUs because they use specialized operating system *kernels*. We could convert our code to a language that these kernels can understand, but that would be a very tedious and frustrating process. Fortunately, several open-source Python libraries exist that do the heavy lifting for us. In particular, the two most popular open-source libraries are [PyTorch](https://pytorch.org/) and [Tensorflow](https://www.tensorflow.org/). These libraries enable us to build custom neural networks in Python that can run on GPUs and TPUs!
In this lesson we will use Tensorflow because it is a bit easier to use (while you are learning about neural networks) and it comes preinstalled in Google Colab. It is also possible to [install TensorFlow locally](https://www.tensorflow.org/install/). But, the simple solution is normally best (i.e., use Google Colab).
[tf.keras](https://www.tensorflow.org/guide/keras) is the simplest way to build and train neural network models in TensorFlow, so we will use it throughout this lessons.
Note that there's [tf.keras](https://www.tensorflow.org/guide/keras) (comes with TensorFlow) and there's [Keras](https://keras.io/) (standalone). You should be using [tf.keras](https://www.tensorflow.org/guide/keras) because 1) it comes with TensorFlow so you don't need to install anything extra and 2) it comes with powerful TensorFlow-specific features.
Lastly, to accelerate model training time, you may want to run this notebook on a GPU in Google Colab. To do this, click on the "Runtime" tab in the top left corner of the notebook, click "Change runtime type", and select the "GPU" option under "Hardware accelerator".
```
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Flatten, Dense
# Commonly used modules
import numpy as np
import os
import sys
# Images, plots, display, and visualization
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import cv2
import IPython
from six.moves import urllib
print('Tensorflow version:', tf.__version__)
```
### Step 1: Loading the Dataset
The MNIST dataset contains 70k grayscale images of handwritten digits at a resolution of $28 \times 28$ pixels. Our goal is to build a classification model to take one of these images as input and predict the most likely digit contained in the image (along with a relative confidence about the prediction):
<figure><img src="https://i.imgur.com/ITrm9x4.png" width="65%"><figcaption><em>Source: <a href="https://deeplearning.mit.edu/">MIT Deep Learning</a></em></figcaption></figure>
Loading the dataset will return four NumPy arrays:
* The `train_images` and `train_labels` arrays are the *training set*—the data the model uses to learn.
* The `test_images` and `test_labels` arrays are the *test set*--the data the model is tested on.
The images are $28\times28$ NumPy arrays (i.e., the x variables), with pixel values ranging between 0 and 255. The *labels* (i.e., y variable) are an array of integers, ranging from 0 to 9. We will use *one-hot encoding* (the technique we learned about in the logistic regression lesson) to convert these labels to vectors (i.e., arrays with mostly 0s and a 1 at the index that corresponds to the data sample's digit category). We also need to *normalize* the input images by subtracting the mean and standard deviation of the pixels. Normalizing the data encourages our model to learn more generalizable features and helps it perform better on outside data. The final data processing step is "flattening" the $28\times28$ image pixel matrices into $784 \times 1$ arrays. We reshape the image matrices into arrays because our model expects the input to be a tensor with $784$ features.
Now, let's load the data!
```
# Model / data parameters
num_classes = 10
input_shape = (-1, 28*28)  # flatten each 28x28 image into a 784-pixel vector

# MNIST comes pre-split into train and test sets.
(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()

# Standardize pixel values using the *training* mean/std, applied to both splits
# so the test set sees exactly the same transformation.
train_images = train_images.astype("float32")
test_images = test_images.astype("float32")
mean = train_images.mean()
std = train_images.std()
train_images = (train_images - mean) / std
test_images = (test_images - mean) / std
print(f'normalized images mean and std pixel values: {round(train_images.mean(), 4)}, {round(train_images.std(), 4)}')

# Reshape the image matrices into flat feature vectors.
train_images = train_images.reshape(input_shape)
test_images = test_images.reshape(input_shape)
print("train_images shape:", train_images.shape)
print(train_images.shape[0], "train samples")
print(test_images.shape[0], "test samples")

# One-hot encode the integer digit labels into binary class matrices.
train_labels = keras.utils.to_categorical(train_labels, num_classes)
test_labels = keras.utils.to_categorical(test_labels, num_classes)
```
Let's display the first 5 images from the *training set* and display the class name below each image.
```
# Preview the first five training digits with their decoded labels.
plt.figure(figsize=(10, 2))
for col, (pixels, onehot) in enumerate(zip(train_images[:5], train_labels[:5]), start=1):
    plt.subplot(1, 5, col)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    # Flattened 784-vector back to a 28x28 image for display.
    plt.imshow(pixels.reshape(28, 28), cmap=plt.cm.binary)
    # argmax undoes the one-hot encoding to recover the digit.
    plt.xlabel(np.argmax(onehot))
```
#### Step 2: Building the Model
Remember that the secret sauce of neural networks is *stacking layers*? In code, we take advantage of this secret sauce by constructing several layers and combining them to create a neural network model. Building the model is a two step process that involves 1) stacking layers together using `keras.Sequential`, 2) configuring the loss function, optimizer, and metrics to monitor the model using the keras `compile` method. Loss functions, optimizers, and metrics aren't formally discussed in this lesson. Don't worry too much about them for now. They will be described in detail in a future lesson. The goal of this lesson is to introduce the underlying structure of neural networks, and demonstrate how to build/train/test one in Python. Nonetheless, a quick summary about loss functions, optimizers, and metrics can't hurt:
* **Loss function** - measures how accurate the model is during training, we want to minimize the value this function returns using an optimization method.
* **Optimizer** - defines the optimization method to use to update the model's weights based on the data it sees and its loss function.
* **Metrics** - monitors the model using a set of user-defined metrics; metrics are calculated at the end of every train and test cycle.
**Building the Model - Step 1: Stacking Layers with `keras.Sequential`**
The [3Blue1Brown video](https://www.youtube.com/watch?v=aircAruvnKk&t=436s) used a feed forward neural network with 2 hidden layers to classify handwritten digits. To recreate this neural network, first we need to build a model that 1) takes 784 image pixel feature vectors as input, 2) has 2 hidden layers with 16 neurons each (the video uses the sigmoid activation; the code below uses ReLU, a faster modern alternative), and 3) includes a final layer with 10 neurons (i.e., there are 10 digit classes so we need 10 neurons) and the *softmax* activation function. The softmax activation function normalizes the activations for the output neurons such that:
- every activation is between 0 and 1
- the sum of all activations is 1
Notice that the softmax activation is similar to the sigmoid activation--neuron activations are squeezed between 0 and 1. Softmax differs from sigmoid by constraining the sum of all activations to 1. For multi-class classification problems, where multiple categories/classes are present in the y variable, it is common to use the softmax activation (or a varient) in the final layer. This is because the softmax activation enables us to treat the final neuron activations as confidence values (i.e., probabilities). The neuron with the largest activation is selected as the category/class prediction.
Let's see what this looks like in Python code.
```
# Step 1: stack the model layers with keras.Sequential.
model = keras.Sequential()
model.add(Input(shape=input_shape[1]))                # 784-feature pixel vector input
model.add(Dense(16, activation="relu"))               # hidden layer 1
model.add(Dense(16, activation="relu"))               # hidden layer 2
model.add(Dense(num_classes, activation="softmax"))   # one output neuron per digit class

# Inspect the structure and the parameter (weight/bias) counts.
print(model.summary())
```
**Building the Model - Step 2: Configuring the Loss Function, Optimizer, & Metrics with the Keras `compile` Method**
The model structure is defined in step 1, so most of the building process is finished. But, we still need to configure the model's loss function, optimizer, and metrics using the keras `compile` method. We will use categorical cross-entropy for the loss function (the multi-class counterpart of binary cross-entropy), the Adam optimization method, and monitor accuracy, precision, and recall of the model.
```
# step 2: configure the loss function, optimizer, and model metrics
model.compile(loss="categorical_crossentropy", # categorical cross-entropy (multi-class loss; the original "BCE" label was wrong)
              optimizer="adam", # Adam optimization
              metrics=["accuracy", keras.metrics.Precision(), keras.metrics.Recall()] # monitor metrics
              )
```
Now, we can train the model!
#### Step 3: Training the Model
Training the neural network model requires the following steps:
1. Feed the training data to the model—in this example, the `train_images` and `train_labels` arrays.
2. The model learns to associate images and labels.
3. We ask the model to make predictions on a test set—in this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array.
We call the `model.fit` method to train the model—the model is "fit" to the training data:
```
# fit the model to the data; train for 20 epochs with batch size 128, holding
# out 10% of the training data as a validation split to monitor performance.
# NOTE(review): no early-stopping callback is configured here, despite what
# the original comment claimed.
batch_size = 128
epochs = 20
history = model.fit(train_images,
                    train_labels,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_split=0.1)
```
**Challenge:** As the model is trained, the loss and metrics are displayed. What is the final precision score on the training data?
Now that we finished training, let's view the results. We'll use the Pandas library to store the training history in a dataframe.
```
# Collect the per-epoch training metrics into a DataFrame and add an
# explicit epoch column for plotting.
hist = pd.DataFrame(data=history.history)
hist["epoch"] = history.epoch
# Display the dataframe (last expression in the notebook cell).
hist
```
Now, let's plot the loss function measure on the training and validation sets. The validation set is used to prevent overfitting ([learn more about it here](https://www.tensorflow.org/tutorials/keras/overfit_and_underfit)). However, because our network is small, the training converges (i.e., reaches an optimal loss value) without noticeably overfitting the data as the plot shows.
```
def plot_loss():
    """Plot per-epoch training vs. validation loss from the global `hist` dataframe."""
    epoch_axis = hist['epoch']
    # Leave a little headroom above whichever curve peaks higher.
    top = max(hist['loss'].max() + 0.2, hist['val_loss'].max() + 0.2)
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    for column, curve_label in (('loss', 'Train Error'), ('val_loss', 'Val Error')):
        plt.plot(epoch_axis, hist[column], label=curve_label)
    plt.legend()
    plt.ylim([0, top])
plot_loss()
```
Now, let's plot the accuracy metric on the training and validation set. Similar to the loss metric, we expect the validation accuracy to be a bit lower than the training accuracy. If the validation accuracy is noticeably different from the training one, we might want to do some more analysis. When the validation accuracy is much lower than the training accuracy, the model could be overfitting. When it is much higher than the training accuracy, the model could be underfitting (this happens less often). However, the plot suggests the model is not overfitting/underfitting the data.
```
def plot_accuracy():
    """Plot per-epoch training vs. validation accuracy from the global `hist` dataframe."""
    epoch_axis = hist['epoch']
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    for column, curve_label in (('accuracy', 'Train Accuracy'), ('val_accuracy', 'Val Accuracy')):
        plt.plot(epoch_axis, hist[column], label=curve_label)
    plt.legend()
    # Accuracy is a proportion, so the axis is fixed to [0, 1].
    plt.ylim([0, 1])
plot_accuracy()
```
#### Step 4: Testing the Model
Our results on the training and validation data look promising, but we want to know whether our model performs well on unknown data. For this, we compare how the model performs on the test dataset:
```
print(test_images.shape)
# evaluate returns [loss, accuracy, precision, recall], matching the compile() metric order
test_loss, test_acc, test_prec, test_rec = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
```
Let's take a look at a few samples that the network classified incorrectly.
```
# Predicted class per test image (argmax over the 10-way softmax output).
predictions = np.argmax(model.predict(test_images, batch_size=32), axis=-1)
is_correct = predictions == np.argmax(test_labels, axis=-1)
# Idiomatic boolean negation instead of comparing against the literal False.
misclassified_indices = np.argwhere(~is_correct)
def plot_misclassified(imgs, labels, preds, misclassified_indices, n=5):
    """Show up to `n` misclassified digits with their true and predicted labels.

    imgs   -- image array, each entry reshapeable to 28x28
    labels -- one-hot true labels
    preds  -- predicted class indices (1-D)
    misclassified_indices -- output of np.argwhere, one index per row
    """
    plt.figure(figsize=(10, 3))
    for i, idx in enumerate(misclassified_indices[:n]):
        plt.subplot(1, n, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(imgs[idx].reshape(28, 28), cmap=plt.cm.binary)
        plt.xlabel(f'True: {np.argmax(labels[idx])}, Pred: {preds[idx][0]}')
plot_misclassified(test_images, test_labels, predictions, misclassified_indices)
```
We can see that some of these digits are hard to recognize, even for a human!
Oftentimes, the accuracy on the test dataset is a little less than the accuracy on the training dataset. Small differences are ok, but we don't want the test results to differ significantly from the training results--this suggests the model is overfitting/underfitting.
**Challenge:** Do you think the difference between the training accuracy and the testing accuracy is significant? Is the model overfitting? Is it underfitting? Are the misclassified images justifiably misclassified (i.e., does it make sense that the model misclassified them)?
## Recap
You made it! We covered a lot of material in this lesson. Don't worry if it doesn't all make sense yet. The concepts will become more intuitive as you practice building, training, and testing your own neural network models.
Let's summarize what we learned about neural networks:
- Neural Networks are popular and successful machine learning models that can learn effective representations from data (i.e., images, text, sound). They can perform *classification* tasks (see [Part 2](#-Part-2:-Classification-of-MNIST-Digits-with-Convolutional-Neural-Networks)), and can also be used to generate images, text, videos, and sound.
- Special libraries like Tensorflow and Pytorch enable us to build neural networks in Python and train them on accelerated hardware like GPUs and TPUs.
- Several steps are involved in making an effective neural network:
1. Loading the dataset
2. Building the model--stacking several layers and configuring the loss function, optimizer, and metrics.
3. Training the model--fitting the model on the training data.
4. Evaluating/Testing the model--evaluating the model on the testing data.
- Once a model is trained, it can be used to make predictions on outside data (see [Part 2, Step 5](#-Step-5:-Make-predictions-on-outside-data)).
#### Acknowledgements
- [MIT Deep Learning Basics](https://www.youtube.com/watch?v=O5xeyoRL95U&list=PLrAXtmErZgOeiKm4sgNOknGvNjby9efdf)
- [Dive into Deep Learning](https://d2l.ai/index.html)
```
```
| github_jupyter |
```
#Packages
import pandas as pd
import numpy as np
import os
import pickle
import random
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV,RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, precision_score, recall_score
from sklearn.model_selection import cross_val_score
from sklearn.compose import ColumnTransformer
import warnings
warnings.filterwarnings("ignore")
#Environment variables
# Paths for the input dataset, serialized model, and metrics report are injected via
# the environment so the script runs unchanged across machines/CI.
DATASET_PATH=os.getenv('DATASET_PATH')
MODEL_PATH=os.getenv('MODEL_PATH')
METRICS_PATH=os.getenv('METRICS_PATH')
#Import data
df = pd.read_csv(DATASET_PATH)
print(df.shape)
df.head()
#Check NaN values
df.isnull().sum()
#Treating the data and dropping irrelevant columns
# NOTE(review): assumes these four columns exist in the CSV — verify against the dataset schema.
df=df.drop(['product_id','seller_id','creation_date','order_counts'],axis=1)
df=df.dropna()
df.shape
#Split data for training and test
X = df.drop(['category'],axis=1)
y = df['category']
# Fixed random_state keeps the train/test split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.33)
# Stratified folds preserve per-class proportions in every CV split.
cv = StratifiedKFold(n_splits=5)
print(X_train.shape)
print(y_train.shape)
#Select best parameters for each model tested
def choose_model(pip, params, cv, X, y, seed):
    """Run a randomized hyper-parameter search over `pip` and return the best estimator.

    pip    -- sklearn Pipeline to tune
    params -- parameter distributions for RandomizedSearchCV
    cv     -- cross-validation splitter
    X, y   -- training features and labels to fit the search on
    seed   -- random_state for reproducible sampling of candidates
    """
    search = RandomizedSearchCV(estimator=pip,
                                param_distributions=params,
                                cv=cv,
                                n_jobs=-1,
                                scoring='roc_auc',
                                random_state=seed)
    # Bug fix: fit on the X/y arguments instead of the global X_train/y_train,
    # so the function actually uses the data it is given.
    search.fit(X, y)
    best_choice = search.best_estimator_
    print('Best choice:','\n',best_choice)
    return best_choice
#Treat text data separately
# Each free-text column gets its own vectorizer; all remaining columns pass through untouched.
preprocess = ColumnTransformer(
    [('query_countvec', CountVectorizer(), 'query'),
     ('title_countvec', CountVectorizer(), 'title'),
     ('concatenated_tags_tfidf', TfidfVectorizer(ngram_range=(1,3)), 'concatenated_tags')],
    remainder='passthrough')
#Logistic regression model
# with_mean=False is required because the vectorized features are sparse.
pipeline = Pipeline([
    ('union',preprocess),
    ('scaler', StandardScaler(with_mean=False)),
    ('clf',LogisticRegression(random_state=42))
])
params = [
    {
        "clf__penalty": ['l2', 'l1'],
        "clf__C": np.logspace(0, 2, 10),
        "clf__solver":['newton-cg','saga', 'liblinear']
    }
]
# NOTE(review): newton-cg does not support the l1 penalty — the randomized search will
# error/skip on those sampled combinations; confirm this is acceptable.
logreg_result = choose_model(pip=pipeline,params=params,cv=cv,X=X_train,y=y_train,seed=42)
acc = cross_val_score(logreg_result, X_train, y_train, cv=cv,scoring='accuracy')
f1 = cross_val_score(logreg_result, X_train, y_train, cv=cv,scoring='f1_weighted')
print("%f accuracy with std of %f, and f1-score of %f with std of %f." % (acc.mean(), acc.std(),f1.mean(),f1.std()))
#Random forest model
# Same preprocessing + scaling front end as the logistic pipeline, swapping in a random forest.
pipeline = Pipeline([
    ('union',preprocess),
    ('scaler', StandardScaler(with_mean=False)),
    ('clf',RandomForestClassifier(random_state=42))])
params = [
    {
        "clf__n_estimators": [10, 50, 100, 1000],
        "clf__max_depth":[5, 10, 15, 25, 50, None],
        "clf__min_samples_leaf":[1, 2, 5, 10, 15, 100],
        "clf__max_leaf_nodes": [2, 3,5, 15,20]
    }
]
rf_result = choose_model(pip=pipeline,params=params,cv=cv,X=X_train,y=y_train,seed=42)
acc = cross_val_score(rf_result, X_train, y_train, cv=cv,scoring='accuracy')
f1 = cross_val_score(rf_result, X_train, y_train, cv=cv,scoring='f1_weighted')
print("%f accuracy with std of %f, and f1-score of %f with std of %f." % (acc.mean(), acc.std(),f1.mean(),f1.std()))
#Check metrics and choose final model
pipelines = [logreg_result,rf_result]
metric_columns = ['Model','train_acc','test_acc','train_prec','test_prec',
                  'train_rec','test_rec','train_f1','test_f1','train_auc','test_auc']
rows = []
for p in pipelines:
    # Predict once per split instead of re-running p.predict for every metric
    # (the original recomputed each prediction nine times per pipeline).
    train_pred = p.predict(X_train)
    test_pred = p.predict(X_test)
    train_proba = p.predict_proba(X_train)
    test_proba = p.predict_proba(X_test)
    rows.append({'Model': p['clf'].__class__.__name__,
                 'train_acc': accuracy_score(y_train, train_pred),
                 'test_acc': accuracy_score(y_test, test_pred),
                 'train_prec': precision_score(y_train, train_pred, average='weighted'),
                 'test_prec': precision_score(y_test, test_pred, average='weighted'),
                 'train_rec': recall_score(y_train, train_pred, average='weighted'),
                 'test_rec': recall_score(y_test, test_pred, average='weighted'),
                 'train_f1': f1_score(y_train, train_pred, average='weighted'),
                 'test_f1': f1_score(y_test, test_pred, average='weighted'),
                 'train_auc': roc_auc_score(y_train, train_proba, multi_class="ovo"),
                 'test_auc': roc_auc_score(y_test, test_proba, multi_class="ovo")})
# DataFrame.append was deprecated and removed in pandas >= 2.0 (and grows quadratically);
# build the frame once from the collected rows instead.
result = pd.DataFrame(rows, columns=metric_columns)
result
#Save best model
# The logistic-regression pipeline is kept as the final model.
model=logreg_result
filepath=MODEL_PATH
# Use a context manager so the file handle is flushed and closed deterministically
# (the original passed an unclosed open() straight into pickle.dump).
with open(filepath, 'wb') as model_file:
    pickle.dump(model, model_file)
#Save metrics
# Persist the chosen model's metric row for downstream reporting.
pd.DataFrame(result.loc[0]).to_csv(METRICS_PATH)
```
| github_jupyter |
# Building your Deep Neural Network: Step by Step
Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!
- In this notebook, you will implement all the functions required to build a deep neural network.
- In the next assignment, you will use these functions to build a deep neural network for image classification.
**After this assignment you will be able to:**
- Use non-linear units like ReLU to improve your model
- Build a deeper neural network (with more than 1 hidden layer)
- Implement an easy-to-use neural network class
**Notation**:
- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer.
- Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.
- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example.
- Example: $x^{(i)}$ is the $i^{th}$ training example.
- Subscript $i$ denotes the $i^{th}$ entry of a vector.
    - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations.
Let's get started!
### <font color='darkblue'> Updates to Assignment <font>
#### If you were working on a previous version
* The current notebook filename is version "4a".
* You can find your work in the file directory as version "4".
* To see the file directory, click on the Coursera logo at the top left of the notebook.
#### List of Updates
* compute_cost unit test now includes tests for Y = 0 as well as Y = 1. This catches a possible bug before students get graded.
* linear_backward unit test now has a more complete unit test that catches a possible bug before students get graded.
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the main package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- dnn_utils provides some necessary functions for this notebook.
- testCases provides some test cases to assess the correctness of your functions
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed.
```
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v4a import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
```
## 2 - Outline of the Assignment
To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:
- Initialize the parameters for a two-layer network and for an $L$-layer neural network.
- Implement the forward propagation module (shown in purple in the figure below).
- Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).
- We give you the ACTIVATION function (relu/sigmoid).
- Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.
- Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.
- Compute the loss.
- Implement the backward propagation module (denoted in red in the figure below).
- Complete the LINEAR part of a layer's backward propagation step.
- We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward)
- Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function.
- Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function
- Finally update the parameters.
<img src="images/final outline.png" style="width:800px;height:500px;">
<caption><center> **Figure 1**</center></caption><br>
**Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps.
## 3 - Initialization
You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.
### 3.1 - 2-layer Neural Network
**Exercise**: Create and initialize the parameters of the 2-layer neural network.
**Instructions**:
- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*.
- Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.
- Use zero initialization for the biases. Use `np.zeros(shape)`.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """
    Build the parameter dictionary for a 2-layer net (LINEAR->RELU->LINEAR->SIGMOID).

    Argument:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- dict with keys "W1" (n_h, n_x), "b1" (n_h, 1),
                  "W2" (n_y, n_h), "b2" (n_y, 1); weights are small random
                  values, biases start at zero.
    """
    np.random.seed(1)  # fixed seed so graded output is reproducible
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    ### END CODE HERE ###
    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    # Sanity-check every shape in one pass.
    expected_shapes = {"W1": (n_h, n_x), "b1": (n_h, 1), "W2": (n_y, n_h), "b2": (n_y, 1)}
    for name, shape in expected_shapes.items():
        assert parameters[name].shape == shape
    return parameters
parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td> **W1** </td>
<td> [[ 0.01624345 -0.00611756 -0.00528172]
[-0.01072969 0.00865408 -0.02301539]] </td>
</tr>
<tr>
<td> **b1**</td>
<td>[[ 0.]
[ 0.]]</td>
</tr>
<tr>
<td>**W2**</td>
<td> [[ 0.01744812 -0.00761207]]</td>
</tr>
<tr>
<td> **b2** </td>
<td> [[ 0.]] </td>
</tr>
</table>
### 3.2 - L-layer Neural Network
The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:
<table style="width:100%">
<tr>
<td> </td>
<td> **Shape of W** </td>
<td> **Shape of b** </td>
<td> **Activation** </td>
<td> **Shape of Activation** </td>
<tr>
<tr>
<td> **Layer 1** </td>
<td> $(n^{[1]},12288)$ </td>
<td> $(n^{[1]},1)$ </td>
<td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td>
<td> $(n^{[1]},209)$ </td>
<tr>
<tr>
<td> **Layer 2** </td>
<td> $(n^{[2]}, n^{[1]})$ </td>
<td> $(n^{[2]},1)$ </td>
<td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td>
<td> $(n^{[2]}, 209)$ </td>
<tr>
<tr>
<td> $\vdots$ </td>
<td> $\vdots$ </td>
<td> $\vdots$ </td>
<td> $\vdots$</td>
<td> $\vdots$ </td>
<tr>
<tr>
<td> **Layer L-1** </td>
<td> $(n^{[L-1]}, n^{[L-2]})$ </td>
<td> $(n^{[L-1]}, 1)$ </td>
<td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td>
<td> $(n^{[L-1]}, 209)$ </td>
<tr>
<tr>
<td> **Layer L** </td>
<td> $(n^{[L]}, n^{[L-1]})$ </td>
<td> $(n^{[L]}, 1)$ </td>
<td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>
<td> $(n^{[L]}, 209)$ </td>
<tr>
</table>
Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if:
$$ W = \begin{bmatrix}
j & k & l\\
m & n & o \\
p & q & r
\end{bmatrix}\;\;\; X = \begin{bmatrix}
a & b & c\\
d & e & f \\
g & h & i
\end{bmatrix} \;\;\; b =\begin{bmatrix}
s \\
t \\
u
\end{bmatrix}\tag{2}$$
Then $WX + b$ will be:
$$ WX + b = \begin{bmatrix}
(ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\
(ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\
(pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u
\end{bmatrix}\tag{3} $$
**Exercise**: Implement initialization for an L-layer Neural Network.
**Instructions**:
- The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.
- Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.
- Use zeros initialization for the biases. Use `np.zeros(shape)`.
- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. This means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers!
- Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).
```python
if L == 1:
parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01
parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))
```
```
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
    """
    Initialize weights and biases for an L-layer network.

    Arguments:
    layer_dims -- list of layer sizes, layer_dims[0] being the input size

    Returns:
    parameters -- dict "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layer_dims[l], layer_dims[l-1]) and
                  bl has shape (layer_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so graded output is reproducible
    parameters = {}
    L = len(layer_dims)  # number of layers in the network (counting the input layer)
    for l in range(1, L):
        fan_out, fan_in = layer_dims[l], layer_dims[l - 1]
        ### START CODE HERE ### (≈ 2 lines of code)
        parameters['W' + str(l)] = np.random.randn(fan_out, fan_in) * 0.01
        parameters['b' + str(l)] = np.zeros((fan_out, 1))
        ### END CODE HERE ###
        assert parameters['W' + str(l)].shape == (fan_out, fan_in)
        assert parameters['b' + str(l)].shape == (fan_out, 1)
    return parameters
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td> **W1** </td>
<td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]
[-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]
[-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]
[-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td>
</tr>
<tr>
<td>**b1** </td>
<td>[[ 0.]
[ 0.]
[ 0.]
[ 0.]]</td>
</tr>
<tr>
<td>**W2** </td>
<td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]
[-0.01023785 -0.00712993 0.00625245 -0.00160513]
[-0.00768836 -0.00230031 0.00745056 0.01976111]]</td>
</tr>
<tr>
<td>**b2** </td>
<td>[[ 0.]
[ 0.]
[ 0.]]</td>
</tr>
</table>
## 4 - Forward propagation module
### 4.1 - Linear Forward
Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:
- LINEAR
- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid.
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)
The linear forward module (vectorized over all the examples) computes the following equations:
$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$
where $A^{[0]} = X$.
**Exercise**: Build the linear part of forward propagation.
**Reminder**:
The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.
```
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
    """
    Linear part of a layer's forward pass: Z = W A + b.

    Arguments:
    A -- activations from the previous layer (or the input X): (prev layer size, m)
    W -- weight matrix of shape (current layer size, prev layer size)
    b -- bias column vector of shape (current layer size, 1); broadcast over examples

    Returns:
    Z -- pre-activation input to the layer's nonlinearity
    cache -- the tuple (A, W, b), kept for the backward pass
    """
    ### START CODE HERE ### (≈ 1 line of code)
    Z = W @ A + b
    ### END CODE HERE ###
    assert Z.shape == (W.shape[0], A.shape[1])
    cache = (A, W, b)
    return Z, cache
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
```
**Expected output**:
<table style="width:35%">
<tr>
<td> **Z** </td>
<td> [[ 3.26295337 -1.23429987]] </td>
</tr>
</table>
### 4.2 - Linear-Activation Forward
In this notebook, you will use two activation functions:
- **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
``` python
A, activation_cache = sigmoid(Z)
```
- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
``` python
A, activation_cache = relu(Z)
```
For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.
**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.
```
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python tuple containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently

    Note: if `activation` is neither "sigmoid" nor "relu", A is never bound and the
    shape assert below raises UnboundLocalError.
    """
    if activation == "sigmoid":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        ### START CODE HERE ### (≈ 2 lines of code)
        # Linear step Z = W A_prev + b, then squash with sigmoid.
        # sigmoid() returns (A, cache-with-Z) per the helper's documented contract above.
        Z, linear_cache = linear_forward(A_prev,W,b)
        A, activation_cache = sigmoid(Z)
        ### END CODE HERE ###
    elif activation == "relu":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        ### START CODE HERE ### (≈ 2 lines of code)
        # Same linear step, rectified instead: A = max(0, Z).
        Z, linear_cache = linear_forward(A_prev,W,b)
        A, activation_cache = relu(Z)
        ### END CODE HERE ###
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    # Both caches travel together so linear_backward and the activation backward
    # each get exactly what they need.
    cache = (linear_cache, activation_cache)
    return A, cache
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
```
**Expected output**:
<table style="width:35%">
<tr>
<td> **With sigmoid: A ** </td>
<td > [[ 0.96890023 0.11013289]]</td>
</tr>
<tr>
<td> **With ReLU: A ** </td>
<td > [[ 3.43896131 0. ]]</td>
</tr>
</table>
**Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers.
### 4.3 - L-Layer Model
For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.
<img src="images/model_architecture_kiank.png" style="width:600px;height:300px;">
<caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>
**Exercise**: Implement the forward propagation of the above model.
**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.)
**Tips**:
- Use the functions you had previously written
- Use a for loop to replicate [LINEAR->RELU] (L-1) times
- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`.
```
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
    """
    Full forward pass: [LINEAR->RELU] * (L-1) followed by LINEAR->SIGMOID.

    Arguments:
    X -- input data of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- final sigmoid activation (the predictions), shape (1, m)
    caches -- per-layer caches from linear_activation_forward(), in layer order
    """
    caches = []
    A = X
    L = len(parameters) // 2  # each layer contributes one W and one b
    # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
    ### START CODE HERE ### (≈ 2 lines of code)
    for layer in range(1, L):
        A, cache = linear_activation_forward(A,
                                             parameters['W' + str(layer)],
                                             parameters['b' + str(layer)],
                                             activation='relu')
        caches.append(cache)
    ### END CODE HERE ###
    # Implement LINEAR -> SIGMOID for the output layer. Add "cache" to the "caches" list.
    ### START CODE HERE ### (≈ 2 lines of code)
    AL, cache = linear_activation_forward(A,
                                          parameters['W' + str(L)],
                                          parameters['b' + str(L)],
                                          activation='sigmoid')
    caches.append(cache)
    ### END CODE HERE ###
    assert AL.shape == (1, X.shape[1])
    return AL, caches
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
```
<table style="width:50%">
<tr>
<td> **AL** </td>
<td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td>
</tr>
<tr>
<td> **Length of caches list ** </td>
<td > 3 </td>
</tr>
</table>
Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions.
## 5 - Cost function
Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.
**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$
```
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
cost = -(1/m) * (np.sum(np.multiply(Y,np.log(AL)) + np.multiply((1 - Y),np.log(1 - AL))))
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
```
**Expected Output**:
<table>
<tr>
<td>**cost** </td>
<td> 0.2797765635793422</td>
</tr>
</table>
## 6 - Backward propagation module
Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters.
**Reminder**:
<img src="images/backprop_kiank.png" style="width:650px;height:250px;">
<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>
<!--
For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:
$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$
In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.
Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.
This is why we talk about **backpropagation**.
!-->
Now, similar to forward propagation, you are going to build the backward propagation in three steps:
- LINEAR backward
- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)
### 6.1 - Linear backward
For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).
Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.
<img src="images/linearback_kiank.png" style="width:250px;height:300px;">
<caption><center> **Figure 4** </center></caption>
The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:
$$ dW^{[l]} = \frac{\partial \mathcal{J} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$
$$ db^{[l]} = \frac{\partial \mathcal{J} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$
$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$
**Exercise**: Use the 3 formulas above to implement linear_backward().
```
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
    """
    Backward pass through the linear part of a single layer (layer l).

    Arguments:
    dZ -- gradient of the cost with respect to the linear output Z of layer l
    cache -- tuple (A_prev, W, b) saved during the forward pass of this layer

    Returns:
    dA_prev -- gradient of the cost w.r.t. the previous activation, same shape as A_prev
    dW -- gradient of the cost w.r.t. W, same shape as W
    db -- gradient of the cost w.r.t. b, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]  # number of training examples

    # Formulas (8)-(10): average dW and db over the batch; dA_prev has no 1/m factor.
    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape

    return dA_prev, dW, db
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```
** Expected Output**:
```
dA_prev =
[[-1.15171336 0.06718465 -0.3204696 2.09812712]
[ 0.60345879 -3.72508701 5.81700741 -3.84326836]
[-0.4319552 -1.30987417 1.72354705 0.05070578]
[-0.38981415 0.60811244 -1.25938424 1.47191593]
[-2.52214926 2.67882552 -0.67947465 1.48119548]]
dW =
[[ 0.07313866 -0.0976715 -0.87585828 0.73763362 0.00785716]
[ 0.85508818 0.37530413 -0.59912655 0.71278189 -0.58931808]
[ 0.97913304 -0.24376494 -0.08839671 0.55151192 -0.10290907]]
db =
[[-0.14713786]
[-0.11313155]
[-0.13209101]]
```
### 6.2 - Linear-Activation backward
Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**.
To help you implement `linear_activation_backward`, we provided two backward functions:
- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:
```python
dZ = sigmoid_backward(dA, activation_cache)
```
- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:
```python
dZ = relu_backward(dA, activation_cache)
```
If $g(.)$ is the activation function,
`sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$.
**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.
```
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
    """
    Backward pass through one LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer l
    cache -- tuple (linear_cache, activation_cache) stored during the forward pass
    activation -- activation used in this layer, either "sigmoid" or "relu"

    Returns:
    dA_prev -- gradient of the cost w.r.t. the previous activation, same shape as A_prev
    dW -- gradient of the cost w.r.t. W of this layer, same shape as W
    db -- gradient of the cost w.r.t. b of this layer, same shape as b
    """
    linear_cache, activation_cache = cache

    # First undo the activation (dZ = dA * g'(Z)), then the linear step.
    if activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    elif activation == "relu":
        dZ = relu_backward(dA, activation_cache)

    dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
dAL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```
**Expected output with sigmoid:**
<table style="width:100%">
<tr>
<td > dA_prev </td>
<td >[[ 0.11017994 0.01105339]
[ 0.09466817 0.00949723]
[-0.05743092 -0.00576154]] </td>
</tr>
<tr>
<td > dW </td>
<td > [[ 0.10266786 0.09778551 -0.01968084]] </td>
</tr>
<tr>
<td > db </td>
<td > [[-0.05729622]] </td>
</tr>
</table>
**Expected output with relu:**
<table style="width:100%">
<tr>
<td > dA_prev </td>
<td > [[ 0.44090989 0. ]
[ 0.37883606 0. ]
[-0.2298228 0. ]] </td>
</tr>
<tr>
<td > dW </td>
<td > [[ 0.44513824 0.37371418 -0.10478989]] </td>
</tr>
<tr>
<td > db </td>
<td > [[-0.20837892]] </td>
</tr>
</table>
### 6.3 - L-Model Backward
Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass.
<img src="images/mn_backward.png" style="width:450px;height:300px;">
<caption><center> **Figure 5** : Backward pass </center></caption>
** Initializing backpropagation**:
To backpropagate through this network, we know that the output is,
$A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.
To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):
```python
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
```
You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula :
$$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$
For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.
**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model.
```
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
    """
    Backward pass for the whole [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID model.

    Arguments:
    AL -- probability vector, output of L_model_forward()
    Y -- true "label" vector (0 if non-cat, 1 if cat)
    caches -- list of caches:
                caches[l] for l = 0...L-2 come from linear_activation_forward() with "relu"
                caches[L-1] comes from linear_activation_forward() with "sigmoid"

    Returns:
    grads -- dictionary of gradients:
                grads["dA" + str(l)], grads["dW" + str(l)], grads["db" + str(l)]
    """
    grads = {}
    L = len(caches)  # number of layers
    Y = Y.reshape(AL.shape)  # make Y the same shape as AL

    # Derivative of the cross-entropy cost with respect to AL,
    # equivalent to -(Y/AL - (1-Y)/(1-AL)).
    dAL = np.divide(1 - Y, 1 - AL) - np.divide(Y, AL)

    # Output layer L uses the sigmoid activation.
    dA, grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(
        dAL, caches[L - 1], activation="sigmoid")
    grads["dA" + str(L - 1)] = dA

    # Hidden layers L-1 down to 1 use the relu activation.
    for layer in range(L - 1, 0, -1):
        dA, dW, db = linear_activation_backward(
            grads["dA" + str(layer)], caches[layer - 1], activation="relu")
        grads["dA" + str(layer - 1)] = dA
        grads["dW" + str(layer)] = dW
        grads["db" + str(layer)] = db

    return grads
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
```
**Expected Output**
<table style="width:60%">
<tr>
<td > dW1 </td>
<td > [[ 0.41010002 0.07807203 0.13798444 0.10502167]
[ 0. 0. 0. 0. ]
[ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td>
</tr>
<tr>
<td > db1 </td>
<td > [[-0.22007063]
[ 0. ]
[-0.02835349]] </td>
</tr>
<tr>
<td > dA1 </td>
<td > [[ 0.12913162 -0.44014127]
[-0.14175655 0.48317296]
[ 0.01663708 -0.05670698]] </td>
</tr>
</table>
### 6.4 - Update Parameters
In this section you will update the parameters of the model, using gradient descent:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$
where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary.
**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.
**Instructions**:
Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.
```
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
    """
    Apply one gradient-descent step to every W[l] and b[l].

    Arguments:
    parameters -- dictionary of current parameters ("W1", "b1", ..., "WL", "bL")
    grads -- dictionary of gradients from L_model_backward ("dW1", "db1", ...)
    learning_rate -- step size alpha used in W[l] = W[l] - alpha * dW[l]

    Returns:
    parameters -- the same dictionary with every entry updated in place:
                  parameters["W" + str(l)] = ...
                  parameters["b" + str(l)] = ...
    """
    num_layers = len(parameters) // 2  # each layer contributes one W and one b

    # Gradient-descent update for layers 1..L (in-place on the arrays).
    for layer in range(1, num_layers + 1):
        parameters["W" + str(layer)] -= learning_rate * grads["dW" + str(layer)]
        parameters["b" + str(layer)] -= learning_rate * grads["db" + str(layer)]

    return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
```
**Expected Output**:
<table style="width:100%">
<tr>
<td > W1 </td>
<td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
[-1.76569676 -0.80627147 0.51115557 -1.18258802]
[-1.0535704 -0.86128581 0.68284052 2.20374577]] </td>
</tr>
<tr>
<td > b1 </td>
<td > [[-0.04659241]
[-1.28888275]
[ 0.53405496]] </td>
</tr>
<tr>
<td > W2 </td>
<td > [[-0.55569196 0.0354055 1.32964895]]</td>
</tr>
<tr>
<td > b2 </td>
<td > [[-0.84610769]] </td>
</tr>
</table>
## 7 - Conclusion
Congrats on implementing all the functions required for building a deep neural network!
We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier.
In the next assignment you will put all these together to build two models:
- A two-layer neural network
- An L-layer neural network
You will in fact use these models to classify cat vs non-cat images!
| github_jupyter |
# Saving and Loading Models
In this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import fc_model
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here we can see one of the images.
```
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
```
# Train a network
To make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.
```
# Create the network, define the criterion and optimizer
model = fc_model.Network(784, 10, [512, 256, 128])
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)
```
## Saving and loading networks
As you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.
The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.
```
print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())
```
The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.
```
torch.save(model.state_dict(), 'checkpoint.pth')
```
Then we can load the state dict with `torch.load`.
```
state_dict = torch.load('checkpoint.pth')
print(state_dict.keys())
```
And to load the state dict in to the network, you do `model.load_state_dict(state_dict)`.
```
model.load_state_dict(state_dict)
```
Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.
```
# Try this
model = fc_model.Network(784, 10, [400, 200, 100])
# This will throw an error because the tensor sizes are wrong!
model.load_state_dict(state_dict)
```
This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to completely rebuild the model.
```
checkpoint = {'input_size': 784,
'output_size': 10,
'hidden_layers': [each.out_features for each in model.hidden_layers],
'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
```
Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints.
```
def load_checkpoint(filepath):
    """Rebuild an fc_model.Network from a saved checkpoint and load its weights.

    The checkpoint at *filepath* must hold the architecture keys
    ('input_size', 'output_size', 'hidden_layers') plus a 'state_dict'.
    """
    ckpt = torch.load(filepath)
    net = fc_model.Network(ckpt['input_size'],
                           ckpt['output_size'],
                           ckpt['hidden_layers'])
    net.load_state_dict(ckpt['state_dict'])
    return net
model = load_checkpoint('checkpoint.pth')
print(model)
```
| github_jupyter |
**Copyright 2020 The TF-Agents Authors.**
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# SAC minitaur with the Actor-Learner API
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/agents/tutorials/7_SAC_minitaur_tutorial">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/7_SAC_minitaur_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/7_SAC_minitaur_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/7_SAC_minitaur_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Introduction
This example shows how to train a [Soft Actor Critic](https://arxiv.org/abs/1812.05905) agent on the [Minitaur](https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_envs/bullet/minitaur.py) environment.
If you've worked through the [DQN Colab](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) this should feel very familiar. Notable changes include:
* Changing the agent from DQN to SAC.
* Training on Minitaur which is a much more complex environment than CartPole. The Minitaur environment aims to train a quadruped robot to move forward.
* Using the TF-Agents Actor-Learner API for distributed Reinforcement Learning.
The API supports both distributed data collection using an experience replay buffer and variable container (parameter server) and distributed training across multiple devices. The API is designed to be very simple and modular. We utilize [Reverb](https://deepmind.com/research/open-source/Reverb) for both replay buffer and variable container and [TF DistributionStrategy API](https://www.tensorflow.org/guide/distributed_training) for distributed training on GPUs and TPUs.
If you haven't installed the following dependencies, run:
```
!sudo apt-get install -y xvfb ffmpeg
!pip install gym
!pip install 'imageio==2.4.0'
!pip install matplotlib
!pip install PILLOW
!pip install tf-agents[reverb]
!pip install pybullet
```
## Setup
First we will import the different tools that we need.
```
import base64
import imageio
import IPython
import matplotlib.pyplot as plt
import os
import reverb
import tempfile
import PIL.Image
import tensorflow as tf
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import sac_agent
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import suite_pybullet
from tf_agents.experimental.train import actor
from tf_agents.experimental.train import learner
from tf_agents.experimental.train import triggers
from tf_agents.experimental.train.utils import spec_utils
from tf_agents.experimental.train.utils import strategy_utils
from tf_agents.experimental.train.utils import train_utils
from tf_agents.metrics import py_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_py_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
tempdir = tempfile.gettempdir()
```
## Hyperparameters
```
env_name = "MinitaurBulletEnv-v0" # @param {type:"string"}
# Use "num_iterations = 1e6" for better results (2 hrs)
# 1e5 is just so this doesn't take too long (1 hr)
num_iterations = 100000 # @param {type:"integer"}
initial_collect_steps = 10000 # @param {type:"integer"}
collect_steps_per_iteration = 1 # @param {type:"integer"}
replay_buffer_capacity = 10000 # @param {type:"integer"}
batch_size = 256 # @param {type:"integer"}
critic_learning_rate = 3e-4 # @param {type:"number"}
actor_learning_rate = 3e-4 # @param {type:"number"}
alpha_learning_rate = 3e-4 # @param {type:"number"}
target_update_tau = 0.005 # @param {type:"number"}
target_update_period = 1 # @param {type:"number"}
gamma = 0.99 # @param {type:"number"}
reward_scale_factor = 1.0 # @param {type:"number"}
actor_fc_layer_params = (256, 256)
critic_joint_fc_layer_params = (256, 256)
log_interval = 5000 # @param {type:"integer"}
num_eval_episodes = 20 # @param {type:"integer"}
eval_interval = 10000 # @param {type:"integer"}
policy_save_interval = 5000 # @param {type:"integer"}
```
## Environment
Environments in RL represent the task or problem that we are trying to solve. Standard environments can be easily created in TF-Agents using `suites`. We have different `suites` for loading environments from sources such as the OpenAI Gym, Atari, DM Control, etc., given a string environment name.
Now let's load the Minitaur environment from the Pybullet suite.
```
env = suite_pybullet.load(env_name)
env.reset()
PIL.Image.fromarray(env.render())
```
In this environment the goal is for the agent to train a policy that will control the Minitaur robot and have it move forward as fast as possible. Episodes last 1000 steps and the return will be the sum of rewards throughout the episode.
Let's look at the information the environment provides as an `observation` which the policy will use to generate `actions`.
```
print('Observation Spec:')
print(env.time_step_spec().observation)
print('Action Spec:')
print(env.action_spec())
```
As we can see the observation is fairly complex. We receive 28 values representing the angles, velocities and torques for all the motors. In return the environment expects 8 values for the actions between `[-1, 1]`. These are the desired motor angles.
Usually we create two environments: one for collecting data during training and one for evaluation. The environments are written in pure python and use numpy arrays, which the Actor Learner API directly consumes.
```
collect_env = suite_pybullet.load(env_name)
eval_env = suite_pybullet.load(env_name)
```
## Distribution Strategy
We use the DistributionStrategy API to enable running the train step computation across multiple devices such as multiple GPUs or TPUs using data parallelism. The train step:
* Receives a batch of training data
* Splits it across the devices
* Computes the forward step
* Aggregates and computes the MEAN of the loss
* Computes the backward step and performs a gradient variable update
With TF-Agents Learner API and DistributionStrategy API it is quite easy to switch between running the train step on GPUs (using MirroredStrategy) to TPUs (using TPUStrategy) without changing any of the training logic below.
### Enabling the GPU
If you want to try running on a GPU, you'll first need to enable GPUs for the notebook:
* Navigate to Edit→Notebook Settings
* Select GPU from the Hardware Accelerator drop-down
### Picking a strategy
Use `strategy_utils` to generate a strategy. Under the hood, passing the parameter:
* `use_gpu = False` returns `tf.distribute.get_strategy()`, which uses CPU
* `use_gpu = True` returns `tf.distribute.MirroredStrategy()`, which uses all GPUs that are visible to TensorFlow on one machine
```
use_gpu = True #@param {type:"boolean"}
strategy = strategy_utils.get_strategy(tpu=False, use_gpu=use_gpu)
```
All variables and Agents need to be created under `strategy.scope()`, as you'll see below.
## Agent
To create an SAC Agent, we first need to create the networks that it will train. SAC is an actor-critic agent, so we will need two networks.
The critic will give us value estimates for `Q(s,a)`. That is, it will receive as input an observation and an action, and it will give us an estimate of how good that action was for the given state.
```
observation_spec, action_spec, time_step_spec = (
spec_utils.get_tensor_specs(collect_env))
with strategy.scope():
critic_net = critic_network.CriticNetwork(
(observation_spec, action_spec),
observation_fc_layer_params=None,
action_fc_layer_params=None,
joint_fc_layer_params=critic_joint_fc_layer_params,
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
```
We will use this critic to train an `actor` network which will allow us to generate actions given an observation.
The `ActorNetwork` will predict parameters for a tanh-squashed [MultivariateNormalDiag](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/MultivariateNormalDiag) distribution. This distribution will then be sampled, conditioned on the current observation, whenever we need to generate actions.
```
with strategy.scope():
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_spec,
action_spec,
fc_layer_params=actor_fc_layer_params,
continuous_projection_net=(
tanh_normal_projection_network.TanhNormalProjectionNetwork))
```
With these networks at hand we can now instantiate the agent.
```
with strategy.scope():
train_step = train_utils.create_train_step()
tf_agent = sac_agent.SacAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=actor_learning_rate),
critic_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=alpha_learning_rate),
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=tf.math.squared_difference,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
train_step_counter=train_step)
tf_agent.initialize()
```
## Replay Buffer
In order to keep track of the data collected from the environment, we will use [Reverb](https://deepmind.com/research/open-source/Reverb), an efficient, extensible, and easy-to-use replay system by Deepmind. It stores experience data collected by the Actors and consumed by the Learner during training.
In this tutorial, this is less important than `max_size` -- but in a distributed setting with async collection and training, you will probably want to experiment with `rate_limiters.SampleToInsertRatio`, using a samples_per_insert somewhere between 2 and 1000. For example:
```
rate_limiter=reverb.rate_limiters.SampleToInsertRatio(samples_per_insert=3.0, min_size_to_sample=3, error_buffer=3.0))
```
```
table_name = 'uniform_table'
table = reverb.Table(
table_name,
max_size=replay_buffer_capacity,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1))
reverb_server = reverb.Server([table])
```
The replay buffer is constructed using specs describing the tensors that are to be stored, which can be obtained from the agent using `tf_agent.collect_data_spec`.
Since the SAC Agent needs both the current and next observation to compute the loss, we set `sequence_length=2`.
```
reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
sequence_length=2,
table_name=table_name,
local_server=reverb_server)
```
Now we generate a TensorFlow dataset from the Reverb replay buffer. We will pass this to the Learner to sample experiences for training.
```
dataset = reverb_replay.as_dataset(
sample_batch_size=batch_size, num_steps=2).prefetch(50)
experience_dataset_fn = lambda: dataset
```
## Policies
In TF-Agents, policies represent the standard notion of policies in RL: given a `time_step` produce an action or a distribution over actions. The main method is `policy_step = policy.step(time_step)` where `policy_step` is a named tuple `PolicyStep(action, state, info)`. The `policy_step.action` is the `action` to be applied to the environment, `state` represents the state for stateful (RNN) policies and `info` may contain auxiliary information such as log probabilities of the actions.
Agents contain two policies:
- `agent.policy` — The main policy that is used for evaluation and deployment.
- `agent.collect_policy` — A second policy that is used for data collection.
```
tf_eval_policy = tf_agent.policy
eval_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_eval_policy, use_tf_function=True)
tf_collect_policy = tf_agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
```
Policies can be created independently of agents. For example, use `tf_agents.policies.random_py_policy` to create a policy which will randomly select an action for each time_step.
```
random_policy = random_py_policy.RandomPyPolicy(
collect_env.time_step_spec(), collect_env.action_spec())
```
## Actors
The actor manages interactions between a policy and an environment.
* The Actor components contain an instance of the environment (as `py_environment`) and a copy of the policy variables.
* Each Actor worker runs a sequence of data collection steps given the local values of the policy variables.
* Variable updates are done explicitly using the variable container client instance in the training script before calling `actor.run()`.
* The observed experience is written into the replay buffer in each data collection step.
As the Actors run data collection steps, they pass trajectories of (state, action, reward) to the observer, which caches and writes them to the Reverb replay system.
We're storing trajectories for frames [(t0,t1) (t1,t2) (t2,t3), ...] because `stride_length=1`.
```
rb_observer = reverb_utils.ReverbAddTrajectoryObserver(
reverb_replay.py_client,
table_name,
sequence_length=2,
stride_length=1)
```
We create an Actor with the random policy and collect experiences to seed the replay buffer with.
```
initial_collect_actor = actor.Actor(
collect_env,
random_policy,
train_step,
steps_per_run=initial_collect_steps,
observers=[rb_observer])
initial_collect_actor.run()
```
Instantiate an Actor with the collect policy to gather more experiences during training.
```
env_step_metric = py_metrics.EnvironmentSteps()
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=1,
metrics=actor.collect_metrics(10),
summary_dir=os.path.join(tempdir, learner.TRAIN_DIR),
observers=[rb_observer, env_step_metric])
```
Create an Actor which will be used to evaluate the policy during training. We pass in `actor.eval_metrics(num_eval_episodes)` to log metrics later.
```
eval_actor = actor.Actor(
eval_env,
eval_policy,
train_step,
episodes_per_run=num_eval_episodes,
metrics=actor.eval_metrics(num_eval_episodes),
summary_dir=os.path.join(tempdir, 'eval'),
)
```
## Learners
The Learner component contains the agent and performs gradient step updates to the policy variables using experience data from the replay buffer. After one or more training steps, the Learner can push a new set of variable values to the variable container.
```
saved_model_dir = os.path.join(tempdir, learner.POLICY_SAVED_MODEL_DIR)
# Triggers to save the agent's policy checkpoints.
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
tf_agent,
train_step,
interval=policy_save_interval),
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
agent_learner = learner.Learner(
tempdir,
train_step,
tf_agent,
experience_dataset_fn,
triggers=learning_triggers)
```
## Metrics and Evaluation
We instantiated the eval Actor with `actor.eval_metrics` above, which creates most commonly used metrics during policy evaluation:
* Average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes.
* Average episode length.
We run the Actor to generate these metrics.
```
def get_eval_metrics():
eval_actor.run()
results = {}
for metric in eval_actor.metrics:
results[metric.name] = metric.result()
return results
metrics = get_eval_metrics()
def log_eval_metrics(step, metrics):
  """Print every evaluation metric for the given train step on one line."""
  parts = [f'{name} = {value:.6f}' for name, value in metrics.items()]
  print(f'step = {step}: {", ".join(parts)}')
# Log the pre-training (step 0) baseline metrics.
log_eval_metrics(0, metrics)
```
Check out the [metrics module](https://github.com/tensorflow/agents/blob/master/tf_agents/metrics/tf_metrics.py) for other standard implementations of different metrics.
## Training the agent
The training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.
```
#@test {"skip": true}
# NOTE(review): `%%time` is an IPython cell magic, not plain Python; the
# try/except wrapper lets the cell run where the magic is unavailable —
# confirm this behaves as intended outside Colab.
try:
  %%time
except:
  pass
# Reset the train step
tf_agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = get_eval_metrics()["AverageReturn"]
returns = [avg_return]
# Main loop: alternate one collection step with one learner update.
for _ in range(num_iterations):
  # Training.
  collect_actor.run()
  loss_info = agent_learner.run(iterations=1)
  # Evaluating.
  step = agent_learner.train_step_numpy
  # Periodically evaluate the policy and record the average return.
  if eval_interval and step % eval_interval == 0:
    metrics = get_eval_metrics()
    log_eval_metrics(step, metrics)
    returns.append(metrics["AverageReturn"])
  # Periodically log the most recent training loss.
  if log_interval and step % log_interval == 0:
    print('step = {0}: loss = {1}'.format(step, loss_info.loss.numpy()))
# Release replay-buffer resources once training is done.
rb_observer.close()
reverb_server.stop()
```
## Visualization
### Plots
We can plot average return vs global steps to see the performance of our agent. In `Minitaur`, the reward function is based on how far the minitaur walks in 1000 steps and penalizes the energy expenditure.
```
#@test {"skip": true}
# One x-value per recorded evaluation (step 0 plus every eval_interval steps).
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
# NOTE(review): ylim() with no arguments only queries the current limits —
# it does not change the plot; kept as published in the original tutorial.
plt.ylim()
```
### Videos
It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab.
```
def embed_mp4(filename):
  """Embeds an mp4 file in the notebook.

  Args:
    filename: path to an .mp4 file on disk.

  Returns:
    An IPython.display.HTML object rendering an inline HTML5 video player
    with the file's content embedded as a base64 data URI.
  """
  # Context manager guarantees the file handle is closed even if read fails
  # (the original left the handle open).
  with open(filename, 'rb') as f:
    video = f.read()
  b64 = base64.b64encode(video)
  tag = '''
  <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
  Your browser does not support the video tag.
  </video>'''.format(b64.decode())
  return IPython.display.HTML(tag)
```
The following code visualizes the agent's policy for a few episodes:
```
# Render a few evaluation episodes frame-by-frame into an mp4 file.
num_episodes = 3
video_filename = 'sac_minitaur.mp4'
with imageio.get_writer(video_filename, fps=60) as video:
  for _ in range(num_episodes):
    time_step = eval_env.reset()
    video.append_data(eval_env.render())
    # Step the eval policy until the episode terminates, capturing each frame.
    while not time_step.is_last():
      action_step = eval_actor.policy.action(time_step)
      time_step = eval_env.step(action_step.action)
      video.append_data(eval_env.render())
# Display the recorded video inline in the notebook.
embed_mp4(video_filename)
```
| github_jupyter |
<img align="center" style="max-width: 1000px" src="banner.png">
<img align="right" style="max-width: 200px; height: auto" src="hsg_logo.png">
## Lab 02 - "Artificial Neural Networks"
Machine Learning, University of St. Gallen, Spring Term 2022
The lab environment of the "Coding and Artificial Intelligence" IEMBA course at the University of St. Gallen (HSG) is based on Jupyter Notebooks (https://jupyter.org), which allow to perform a variety of statistical evaluations and data analyses.
In this lab, we will learn how to implement, train, and apply our first **Artificial Neural Network (ANN)** using a Python library named `PyTorch`. The `PyTorch` library is an open-source machine learning library for Python, used for a variety of applications such as image classification and natural language processing. We will use the implemented neural network to learn to again classify images of fashion articles from the **Fashion-MNIST** dataset.
The figure below illustrates a high-level view of the machine learning process we aim to establish in this lab:
<img align="center" style="max-width: 700px" src="classification.png">
As always, please don't hesitate to ask all your questions either during the lab, post them in our CANVAS (StudyNet) forum (https://learning.unisg.ch), or send us an email (using the course email).
## 1. Lab Objectives:
After today's lab, you should be able to:
> 1. Understand the basic concepts, intuitions and major building blocks of **Artificial Neural Networks (ANNs)**.
> 2. Know how to use Python's **PyTorch library** to train and evaluate neural network based models.
> 3. Understand how to apply neural networks to **classify images** of handwritten digits.
> 4. Know how to **interpret the detection results** of the network as well as its **reconstruction loss**.
Before we start let's watch a motivational video:
```
from IPython.display import YouTubeVideo
# Official Intro | GTC 2017 | I AM AI"
# YouTubeVideo('SUNPrR4o5ZA', width=800, height=400)
```
## 2. Setup of the Jupyter Notebook Environment
Similar to the previous labs, we need to import a couple of Python libraries that allow for data analysis and data visualization. We will mostly use the `PyTorch`, `Numpy`, `Scikit-Learn`, `Matplotlib` and the `Seaborn` and a few utility libraries throughout this lab:
```
# import standard python libraries
import os, urllib, io
from datetime import datetime
import numpy as np
```
Import the Python machine / deep learning libraries:
```
# import the PyTorch deep learning libary
import torch, torchvision
import torch.nn.functional as F
from torch import nn, optim
```
Import the sklearn classification metrics:
```
# import sklearn classification evaluation library
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
```
Import Python plotting libraries:
```
# import matplotlib, seaborn, and PIL data visualization libary
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
```
Enable notebook matplotlib inline plotting:
```
%matplotlib inline
```
Import `Google's GDrive` connector and mount your `GDrive` directories:
```
# import the Google Colab GDrive connector
from google.colab import drive
# mount GDrive inside the Colab notebook (prompts for authorization on first run)
drive.mount('/content/drive')
```
Create a structure of `Colab` Notebook sub-directories inside of `GDrive` to to store the data and the trained neural network models:
```
# base Colab Notebooks directory inside the mounted GDrive
notebook_directory = '/content/drive/MyDrive/Colab Notebooks'
# data sub-directory (downloaded FashionMNIST images)
data_directory = '/content/drive/MyDrive/Colab Notebooks/data_fmnist'
# models sub-directory (trained model snapshots, one per epoch)
models_directory = '/content/drive/MyDrive/Colab Notebooks/models_fmnist'
# exist_ok=True makes creation idempotent and removes the check-then-create
# race of the previous `if not os.path.exists(...)` pattern
for directory in (notebook_directory, data_directory, models_directory):
    os.makedirs(directory, exist_ok=True)
```
Set a random `seed` value to obtain reproducable results:
```
# init deterministic seed so results are reproducible across notebook runs
seed_value = 1234
np.random.seed(seed_value) # set numpy seed
torch.manual_seed(seed_value) # set pytorch seed CPU
```
Google Colab provides the use of free GPUs for running notebooks. However, if you just execute this notebook as is, it will use your device's CPU. To run the lab on a GPU, go to `Runtime` > `Change runtime type` and set the Runtime type to `GPU` in the drop-down. Running this lab on a CPU is fine, but you will find that GPU computing is faster. *CUDA* indicates that the lab is being run on GPU.
Enable GPU computing by setting the device flag and init a CUDA seed:
```
# set cpu or gpu enabled device
# NOTE(review): `.type` yields the plain string 'cuda'/'cpu' rather than a
# torch.device object; both forms are accepted by `.to(device)` later on.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu').type
# init deterministic GPU seed (silently ignored when no CUDA device is present)
torch.cuda.manual_seed(seed_value)
# log type of device enabled
print('[LOG] notebook with {} computation enabled'.format(str(device)))
```
Let's determine if we have access to a GPU provided by e.g. `Google's Colab` environment:
```
!nvidia-smi
```
## 3. Dataset Download and Data Assessment
The **Fashion-MNIST database** is a large database of Zalando articles that is commonly used for training various image processing systems. The database is widely used for training and testing in the field of machine learning. Let's have a brief look into a couple of sample images contained in the dataset:
<img align="center" style="max-width: 700px; height: 300px" src="FashionMNIST.png">
Source: https://www.kaggle.com/c/insar-fashion-mnist-challenge
Further details on the dataset can be obtained via Zalando research's [github page](https://github.com/zalandoresearch/fashion-mnist).
The **Fashion-MNIST database** is an image dataset of Zalando's article images, consisting of in total 70,000 images.
The dataset is divided into a set of **60,000 training examples** and a set of **10,000 evaluation examples**. Each example is a **28x28 grayscale image**, associated with a **label from 10 classes**. Zalando created this dataset with the intention of providing a replacement for the popular **MNIST** handwritten digits dataset. It is a useful addition as it is a bit more complex, but still very easy to use. It shares the same image size and train/test split structure as MNIST, and can therefore be used as a drop-in replacement. It requires minimal efforts on preprocessing and formatting the distinct images.
Let's download, transform and inspect the training images of the dataset. Therefore, let's first define the directory in which we aim to store the training data:
```
# GDrive sub-directory that will hold the downloaded training images
train_path = data_directory + '/train_fmnist'
```
Now, let's download the training data accordingly:
```
# define pytorch transformation into tensor format (ToTensor scales the
# uint8 pixel values 0-255 down to floats in [0, 1])
transf = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
# download and transform training images
fashion_mnist_train_data = torchvision.datasets.FashionMNIST(root=train_path, train=True, transform=transf, download=True)
```
Verify the number of training images downloaded:
```
# determine the number of training data images (expected: 60,000)
len(fashion_mnist_train_data)
```
Furthermore, let's inspect a couple of the downloaded training images:
```
# select and set a (random) image id
image_id = 3000
# retrieve the (image tensor, label) pair exhibiting the image id
fashion_mnist_train_data[image_id]
```
Ok, that doesn't seem right :). Let's now separate the image from its label information:
```
# unpack the (image tensor, label) tuple into separate variables
fashion_mnist_train_image, fashion_mnist_train_label = fashion_mnist_train_data[image_id]
```
We can verify the label that our selected image has:
```
# inspect the numeric class label (0-9) of the selected image
fashion_mnist_train_label
```
Ok, we know that the numerical label is 6. Each image is associated with a label from 0 to 9, and this number represents one of the fashion items. So what does 6 mean? Is 6 a bag? A pullover? The order of the classes can be found on Zalando research's [github page](https://github.com/zalandoresearch/fashion-mnist). We need to map each numerical label to its fashion item, which will be useful throughout the lab:
```
# numeric label -> human-readable fashion article, in the official
# Fashion-MNIST class order (see the Zalando research GitHub page)
fashion_classes = dict(enumerate([
    'T-shirt/top',  # 0
    'Trouser',      # 1
    'Pullover',     # 2
    'Dress',        # 3
    'Coat',         # 4
    'Sandal',       # 5
    'Shirt',        # 6
    'Sneaker',      # 7
    'Bag',          # 8
    'Ankle boot',   # 9
]))
```
So, we can determine the fashion item that the label represents:
```
# map the numeric label to its human-readable fashion article
fashion_classes[fashion_mnist_train_label]
```
Great, let's now visually inspect our sample image:
```
# define tensor to PIL-image transformation for plotting
trans = torchvision.transforms.ToPILImage()
# set image plot title
plt.title('Example: {}, Label: {}'.format(str(image_id), fashion_classes[fashion_mnist_train_label]))
# plot the fashion-article sample in grayscale
plt.imshow(trans(fashion_mnist_train_image), cmap='gray')
```
Fantastic, right? Let's now define the directory in which we aim to store the evaluation data:
```
# GDrive sub-directory that will hold the downloaded evaluation images
eval_path = data_directory + '/eval_fmnist'
```
And download the evaluation data accordingly:
```
# define pytorch transformation into tensor format (ToTensor scales the
# uint8 pixel values 0-255 down to floats in [0, 1])
transf = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
# download and transform the evaluation images (train=False selects the test split)
fashion_mnist_eval_data = torchvision.datasets.FashionMNIST(root=eval_path, train=False, transform=transf, download=True)
```
Let's also verify the number of evaluation images downloaded:
```
# determine the number of evaluation data images (expected: 10,000)
len(fashion_mnist_eval_data)
```
## 4. Neural Network Implementation
In this section we, will implement the architecture of the **neural network** we aim to utilize to learn a model that is capable to classify the 28x28 pixel FashionMNIST images of fashion items. However, before we start the implementation let's briefly revisit the process to be established. The following cartoon provides a birds-eye view:
<img align="center" style="max-width: 1000px" src="https://github.com/HSG-AIML/LabGSERM/blob/main/lab_04/process.png?raw=1">
### 4.1 Implementation of the Neural Network Architecture
The neural network, which we name **'FashionMNISTNet'** consists of three **fully-connected layers** (including an “input layer” and two hidden layers). Furthermore, the **FashionMNISTNet** should encompass the following number of neurons per layer: 100 (layer 1), 50 (layer 2) and 10 (layer 3). Meaning the first layer consists of 100 neurons, the second layer of 50 neurons and the third layer of 10 neurons (the number of fashion-article classes we aim to classify).
We will now start implementing the network architecture as a separate Python class. Implementing the network architectures as a **separate class** in Python is good practice in deep learning projects. It will allow us to create and train several instances of the same neural network architecture. This provides us, for example, the opportunity to evaluate different initializations of the network parameters or train models using distinct datasets.
```
# neural network architecture used to classify the 28x28 FashionMNIST images
class FashionMNISTNet(nn.Module):
    """Three-layer fully-connected classifier: 784 -> 100 -> 50 -> 10.

    The final LogSoftmax turns the 10 class scores into log-probabilities,
    matching the negative log-likelihood loss used for training.
    """

    def __init__(self):
        """Instantiate the layers; attribute names define the state_dict keys."""
        super(FashionMNISTNet, self).__init__()
        # fc layer 1: 28*28 input pixels -> 100 hidden units (W*x+b)
        self.linear1 = nn.Linear(28*28, 100, bias=True)
        self.relu1 = nn.ReLU(inplace=True)
        # fc layer 2: 100 -> 50 hidden units (W*x+b)
        self.linear2 = nn.Linear(100, 50, bias=True)
        self.relu2 = nn.ReLU(inplace=True)
        # fc layer 3: 50 -> 10 class scores (W*x+b)
        self.linear3 = nn.Linear(50, 10)
        # normalize the class scores into log-probabilities
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, images):
        """Map a batch of images to per-class log-probabilities."""
        # flatten each image into a 784-dimensional row vector
        flat = images.view(-1, 28*28)
        # two hidden layers, each followed by a ReLU non-linearity
        hidden = self.relu1(self.linear1(flat))
        hidden = self.relu2(self.linear2(hidden))
        # output layer followed by log-softmax
        return self.logsoftmax(self.linear3(hidden))
```
You may have noticed, when reviewing the implementation above, that we applied an additional operator, referred to as **'Softmax'** to the third layer of our neural network.
The **softmax function**, also known as the normalized exponential function, is a function that takes as input a vector of K real numbers, and normalizes it into a probability distribution consisting of K probabilities.
That is, prior to applying softmax, some vector components could be negative, or greater than one; and might not sum to 1; but after application of the softmax, each component will be in the interval $(0,1)$, and the components will add up to 1, so that they can be interpreted as probabilities. In general, the softmax function $\sigma :\mathbb {R} ^{K}\to \mathbb {R} ^{K}$ is defined by the formula:
<center> $\sigma (\mathbf {z} )_{i}=\ln ({e^{z_{i}} / \sum _{j=1}^{K}e^{z_{j}}})$ </center>
for $i = 1, …, K$ and ${\mathbf {z}}=(z_{1},\ldots ,z_{K})\in \mathbb {R} ^{K}$ (Source: https://en.wikipedia.org/wiki/Softmax_function ).
Let's have a look at the simplified three-class example below. The scores of the distinct predicted classes $c_i$ are computed from the forward propagation of the network. We then take the softmax and obtain the probabilities as shown:
<img align="center" style="max-width: 800px" src="https://github.com/HSG-AIML/LabGSERM/blob/main/lab_04/softmax.png?raw=1">
The output of the softmax describes the probability (or if you may, the confidence) of the neural network that a particular sample belongs to a certain class. Thus, for the first example above, the neural network assigns a confidence of 0.49 that it is a 'three', 0.49 that it is a 'four', and 0.03 that it is an 'eight'. The same goes for each of the samples above.
Now, that we have implemented our first neural network we are ready to instantiate a network model to be trained:
```
# instantiate an untrained FashionMNISTNet model
model = FashionMNISTNet()
```
Let's push the initialized `FashionMNISTNet` model to the computing `device` that is enabled:
```
# push the model parameters to the enabled computation device (CPU or GPU)
model = model.to(device)
```
Let's double check if our model was deployed to the GPU if available:
```
!nvidia-smi
```
Once the model is initialized, we can visualize the model structure and review the implemented network architecture by execution of the following cell:
```
# print the initialized architecture (layers in registration order)
print('[LOG] FashionMNISTNet architecture:\n\n{}\n'.format(model))
```
Looks like intended? Brilliant! Finally, let's have a look into the number of model parameters that we aim to train in the next steps of the notebook:
```
# count the number of trainable model parameters:
# sum the element count (numel) of every parameter tensor registered
# with the model — replaces the manual accumulator loop
num_params = sum(param.numel() for param in model.parameters())
# print the number of model parameters
print('[LOG] Number of to be trained FashionMNISTNet model parameters: {}.'.format(num_params))
```
Ok, our "simple" FashionMNISTNet model already encompasses an impressive number of 84'060 model parameters to be trained.
### 4.2 Specification of the Neural Network Loss Function
Now that we have implemented the **FashionMNISTNet** we are ready to train the network. However, prior to starting the training, we need to define an appropriate loss function. Remember, we aim to train our model to learn a set of model parameters $\theta$ that minimize the classification error of the true class $c^{i}$ of a given handwritten digit image $x^{i}$ and its predicted class $\hat{c}^{i} = f_\theta(x^{i})$ as faithfully as possible.
Thereby, the training objective is to learn a set of optimal model parameters $\theta^*$ that optimize $\arg\min_{\theta} \|C - f_\theta(X)\|$ over all training images in the FashionMNIST dataset. To achieve this optimization objective, one typically minimizes a loss function $\mathcal{L_{\theta}}$ as part of the network training. In this lab we use the **'Negative Log Likelihood (NLL)'** loss, defined by:
<center> $\mathcal{L}^{NLL}_{\theta} (c_i, \hat c_i) = - \frac{1}{N} \sum_{i=1}^N \log (\hat{c}_i) $, </center>
for a set of $n$-FashionMNIST images $x^{i}$, $i=1,...,n$ and their respective predicted class labels $\hat{c}^{i}$. This is summed for all the correct classes.
Let's have a look at a brief example:
<img align="center" style="max-width: 900px" src="./loss.png">
As we see in the example, we first compute class predictions for each class. We normalize the predictions with a softmax over all classes, so that we end up with 'probabilities' (that's what comes out of the NN).
To compute the loss, we pick the predicted probability of the true class $\hat{c}_i$ and take the log of it. As the probabilities are on [0,1], the log of them are on [-$\infty$,0]. To maximize the probability of the true class $\hat{c}_i$, we have to maximize $log(\hat{c}_i)$. Due to the softmax, the predicted probabilties of all classes $c_i$ sum to 1: $\sum_i c_i = 1$. Therefore, by maximizing the probability of the true class $\hat{c}_i$, we minimize the probabilities of all the other (wrong) classes.
In ML, it has become common to minimize an 'error' or 'loss' term. Therefore, we sum over the log-likelihoods and take the negative of it. Small values (close to $0$) here translate to high values in true class probability.
During training the **NLL** loss will penalize models that result in a high classification error between the predicted class labels $\hat{c}^{i}$ and their respective true class label $c^{i}$. Luckily, an implementation of the NLL loss is already available in PyTorch! It can be instantiated "off-the-shelf" via the execution of the following PyTorch command:
```
# define the optimization criterion / loss function:
# negative log likelihood, matching the LogSoftmax output of the network
nll_loss = nn.NLLLoss()
```
Let's also push the initialized `nll_loss` computation to the computing `device` that is enabled:
```
# push the loss computation to the enabled computation device
# (NLLLoss holds no parameters, so this is effectively a no-op on CPU)
nll_loss = nll_loss.to(device)
```
## 5. Neural Network Model Training
In this section, we will train our neural network model (as implemented in the section above) using the transformed images of fashion items. More specifically, we will have a detailed look into the distinct training steps as well as how to monitor the training progress.
### 5.1. Preparing the Network Training
So far, we have pre-processed the dataset, implemented the ANN and defined the classification error. Let's now start to train a corresponding model for **20 epochs** and a **mini-batch size of 128** FashionMNIST images per batch. This implies that the whole dataset will be fed to the ANN 20 times in chunks of 128 images yielding to **469 mini-batches** (60.000 images / 128 images per mini-batch) per epoch.
```
# specify the training parameters
num_epochs = 20 # number of training epochs (full passes over the 60k images)
mini_batch_size = 128 # size of the mini-batches (=> 469 batches per epoch)
```
Based on the loss magnitude of a certain mini-batch PyTorch automatically computes the gradients. But even better, based on the gradient, the library also helps us in the optimization and update of the network parameters $\theta$.
We will use the **Stochastic Gradient Descent (SGD) optimization** and set the learning-rate $l = 0.001$. Each mini-batch step the optimizer will update the model parameters $\theta$ values according to the degree of classification error (the NLL loss).
```
# define learning rate and optimization strategy:
# plain stochastic gradient descent over all trainable model parameters
learning_rate = 0.001
optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)
```
Now that we have successfully implemented and defined the three ANN building blocks let's take some time to review the `FashionMNISTNet` model definition as well as the `loss`. Please, read the above code and comments carefully and don't hesitate to let us know any questions you might have.
Furthermore, lets specify and instantiate a corresponding PyTorch data loader that feeds the image tensors to our neural network:
```
# data loader that shuffles and batches the training images every epoch
fashion_mnist_train_dataloader = torch.utils.data.DataLoader(fashion_mnist_train_data, batch_size=mini_batch_size, shuffle=True)
```
### 5.2. Running the Network Training
Finally, we start training the model. The detailed training procedure for each mini-batch is performed as follows:
>1. do a forward pass through the FashionMNISTNet network,
>2. compute the negative log likelihood classification error $\mathcal{L}^{NLL}_{\theta}(c^{i};\hat{c}^{i})$,
>3. do a backward pass through the FashionMNISTNet network, and
>4. update the parameters of the network $f_\theta(\cdot)$.
To ensure learning while training our ANN model, we will monitor whether the loss decreases with progressing training. Therefore, we obtain and evaluate the classification performance of the entire training dataset after each training epoch. Based on this evaluation, we can conclude on the training progress and whether the loss is converging (indicating that the model might not improve any further).
The following elements of the network training code below should be given particular attention:
>- `loss.backward()` computes the gradients based on the magnitude of the classification loss,
>- `optimizer.step()` updates the network parameters based on the gradient.
```
# init collection of training epoch losses
train_epoch_losses = []
# set the model in training mode
model.train()
# train the FashionMNISTNet model
for epoch in range(num_epochs):
    # init collection of mini-batch losses
    train_mini_batch_losses = []
    # iterate over all mini-batches of the epoch
    for i, (images, labels) in enumerate(fashion_mnist_train_dataloader):
        # push mini-batch data to the computation device
        images = images.to(device)
        labels = labels.to(device)
        # run forward pass through the network
        output = model(images)
        # reset graph gradients accumulated by the previous mini-batch
        model.zero_grad()
        # determine classification loss
        loss = nll_loss(output, labels)
        # run backward pass (computes gradients w.r.t. all model parameters)
        loss.backward()
        # update network parameters via the optimizer's update rule
        optimizer.step()
        # collect mini-batch classification loss (as a plain Python float)
        train_mini_batch_losses.append(loss.data.item())
    # determine mean mini-batch loss of the epoch
    train_epoch_loss = np.mean(train_mini_batch_losses)
    # print epoch loss
    now = datetime.utcnow().strftime("%Y%m%d-%H:%M:%S")
    print('[LOG {}] epoch: {} train-loss: {}'.format(str(now), str(epoch), str(train_epoch_loss)))
    # set filename of the current model snapshot
    model_name = 'fashion_mnist_model_epoch_{}.pth'.format(str(epoch))
    # save current model snapshot to the GDrive models directory
    torch.save(model.state_dict(), os.path.join(models_directory, model_name))
    # collect mean mini-batch loss of the epoch
    train_epoch_losses.append(train_epoch_loss)
```
Upon successful training, let's visualize and inspect the training loss per epoch:
```
# prepare plot
fig = plt.figure()
ax = fig.add_subplot(111)
# add grid
ax.grid(linestyle='dotted')
# plot the training epochs vs. the epochs' classification error
ax.plot(np.array(range(1, len(train_epoch_losses)+1)), train_epoch_losses, label='epoch loss (blue)')
# add axis legends — raw strings so the LaTeX backslashes (e.g. \mathcal)
# are not interpreted as (invalid) string escape sequences
ax.set_xlabel(r"[training epoch $e_i$]", fontsize=10)
ax.set_ylabel(r"[Classification Error $\mathcal{L}^{NLL}$]", fontsize=10)
# set plot legend
plt.legend(loc="upper right", numpoints=1, fancybox=True)
# add plot title
plt.title(r'Training Epochs $e_i$ vs. Classification Error $L^{NLL}$', fontsize=10);
```
Ok, fantastic. The training error is nicely going down. We could train the network a couple more epochs until the error converges. But let's stay with the 20 training epochs for now and continue with evaluating our trained model.
## 6. Neural Network Model Evaluation
Before evaluating our model let's load the best performing model. Remember, that we stored a snapshot of the model after each training epoch to our local model directory. We will now load the last snapshot saved.
```
### load state_dict from some url (alternative path, kept for reference)
# # restore pre-trained model snapshot
# best_model_name = 'https://raw.githubusercontent.com/HSG-AIML-Teaching/ML2022-Lab/main/lab_02/models/fashion_mnist_model_epoch_19.pth'
# # read stored model from the remote location
# model_bytes = urllib.request.urlopen(best_model_name)
# # load model tensor from io.BytesIO object
# model_buffer = io.BytesIO(model_bytes.read())
# # init pre-trained model class
# best_model = FashionMNISTNet()
# # load pre-trained models
# best_model.load_state_dict(torch.load(model_buffer, map_location=torch.device('cpu')))
## load state_dict from local path
# restore the last (epoch 19) pre-trained model snapshot saved during training
best_model_name = models_directory +'/fashion_mnist_model_epoch_19.pth'
# load state_dict from path
state_dict_best = torch.load(best_model_name)
# init a fresh model instance (on CPU) to receive the trained weights
best_model = FashionMNISTNet()
# load pre-trained state_dict into the model
best_model.load_state_dict(state_dict_best)
```
Let's inspect if the model was loaded successfully:
```
# set model in evaluation mode (disables training-only behaviour such as dropout)
best_model.eval()
```
To evaluate our trained model, we need to feed the FashionMNIST images reserved for evaluation (the images that we didn't use as part of the training process) through the model. Therefore, let's again define a corresponding PyTorch data loader that feeds the image tensors to our neural network:
```
# one batch containing all 10,000 evaluation images; shuffling does not
# affect the aggregate loss computed below
fashion_mnist_eval_dataloader = torch.utils.data.DataLoader(fashion_mnist_eval_data, batch_size=10000, shuffle=True)
```
We will now evaluate the trained model using the same mini-batch approach as we did throughout the network training and derive the mean negative log-likelihood loss of the mini-batches:
```
# init collection of mini-batch losses
eval_mini_batch_losses = []
# iterate over all mini-batches of the evaluation data
# NOTE(review): images/labels are not pushed to `device` here — this works
# because `best_model` was re-instantiated on the CPU above; if the model is
# ever moved to the GPU, matching `.to(device)` calls must be added.
for i, (images, labels) in enumerate(fashion_mnist_eval_dataloader):
    # run forward pass through the network
    output = best_model(images)
    # determine classification loss
    loss = nll_loss(output, labels)
    # collect mini-batch classification loss
    eval_mini_batch_losses.append(loss.data.item())
# determine mean mini-batch loss over the evaluation set
eval_loss = np.mean(eval_mini_batch_losses)
# print evaluation loss
now = datetime.utcnow().strftime("%Y%m%d-%H:%M:%S")
print('[LOG {}] eval-loss: {}'.format(str(now), str(eval_loss)))
```
Ok, great. The evaluation loss looks in-line with our training loss. Let's now inspect a few sample predictions to get an impression of the model quality. Therefore, we will again pick a random image of our evaluation dataset and retrieve its PyTorch tensor as well as the corresponding label:
```
# set (random) image id
image_id = 2000
# retrieve the (image tensor, label) pair exhibiting the image id
fashion_mnist_eval_image, fashion_mnist_eval_label = fashion_mnist_eval_data[image_id]
```
Let's now inspect the true class of the image we selected:
```
# map the numeric evaluation label to its human-readable fashion article
fashion_classes[fashion_mnist_eval_label]
```
Ok, the randomly selected image should contain a bag. Let's inspect the image accordingly:
```
# define tensor to PIL-image transformation for plotting
trans = torchvision.transforms.ToPILImage()
# set image plot title
plt.title('Example: {}, Label: {}'.format(str(image_id), fashion_classes[fashion_mnist_eval_label]))
# plot the fashion-article sample in grayscale
plt.imshow(trans(fashion_mnist_eval_image), cmap='gray')
```
Let's compare the true label with the prediction of our model:
```
# forward the single evaluation image through the trained model; the
# view(-1, 28*28) inside forward() adds the batch dimension implicitly,
# returning a (1, 10) tensor of per-class log-probabilities
best_model(fashion_mnist_eval_image)
```
We can even determine the likelihood of the most probable class:
```
# the class with the highest log-probability is the model's prediction
most_probable = torch.argmax(best_model(fashion_mnist_eval_image), dim=1).item()
print('Most probable class: {}'.format(most_probable))
print('This class represents the following fashion article: {}'.format(fashion_classes[most_probable]))
```
Let's now obtain the predictions for all the fashion item images of the evaluation data:
```
# obtain predictions for all evaluation images in a single forward pass.
# `.data` holds the raw uint8 pixel values (0-255); dividing by 255 rescales
# them to [0, 1] so the inputs match the ToTensor() normalization used during
# training — feeding unnormalized pixels would put the model on a very
# different input scale than it was trained on.
predictions = torch.argmax(best_model(fashion_mnist_eval_data.data.float() / 255.0), dim=1)
```
Furthermore, let's obtain the overall classifcation accuracy:
```
# overall share of correctly classified evaluation images
metrics.accuracy_score(fashion_mnist_eval_data.targets, predictions.detach())
```
Let's also inspect the confusion matrix to determine major sources of misclassification:
```
# determine classification matrix of the predicted and target classes
mat = confusion_matrix(fashion_mnist_eval_data.targets, predictions.detach())
# initialize the plot and define size
plt.figure(figsize=(8, 8))
# plot corresponding confusion matrix; the transpose puts true labels on the
# x-axis and predicted labels on the y-axis, matching the axis labels below
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='YlOrRd_r', xticklabels=fashion_classes.values(), yticklabels=fashion_classes.values())
plt.tick_params(axis='both', which='major', labelsize=8, labelbottom = False, bottom=False, top = False, left = False, labeltop=True)
# set plot title
plt.title('Fashion MNIST classification matrix')
# set axis labels
plt.xlabel('[true label]')
plt.ylabel('[predicted label]');
```
Ok, we can easily see that our current model confuses sandals with either sneakers or ankle boots. However, the inverse does not really hold. The model sometimes confuses sneakers with ankle boots, and only very rarely with sandals. The same holds for ankle boots. Our model also has issues distinguishing shirts from coats (and, to a lesser degree, from T-shirts and pullovers).
These mistakes are not very surprising, as these items exhibit a high similarity.
## 7. Lab Summary:
In this lab, a step by step introduction into the **design, implementation, training and evaluation** of neural networks to classify images of fashion items is presented. The code and exercises presented in this lab may serve as a starting point for developing more complex, deeper and tailored **neural networks**.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Premade Estimators
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/estimator/premade"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/estimator/premade.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/estimator/premade.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/estimator/premade.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial shows you
how to solve the Iris classification problem in TensorFlow using Estimators. An Estimator is TensorFlow's high-level representation of a complete model, and it has been designed for easy scaling and asynchronous training. For more details see
[Estimators](https://www.tensorflow.org/guide/estimator).
Note that in TensorFlow 2.0, the [Keras API](https://www.tensorflow.org/guide/keras) can accomplish many of these same tasks, and is believed to be an easier API to learn. If you are starting fresh, we would recommend you start with Keras. For more information about the available high level APIs in TensorFlow 2.0, see [Standardizing on Keras](https://medium.com/tensorflow/standardizing-on-keras-guidance-on-high-level-apis-in-tensorflow-2-0-bad2b04c819a).
## First things first
In order to get started, you will first import TensorFlow and a number of libraries you will need.
```
import tensorflow as tf
import pandas as pd
```
## The data set
The sample program in this document builds and tests a model that
classifies Iris flowers into three different species based on the size of their
[sepals](https://en.wikipedia.org/wiki/Sepal) and
[petals](https://en.wikipedia.org/wiki/Petal).
You will train a model using the Iris data set. The Iris data set contains four features and one
[label](https://developers.google.com/machine-learning/glossary/#label).
The four features identify the following botanical characteristics of
individual Iris flowers:
* sepal length
* sepal width
* petal length
* petal width
Based on this information, you can define a few helpful constants for parsing the data:
```
# Column names for the Iris CSVs: the four numeric features plus the label.
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
# Class names indexed by the integer label (0, 1, 2).
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
```
Next, download and parse the Iris data set using Keras and Pandas. Note that you keep distinct datasets for training and testing.
```
# Download the train/test CSVs (Keras caches them under ~/.keras/datasets).
train_path = tf.keras.utils.get_file(
    "iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
test_path = tf.keras.utils.get_file(
    "iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")

# header=0 skips each file's own header row so our column names replace it.
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
```
You can inspect your data to see that you have four float feature columns and one int32 label.
```
train.head()
```
For each of the datasets, split out the labels, which the model will be trained to predict.
```
# pop() removes the label column in place and returns it, so each DataFrame
# is left holding only the four feature columns.
train_y = train.pop('Species')
test_y = test.pop('Species')

# The label column has now been removed from the features.
train.head()
```
## Overview of programming with Estimators
Now that you have the data set up, you can define a model using a TensorFlow Estimator. An Estimator is any class derived from `tf.estimator.Estimator`. TensorFlow
provides a collection of
`tf.estimator`
(for example, `LinearRegressor`) to implement common ML algorithms. Beyond
those, you may write your own
[custom Estimators](https://www.tensorflow.org/guide/custom_estimators).
We recommend using pre-made Estimators when just getting started.
To write a TensorFlow program based on pre-made Estimators, you must perform the
following tasks:
* Create one or more input functions.
* Define the model's feature columns.
* Instantiate an Estimator, specifying the feature columns and various
hyperparameters.
* Call one or more methods on the Estimator object, passing the appropriate
input function as the source of the data.
Let's see how those tasks are implemented for Iris classification.
## Create input functions
You must create input functions to supply data for training,
evaluating, and prediction.
An **input function** is a function that returns a `tf.data.Dataset` object
which outputs the following two-element tuple:
* [`features`](https://developers.google.com/machine-learning/glossary/#feature) - A Python dictionary in which:
* Each key is the name of a feature.
* Each value is an array containing all of that feature's values.
* `label` - An array containing the values of the
[label](https://developers.google.com/machine-learning/glossary/#label) for
every example.
Just to demonstrate the format of the input function, here's a simple
implementation:
```
def input_evaluation_set():
    """Return a minimal (features, labels) pair in the Estimator input format.

    `features` maps each feature name to an array of per-example values;
    `labels` is a parallel array of integer class ids (indices into SPECIES).
    """
    # FIX: this notebook only imported tensorflow and pandas above, so `np`
    # was undefined here; import numpy locally to keep the cell self-contained.
    import numpy as np

    features = {'SepalLength': np.array([6.4, 5.0]),
                'SepalWidth': np.array([2.8, 2.3]),
                'PetalLength': np.array([5.6, 3.3]),
                'PetalWidth': np.array([2.2, 1.0])}
    labels = np.array([2, 1])  # 2 = Virginica, 1 = Versicolor
    return features, labels
```
Your input function may generate the `features` dictionary and `label` list any
way you like. However, we recommend using TensorFlow's [Dataset API](https://www.tensorflow.org/guide/datasets), which can
parse all sorts of data.
The Dataset API can handle a lot of common cases for you. For example,
using the Dataset API, you can easily read in records from a large collection
of files in parallel and join them into a single stream.
To keep things simple in this example you are going to load the data with
[pandas](https://pandas.pydata.org/), and build an input pipeline from this
in-memory data:
```
def input_fn(features, labels, training=True, batch_size=256):
    """An input function for training or evaluating"""
    # Pair each row of the features dict with its label.
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))

    if not training:
        # Evaluation: a single ordered pass over the data.
        return dataset.batch(batch_size)

    # Training: shuffle and repeat indefinitely; the caller bounds the
    # number of steps via the Estimator's `steps` argument.
    return dataset.shuffle(1000).repeat().batch(batch_size)
```
## Define the feature columns
A [**feature column**](https://developers.google.com/machine-learning/glossary/#feature_columns)
is an object describing how the model should use raw input data from the
features dictionary. When you build an Estimator model, you pass it a list of
feature columns that describes each of the features you want the model to use.
The `tf.feature_column` module provides many options for representing data
to the model.
For Iris, the 4 raw features are numeric values, so we'll build a list of
feature columns to tell the Estimator model to represent each of the four
features as 32-bit floating-point values. Therefore, the code to create the
feature column is:
```
# Feature columns describe how to use the input.
# All four Iris features are numeric, so one numeric_column per DataFrame column.
my_feature_columns = []
for key in train.keys():
    my_feature_columns.append(tf.feature_column.numeric_column(key=key))
```
Feature columns can be far more sophisticated than those we're showing here. You can read more about Feature Columns in [this guide](https://www.tensorflow.org/guide/feature_columns).
Now that you have the description of how you want the model to represent the raw
features, you can build the estimator.
## Instantiate an estimator
The Iris problem is a classic classification problem. Fortunately, TensorFlow
provides several pre-made classifier Estimators, including:
* `tf.estimator.DNNClassifier` for deep models that perform multi-class
classification.
* `tf.estimator.DNNLinearCombinedClassifier` for wide & deep models.
* `tf.estimator.LinearClassifier` for classifiers based on linear models.
For the Iris problem, `tf.estimator.DNNClassifier` seems like the best choice.
Here's how you instantiate this Estimator:
```
# Build a DNN with 2 hidden layers with 30 and 10 hidden nodes each.
classifier = tf.estimator.DNNClassifier(
    feature_columns=my_feature_columns,
    # Two hidden layers of 30 and 10 nodes respectively.
    hidden_units=[30, 10],
    # The model must choose between 3 classes.
    n_classes=3)
```
## Train, Evaluate, and Predict
Now that you have an Estimator object, you can call methods to do the following:
* Train the model.
* Evaluate the trained model.
* Use the trained model to make predictions.
### Train the model
Train the model by calling the Estimator's `train` method as follows:
```
# Train the Model.
# The lambda captures our arguments while presenting the zero-argument
# callable the Estimator expects; training stops after `steps` batches.
classifier.train(
    input_fn=lambda: input_fn(train, train_y, training=True),
    steps=5000)
```
Note that you wrap up your `input_fn` call in a
[`lambda`](https://docs.python.org/3/tutorial/controlflow.html)
to capture the arguments while providing an input function that takes no
arguments, as expected by the Estimator. The `steps` argument tells the method
to stop training after a number of training steps.
### Evaluate the trained model
Now that the model has been trained, you can get some statistics on its
performance. The following code block evaluates the accuracy of the trained
model on the test data:
```
# training=False makes the input function yield a single epoch, so no
# `steps` argument is needed here.
eval_result = classifier.evaluate(
    input_fn=lambda: input_fn(test, test_y, training=False))

print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
```
Unlike the call to the `train` method, you did not pass the `steps`
argument to evaluate. The `input_fn` for eval only yields a single
[epoch](https://developers.google.com/machine-learning/glossary/#epoch) of data.
The `eval_result` dictionary also contains the `average_loss` (mean loss per sample), the `loss` (mean loss per mini-batch) and the value of the estimator's `global_step` (the number of training iterations it underwent).
### Making predictions (inferring) from the trained model
You now have a trained model that produces good evaluation results.
You can now use the trained model to predict the species of an Iris flower
based on some unlabeled measurements. As with training and evaluation, you make
predictions using a single function call:
```
# Generate predictions from the model
expected = ['Setosa', 'Versicolor', 'Virginica']
predict_x = {
    'SepalLength': [5.1, 5.9, 6.9],
    'SepalWidth': [3.3, 3.0, 3.1],
    'PetalLength': [1.7, 4.2, 5.4],
    'PetalWidth': [0.5, 1.5, 2.1],
}

def input_fn(features, batch_size=256):
    """An input function for prediction."""
    # No labels here: predict() only consumes the feature dict.
    dataset = tf.data.Dataset.from_tensor_slices(dict(features))
    return dataset.batch(batch_size)

predictions = classifier.predict(
    input_fn=lambda: input_fn(predict_x))
```
The `predict` method returns a Python iterable, yielding a dictionary of
prediction results for each example. The following code prints a few
predictions and their probabilities:
```
# Each prediction is a dict: 'class_ids' holds the winning class index and
# 'probabilities' the softmax scores for all three classes.
for pred_dict, expec in zip(predictions, expected):
    class_id = pred_dict['class_ids'][0]
    probability = pred_dict['probabilities'][class_id]

    print('Prediction is "{}" ({:.1f}%), expected "{}"'.format(
        SPECIES[class_id], 100 * probability, expec))
```
| github_jupyter |
# LeNet Lab Solution

Source: Yan LeCun
## Load Data
Load the MNIST data, which comes pre-loaded with TensorFlow.
You do not need to modify this section.
```
# TF1-era helper that downloads MNIST and exposes train/validation/test splits.
from tensorflow.examples.tutorials.mnist import input_data

# reshape=False keeps images as 28x28x1 arrays instead of flat 784-vectors.
mnist = input_data.read_data_sets("MNIST_data/", reshape=False)
X_train, y_train = mnist.train.images, mnist.train.labels
X_validation, y_validation = mnist.validation.images, mnist.validation.labels
X_test, y_test = mnist.test.images, mnist.test.labels

# Sanity check: each split has exactly one label per image.
assert(len(X_train) == len(y_train))
assert(len(X_validation) == len(y_validation))
assert(len(X_test) == len(y_test))

print()
print("Image Shape: {}".format(X_train[0].shape))
print()
print("Training Set: {} samples".format(len(X_train)))
print("Validation Set: {} samples".format(len(X_validation)))
print("Test Set: {} samples".format(len(X_test)))
```
The MNIST data that TensorFlow pre-loads comes as 28x28x1 images.
However, the LeNet architecture only accepts 32x32xC images, where C is the number of color channels.
In order to reformat the MNIST data into a shape that LeNet will accept, we pad the data with two rows of zeros on the top and bottom, and two columns of zeros on the left and right (28+2+2 = 32).
You do not need to modify this section.
```
import numpy as np

# LeNet expects 32x32 inputs; MNIST is 28x28, so zero-pad 2 pixels on each
# side of the height and width axes (batch and channel axes left untouched).
# Pad images with 0s
X_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')

print("Updated Image Shape: {}".format(X_train[0].shape))
```
## Visualize Data
View a sample from the dataset.
You do not need to modify this section.
```
import random
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image, cmap="gray")
print(y_train[index])
```
## Preprocess Data
Shuffle the training data.
You do not need to modify this section.
```
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
```
## Setup TensorFlow
The `EPOCHS` and `BATCH_SIZE` values affect the training speed and model accuracy.
You do not need to modify this section.
```
import tensorflow as tf
EPOCHS = 10
BATCH_SIZE = 128
```
## SOLUTION: Implement LeNet-5
Implement the [LeNet-5](http://yann.lecun.com/exdb/lenet/) neural network architecture.
This is the only cell you need to edit.
### Input
The LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since MNIST images are grayscale, C is 1 in this case.
### Architecture
**Layer 1: Convolutional.** The output shape should be 28x28x6.
**Activation.** Your choice of activation function.
**Pooling.** The output shape should be 14x14x6.
**Layer 2: Convolutional.** The output shape should be 10x10x16.
**Activation.** Your choice of activation function.
**Pooling.** The output shape should be 5x5x16.
**Flatten.** Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do this is by using `tf.contrib.layers.flatten`, which is already imported for you.
**Layer 3: Fully Connected.** This should have 120 outputs.
**Activation.** Your choice of activation function.
**Layer 4: Fully Connected.** This should have 84 outputs.
**Activation.** Your choice of activation function.
**Layer 5: Fully Connected (Logits).** This should have 10 outputs.
### Output
Return the result of the 3rd fully connected layer.
```
from tensorflow.contrib.layers import flatten
def LeNet(x):
    """Build the LeNet-5 graph for a batch of 32x32x1 images `x`.

    Returns the (batch, 10) logits tensor of the final fully connected
    layer; no softmax is applied here (the loss op handles that).
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    # 5x5 kernels with VALID padding: 32 - 5 + 1 = 28.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # Activation.
    conv1 = tf.nn.relu(conv1)

    # Pooling. Input = 28x28x6. Output = 14x14x6. (2x2 max pool, stride 2)
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Layer 2: Convolutional. Output = 10x10x16. (14 - 5 + 1 = 10)
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b

    # Activation.
    conv2 = tf.nn.relu(conv2)

    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # Activation.
    fc1 = tf.nn.relu(fc1)

    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # Activation.
    fc2 = tf.nn.relu(fc2)

    # Layer 5: Fully Connected. Input = 84. Output = 10 (one logit per digit).
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 10), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(10))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits
```
## Features and Labels
Train LeNet to classify [MNIST](http://yann.lecun.com/exdb/mnist/) data.
`x` is a placeholder for a batch of input images.
`y` is a placeholder for a batch of output labels.
You do not need to modify this section.
```
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 10)
```
## Training Pipeline
Create a training pipeline that uses the model to classify MNIST data.
You do not need to modify this section.
```
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
```
## Model Evaluation
Evaluate how well the loss and accuracy of the model for a given dataset.
You do not need to modify this section.
```
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Return the model's accuracy over (X_data, y_data), computed in batches."""
    sess = tf.get_default_session()
    n = len(X_data)
    weighted_correct = 0
    for start in range(0, n, BATCH_SIZE):
        end = start + BATCH_SIZE
        batch_x, batch_y = X_data[start:end], y_data[start:end]
        acc = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        # Weight each batch by its size — the final batch may be short.
        weighted_correct += acc * len(batch_x)
    return weighted_correct / n
```
## Train the Model
Run the training data through the training pipeline to train the model.
Before each epoch, shuffle the training set.
After each epoch, measure the loss and accuracy of the validation set.
Save the model after training.
You do not need to modify this section.
```
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)

    print("Training...")
    print()
    for i in range(EPOCHS):
        # Reshuffle each epoch so mini-batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})

        # Report held-out accuracy after every epoch.
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()

    # Persist the trained weights as a checkpoint in the working directory.
    saver.save(sess, './lenet')
    print("Model saved")
```
## Evaluate the Model
Once you are completely satisfied with your model, evaluate the performance of the model on the test set.
Be sure to only do this once!
If you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data.
You do not need to modify this section.
```
with tf.Session() as sess:
    # Restore the most recent checkpoint saved by the training cell above.
    saver.restore(sess, tf.train.latest_checkpoint('.'))

    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
```
| github_jupyter |
# SVR with Scale & Quantile Transformer
This Code template is for regression analysis using the SVR Regressor where rescaling method used is Scale and feature transformation is done via Quantile Transformer.
### Required Packages
```
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import QuantileTransformer, scale
from sklearn.svm import SVR
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training.
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file using its storage path. And we use the head function to display the initial rows of the dataset.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X=df[features]
Y=df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
    """Fill missing values in a Series: numeric -> mean, otherwise -> mode.

    Non-Series inputs are returned unchanged.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df
def EncodeX(df):
    """One-hot encode string/categorical columns via pandas dummies."""
    encoded = pd.get_dummies(df)
    return encoded
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=123)
```
### Data Rescaling
#### Scale
It is a step of Data Pre Processing which is applied to independent variables or features of data. It basically helps to normalise the data within a particular range. Sometimes, it also helps in speeding up the calculations in an algorithm.
```
# Standardize each feature to zero mean / unit variance.
# NOTE(review): scale() is fit on each split independently, so train and
# test use different means/stds; a StandardScaler fit on x_train only and
# applied to both is the usual leak-free approach — confirm this is the
# template's intent.
x_train =scale(x_train)
x_test = scale(x_test)
```
### Quantile Transformer
This method transforms the features to follow a uniform or a normal distribution. Therefore, for a given feature, this transformation tends to spread out the most frequent values. It also reduces the impact of (marginal) outliers: this is therefore a robust preprocessing scheme.
Transform features using quantiles information.
#### Epsilon-Support Vector Regression
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for a given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies the inputted new cases based on the hyperplane. In 2-Dimensional space, this hyperplane is a line separating a plane into two segments where each class or group occupied on either side.
Here we will use SVR, the svr implementation is based on libsvm. The fit time scales at least quadratically with the number of samples and maybe impractical beyond tens of thousands of samples.
#### Parameters:
**kernel: {‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’}, default=’rbf’** ->
Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’ will be used. If a callable is given it is used to precompute the kernel matrix.
**degree: int, default=3** ->
Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels.
**gamma: {‘scale’, ‘auto’} or float, default=’scale’** ->
Kernel coefficient for ‘rbf’, ‘poly’ and ‘sigmoid’.
**coef0: float, default=0.0** ->
Independent term in kernel function. It is only significant in ‘poly’ and ‘sigmoid’.
**tol: float, default=1e-3** ->
Tolerance for stopping criterion.
**C: float, default=1.0** ->
Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. The penalty is a squared l2 penalty.
**epsilon: float, default=0.1** ->
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube within which no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value.
**shrinking: bool, default=True** ->
Whether to use the shrinking heuristic. See the User Guide.
**cache_size: float, default=200** ->
Specify the size of the kernel cache (in MB).
**verbose: bool, default=False** ->
Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context.
**max_iter: int, default=-1** ->
Hard limit on iterations within solver, or -1 for no limit.
```
# Pipeline: quantile-transform the (already scaled) features, then fit SVR.
# NOTE(review): kernel='poly' with degree=13 is an unusually high polynomial
# degree for SVR and can be slow/numerically unstable — confirm this
# hyperparameter choice is intentional for the template.
model=make_pipeline(QuantileTransformer(), SVR(kernel='poly', degree=13))
model.fit(x_train, y_train)
```
#### Model Accuracy
We will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.
score: The score function returns the coefficient of determination R2 of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
> **r2_score**: The **r2_score** function computes the coefficient of determination (R²), i.e. the proportion of the variability in the target that is explained by our model.
> **mae**: The **mean absolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model.
> **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model.
```
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```
#### Prediction Plot
First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.
For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.
```
n=len(x_test) if len(x_test)<20 else 20
plt.figure(figsize=(14,10))
plt.plot(range(n),y_test[0:n], color = "green")
plt.plot(range(n),model.predict(x_test[0:n]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Ayush Gupta , Github: [Profile](https://github.com/guptayush179)
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
from datetime import datetime
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, MetaData, Table, Column, ForeignKey, Integer, String, Float, DateTime, inspect, distinct, desc, and_

# Create Database Connection
engine = create_engine("sqlite:///Resources/hawaii.sqlite")

# Reflect the existing database into a new automap model.
# FIX: the original called Base.metadata.create_all(engine) *before* Base was
# defined (NameError); create_all is also unnecessary here because the tables
# already exist and are only being reflected.
Base = automap_base()
Base.prepare(engine, reflect=True)

# We can view all of the classes that automap found
Base.classes.keys()

# Save a reference to each mapped table (previously duplicated twice).
Measurement = Base.classes.measurement
Station = Base.classes.station

# Create our session (link) from Python to the DB
session = Session(bind=engine)

# Inspect the schema of both tables and preview a few rows.
inspector = inspect(engine)
columns = inspector.get_columns('measurement')
for c in columns:
    print(c['name'], c["type"])
engine.execute('SELECT * FROM measurement LIMIT 5').fetchall()

columns = inspector.get_columns('station')
for c in columns:
    print(c['name'], c["type"])
engine.execute('SELECT * FROM station LIMIT 5').fetchall()
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results

# Calculate the date 1 year ago from the last data point in the database
end_date, = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
begin_date = dt.datetime.strptime(end_date, '%Y-%m-%d') - dt.timedelta(days=365)
end_date = dt.datetime.strptime(end_date, '%Y-%m-%d')
print(end_date, begin_date)

# Perform a query to retrieve the data and precipitation scores
data = session.query(Measurement.id, Measurement.station, Measurement.date, Measurement.prcp, Measurement.tobs)\
    .filter(Measurement.date >= begin_date).filter(Measurement.date <= end_date).all()

# Save the query results as a Pandas DataFrame, set the index to the date
# column, and sort the dataframe by date (descending).
prcp_data = pd.DataFrame(data).set_index('date').sort_values(by='date', ascending=False)
prcp_data

# Use Pandas to calculate the summary statistics for the precipitation data
prcp_data["prcp"].agg(["mean", "median", "sum", "count", "max", "min", "std", "var"])

# Design a query to show how many stations are available in this dataset.
stations_lastyr = prcp_data.station.nunique()
# FIX: the original left `.order_by` uncalled and put the f-string summary on
# its own line (never displayed); count distinct stations and print the summary.
stations, = session.query(func.count(distinct(Measurement.station))).first()
print(f'There are {stations_lastyr} unique weather stations with measurments taken in the last year of data. There are {stations} unique weather stations in the entire dataset.')

# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
active_all = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station) \
    .order_by(desc(func.count(Measurement.station))).all()
active = prcp_data["station"].value_counts()  # Returns descending by default
active = pd.DataFrame(active)
print('This is the dataset filtered for the last year of data.')
active

print('This is the whole dataset.')
# Transpose the (station, count) tuples into two parallel lists.
active_all = [[i for i, j in active_all],
              [j for i, j in active_all]]
active_all

# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
most_active = active.index[0]
active_agg = prcp_data.loc[prcp_data["station"] == most_active]
active_agg["tobs"].agg(["mean", "max", "min"])
# FIX: the original line `most_active_all =` was left unfinished (SyntaxError).
# The most active station over the whole dataset is the first entry of the
# descending station list built above.
most_active_all = active_all[0][0]

# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
```
## Bonus Challenge Assignment
```
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        TMIN, TAVE, and TMAX
    """
    aggregates = [func.min(Measurement.tobs),
                  func.avg(Measurement.tobs),
                  func.max(Measurement.tobs)]
    query = session.query(*aggregates)
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'
    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    # Match every historical row whose month-day portion equals `date`,
    # regardless of year.
    return (
        session.query(func.min(Measurement.tobs),
                      func.avg(Measurement.tobs),
                      func.max(Measurement.tobs))
        .filter(func.strftime("%m-%d", Measurement.date) == date)
        .all()
    )

daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
# Use the start and end date to create a range of dates
# Strip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
# Plot the daily normals as an area plot with `stacked=False`
# class Measurement_two(Base):
# __tablename__= "measurement"
# id = Column(Integer, primary_key=True)
# station = Column(String(200))
# date = Column(DateTime)
# prcp = Column(Float)
# tobs = Column(Float)
# class Station_two(Base):
# __tablename__= "station"
# id = Column(Integer, primary_key=True)
# station = Column(String(200))
# name = Column(String(200))
# latitude = Column(Float)
# longitude = Column(Float)
# elevation = Column(Float)
```
| github_jupyter |
```
import sys
sys.path.append("/Users/msachde1/Downloads/Research/Development/mgwr/")
import warnings
warnings.filterwarnings("ignore")
from mgwr.gwr import GWR
import pandas as pd
import numpy as np
from spglm.family import Gaussian, Binomial, Poisson
from mgwr.gwr import MGWR
from mgwr.sel_bw import Sel_BW
import multiprocessing as mp
pool = mp.Pool()
from scipy import linalg
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse import linalg as spla
from spreg.utils import spdot, spmultiply
from scipy import special
import libpysal as ps
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import copy
from collections import namedtuple
```
<img src="image.png">
### IWLS convergence loop
```
# Load the landslide data set and standardize the covariates for the GWR/MGWR fits.
data_p = pd.read_csv("C:/Users/msachde1/Downloads/logistic_mgwr_data/landslides.csv")
coords = list(zip(data_p['X'], data_p['Y']))

def _column(frame, name):
    """Return column *name* of *frame* as an (n, 1) column vector."""
    return np.array(frame[name]).reshape((-1, 1))

y = _column(data_p, 'Landslid')       # binary landslide indicator (response)
elev = _column(data_p, 'Elev')
slope = _column(data_p, 'Slope')
SinAspct = _column(data_p, 'SinAspct')
CosAspct = _column(data_p, 'CosAspct')
X = np.hstack([elev, slope, SinAspct, CosAspct])
x = CosAspct                          # single covariate used in the univariate runs

def _standardize(arr):
    """Z-score each column: subtract the column mean, divide by the column std."""
    return (arr - arr.mean(axis=0)) / arr.std(axis=0)

X_std = _standardize(X)
x_std = _standardize(x)
y_std = _standardize(y)
```
### Initialization with GWPR
```
# Bandwidth selection for a binomial (logistic) GWR on the raw covariate.
sel = Sel_BW(coords, y, x, family=Binomial(), constant=False)
bw_in = sel.search()

def gwr_func(y, X, bw):
    """Fit a binomial GWR at bandwidth *bw* (adaptive bisquare kernel, no intercept)."""
    model = GWR(coords, y, X, bw, family=Binomial(), fixed=False,
                kernel='bisquare', constant=False)
    return model.fit()

# GWPR fit used to seed the IWLS iterations below.
optim_model = gwr_func(y=y, X=x, bw=bw_in)
om_p = optim_model.params
bw_in
```
### Starting values
```
# Initial IWLS state, seeded from the GWPR fit above.
n_iter = 0
n = x.shape[0]
diff = 1.0e+06       # large sentinel so the convergence loop runs at least once
tol = 1.0e-06
max_iter = 200
betas = om_p
# Linear predictor and fitted probabilities from the GWPR parameters.
XB = (optim_model.params * optim_model.X).sum(axis=1)
mu = 1 / (1 + np.exp(-XB))
# Logit link: eta = log(mu / (1 - mu)).
ni_old = np.log(mu / (1 - mu))
# IWLS loop: iterate the Gaussian MGWR fit on the working response until the
# parameter surfaces stop changing (or max_iter is hit).
while diff > tol and n_iter < max_iter:
    n_iter += 1
    # Binomial IRLS weights: Var(mu) = mu * (1 - mu).
    w = mu * (1 - mu)
    # Working response: z = eta + (y - mu) / (mu * (1 - mu)).
    # BUG FIX: the original wrote ((y - mu)/mu*(1-mu)), which by operator
    # precedence *multiplies* by (1 - mu) instead of dividing by it.
    z = (ni_old + ((optim_model.y - mu) / w)).reshape(-1, 1)
    # Weighted covariate, re-standardized each iteration.
    wx = spmultiply(x.reshape(-1), w.reshape(-1), array_out=False)
    x_std = ((wx - wx.mean(axis=0)) / wx.std(axis=0)).reshape(-1, 1)
    print(x_std.shape)
    # Re-select bandwidths and refit a Gaussian MGWR model on the working response.
    selector = Sel_BW(coords, z, x_std, multi=True, constant=False)
    selector.search(pool=pool)
    print(selector.bw[0])
    mgwr_model = MGWR(coords, z, x_std, selector, family=Gaussian(), constant=False).fit()
    n_betas = mgwr_model.params
    # Update the linear predictor and fitted probabilities for the next pass.
    XB = np.sum(np.multiply(n_betas, mgwr_model.X), axis=1)
    mu = 1 / (1 + np.exp(-1 * XB))
    ni_old = np.log((mu) / (1 - mu))
    # NOTE(review): this converges on the *smallest* absolute parameter change;
    # the usual criterion is the largest change (max). Confirm intent.
    diff = min(min(abs(betas - n_betas).reshape(1, -1).tolist()))
    print("diff = " + str(diff))
    betas = n_betas
    #print (betas, w, z, n_iter)
# Fit a plain binomial GWR on the final iterated covariate and compare AICs
# against the MGWR and the initial GWPR fits.
bw_selector = Sel_BW(coords, y, x_std, family=Binomial(), constant=False)
bw = bw_selector.search()
bw
gwr_mod = GWR(coords, y, x_std, bw, family=Binomial(), constant=False).fit()
gwr_mod.aic
# Distributions of the final working response and standardized covariate.
sns.distplot(z)
sns.distplot(x_std)
mgwr_model.aic
optim_model.aic
```
| github_jupyter |
# Hidden Markov Models
### Problem Statement
The following problem is from the Udacity course on Artificial Intelligence (Thrun and Norvig), chapter 11 (HMMs and filters). It involves a simple scenario where a person's current emotional state is determined by the weather on that particular day. The task is to find the underlying hidden sequence of states (in this case, the weather), given only a set of observations (moods) and information about state/observation changes.
```
#import required libraries
import numpy as np
import warnings
from pprint import pprint
```
$P(\;Rainy\;) = P(R_{0}) = 0.5$ (initial probabilities)
$P(\;Sunny\;) = P(S_{0}) = 0.5$
The chances of weather changing are given as follows:
For rainy weather, $P(S_{tomorrow}|R_{today}) = 0.4$, and $P(R_{tomorrow}|R_{today}) = 0.6$
For sunny weather, $P(R_{tomorrow}|S_{today}) = 0.2$, therefore $P(S_{tomorrow}| S_{today}) = 0.8$
For the purpose of formulating an HMM, we call the above ***Transition Probabilities.***
The corresponding mood changes, given the weather are :
$P(H|R) = 0.4$, therefore $P(G|R) = 0.6$
$P(H|S) = 0.9$, and $P(G|S) = 0.1$
We call these ***Emission Probabilities***
```
# Model parameters for the weather/mood HMM described above.
S = np.array([0, 1])                  # state ids: 0 = Rainy, 1 = Sunny
S_names = ('Rainy', 'Sunny')
pi = np.array([0.5, 0.5])             # uninformative initial state distribution
O = np.array(['Happy', 'Grumpy'])     # observable moods
# A[i, j] = P(state j tomorrow | state i today)
A = np.array([[0.6, 0.4],
              [0.2, 0.8]])
# B[i, j] = P(observation j | state i)
B = np.array([[0.4, 0.6],
              [0.9, 0.1]])
Y = np.array([0, 0, 1])               # observed moods: Happy, Happy, Grumpy
```
### Hidden Markov Models
[HMMs](https://en.wikipedia.org/wiki/Hidden_Markov_model) are a class of probabilistic graphical models that can predict the sequence of states, given a sequence of observations that are dependent on those states, and when the states themselves are unobservable. HMMs have seen widespread success in a variety of applications, from Speech processing and Robotics to DNA Sequencing. An HMM operates according to a set of assumptions, which are :
1. **Markov Assumption**
Current state is dependent on only the previous state.
2. **Stationarity Assumption**
Transition probabilities are independent of time of transition.
3. **Independence Assumption**
Each observation depends solely on the current underlying state (which in turn depends on the previous one), and is independent of other observations.
An HMM is a **Generative model**, in that it attempts to find the probability of a set of observations being produced or *generated* by a class. The parameters that we pass to the HMM class, defined below, are:
*O* = a set of observations
*S* = a set of states
*A* = transition probabilities, represented as a matrix
*B* = emission probabilities, represented as a matrix
*pi* = initial state probabilties
*Y* = sequence observed
### Viterbi Algorithm
The Viterbi algorithm is a Dynamic Programming algorithm for decoding the observation sequence to uncover the most probable state sequence. Given the required parameters, it starts from the initial state and uses the transition/emission information to calculate probabilities of subsequent states. Information from the previous step is passed along to the next, similar to a belief propagation mechanism (such as one used in the Forward-Backward algorithm explained later).
We store the results of each step in a table or matrix of size $k * t$, where k is the number of possible states, and t is the length of the observation sequence. The idea here is to find the path through possible states that has the maximum probability. Since initially we do not have a transition from state to state, we multiply the initial probabilities (from pi) and $P(\;observation\;|\;state\;)$ (from emission matrix B).
Eg. For the first day, we have the observation as Happy, so :
$P(R_{1}) = P(R_{0}) * P(H|R_{1}) = 0.5 * 0.4 = 0.2$
$P(S_{1}) = P(S_{0}) * P(H|S_{1}) \;= 0.5 * 0.9 = 0.45$
We log both these results in the table, since we are starting from an initial state. For the following observations, however, each state has only its maximum probability of moving to the next state logged.
#### On Day 2 : (observation - Happy) :
If current state = Rainy:
$P(R_{1}) * P(R_{2}|R_{1}) = 0.20 * 0.6 = 0.12$ (given Rainy was previous state)
$P(S_{1}) * P(R_{2}|S_{1}) = 0.45 * 0.2 = 0.09$ (Given Sunny was previous state)
Since $0.12>0.09$, we choose $P(R_{2}|H)$ as the most probable transition from $R_{1}$, and update the table with
$P(R_{2}|H) = P(R_{1}) * P(R_{2}|R_{1}) * P(H|R_{2}) = 0.12 * 0.4 = 0.048$
If current state = Sunny:
$P(R_{1}) * P(S_{2}|R_{1}) = 0.20 * 0.4 = 0.08$ (given Rainy was previous state)
$P(S_{1}) * P(S_{2}|S_{1}) = 0.45 * 0.8 = 0.36$ (given Sunny was previous state)
Here too, we choose $P(S_{2}|H)$ as the most probable transition from $S_{1}$, and add it to the table.
$P(S_{2}|H) = P(S_{1}) * P(S_{2}|S_{1}) * P(H|S_{2}) = 0.36 * 0.9 = 0.324$
#### On Day 3: (observation - Grumpy) :
If current state = Rainy:
$P(R_{2}) * P(R_{3}|R_{2}) = 0.048 * 0.6 = 0.0288$ (given Rainy was previous state)
$P(S_{2}) * P(R_{3}|S_{2}) = 0.324 * 0.2 = 0.0648$ (given Sunny was previous state)
As $0.0648>0.0288$, we choose the transition from $S_{2}$ as the most probable path into $R_{3}$, and update the table with
$P(R_{3}|G) = P(S_{2}) * P(R_{3}|S_{2}) * P(G|R_{3}) = 0.0648 * 0.6 = 0.03888$
If current state = Sunny:
$P(R_{2}) * P(S_{3}|R_{2}) = 0.048 * 0.4 = 0.0192$ (given Rainy was previous state)
$P(S_{2}) * P(S_{3}|S_{2}) = 0.324 * 0.8 = 0.2592$ (given Sunny was previous state)
Here too, we choose $P(S_{3}|G)$ as the most probable transition from $S_{2}$, and add it to the table.
$P(S_{3}|G) = P(S_{2}) * P(S_{3}|S_{2}) * P(G|S_{3}) = 0.2592 * 0.1 = 0.02592$
Since now the table is completely filled, we work in reverse from the probability of the last observation and its inferred state (in this case, $0.03888$ i.e. Rainy), finding which state had the maximum probability up to that point. In this way, we find the most probable sequence of states corresponding to our observations!
```
class HMM:
    """Discrete Hidden Markov Model with Viterbi decoding and
    forward-backward smoothing.

    Parameters
    ----------
    observations : array of observation labels (index = observation id)
    states : array of state ids (0..k-1)
    start_probs : initial state distribution (pi)
    trans_probs : transition matrix A, A[i, j] = P(state j | state i)
    emm_probs : emission matrix B, B[i, j] = P(observation j | state i)
    obs_sequence : observed sequence Y as an array of observation ids

    Notes
    -----
    Converted from Python 2 `print` statements to `print()` calls, and the
    dump's stripped indentation was reconstructed.
    """

    def __init__(self, observations, states, start_probs, trans_probs, emm_probs, obs_sequence):
        self.O = observations
        self.S = states
        self.state_names = None              # optional readable names, set by the caller
        self.pi = start_probs
        self.A = trans_probs
        self.B = emm_probs
        self.Y = obs_sequence
        self.k = np.array(self.S).shape[0]   # number of states
        self.t = self.Y.shape[0]             # length of the observation sequence
        self.table_1 = np.zeros((self.k, self.t))    # Viterbi probability table
        self.output_sequence = np.zeros((self.t,))   # decoded state ids per step
        self.fwds = None
        self.bwds = None
        self.smoothened = None

    def viterbi(self):
        """Decode the most likely state sequence for self.Y.

        Fills self.table_1 column by column with the best-path probability of
        each state at each step, then picks the highest-probability state in
        each column (per-day argmax rather than a full backpointer backtrack).
        Returns self.output_sequence.
        """
        # First observation: initial probability times emission probability.
        print("Day 1 : Observation was", self.Y[0], "i.e", self.O[self.Y[0]])
        for i in range(self.k):
            self.table_1[i, 0] = self.pi[i] * self.B[i, self.Y[0]]
            print("Probability of state", i, "-->", self.table_1[i, 0])
            print("-------------------------------------------")
        print("=========================================")
        # Remaining observations: best transition into each state, times emission.
        for i in range(1, self.t):
            print("Day", i + 1, ": Observation was", self.Y[i], "i.e", self.O[self.Y[i]])
            for j in range(self.k):
                print("If current state", j, "i.e", self.state_names[j])
                max_t1_A = 0.0
                for d in range(self.k):  # candidate previous states
                    print("probability of the previous state i.e", d, "-->", self.table_1[d, i - 1])
                    val = self.table_1[d, i - 1] * self.A[d, j]
                    print("State", d, "to State", j, "-->", self.A[d, j])
                    print(self.table_1[d, i - 1], "*", self.A[d, j], "=", val)
                    if val > max_t1_A:
                        max_t1_A = val
                self.table_1[j, i] = max_t1_A
                tmp = self.table_1[j, i]
                self.table_1[j, i] = self.table_1[j, i] * self.B[j, self.Y[i]]
                print("Probability of next state given previous state, transition and observation :")
                print(tmp, "*", self.B[j, self.Y[i]], "=", self.table_1[j, i])
                print("-------------------------------------------")
            print("===========================================")
            print("")
        # Read the table back: for each day keep the state with the highest probability.
        for i in range(self.t - 1, -1, -1):
            max_at_i = 0.0
            max_j = 0
            for j in range(self.k):
                if self.table_1[j][i] > max_at_i:
                    max_at_i = self.table_1[j][i]
                    max_j = j
            # BUG FIX: the original stored the loop variable `j` (always the
            # last state) instead of the argmax `max_j`.
            self.output_sequence[i] = max_j
            print("State", self.state_names[int(self.output_sequence[i])], "was most likely on day", i + 1)
        print("")
        return self.output_sequence

    def get_obs(self, obs_val, emm_prob):
        """Return a k x k diagonal matrix with P(obs_val | state) on the diagonal."""
        ob_mat = np.zeros((self.k, self.k))
        for i in self.S:
            for j in self.S:
                if i == j:
                    ob_mat[i, j] = emm_prob[i, obs_val]
        return ob_mat

    def get_diagonal(self, mat_A, mat_B):
        """Return the diagonal of mat_A . mat_B^T as a (k, 1) column vector.

        Used to combine the forward and backward messages element-wise.
        """
        x = np.transpose(mat_A).shape[1]
        mat_C = np.dot(mat_A, np.transpose(mat_B))
        mat_D = np.zeros((self.k, 1))
        for i in range(x):
            for j in range(x):
                if i == j:
                    mat_D[i][0] = mat_C[i][j]
        return mat_D

    def forward_backward(self):
        """Smooth the state distribution at every step of self.Y.

        Computes scaled forward and backward messages, multiplies them
        element-wise, renormalizes, and stores the per-day argmax state in
        self.output_sequence. Returns self.output_sequence.

        NOTE(review): the forward update applies A in the order O . A . f;
        with A[i, j] = P(j | i) the textbook update is O . A^T . f — confirm
        the intended orientation of A before reusing this class.
        """
        self.m = self.O.shape[0]
        # One diagonal observation matrix per time step.
        obs_mats = [None for i in range(self.t)]
        for i in range(self.t):
            obs_mats[i] = self.get_obs(self.Y[i], self.B)
        print("Observation matrices :")
        pprint(obs_mats)
        print("")
        # Forward pass; index 0 holds the (normalized) prior.
        f = [[] for i in range(self.t + 1)]
        f[0] = self.pi.reshape(self.k, 1)
        csum = 0.0
        for j in f[0]:
            csum += j
        for j in range(f[0].shape[0]):
            f[0][j] = f[0][j] / csum
        for i in range(1, self.t + 1):
            f[i] = np.dot(np.dot(obs_mats[i - 1], self.A),
                          f[i - 1]).reshape(self.k, 1)
            # scaling done here
            csum = 0.0
            for j in f[i]:
                csum += j
            for j in range(f[i].shape[0]):
                f[i][j] = f[i][j] / csum
        f = np.array(f)
        print("Forward probabilities :")
        pprint(f)
        print("")
        # Backward pass, seeded with ones at the final step.
        b = [[] for i in range(self.t + 1)]
        b[-1] = np.array([[1.0] for i in range(self.k)])
        for i in range(self.t - 1, -1, -1):
            b[i] = np.dot(np.dot(self.A, obs_mats[i]),
                          b[i + 1]).reshape(self.k, 1)
            # scaling done here
            csum = 0.0
            for j in b[i]:
                csum += j
            for j in range(b[i].shape[0]):
                b[i][j] = b[i][j] / csum
        b = np.array(b)
        print("Backward probabilities :")
        pprint(b)
        print("")
        # Smoothed posteriors: forward * backward, renormalized per step.
        smooth = [[] for i in range(self.t + 1)]
        for i in range(self.t + 1):
            smooth[i] = self.get_diagonal(f[i], b[i])
            csum = 0.0
            for j in smooth[i]:
                csum += j
            for j in range(smooth[i].shape[0]):
                smooth[i][j] = smooth[i][j] / csum
        smooth = np.array(smooth)
        print("Smoothed probabilities :")
        pprint(smooth)
        self.fwds = f
        self.bwds = b
        self.smoothened = smooth
        # Per-day MAP state from the smoothed distribution (index 0 is the prior).
        for i in range(1, smooth.shape[0]):
            max_prob = max(smooth[i].tolist())
            print("Day", i, "probability was max for state", smooth[i].tolist().index(max_prob), "-->", max_prob[0])
            self.output_sequence[i - 1] = smooth[i].tolist().index(max_prob)
        return self.output_sequence
# Build the weather HMM from the parameters defined above and decode the
# observation sequence with the Viterbi method.
# (Converted from Python 2 print statements; indentation reconstructed.)
weather_hmm = HMM(O, S, pi, A, B, Y)
weather_hmm.state_names = S_names
obs_states = [O[i] for i in Y]
print("Observations :")
print(obs_states, "\n")
with warnings.catch_warnings():
    # Suppress warnings during decoding (reason not stated in the original —
    # presumably numpy deprecation noise; confirm).
    warnings.simplefilter("ignore")
    print("Using Viterbi Algorithm:\n")
    op1 = weather_hmm.viterbi()
# Pretty-print the Viterbi probability table (rows = states, columns = days).
print("Table of state probabilities :")
for row in weather_hmm.table_1:
    print("----------------------------")
    print("|", end=" ")
    for cell in row:
        print("{0:.4f} |".format(cell), end=" ")
    print("")
print("----------------------------\n")
op_states1 = [S_names[int(i)] for i in op1]
print(op_states1)
```
### Forward-Backward Algorithm
Explanation : **TO-DO**
```
# reset output sequence values to zero so the two algorithms don't share results
weather_hmm.output_sequence = np.zeros((weather_hmm.t,))
print("Using Forward-Backward Algorithm:")
op2 = weather_hmm.forward_backward()
# Map decoded state ids back to readable names.
op_states2 = [S_names[int(i)] for i in op2]
print(op_states2)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.