code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
<a href="https://colab.research.google.com/github/hf2000510/infectious_disease_modelling/blob/master/part_two.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Make sure to open in Colab to see the plots!
### Importing the libraries
```
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
!pip install mpld3
import mpld3
mpld3.enable_notebook()
```
### Plot Function
```
def plotseird(t, S, E, I, R, D=None, L=None, R0=None, Alpha=None, CFR=None):
    """Plot SEIR(D) compartment curves, plus optional R_0 / fatality-rate subplots.

    Parameters
    ----------
    t : time grid (days)
    S, E, I, R : compartment time series
    D : optional dead-compartment series; plotted and included in the total when given
    L : optional lockdown day, used only in the title
    R0 : optional R_0-over-time series (second figure, left subplot)
    Alpha : optional fatality-rate-over-time series (second figure, right subplot)
    CFR : unused; kept for interface compatibility
    """
    f, ax = plt.subplots(1, 1, figsize=(10, 4))
    ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
    ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
    ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
    ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
    if D is not None:
        ax.plot(t, D, 'k', alpha=0.7, linewidth=2, label='Dead')
        ax.plot(t, S+E+I+R+D, 'c--', alpha=0.7, linewidth=2, label='Total')
    else:
        ax.plot(t, S+E+I+R, 'c--', alpha=0.7, linewidth=2, label='Total')
    ax.set_xlabel('Time (days)')
    ax.yaxis.set_tick_params(length=0)
    ax.xaxis.set_tick_params(length=0)
    # FIX: Axes.grid's `b=` keyword was removed in Matplotlib 3.6; use `visible=`.
    ax.grid(visible=True, which='major', c='w', lw=2, ls='-')
    legend = ax.legend(borderpad=2.0)
    legend.get_frame().set_alpha(0.5)
    for spine in ('top', 'right', 'bottom', 'left'):
        ax.spines[spine].set_visible(False)
    if L is not None:
        plt.title("Lockdown after {} days".format(L))
    plt.show()
    # FIX: the second figure was gated on CFR (never plotted) instead of Alpha,
    # so passing only Alpha would have drawn into a stale/undefined figure.
    if R0 is not None or Alpha is not None:
        f = plt.figure(figsize=(12, 4))
        if R0 is not None:
            # left subplot: reproduction number over time
            ax1 = f.add_subplot(121)
            ax1.plot(t, R0, 'b--', alpha=0.7, linewidth=2, label='R_0')
            ax1.set_xlabel('Time (days)')
            ax1.title.set_text('R_0 over time')
            ax1.yaxis.set_tick_params(length=0)
            ax1.xaxis.set_tick_params(length=0)
            ax1.grid(visible=True, which='major', c='w', lw=2, ls='-')
            legend = ax1.legend()
            legend.get_frame().set_alpha(0.5)
            for spine in ('top', 'right', 'bottom', 'left'):
                # FIX: was `ax.spines` — restyled the first figure's axes, not this one.
                ax1.spines[spine].set_visible(False)
        if Alpha is not None:
            # right subplot: fatality rate over time
            ax2 = f.add_subplot(122)
            ax2.plot(t, Alpha, 'r--', alpha=0.7, linewidth=2, label='alpha')
            ax2.set_xlabel('Time (days)')
            ax2.title.set_text('fatality rate over time')
            ax2.yaxis.set_tick_params(length=0)
            ax2.xaxis.set_tick_params(length=0)
            ax2.grid(visible=True, which='major', c='w', lw=2, ls='-')
            legend = ax2.legend()
            legend.get_frame().set_alpha(0.5)
            for spine in ('top', 'right', 'bottom', 'left'):
                # FIX: was `ax.spines` here as well.
                ax2.spines[spine].set_visible(False)
        plt.show()
```
## Basic SIR Equations
```
def deriv(y, t, N, beta, gamma):
    """Basic SIR derivatives for odeint: returns (dS/dt, dI/dt, dR/dt)."""
    S, I, R = y
    infections = beta * S * I / N   # S -> I flow
    recoveries = gamma * I          # I -> R flow
    return -infections, infections - recoveries, recoveries
```
## The Exposed-Compartment
```
def deriv(y, t, N, beta, gamma, delta):
    """SEIR derivatives for odeint: exposed compartment with incubation rate delta."""
    S, E, I, R = y
    exposures = beta * S * I / N   # S -> E flow
    onsets = delta * E             # E -> I flow
    recoveries = gamma * I         # I -> R flow
    return -exposures, exposures - onsets, onsets - recoveries, recoveries
```
### Variables that we define:
```
# Model parameters and integration for the basic SEIR run.
N = 1_000_000 # total population
D = 4.0 # infections lasts four days
gamma = 1.0 / D
delta = 1.0 / 5.0 # incubation period of five days
R_0 = 5.0
beta = R_0 * gamma # R_0 = beta / gamma, so beta = R_0 * gamma
# NOTE: R0 here is the initial recovered count, shadowing nothing but easy to
# confuse with the reproduction number R_0 above.
S0, E0, I0, R0 = N-1, 1, 0, 0 # initial conditions: one exposed
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta))
S, E, I, R = ret.T
```
### Plot the result:
```
# Visualize the basic SEIR run (no dead compartment).
plotseird(t, S, E, I, R)
```
## Programming the Dead-Compartment
```
def deriv(y, t, N, beta, gamma, delta, alpha, rho):
    """SEIRD derivatives: fixed fatality rate alpha, death rate rho (1 / days-to-death)."""
    S, E, I, R, D = y
    exposures = beta * S * I / N          # S -> E
    onsets = delta * E                    # E -> I
    recoveries = (1 - alpha) * gamma * I  # I -> R (surviving fraction)
    deaths = alpha * rho * I              # I -> D (dying fraction)
    return (-exposures,
            exposures - onsets,
            onsets - recoveries - deaths,
            recoveries,
            deaths)
```
### New variables:
```
# Parameters and integration for the SEIRD run (adds a dead compartment).
N = 1_000_000
D = 4.0 # infections lasts four days
gamma = 1.0 / D
delta = 1.0 / 5.0 # incubation period of five days
R_0 = 5.0
beta = R_0 * gamma # R_0 = beta / gamma, so beta = R_0 * gamma
alpha = 0.2 # 20% death rate
rho = 1/9 # 9 days from infection until death
S0, E0, I0, R0, D0 = N-1, 1, 0, 0, 0 # initial conditions: one exposed
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0, D0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta, alpha, rho))
S, E, I, R, D = ret.T
```
### Plot the result:
```
# Visualize the SEIRD run, including the dead compartment.
plotseird(t, S, E, I, R, D)
```
## Time-Dependent $R_{0}$
### Simple Approach: Single Lockdown
```
def deriv(y, t, N, beta, gamma, delta, alpha, rho):
    """SEIRD derivatives where beta is a *function* of time (lockdown switch)."""
    S, E, I, R, D = y
    contact_rate = beta(t)                 # time-dependent transmission rate
    exposures = contact_rate * S * I / N   # S -> E
    onsets = delta * E                     # E -> I
    recoveries = (1 - alpha) * gamma * I   # I -> R
    deaths = alpha * rho * I               # I -> D
    return (-exposures,
            exposures - onsets,
            onsets - recoveries - deaths,
            recoveries,
            deaths)
# Single-lockdown scenario: R_0 drops from 5.0 to 0.9 on day L.
L = 40
N = 1_000_000
D = 4.0 # infections lasts four days
gamma = 1.0 / D
delta = 1.0 / 5.0 # incubation period of five days
def R_0(t):
    # Step function: pre-lockdown R_0, then post-lockdown R_0 (reads global L).
    return 5.0 if t < L else 0.9
def beta(t):
    # Transmission rate implied by the current R_0 (reads global gamma).
    return R_0(t) * gamma
alpha = 0.2 # 20% death rate
rho = 1/9 # 9 days from infection until death
S0, E0, I0, R0, D0 = N-1, 1, 0, 0, 0 # initial conditions: one exposed
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0, D0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta, alpha, rho))
S, E, I, R, D = ret.T
```
### Plot the result:
```
# Visualize the lockdown scenario; L is shown in the plot title.
plotseird(t, S, E, I, R, D, L)
```
### Advanced Approach: logistic $R_{0}$
```
### we will use the logistic R in our model, because R probably never “jumps” from one value to another. Rather, it continuously changes.
def deriv(y, t, N, beta, gamma, delta, alpha, rho):
    """SEIRD derivatives with a time-varying beta(t) (here: logistic R_0 decay)."""
    S, E, I, R, D = y
    flow_SE = beta(t) * S * I / N       # new exposures
    flow_EI = delta * E                 # incubation completions
    flow_IR = (1 - alpha) * gamma * I   # recoveries
    flow_ID = alpha * rho * I           # deaths
    dSdt = -flow_SE
    dEdt = flow_SE - flow_EI
    dIdt = flow_EI - flow_IR - flow_ID
    dRdt = flow_IR
    dDdt = flow_ID
    return dSdt, dEdt, dIdt, dRdt, dDdt
# Logistic-R_0 scenario: R_0 declines smoothly from R_0_start to R_0_end
# around day x0 with steepness k.
N = 1_000_000
D = 4.0 # infections lasts four days
gamma = 1.0 / D
delta = 1.0 / 5.0 # incubation period of five days
R_0_start, k, x0, R_0_end = 5.0, 0.5, 50, 0.5
def logistic_R_0(t):
    # Logistic interpolation between R_0_start (t << x0) and R_0_end (t >> x0).
    return (R_0_start-R_0_end) / (1 + np.exp(-k*(-t+x0))) + R_0_end
def beta(t):
    # Transmission rate implied by the current R_0 (reads global gamma).
    return logistic_R_0(t) * gamma
alpha = 0.2 # 20% death rate
rho = 1/9 # 9 days from infection until death
S0, E0, I0, R0, D0 = N-1, 1, 0, 0, 0 # initial conditions: one exposed
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0, D0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta, alpha, rho))
S, E, I, R, D = ret.T
# Works because t is exactly the integers 0..99 here.
R0_over_time = [logistic_R_0(i) for i in range(len(t))] # to plot R_0 over time: get function values
```
### Plot the result:
```
# Visualize the logistic-R_0 scenario; also draws the R_0-over-time subplot.
plotseird(t, S, E, I, R, D, R0=R0_over_time)
```
## Resource- and Age-Dependent Fatality Rate
```
def deriv(y, t, N, beta, gamma, delta, alpha_opt, rho):
    # SEIRD derivatives with a resource-dependent fatality rate: the effective
    # rate alpha(t) grows linearly with the current infected share I/N on top
    # of the baseline (age-weighted) rate alpha_opt.
    # NOTE(review): `s` (the strain coefficient) is read from the global scope,
    # not passed as a parameter — it must be defined before odeint is called.
    S, E, I, R, D = y
    def alpha(t):
        # current fatality rate: baseline plus s * (infected share)
        return s * I/N + alpha_opt
    dSdt = -beta(t) * S * I / N
    dEdt = beta(t) * S * I / N - delta * E
    dIdt = delta * E - (1 - alpha(t)) * gamma * I - alpha(t) * rho * I
    dRdt = (1 - alpha(t)) * gamma * I
    dDdt = alpha(t) * rho * I
    return dSdt, dEdt, dIdt, dRdt, dDdt
### New variables:
# Age- and resource-dependent fatality scenario.
N = 1_000_000
D = 4.0 # infections lasts four days
gamma = 1.0 / D
delta = 1.0 / 5.0 # incubation period of five days
R_0_start, k, x0, R_0_end = 5.0, 0.5, 50, 0.5
def logistic_R_0(t):
    # Logistic interpolation between R_0_start (t << x0) and R_0_end (t >> x0).
    return (R_0_start-R_0_end) / (1 + np.exp(-k*(-t+x0))) + R_0_end
def beta(t):
    # Transmission rate implied by the current R_0 (reads global gamma).
    return logistic_R_0(t) * gamma
alpha_by_agegroup = {"0-29": 0.01, "30-59": 0.05, "60-89": 0.2, "89+": 0.3}
proportion_of_agegroup = {"0-29": 0.1, "30-59": 0.3, "60-89": 0.4, "89+": 0.2}
s = 0.01
# Baseline fatality rate: population-weighted average over the age groups.
alpha_opt = sum(alpha_by_agegroup[i] * proportion_of_agegroup[i] for i in list(alpha_by_agegroup.keys()))
rho = 1/9 # 9 days from infection until death
S0, E0, I0, R0, D0 = N-1, 1, 0, 0, 0 # initial conditions: one exposed
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0, D0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta, alpha_opt, rho))
S, E, I, R, D = ret.T
R0_over_time = [logistic_R_0(i) for i in range(len(t))] # to plot R_0 over time: get function values
# Reconstruct the effective fatality rate from the integrated I(t).
Alpha_over_time = [s * I[i]/N + alpha_opt for i in range(len(t))] # to plot alpha over time
```
### Plot the result:
```
# Visualize the final scenario with both the R_0 and fatality-rate subplots.
plotseird(t, S, E, I, R, D, R0=R0_over_time, Alpha=Alpha_over_time)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
##bring in Leung data (data is from Leung's Git Repo on betting on nhl 2018); he scraped it from nhl.com
pathjj = '/Users/joejohns/'
data_path = 'data_bootcamp/GitHub/final_project_nhl_prediction/Data/Leung_Data_Results/nhl_data_Leung.csv'
results_path = 'data_bootcamp/GitHub/final_project_nhl_prediction/Data/Leung_Data_Results/results_Leung.csv'
L_data = pd.read_csv(pathjj+data_path) #stats CF etc ... one line every data and team so num_dates*30? rows
L_res = pd.read_csv(pathjj+results_path) #results win, loss etc
#df_data.dropna(inplace = True)
#My old data sets (next two cells)
# NOTE(review): these three path constants are re-assigned identically a few
# lines below — one of the two definitions is redundant.
Kaggle_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/"
mp_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/"
betting_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/"
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/nhl odds 2007-08.xlsx
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/
##Kaggle files
df_game = pd.read_csv(Kaggle_path+'game.csv')
df_game_team_stats = pd.read_csv(Kaggle_path+'game_teams_stats.csv')
df_team_info = pd.read_csv(Kaggle_path+'team_info.csv')
##? not sure what this is ...
# NOTE(review): likely an accidental notebook auto-import; numpy.core is a
# private module and this name is never used below — candidate for removal.
from numpy.core.numeric import True_
##import all the files
##file paths
Kaggle_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/"
mp_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/"
betting_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/"
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/nhl odds 2007-08.xlsx
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/
##Kaggle files
df_game = pd.read_csv(Kaggle_path+'game.csv')
df_game_team_stats = pd.read_csv(Kaggle_path+'game_teams_stats.csv')
df_game_skater_stats = pd.read_csv(Kaggle_path+'game_skater_stats.csv')
df_game_goalie_stats = pd.read_csv(Kaggle_path+'game_goalie_stats.csv')
##more subtle Kaggle features:
df_game_scratches = pd.read_csv(Kaggle_path+'game_scratches.csv')
df_game_officials = pd.read_csv(Kaggle_path+'game_officials.csv')
df_team_info = pd.read_csv(Kaggle_path+'team_info.csv')
## grab all the moneypuck data
df_mp_teams = pd.read_csv(mp_path+'all_teams.csv')
## grab all betting data
# One spreadsheet per season; the season label is attached right after loading.
df1 = pd.read_excel(io = betting_path+'nhl odds 2007-08.xlsx')
df2 = pd.read_excel(io = betting_path+'nhl odds 2008-09.xlsx')
df3 = pd.read_excel(io = betting_path+'nhl odds 2009-10.xlsx')
df4 = pd.read_excel(io = betting_path+'nhl odds 2010-11.xlsx')
df5 = pd.read_excel(io = betting_path+'nhl odds 2011-12.xlsx')
df6 = pd.read_excel(io = betting_path+'nhl odds 2012-13.xlsx')
df7 = pd.read_excel(io = betting_path+'nhl odds 2013-14.xlsx')
df8 = pd.read_excel(io = betting_path+'nhl odds 2014-15.xlsx')
df9 = pd.read_excel(io = betting_path+'nhl odds 2015-16.xlsx')
df10 = pd.read_excel(io = betting_path+'nhl odds 2016-17.xlsx')
df11 = pd.read_excel(io = betting_path+'nhl odds 2017-18.xlsx')
df12 = pd.read_excel(io = betting_path+'nhl odds 2018-19.xlsx')
df13 = pd.read_excel(io = betting_path+'nhl odds 2019-20.xlsx')
df1['season'] = 20072008
df2['season'] = 20082009
df3['season'] = 20092010
df4['season'] = 20102011
df5['season'] = 20112012
df6['season'] = 20122013
df7['season'] = 20132014
df8['season'] = 20142015
df9['season'] = 20152016
df10['season'] = 20162017
df11['season'] = 20172018
df12['season'] = 20182019
df13['season'] = 20192020
##omit 20072008
df_betting = pd.concat([df2, df3, df4, df5, df6, df7, df8, df9, df10, df11, df12, df13])
def fav_pred(x):
    """Map a moneyline Open price to a predicted outcome.

    Negative odds (favorite) -> 1 (predicted win); positive odds (underdog)
    -> 0 (predicted loss); exactly 0 is a data error -> NaN.
    """
    if x < 0:
        return 1
    if x > 0:
        return 0
    # FIX: message said "O odds" (letter O) instead of "0 odds";
    # also np.NaN was removed in NumPy 2.0 — np.nan is the canonical spelling.
    print("0 odds detected, nan returned")
    return np.nan

# Vectorized form for applying to whole odds columns.
v_fav_pred = np.vectorize(fav_pred)
def implied_proba(odds):
    """Convert American moneyline odds to an implied win probability.

    +odds: bet 100 to profit `odds`   -> p = 100 / (odds + 100)
    -odds: bet |odds| to profit 100   -> p = -odds / (-odds + 100)

    FIX: odds == 0 previously fell off the end and silently returned None;
    it now returns NaN explicitly (consistent with fav_pred).
    """
    if odds > 0:
        return 100/(odds+100) #bet 100 to get 100+odds; profit = odds
    if odds < 0:
        return (-odds)/(-odds + 100) #bet |odds| to get 100+|odds|; profit = 100
    return np.nan
# Work on a copy so later cleaning doesn't mutate df_betting;
# the bare `df` on the last line displays it in the notebook.
df= df_betting.copy()
df
```
##Nota bene: in L_data and L_res the team id is wrong! It is not the franchise id either ...
```
## cleaning tool
def perc_null(X):
    """Per-column missing-value report: null count, dtype, and null fraction,
    sorted by null count descending."""
    null_counts = X.isnull().sum().sort_values(ascending=False)
    # mean of the boolean null mask == (null count) / (row count)
    null_fracs = X.isnull().mean().sort_values(ascending=False)
    return pd.concat([null_counts, X.dtypes, null_fracs],
                     axis=1, keys=['Total', 'Type', 'Percent'])
##fixing dates and season label
def to_string(n):
    """Render an integer as a string, zero-padding single digits to width 2."""
    return str(n).zfill(2)
#makes mp_date for L_data '2014-1-16' --> 20140116 (integer)
def L_to_mp_date(s):
    """Convert a Leung date string like '2014-1-16' to the MoneyPuck integer
    date format 20140116 (yyyymmdd)."""
    y, m, d = s.split('-')
    return int(y + m.zfill(2) + d.zfill(2))
#test '2014-1-16'.split('-')
#make_mp_date('2014-10-16')
##for assigning dates in df_game
#takes in date_time date and returns integer yyyymmdd eg 20080904
##note this is df_mp format
def game_add_mp_date(date_time):
    """Convert a datetime-like value to the MoneyPuck integer date format,
    e.g. 2008-09-04 -> 20080904 (yyyymmdd)."""
    return int(date_time.strftime('%Y%m%d'))
##test
date_time1 = pd.to_datetime('2016-10-19T00:30:00Z')
date_time2 = pd.to_datetime('2008-09-19T00:30:00Z')
print("two tests for game_add_mp_date:")
print(game_add_mp_date(date_time1) == 20161019)
print(game_add_mp_date(date_time2)==20080919)
print(" ")
##for assigning dates to df_betting
#takes in ingegers date = 913 or 1026 season = 20082009
# returns returns integer yyyymmdd eg 20080904
#note this is df_mp format
def bet_add_mp_date(date, season):
    """Build a MoneyPuck integer date (yyyymmdd) from a betting-sheet date.

    `date` is an integer like 913 (Sep 13) or 1026 (Oct 26); `season` is an
    integer like 20082009. Months >= September belong to the season's first
    year, everything else to the second.
    """
    mmdd = str(date)
    if len(mmdd) == 3:
        # 3-digit dates are missing their leading month zero, e.g. 913 -> 0913
        mmdd = '0' + mmdd
    season_text = str(season)
    year = season_text[:4] if date > 900 else season_text[4:]
    return int(year + mmdd)
##fix season format for df_mp
##takes in integer 2008 and returns 20082009
##note: I have verified in df_mp for two seasons 2008 labels 20082009
def fix_mp_season(n):
    """Expand a MoneyPuck season start year to the full label: 2008 -> 20082009."""
    return int(f"{n}{n + 1}")
def L_add_season(mp_date):
    """Infer the season label from an integer/str date yyyymmdd.

    Sep-Dec dates open a season (year -> year+1); Jan-Aug dates close
    one (year-1 -> year). E.g. 20140119 -> 20132014.
    """
    text = str(mp_date)
    year = int(text[:4])
    month = int(text[4:6])
    if 9 <= month <= 12:
        return int(f"{year}{year + 1}")
    if 1 <= month <= 8:
        return int(f"{year - 1}{year}")
##df_game_team_stats does not have 'season' or 'won' so we add that
def add_season_from_id(game_id):
    """Derive the season label from a Kaggle game_id, whose first four digits
    are the season's starting year: 2019021081 -> 20192020."""
    start_year = int(str(game_id)[:4])
    return int(f"{start_year}{start_year + 1}")
##test
# Inline sanity checks for the date/season helpers; every print should show True.
# NOTE(review): some labels are off — "bed_add_mp_date" is a typo for
# bet_add_mp_date, "three tests for L_add_season" precedes only two tests, and
# "L_to_mp_season" means L_to_mp_date. The checks themselves are correct.
print("three tests for bed_add_mp_date:")
print(bet_add_mp_date(912, 20082009) == 20080912)
print(bet_add_mp_date(1025,20102011) == 20101025)
print(bet_add_mp_date(227,20102011) == 20110227)
print(" ")
##tests
print("two tests for fix_mp_season:")
print(fix_mp_season(2008) == 20082009)
print(fix_mp_season(2019) == 20192020)
print(" ")
print("three tests for L_add_season:")
print(L_add_season('20140119')==20132014)
print(L_add_season('20140912')==20142015)
print(" ")
print("three tests for L_to_mp_season:")
print(L_to_mp_date('2014-10-16')==20141016)
print(L_to_mp_date('2014-1-16')==20140116)
print(L_to_mp_date('2014-1-6')==20140106)
print(" ")
print('test for add_season_from_id')
#test
print(add_season_from_id(2019021081) == 20192020)
##fix team names
##Leung dropped all the ATL games so I should do same
# issues CAL MON and WAS
# Leung abbreviation -> canonical NHL abbreviation.
# Differences from canonical: CAL -> CGY, MON -> MTL, WAS -> WSH.
L_to_nhl_dic = {'ANA': 'ANA',
    'ARI': 'ARI',
    'ATL': 'ATL',
    'BOS': 'BOS',
    'BUF': 'BUF',
    'CAR': 'CAR',
    'CBJ': 'CBJ',
    'CAL': 'CGY',
    'CHI': 'CHI',
    'COL': 'COL',
    'DAL': 'DAL',
    'DET': 'DET',
    'EDM': 'EDM',
    'FLA': 'FLA',
    'LAK': 'LAK',
    'MIN': 'MIN',
    'MON': 'MTL',
    'NJD': 'NJD',
    'NSH': 'NSH',
    'NYI': 'NYI',
    'NYR': 'NYR',
    'OTT': 'OTT',
    'PHI': 'PHI',
    'PIT': 'PIT',
    'SJS': 'SJS',
    'STL': 'STL',
    'TBL': 'TBL',
    'TOR': 'TOR',
    'VAN': 'VAN',
    'VGK': 'VGK',
    'WPG': 'WPG',
    'WAS': 'WSH'}
# MoneyPuck abbreviation -> canonical NHL abbreviation.
# MoneyPuck uses dotted forms for two-word cities: L.A, N.J, S.J, T.B.
mp_to_nhl_dic = {'ANA': 'ANA',
    'ARI': 'ARI',
    'ATL': 'ATL',
    'BOS': 'BOS',
    'BUF': 'BUF',
    'CAR': 'CAR',
    'CBJ': 'CBJ',
    'CGY': 'CGY',
    'CHI': 'CHI',
    'COL': 'COL',
    'DAL': 'DAL',
    'DET': 'DET',
    'EDM': 'EDM',
    'FLA': 'FLA',
    'L.A': 'LAK',
    'MIN': 'MIN',
    'MTL': 'MTL',
    'N.J': 'NJD',
    'NSH': 'NSH',
    'NYI': 'NYI',
    'NYR': 'NYR',
    'OTT': 'OTT',
    'PHI': 'PHI',
    'PIT': 'PIT',
    'S.J': 'SJS',
    'STL': 'STL',
    'T.B': 'TBL',
    'TOR': 'TOR',
    'VAN': 'VAN',
    'VGK': 'VGK',
    'WPG': 'WPG',
    'WSH': 'WSH'}
# Betting-sheet team name -> canonical NHL abbreviation.
# Includes misspellings/variants seen in the sheets ('Arizonas', 'Tampa',
# 'WinnipegJets') and the Phoenix -> ARI relocation.
bet_to_nhl_dic = {'Anaheim': 'ANA',
    'Arizona': 'ARI',
    'Arizonas': 'ARI',
    'Atlanta': 'ATL',
    'Boston': 'BOS',
    'Buffalo': 'BUF',
    'Calgary': 'CGY',
    'Carolina': 'CAR',
    'Chicago': 'CHI',
    'Colorado': 'COL',
    'Columbus': 'CBJ',
    'Dallas': 'DAL',
    'Detroit': 'DET',
    'Edmonton': 'EDM',
    'Florida': 'FLA',
    'LosAngeles': 'LAK',
    'Minnesota': 'MIN',
    'Montreal': 'MTL',
    'NYIslanders': 'NYI',
    'NYRangers': 'NYR',
    'Nashville': 'NSH',
    'NewJersey': 'NJD',
    'Ottawa': 'OTT',
    'Philadelphia': 'PHI',
    'Phoenix': 'ARI',
    'Pittsburgh': 'PIT',
    'SanJose': 'SJS',
    'St.Louis': 'STL',
    'Tampa': 'TBL',
    'TampaBay': 'TBL',
    'Toronto': 'TOR',
    'Vancouver': 'VAN',
    'Vegas': 'VGK',
    'Washington': 'WSH',
    'Winnipeg': 'WPG',
    'WinnipegJets': 'WPG'}
# Kaggle numeric team_id -> canonical NHL abbreviation.
# Note ids 27 and 53 both map to ARI (Phoenix/Arizona era split).
game_to_nhl_dic = {1: 'NJD',
    4: 'PHI',
    26: 'LAK',
    14: 'TBL',
    6: 'BOS',
    3: 'NYR',
    5: 'PIT',
    17: 'DET',
    28: 'SJS',
    18: 'NSH',
    23: 'VAN',
    16: 'CHI',
    9: 'OTT',
    8: 'MTL',
    30: 'MIN',
    15: 'WSH',
    19: 'STL',
    24: 'ANA',
    27: 'ARI',
    2: 'NYI',
    10: 'TOR',
    13: 'FLA',
    7: 'BUF',
    20: 'CGY',
    21: 'COL',
    25: 'DAL',
    29: 'CBJ',
    52: 'WPG',
    22: 'EDM',
    54: 'VGK',
    12: 'CAR',
    53: 'ARI',
    11: 'ATL'}
def L_to_nhl(abbrevName):
    # Leung abbreviation -> canonical NHL abbreviation (KeyError on unknown team).
    return L_to_nhl_dic[abbrevName]
def bet_to_nhl(Team):
    # Betting-sheet team name -> canonical NHL abbreviation.
    return bet_to_nhl_dic[Team]
def mp_to_nhl(team):
    # MoneyPuck abbreviation -> canonical NHL abbreviation.
    return mp_to_nhl_dic[team]
def game_to_nhl(team_id):
    # Kaggle numeric team_id -> canonical NHL abbreviation.
    return game_to_nhl_dic[team_id]
##simple tests:
# All four dictionaries should cover the same set of canonical abbreviations;
# the first four prints should show True, the last expression should be 31.
print(set(bet_to_nhl_dic.values()) == set(mp_to_nhl_dic.values()) )
print(set(bet_to_nhl_dic.values() )== set(game_to_nhl_dic.values()) )
print(set(mp_to_nhl_dic.values()) == set(game_to_nhl_dic.values()) )
print(set(mp_to_nhl_dic.values()) == set(L_to_nhl_dic.values()) )
print(set(game_to_nhl_dic.values()))
print(set(bet_to_nhl_dic.values()))
print(set(mp_to_nhl_dic.values()))
#print(set(mp_to_nhl_dic.values()) == set(L_data['teamAbbreviation'])) #these are different bec no ATL in L_data
len(set(mp_to_nhl_dic.values()))
##assign game_id to df_betting (two ways)
##these two functions
# assign game_id to df_betting ... using two different look up tables, df_game and df_mp_teams
##note the following error codes are assigned if a game is not found or found more than once
# if a home game is empty: 80 #8 is for h
# if a home game has >= 2 entries: 82
# if an away game is empty: 10 #1 is for a
# if an away game has >= 2 entries: 12
#df_betting['mp_game_id'] = np.vectorize(mp_to_bet_add_game_id, excluded={0})(df_mp_teams, df_betting['mp_date'], df_betting['nhl_name'], df_betting['VH'] )
def game_to_bet_add_game_id(df_game_team_stats, mp_date, nhl_name, VH):
    """Look up the Kaggle game_id for one betting row (date, team, home/visitor).

    Returns the game_id when exactly one match exists; otherwise an error code:
    home ('H'): 80 = no match, 82 = multiple matches
    away ('V'): 220 = no match, 222 = multiple matches
    (Other VH values return None.)

    FIX: the original built its boolean masks from df_game_team_stats but
    indexed into the global df_game (misaligned frames); the away branch was
    missing .values (positional a[0] on a Series); and an unreachable
    print(mp_date) sat after `return 80`.
    """
    if VH == 'H':
        h = df_game_team_stats.loc[
            (df_game_team_stats['mp_date'] == mp_date)
            & (df_game_team_stats['nhl_name'] == nhl_name)
            & (df_game_team_stats['HoA'] == 'home'), :]['game_id'].values
        if len(h) == 1:
            return h[0]
        elif len(h) == 0:
            return 80
        else:
            return 82
    if VH == 'V':
        a = df_game_team_stats.loc[
            (df_game_team_stats['mp_date'] == mp_date)
            & (df_game_team_stats['nhl_name'] == nhl_name)
            & (df_game_team_stats['HoA'] == 'away'), :]['game_id'].values
        if len(a) == 1:
            return a[0]
        elif len(a) == 0:
            return 220
        else:
            return 222
def mp_to_bet_add_game_id(df_mp_teams, mp_date, nhl_name, VH):
    """Look up the MoneyPuck game_id for one betting row (date, team, H/V/N).

    Returns the game_id when exactly one match exists; otherwise an error code:
    home ('H'):    80 = no match,  82 = multiple matches
    away ('V'):    10 = no match,  12 = multiple matches
    neutral ('N'): 130 = no match, 132 = multiple matches
    any other VH value: 26

    FIX: removed an unreachable print(mp_date) that sat after `return 80`.
    """
    if VH == 'H':
        h = df_mp_teams.loc[(df_mp_teams['mp_date'] == mp_date)&(df_mp_teams['nhl_name'] == nhl_name)&(df_mp_teams['home_or_away'] == 'HOME'), :]['game_id'].values
        if len(h) == 1:
            return h[0]
        elif len(h) == 0:
            return 80
        elif len(h) > 1:
            return 82
    if VH == 'V':
        a = df_mp_teams.loc[(df_mp_teams['mp_date'] == mp_date)&(df_mp_teams['nhl_name'] == nhl_name)&(df_mp_teams['home_or_away'] == 'AWAY'), :]['game_id'].values
        if len(a) == 1:
            return a[0]
        elif len(a) == 0:
            return 10
        elif len(a) > 1:
            return 12
    if VH == 'N':
        # neutral-site rows carry no home/away flag, so match on date+team only
        n = df_mp_teams.loc[(df_mp_teams['mp_date'] == mp_date)&(df_mp_teams['nhl_name'] == nhl_name), :]['game_id'].values
        if len(n) == 1:
            return n[0]
        elif len(n) == 0:
            return 130
        elif len(n) > 1:
            return 132
    else:
        return 26
def mp_to_bet_add_game_id_no_VH(df_mp_teams, mp_date, nhl_name):
    """Look up a MoneyPuck game_id by date + team only (ignoring home/away).

    Returns the game_id on a unique match, else an error code:
    180 = no match, 182 = multiple matches (the trailing 26 is unreachable
    but kept for parity with the VH-aware variant).
    """
    mask = (df_mp_teams['mp_date'] == mp_date) & (df_mp_teams['nhl_name'] == nhl_name)
    matches = df_mp_teams.loc[mask, :]['game_id'].values
    if len(matches) == 1:
        return matches[0]
    elif len(matches) == 0:
        return 180
    elif len(matches) > 1:
        return 182
    else:
        return 26
#cleaning up df's before merge ...
df_betting = df_betting.loc[:, ['Date', 'season','VH', 'Team', 'Open']].copy()
# Normalize MoneyPuck column names to the join keys used everywhere below.
df_mp_teams.rename(columns={"teamId": "team_id", 'gameDate': 'mp_date', 'gameId':'game_id' }, inplace = True)
##note there are other column names which differ, but team_id, mp_date, game_id are key for joining so we
##make it consistent
df_mp_teams_big = df_mp_teams.copy()
##only has the rows for all situations (divide num rows by 5)
#restrict situation to all
df_mp_teams = df_mp_teams.loc[(df_mp_teams['situation'] == 'all'), :] #&(df_mp_teams['playoffGame']==0), :].copy()
##restrict types to all situations later before merge
df_game_team_stats.drop_duplicates(inplace = True)
# NOTE(review): ids 87-90 are excluded here; presumably non-NHL/all-star
# entries — confirm against team_info.csv.
df_game_team_stats = df_game_team_stats[~df_game_team_stats['team_id'].isin([87,88,89,90])].copy()
df_betting = df_betting[~(df_betting['VH'] == 'N')].copy()
##add season and won to df_game_team_stats (need later to re-do part of missing Leung data )
df_game_team_stats['season'] = np.vectorize(add_season_from_id)(df_game_team_stats['game_id']).copy()
df_game_team_stats['won'] = df_game_team_stats['won'].apply(int).copy()
#change 2008 to 20082009 for df_mp_teams
df_mp_teams['season'] = np.vectorize(fix_mp_season)(df_mp_teams['season']).copy()
# fix these in L_data: 'endDate', 'teamAbbreviation'
L_data['mp_date'] = np.vectorize(L_to_mp_date)(L_data['endDate']).copy()
L_data['nhl_name'] = np.vectorize(L_to_nhl)(L_data['teamAbbreviation']).copy()
L_data['season'] = np.vectorize(L_add_season)(L_data['mp_date'])
#L_data.columns
##fix some labels in df_betting and df_mp_teams
df_betting['mp_date'] = np.vectorize(bet_add_mp_date)(df_betting['Date'], df_betting['season']).copy()
HA_dic = { 'H': 'home', 'V': 'away', 'HOME': 'home', 'AWAY':'away'}
def add_HoA(h_or_a): # h_or_a can be either of the formats used by mp or betting
    return HA_dic[h_or_a]
df_betting['HoA'] = df_betting['VH'].apply(add_HoA).copy()
df_mp_teams['HoA'] = df_mp_teams['home_or_away'].apply(add_HoA).copy()
df_betting['nhl_name'] = df_betting['Team'].apply(bet_to_nhl).copy()
df_game_team_stats['nhl_name'] = df_game_team_stats['team_id'].apply(game_to_nhl).copy()
df_mp_teams['nhl_name'] = df_mp_teams['name'].apply(mp_to_nhl).copy()
# Row-wise game_id lookup; excluded={0} keeps the whole frame out of vectorization.
df_betting['game_id'] = np.vectorize(mp_to_bet_add_game_id_no_VH, excluded={0})(df_mp_teams, df_betting['mp_date'], df_betting['nhl_name'] ).copy()
##get ready to restrict seasons; I reduced it to 20182019 for Leung
seasons = []
for n in range(2008,2019):
    seasons.append(int(str(n)+str(n+1)))
#check seasons look ok
print(seasons)
#restrict seasons
L_data = L_data.loc[L_data['season'].isin(seasons), :].copy()
df_betting = df_betting.loc[df_betting['season'].isin(seasons), :].copy()
#df_game_team_stats = df_game_team_stats.loc[df_game_team_stats['season'].isin(seasons), :].copy()
df_mp_teams = df_mp_teams.loc[df_mp_teams['season'].isin(seasons), :].copy()
#df_mp_teams = df_mp_teams.loc[df_mp_teams['season'].isin(seasons), :].copy()
## the index is no longer consecutive so we reset:
df_betting.reset_index(drop = True, inplace = True)
#df_game_team_stats.reset_index(drop = True, inplace = True)
df_mp_teams.reset_index(drop = True, inplace = True)
L_data.reset_index(drop = True, inplace = True)
##! restrict df_game_team_stats later using the game_id
```
## quick sanity check: pairwise and total overlap of game_ids across the tables
df_temp = df_results.dropna(inplace = False).copy()
A = [set(df_betting['game_id']), set(df_game_team_stats['game_id']), set(df_results['game_id'])]
for i in range(3):
    # FIX: `i + 1 %3` parsed as `i + (1 % 3)` == i + 1, which indexes out of
    # range at i == 2; the intent is the next set, cyclically.
    j = (i + 1) % 3
    print(i, len(A[i]))
    # FIX: original print had unbalanced parentheses (SyntaxError).
    print(i, j, 'inters: ', len(set.intersection(A[i], A[j])))
# FIX: original used ':' instead of ',' after "all: " (SyntaxError).
print("all: ", len(set.intersection(A[0], A[1], A[2])))
```
##only doing first merge now ...
# Keep only the games present in BOTH betting and Kaggle stats, then merge.
common_game_ids = set.intersection( set(df_betting['game_id']) , set(df_game_team_stats['game_id']) ) #, set(df_results['game_id']) )
A = [set(df_betting['game_id']) , set(df_game_team_stats['game_id'])]
print(len(A[0]))
print(len(A[1]))
print("all: ", len( set.intersection(A[0],A[1]) ))
df_betting = df_betting.loc[df_betting['game_id'].isin(common_game_ids), :].copy()
df_game_team_stats = df_game_team_stats.loc[df_game_team_stats['game_id'].isin(common_game_ids), :].copy()
#df_mp_teams = df_mp_teams.loc[df_mp_teams['game_id'].isin(common_game_ids), :].copy()
##check consistency of nhl_names, HoA ...
#df_mp_teams.sort_values(by = ['game_id', 'nhl_name'], inplace = True)
df_game_team_stats.sort_values(by = ['game_id', 'nhl_name'], inplace = True)
df_betting.sort_values(by = ['game_id', 'nhl_name'], inplace = True)
df_betting.reset_index(drop = True, inplace = True)
df_game_team_stats.reset_index(drop = True, inplace = True)
#df_mp_teams.reset_index(drop = True, inplace = True)
##The big merge! Note: I am not using X_merge2 in this note book, but it is important on prev version
X_merge = pd.merge(df_betting, df_game_team_stats, on = ['game_id', 'nhl_name'], how = 'inner', suffixes = ('_bet', '_gm_stats'))
#X_merge2 = pd.merge(X_merge, df_mp_teams, on = ['game_id', 'nhl_name', 'mp_date', 'season'], how = 'inner', suffixes = ('_merge', '_mp_teams'))
#print(2*len(common_game_ids), X_merge2.shape[0])
#drop ATL because Leung's data did
X_merge = X_merge.loc[ (X_merge['nhl_name'] !='ATL'), :].copy()
# Un-suffix the join-relevant columns after the merge.
X_merge.rename(columns = {'season_bet':'season'}, inplace=True)
X_merge.rename(columns = {'HoA_gm_stats':'HoA'}, inplace=True)
#X_merge_L3 = pd.merge(X_merge, L_data, on = ['nhl_name', 'mp_date', 'season'], how = 'left', suffixes = ('_merge', '_L'))
##using this as 'HoA' results in less data loss
##convenient function for turning Vegas odds (-200 = fav, 120 = underdog) into game predictions win 1, lose 0
def Vegas_pred(Open):
    """Turn a Vegas moneyline into a game prediction.

    Negative line (favorite) -> 1 (win); positive line (underdog) -> 0 (loss);
    a line of exactly 0 falls through and yields None.
    """
    if Open < 0:
        return 1
    if Open > 0:
        return 0
# Smoke-test the predictor, then attach Vegas predictions to X_merge.
Vegas_pred(-200)
X_merge['Open'].isnull().sum()
np.vectorize(Vegas_pred)(np.array([2,-2]))
X_merge['Vegas_pred_won'] = X_merge['Open'].apply(Vegas_pred).copy()
# Drop zero-odds rows (Vegas_pred returned None for them above).
X_merge = X_merge.loc[~(X_merge['Open'] == 0), :].copy()
# NOTE(review): the next line duplicates the one before it.
X_merge['won'] = X_merge['won'].apply(int).copy()
X_merge['won'] = X_merge['won'].apply(int)
X_merge['won'] == X_merge['Vegas_pred_won']
##Here we start creating a new df called df_results ... it will have a similar form to df_game
##and is suitable for attaching Leung's data ... (not using df_game because of its GMT dates,
##which only sometimes worked properly)
#From X_merge use the game_id as a seed to make a new df in the form of df_game ... (df_game is annoying bc of date)
##these are the games we will use:
list_game_ids = sorted(set(X_merge['game_id']))
print(len(sorted(set(X_merge['game_id']))))
df_results = pd.DataFrame({'game_id':list_game_ids})
###This is adapted from Leung's code.
def value(frame, key):
    """Extract `key` from a one-row frame as a float.

    NaN when the frame is empty; the sentinel 3.14 when more than one row
    matched (signals a duplicate-join problem downstream).
    """
    matches = frame[key]
    if len(matches) == 0:
        return np.nan
    if len(matches) >= 2:
        return 3.14
    return frame.iloc[0][key].astype(float)
def intValue(frame, key):
    """Extract `key` from a one-row frame as an int.

    NaN when the frame is empty; the sentinel 314 when more than one row
    matched (signals a duplicate-join problem downstream).
    """
    matches = frame[key]
    if len(matches) == 0:
        return np.nan
    if len(matches) >= 2:
        return 314
    return int(frame.iloc[0][key])
def strValue(frame, key):
    """Extract `key` from a one-row frame as-is (typically a string).

    NaN when the frame is empty; the sentinel "314" when more than one row
    matched (signals a duplicate-join problem downstream).
    """
    matches = frame[key]
    if len(matches) == 0:
        return np.nan
    if len(matches) >= 2:
        return "314"
    return frame.iloc[0][key]
def get_home_results(game_id):
    # One-row slice of the global X_merge for the home side of this game.
    return X_merge.loc[(X_merge['game_id'] == game_id)&(X_merge['HoA'] == 'home'), :]
def get_away_results(game_id):
    # Same, for the away side.
    return X_merge.loc[(X_merge['game_id'] == game_id)&(X_merge['HoA'] == 'away'), :]
def add_results(row):
    # Fill one df_results row (keyed by game_id) with per-game results pulled
    # from X_merge via value/intValue/strValue (sentinels flag bad joins).
    home = get_home_results(row['game_id']) #can use home
    away = get_away_results(row['game_id'])
    row['mp_date'] = intValue(home, 'mp_date')
    row['season'] = intValue(home, 'season')
    row['home_team'] = strValue(home, 'nhl_name')
    row['away_team'] = strValue(away, 'nhl_name')
    row['home_odds'] = value(home, 'Open')
    row['away_odds'] = value(away, 'Open')
    row['home_goals'] = value(home, 'goals')
    row['away_goals'] = value(away, 'goals')
    row['home_win'] = intValue(home, 'won') #can use home
    row['settled_in'] = strValue(home, 'settled_in')
    return row
# Populate df_results row by row, then inspect: sentinel "314" counts reveal
# duplicate joins, perc_null reveals missing joins.
df_results = df_results.apply(add_results, axis=1).copy()
df_results.shape
(df_results =="314").sum()
perc_null(df_results)
```
###This is adapted from Leung's code.
##Here we taking data from L_data and attaching to our new df_results (one row for each game id in X_merge)
##we do not use X_merge because it has two rows for every game id (H and A)
##get results info from X_merge:
##game_id restricts season ... check
def get_home_results(game_id):
    # One-row slice of the global X_merge for the home side of this game.
    return X_merge.loc[(X_merge['game_id'] == game_id)&(X_merge['HoA'] == 'home'), :]
def get_away_results(game_id):
    # Same, for the away side.
    return X_merge.loc[(X_merge['game_id'] == game_id)&(X_merge['HoA'] == 'away'), :]
#below row will come from df_result (game_id ) and from will be the either home info or away info from X_merge
def value(frame, key):
    """Extract `key` from a one-row frame as a float.

    v2: any non-unique match (empty OR duplicated) yields NaN instead of the
    earlier 3.14 sentinel, so bad joins simply become missing data.
    """
    matches = frame[key]
    if len(matches) != 1:
        return np.nan
    return frame.iloc[0][key].astype(float)
def intValue(frame, key):
    """Extract `key` from a one-row frame as an int.

    v2: any non-unique match (empty OR duplicated) yields NaN instead of the
    earlier 314 sentinel, so bad joins simply become missing data.
    """
    matches = frame[key]
    if len(matches) != 1:
        return np.nan
    return int(frame.iloc[0][key])
def strValue(frame, key):
    """Extract `key` from a one-row frame as-is (typically a string).

    v2: any non-unique match (empty OR duplicated) yields NaN instead of the
    earlier "314" sentinel, so bad joins simply become missing data.
    """
    matches = frame[key]
    if len(matches) != 1:
        return np.nan
    return frame.iloc[0][key]
def add_results(row):
    """Fill one df_results row (keyed by game_id) with ids, odds, and targets
    pulled from X_merge; non-unique matches become NaN via the v2 helpers."""
    home = get_home_results(row['game_id'])
    away = get_away_results(row['game_id'])
    # id rows (to go with game_id)
    # FIX: the line above was missing its leading '#', a SyntaxError.
    row['mp_date'] = intValue(home, 'mp_date') #we can use the home one for these 2
    row['season'] = intValue(home, 'season')
    row['home_team'] = strValue(home, 'nhl_name')
    row['away_team'] = strValue(away, 'nhl_name')
    #odds (treat as a target ?)
    row['home_odds'] = value(home, 'Open')
    row['away_odds'] = value(away, 'Open')
    #target vars
    row['home_goals'] = value(home, 'goals')
    row['away_goals'] = value(away, 'goals')
    row['goal_difference']=value(home, 'goals')-value(away, 'goals') #keep same name as before
    row['home_win'] = intValue(home, 'won')
    row['settled_in'] = strValue(home, 'settled_in')
    return row
```
# Rebuild df_results with the v2 helpers (NaN instead of sentinels), then audit.
X_merge.columns
df_results = df_results.apply(add_results, axis=1).copy()
perc_null(df_results)
##
##df_results.dropna(inplace=True)
##get_L_stats is called getStats in Leung's code
def get_L_stats(nhl_name, mp_date): #season is restriced because date is fixed as date -1
    # Cumulative Leung stats for this team as of the *previous* day, to avoid
    # leaking same-day results into the features.
    # NOTE(review): mp_date is an integer yyyymmdd, so `mp_date - 1` is only a
    # calendar day when the game is not on the 1st of a month (e.g.
    # 20141001 - 1 = 20141000, which matches nothing and yields NaN features).
    return L_data.loc[(L_data['nhl_name'] == nhl_name)& (L_data['mp_date'] == mp_date -1), :] #(!) -1 VERY important so you don't leak!
#the L_data stats are cumulative up to <= date ... so we grab cumulative stats up to mp_date = mp_date -1
##note the mp_date-1 !! this is what gives us the *previous day's* cumulative stats for that team
##if the frame is empty value() will return NaN; this should happen for all first games for each team.
##value is defined above when we created df_results from X_merge
def add_stats(row):
    """Attach home-minus-away Corsi/Fenwick/goal/shot differential features to one row.

    Both frames come from get_L_stats, which already lags by one day, so every
    feature only uses information available before the game.
    """
    home = get_L_stats(row['home_team'], row['mp_date'])
    away = get_L_stats(row['away_team'], row['mp_date'])

    def diff(key):
        # plain home-minus-away differential of one L_data column
        return value(home, key) - value(away, key)

    def ratio_diff(num_key, den_key):
        # home-minus-away differential of a per-event ratio (e.g. goals per Corsi)
        return (value(home, num_key) / value(home, den_key)) - (value(away, num_key) / value(away, den_key))

    def save_diff(num_key, den_key):
        # home-minus-away differential of a save rate, 1 - (against/attempts)
        return (1 - (value(home, num_key) / value(home, den_key))) - (1 - (value(away, num_key) / value(away, den_key)))

    row['CF%'] = diff('cF%')
    row['CSh%'] = ratio_diff('gF', 'cF') * 100
    row['CSv%'] = save_diff('gA', 'cA') * 100
    row['FF%'] = diff('fF%')
    row['FSh%'] = ratio_diff('gF', 'fF') * 100
    row['FSv%'] = save_diff('gA', 'fA') * 100
    row['GDiff'] = (value(home, 'gF60') - value(home, 'gA60')) - (value(away, 'gF60') - value(away, 'gA60'))
    row['GF%'] = diff('gF%')
    row['PDO'] = diff('pDO')
    row['PENDiff'] = (value(home, 'pEND') - value(home, 'pENT')) - (value(away, 'pEND') - value(away, 'pENT'))
    row['SF%'] = diff('sF%')
    row['SDiff'] = (value(home, 'sF60') - value(home, 'sA60')) - (value(away, 'sF60') - value(away, 'sA60'))
    row['Sh%'] = ratio_diff('gF', 'sF') * 100
    row['Sv'] = diff('sv%')
    # L_data is missing GP, W, FOW% -- those are added later from the mp tables
    return row
##test for code
L_data.loc[(L_data['nhl_name'] == 'LAK')& (L_data['mp_date'] == 20151112 -1), :]['cF%']
#test getStats:
get_L_stats('LAK', 20151112).iloc[0]['cF'].astype(float)
#test empty frame if no game ...
len(get_L_stats('LAK', 20121112)['cF'])
df_results_L_data = df_results.apply(add_stats, axis =1).copy() ##new df combines X_merge (results) and L_data
#check on diff between X_merge and L_data games:
#print(L_data.groupby(['mp_date, 'nhl_team']) ... hard to do grup by b/c team appears on all dates (whether played or not)
perc_null(df_results_L_data)
# one of the games have 0 odds ... we will remove those
df_results_L_data= df_results_L_data.loc[ (df_results_L_data['home_odds'] != 0), :].copy()
df_results_L_data.shape
# BUG FIX: df_results_L_data2 must be created before perc_null is called on it
# (the original called perc_null first, which raises NameError when run in order)
df_results_L_data2 = df_results_L_data.dropna(inplace=False)  # drop na first ~25%
perc_null(df_results_L_data2)
df_results_L_data2
df_mp_teams.loc[(df_mp_teams['season'] == 20162017)&(df_mp_teams['nhl_name'] == 'LAK')&(df_mp_teams['mp_date'] <= 20141015), : ]['team'].sum()
#df_LA = df_mp_teams.loc[(df_mp_teams['season'] == 20082009)&(df_mp_teams['nhl_name'] == 'LAK')&(df_mp_teams['mp_date'] <= 20090230), : ]
#df_LA2 = df_game_team_stats.loc[(df_game_team_stats['season'] == 20082009)&(df_game_team_stats['nhl_name'] == 'LAK')&(df_game_team_stats['game_id'] <= 2008020919), :]
##from old notebook ... aha ... the counyt and sum are getting messed up by null values I think
##add stats part two ... using mp
def get_mp_stats(season, nhl_name, mp_date):  # note each game has two rows, with each team listed ONCE under nhl_name
    # mp_date - 1: only strictly-previous days, so the current game never leaks in
    return df_mp_teams.loc[(df_mp_teams['season'] == season)&(df_mp_teams['nhl_name'] == nhl_name)&(df_mp_teams['mp_date'] <= mp_date-1), : ]
#game_id is used as date (it is monotone within a season -- checked in a later cell):
def get_game_stats(season, nhl_name, game_id):
    # game_id - 1: only strictly-earlier games this season
    return df_game_team_stats.loc[(df_game_team_stats['season'] == season)&(df_game_team_stats['nhl_name'] == nhl_name)&(df_game_team_stats['game_id'] <= game_id-1), :]
def do_sum(frame, key):
    """Sum frame[key] over all rows; NaN when there are no rows."""
    if frame[key].empty:
        return np.nan
    # sum the whole (cumulative) frame, not just the first row
    return frame[key].sum()
def do_count(frame, key):
    """Count non-null entries of frame[key]; NaN when the frame is empty.

    BUG FIX: the old `>= 2` branch returned the debug sentinel 3.14, so every
    team with two or more prior games got a bogus games-played count, which
    then corrupted W% downstream. Counting over the whole cumulative frame is
    exactly the intended behaviour here.
    """
    if len(frame[key]) == 0:
        return np.nan
    return frame[key].count()
def add_missing_stats(row):
    """Add W% and FOW% (missing from L_data) to one row, using the mp tables.

    Both getters lag their date/game_id by one, so no information from the
    current game leaks into the features.
    """
    mp_stats_home = get_mp_stats(row['season'], row['home_team'], row['mp_date'])
    mp_stats_away = get_mp_stats(row['season'], row['away_team'], row['mp_date'])
    game_stats_home = get_game_stats(row['season'], row['home_team'], row['game_id'])
    game_stats_away = get_game_stats(row['season'], row['away_team'], row['game_id'])
    GP_home = do_count(mp_stats_home, 'game_id')  # game_id is just something to count
    GP_away = do_count(mp_stats_away, 'game_id')
    W_home = do_sum(game_stats_home, 'won')
    W_away = do_sum(game_stats_away, 'won')
    Wper_home = W_home / GP_home
    Wper_away = W_away / GP_away
    row['W%'] = Wper_home - Wper_away
    FOL_home = do_sum(mp_stats_home, 'faceOffsWonAgainst')
    FOL_away = do_sum(mp_stats_away, 'faceOffsWonAgainst')
    FOW_home = do_sum(mp_stats_home, 'faceOffsWonFor')
    FOW_away = do_sum(mp_stats_away, 'faceOffsWonFor')
    FOper_home = FOW_home / (FOW_home + FOL_home)
    FOper_away = FOW_away / (FOW_away + FOL_away)
    row['FOW%'] = FOper_home - FOper_away
    return row  # BUG FIX: apply() needs the row back, otherwise every row becomes None
##a test for the code
df_LA = df_mp_teams.loc[(df_mp_teams['season'] == 20082009)&(df_mp_teams['nhl_name'] == 'LAK')&(df_mp_teams['mp_date'] <= 20090230), : ]
df_LA2 = df_game_team_stats.loc[(df_game_team_stats['season'] == 20082009)&(df_game_team_stats['nhl_name'] == 'LAK')&(df_game_team_stats['game_id'] <= 2008020919), :]
print(df_LA['game_id'].count())
print(df_LA2['game_id'].count())
#check game_id works as well as date:
list(df_LA2['game_id']) == list(df_LA['game_id']) #using game_id as date will work within a season
#try dropping na first
df_results_L_data3 = df_results_L_data2.apply(add_missing_stats, axis =1).copy()
#perc_null(df_mp_teams)
df_results_L_data3
```
##add stats part two ... using mp to add FOW% and W% ... Leung's data is missing the entries 'GP' etc needed to do those
def get_mp_stats(season, nhl_name, mp_date):  # note each game has two rows, with each team listed ONCE under nhl_name
    # mp_date - 1 (!): strictly-previous days only, so the current game never leaks in
    return df_mp_teams.loc[(df_mp_teams['season'] == season)&(df_mp_teams['nhl_name'] == nhl_name)&(df_mp_teams['mp_date'] <= mp_date-1), : ]
#game_id is used as date (cell above checks this works)
def get_game_stats(season, nhl_name, game_id):  # game_id is monotone within a season, so it works as a date here
    # game_id - 1 (!): strictly-earlier games only
    return df_game_team_stats.loc[(df_game_team_stats['season'] == season)&(df_game_team_stats['nhl_name'] == nhl_name)&(df_game_team_stats['game_id'] <= game_id-1), :]
def do_sum(frame, key):
    """Sum frame[key] over all rows; NaN when the frame is empty.

    BUG FIX: the old `>= 2` branch returned NaN whenever a team had two or
    more prior games -- but summing over the whole cumulative frame is exactly
    the point (see the earlier do_sum and the trailing comment about removing
    .iloc[0]). The diagnostic prints also assumed a 'game_id' column that is
    not always present, so they are dropped.
    """
    if len(frame[key]) == 0:
        return np.nan
    return frame[key].sum()  # whole frame, not .iloc[0]
def do_count(frame, key):
    """Count non-null entries of frame[key]; NaN when the frame is empty.

    BUG FIX: the old `>= 2` branch returned NaN, so any team with two or more
    prior games got no games-played count at all -- counting the whole
    cumulative frame is the intended behaviour. The diagnostic prints assumed
    a 'game_id' column and fired on every multi-row frame, so they are dropped.
    """
    if len(frame[key]) == 0:
        return np.nan
    return frame[key].count()
#will be applied to rows of df_results_L_data
def add_missing_stats(row):
    """Add W% and FOW% to one df_results_L_data row from the lagged mp tables.

    BUG FIXES vs the original:
    - Wper_away was divided by GP_home instead of GP_away.
    - the function never returned the row, so apply() produced None rows.
    """
    mp_stats_home = get_mp_stats(row['season'], row['home_team'], row['mp_date'])
    mp_stats_away = get_mp_stats(row['season'], row['away_team'], row['mp_date'])
    game_stats_home = get_game_stats(row['season'], row['home_team'], row['game_id'])
    game_stats_away = get_game_stats(row['season'], row['away_team'], row['game_id'])
    GP_home = do_count(mp_stats_home, 'game_id')  # game_id is just something to count
    GP_away = do_count(mp_stats_away, 'game_id')
    W_home = do_sum(game_stats_home, 'won')
    W_away = do_sum(game_stats_away, 'won')
    Wper_home = W_home / GP_home
    Wper_away = W_away / GP_away  # BUG FIX: was W_away / GP_home
    row['W%'] = Wper_home - Wper_away
    FOL_home = do_sum(mp_stats_home, 'faceOffsWonAgainst')
    FOL_away = do_sum(mp_stats_away, 'faceOffsWonAgainst')
    FOW_home = do_sum(mp_stats_home, 'faceOffsWonFor')
    FOW_away = do_sum(mp_stats_away, 'faceOffsWonFor')
    FOper_home = FOW_home / (FOW_home + FOL_home)
    FOper_away = FOW_away / (FOW_away + FOL_away)
    row['FOW%'] = FOper_home - FOper_away
    return row  # BUG FIX: required by apply()
```
##should be about same NaN as df_result, around 29%?
df_results_L_data3 = df_results_L_data2.apply(add_missing_stats, axis =1).copy()
df_results_L_data3
perc_null(df_results_L_data3)
df_results_L_data.columns
```
##from Git
##add stats part two ... using mp
# NOTE(review): this is the original version kept only for reference; the
# corrected versions above lag the dates, drop the 3.14 debug sentinel,
# divide by the right GP, and return the row. Do not use this copy.
def get_mp_stats(season, nhl_name, mp_date):
    # NOTE(review): no mp_date-1 here -- includes the current game's stats (leakage)
    return df_mp_teams.loc[(df_mp_teams['season'] == season)&(df_mp_teams['nhl_name'] == nhl_name)&(df_mp_teams['mp_date'] <= mp_date), : ]
#game_id is used as date:
def get_game_stats(season, nhl_name, game_id):
    # NOTE(review): no game_id-1 here either -- same leakage caveat
    return df_game_team_stats.loc[(df_game_team_stats['season'] == season)&(df_game_team_stats['nhl_name'] == nhl_name)&(df_game_team_stats['game_id'] <= game_id), :]
def do_sum(frame, key):
    if len(frame[key]) ==0:
        return np.NaN
    elif len(frame[key]) >=2:
        return 3.14  # NOTE(review): debug sentinel -- poisons any team with 2+ games
    else:
        return frame[key].sum() ##! remove the .iloc[0] you want the whole frame now to count or sum
def do_count(frame, key):
    if len(frame[key]) ==0:
        return np.NaN
    elif len(frame[key]) >=2:
        return 3.14  # NOTE(review): debug sentinel, see do_sum above
    else:
        return frame[key].count()
def add_missing_stats(row):
    mp_stats_home = get_mp_stats(row['season'], row['home_team'], row['mp_date'])
    mp_stats_away = get_mp_stats(row['season'], row['away_team'], row['mp_date'])
    game_stats_home = get_game_stats(row['season'], row['home_team'], row['game_id'])
    game_stats_away = get_game_stats(row['season'], row['away_team'], row['game_id'])
    GP_home = do_count(mp_stats_home, 'game_id') #game_id is just to count
    GP_away = do_count(mp_stats_away, 'game_id')
    W_home = do_sum(game_stats_home, 'won')
    W_away = do_sum(game_stats_away, 'won')
    Wper_home = W_home/GP_home
    Wper_away = W_away/GP_home  # NOTE(review): divides by GP_home -- looks like it should be GP_away
    row['W%'] = Wper_home - Wper_away
    FOL_home= do_sum(mp_stats_home,'faceOffsWonAgainst')
    FOL_away= do_sum(mp_stats_away,'faceOffsWonAgainst')
    FOW_home = do_sum(mp_stats_home,'faceOffsWonFor')
    FOW_away = do_sum(mp_stats_away,'faceOffsWonFor')
    FOper_home = FOW_home/(FOW_home+FOL_home)
    FOper_away = FOW_away/(FOW_away+FOL_away)
    row['FOW%'] = FOper_home-FOper_away
    # NOTE(review): no `return row` -- apply() on this version yields None rows
df_results_L_data5 = df_results_L_data.head(60).apply(add_missing_stats, axis =1).copy()
```
df_results_L_data5
##constructing FavoriteW% .. measures how accurately the betting company predicted games in past for both home team and away team
#fav_stats = df_results_L_data.copy() #we create an auxiliary stats repo ... so it doesn't get all tangled up
fav_stats = X_merge.copy() #renamed fav_stats as convenience; X_merge has a row for every team and pred_Vegas_won =0,1
def get_fav_stats(nhl_name, mp_date, season):
    """Rows of fav_stats (a copy of X_merge) for this team/season, strictly before mp_date.

    BUG FIXES: the season filter compared fav_stats['nhl_name'] to the season
    value (it now compares fav_stats['season']); and the lookup now runs on
    fav_stats itself instead of X_merge, for consistency with the masks.
    The mp_date - 1 is still required so Vegas accuracy never uses the
    current game's result (no leakage).
    """
    return fav_stats.loc[(fav_stats['season'] == season) & (fav_stats['nhl_name'] == nhl_name) & (fav_stats['mp_date'] <= mp_date - 1), :]  # (!) -1 VERY important so you don't leak!
##note the mp_date-1 we still need this because fav% is based on vegas pred plus actual results
##if the frame is empty value() will return NaN; this should happen for all first games for each team.
def do_sum(frame, key):
    """Total of column `key` over the whole frame; NaN for an empty frame."""
    series = frame[key]
    return np.nan if len(series) == 0 else series.sum()
def do_count(frame, key):
    """Count non-null entries of frame[key]; NaN when the frame is empty.

    BUG FIX: the `>= 2` branch returned the debug sentinel 3.14, giving every
    team with two or more prior games a bogus games-played count and thereby
    corrupting the FavoritesW% computation below.
    """
    if len(frame[key]) == 0:
        return np.nan
    return frame[key].count()
def count_when_same(frame, key1, key2):  # homeFav
    """Count rows where frame[key1] equals frame[key2]; NaN on empty/mismatched columns.

    BUG FIX: both guards compared key1 against key1, so the empty-check missed
    an empty key2 column and the length-mismatch check could never fire.
    """
    if len(frame[key1]) == 0 or len(frame[key2]) == 0:
        return np.nan
    elif len(frame[key1]) != len(frame[key2]):
        return np.nan
    else:
        return (frame[key1] == frame[key2]).sum()
#def count_when_diff(frame, key1, key2): #homeFav
# return(frame[key1] != frame[key2]).sum()
#rows of df_results_L_data
def add_fav(row):
    """Add FavoritesW%: how accurately Vegas predicted both teams' past games.

    home/away contain *all* strictly-previous games of each team this season
    (home or away alike).
    """
    home = get_fav_stats(row['home_team'], row['mp_date'], row['season'])
    away = get_fav_stats(row['away_team'], row['mp_date'], row['season'])
    GP_home = do_count(home, 'game_id')  # game_id is just something to count
    GP_away = do_count(away, 'game_id')
    home_win_pred_corr = count_when_same(home, 'Vegas_pred_won', 'won')
    away_win_pred_corr = count_when_same(away, 'Vegas_pred_won', 'won')
    home_fav = home_win_pred_corr / GP_home  # Vegas accuracy on this home team's past games
    away_fav = away_win_pred_corr / GP_away  # same for the away team
    # the product measures combined accuracy, e.g. 0.9*0.9 = 0.81, 0.9*0.5 = 0.45
    row['FavoritesW%'] = home_fav * away_fav
    return row  # BUG FIX: apply() needs the row returned, otherwise rows become None
##tests
get_fav_stats('LAK', 20151112, 20152016 )
df_results_L_data4 = df_results_L_data2.head(20).apply(add_fav, axis =1).copy()
df_results_L_data4
#30% missing ... I'll take it!
df_results_L_data4
#df_results_L_data.shape
##!! I think I drop a lot here ... 30% ish (because Leung's data seems to be missing about 30% of games)
data_LJ = df_results_L_data.dropna(inplace=False).copy()
#df_results_L_data.shape
data_LJ.to_csv('/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Shaped_Data/data_LJ.csv')
##exrta investigation
df_temp = df_results.dropna(inplace = False).copy()
# How much do the three game_id sets overlap?
A = [set(df_betting['game_id']), set(df_game_team_stats['game_id']), set(df_results['game_id'])]
for i in range(3):
    j = (i + 1) % 3  # BUG FIX: `i + 1 %3` parses as i + (1 % 3) == i + 1, overflowing the list at i == 2
    print(i, len(A[i]))
    print(i, j, 'inters: ', len(set.intersection(A[i], A[j])))  # BUG FIX: closing paren was missing
print("all: ", len(set.intersection(A[0], A[1], A[2])))  # BUG FIX: ':' was used instead of ','
common_game_ids = set.intersection( set(df_betting['game_id']) , set(df_game_team_stats['game_id']) , set(df_results['game_id']) )
df_betting = df_betting.loc[df_betting['game_id'].isin(common_game_ids), :].copy()
df_game_team_stats = df_game_team_stats.loc[df_game_team_stats['game_id'].isin(common_game_ids), :].copy()
df_mp_teams = df_mp_teams.loc[df_mp_teams['game_id'].isin(common_game_ids), :].copy()
```
| github_jupyter |
```
SAMPLE_TEXT = """
1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581
"""
SAMPLE_TEXT_2 = """
11637517422274862853338597396444961841755517295286
13813736722492484783351359589446246169155735727126
21365113283247622439435873354154698446526571955763
36949315694715142671582625378269373648937148475914
74634171118574528222968563933317967414442817852555
13191281372421239248353234135946434524615754563572
13599124212461123532357223464346833457545794456865
31254216394236532741534764385264587549637569865174
12931385212314249632342535174345364628545647573965
23119445813422155692453326671356443778246755488935
22748628533385973964449618417555172952866628316397
24924847833513595894462461691557357271266846838237
32476224394358733541546984465265719557637682166874
47151426715826253782693736489371484759148259586125
85745282229685639333179674144428178525553928963666
24212392483532341359464345246157545635726865674683
24611235323572234643468334575457944568656815567976
42365327415347643852645875496375698651748671976285
23142496323425351743453646285456475739656758684176
34221556924533266713564437782467554889357866599146
33859739644496184175551729528666283163977739427418
35135958944624616915573572712668468382377957949348
43587335415469844652657195576376821668748793277985
58262537826937364893714847591482595861259361697236
96856393331796741444281785255539289636664139174777
35323413594643452461575456357268656746837976785794
35722346434683345754579445686568155679767926678187
53476438526458754963756986517486719762859782187396
34253517434536462854564757396567586841767869795287
45332667135644377824675548893578665991468977611257
44961841755517295286662831639777394274188841538529
46246169155735727126684683823779579493488168151459
54698446526571955763768216687487932779859814388196
69373648937148475914825958612593616972361472718347
17967414442817852555392896366641391747775241285888
46434524615754563572686567468379767857948187896815
46833457545794456865681556797679266781878137789298
64587549637569865174867197628597821873961893298417
45364628545647573965675868417678697952878971816398
56443778246755488935786659914689776112579188722368
55172952866628316397773942741888415385299952649631
57357271266846838237795794934881681514599279262561
65719557637682166874879327798598143881961925499217
71484759148259586125936169723614727183472583829458
28178525553928963666413917477752412858886352396999
57545635726865674683797678579481878968159298917926
57944568656815567976792667818781377892989248891319
75698651748671976285978218739618932984172914319528
56475739656758684176786979528789718163989182927419
67554889357866599146897761125791887223681299833479
"""
from dataclasses import dataclass
from typing import Dict, Set, Optional, List
def tokenize_line(line):
    """Turn a string of digits into a list of ints, one per character."""
    return [int(ch) for ch in line]
def parse_text(raw_text):
    """Tokenize every non-empty line of raw_text into a row of digit ints."""
    rows = []
    for line in raw_text.split("\n"):
        if line:
            rows.append(tokenize_line(line))
    return rows
def read_input():
    """Return the entire puzzle input from input.txt as one string."""
    with open("input.txt", "rt") as f:
        return f.read()
# Use Dijkstra's algorithm since it will definitely work.
@dataclass(frozen=True)
class Position:
    """An (x, y) grid coordinate; frozen so instances are hashable dict/set keys."""
    x: int
    y: int
@dataclass
class Cost:
    """Best-known path cost to a node, plus the predecessor on that path."""
    parent: Position
    cost: int

    def update(self, new_parent: Position, new_cost: int) -> bool:
        """Adopt the new parent/cost if strictly cheaper; return whether we changed."""
        if new_cost >= self.cost:
            return False
        self.parent = new_parent
        self.cost = new_cost
        return True
def find_cheapest_node(costs: Dict[Position, Cost], visited: Set[Position]) -> Optional[Position]:
    """Return the unvisited frontier node with the smallest tentative cost, or None.

    PERF FIX: this runs once per visited node, so a single min() pass (O(n))
    replaces the previous full sort of the frontier (O(n log n)) per call.
    Ties resolve to the first-encountered minimum, same as a stable sort.
    """
    cheapest = min(
        (item for item in costs.items() if item[0] not in visited),
        key=lambda item: item[1].cost,
        default=None,
    )
    return None if cheapest is None else cheapest[0]
def get_route(start: Position, end: Position, costs: Dict[Position, Cost]) -> List[Position]:
    """Follow parent links from end back to start; return the path start-to-end."""
    trail = [end]
    while True:
        predecessor = costs[trail[-1]].parent
        trail.append(predecessor)
        if predecessor == start:
            break
    return list(reversed(trail))
def get_adjacent_positions(from_position: Position, weights: Dict[Position, int], visited: Set[Position]) -> List[Position]:
    """Up/down/left/right neighbours that exist on the grid and are unvisited."""
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    candidates = (Position(from_position.x + dx, from_position.y + dy) for dx, dy in offsets)
    return [p for p in candidates if p in weights and p not in visited]
def find_cheapest_path(weights: Dict[Position, int], start: Position, end: Position):
    """Dijkstra's algorithm from start to end over the weighted grid.

    Returns (Cost, route) in the normal case, raises ValueError if end is
    unreachable.
    NOTE(review): the start == end shortcut returns (0, []) -- an int, not a
    Cost object -- so a caller doing `cost.cost` would break on that edge case.
    """
    costs: Dict[Position, Cost] = dict()
    visited: Set[Position] = set()
    if start == end:
        return 0, []
    visited.add(start)
    # Seed the frontier with the start node's neighbours.
    for neighbour in get_adjacent_positions(start, weights, visited):
        costs[neighbour] = Cost(start, weights[neighbour])
    node = find_cheapest_node(costs, visited)
    i = 0
    while node is not None:
        # Progress logging every 10k visited nodes (useful on the tiled map).
        i += 1
        if i == 10000:
            pct_visited = len(visited) / ((end.x + 1) * (end.y + 1))
            print(pct_visited)
            i = 0
        running_total = costs[node].cost
        # Relax all unvisited neighbours of the cheapest frontier node.
        for neighbour in get_adjacent_positions(node, weights, visited):
            new_cost = running_total + weights[neighbour]
            if neighbour not in costs:
                costs[neighbour] = Cost(node, new_cost)
            else:
                costs[neighbour].update(node, new_cost)
        visited.add(node)
        node = find_cheapest_node(costs, visited)
    if end not in costs:
        raise ValueError(f"No route to {end}")
    return costs[end], get_route(start, end, costs)
def make_weights(lines) -> Dict[Position, int]:
    """Map every grid coordinate Position(x, y) to its risk value.

    Assumes a rectangular grid; the width of row 0 is used for every row,
    matching the original behaviour.
    """
    width = len(lines[0]) if lines else 0
    return {Position(x, y): lines[y][x] for y in range(len(lines)) for x in range(width)}
def find_end(weights: Dict[Position, int]) -> Position:
    """Bottom-right corner of the grid: the largest x and y seen (0 if empty)."""
    max_x = 0
    max_y = 0
    for pos in weights:
        if pos.x > max_x:
            max_x = pos.x
        if pos.y > max_y:
            max_y = pos.y
    return Position(max_x, max_y)
def print_weights(weights: Dict[Position, int]):
    """Print the grid as rows of digits, top-left to bottom-right."""
    corner = find_end(weights)
    rows = []
    for y in range(corner.y + 1):
        rows.append("".join(str(weights[Position(x, y)]) for x in range(corner.x + 1)))
    # trailing '\n' matches the original string built with one '\n' per row
    print("\n".join(rows) + "\n")
weights = make_weights(parse_text(SAMPLE_TEXT))
cost, route = find_cheapest_path(weights, Position(0, 0), find_end(weights))
cost.cost
weights = make_weights(parse_text(read_input()))
cost, route = find_cheapest_path(weights, Position(0, 0), find_end(weights))
cost.cost
print_weights(make_weights(parse_text(SAMPLE_TEXT)))
def multiply_weights(original_weights: Dict[Position, int], times):
    """Tile the grid `times` x `times` (AoC 2021 day 15 part 2).

    Each tile adds its tile-distance (tiles right + tiles down) to every
    cell's risk, wrapping values above 9 back around to 1-9.
    """
    base = find_end(original_weights)
    width = base.x + 1
    height = base.y + 1
    tiled = {}
    for y in range(height * times):
        for x in range(width * times):
            bump = (x // width) + (y // height)
            risk = original_weights[Position(x % width, y % height)] + bump
            if risk > 9:
                risk -= 9  # max bump is 8, so one subtraction always suffices
            tiled[Position(x, y)] = risk
    return tiled
multiply_weights(make_weights(parse_text(SAMPLE_TEXT)), 5) == make_weights(parse_text(SAMPLE_TEXT_2))
new_weights = multiply_weights(make_weights(parse_text(SAMPLE_TEXT)), 5)
cost, route = find_cheapest_path(new_weights, Position(0, 0), find_end(new_weights))
cost.cost
# Need a better solution for the big map. The approach above takes way too long
new_weights = multiply_weights(make_weights(parse_text(read_input())), 5)
cost, route = find_cheapest_path(new_weights, Position(0, 0), find_end(new_weights))
cost.cost
```
| github_jupyter |
```
import os
from astropy.time import Time
import astropy.coordinates as coord
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from tqdm.notebook import tqdm
from twobody import TwoBodyKeplerElements, KeplerOrbit
from twobody import (eccentric_anomaly_from_mean_anomaly,
true_anomaly_from_eccentric_anomaly)
def true_anomaly(orbit, time):
    """True anomaly of a twobody KeplerOrbit at the given astropy Time(s).

    Chains mean anomaly -> eccentric anomaly (Kepler's equation) -> true
    anomaly using the twobody helper functions.
    """
    # mean anomaly: M = 2*pi*(t - t0)/P - M0, on the TCB time scale;
    # the dimensionless_angles equivalency lets the ratio convert to radians
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        M = 2*np.pi * (time.tcb - orbit.t0.tcb) / orbit.P - orbit.M0
        M = M.to(u.radian)
    # eccentric anomaly via Kepler's equation
    E = eccentric_anomaly_from_mean_anomaly(M, orbit.e)
    # true anomaly
    return true_anomaly_from_eccentric_anomaly(E, orbit.e)
def hb_model(S, i, omega, f, R, a):
    """Heartbeat-star flux model: S * (1 - 3 sin^2(i) sin^2(f - omega)) / (R/a)^3.

    S is an overall scale, i the inclination, omega the argument of pericenter,
    f the true anomaly, R the instantaneous separation and a the semi-major axis.
    """
    geometry = 1 - 3 * np.sin(i) ** 2 * np.sin(f - omega) ** 2
    separation_term = (R / a) ** 3
    return S * geometry / separation_term
# Model/grid parameters for the heartbeat-flux figure.
P = 20 * u.day
e = 0.5
S = 1.
epoch = Time(Time.now().mjd, format='mjd')
t = epoch + np.linspace(0, P.value, 8192)
# One 6x6 panel grid per eccentricity: omega varies by row, inclination by column.
for e in [0.3, 0.5, 0.7]:
    fig, axes = plt.subplots(6, 6, figsize=(16, 16),
                             sharex=True, sharey=True,
                             constrained_layout=True)
    n = 0
    omegas = np.linspace(-90, 90, axes.shape[0]) * u.deg
    incls = np.linspace(6, 90, axes.shape[0]) * u.deg
    for omega in omegas:
        for incl in incls:
            ax = axes.flat[n]
            elem = TwoBodyKeplerElements(P=P, e=e,
                                         m1=1.*u.Msun, m2=0.25*u.Msun,
                                         omega=omega, i=incl,
                                         t0=epoch)
            orbit1 = KeplerOrbit(elem.primary)
            orbit2 = KeplerOrbit(elem.secondary)
            x1 = orbit1.reference_plane(t)
            x2 = orbit2.reference_plane(t)
            # instantaneous separation |r1 - r2| (positions only, drop velocities)
            R = (x1.data.without_differentials() - x2.data.without_differentials()).norm()
            a = elem.a
            f = true_anomaly(orbit1, t)
            # orbital phase folded into [-0.5, 0.5)
            phase = ((t.mjd - t.mjd.min()) / P.to_value(u.day) + 0.5) % 1 - 0.5
            y = hb_model(S, elem.i, elem.omega, f, R, a)
            # sort by phase so the line plot is monotone in x
            y = y[phase.argsort()]
            phase = phase[phase.argsort()]
            ax.plot(phase, y, marker='', ls='-', lw=2, color='k')
            # repeat the curve one phase left/right so panel edges look continuous
            ax.plot(phase - 1, y, marker='', ls='-', lw=2, color='k')
            ax.plot(phase + 1, y, marker='', ls='-', lw=2, color='k')
            ax.axhline(0, marker='', zorder=-100,
                       color='tab:blue', alpha=0.2)
            # plt.setp(ax.get_xticklabels(), fontsize=8)
            # plt.setp(ax.get_yticklabels(), fontsize=8)
            ax.xaxis.set_visible(False)
            ax.yaxis.set_visible(False)
            n += 1
    # sharex=True propagates this limit to every panel
    ax.set_xlim(-0.75, 0.75)
    # second pass: label each panel with its (omega, i) in the lower-left corner
    n = 0
    for omega in omegas:
        for incl in incls:
            ax = axes.flat[n]
            xlim = ax.get_xlim()
            xspan = xlim[1] - xlim[0]
            ylim = ax.get_ylim()
            yspan = ylim[1] - ylim[0]
            ax.text(xlim[0] + 0.05 * xspan,
                    ylim[0] + 0.05 * yspan,
                    (rf'$\omega = {omega.value:.1f}^\circ$' +
                     f'\n$i = {incl.value:.0f}^\circ$'),
                    ha='left', va='bottom', fontsize=12)
            n += 1
    fig.suptitle(f'$e={e:.1f}$', fontsize=16)
    fig.set_facecolor('w')
```
| github_jupyter |
# Exemplo 1
## Problema
Considere um arquivo de entrada no formato CSV (comma separated values) com informações relativas a acidentes na aviação civil brasileira nos últimos 10 anos (arquivo anv.csv)
As informações estão separadas pelo caracter separador ~ e entre “” (aspas) conforme o exemplo abaixo:
```javascript
"201106142171203"~"PPGXE"~"AEROCLUBE"~"AVIÃO"~"NEIVA INDUSTRIA AERONAUTICA"~"56- C"~"PAUL"~"PISTÃO"~"MONOMOTOR"~"660"~"LEVE"~"2"~"1962"~"BRASIL"~"BRASIL"~"PRI"~" INSTRUÇÃO"~"SDPW"~"SDPW"~"INDETERMINADA"~"UNKNOWN"~"VOO DE INSTRUÇÃO"~"SUBSTANCIAL"~"0"~"2018-07-09“
"201707111402595"~"PPNCG"~"OPERADOR PARTICULAR"~"AVIÃO"~"PIPER AIRCRAFT"~"PA-46- 350P"~"PA46"~"TURBOÉLICE"~"MONOMOTOR"~"1950"~"LEVE"~"6"~"1990"~"NULL"~"BRASIL"~" TPP"~"PARTICULAR"~"SBBR"~"SBGR"~"POUSO"~"LANDING"~"VOO PRIVADO"~"NENHUM"~"0"~"2018-07-09"'''
```
O arquivo é composto das seguintes colunas:
1. codigo_ocorrencia
2. aeronave_matricula
3. aeronave_operador_categoria
4. aeronave_tipo_veiculo
5. aeronave_fabricante
6. aeronave_modelo
7. aeronave_tipo_icao
8. aeronave_motor_tipo
9. aeronave_motor_quantidade
10. aeronave_pmd
11. aeronave_pmd_categoria
12. aeronave_assentos
13. aeronave_ano_fabricacao
14. aeronave_pais_fabricante
15. aeronave_pais_registro
16. aeronave_registro_categoria
17. aeronave_registro_segmento
18. aeronave_voo_origem
19. aeronave_voo_destino
20. aeronave_fase_operacao
21. aeronave_fase_operacao_icao
22. aeronave_tipo_operacao
23. aeronave_nivel_dano
24. total_fatalidades
25. aeronave_dia_extracao
Crie uma função que efetue a leitura do arquivo, sem a utilização de bibliotecas externas, e processe o arquivo produzindo dois novos arquivos.
OBS: Efetuar apenas uma leitura do arquivo de entrada
### Arquivo 1
O primeiro arquivo deve ter seu conteúdo em formato JSON, com o nome statistics.json, e deve possuir as as estatísticas:
* fase de operação
* número de total de ocorrências
* percentual de quanto essa fase representa dentro de todos os dados
Exemplo de como deve estar o arquivo:
```json
[
{
"fase_operacao": "APROXIMAÇÃO FINAL",
"ocorrencias": 234,
"percentual": "4,51%"
},
{
"fase_operacao": "INDETERMINADA",
"ocorrencias": 180,
"percentual": "2,43%"
},
{
"fase_operacao": "MANOBRA",
"ocorrencias": 80,
"percentual": "0,95%"
}
]
```
### Arquivo 2
Crie um arquivo de saida (formato CSV) com nome levels.csv contendo as seguintes informações:
* operation -> aeronave_operador_categoria
* type -> aeronave_tipo_veiculo
* manufacturer -> aeronave_fabricante
* engine_type aeronave_motor_tipo
* engines -> aeronave_motor_quantidade
* year_manufacturing -> aeronave_ano_fabricacao
* seating -> aeronave_assentos
* fatalities -> total_fatalidades
Considerando apenas acidentes cujo nível de dano da aeronave tenha sido LEVE ou NENHUM (coluna aeronave_nivel_dano) e que o número de fatalidades (total_fatalidades) tenha sido superior à 0 (zero)
## Análise
Receberemos um arquivo de entrada no formato CSV, com o caractere separador ~ e entre “”.
O arquivo possui 25 colunas
Devemos criar uma função que lê e processa o arquivo, sem a utilização de bibliotecas, e cria outros dois arquivos de saída.
Temos que efetuar a leitura do arquivo uma única vez.
## Implicações / Proposições / Afirmações
* Receberemos um arquivo de entrada
* O arquivo contém 10 anos de dados de acidentes de avição
* O arquivo contém 25 colunas
* O caractere separador do arquivo é `~` e `""`
* Devemos criar uma função que irá ler e processar o arquivo de entrada
* Devemos criar dois arquivos de saída: um `.json`, com o nome `statistics.json`; e outro arquivo `.csv` com o nome `levels.csv`
* O arquivo `.json` deve ter um formato específico
* O arquivo `.json` deve conter as seguintes informações:
* fase de operação
* número de total de ocorrências
* percentual de quanto essa fase representa dentro de todos os dados
## Resolução e Resposta
```
# import das bibliotecas
import csv
import json
# Função que irá processar o Arquivo
def read_file(file_path):
    """Read and process the input file, producing both output files.

    Single pass over the CSV (as the exercise requires): accumulates the
    per-phase incident counts (for statistics.json) and the filtered rows
    (for levels.csv), then writes both files.

    Arguments:
        file_path -- path of the input CSV file ('~'-separated, '"'-quoted)
    """
    # Structures used to build the answers
    map_of_incidents = {}    # phase -> count, feeds the JSON output
    list_of_incidents = []   # filtered rows, feeds the CSV output
    count = 0                # total number of data rows seen
    # Open and process the file
    with open(file_path, 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f, delimiter='~', quotechar='"')
        # Iterate over the file's rows
        for row in reader:
            count += 1
            key = row['aeronave_fase_operacao']
            # Tally this row's operation phase for the JSON statistics
            if key in map_of_incidents:
                map_of_incidents[key] += 1  # phase already seen: one more incident
            else:
                map_of_incidents[key] = 1   # first incident for this phase
            # Keep the row for levels.csv only when aircraft damage was light
            # or none AND there was at least one fatality
            if (
                (row['aeronave_nivel_dano'] == 'LEVE' or row['aeronave_nivel_dano'] == 'NENHUM' ) and
                (int(row['total_fatalidades']) > 0)
            ):
                # Collect just the columns requested for the CSV output
                list_of_incidents.append([row['aeronave_operador_categoria'],
                                          row['aeronave_tipo_veiculo'],
                                          row['aeronave_fabricante'],
                                          row['aeronave_motor_tipo'],
                                          row['aeronave_motor_quantidade'],
                                          row['aeronave_ano_fabricacao'],
                                          row['aeronave_assentos'],
                                          row['total_fatalidades']])
    # With everything gathered in one pass, write both output files
    save_json(map_of_incidents, count)
    save_csv(list_of_incidents)
# Função que irá criar o arquivo JSON
def save_json(data, count):
    """Write statistics.json with per-phase incident counts and percentages.

    Arguments:
        data -- dict mapping operation phase -> number of incidents
        count -- total number of incidents in the input file
    """
    # List that will be written into the JSON file
    array_of_incidents = []
    # Iterate over the phase -> count dictionary
    for key, value in data.items():
        array_of_incidents.append(
            {
                "fase_operacao": key,
                "ocorrencias": value,
                # BUG FIX: the exercise's sample output shows two decimals and a
                # comma decimal separator (e.g. "4,51%"); '{:.3%}' produced
                # dot-separated three-decimal strings like "4.510%".
                "percentual": '{:.2%}'.format(value / count).replace('.', ',')
            }
        )
    # Create the file with the assembled data
    with open('statistics.json', 'w', encoding='utf8') as outfile:
        json.dump(array_of_incidents, outfile, ensure_ascii=False, indent=4)
# Função que irá criar o arquivo CSV
def save_csv(data):
    """Write levels.csv: a fixed header row followed by the filtered incident rows.

    Arguments:
        data -- list of rows, each matching the header columns below
    """
    # Header of the .csv output file
    header = [
        'operation',
        'type',
        'manufacturer',
        'engine_type',
        'engines',
        'year_manufacturing',
        'seating',
        'fatalities'
    ]
    # Create the file with the assembled data
    with open('levels.csv', 'w', encoding='utf8', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(header)
        writer.writerows(data)
# Execução da Aplicação
read_file('csv/anv.csv')
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](#scrollTo=ByZjmtFgB_Y5).
```
// #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/swift/tutorials/python_interoperability"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/swift/tutorials/python_interoperability.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/swift/tutorials/python_interoperability.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub)에서 소스 보기</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/swift/tutorials/python_interoperability.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
이 번역에 개선할 부분이 있다면
[tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
문서 번역이나 리뷰에 참여하려면
[docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
메일을 보내주시기 바랍니다.
# 파이썬 상호 호환성
텐서플로를 위한 스위프트는 파이썬과 상호 호환됩니다.
스위프트에서 파이썬 모듈을 임포트해서, 스위프트와 파이썬 사이의 값을 바꾸거나 파이썬 함수를 호출할 수 있습니다.
```
// comment so that Colab does not interpret `#if ...` as a comment
#if canImport(PythonKit)
import PythonKit
#else
import Python
#endif
print(Python.version)
```
## 파이썬 버전 설정
기본적으로 `import Python`를 하면, 스위프트는 시스템 라이브러리 경로에 따라 설치된 최신 버전의 파이썬을 검색합니다.
특정한 파이썬을 설치하려면, `PYTHON_LIBRARY` 환경변수에 설치시 제공받은 `libpython` 공유 라이브러리를 설정합니다. 예를 들어:
`export PYTHON_LIBRARY="~/anaconda3/lib/libpython3.7m.so"`
정확한 파일명은 파이썬 환경과 플랫폼마다 다를 수 있습니다.
또는 스위프트가 시스템 라이브러리 경로에서 알맞은 파이썬 버전을 찾도록 해서 `PYTHON_VERSION` 환경변수를 설정할 수 있습니다. `PYTHON_LIBRARY`가 `PYTHON_VERSION` 보다 우선한다는 점을 유의해야 합니다.
또한 코드에서 `PYTHON_VERSION` 설정과 동일한 기능을 하는 `PythonLibrary.useVersion` 함수를 호출할 수 있습니다.
```
// PythonLibrary.useVersion(2)
// PythonLibrary.useVersion(3, 7)
```
__Note: 파이썬을 임포트한 직후, 파이썬 코드를 호출하기 전에 `PythonLibrary.useVersion`을 실행해야 합니다. 이것은 파이썬 버전을 동적으로 바꾸는 데 사용될 수 없습니다.__
[파이썬 라이브러리 로딩 과정에서 생성되는 디버그 출력](https://github.com/apple/swift/pull/20674#discussion_r235207008)을 확인하기 위해서 `PYTHON_LOADER_LOGGING=1`를 설정하세요.
## 기초
스위프트에서 `PythonObject`는 파이썬 객체를 나타냅니다.
모든 파이썬 API는 `PythonObject` 인스턴스를 사용하거나 반환합니다.
스위프트에서 기본형(숫자 및 배열처럼)은 `PythonObject`로 전환할 수 있습니다. 몇몇 경우에 (`PythonConvertible` 인수를 받는 리터럴과 함수의 경우), 변환이 암묵적으로 일어납니다. 스위프트 값을 `PythonObject`에 명시적으로 지정하려면 `PythonObject` 이니셜라이저를 사용합니다.
`PythonObject`는 숫자 연산, 인덱싱, 반복을 포함한 많은 표준 연산을 정의합니다.
```
// 표준 스위프트 자료형을 Python으로 변환합니다.
let pythonInt: PythonObject = 1
let pythonFloat: PythonObject = 3.0
let pythonString: PythonObject = "Hello Python!"
let pythonRange: PythonObject = PythonObject(5..<10)
let pythonArray: PythonObject = [1, 2, 3, 4]
let pythonDict: PythonObject = ["foo": [0], "bar": [1, 2, 3]]
// 파이썬 객체에 표준 연산을 수행합니다.
print(pythonInt + pythonFloat)
print(pythonString[0..<6])
print(pythonRange)
print(pythonArray[2])
print(pythonDict["bar"])
// 파이썬 객체를 다시 스위프트로 변환합니다.
let int = Int(pythonInt)!
let float = Float(pythonFloat)!
let string = String(pythonString)!
let range = Range<Int>(pythonRange)!
let array: [Int] = Array(pythonArray)!
let dict: [String: [Int]] = Dictionary(pythonDict)!
// 표준 연산을 수행합니다.
// 출력은 파이썬과 동일합니다!
print(Float(int) + float)
print(string.prefix(6))
print(range)
print(array[2])
print(dict["bar"]!)
```
`PythonObject`는 많은 표준 스위프트 프로토콜에 대해 적합하도록 정의합니다:
* `Equatable`
* `Comparable`
* `Hashable`
* `SignedNumeric`
* `Strideable`
* `MutableCollection`
* 모든 `ExpressibleBy_Literal` 프로토콜
이러한 적합성은 형안전(type-safe)하지 않다는 점에 유의해야 합니다: 호환되지 않는 `PythonObject` 인스턴스에서 프로토콜 기능을 사용하려고 할 때 충돌이 발생할 수 있습니다.
```
let one: PythonObject = 1
print(one == one)
print(one < one)
print(one + one)
let array: PythonObject = [1, 2, 3]
for (i, x) in array.enumerated() {
print(i, x)
}
```
튜플을 파이썬에서 스위프트로 변환하려면, 정확한 튜플의 길이를 알아야 합니다.
다음 인스턴스 메서드 중 하나를 호출합니다:
- `PythonObject.tuple2`
- `PythonObject.tuple3`
- `PythonObject.tuple4`
```
let pythonTuple = Python.tuple([1, 2, 3])
print(pythonTuple, Python.len(pythonTuple))
// 스위프트로 변환합니다.
let tuple = pythonTuple.tuple3
print(tuple)
```
## 파이썬 내장 객체
전역 `Python` 인터페이스를 활용해 파이썬 내장 객체에 접근합니다.
```
// `Python.builtins`은 모든 파이썬 내장 객체의 딕셔너리입니다.
_ = Python.builtins
// 파이썬 내장 객체를 사용합니다.
print(Python.type(1))
print(Python.len([1, 2, 3]))
print(Python.sum([1, 2, 3]))
```
## 파이썬 모듈 임포트
`Python.import`를 사용하여 파이썬 모듈을 임포트합니다. 이것은 `Python`의 `import` 키워드처럼 동작합니다.
```
let np = Python.import("numpy")
print(np)

// Renamed from `zeros`: `np.ones` creates an array filled with ones,
// so the old name was misleading.
let ones = np.ones([2, 3])
print(ones)
```
안전하게 패키지를 가져오기 위해 예외처리 함수 `Python.attemptImport`를 사용하세요.
```
let maybeModule = try? Python.attemptImport("nonexistent_module")
print(maybeModule)
```
## `numpy.ndarray`로 변환
다음 스위프트 자료형은 `numpy.ndarray`로 변환할 수 있습니다:
- `Array<Element>`
- `ShapedArray<Scalar>`
- `Tensor<Scalar>`
`Numpy.ndarray`의 `dtype`이 `Element` 또는 `Scalar`의 일반 파라미터 타입과 호환되어야만 변환이 성공합니다.
`Array`의 경우 `numpy.ndarray`가 1-D일 경우에만 `numpy`에서 변환이 성공합니다.
```
import TensorFlow
let numpyArray = np.ones([4], dtype: np.float32)
print("Swift type:", type(of: numpyArray))
print("Python type:", Python.type(numpyArray))
print(numpyArray.shape)
// `numpy.ndarray`에서 스위프트 타입으로 변환하는 예제.
let array: [Float] = Array(numpy: numpyArray)!
let shapedArray = ShapedArray<Float>(numpy: numpyArray)!
let tensor = Tensor<Float>(numpy: numpyArray)!
// 스위프트 타입에서 `numpy.ndarray`으로 변환하는 예제.
print(array.makeNumpyArray())
print(shapedArray.makeNumpyArray())
print(tensor.makeNumpyArray())
// dtype이 다른 예제.
let doubleArray: [Double] = Array(numpy: np.ones([3], dtype: np.float))!
let intTensor = Tensor<Int32>(numpy: np.ones([2, 3], dtype: np.int32))!
```
## 이미지 표시
파이썬 노트북에서처럼 `matplotlib`를 이용해 이미지를 결과 창에 표시할 수 있습니다.
```
// 주피터 노트북에 그래프를 표시하기 위한 셀입니다.
// 다른 환경에서는 사용하지 마세요.
%include "EnableIPythonDisplay.swift"
IPythonDisplay.shell.enable_matplotlib("inline")
let np = Python.import("numpy")
let plt = Python.import("matplotlib.pyplot")
let time = np.arange(0, 10, 0.01)
let amplitude = np.exp(-0.1 * time)
let position = amplitude * np.sin(3 * time)
plt.figure(figsize: [15, 10])
plt.plot(time, position)
plt.plot(time, amplitude)
plt.plot(time, -amplitude)
plt.xlabel("Time (s)")
plt.ylabel("Position (m)")
plt.title("Oscillations")
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/darjuangeloys/LinearAlgebra2021/blob/main/Assignment2_DarJuan.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Welcome to Python Fundamentals!**

In this module, we are going to establish our skills in Python programming. In this notebook we are going to cover:
* Variables and Data Types
* Operations
* Input and Output Operations
* Logic Control
* Iterables
* Functions
## ***Variables and Data Types***

Variables and data types in python as the name suggests are the values that vary. In a programming language, a variable is a memory location where you store a value. The value that you have stored may change in the future according to the specifications. A variable in python is created as soon as a value is assigned to it. [1]
```
x = 1
a, b = 3, -2
type(x)
```
```
y = 3.0
type(y)
x = float(x)
type(x)
x
s, t, u = "1", '3', 'three'
type(u)
```
## ***Operations***

Operators are special symbols in Python that carry out arithmetic or logical computation. The value that the operator operates on is called the "operand" [2]
### *Arithmetic*

Arithmetic operators are used to perform mathematical operations like addition, subtraction, multiplication, etc.
```
w, x, y, z = 4.0, -3.0, 1, -32
### Addition
S = w + x
### Subtraction
D = y - z
### Multiplication
P = w*z
### Division
Q = y/x
### Floor Division
Qf = w//x
Qf
```
***What's the difference between a standard division and a floor division?***

- Floor division rounds the quotient down, returning the largest integer less than or equal to the true quotient. [3]
```
### Exponentiation
E = w**y
### Modulo
mod = z%x
mod
```
***What is Modulo?***

While floor division gives the rounded-down quotient, modulo gives the remainder left over after dividing the two values. [4]
### *Assignment*
Assignment operators are used to assign values to variables.
```
A, B, C, D, E = 0, 100, 2, 1, 2
A += w
B -= x
C *= w
D /= x
E **= y
```
### *Comparators*

Comparators or Comparison operators are used to compare values. It returns either True or False according to the condition.
```
size_1, size_2, size_3 = 1, 2.0, "1"
true_size = 1.0;
## Equality
size_1 == true_size
## Non-equality
size_2 != true_size
## Inequality
s1 = size_1 > size_2
s2 = size_1 < size_2/2
s3 = true_size >= size_1
s4 = size_2 <= true_size
```
### *Logical*
Logical operators are the and, or, not operators
```
size_1 == true_size
size_1 is true_size
size_1 is not true_size
P, Q = True, False
conj = P and Q
disj = P or Q
disj
nand = not(P and Q)
nand
xor = (not P and Q) or (P and not Q)
xor
```
## ***Input and Output***
These are functions used to connect with the user of the program. We use the print() function to output data to the standard output device and we use the input () to take the input from the user. [5]
```
print("Hello World!")
cnt = 14000
string = "Hello World!"
print(string, ", Current COVID count is:", cnt)
cnt += 10000
print(f"{string}, current count is: {cnt}")
sem_grade = 86.25
name = "Lois"
print("Hello {}, your semestral grades is: {}". format(name, sem_grade))
pg, mg, fg = 0.3, 0.3, 0.4
print("The weights of your semestral grade are:\
\n\t {:.2%} for Prelims\
\n\t {:.2%} for Midterms, and\
\n\t {:.2%} for Finals.".format(pg, mg, fg))
e = input("Enter a number: ")
e
name = input("Enter your name: ")
pg = input("Enter prelim grade: ")
mg = input("Enter midterm grade: ")
fg = input("Enter final grade: ")
sem_grade = None
print("Hello {}, your semestral grade is: {}".format(name, sem_grade))
```
## ***Looping statements***
Loops are powerful programming concepts supported by almost all modern programming languages. It allows a program to implement iterations, which basically means executing the same block of code two or more times depending on the specified conditions of the user or the codes in the program itself. [6]
## *While*

A while loop is used to iterate over a block of code as long as the test expression or condition is "true". [7]
```
## while loops
i, j = 0, 10
while(i <= j):
print(f"{i}\t|\t{j}")
i += 1
```
## *For*

The for loop in Python is used to iterate over a sequence (list, tuple, string) or other iterable objects [8]
```
# for(int = 0; i<=10, i++){
# printf(i)
# }
i = 0
for i in range(11):
print(i)
playlist = ["Bahay Kubo", "Magandang Kanta", "Buko"]
print('Now Playing:\n')
for song in playlist:
print(song)
```
## ***Flow Control***
Flow control is the order in which statements or the blocks of code in the program are executed at runtime based on a condition. [9]
### *Condition Statements*
```
num_1, num_2 = 14, 12
if(num_1 == num_2):
print("HAHA")
elif(num_1>num_2):
print("HOHO")
else:
print("HUHU")
```
## ***Functions***
A function is a group of related statements that performs a specific task. It helps break our program into smaller and modular chunks, making it more organized and manageable. [10]
```
# void DeleteUser (int userid){
# delete(userid);
#}
def delete_user(userid):
    """Report deletion of the given user id (demo of a function with a side effect)."""
    print("Successfully deleted user: {}".format(userid))
userid = 2020_100100
delete_user(2020_100100)
def add(addend1, addend2):
    """Return the sum of the two addends."""
    # Use `total` rather than `sum` to avoid shadowing the builtin.
    total = addend1 + addend2
    return total
add(5, 4)
```
## **References:**
[1] Variables and Data Types in Python (2021). (https://www.edureka.co/blog/variables-and-data-types-in-python/)
[2] Python Operators (2021). (https://www.programiz.com/python-programming/operators)
[3] Python Floor Division (2021). https://www.pythontutorial.net/advanced-python/python-floor-division/
[4] Python Modulo in Practice: How to Use the % Operator (2020). (https://realpython.com/python-modulo-operator/)
[5] Python Input, Output, and Import (2021). (https://www.programiz.com/python-programming/input-output-import)
[6] Python Loops – For, While, Nested Loops With Examples (2021). (https://www.softwaretestinghelp.com/python/looping-in-python-for-while-nested-loops/)
[7] Python while Loop (2021). (https://www.programiz.com/python-programming/while-loop)
[8] Python for Loop (2021). (https://www.programiz.com/python-programming/for-loop)
[9] Python Control Flow Statements and Loops (2021). (https://pynative.com/python-control-flow-statements/)
[10] Python Functions (2021). (https://www.programiz.com/python-programming/function)
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D4_MachineLearning/student/W1D4_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Tutorial 1: GLMs for Encoding
**Week 1, Day 4: Machine Learning (GLMs)**
**By Neuromatch Academy**
__Content creators:__ Pierre-Etienne H. Fiquet, Ari Benjamin, Jakob Macke
__Content reviewers:__ Davide Valeriani, Alish Dipani, Michael Waskom
This is part 1 of a 2-part series about Generalized Linear Models (GLMs), which are a fundamental framework for supervised learning.
In this tutorial, the objective is to model a retinal ganglion cell spike train by fitting a temporal receptive field. First with a Linear-Gaussian GLM (also known as ordinary least-squares regression model) and then with a Poisson GLM (aka "Linear-Nonlinear-Poisson" model). In the next tutorial, we’ll extend to a special case of GLMs, logistic regression, and learn how to ensure good model performance.
This tutorial is designed to run with retinal ganglion cell spike train data from [Uzzell & Chichilnisky 2004](https://journals.physiology.org/doi/full/10.1152/jn.01171.2003?url_ver=Z39.88-2003&rfr_id=ori:rid:crossref.org&rfr_dat=cr_pub%20%200pubmed).
*Acknowledgements:*
- We thank EJ Chichilnisky for providing the dataset. Please note that it is provided for tutorial purposes only, and should not be distributed or used for publication without express permission from the author (ej@stanford.edu).
- We thank Jonathan Pillow; much of this tutorial is inspired by exercises assigned in his 'Statistical Modeling and Analysis of Neural Data' class.
# Setup
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.io import loadmat
#@title Figure settings
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
#@title Helper functions
def plot_stim_and_spikes(stim, spikes, dt, nt=120):
    """Show time series of stim intensity and spike counts.

    Args:
      stim (1D array): vector of stimulus intensities
      spikes (1D array): vector of spike counts
      dt (number): duration of each time step
      nt (number): number of time steps to plot
    """
    # Bug fix: the original hard-coded np.arange(120), silently ignoring
    # the `nt` argument documented above.
    timepoints = np.arange(nt)
    time = timepoints * dt

    f, (ax_stim, ax_spikes) = plt.subplots(
        nrows=2, sharex=True, figsize=(8, 5),
    )
    ax_stim.plot(time, stim[timepoints])
    ax_stim.set_ylabel('Stimulus intensity')

    ax_spikes.plot(time, spikes[timepoints])
    ax_spikes.set_xlabel('Time (s)')
    ax_spikes.set_ylabel('Number of spikes')
    f.tight_layout()
def plot_glm_matrices(X, y, nt=50):
    """Show X and Y as heatmaps.

    Args:
      X (2D array): Design matrix.
      y (1D or 2D array): Target vector.
      nt (number): Number of rows (time bins) to display.
    """
    from matplotlib.colors import BoundaryNorm
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    Y = np.c_[y]  # Ensure Y is 2D and skinny

    f, (ax_x, ax_y) = plt.subplots(
        ncols=2,
        figsize=(6, 8),
        sharey=True,
        gridspec_kw=dict(width_ratios=(5, 1)),
    )

    # Three-level color scale: negative, near-zero, and positive stimulus values.
    norm = BoundaryNorm([-1, -.2, .2, 1], 256)
    imx = ax_x.pcolormesh(X[:nt], cmap="coolwarm", norm=norm)

    # Tick positions/labels assume the default d=25 time lags (see make_design_matrix).
    ax_x.set(
        title="X\n(lagged stimulus)",
        xlabel="Time lag (time bins)",
        xticks=[4, 14, 24],
        xticklabels=['-20', '-10', '0'],
        ylabel="Time point (time bins)",
    )
    plt.setp(ax_x.spines.values(), visible=True)

    # Colorbar for the design matrix, labeled with the unique stimulus values.
    divx = make_axes_locatable(ax_x)
    caxx = divx.append_axes("right", size="5%", pad=0.1)
    cbarx = f.colorbar(imx, cax=caxx)
    cbarx.set_ticks([-.6, 0, .6])
    cbarx.set_ticklabels(np.sort(np.unique(X)))

    # One discrete color bin per observed spike-count value.
    norm = BoundaryNorm(np.arange(y.max() + 1), 256)
    imy = ax_y.pcolormesh(Y[:nt], cmap="magma", norm=norm)
    ax_y.set(
        title="Y\n(spike count)",
        xticks=[]
    )
    # Flip so time increases downward, matching the X panel's row order.
    ax_y.invert_yaxis()
    plt.setp(ax_y.spines.values(), visible=True)

    divy = make_axes_locatable(ax_y)
    caxy = divy.append_axes("right", size="30%", pad=0.1)
    cbary = f.colorbar(imy, cax=caxy)
    cbary.set_ticks(np.arange(y.max()) + .5)
    cbary.set_ticklabels(np.arange(y.max()))
def plot_spike_filter(theta, dt, **kws):
    """Plot estimated weights based on time lag model.

    Args:
      theta (1D array): Filter weights, not including DC term.
      dt (number): Duration of each time bin.
      kws: Pass additional keyword arguments to plot()
    """
    n_lags = len(theta)
    # Lag axis runs from -(n_lags - 1) bins up to 0 (the spike time), in seconds.
    lag_times = dt * np.arange(1 - n_lags, 1)

    ax = plt.gca()
    ax.plot(lag_times, theta, marker="o", **kws)
    # Dashed reference line at zero weight, drawn behind the data.
    ax.axhline(0, color=".2", linestyle="--", zorder=1)
    ax.set_xlabel("Time before spike (s)")
    ax.set_ylabel("Filter weight")
def plot_spikes_with_prediction(
    spikes, predicted_spikes, dt, nt=50, t0=120, **kws):
    """Plot actual and predicted spike counts.

    Args:
      spikes (1D array): Vector of actual spike counts
      predicted_spikes (1D array): Vector of predicted spike counts
      dt (number): Duration of each time bin.
      nt (number): Number of time bins to plot
      t0 (number): Index of first time bin to plot.
      kws: Pass additional keyword arguments to plot()
    """
    # NOTE(review): the time axis starts at bin t0, but the values plotted are
    # spikes[:nt] / predicted_spikes[:nt] (the first nt bins, not bins
    # t0:t0+nt) — confirm this offset is intentional before relying on the
    # x-axis labels.
    t = np.arange(t0, t0 + nt) * dt

    f, ax = plt.subplots()
    # Actual spikes as gray stems; the stem baseline is pushed behind the line.
    lines = ax.stem(t, spikes[:nt], use_line_collection=True)
    plt.setp(lines, color=".5")
    lines[-1].set_zorder(1)
    # Default to a thick prediction line unless the caller overrides it.
    kws.setdefault("linewidth", 3)
    yhat, = ax.plot(t, predicted_spikes[:nt], **kws)
    ax.set(
        xlabel="Time (s)",
        ylabel="Spikes",
    )
    # Spike counts are integers, so force integer y-ticks.
    ax.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
    ax.legend([lines[0], yhat], ["Spikes", "Predicted"])
    plt.show()
#@title Data retrieval and loading
import os
import hashlib
import requests

fname = "RGCdata.mat"
url = "https://osf.io/mzujs/download"
expected_md5 = "1b2977453020bce5319f2608c94d38d0"

# Download the dataset once; skip if a local copy already exists.
if not os.path.isfile(fname):
    try:
        r = requests.get(url)
    except requests.ConnectionError:
        print("!!! Failed to download data !!!")
    else:
        if r.status_code != requests.codes.ok:
            print("!!! Failed to download data !!!")
        # Verify file integrity against the known MD5 checksum before saving.
        elif hashlib.md5(r.content).hexdigest() != expected_md5:
            print("!!! Data download appears corrupted !!!")
        else:
            with open(fname, "wb") as fid:
                fid.write(r.content)
```
-----
#Section 1: Linear-Gaussian GLM
```
#@title Video 1: General linear model
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Yv89UHeSa9I", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
## Section 1.1: Load retinal ganglion cell activity data
In this exercise we use data from an experiment that presented a screen which randomly alternated between two luminance values and recorded responses from retinal ganglion cell (RGC), a type of neuron in the retina in the back of the eye. This kind of visual stimulus is called a "full-field flicker", and it was presented at ~120Hz (ie. the stimulus presented on the screen was refreshed about every 8ms). These same time bins were used to count the number of spikes emitted by each neuron.
The file `RGCdata.mat` contains three variables:
- `Stim`, the stimulus intensity at each time point. It is an array with shape $T \times 1$, where $T=144051$.
- `SpCounts`, the binned spike counts for 2 ON cells, and 2 OFF cells. It is a $144051 \times 4$ array, and each column has counts for a different cell.
- `dtStim`, the size of a single time bin (in seconds), which is needed for computing model output in units of spikes / s. The stimulus frame rate is given by `1 / dtStim`.
Because these data were saved in MATLAB, where everything is a matrix, we will also process the variables to more Pythonic representations (1D arrays or scalars, where appropriate) as we load the data.
```
data = loadmat('RGCdata.mat') # loadmat is a function in scipy.io
dt_stim = data['dtStim'].item() # .item extracts a scalar value
# Extract the stimulus intensity
stim = data['Stim'].squeeze() # .squeeze removes dimensions with 1 element
# Extract the spike counts for one cell
cellnum = 2
spikes = data['SpCounts'][:, cellnum]
# Don't use all of the timepoints in the dataset, for speed
keep_timepoints = 20000
stim = stim[:keep_timepoints]
spikes = spikes[:keep_timepoints]
```
Use the `plot_stim_and_spikes` helper function to visualize the changes in stimulus intensities and spike counts over time.
```
plot_stim_and_spikes(stim, spikes, dt_stim)
```
### Exercise 1: Create design matrix
Our goal is to predict the cell's activity from the stimulus intensities preceding it. That will help us understand how RGCs process information over time. To do so, we first need to create the *design matrix* for this model, which organizes the stimulus intensities in matrix form such that the $i$th row has the stimulus frames preceding timepoint $i$.
In this exercise, we will create the design matrix $X$ using $d=25$ time lags. That is, $X$ should be a $T \times d$ matrix. $d = 25$ (about 200 ms) is a choice we're making based on our prior knowledge of the temporal window that influences RGC responses. In practice, you might not know the right duration to use.
The last entry in row `t` should correspond to the stimulus that was shown at time `t`, the entry to the left of it should contain the value that was show one time bin earlier, etc. Specifically, $X_{ij}$ will be the stimulus intensity at time $i + d - 1 - j$.
Note that for the first few time bins, we have access to the recorded spike counts but not to the stimulus shown in the recent past. For simplicity we are going to assume that values of `stim` are 0 for the time lags prior to the first timepoint in the dataset. This is known as "zero-padding", so that the design matrix has the same number of rows as the response vectors in `spikes`.
Your task is is to complete the function below to:
- make a zero-padded version of the stimulus
- initialize an empty design matrix with the correct shape
- **fill in each row of the design matrix, using the zero-padded version of the stimulus**
To visualize your design matrix (and the corresponding vector of spike counts), we will plot a "heatmap", which encodes the numerical value in each position of the matrix as a color. The helper functions include some code to do this.
```
def make_design_matrix(stim, d=25):
    """Create time-lag design matrix from stimulus intensity vector.

    Args:
      stim (1D array): Stimulus intensity at each time point.
      d (number): Number of time lags to use.

    Returns
      X (2D array): GLM design matrix with shape T, d
    """
    # Create version of stimulus vector with zeros before onset
    # ("zero-padding", so every row of X has d entries)
    padded_stim = np.concatenate([np.zeros(d - 1), stim])

    #####################################################################
    # Fill in missing code (...),
    # then remove or comment the line below to test your function
    raise NotImplementedError("Complete the make_design_matrix function")
    #####################################################################

    # Construct a matrix where each row has the d frames of
    # the stimulus preceding and including timepoint t
    T = len(...)  # Total number of timepoints (hint: number of stimulus frames)
    X = np.zeros((T, d))
    for t in range(T):
        X[t] = ...

    return X
# Uncomment and run to test your function
# X = make_design_matrix(stim)
# plot_glm_matrices(X, spikes, nt=50)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D4_MachineLearning/solutions/W1D4_Tutorial1_Solution_3eaecf0a.py)
*Example output:*
<img alt='Solution hint' align='left' width=415 height=558 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D4_MachineLearning/static/W1D4_Tutorial1_Solution_3eaecf0a_0.png>
##Section 1.2: Fit Linear-Gaussian regression model
First, we will use the design matrix to compute the maximum likelihood estimate for a linear-Gaussian GLM (aka "general linear model"). The maximum likelihood estimate of $\theta$ in this model can be solved analytically using the equation you learned about on Day 3:
$$\hat \theta = (X^TX)^{-1}X^Ty$$
Before we can apply this equation, we need to augment the design matrix to account for the mean of $y$, because the spike counts are all $\geq 0$. We do this by adding a constant column of 1's to the design matrix, which will allow the model to learn an additive offset weight. We will refer to this additional weight as $b$ (for bias), although it is alternatively known as a "DC term" or "intercept".
```
# Build the full design matrix
y = spikes
constant = np.ones_like(y)
X = np.column_stack([constant, make_design_matrix(stim)])
# Get the MLE weights for the LG model
theta = np.linalg.inv(X.T @ X) @ X.T @ y
theta_lg = theta[1:]
```
Plot the resulting maximum likelihood filter estimate (just the 25-element weight vector $\theta$ on the stimulus elements, not the DC term $b$).
```
plot_spike_filter(theta_lg, dt_stim)
```
---
### Exercise 2: Predict spike counts with Linear-Gaussian model
Now we are going to put these pieces together and write a function that outputs a predicted spike count for each timepoint using the stimulus information.
Your steps should be:
- Create the complete design matrix
- Obtain the MLE weights ($\hat \theta$)
- Compute $\hat y = X\hat \theta$
```
def predict_spike_counts_lg(stim, spikes, d=25):
    """Compute a vector of predicted spike counts given the stimulus.

    Args:
      stim (1D array): Stimulus values at each timepoint
      spikes (1D array): Spike counts measured at each timepoint
      d (number): Number of time lags to use.

    Returns:
      yhat (1D array): Predicted spikes at each timepoint.
    """
    ##########################################################################
    # Fill in missing code (...) and then comment or remove the error to test
    raise NotImplementedError("Complete the predict_spike_counts_lg function")
    ##########################################################################

    # Create the design matrix (hint: include a constant column for the
    # intercept/DC term, as in Section 1.2)
    y = spikes
    constant = ...
    X = ...

    # Get the MLE weights for the LG model
    # (analytical solution: theta = (X^T X)^{-1} X^T y)
    theta = ...

    # Compute predicted spike counts
    yhat = X @ theta

    return yhat
# Uncomment and run to test your function and plot prediction
# predicted_counts = predict_spike_counts_lg(stim, spikes)
# plot_spikes_with_prediction(spikes, predicted_counts, dt_stim)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D4_MachineLearning/solutions/W1D4_Tutorial1_Solution_6576a3e7.py)
*Example output:*
<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D4_MachineLearning/static/W1D4_Tutorial1_Solution_6576a3e7_0.png>
Is this a good model? The prediction line more-or-less follows the bumps in the spikes, but it never predicts as many spikes as are actually observed. And, more troublingly, it's predicting *negative* spikes for some time points.
The Poisson GLM will help to address these failures.
### Bonus challenge
The "spike-triggered average" falls out as a subcase of the linear Gaussian GLM: $\mathrm{STA} = X^T y \,/\, \textrm{sum}(y)$, where $y$ is the vector of spike counts of the neuron. In the LG GLM, the term $(X^TX)^{-1}$ corrects for potential correlation between the regressors. Because the experiment that produced these data used a white noise stimulus, there are no such correlations. Therefore the two methods are equivalent. (How would you check the statement about no correlations?)
#Section 2: Linear-Nonlinear-Poisson GLM
```
#@title Video 2: Generalized linear model
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="wRbvwdze4uE", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
## Section 2.1: Nonlinear optimization with `scipy.optimize`
Before diving into the Poisson GLM case, let us review the use and importance of convexity in optimization:
- We have seen previously that in the Linear-Gaussian case, maximum likelihood parameter estimate can be computed analytically. That is great because it only takes us a single line of code!
- Unfortunately in general there is no analytical solution to our statistical estimation problems of interest. Instead, we need to apply a nonlinear optimization algorithm to find the parameter values that minimize some *objective function*. This can be extremely tedious because there is no general way to check whether we have found *the optimal solution* or if we are just stuck in some local minimum.
- Somewhere in between these two extremes, the special case of convex objective functions is of great practical importance. Indeed, such optimization problems can be solved very reliably (and usually quite rapidly too!) using standard software.
Notes:
- a function is convex if and only if its curve lies below any chord joining two of its points
- to learn more about optimization, you can consult the book of Stephen Boyd and Lieven Vandenberghe [Convex Optimization](https://web.stanford.edu/~boyd/cvxbook/).
Here we will use the `scipy.optimize` module, it contains a function called [`minimize`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) that provides a generic interface to a large number of optimization algorithms. This function expects as argument an objective function and an "initial guess" for the parameter values. It then returns a dictionary that includes the minimum function value, the parameters that give this minimum, and other information.
Let's see how this works with a simple example. We want to minimize the function $f(x) = x^2$:
```
f = np.square
res = minimize(f, x0=2)
print(
f"Minimum value: {res['fun']:.4g}",
f"at x = {res['x']}",
)
```
When minimizing a $f(x) = x^2$, we get a minimum value of $f(x) \approx 0$ when $x \approx 0$. The algorithm doesn't return exactly $0$, because it stops when it gets "close enough" to a minimum. You can change the `tol` parameter to control how it defines "close enough".
A point about the code bears emphasis. The first argument to `minimize` is not a number or a string but a *function*. Here, we used `np.square`. Take a moment to make sure you understand what's going on, because it's a bit unusual, and it will be important for the exercise you're going to do in a moment.
In this example, we started at $x_0 = 2$. Let's try different values for the starting point:
```
start_points = -1, 1.5
xx = np.linspace(-2, 2, 100)
plt.plot(xx, f(xx), color=".2")
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
for i, x0 in enumerate(start_points):
res = minimize(f, x0)
plt.plot(x0, f(x0), "o", color=f"C{i}", ms=10, label=f"Start {i}")
plt.plot(res["x"].item(), res["fun"], "x", c=f"C{i}", ms=10, mew=2, label=f"End {i}")
plt.legend()
```
The runs started at different points (the dots), but they each ended up at roughly the same place (the cross): $f(x_\textrm{final}) \approx 0$. Let's see what happens if we use a different function:
```
g = lambda x: x / 5 + np.cos(x)
start_points = -.5, 1.5
xx = np.linspace(-4, 4, 100)
plt.plot(xx, g(xx), color=".2")
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
for i, x0 in enumerate(start_points):
res = minimize(g, x0)
plt.plot(x0, g(x0), "o", color=f"C{i}", ms=10, label=f"Start {i}")
plt.plot(res["x"].item(), res["fun"], "x", color=f"C{i}", ms=10, mew=2, label=f"End {i}")
plt.legend()
```
Unlike $f(x) = x^2$, $g(x) = \frac{x}{5} + \cos(x)$ is not *convex*. We see that the final position of the minimization algorithm depends on the starting point, which adds a layer of complexity to such problems.
### Exercise 3: Fitting the Poisson GLM and prediction spikes
In this exercise, we will use [`scipy.optimize.minimize`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) to compute maximum likelihood estimates for the filter weights in the Poissson GLM model with an exponential nonlinearity (LNP: Linear-Nonlinear-Poisson).
In practice, this will involve filling out two functions.
- The first should be an *objective function* that takes a design matrix, a spike count vector, and a vector of parameters. It should return a negative log likelihood.
- The second function should take `stim` and `spikes`, build the design matrix and then use `minimize` internally, and return the MLE parameters.
What should the objective function look like? We want it to return the negative log likelihood: $-\log P(y \mid X, \theta).$
In the Poisson GLM,
$$
\log P(\mathbf{y} \mid X, \theta) = \sum_t \log P(y_t \mid \mathbf{x_t},\theta),
$$
where
$$ P(y_t \mid \mathbf{x_t}, \theta) = \frac{\lambda_t^{y_t}\exp(-\lambda_t)}{y_t!} \text{, with rate } \lambda_t = \exp(\mathbf{x_t}^{\top} \theta).$$
Now, taking the log likelihood for all the data we obtain:
$\log P(\mathbf{y} \mid X, \theta) = \sum_t( y_t \log(\lambda_t) - \lambda_t - \log(y_t !)).$
Because we are going to minimize the negative log likelihood with respect to the parameters $\theta$, we can ignore the last term that does not depend on $\theta$. For faster implementation, let us rewrite this in matrix notation:
$$\mathbf{y}^T \log(\mathbf{\lambda}) - \mathbf{1}^T \mathbf{\lambda} \text{, with rate } \mathbf{\lambda} = \exp(X^{\top} \theta)$$
Finally, don't forget to add the minus sign for your function to return the negative log likelihood.
```
def neg_log_lik_lnp(theta, X, y):
    """Return the negative log likelihood for the Poisson GLM model.

    Args:
        theta (1D array): Parameter vector (d + 1 weights, including the intercept).
        X (2D array): Full design matrix, shape (T, d + 1).
        y (1D array): Observed spike counts at each timepoint.

    Returns:
        number: Negative log likelihood -log P(y | X, theta), up to an additive
            constant (the log(y!) term is dropped since it does not depend on theta).
    """
    #####################################################################
    # Fill in missing code (...), then remove the error
    raise NotImplementedError("Complete the neg_log_lik_lnp function")
    #####################################################################
    # Compute the Poisson log likelihood:
    #   log P(y | X, theta) = y^T log(rate) - 1^T rate   (dropping log(y!))
    rate = np.exp(X @ theta)
    log_lik = y @ ... - ...
    return ...
def fit_lnp(stim, spikes, d=25):
    """Obtain MLE parameters for the Poisson GLM.

    Args:
        stim (1D array): Stimulus values at each timepoint.
        spikes (1D array): Spike counts measured at each timepoint.
        d (number): Number of time lags to use.

    Returns:
        1D array: MLE parameters (d + 1 values; the first is the intercept).
    """
    #####################################################################
    # Fill in missing code (...), then remove the error
    raise NotImplementedError("Complete the fit_lnp function")
    #####################################################################
    # Build the design matrix: a constant (intercept) column plus the
    # d lagged copies of the stimulus produced by make_design_matrix.
    y = spikes
    constant = np.ones_like(y)
    X = np.column_stack([constant, make_design_matrix(stim)])
    # Use a random vector of weights to start (mean 0, sd .2)
    x0 = np.random.normal(0, .2, d + 1)
    # Find parameters that minimize the negative log likelihood function
    res = minimize(..., args=(X, y))
    return ...
# Uncomment and run to test your function
# theta_lnp = fit_lnp(stim, spikes)
# plot_spike_filter(theta_lg[1:], dt_stim, color=".5", label="LG")
# plot_spike_filter(theta_lnp[1:], dt_stim, label="LNP")
# plt.legend(loc="upper left");
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D4_MachineLearning/solutions/W1D4_Tutorial1_Solution_c0c87e2d.py)
*Example output:*
<img alt='Solution hint' align='left' width=558 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D4_MachineLearning/static/W1D4_Tutorial1_Solution_c0c87e2d_0.png>
Plotting the LG and LNP weights together, we see that they are broadly similar, but the LNP weights are generally larger. What does that mean for the model's ability to *predict* spikes? To see that, let's finish the exercise by filling out the `predict_spike_counts_lnp` function:
```
def predict_spike_counts_lnp(stim, spikes, theta=None, d=25):
    """Compute a vector of predicted spike counts given the stimulus.

    Args:
        stim (1D array): Stimulus values at each timepoint.
        spikes (1D array): Spike counts measured at each timepoint.
        theta (1D array): Filter weights; estimated if not provided.
        d (number): Number of time lags to use.

    Returns:
        yhat (1D array): Predicted spikes at each timepoint.
    """
    ###########################################################################
    # Fill in missing code (...) and then remove the error to test
    raise NotImplementedError("Complete the predict_spike_counts_lnp function")
    ###########################################################################
    y = spikes
    constant = np.ones_like(spikes)
    X = np.column_stack([constant, make_design_matrix(stim)])
    if theta is None:  # Allow pre-cached weights, as fitting is slow
        # BUG FIX: fit_lnp expects the raw stimulus and spike vectors
        # (it builds its own design matrix internally), not (X, y).
        theta = fit_lnp(stim, spikes, d)
    yhat = ...
    return yhat
# Uncomment and run to test predict_spike_counts_lnp
# yhat = predict_spike_counts_lnp(stim, spikes, theta_lnp)
# plot_spikes_with_prediction(spikes, yhat, dt_stim)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D4_MachineLearning/solutions/W1D4_Tutorial1_Solution_8fbc44cf.py)
*Example output:*
<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D4_MachineLearning/static/W1D4_Tutorial1_Solution_8fbc44cf_0.png>
We see that the LNP model does a better job of fitting the actual spiking data. Importantly, it never predicts negative spikes!
*Bonus:* Our statement that the LNP model "does a better job" is qualitative and based mostly on the visual appearance of the plot. But how would you make this a quantitative statement?
## Summary
In this first tutorial, we used two different models to learn something about how retinal ganglion cells respond to a flickering white noise stimulus. We learned how to construct a design matrix that we could pass to different GLMs, and we found that the Linear-Nonlinear-Poisson (LNP) model allowed us to predict spike rates better than a simple Linear-Gaussian (LG) model.
In the next tutorial, we'll extend these ideas further. We'll meet yet another GLM — logistic regression — and we'll learn how to ensure good model performance even when the number of parameters `d` is large compared to the number of data points `N`.
| github_jupyter |
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory.
# List everything that was attached to this kernel.
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 5GB to the current directory (/kaggle/working/) that gets
# preserved as output when you create a version using "Save & Run All".
# You can also write temporary files to /kaggle/temp/, but they won't be saved
# outside of the current session.

# Load the Titanic train/test splits.
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()

# Baseline: survival rate by sex.
women = train_data.loc[train_data.Sex == 'female']["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)

men = train_data.loc[train_data.Sex == 'male']["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)

train_data.columns

# Build feature matrices; get_dummies one-hot encodes the categorical
# columns (Sex, Embarked).
# (Removed an unused `leaves` list that was left over from experimentation.)
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Fare", "Embarked"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
print(X.info())
print(X_test.info())

from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OneHotEncoder

# Columns with missing values. (XGBoost handles NaNs natively, so no imputer
# is strictly required here.)
na_list = X.isna().sum() + X_test.isna().sum()
print(na_list)
num_cols = [col for col in X if X[col].dtype in ['int64', 'float64'] and na_list[col] > 0]
cat_cols = [col for col in X_test if X[col].dtype in ['object']]
print(" Numerical Columns ", num_cols, "\n Categorical Columns", cat_cols)

# Benchmark: default XGBoost, scored on a held-out validation split.
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=21)
benchmark_model = XGBClassifier(random_state=21)
pip_b = Pipeline(steps=[("benchmark_model", benchmark_model)])
pip_b.fit(X_train, y_train)
pred = pip_b.predict(X_valid)
acc = accuracy_score(y_valid, pred)
print(acc)

import xgboost as xgb
from sklearn.model_selection import RandomizedSearchCV

# Create the parameter grid: gbm_param_grid
# BUG FIX: n_estimators previously started at 0, which is not a valid
# number of boosting rounds.
gbm_param_grid = {
    'n_estimators': range(25, 400, 25),
    'max_depth': range(3, 7),
    'learning_rate': [0.4, 0.45, 0.5, 0.55, 0.6],
    'subsample': [0.8, 0.9, 1],
    'colsample_bytree': [0.6, 0.7, 0.8, 0.9, 1],
    'n_jobs': [4]
}

# Random search over the grid: 40 candidates, 6-fold CV, accuracy scoring.
gbm = XGBClassifier(random_state=21)
xgb_random = RandomizedSearchCV(param_distributions=gbm_param_grid,
                                estimator=gbm, scoring="accuracy",
                                verbose=1, n_iter=40, cv=6, n_jobs=4)
xgb_random.fit(X, y)

# Print the best parameters and best CV accuracy.
print("Best parameters found: ", xgb_random.best_params_)
print("Best accuracy found: ", xgb_random.best_score_)

# Final model with the tuned hyperparameters.
# BUG FIX: this line previously read
#   model = XGBClassifiermodel_dummy =XGBClassifier(...)
# a chained assignment to a stray `XGBClassifiermodel_dummy` name left over
# from an edit.
model = XGBClassifier(n_estimators=75, max_depth=3, subsample=0.9,
                      colsample_bytree=1, learning_rate=0.45, n_jobs=4,
                      random_state=21)

# Fit on the full training data and predict the test set.
pip = Pipeline(steps=[('model', model)])
pip.fit(X, y)
predic = pip.predict(X_test)

# Write the submission file expected by the competition.
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predic})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ayulockin/Explore-NFNet/blob/main/Train_Basline_With_Gradient_Clipping.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 🧰 Setups, Installations and Imports
```
%%capture
!pip install wandb --upgrade
!pip install albumentations
!git clone https://github.com/ayulockin/Explore-NFNet
import tensorflow as tf
print(tf.__version__)
import tensorflow_datasets as tfds
import sys
sys.path.append("Explore-NFNet")
import os
import cv2
import numpy as np
from functools import partial
import matplotlib.pyplot as plt
# Imports from the cloned repository
from models.resnet import resnet_v1
from models.mini_vgg import get_mini_vgg
# Augmentation related imports
import albumentations as A
# Seed everything for reproducibility
def seed_everything():
    """Seed NumPy/TensorFlow RNGs and request deterministic cuDNN kernels.

    BUG FIX: the original derived seeds from hash("...") on str, but Python's
    str hash is salted per process (PYTHONHASHSEED), so the "seeds" changed on
    every run — the opposite of reproducibility. zlib.crc32 is deterministic.
    """
    import zlib
    # Ask cuDNN to pick deterministic kernels.
    os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
    np.random.seed(zlib.crc32(b"improves reproducibility") % (2**32 - 1))
    tf.random.set_seed(zlib.crc32(b"by removing stochasticity") % (2**32 - 1))

seed_everything()
# Prevent TensorFlow from allocating all GPU memory at once; grow on demand.
# Ref: https://www.tensorflow.org/guide/gpu
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
import wandb
from wandb.keras import WandbCallback
wandb.login()
# Experiment configuration.
DATASET_NAME = 'cifar10'
IMG_HEIGHT = 32   # CIFAR-10 images are 32x32 RGB
IMG_WIDTH = 32
NUM_CLASSES = 10
SHUFFLE_BUFFER = 1024  # tf.data shuffle buffer size
BATCH_SIZE = 256
EPOCHS = 100
AUTOTUNE = tf.data.experimental.AUTOTUNE  # let tf.data tune parallelism
print(f'Global batch size is: {BATCH_SIZE}')
```
# ⛄ Download and Prepare Dataset
```
# Download CIFAR-10; split the official train set 85%/15% into train/val.
(train_ds, val_ds, test_ds), info = tfds.load(name=DATASET_NAME,
                                              split=["train[:85%]", "train[85%:]", "test"],
                                              with_info=True,
                                              as_supervised=True)
@tf.function
def preprocess(image, label):
    """Cast the image to float32 and scale pixel values into [0, 1]."""
    scaled = tf.cast(image, tf.float32) / 255.0
    return scaled, label
# Define the augmentation policies. Note that they are applied sequentially,
# each with its own probability p.
transforms = A.Compose([
    A.HorizontalFlip(p=0.7),
    A.Rotate(limit=30, p=0.7)
])
# Apply augmentation policies.
def aug_fn(image):
    """Run the albumentations pipeline on one image and return the result."""
    return transforms(image=image)["image"]
@tf.function
def apply_augmentation(image, label):
    # tf.numpy_function lets the pure-Python albumentations pipeline run
    # inside the tf.data graph; it erases static shape info, so the shape
    # must be restored explicitly afterwards.
    aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
    aug_img.set_shape((IMG_HEIGHT, IMG_WIDTH, 3))
    return aug_img, label
# Training pipeline: shuffle -> normalize -> augment -> batch -> prefetch.
train_ds = (
    train_ds
    .shuffle(SHUFFLE_BUFFER)
    .map(preprocess, num_parallel_calls=AUTOTUNE)
    .map(apply_augmentation, num_parallel_calls=AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
# Validation/test pipelines: no shuffling, no augmentation.
val_ds = (
    val_ds
    .map(preprocess, num_parallel_calls=AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
test_ds = (
    test_ds
    .map(preprocess, num_parallel_calls=AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
def show_batch(image_batch, label_batch):
    """Display the first 25 images of a batch in a 5x5 grid, titled by label."""
    plt.figure(figsize=(10, 10))
    for idx in range(25):
        plt.subplot(5, 5, idx + 1)
        plt.imshow(image_batch[idx])
        plt.title(f'{label_batch[idx].numpy()}')
        plt.axis('off')
# Pull one batch from the training pipeline and visualize it.
image_batch, label_batch = next(iter(train_ds))
show_batch(image_batch, label_batch)
print(image_batch.shape, label_batch.shape)
```
# 🐤 Model
```
class ResNetModel(tf.keras.Model):
    """Keras Model wrapper that applies gradient clipping in its train step.

    Wraps a ResNet and overrides train_step so every gradient tensor is
    clipped to L2 norm 0.01 before the optimizer update (baseline for the
    NFNet gradient-clipping experiments).
    """

    def __init__(self, resnet):
        super(ResNetModel, self).__init__()
        # The wrapped backbone network.
        self.resnet = resnet

    def train_step(self, data):
        images, labels = data
        with tf.GradientTape() as tape:
            predictions = self.resnet(images)
            loss = self.compiled_loss(labels, predictions)
        trainable_params = self.resnet.trainable_variables
        gradients = tape.gradient(loss, trainable_params)
        # Clip each gradient tensor individually to norm 0.01 — this is
        # per-tensor clipping (tf.clip_by_norm), not global-norm clipping.
        gradients_clipped = [tf.clip_by_norm(g, 0.01) for g in gradients]
        self.optimizer.apply_gradients(zip(gradients_clipped, trainable_params))
        self.compiled_metrics.update_state(labels, predictions)
        return {m.name: m.result() for m in self.metrics}

    def test_step(self, data):
        images, labels = data
        predictions = self.resnet(images, training=False)
        # loss is computed so compiled_loss metrics (e.g. val_loss) update.
        loss = self.compiled_loss(labels, predictions)
        self.compiled_metrics.update_state(labels, predictions)
        return {m.name: m.result() for m in self.metrics}

    def save_weights(self, filepath):
        # NOTE(review): this narrows the signature of Keras' save_weights
        # (which also accepts overwrite/save_format/options) — confirm no
        # caller relies on those extra arguments.
        self.resnet.save_weights(filepath=filepath, save_format="tf")

    def call(self, inputs, *args, **kwargs):
        # Delegate forward pass to the wrapped backbone.
        return self.resnet(inputs)
# Sanity check: build a ResNet-20 without batch norm and report its size.
tf.keras.backend.clear_session()
test_model = ResNetModel(resnet_v1((IMG_HEIGHT, IMG_WIDTH, 3), 20, num_classes=NUM_CLASSES, use_bn=False))
test_model.build((1, IMG_HEIGHT, IMG_WIDTH, 3))
test_model.summary()
print(f"Total learnable parameters: {test_model.count_params()/1e6} M")
```
# 📲 Callbacks
```
# Stop after 10 epochs without val_loss improvement; restore the best weights.
earlystopper = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=10, verbose=0, mode='auto',
    restore_best_weights=True
)
# Halve the learning rate after 3 stagnant epochs.
reducelronplateau = tf.keras.callbacks.ReduceLROnPlateau(
    monitor="val_loss", factor=0.5,
    patience=3, verbose=1
)
```
# 🚋 Train with W&B
```
tf.keras.backend.clear_session()
# Initialize model: ResNet-20 without batch norm, with the per-tensor
# gradient clipping implemented in ResNetModel.train_step.
model = ResNetModel(resnet_v1((IMG_HEIGHT, IMG_WIDTH, 3), 20, num_classes=NUM_CLASSES, use_bn=False))
model.compile('adam', 'sparse_categorical_crossentropy', metrics=['acc'])
# Initialize W&B run
run = wandb.init(entity='ayush-thakur', project='nfnet', job_type='train-baseline')
# Train model (W&B logging + LR schedule + early stopping)
model.fit(train_ds,
          epochs=EPOCHS,
          validation_data=val_ds,
          callbacks=[WandbCallback(),
                     reducelronplateau,
                     earlystopper])
# Evaluate model on test set
loss, acc = model.evaluate(test_ds)
wandb.log({'Test Accuracy': round(acc, 3)})
# Close W&B run
run.finish()
```

| github_jupyter |
```
from model_2 import *
from data_2 import *
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

# Data generators: 150 training series, 15 validation series, batches of 4.
N_training = 150
N_validate = 15
batch_size = 4
train_ids = np.arange(N_training)
print(train_ids)
train_generator = DataGenerator(train_ids, '../training_data_v1/', '../training_data_v1/', do_fft = False, batch_size = batch_size)
# Validation ids follow directly after the training ids.
validation_ids = np.arange(N_training, N_training+N_validate)
print(validation_ids)
validation_generator = DataGenerator(validation_ids, '../training_data_v1/', '../training_data_v1/', do_fft = False, batch_size = batch_size)

# Build the U-Net on GPU 1.
with tf.device('/GPU:1'):
    model = unet(init_layers=128)
print(model.summary())
# Load test data (complex-valued simulated cardiac images).
cardiac_test = np.load('../tagsim/test_data_v1.npz')
truth_data = cardiac_test['truth']
test_data = cardiac_test['test']
# Split each complex array into two real channels: [..., 0]=real, [..., 1]=imag.
X = np.zeros(test_data.shape + (2,))
y = np.zeros(truth_data.shape + (2,))
X[:,:,:,0] = np.real(test_data)
X[:,:,:,1] = np.imag(test_data)
y[:,:,:,0] = np.real(truth_data)
y[:,:,:,1] = np.imag(truth_data)

# NOTE(review): this predict runs BEFORE model.fit below, so `res` holds
# predictions from an untrained network unless the cells are re-run out of
# order — confirm the intended execution order.
with tf.device('/GPU:2'):
    res = model.predict(X)
print(X.shape)
print(y.shape)

# NOTE(review): fit_generator is deprecated in favor of model.fit; also the
# cardiac test arrays (X, y) are used as validation data even though a
# validation_generator was built above — confirm this is intentional.
with tf.device('/GPU:1'):
    hist = model.fit_generator(train_generator, validation_data= (X,y) , epochs=12)
# Training curves.
plt.figure()
plt.plot(hist.history['loss'], label='Training Loss')
plt.plot(hist.history['val_loss'], label='Validation Loss')
plt.legend()

# Log-magnitude k-space (2D FFT) of input, result, and truth.
# NOTE(review): im_X, im_res, and im_truth are only defined in a LATER cell,
# so running this notebook top-to-bottom raises NameError here — confirm
# cell ordering.
plt.figure()
plt.imshow(np.log(np.abs(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im_X))))))
plt.figure()
plt.imshow(np.log(np.abs(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im_res[11]))))))
plt.figure()
plt.imshow(np.log(np.abs(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im_truth[11]))))))
# Visualize magnitude of input, results, truth.
tt = 11
# Recombine the two real channels into a complex image at timeframe tt.
im_X = X[tt,:,:,0] + 1j * X[tt,:,:,1]
im_res = res[tt,:,:,0] + 1j * res[tt,:,:,1]
im_truth = truth_data[tt]
fig, axs = plt.subplots(1, 3, squeeze=False, figsize=(18,8))
axs[0,0].imshow((np.abs(im_X)), cmap='gray')
axs[0,1].imshow((np.abs(im_res)), cmap='gray')
axs[0,2].imshow((np.abs(im_truth)), cmap='gray')

# Visualize phase of input, results, truth.
tt = 11
im_X = X[tt,:,:,0] + 1j * X[tt,:,:,1]
im_res = res[tt,:,:,0] + 1j * res[tt,:,:,1]
im_truth = truth_data[tt]
fig, axs = plt.subplots(1, 3, squeeze=False, figsize=(18,8))
axs[0,0].imshow((np.angle(im_X)), cmap='gray')
axs[0,1].imshow((np.angle(im_res)), cmap='gray')
axs[0,2].imshow((np.angle(im_truth)), cmap='gray')
# Compute error:
# Phase difference of results and truth.
im_res = res[:,:,:,0] + 1j * res[:,:,:,1]
im_truth = truth_data
# Wrapped phase difference: angle(res * exp(-i*angle(truth))) stays in (-pi, pi].
ph_diff = np.angle(im_res * np.exp(-1j * np.angle(im_truth)))

# Plot phase difference.
tt_show = 11  # timeframe to look at
plt.figure()
plt.imshow(ph_diff[tt_show])
plt.title('Phase difference')

# We don't actually care about anything but the heart; a mask was saved in
# the test dataset for that region.
mask = cardiac_test['mask']
plt.figure()
plt.imshow(mask[tt_show])
plt.title('Mask')

# Multiply results by the mask.
ph_diff_mask = ph_diff * mask
plt.figure()
plt.imshow(ph_diff_mask[tt_show])
plt.title('Masked Phase difference')

# Convert phase (radians) to displacement error in mm.
ke = 0.12  # This was set in simulation
mm_diff_mask = ph_diff_mask / (2*np.pi) / ke
plt.figure()
plt.imshow(mm_diff_mask[tt_show])
plt.colorbar()
plt.title('Masked mm difference')

# Calculate mean and standard deviation error per timeframe.
# We index with the mask instead of multiplying so the zeros outside the
# mask don't dilute the statistics.
err_mean = []
err_std = []
for tt in range(im_res.shape[0]):
    t_diff = mm_diff_mask[tt, mask[tt]>0]
    err_mean.append(t_diff.mean())
    err_std.append(t_diff.std())
print('Time resolved mean error:', err_mean)
print('Time resolved stdev error:', err_std)
x_ax = np.arange(25)
plt.figure()
plt.errorbar(x_ax, err_mean, yerr = err_std, fmt='o')
plt.title('Mean error by timeframe')
plt.xlabel("time frames 0 to 24")

# And the total error as a single metric (over all masked voxels/timeframes).
total_err = mm_diff_mask[mask>0]
print('Total Error:', np.abs(total_err).mean(), 'mm +/-', np.abs(total_err).std(), 'mm')
```
## Tuning ideas
* Use all timeframes together
* As channels (3D convolutions will be too expensive)
* Operate in k-space
* Which loss function is best
* Operate in image space, but loss function in k-space
* Play with activation functions, where to use batch normalization, learning rate adjustments, batch size
* Check initial image normalization (zero mean, stdev = 1)
## Implement validation, test data
* Validate on different simulations
* Test on the cardiac simulation
* Eventually we will test on our acquired DENSE data (I need to prep it)
```
# Quick inspection of the test archive contents and array shape.
list(cardiac_test.keys())
X.shape
# (duplicate of the previous expression)
X.shape
```
| github_jupyter |
# Expression Quality Control (Part 2)
This is a template notebook for performing the final quality control on your organism's expression data. This requires a curated metadata sheet.
## Setup
```
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from os import path
from scipy import stats
from tqdm.notebook import tqdm
sns.set_style('ticks')
```
### Inputs
```
# Input file locations (outputs of QC part 1).
logTPM_file = path.join('..','data','raw_data','log_tpm.csv') # Enter log-TPM filename here
all_metadata_file = path.join('..','data','interim','metadata_qc_part1_all.tsv') # Enter full metadata filename here
metadata_file = path.join('..','data','interim','metadata_qc_part1_curated.tsv') # Enter curated metadata filename here
```
### Load expression data
```
# Load the log-TPM expression matrix (rows = genes, columns = samples);
# missing values are filled with 0.
DF_log_tpm = pd.read_csv(logTPM_file,index_col=0).fillna(0)
print('Number of genes:',DF_log_tpm.shape[0])
print('Number of samples:',DF_log_tpm.shape[1])
DF_log_tpm.head()
```
### Load metadata
```
# Load the curated and full metadata tables (sample id as index).
DF_metadata = pd.read_csv(metadata_file,index_col=0,sep='\t')
print('Number of samples with curated metadata:',DF_metadata.shape[0])
DF_metadata.head()
DF_metadata_all = pd.read_csv(all_metadata_file,index_col=0,sep='\t')
```
## Remove samples due to poor metadata
After curation, some samples either did not have enough replicates or metadata to warrant inclusion in this database.
```
# Drop samples flagged 'skip' during curation (a missing skip flag means keep).
DF_metadata_passed_step4 = DF_metadata[~DF_metadata.skip.fillna(False)].copy()
print('New number of samples with curated metadata:',DF_metadata_passed_step4.shape[0])
DF_metadata_passed_step4.head()
```
### Check curation
Since manual curation is error-prone, we want to make sure that all samples have labels for their project and condition. In addition, there should only be one reference condition in each project, and it should be in the project itself.
Any samples that fail these checks will be printed below.
```
# Every sample must carry a project and a condition label.
assert(DF_metadata_passed_step4.project.notnull().all())
assert(DF_metadata_passed_step4.condition.notnull().all())

for name, group in DF_metadata_passed_step4.groupby('project'):
    ref_cond = group.reference_condition.unique()
    # Ensure that there is only one reference condition per project
    if not len(ref_cond) == 1:
        # BUG FIX: the project name was previously inside the string literal
        # ('...for:, name'), so the offending project was never printed.
        print('Multiple reference conditions for:', name)
    # Ensure the reference condition is in fact in the project
    ref_cond = ref_cond[0]
    if not ref_cond in group.condition.tolist():
        print('Reference condition not in project:', name)
```
Next, make a new column called ``full_name`` that gives every experimental condition a unique, human-readable identifier.
```
# Unique, human-readable id per experimental condition: "<project>:<condition>".
DF_metadata_passed_step4['full_name'] = DF_metadata_passed_step4['project'].str.cat(DF_metadata_passed_step4['condition'],sep=':')
```
### Remove samples with only one replicate
First, find sample names that have at least two replicates.
```
# Conditions that have at least two replicate samples.
counts = DF_metadata_passed_step4.full_name.value_counts()
keep_samples = counts[counts >= 2].index
print(keep_samples[:5])
```
Only keep these samples
```
# Keep only samples whose condition has >= 2 replicates.
DF_metadata_passed_step4 = DF_metadata_passed_step4[DF_metadata_passed_step4.full_name.isin(keep_samples)]
print('New number of samples with curated metadata:',DF_metadata_passed_step4.shape[0])
DF_metadata_passed_step4.head()
```
### Save this information to the full metadata dataframe
```
# Record in the full metadata table which samples survived curation.
DF_metadata_all['passed_curation'] = DF_metadata_all.index.isin(DF_metadata_passed_step4.index)
```
## Check correlations between replicates
### Remove failed data from log_tpm files
```
# Keep only the expression columns for samples that passed curation.
DF_log_tpm = DF_log_tpm[DF_metadata_passed_step4.index]
```
### Compute Pearson R Score
Biological replicates should have a Pearson R correlation above 0.95. For samples with more than 2 replicates, the replicates must have R >= 0.95 with at least one other replicate or it will be dropped. The correlation threshold can be changed below:
```
# Minimum Pearson R a sample must reach with at least one fellow replicate.
rcutoff = 0.95
```
The following code computes correlations between all samples and collects correlations between replicates and non-replicates.
```
# Pearson correlations for every pair of samples, separated into replicate
# pairs (same full_name) and non-replicate pairs.
rep_corrs = {}
rand_corrs = {}
n_samples = len(DF_metadata_passed_step4)
num_comparisons = n_samples*(n_samples-1)/2
for exp1, exp2 in tqdm(itertools.combinations(DF_metadata_passed_step4.index, 2), total=num_comparisons):
    r = stats.pearsonr(DF_log_tpm[exp1], DF_log_tpm[exp2])[0]
    same_condition = (DF_metadata_passed_step4.loc[exp1, 'full_name']
                      == DF_metadata_passed_step4.loc[exp2, 'full_name'])
    bucket = rep_corrs if same_condition else rand_corrs
    bucket[(exp1, exp2)] = r
```
Correlations can be plotted on a histogram
```
# Histogram of replicate (green, right axis) vs non-replicate (blue, left
# axis) correlations; replicates should cluster near 1.0.
fig,ax = plt.subplots(figsize=(5,5))
ax2 = ax.twinx()
ax2.hist(rep_corrs.values(),bins=50,range=(0.2,1),alpha=0.8,color='green',linewidth=0)
ax.hist(rand_corrs.values(),bins=50,range=(0.2,1),alpha=0.8,color='blue',linewidth=0)
ax.set_title('Pearson R correlation between experiments',fontsize=14)
ax.set_xlabel('Pearson R correlation',fontsize=14)
ax.set_ylabel('Different Conditions',fontsize=14)
ax2.set_ylabel('Known Replicates',fontsize=14)
med_corr = np.median([v for k,v in rep_corrs.items()])
print('Median Pearson R between replicates: {:.2f}'.format(med_corr))
```
Remove samples without any high-correlation replicates
```
# A sample passes if its best correlation with some other replicate of the
# same condition reaches rcutoff.
dissimilar = []
for idx, grp in DF_metadata_passed_step4.groupby('full_name'):
    # Subtract the identity so each sample's self-correlation (1.0) does not
    # count; then take each sample's max correlation with its fellow replicates.
    ident = np.identity(len(grp))
    corrs = (DF_log_tpm[grp.index].corr() - ident).max()
    dissimilar.extend(corrs[corrs<rcutoff].index)
# Save this information in both the original metadata dataframe and the new metadata dataframe
DF_metadata_all['passed_replicate_correlations'] = ~DF_metadata_all.index.isin(dissimilar)
DF_metadata_passed_step4['passed_replicate_correlations'] = ~DF_metadata_passed_step4.index.isin(dissimilar)
DF_metadata_final = DF_metadata_passed_step4[DF_metadata_passed_step4['passed_replicate_correlations']]
print('# Samples that passed replicate correlations:',len(DF_metadata_final))
```
## Check that reference conditions still exist
If a reference condition was removed due to poor replicate correlations, a new reference condition needs to be defined.
Again, any samples that fail these checks will be printed below.
```
# Verify each project still has its reference condition and at least one
# test condition after the correlation filter.
# (Removed an unused `project_exprs = []` initialization here; the
# normalization cell below creates its own list before using it.)
for name, group in DF_metadata_final.groupby('project'):
    # Get reference condition
    ref_cond = group.reference_condition.iloc[0]
    # Ensure the reference condition is still in the project
    if ref_cond not in group.condition.tolist():
        print('Reference condition missing from:', name)
    # Check that each project has at least two conditions (a reference and
    # at least one test condition)
    if len(group.condition.unique()) <= 1:
        print('Only one condition in:', name)
```
If necessary, choose a new condition for failed projects and re-run notebook.
## Normalize dataset to reference conditions
```
# Express each sample as a log fold-change relative to its project's
# reference condition.
DF_log_tpm_final = DF_log_tpm[DF_metadata_final.index]
project_exprs = []
for name,group in DF_metadata_final.groupby('project'):
    # Get reference condition
    ref_cond = group.reference_condition.iloc[0]
    # Get reference condition sample ids
    ref_samples = group[group.condition == ref_cond].index
    # Get reference condition expression (mean across reference replicates)
    ref_expr = DF_log_tpm_final[ref_samples].mean(axis=1)
    # Subtract reference expression from every sample in the project
    project_exprs.append(DF_log_tpm_final[group.index].sub(ref_expr,axis=0))
DF_log_tpm_norm = pd.concat(project_exprs,axis=1)
```
## Save final datasets
```
# Output locations for the QC'd matrices and metadata.
logTPM_qc_file = path.join('..','data','processed_data','log_tpm.csv')
logTPM_norm_file = path.join('..','data','processed_data','log_tpm_norm.csv')
final_metadata_file = path.join('..','data','processed_data','metadata.tsv')
final_metadata_all_file = path.join('..','data','interim','metadata_qc_part2_all.tsv')
# Save the filtered matrix, the reference-normalized matrix, and both
# metadata tables (final + full-with-QC-flags).
DF_log_tpm_final.to_csv(logTPM_qc_file)
DF_log_tpm_norm.to_csv(logTPM_norm_file)
DF_metadata_final.to_csv(final_metadata_file, sep='\t')
DF_metadata_all.to_csv(final_metadata_all_file, sep='\t')
```
| github_jupyter |
# Hyperparams And Distributions
This page introduces the hyperparams, and distributions in Neuraxle. You can find [Hyperparams Distribution API here](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html), and
[Hyperparameter Samples API here](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.space.html).
Hyperparameter is a parameter drawn from a prior distribution. In Neuraxle, we have a few built-in distributions, and we are also compatible with scipy distributions.
Create a [Uniform Distribution](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html#neuraxle.hyperparams.distributions.Uniform):
```
from neuraxle.hyperparams.distributions import Uniform
hd = Uniform(
min_included=-10,
max_included=10,
null_default_value=0
)
```
Sample the random variable using [rvs](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html#neuraxle.hyperparams.distributions.HyperparameterDistribution.rvs):
```
sample = hd.rvs()
print(sample)
```
Nullify the random variable using [nullify](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html#neuraxle.hyperparams.distributions.HyperparameterDistribution.nullify):
```
nullified_sample = hd.nullify()
assert nullified_sample == hd.null_default_value
```
Get the probability distribution function value at `x` using [pdf](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html#neuraxle.hyperparams.distributions.HyperparameterDistribution.pdf):
```
pdf = hd.pdf(1)
print('pdf: {}'.format(pdf))
```
Get the cumulative probability distribution function value at `x` using [cdf](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html#neuraxle.hyperparams.distributions.HyperparameterDistribution.cdf)
```
cdf = hd.cdf(1)
print('cdf: {}'.format(cdf))
```
## Setting And Updating Hyperparams
In Neuraxle, each step has hyperparams of type [HyperparameterSamples](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.space.html#neuraxle.hyperparams.space.HyperparameterSamples), and spaces of type [HyperparameterSpace](https://www.neuraxle.org/stable/api/neuraxle.hyperparams.distributions.html#neuraxle.hyperparams.distributions.HyperparameterDistribution).
Consider a simple pipeline that contains 2 MultiplyByN steps, and one PCA component inside a nested pipeline:
```
from sklearn.decomposition import PCA
from neuraxle.hyperparams.distributions import RandInt
from neuraxle.hyperparams.space import HyperparameterSpace, HyperparameterSamples
from neuraxle.pipeline import Pipeline
from neuraxle.steps.numpy import MultiplyByN
p = Pipeline([
('step1', MultiplyByN(2)),
('step2', MultiplyByN(2)),
Pipeline([
PCA(n_components=4)
])
])
```
We can set or update the hyperparams, and spaces by doing the following:
```
p.set_hyperparams(HyperparameterSamples({
'step1__multiply_by': 42,
'step2__multiply_by': -10,
'Pipeline__PCA__n_components': 2
}))
p.update_hyperparams(HyperparameterSamples({
'Pipeline__PCA__n_components': 3
}))
p.set_hyperparams_space(HyperparameterSpace({
'step1__multiply_by': RandInt(42, 50),
'step2__multiply_by': RandInt(-10, 0),
'Pipeline__PCA__n_components': RandInt(2, 3)
}))
```
We can sample the space of random variables:
```
samples = p.get_hyperparams_space().rvs()
assert 42 <= samples['step1__multiply_by'] <= 50
assert -10 <= samples['step2__multiply_by'] <= 0
assert samples['Pipeline__PCA__n_components'] in [2, 3]
```
We can get all hyperparams:
```
samples = p.get_hyperparams()
assert 42 <= samples['step1__multiply_by'] <= 50
assert -10 <= samples['step2__multiply_by'] <= 0
assert samples['Pipeline__PCA__n_components'] in [2, 3]
assert p['Pipeline']['PCA'].get_wrapped_sklearn_predictor().n_components in [2, 3]
```
## Neuraxle Custom Distributions
## Scipy Distributions
To define a scipy distribution that is compatible with Neuraxle, you need to wrap the scipy distribution with ScipyDistributionWrapper:
```
from neuraxle.hyperparams.scipy_distributions import ScipyDistributionWrapper, BaseContinuousDistribution, BaseDiscreteDistribution
from scipy.integrate import quad
from scipy.special import factorial
from scipy.stats import rv_continuous, norm, rv_discrete, rv_histogram, truncnorm, randint
import numpy as np
import math
hd = ScipyDistributionWrapper(
scipy_distribution=randint(low=0, high=10),
is_continuous=False,
null_default_value=0
)
```
### Discrete Distributions
For discrete distributions that inherit from [rv_discrete](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.html#scipy.stats.rv_discrete), you only need to implement `_pmf`. The rest is taken care of magically by scipy.
For example, here is a discrete poisson distribution:
```
class Poisson(BaseDiscreteDistribution):
    """Discrete Poisson hyperparameter distribution.

    Only _pmf needs to be implemented; scipy's rv_discrete machinery derives
    rvs/cdf/etc. from it.
    """

    def __init__(self, min_included: float, max_included: float, null_default_value: float = None, mu=0.6):
        super().__init__(
            min_included=min_included,
            max_included=max_included,
            name='poisson',
            null_default_value=null_default_value
        )
        # Poisson rate parameter (expected count).
        self.mu = mu

    def _pmf(self, x):
        # P(X = x) = exp(-mu) * mu^x / x!
        return math.exp(-self.mu) * self.mu ** x / factorial(x)
```
### Continuous Distributions
For continuous distributions that inherit from [rv_continuous](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.html), you only need to implement the _pdf function. The rest is taken care of magically by scipy.
For example, here is a continuous gaussian distribution:
```
class Gaussian(BaseContinuousDistribution):
    """Standard-normal continuous distribution clipped to [min_included, max_included].

    Implementing ``_pdf`` alone is enough: scipy's ``rv_continuous`` base
    derives rvs/cdf/moments from it.
    """

    def __init__(self, min_included: int, max_included: int, null_default_value: float = None):
        # Stash the bounds before delegating to the base constructor.
        self.min_included = min_included
        self.max_included = max_included
        BaseContinuousDistribution.__init__(
            self,
            name='gaussian',
            min_included=min_included,
            max_included=max_included,
            null_default_value=null_default_value
        )

    def _pdf(self, x):
        # Standard normal density: e^(-x^2/2) / sqrt(2*pi)
        return math.exp(-(x * x) / 2.) / np.sqrt(2.0 * np.pi)
### Custom Arguments
If you want to add more properties to calculate your distributions, just add them in self. They will be available in all of the scipy private methods you can override like _pmf, and _pdf.
```
class LogNormal(BaseContinuousDistribution):
    """Log-normal distribution parameterized in log2 space.

    The density is that of a variable whose log2 is normally distributed
    with mean ``log2_space_mean`` and std ``log2_space_std``, optionally
    hard-clipped to [hard_clip_min, hard_clip_max].
    """

    def __init__(
            self,
            log2_space_mean: float,
            log2_space_std: float,
            hard_clip_min: float,
            hard_clip_max: float,
            null_default_value: float = None
    ):
        # NOTE(review): null_default_value is taken from hard_clip_min BEFORE
        # the None -> nan fallback below, so if both are None the default
        # value stays None rather than nan — confirm this is intended.
        if null_default_value is None:
            null_default_value = hard_clip_min

        # Unbounded clips are represented as nan for the base class.
        if hard_clip_min is None:
            hard_clip_min = np.nan
        if hard_clip_max is None:
            hard_clip_max = np.nan

        self.log2_space_mean = log2_space_mean
        self.log2_space_std = log2_space_std

        super().__init__(
            name='log_normal',
            min_included=hard_clip_min,
            max_included=hard_clip_max,
            null_default_value=null_default_value
        )

    def _pdf(self, x):
        """Probability density at *x*; zero for non-positive *x*."""
        if x <= 0:
            return 0.

        # No truncation renormalization is applied here: the cdf bounds are
        # fixed at 0 and 1, so the divisor below is always 1.
        cdf_min = 0.
        cdf_max = 1.

        # Log-normal pdf in log2 space: the 1/(x*ln 2) factor is the Jacobian
        # of the change of variables y = log2(x).
        pdf_x = 1 / (x * math.log(2) * self.log2_space_std * math.sqrt(2 * math.pi)) * math.exp(
            -(math.log2(x) - self.log2_space_mean) ** 2 / (2 * self.log2_space_std ** 2))
        return pdf_x / (cdf_max - cdf_min)
### Scipy methods
All of the scipy distribution methods are available:
```
def get_many_samples_for(hd, num_trial):
    """Draw ``num_trial`` independent samples from distribution ``hd``.

    :param hd: any object exposing an ``rvs()`` sampling method.
    :param num_trial: number of samples to draw.
    :return: list of ``num_trial`` samples, in draw order.
    """
    samples = []
    for _ in range(num_trial):
        samples.append(hd.rvs())
    return samples
samples = get_many_samples_for(hd, 1000)
for s in samples:
assert type(s) == int
hd = Gaussian(min_included=0, max_included=10, null_default_value=0)
assert 0.0 <= hd.rvs() <= 10.0
assert hd.pdf(10) < 0.001
assert hd.pdf(0) < 0.42
assert 0.55 > hd.cdf(5.0) > 0.45
assert hd.cdf(0) == 0.0
assert hd.logpdf(5) == -13.418938533204672
assert hd.logcdf(5) == -0.6931477538632531
assert hd.sf(5) == 0.5000002866515718
assert hd.logsf(5) == -0.693146607256966
assert np.all(hd.ppf([0.0, 0.01, 0.05, 0.1, 1 - 0.10, 1 - 0.05, 1 - 0.01, 1.0], 10))
assert np.isclose(hd.moment(2), 50.50000000091249)
assert hd.stats()[0]
assert hd.stats()[1]
assert np.array_equal(hd.entropy(), np.array(0.7094692666023363))
assert hd.median()
assert hd.mean() == 5.398942280397029
assert np.isclose(hd.std(), 4.620759921685374)
assert np.isclose(hd.var(), 21.35142225385382)
assert np.isclose(hd.expect(), 0.39894228040143276)
interval = hd.interval(alpha=[0.25, 0.50])
assert np.all(interval[0])
assert np.all(interval[1])
assert hd.support() == (0, 10)
```
## SKLearn Hyperparams
SKLearnWrapper wraps sklearn predictors so that they can be compatible with Neuraxle. When you set the hyperparams of an SKLearnWrapper, it automatically sets the params of the sklearn predictor for you:
```
from neuraxle.hyperparams.distributions import Choice
from neuraxle.hyperparams.distributions import RandInt
from neuraxle.hyperparams.space import HyperparameterSpace
from neuraxle.steps.sklearn import SKLearnWrapper
from sklearn.tree import DecisionTreeClassifier
decision_tree_classifier = SKLearnWrapper(
DecisionTreeClassifier(),
HyperparameterSpace({
'criterion': Choice(['gini', 'entropy']),
'splitter': Choice(['best', 'random']),
'min_samples_leaf': RandInt(2, 5),
'min_samples_split': RandInt(1, 3)
})
).set_hyperparams(HyperparameterSamples({
'criterion': 'gini',
'splitter': 'best',
'min_samples_leaf': 3,
'min_samples_split': 3
}))
```
| github_jupyter |
# Hyperparameter Tuning using SageMaker Tensorflow Container
This tutorial focuses on how to create a convolutional neural network model to train the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) using **SageMaker TensorFlow container**. It leverages hyperparameter tuning to kick off multiple training jobs with different hyperparameter combinations, to find the one with best model training result.
## Set up the environment
We will set up a few things before starting the workflow.
1. specify the s3 bucket and prefix where training data set and model artifacts will be stored
2. get the execution role which will be passed to sagemaker for accessing your resources such as s3 bucket
```
import sagemaker
import project_path
from lib import utils
bucket = '{{s3_workshop_bucket}}'
prefix = 'sagemaker/DEMO-hpo-tensorflow-high' # you can customize the prefix (subfolder) here
role = sagemaker.get_execution_role() # we are using the notebook instance role for training in this example
```
Now we'll import the Python libraries we'll need.
```
import boto3
from time import gmtime, strftime
from sagemaker.tensorflow import TensorFlow
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
```
## Download the MNIST dataset
```
import utils
from tensorflow.contrib.learn.python.learn.datasets import mnist
import tensorflow as tf
data_sets = mnist.read_data_sets('data', dtype=tf.uint8, reshape=False, validation_size=5000)
utils.convert_to(data_sets.train, 'train', 'data')
utils.convert_to(data_sets.validation, 'validation', 'data')
utils.convert_to(data_sets.test, 'test', 'data')
```
## Upload the data
We use the ```sagemaker.Session.upload_data``` function to upload our datasets to an S3 location. The return value identifies the location -- we will use this later when we start the training job.
```
inputs = sagemaker.Session().upload_data(path='data', bucket=bucket, key_prefix=prefix+'/data/mnist')
print (inputs)
```
## Construct a script for distributed training
Here is the full code for the network model:
```
!cat '../scripts/mnist.py'
```
The script here is an adaptation of the [TensorFlow MNIST example](https://github.com/tensorflow/models/tree/master/official/mnist). It provides a ```model_fn(features, labels, mode)```, which is used for training, evaluation and inference.
### A regular ```model_fn```
A regular **```model_fn```** follows the pattern:
1. [defines a neural network](https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py#L96)
- [applies the ```features``` in the neural network](https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py#L178)
- [if the ```mode``` is ```PREDICT```, returns the output from the neural network](https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py#L186)
- [calculates the loss function comparing the output with the ```labels```](https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py#L188)
- [creates an optimizer and minimizes the loss function to improve the neural network](https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py#L193)
- [returns the output, optimizer and loss function](https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py#L205)
### Writing a ```model_fn``` for distributed training
When distributed training happens, the same neural network will be sent to the multiple training instances. Each instance will predict a batch of the dataset, calculate loss and minimize the optimizer. One entire loop of this process is called **training step**.
#### Synchronizing training steps
A [global step](https://www.tensorflow.org/api_docs/python/tf/train/global_step) is a global variable shared between the instances. It is necessary for distributed training, so the optimizer will keep track of the number of **training steps** between runs:
```python
train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())
```
That is the only required change for distributed training!
## Set up hyperparameter tuning job
*Note, with the default setting below, the hyperparameter tuning job can take about 30 minutes to complete.*
Now we will set up the hyperparameter tuning job using SageMaker Python SDK, following below steps:
* Create an estimator to set up the TensorFlow training job
* Define the ranges of hyperparameters we plan to tune, in this example, we are tuning "learning_rate"
* Define the objective metric for the tuning job to optimize
* Create a hyperparameter tuner with above setting, as well as tuning resource configurations
Similar to training a single TensorFlow job in SageMaker, we define our TensorFlow estimator passing in the TensorFlow script, IAM role, and (per job) hardware configuration.
```
estimator = TensorFlow(entry_point='../scripts/mnist.py',
role=role,
framework_version='1.11.0',
training_steps=1000,
evaluation_steps=100,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
base_job_name='DEMO-hpo-tensorflow')
```
Once we've defined our estimator we can specify the hyperparameters we'd like to tune and their possible values. We have three different types of hyperparameters.
- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`
- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`
- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`
*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning learning rate as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with values 0.01, 0.1, 0.15, or 0.2.*
```
hyperparameter_ranges = {'learning_rate': ContinuousParameter(0.01, 0.2)}
```
Next we'll specify the objective metric that we'd like to tune and its definition, which includes the regular expression (Regex) needed to extract that metric from the CloudWatch logs of the training job. In this particular case, our script emits loss value and we will use it as the objective metric, we also set the objective_type to be 'minimize', so that hyperparameter tuning seeks to minimize the objective metric when searching for the best hyperparameter setting. By default, objective_type is set to 'maximize'.
```
objective_metric_name = 'loss'
objective_type = 'Minimize'
metric_definitions = [{'Name': 'loss',
'Regex': 'loss = ([0-9\\.]+)'}]
```
Now, we'll create a `HyperparameterTuner` object, to which we pass:
- The TensorFlow estimator we created above
- Our hyperparameter ranges
- Objective metric name and definition
- Tuning resource configurations such as Number of training jobs to run in total and how many training jobs can be run in parallel.
```
tuner = HyperparameterTuner(estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
max_jobs=9,
max_parallel_jobs=3,
objective_type=objective_type)
```
## Launch hyperparameter tuning job
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 path to our train and test dataset.
After the hyperparameter tuning job is created, you should be able to describe the tuning job to see its progress in the next step, and you can go to SageMaker console->Jobs to check out the progress of the hyperparameter tuning job.
```
tuner.fit(inputs)
```
Let's just run a quick check of the hyperparameter tuning jobs status to make sure it started successfully.
```
boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
```
## Analyze tuning job results - after tuning job is completed
Please refer to "HPO_Analyze_TuningJob_Results.ipynb" to see example code to analyze the tuning job results.
## Deploy the best model
Now that we have got the best model, we can deploy it to an endpoint. Please refer to other SageMaker sample notebooks or SageMaker documentation to see how to deploy a model.
| github_jupyter |
# Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
## Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
```
data_path = 'hour.csv'
rides = pd.read_csv(data_path)
rides.head()
```
## Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
```
rides[:24*10].plot(x='dteday', y='cnt')
```
### Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.
```
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
```
### Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
```
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
```
### Splitting the data into training, testing, and validation sets
We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
```
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
```
We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
```
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
```
## Time to build the network
Below you'll build your network. We've built out the structure. You'll implement both the forward pass and backwards pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
<img src="assets/neural_network.png" width=300px>
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.
> **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
2. Implement the forward pass in the `train` method.
3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.
4. Implement the forward pass in the `run` method.
```
#############
# In the my_answers.py file, fill out the TODO sections as specified
#############
from my_answers import NeuralNetwork
def MSE(y, Y):
    """Mean squared error between predictions *y* and targets *Y*."""
    diff = y - Y
    return np.mean(diff * diff)
```
## Unit tests
Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you start trying to train it. These tests must all be successful to pass the project.
```
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
    # Unit tests exercising the data-loading globals (data_path, rides) and
    # the NeuralNetwork implementation imported from my_answers.py.
    # The fixtures inputs/targets/test_w_i_h/test_w_h_o are module-level
    # arrays defined just above this class.

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        # NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Seed the network with known weights (copies, so fixtures stay intact).
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        network.train(inputs, targets)
        print(network.weights_hidden_to_output)
        # Expected weights after exactly one training pass with lr = 0.5.
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def runTest(self):
        # Test correctness of run method
        # NOTE: named runTest (not test_*) so it is executed when the
        # TestCase instance itself is loaded into the suite below.
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
```
## Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
### Choose the number of iterations
This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, this process can have sharply diminishing returns and can waste computational resources if you use too many iterations. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. The ideal number of iterations would be a level that stops shortly after the validation loss is no longer decreasing.
### Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. Normally a good choice to start at is 0.1; however, if you effectively divide the learning rate by n_records, try starting out with a learning rate of 1. In either case, if the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
### Choose the number of hidden nodes
In a model where all the weights are optimized, the more hidden nodes you have, the more accurate the predictions of the model will be. (A fully optimized model could have weights of zero, after all.) However, the more hidden nodes you have, the harder it will be to optimize the weights of the model, and the more likely it will be that suboptimal weights will lead to overfitting. With overfitting, the model will memorize the training data instead of learning the true pattern, and won't generalize well to unseen data.
Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. You'll generally find that the best number of hidden nodes to use ends up being between the number of input and output nodes.
```
import sys

####################
### Set the hyperparameters in your my_answers.py file ###
####################
from my_answers import iterations, learning_rate, hidden_nodes, output_nodes

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # DataFrame.ix was removed in pandas 1.0; .loc does the same
    # label-based lookup on the sampled index values.
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
```
## Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
```
fig, ax = plt.subplots(figsize=(8,4))

# Undo the standardization of 'cnt' so predictions are in real ride counts.
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
# Mean absolute error on the (un-scaled) test set.
print(np.average(np.absolute((test_targets['cnt']*std + mean).values - predictions[0])))
ax.set_xlim(right=len(predictions))
ax.legend()

# DataFrame.ix was removed in pandas 1.0; .loc performs the same
# label-based row selection for the test-set dates.
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
```
## OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
> **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
#### Your answer below
For normal days, it seems to predict pretty well. It follows the graph very well between peaks and valleys and is usually within 50 - 100 bikes or so. It is interesting that it predicts less than 0 bikes on occasion. It seems to fail at predicting the holidays, and I would expect that to be due to the lack of training data for this period of time.
## Submitting:
Open up the 'jwt' file in the first-neural-network directory (which also contains this notebook) for submission instructions
| github_jupyter |
## データの構造をざっと見てみる
```
import os
HOUSING_PATH = os.path.join('/src/datasets', 'housing')
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
    """Read housing.csv from *housing_path* and return it as a DataFrame."""
    return pd.read_csv(os.path.join(housing_path, 'housing.csv'))
housing = load_housing_data() # csvファイルを読み込む
housing.head() # headで最初の5行取得
# longiture=経度
# latitude=緯度
# housing_median_age=築年数の中央値
# total_rooms=部屋数
# total_bedrooms=寝室数
# population=人口
# households=世帯数
# median_income=収入の中央値
# median_house_value=住宅価格の中央値
# ocean_proximity=海との位置関係
housing.info() # infoで総行数、各属性のタイプ、nullではない値の数などが取得できる
# total_bedroomsはnullではない値が20,433個しかない
# つまりtotal_bedroomsという特徴量(feature)を持たない区域が207あるということ
# ocean_proximityのタイプはobject
# この場合はtextオブジェクト
# 先頭5行の出力から、カテゴリを示すものだとわかる
# value_counts()を使えば、どのようなカテゴリがあってそれぞれのカテゴリに何個の区域が含まれているかわかる
housing['ocean_proximity'].value_counts()
housing.describe() # describe()メソッドを使うと、数値属性の集計情報が表示される
# std(standard deviation)行は、標準偏差(値の散らばり具合)を示している
# 標準偏差に関する詳しい説明 https://atarimae.biz/archives/5379
# 25%, 50%, 75%の各行は、パーセンタイル(percentile)を示している
# パーセンタイルとは、観測値のグループのうち下から数えて指定された割合の観測値の値がどうなっているかを示す。
# 例えば、下から数えて25%の区域のhousing_median_ageは18年、50%の区域では29年、75%の区域では37年となる。
# ヒストグラムをプロットすることも、データの感じをつかむためには効果的
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15)) # bins = 表示するビン(棒)の数
plt.show()
# 収入の中央値(median_income)はデータ収集時にスケーリングされたもの。上限15.0001,下限0.4999
# 機械学習では、前処理済みの属性を使うのは、ごく普通のことであるため、データがどのように計算されたものなのかは理解しておいた方がよい。
# 築年数の中央値(housing_median_age)と住宅価格の中央値(median_house_value)も条件を切ってある。後者の場合、ターゲット属性なので、重大な問題になる。
# 50万ドルを越えても正確な予測が必要な場合、以下の選択肢がある。
# a. 上限を越えている区域の正しいラベルを集める。
# b. 訓練セットからそれらの区域を取り除く(50万ドルを越える値を予測したときにシステムの評価が下がるので、テストセットからも取り除く)。
# ほとんどのヒストグラムがテールヘビー(tail heavy)になっている。
# つまり、中央値の左側より右側が大きく広がっている。
# このような形になると、一部の機械学習アルゴリズムはパターンを見つけにくくなることがある。
# そういった属性については、ベル型分布に近づくように変換する。
```
## テストセットを作る
```
import numpy as np
def split_train_test(data, test_ratio):
    """Randomly partition *data* into a train set and a test set.

    :param data: pandas DataFrame to split.
    :param test_ratio: fraction of rows to place in the test set (e.g. 0.2).
    :return: (train_set, test_set) tuple of DataFrames.
    """
    n_test = int(len(data) * test_ratio)
    shuffled = np.random.permutation(len(data))
    test_idx = shuffled[:n_test]
    train_idx = shuffled[n_test:]
    # iloc: positional row selection; see https://note.nkmk.me/python-pandas-at-iat-loc-iloc/
    return data.iloc[train_idx], data.iloc[test_idx]
train_set, test_set = split_train_test(housing, 0.2)
print(len(train_set), 'train +', len(test_set), 'test')
# これでテストセットを作ることはできるが、完璧ではない。
# よく理解出来てない(p49~p50)
import hashlib
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
    """Split *data* by hashing a stable id column.

    Unlike a purely random split, rows keep their train/test assignment
    across runs and dataset refreshes because membership is derived from
    the hashed identifier.
    """
    identifiers = data[id_column]
    in_test = identifiers.apply(lambda id_: test_set_check(id_, test_ratio, hash))
    return data.loc[~in_test], data.loc[in_test]
# 住宅価格データセットには、識別子の列がない。そのような場合、もっとも単純な方法は、行番号をIDにすること。
housing_with_id = housing.reset_index() # ID列の追加
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, 'index')
# この場合一意の識別子を作るためにもっと安定した方法は、区域の緯度経度を組み合わせる方法。
housing_with_id['id'] = housing['longitude'] * 1000 + housing['latitude']
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, 'id')
# scikit-learnには、さまざまな方法でデータセットを複数のサブセットに分割する関数がいくつもある。
# train_set_splitは、上記split_train_testとほぼ同じことに加え、乱数生成器の種を設定するrandom_state引数や、
# 複数のデータセットに同じ行番号を与え、同じインデックスでデータセットを分割する機能がある。
# これはラベルのために別個のDataFrameがある場合などにとても役立つ。
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# これまでのものは、無作為なサンプリング方法。
# データセットが十分大規模かつ属性数と相対的な割合ならいいけど、
# そうでない場合、大きなサンプリングバイアスを持ち込む危険がある。
# 例えば、調査会社が1000人の人に電話をかけて質問をする時、電話帳で無作為に1000人の人々を拾い出すわけではない。
# 米国の人口は女性51.3%, 男性48.7%の比率なので、サンプルでも同じ比率にするため、
# 513人の女性と487人の男性に尋ねる
# これを層化抽出法(stratified sampling)という。
# 人口全体の層(stratum)と呼ばれる同種の下位集団に分割し、
# 各層から適切な数のインスタンスをサンプリング抽出し、テストセットが人口全体の代表になるようにする。
# 純粋に無作為なサンプリングを使うと、12%の確率で、女性が49%より少なく、54%よりも多い歪んだ検証セットをサンプリングしてしまう。
# 今回のケースでは、収入の中央値は、住宅価格の中央値を予測する上で非常に重要な属性。
# テストセットは、データセット全体のさまざまな収入カテゴリを代表するものにしたい
# 収入の中央値は連続的な数値属性なので、収入カテゴリという属性を作る必要がある。
# 収入の中央値のヒストグラムから、収入の中央値の大半は2~5万ドルの周辺に集まっているが、
# 一部の値は6万ドルを大きく越えている。
# データセットの各層に十分な数のインスタンスがあることが重要
# そうでなければ、層を重視することがバイアスになってしまう
# つまり、層の数が大きくなり過ぎないようにしながら、各層は十分に大きくなければならない
# 以下のコードは、収入の中央値を1.5で割り(収入カテゴリの数を減らすため)、
# ceilで端数を切り上げ(離散したカテゴリを作るため)て、
# 5以上のカテゴリをすべて5にまとめるという方法で収入カテゴリを作る。
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
housing['income_cat'].hist(bins=50, figsize=(8,6))
plt.show()
# これらの収入カテゴリに基づき、scikit-learnのStratified ShuffleSplitクラスで層化抽出をする
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set['income_cat'].value_counts() / len(strat_test_set)
# 同じようなコードを使ってデータセット全体の収入カテゴリの割合を調べることができる。
# 謎謎謎
```
| github_jupyter |
```
import numpy as np
import os
import sys
import xarray as xr
import scipy.io as sio
import matplotlib.pyplot as plt
import datetime
from dotenv import load_dotenv, find_dotenv
# find .env automagically by walking up directories until it's found
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
src_dir = os.environ.get('srcdir')
sys.path.append(src_dir)
# always reload modules marked with "%aimport"
%load_ext autoreload
%autoreload 1
%aimport features.bathy_smoothing
from features.resample_roms import resample
from features.bathy_smoothing import smoothing_PlusMinus_rx0,smoothing_PositiveVolume_rx0
from features.cartesian_grid_2d import haversine
#run = os.environ.get('run')
run ='waom10'
mr = 10 #km  -- nominal grid resolution in kilometres
smooth = False  # apply rx0 bathymetry smoothing below
deepen = False  # enforce a 50 m minimum water column below
#establish the grid with grid point distances of mr/2 in km
#we need double resolution to cover all of the staggered grid points (we subset to rho, psi, u, v points later)
#we need an extra line of u and v points at first to calculate all dx and dy on rho points
x,y = np.meshgrid(np.arange(-3000,3300+mr/2,mr/2),np.arange(-2700,2600+mr/2,mr/2))
#x,y = np.meshgrid(np.arange(-4300,4300+mr/2,mr/2),np.arange(-3700,3600+mr/2,mr/2))
#load south polar stereographic projection to convert from grid point distance in m to lat/lon and back
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='spstere',lon_0=0,boundinglat=-50,lat_ts=-71)
#get lat/lon coordinates at all grid points by shifting the grid to the lower left corner of the map
lon,lat=m(x*1000+m.urcrnrx/2,y*1000+m.urcrnry/2,inverse=True)
#calculate curvilinear coordinate distances at rho points
#(great-circle distance between the flanking u/v points of each rho point)
dx = haversine(lon[1::2,0:-2:2],lat[1::2,0:-2:2],lon[1::2,2::2],lat[1::2,2::2])
dy = haversine(lon[0:-2:2,1::2],lat[0:-2:2,1::2],lon[2::2,1::2],lat[2::2,1::2])
#calculate curvilinear coordinate metrices (inverse grid spacings)
pm = 1.0/dx
pn = 1.0/dy
dndx = np.empty_like(pm)
dmde = np.empty_like(pn)
# interior points: centred differences of the inverse metrics
dndx[:,1:-1] = 0.5*(pn[:,2:] - pn[:,:-2])
dmde[1:-1,:] = 0.5*(pm[2:,:] - pm[:-2,:])
# boundary rows/columns: linear extrapolation from the two nearest interior values
dndx[:,0] = 2*dndx[:,1] - dndx[:,2]
dndx[:,-1] = 2*dndx[:,-2] - dndx[:,-3]
dmde[0,:] = 2*dmde[1,:] - dmde[2,:]
dmde[-1,:] = 2*dmde[-2,:] - dmde[-3,:]
#subset lat and lon at rho, psi, u and v points
#(the double-resolution grid interleaves the four Arakawa-C point types;
# odd/even strides pick out each staggered sub-grid)
lon_rho = lon[1::2,1::2]
lat_rho = lat[1::2,1::2]
lon_psi = lon[2:-1:2,2:-1:2]
lat_psi = lat[2:-1:2,2:-1:2]
lon_u = lon[1::2,2:-1:2]
lat_u = lat[1::2,2:-1:2]
lon_v = lon[2:-1:2,1::2]
lat_v = lat[2:-1:2,1::2]
#load rtopo bed and ice topography and resample to rho points
rtopo_path = os.path.join(os.environ.get('extdir'),'rtopo','RTopo-2.0.1_30sec_*_S30.nc')
rtopo = xr.open_mfdataset(rtopo_path,data_vars='minimal')#.sel(latdim=np.arange(0,7501,50),londim=np.arange(0,43201,100))
rt_lon,rt_lat = np.meshgrid(rtopo.lon.values,rtopo.lat.values)
# interpolate RTopo fields onto the rho-point locations
bed_raw = resample(rt_lon,rt_lat,lon_rho,lat_rho,rtopo.bedrock_topography.values)
ice_raw = resample(rt_lon,rt_lat,lon_rho,lat_rho,rtopo.ice_base_topography.values)
#make a copy of the raw bathymetry (keep bed_raw/ice_raw untouched for 'hraw')
bed = bed_raw.copy()
ice = ice_raw.copy()
#set bed minimum depth to 10 cm (bed and ice are negative-down elevations)
bed[bed>-0.1]= -0.1
#set ice draft at these places to zero
#NOTE(review): after the clamp above, bed>0.1 can never be true, so this
#line is a no-op; was bed>-0.1 intended? confirm before relying on it
ice[bed>0.1] = 0.0
#set ice mountains to zero
ice[ice>0]= 0.0
#set water column thickness to a small positive value (ROMS don't like when bed = ice draft)
wct = (-(bed-ice)).copy()
ice[wct==0] = bed[wct==0] + 0.1
#generate a land/ocean mask depending on water column thickness
#(distance between ice and bed or sea surface and bed)
#wct = (-(bed-ice)).copy()
mask = np.ones_like(wct)
mask[wct<20] = 0
#smooth=True
#deepen=True
if smooth:
    #if smoothing is activated smooth wct and bed and set ice draft as bed + wct
    #(rx0 smoothing needs a mask of wet points and the cell areas dA)
    mask = np.ones_like(wct)
    mask[wct<=0.1] = 0
    dA = 1.0/(pn*pm)
    bed = -(smoothing_PositiveVolume_rx0(mask,-bed,0.8,dA))
    wct = smoothing_PositiveVolume_rx0(mask,wct,0.8,dA)
    ice = bed + wct
    #update the minimum wct points as before
    bed[bed>-0.1]= -0.1
    ice[bed>0.1] = 0.0
    ice[ice>0]= 0.0
    wct = (-(bed-ice)).copy()
    ice[wct==0] = bed[wct==0] + 0.1
    #update the mask
    wct = (-(bed-ice)).copy()
    mask = np.ones_like(wct)
    mask[wct<20] = 0
#if deepening is activated, deepen the bed to a minimum water column thickness of 50m
#(only where the column is already wet, i.e. wct >= 20)
if deepen:
    shallow = (wct<50)&(wct>=20)
    bed[shallow] = ice[shallow]-50.0
#set spherical flag to 1, since we're creating a curvilinear spherical grid
spherical_da = xr.DataArray(int(1),name='spherical',attrs={'flag_meanings': 'Cartesian spherical',
                                                             'flag_values': np.array([0, 1], dtype=int),
                                                             'long_name': 'grid type logical switch'})
#basin extents in metres (grid cells times nominal resolution)
xl = mr*np.size(lat_rho,1)*1000
xl_da = xr.DataArray(xl,name='xl',attrs={'long_name': 'basin length in the XI-direction', 'units': 'meter'} )
el = mr*np.size(lon_rho,0)*1000
el_da = xr.DataArray(el,name='el',attrs={'long_name': 'basin length in the ETA-direction', 'units': 'meter'} )
#grid rotation angle; on a polar stereographic grid the XI-axis rotation
#equals the longitude (converted to radians)
angle = lon_rho/180.0*np.pi
angle_da = xr.DataArray(angle,name='angle',dims=['eta_rho','xi_rho'],attrs={'long_name': 'angle between XI-axis and EAST', 'units': 'radians'})
pn_da = xr.DataArray(pn,name="pn",dims=['eta_rho','xi_rho'],attrs={'long_name': 'curvilinear coordinate metric in ETA', 'units': 'meter-1'})
pm_da = xr.DataArray(pm,name='pm',dims=['eta_rho','xi_rho'],attrs={'long_name': 'curvilinear coordinate metric in XI', 'units': 'meter-1'})
dmde_da = xr.DataArray(dmde,name='dmde',dims=['eta_rho','xi_rho'],attrs={'long_name': 'ETA-derivative of inverse metric factor pm', 'units': 'meter'})
dndx_da = xr.DataArray(dndx,name='dndx',dims=['eta_rho','xi_rho'],attrs={'long_name': 'XI-derivative of inverse metric factor nm', 'units': 'meter'})
#Coriolis parameter f = 2*Omega*sin(lat)
f = 2*7.29e-5*np.sin(lat_rho*np.pi/180)
f_da = xr.DataArray(f,name='f',dims=['eta_rho','xi_rho'],attrs={'long_name': 'Coriolis parameter at RHO-points', 'units': 'second-1'})
#ROMS convention: h is positive-down depth, so negate the elevation fields
h_da = xr.DataArray(-bed,name='h',dims=['eta_rho','xi_rho'],attrs={'long_name': 'model bathymetry at RHO-points', 'units': 'meter'})
hraw_da = xr.DataArray(-bed_raw,name='hraw',dims=['eta_rho','xi_rho'],attrs={'long_name': 'Working bathymetry at RHO-points', 'units': 'meter'})
zice_da = xr.DataArray(ice,name='zice',dims=['eta_rho','xi_rho'],attrs={'long_name': 'model ice draft at RHO-points', 'units': 'meter'})
lon_rho_da = xr.DataArray(lon_rho,name='lon_rho',dims=['eta_rho','xi_rho'],attrs={'long_name': 'longitude of RHO-points',
                                     'standard_name': 'longitude',
                                     'units': 'degree_east'})
lat_rho_da = xr.DataArray(lat_rho,name='lat_rho',dims=['eta_rho','xi_rho'],attrs={'long_name': 'latitude of RHO-points',
                                     'standard_name': 'latitude',
                                     'units': 'degree_north'})
lon_psi_da = xr.DataArray(lon_psi,name='lon_psi',dims=['eta_psi','xi_psi'],attrs={'long_name': 'longitude of psi-points',
                                     'standard_name': 'longitude',
                                     'units': 'degree_east'})
lat_psi_da = xr.DataArray(lat_psi,name='lat_psi',dims=['eta_psi','xi_psi'],attrs={'long_name': 'latitude of psi-points',
                                     'standard_name': 'latitude',
                                     'units': 'degree_north'})
lon_u_da = xr.DataArray(lon_u,name='lon_u',dims=['eta_u','xi_u'],attrs={'long_name': 'longitude of u-points',
                                     'standard_name': 'longitude',
                                     'units': 'degree_east'})
lat_u_da = xr.DataArray(lat_u,name='lat_u',dims=['eta_u','xi_u'],attrs={'long_name': 'latitude of u-points',
                                     'standard_name': 'latitude',
                                     'units': 'degree_north'})
lon_v_da = xr.DataArray(lon_v,name='lon_v',dims=['eta_v','xi_v'],attrs={'long_name': 'longitude of v-points',
                                     'standard_name': 'longitude',
                                     'units': 'degree_east'})
lat_v_da = xr.DataArray(lat_v,name='lat_v',dims=['eta_v','xi_v'],attrs={'long_name': 'latitude of v-points',
                                     'standard_name': 'latitude',
                                     'units': 'degree_north'})
#derive u/v/psi masks from the rho mask (a staggered point is wet only if
#its neighbouring rho points are wet)
from features.mask_roms_uvp import uvp_masks
mask_rho = mask.copy()
mask_u,mask_v,mask_psi = uvp_masks(mask_rho)
mask_rho_da = xr.DataArray(mask_rho,name='mask_rho',dims=['eta_rho','xi_rho'],attrs={'flag_meanings': 'land water',
                          'flag_values': np.array([ 0.,  1.]),
                          'long_name': 'mask on RHO-points'})
mask_psi_da = xr.DataArray(mask_psi,name='mask_psi',dims=['eta_psi','xi_psi'],attrs={'flag_meanings': 'land water',
                          'flag_values': np.array([ 0.,  1.]),
                          'long_name': 'mask on psi-points'})
mask_u_da = xr.DataArray(mask_u,name='mask_u',dims=['eta_u','xi_u'],attrs={'flag_meanings': 'land water',
                          'flag_values': np.array([ 0.,  1.]),
                          'long_name': 'mask on u-points'})
mask_v_da = xr.DataArray(mask_v,name='mask_v',dims=['eta_v','xi_v'],attrs={'flag_meanings': 'land water',
                          'flag_values': np.array([ 0.,  1.]),
                          'long_name': 'mask on v-points'})
# Assemble all variables into the ROMS grid dataset and write it to netCDF.
# FIX: the 'pm' entry previously pointed at pn_da, so the XI-direction metric
# was silently overwritten by the ETA-direction metric; it now uses pm_da.
grd = xr.Dataset({'spherical':spherical_da,
                  'xl':xl_da,
                  'el':el_da,
                  'angle':angle_da,
                  'pm':pm_da,
                  'pn':pn_da,
                  'dndx':dndx_da,
                  'dmde':dmde_da,
                  'f':f_da,
                  'h':h_da,
                  'hraw':hraw_da,
                  'zice':zice_da,
                  'lon_rho':lon_rho_da,
                  'lat_rho':lat_rho_da,
                  'lon_psi':lon_psi_da,
                  'lat_psi':lat_psi_da,
                  'lon_u':lon_u_da,
                  'lat_u':lat_u_da,
                  'lon_v':lon_v_da,
                  'lat_v':lat_v_da,
                  'mask_rho':mask_rho_da,
                  'mask_psi':mask_psi_da,
                  'mask_u':mask_u_da,
                  'mask_v':mask_v_da,},
                 attrs={'history': 'GRID file using make_grid.py, smoothing='+str(smooth)+
                        ', deepening='+str(deepen)+', '+str(datetime.date.today()),
                        'type': 'ROMS grid file'})
# Write the grid into the project's intermediate data directory.
out_path = os.path.join(os.environ.get('intdir'),'waom'+str(mr)+'_grd_raw.nc')
#out_path = '~/raijin/short/m68/oxr581/waom10_test/waom10_grd_smooth.nc'
# NOTE(review): 'bath' is not a dimension of this dataset, so unlimited_dims
# has no effect here — confirm whether an unlimited record dim was intended.
grd.to_netcdf(out_path,unlimited_dims='bath')
```
Below are just leftovers from development.
| github_jupyter |
```
import matplotlib
# NOTE(review): 'Agg' is a non-interactive backend, but the magic below then
# requests the interactive 'qt' backend — one of the two is redundant; confirm
# which is actually wanted before rerunning.
matplotlib.use('Agg')
%matplotlib qt
import matplotlib.pyplot as plt
import numpy as np
import os
import SimpleITK as sitk
from os.path import expanduser, join
from scipy.spatial.distance import euclidean
# Work from the project directory so the local liversegmentation module imports.
os.chdir(join(expanduser('~'), 'Medical Imaging'))
import liversegmentation
```
---
# Read in DICOM images
```
# Index of the axial slice to segment and path to the local DICOM database.
sliceNum = 42
dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase', 'TCIALocal', '0', 'images', '')
reader = sitk.ImageSeriesReader()
# Read the 2nd series ID (index 1); per the accompanying note this patient
# has two series and index 1 corresponds to series 9.
seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1]
dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread)
reader.SetFileNames(dicomFilenames)
imgSeries = reader.Execute()
# Extract the single 2D slice we will segment.
imgSlice = imgSeries[:,:,sliceNum]
```
Note that the TCGA-BC-4073 patient has 2 series of images (series 9 & 10). The series IDs are:
```
# List all DICOM series IDs found under dicomPath (this patient has two).
reader.GetGDCMSeriesIDs(dicomPath)
```
By comparing images between OsiriX and plots of the SimpleITK images, the 2<sup>nd</sup> tuple element corresponds to series 9.
```
# Display the raw slice for visual inspection.
liversegmentation.sitk_show(imgSlice)
```
Cast original slice to unsigned 8-bit integer so that segmentations can be overlaid on top
```
# Rescale intensities and cast to uint8 so label overlays can be drawn on it.
imgSliceUInt8 = sitk.Cast(sitk.RescaleIntensity(imgSlice), sitk.sitkUInt8)
```
# Filtering
## Curvature anisotropic diffusion
```
# Parameters for curvature anisotropic diffusion — presumably
# (time step, conductance, iterations); confirm against the
# liversegmentation.anisotropic_diffusion signature.
anisoParams = (0.06, 9.0, 5)
imgFilter = liversegmentation.anisotropic_diffusion(imgSlice, *anisoParams)
liversegmentation.sitk_show(imgFilter)
```
## Median filter
```
# Alternative denoising: 3-pixel-radius median filter.
# NOTE: this rebinds imgFilter, replacing the anisotropic-diffusion result
# for every downstream step.
med = sitk.MedianImageFilter()
med.SetRadius(3)
imgFilter = med.Execute(imgSlice)
liversegmentation.sitk_show(imgFilter)
```
# Edge potential
## Gradient magnitude recursive Gaussian
```
#sigma = 3.0
# Gaussian smoothing scale for the gradient magnitude (edge potential) image.
sigma = 1.0
imgGauss = liversegmentation.gradient_magnitude(imgFilter, sigma)
liversegmentation.sitk_show(imgGauss)
```
# Feature Image
## Sigmoid mapping
```
#K1, K2 = 20.0, 6.0
#K1, K2 = 14.0, 4.0
# Sigmoid mapping parameters (earlier trials kept above for reference);
# presumably K1/K2 set the gradient values mapped to the speed extremes.
K1, K2 = 8.0, 2.0
imgSigmoid = liversegmentation.sigmoid_filter(imgGauss, K1, K2)
liversegmentation.sitk_show(imgSigmoid)
```
# Input level set
Create 2 lists, one to hold the seed coordinates and the other for the radii. The radius at the 1<sup>st</sup> index corresponds to the seed coordinate at the 1<sup>st</sup> index, and so on.
```
# Seed coordinates (given as (x, y)) and their disk radii, index-aligned.
coords = [(118, 286), (135, 254), (202, 75), (169, 89), (145, 209), (142, 147), (252, 58), (205, 119)]
radii = [10, 10, 10, 10, 10, 10, 5, 5]
# Reverse each coordinate to (row, col) array order before mapping to radius.
seed2radius = {tuple(reversed(p[0])): p[1] for p in zip(coords, radii)}
initImg = liversegmentation.input_level_set(imgSigmoid, seed2radius)
liversegmentation.sitk_show(initImg)
```
Creating new level set from segmentation of downsampled image.
First convert the segmentation result into a workable format:
```
# Convert a level-set result into a binary mask: values in [-2.3438, 0]
# (inside the contour) map to 1, everything else to 0.
binaryThresh = sitk.BinaryThresholdImageFilter()
binaryThresh.SetLowerThreshold(-2.3438)
binaryThresh.SetUpperThreshold(0.0)
binaryThresh.SetInsideValue(1)
binaryThresh.SetOutsideValue(0)
# NOTE(review): imgGac2 is not defined anywhere in this notebook as saved —
# it presumably came from an earlier in-session run; confirm before rerunning.
binaryImg = binaryThresh.Execute(imgGac2)
liversegmentation.sitk_show(binaryImg)
```
Add in new seeds:
```
# Second round of seeds, same (x, y) -> (row, col) reversal as before.
coords2 = [(235, 108), (199, 188), (120, 113), (96, 140)]
radii2 = [5, 5, 5, 5]
seed2radius2 = {tuple(reversed(p[0])): p[1] for p in zip(coords2, radii2)}
```
Now create new level set image:
```
# Previous segmentation as a numpy array; it will be OR-ed with new seeds.
X_1 = sitk.GetArrayFromImage(binaryImg)
# Blank uint8 canvas matching the sigmoid image, used as the 2nd seed matrix.
setupImg = sitk.Image(imgSigmoid.GetSize()[0], imgSigmoid.GetSize()[1], sitk.sitkUInt8)
X_2 = sitk.GetArrayFromImage(setupImg)
# Rasterize every new seed as a filled disk of its radius.
for row, col in np.ndindex(X_2.shape):
    for seed, radius in seed2radius2.items():
        if euclidean((row, col), seed) <= radius:
            X_2[row, col] = 1
# Union of old segmentation and new seeds; casting via int and the *-1 + 0.5
# transform maps {0, 1} to {+0.5, -0.5} so the level set is negative inside.
X = X_1.astype(bool) + X_2.astype(bool)
initImg2 = sitk.Cast(sitk.GetImageFromArray(X.astype(int)), imgSigmoid.GetPixelIDValue()) * -1 + 0.5
# Copy the spatial metadata so the level set aligns with the feature image.
initImg2.SetSpacing(imgSigmoid.GetSpacing())
initImg2.SetOrigin(imgSigmoid.GetOrigin())
initImg2.SetDirection(imgSigmoid.GetDirection())
liversegmentation.sitk_show(initImg2)
```
Add in a 3<sup>rd</sup> set of seeds:
```
# Third round of seeds, again reversed from (x, y) to (row, col) order.
coords3 = [(225, 177), (246, 114), (83, 229), (78, 208), (82, 183), (238, 126)]
radii3 = [5, 10, 5, 5, 5, 15]
seed2radius3 = {tuple(reversed(p[0])): p[1] for p in zip(coords3, radii3)}
X_1 = sitk.GetArrayFromImage(binaryImg)
# create a 3rd seed matrix from the 3rd set of coordinates
setupImg = sitk.Image(imgSigmoid.GetSize()[0], imgSigmoid.GetSize()[1], sitk.sitkUInt8)
X_2 = sitk.GetArrayFromImage(setupImg)
# Rasterize each seed as a filled disk of its radius.
for i in range(X_2.shape[0]):
    for j in range(X_2.shape[1]):
        for s in seed2radius3.keys():
            if euclidean((i,j), s) <= seed2radius3[s]:
                X_2[i,j] = 1
# Union of prior segmentation and new seeds; *-1 + 0.5 maps {0,1} to
# {+0.5,-0.5} so the level set is negative inside the initial region.
X = X_1.astype(bool) + X_2.astype(bool)
initImg3 = sitk.Cast(sitk.GetImageFromArray(X.astype(int)), imgSigmoid.GetPixelIDValue()) * -1 + 0.5
# Copy spatial metadata so the level set aligns with the feature image.
initImg3.SetSpacing(imgSigmoid.GetSpacing())
initImg3.SetOrigin(imgSigmoid.GetOrigin())
initImg3.SetDirection(imgSigmoid.GetDirection())
liversegmentation.sitk_show(initImg3)
```
# Segmentation
## Geodesic Active Contour
```
#gacParams = (1.0, 0.2, 4.5, 0.01, 250)
#gacParams = (1.0, 0.2, 4.5, 0.01, 200)
# GAC parameters (earlier trials above) — presumably (propagation scaling,
# curvature scaling, advection scaling, max RMS error, iterations); confirm
# against liversegmentation.geodesic_active_contour.
gacParams = (1.0, 0.2, 5.0, 0.01, 350)
imgGac3 = liversegmentation.geodesic_active_contour(initImg3, imgSigmoid, *gacParams)
# FIX: show the result just computed; the original showed 'imgGac', a name
# that is never defined in this notebook and would raise NameError.
liversegmentation.sitk_show(imgGac3)
```
Display overlay of segmentation over original slice:
```
# Threshold the GAC level-set output to a binary label and overlay it on the
# uint8 slice; backgroundValue=255 keeps the underlying CT visible.
labelLowThresh = -2.3438
labelUpThresh = 0.0
binarySegImg3 = liversegmentation.binary_threshold(imgGac3, labelLowThresh, labelUpThresh)
liversegmentation.sitk_show(sitk.LabelOverlay(imgSliceUInt8, binarySegImg3, backgroundValue=255))
```
| github_jupyter |
# Plotting Lexical Dispersion - working with JSON reviews
```
import os
import pandas as pd
import json
```
## Data Munging
```
# Load every JSON review file in the folder; each file holds one JSON object
# per line and only the first line is kept.
path = './data/690_webhose-2017-03_20170904112233'
good_review_folder = os.listdir(path)
good_reviews = []
for file in good_review_folder:
    with open(path + '/' + file, 'r') as json_file:
        data = json_file.readlines()
        good_reviews.append(list(map(json.loads, data))[0])
print(len(good_reviews))
good_reviews[25]
# Collect the union of keys across all reviews.
# FIX: dict.keys() returns a view object with no .append(); materialize it
# as a list so extra keys found in later reviews can actually be added.
keys = list(good_reviews[0].keys())
for UON in good_reviews:
    for key in UON.keys():
        if key not in keys:
            keys.append(key)
            print('added ', key, ' to namespace')
# Columns of interest for display (duplicate assignment removed).
columns = ['uuid', 'title', 'published', 'text']
review_df = pd.DataFrame.from_dict(good_reviews, orient = 'columns')
review_df[columns].head(3)
```
## Feature Engineering - True or False?
```
# Regex alternation patterns of room-element keywords, one per category.
bed = "|".join(('pillow', 'bed', 'sheets', 'blankets', 'covers', 'comforter'))
desk = "|".join(('chair', 'desk', 'stationary', 'outlet', 'plug', 'plugs'))
room = "|".join(('carpet', 'wallpaper', 'paint', 'fridge', 'light', 'lights', 'curtain'))
bathroom = "|".join(('towels', 'bath', 'tub', 'shower', 'mirror', 'toilet', 'soap'))
elements = [bed, desk, room, bathroom]
# Flag reviews mentioning any keyword of each category.  The column is
# temporarily named by the pattern's first 3 chars ('pil', 'cha', 'car',
# 'tow') and renamed to the category name just below.
for element in elements:
    review_df[element[0:3]] = review_df['text'].str.contains(element)
review_df.rename(columns = {'pil': 'bed', 'cha': 'desk', 'car': 'room', 'tow': 'bathroom'}, inplace = True)
review_df.head(3)
# How many reviews mention each category.
print(review_df.bed.value_counts(), '\n')
print(review_df.desk.value_counts(), '\n')
print(review_df.room.value_counts(), '\n')
print(review_df.bathroom.value_counts(), '\n')
```
## Feature Engineering - List of Element Mentions
```
# Flat list of all room-element keywords across the four categories, and the
# punctuation characters stripped from tokens before matching.
room_elements = ['pillow', 'bed', 'sheets', 'blankets', 'covers', 'comforter',
                 'chair', 'desk', 'stationary', 'outlet', 'plug', 'plugs',
                 'carpet', 'wallpaper', 'paint', 'fridge', 'light', 'lights', 'curtain',
                 'towels', 'bath', 'tub', 'shower', 'mirror', 'toilet', 'soap']
punctuation = ',?!.\/#@"><[]'
def room_list(x):
    """Return str() of every room-element mention in text x, lowercased and
    punctuation-stripped, in order of appearance (duplicates kept)."""
    cleaned_tokens = (token.lower().strip(punctuation) for token in x.split(" "))
    mentions = [token for token in cleaned_tokens if token in room_elements]
    return (str(mentions))
# Attach the list of element mentions to each review and inspect frequencies.
review_df['room_list'] = review_df.text.apply(room_list)
review_df.room_list.value_counts()
```
## Feature Engineering - Final Element Mentions
```
import string
def room_item(x):
    """Return the first room-element mention in text x (lowercased,
    punctuation-stripped), or None when nothing matches."""
    for token in x.split(" "):
        cleaned = token.lower().strip(punctuation)
        if cleaned in room_elements:
            return cleaned
    return None
# First element mentioned per review; NaN (no mention) becomes 'none'.
review_df['room_item'] = review_df.text.apply(room_item)
review_df.room_item.value_counts()
review_df.room_item.fillna('none', inplace = True)
# Export the engineered columns for the lexical-dispersion analysis.
review_df[['uuid', 'published', 'url', 'language', 'text', 'title', 'bed', 'desk', 'room', 'bathroom', 'room_list', 'room_item']].to_csv('good_review_lexical_dispersion.csv', sep = ',')
review_df.columns
```
| github_jupyter |
```
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, CuDNNLSTM, CuDNNGRU, BatchNormalization, LocallyConnected2D, Permute, TimeDistributed, Bidirectional
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
from keras.utils.generic_utils import Progbar
from keras.layers.merge import _Merge
import keras.losses
from functools import partial
from collections import defaultdict
import tensorflow as tf
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import tensorflow as tf
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import pandas as pd
import os
import pickle
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import isolearn.io as isoio
import isolearn.keras as isol
from sequence_logo_helper import plot_dna_logo
import pandas as pd
from keras.backend.tensorflow_backend import set_session
def contain_tf_gpu_mem_usage() :
    """Create a TF1 session with GPU memory growth enabled (so TensorFlow
    does not pre-allocate all VRAM) and install it as the Keras session."""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)
contain_tf_gpu_mem_usage()
class EpochVariableCallback(Callback) :
    """Keras callback that re-assigns a backend variable at the start of each
    epoch using my_func(current_value, epoch) — e.g. for annealing schedules.
    """
    def __init__(self, my_variable, my_func) :
        # my_variable: a Keras backend variable; my_func: (value, epoch) -> value
        self.my_variable = my_variable
        self.my_func = my_func
    def on_epoch_begin(self, epoch, logs={}) :
        K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))
#Define dataset/experiment name
dataset_name = "apa_doubledope"
#Load cached dataframe
cached_dict = pickle.load(open('apa_doubledope_cached_set.pickle', 'rb'))
data_df = cached_dict['data_df']
print("len(data_df) = " + str(len(data_df)) + " (loaded)")
#Make generators
valid_set_size = 0.05
test_set_size = 0.05
batch_size = 32
#Generate training and test set indexes (sequential split: train, then
#validation, then test).
#FIX: np.int is a deprecated alias removed in NumPy 1.24; the builtin int is
#the exact equivalent and works on all NumPy versions.
data_index = np.arange(len(data_df), dtype=int)
train_index = data_index[:-int(len(data_df) * (valid_set_size + test_set_size))]
valid_index = data_index[train_index.shape[0]:-int(len(data_df) * test_set_size)]
test_index = data_index[train_index.shape[0] + valid_index.shape[0]:]
print('Training set size = ' + str(train_index.shape[0]))
print('Validation set size = ' + str(valid_index.shape[0]))
print('Test set size = ' + str(test_index.shape[0]))
# Build isolearn DataGenerators for the full set and each split: the input is
# a one-hot encoded 205-nt window of 'padded_seq', the output is the scalar
# proximal_usage; only the training generator shuffles.
data_gens = {
    gen_id : iso.DataGenerator(
        idx,
        {'df' : data_df},
        batch_size=batch_size,
        inputs = [
            {
                'id' : 'seq',
                'source_type' : 'dataframe',
                'source' : 'df',
                'extractor' : iso.SequenceExtractor('padded_seq', start_pos=180, end_pos=180 + 205),
                'encoder' : iso.OneHotEncoder(seq_length=205),
                'dim' : (1, 205, 4),
                'sparsify' : False
            }
        ],
        outputs = [
            {
                'id' : 'hairpin',
                'source_type' : 'dataframe',
                'source' : 'df',
                'extractor' : lambda row, index: row['proximal_usage'],
                'transformer' : lambda t: t,
                'dim' : (1,),
                'sparsify' : False
            }
        ],
        randomizers = [],
        shuffle = True if gen_id == 'train' else False
    ) for gen_id, idx in [('all', data_index), ('train', train_index), ('valid', valid_index), ('test', test_index)]
}
#Load data matrices by concatenating all generator batches into dense arrays
x_train = np.concatenate([data_gens['train'][i][0][0] for i in range(len(data_gens['train']))], axis=0)
x_test = np.concatenate([data_gens['test'][i][0][0] for i in range(len(data_gens['test']))], axis=0)
y_train = np.concatenate([data_gens['train'][i][1][0] for i in range(len(data_gens['train']))], axis=0)
y_test = np.concatenate([data_gens['test'][i][1][0] for i in range(len(data_gens['test']))], axis=0)
print("x_train.shape = " + str(x_train.shape))
print("x_test.shape = " + str(x_test.shape))
print("y_train.shape = " + str(y_train.shape))
print("y_test.shape = " + str(y_test.shape))
#Define sequence template (APA Doubledope sublibrary); 'N' marks designable
#positions, fixed letters are library scaffold
sequence_template = 'CTTCCGATCTNNNNNNNNNNNNNNNNNNNNCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCTAC'
sequence_mask = np.array([1 if sequence_template[j] == 'N' else 0 for j in range(len(sequence_template))])
#Visualize background sequence distribution (additively smoothed per-position
#nucleotide frequencies over the training set)
pseudo_count = 1.0
x_mean = (np.sum(x_train, axis=(0, 1)) + pseudo_count) / (x_train.shape[0] + 4. * pseudo_count)
x_mean_logits = np.log(x_mean / (1. - x_mean))
plot_dna_logo(np.copy(x_mean), sequence_template=sequence_template, figsize=(14, 0.65), logo_height=1.0, plot_start=0, plot_end=205)
#Calculate mean training set conservation (2 - entropy, in bits), averaged
#over the designable positions only
entropy = np.sum(x_mean * -np.log(x_mean), axis=-1) / np.log(2.0)
conservation = 2.0 - entropy
x_mean_conservation = np.sum(conservation) / np.sum(sequence_mask)
print("Mean conservation (bits) = " + str(x_mean_conservation))
#Calculate mean training set kl-divergence against background
x_train_clipped = np.clip(np.copy(x_train[:, 0, :, :]), 1e-8, 1. - 1e-8)
kl_divs = np.sum(x_train_clipped * np.log(x_train_clipped / np.tile(np.expand_dims(x_mean, axis=0), (x_train_clipped.shape[0], 1, 1))), axis=-1) / np.log(2.0)
x_mean_kl_divs = np.sum(kl_divs * sequence_mask, axis=-1) / np.sum(sequence_mask)
x_mean_kl_div = np.mean(x_mean_kl_divs)
print("Mean KL Div against background (bits) = " + str(x_mean_kl_div))
from tensorflow.python.framework import ops
#Stochastic Binarized Neuron helper functions (Tensorflow)
#ST Estimator code adopted from https://r2rt.com/beyond-binary-ternary-and-one-hot-neurons.html
#See Github https://github.com/spitis/
def st_sampled_softmax(logits):
    """Sample a one-hot nucleotide from softmax(logits); the Ceil/Mul gradient
    overrides make the backward pass use the softmax probs (straight-through)."""
    with ops.name_scope("STSampledSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.squeeze(tf.multinomial(logits, 1), 1), onehot_dim, 1.0, 0.0)
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)
def st_hardmax_softmax(logits):
    """Deterministic variant: one-hot of argmax(softmax(logits)) with the same
    straight-through gradient trick."""
    with ops.name_scope("STHardmaxSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.argmax(nt_probs, 1), onehot_dim, 1.0, 0.0)
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)
@ops.RegisterGradient("STMul")
def st_mul(op, grad):
    # Pass the upstream gradient through to both factors unchanged.
    return [grad, grad]
#Gumbel Distribution Sampler
def gumbel_softmax(logits, temperature=0.5) :
    """Draw a relaxed (continuous) one-hot sample via the Gumbel-softmax trick."""
    gumbel_dist = tf.contrib.distributions.RelaxedOneHotCategorical(temperature, logits=logits)
    batch_dim = logits.get_shape().as_list()[0]
    onehot_dim = logits.get_shape().as_list()[1]
    return gumbel_dist.sample()
#PWM Masking and Sampling helper functions
def mask_pwm(inputs) :
    """Blend learned PWM logits with a fixed sequence template: designable
    positions (mask == 1) keep the PWM values, fixed positions are overridden
    by the template logits. inputs = (pwm, onehot_template, onehot_mask)."""
    pwm = inputs[0]
    template = inputs[1]
    mask = inputs[2]
    return template + pwm * mask
def sample_pwm_st(pwm_logits) :
    """Sample one-hot sequences from PWM logits with the straight-through
    estimator; flattens positions to sample each independently."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4))
    sampled_pwm = st_sampled_softmax(flat_pwm)
    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4))
def sample_pwm_gumbel(pwm_logits) :
    """Same as sample_pwm_st but with relaxed Gumbel-softmax samples."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4))
    sampled_pwm = gumbel_softmax(flat_pwm, temperature=0.5)
    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4))
#Generator helper functions
def initialize_sequence_templates(generator, sequence_templates, background_matrices) :
    """Load fixed sequence templates, designability masks, and background
    logits into the generator's (frozen) embedding layers.

    For each template: fixed letters get strong logits (+10 for the letter,
    -4 elsewhere) so the softmax pins them; 'X' positions are uniformly -1;
    'N' positions are designable and marked with mask = 1.
    """
    embedding_templates = []
    embedding_masks = []
    embedding_backgrounds = []
    for k in range(len(sequence_templates)) :
        sequence_template = sequence_templates[k]
        onehot_template = iso.OneHotEncoder(seq_length=len(sequence_template))(sequence_template).reshape((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] not in ['N', 'X'] :
                nt_ix = np.argmax(onehot_template[0, j, :])
                onehot_template[:, j, :] = -4.0
                onehot_template[:, j, nt_ix] = 10.0
            elif sequence_template[j] == 'X' :
                onehot_template[:, j, :] = -1.0
        onehot_mask = np.zeros((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] == 'N' :
                onehot_mask[:, j, :] = 1.0
        embedding_templates.append(onehot_template.reshape(1, -1))
        embedding_masks.append(onehot_mask.reshape(1, -1))
        embedding_backgrounds.append(background_matrices[k].reshape(1, -1))
    embedding_templates = np.concatenate(embedding_templates, axis=0)
    embedding_masks = np.concatenate(embedding_masks, axis=0)
    embedding_backgrounds = np.concatenate(embedding_backgrounds, axis=0)
    # Install as frozen weights so they are constants during training.
    generator.get_layer('template_dense').set_weights([embedding_templates])
    generator.get_layer('template_dense').trainable = False
    generator.get_layer('mask_dense').set_weights([embedding_masks])
    generator.get_layer('mask_dense').trainable = False
    generator.get_layer('background_dense').set_weights([embedding_backgrounds])
    generator.get_layer('background_dense').trainable = False
#Generator construction function
def build_sampler(batch_size, seq_length, n_classes=1, n_samples=1, sample_mode='st') :
    """Build a closure that turns raw logits into template-masked PWMs and
    one-hot samples (straight-through or Gumbel), with n_samples draws per
    input arranged on a dedicated sample axis."""
    #Initialize Reshape layer
    reshape_layer = Reshape((1, seq_length, 4))
    #Initialize background matrix (per-class, loaded later by
    #initialize_sequence_templates and frozen)
    onehot_background_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='background_dense')
    #Initialize template and mask matrices
    onehot_template_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='template_dense')
    onehot_mask_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='ones', name='mask_dense')
    #Initialize Templating and Masking Lambda layer
    masking_layer = Lambda(mask_pwm, output_shape = (1, seq_length, 4), name='masking_layer')
    background_layer = Lambda(lambda x: x[0] + x[1], name='background_layer')
    #Initialize PWM normalization layer
    pwm_layer = Softmax(axis=-1, name='pwm')
    #Initialize sampling layers
    sample_func = None
    if sample_mode == 'st' :
        sample_func = sample_pwm_st
    elif sample_mode == 'gumbel' :
        sample_func = sample_pwm_gumbel
    upsampling_layer = Lambda(lambda x: K.tile(x, [n_samples, 1, 1, 1]), name='upsampling_layer')
    sampling_layer = Lambda(sample_func, name='pwm_sampler')
    permute_layer = Lambda(lambda x: K.permute_dimensions(K.reshape(x, (n_samples, batch_size, 1, seq_length, 4)), (1, 0, 2, 3, 4)), name='permute_layer')
    def _sampler_func(class_input, raw_logits) :
        #Get Template and Mask
        onehot_background = reshape_layer(onehot_background_dense(class_input))
        onehot_template = reshape_layer(onehot_template_dense(class_input))
        onehot_mask = reshape_layer(onehot_mask_dense(class_input))
        #Add Template and Multiply Mask
        pwm_logits = masking_layer([background_layer([raw_logits, onehot_background]), onehot_template, onehot_mask])
        #Compute PWM (Nucleotide-wise Softmax)
        pwm = pwm_layer(pwm_logits)
        #Tile each PWM to sample from and create sample axis
        pwm_logits_upsampled = upsampling_layer(pwm_logits)
        sampled_pwm = sampling_layer(pwm_logits_upsampled)
        sampled_pwm = permute_layer(sampled_pwm)
        sampled_mask = permute_layer(upsampling_layer(onehot_mask))
        return pwm_logits, pwm, sampled_pwm, onehot_mask, sampled_mask
    return _sampler_func
#Scrambler network definition
def make_resblock(n_channels=64, window_size=8, dilation_rate=1, group_ix=0, layer_ix=0, drop_rate=0.0) :
    """Build one pre-activation residual block (BN -> ReLU -> Conv, twice,
    plus identity skip); returns a closure applying it to a tensor."""
    #Initialize res block layers
    batch_norm_0 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x, alpha=0.0))
    conv_0 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_0')
    batch_norm_1 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x, alpha=0.0))
    conv_1 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_1')
    skip_1 = Lambda(lambda x: x[0] + x[1], name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_skip_1')
    drop_1 = None
    if drop_rate > 0.0 :
        drop_1 = Dropout(drop_rate)
    #Execute res block
    def _resblock_func(input_tensor) :
        batch_norm_0_out = batch_norm_0(input_tensor)
        relu_0_out = relu_0(batch_norm_0_out)
        conv_0_out = conv_0(relu_0_out)
        batch_norm_1_out = batch_norm_1(conv_0_out)
        relu_1_out = relu_1(batch_norm_1_out)
        if drop_rate > 0.0 :
            conv_1_out = drop_1(conv_1(relu_1_out))
        else :
            conv_1_out = conv_1(relu_1_out)
        # identity skip connection
        skip_1_out = skip_1([conv_1_out, input_tensor])
        return skip_1_out
    return _resblock_func
def load_scrambler_network(n_groups=1, n_resblocks_per_group=4, n_channels=32, window_size=8, dilation_rates=[1], drop_rate=0.0) :
    """Build the scrambler: groups of dilated residual blocks with long skip
    connections, ending in a softplus importance-score map that scales the
    input's {-1,+1} logits. Returns a closure mapping a one-hot sequence to
    (scaled_logits, importance_scores)."""
    #Discriminator network definition
    conv_0 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_conv_0')
    skip_convs = []
    resblock_groups = []
    for group_ix in range(n_groups) :
        skip_convs.append(Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_skip_conv_' + str(group_ix)))
        resblocks = []
        for layer_ix in range(n_resblocks_per_group) :
            resblocks.append(make_resblock(n_channels=n_channels, window_size=window_size, dilation_rate=dilation_rates[group_ix], group_ix=group_ix, layer_ix=layer_ix, drop_rate=drop_rate))
        resblock_groups.append(resblocks)
    last_block_conv = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_last_block_conv')
    skip_add = Lambda(lambda x: x[0] + x[1], name='scrambler_skip_add')
    final_conv = Conv2D(1, (1, 1), strides=(1, 1), padding='same', activation='softplus', kernel_initializer='glorot_normal', name='scrambler_final_conv')
    # map one-hot {0,1} to logits {-1,+1}
    onehot_to_logits = Lambda(lambda x: 2. * x - 1., name='scrambler_onehot_to_logits')
    scale_logits = Lambda(lambda x: K.tile(x[0], (1, 1, 1, 4)) * x[1], name='scrambler_logit_scale')
    def _scrambler_func(sequence_input) :
        conv_0_out = conv_0(sequence_input)
        #Connect group of res blocks
        output_tensor = conv_0_out
        #Res block group execution
        skip_conv_outs = []
        for group_ix in range(n_groups) :
            skip_conv_out = skip_convs[group_ix](output_tensor)
            skip_conv_outs.append(skip_conv_out)
            for layer_ix in range(n_resblocks_per_group) :
                output_tensor = resblock_groups[group_ix][layer_ix](output_tensor)
        #Last res block extr conv
        last_block_conv_out = last_block_conv(output_tensor)
        skip_add_out = last_block_conv_out
        for group_ix in range(n_groups) :
            skip_add_out = skip_add([skip_add_out, skip_conv_outs[group_ix]])
        #Final conv out
        final_conv_out = final_conv(skip_add_out)
        #Scale logits by importance scores
        scaled_logits = scale_logits([final_conv_out, onehot_to_logits(sequence_input)])
        return scaled_logits, final_conv_out
    return _scrambler_func
#Keras loss functions
def get_sigmoid_nll() :
def _sigmoid_nll(y_true, y_pred) :
y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
return K.mean(-y_true * K.log(y_pred) - (1.0 - y_true) * K.log(1.0 - y_pred), axis=-1)
return _sigmoid_nll
def get_kl_divergence() :
    """Return a Keras loss computing the symmetrized (Jeffreys) KL divergence
    between Bernoulli-parameter tensors, averaged over the last axis."""
    def _kl_divergence(y_true, y_pred) :
        # Clip both distributions away from 0/1 to keep the log-ratios finite.
        p = K.clip(y_true, K.epsilon(), 1.0 - K.epsilon())
        q = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        kl_pq = K.mean(p * K.log(p / q) + (1.0 - p) * K.log((1.0 - p) / (1.0 - q)), axis=-1)
        kl_qp = K.mean(q * K.log(q / p) + (1.0 - q) * K.log((1.0 - q) / (1.0 - p)), axis=-1)
        # Symmetrize: KL(p||q) + KL(q||p)
        return kl_pq + kl_qp
    return _kl_divergence
def get_margin_entropy_ame_masked(pwm_start, pwm_end, pwm_background, max_bits=1.0) :
    """Build a hinge-style loss on the masked mean PWM conservation.

    Returns a function of (pwm, pwm_mask) that penalizes the mean per-position
    conservation (in bits, relative to pwm_background, over positions
    pwm_start:pwm_end) only by the amount it exceeds max_bits; below that
    margin the loss is zero.
    """
    def _margin_entropy_ame_masked(pwm, pwm_mask) :
        # Per-position conservation in bits (log base 2) relative to the background.
        conservation = pwm[:, 0, pwm_start:pwm_end, :] * K.log(K.clip(pwm[:, 0, pwm_start:pwm_end, :], K.epsilon(), 1. - K.epsilon()) / K.constant(pwm_background[pwm_start:pwm_end, :])) / K.log(2.0)
        conservation = K.sum(conservation, axis=-1)
        # A position counts as unmasked if any channel of its mask is set.
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(mask, axis=-1)
        mean_conservation = K.sum(conservation * mask, axis=-1) / n_unmasked
        # Hinge: only the excess above max_bits contributes to the loss.
        margin_conservation = K.switch(mean_conservation > K.constant(max_bits, shape=(1,)), mean_conservation - K.constant(max_bits, shape=(1,)), K.zeros_like(mean_conservation))
        return margin_conservation
    return _margin_entropy_ame_masked
def get_target_entropy_sme_masked(pwm_start, pwm_end, pwm_background, target_bits=1.0) :
    """Build a squared-error loss pulling the masked mean PWM conservation
    (in bits, relative to pwm_background) toward target_bits."""
    def _target_entropy_sme_masked(pwm, pwm_mask) :
        pwm_slice = pwm[:, 0, pwm_start:pwm_end, :]
        background = K.constant(pwm_background[pwm_start:pwm_end, :])
        # Per-position conservation in bits (log base 2) vs the background.
        clipped = K.clip(pwm_slice, K.epsilon(), 1. - K.epsilon())
        bits = K.sum(pwm_slice * K.log(clipped / background) / K.log(2.0), axis=-1)
        # A position counts as unmasked if any channel of its mask is set.
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        mean_bits = K.sum(bits * mask, axis=-1) / K.sum(mask, axis=-1)
        # Squared deviation from the conservation target.
        return (mean_bits - target_bits)**2
    return _target_entropy_sme_masked
def get_weighted_loss(loss_coeff=1.) :
    """Return a Keras loss that simply rescales the model output by loss_coeff.

    The loss_model's outputs are already loss tensors, so the "loss" just
    forwards y_pred (weighted); y_true is ignored.
    """
    def _min_pred(y_true, y_pred) :
        return y_pred * loss_coeff
    return _min_pred
#Initialize Encoder and Decoder networks
#Batch/sequence dimensions for the whole pipeline
batch_size = 32
seq_length = 205
n_samples = 32
#Sampling mode: straight-through ('st') or Gumbel relaxation
sample_mode = 'st'
#sample_mode = 'gumbel'
#Resnet parameters
resnet_n_groups = 1
resnet_n_resblocks_per_group = 4
resnet_n_channels = 32
resnet_window_size = 8
resnet_dilation_rates = [1]
resnet_drop_rate = 0.25
#Load scrambler
scrambler = load_scrambler_network(
    n_groups=resnet_n_groups,
    n_resblocks_per_group=resnet_n_resblocks_per_group,
    n_channels=resnet_n_channels, window_size=resnet_window_size,
    dilation_rates=resnet_dilation_rates,
    drop_rate=resnet_drop_rate
)
#Load sampler
sampler = build_sampler(batch_size, seq_length, n_classes=1, n_samples=n_samples, sample_mode=sample_mode)
#Load Predictor (pre-trained APARENT model); frozen so only the scrambler trains
predictor_path = '../../../aparent/saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'
predictor = load_model(predictor_path)
predictor.trainable = False
#Compile is needed for Keras bookkeeping even though the predictor is never fit directly
predictor.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error')
#Build scrambler model: input sequence -> scrambled PWM, samples and importance scores
scrambler_class = Input(shape=(1,), name='scrambler_class')
scrambler_input = Input(shape=(1, seq_length, 4), name='scrambler_input')
scrambled_logits, importance_scores = scrambler(scrambler_input)
pwm_logits, pwm, sampled_pwm, _, _ = sampler(scrambler_class, scrambled_logits)
scrambler_model = Model([scrambler_input, scrambler_class], [pwm_logits, pwm, sampled_pwm, importance_scores])
#Initialize Sequence Templates and Masks (sequence_template / x_mean_logits come from earlier notebook cells)
initialize_sequence_templates(scrambler_model, [sequence_template], [x_mean_logits])
#The loss here is a placeholder; actual training goes through loss_model defined below
scrambler_model.compile(
    optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
    loss='mean_squared_error'
)
#Build Auto-scrambler pipeline
#Define model inputs
ae_scrambler_class = Input(shape=(1,), name='ae_scrambler_class')
ae_scrambler_input = Input(shape=(1, seq_length, 4), name='ae_scrambler_input')
#APARENT-specific tensors (library one-hot and distal-PAS indicator)
aparent_lib = Input(shape=(13,), name='aparent_lib_input')
aparent_distal_pas = Input(shape=(1,), name='aparent_distal_pas_input')
#Run encoder and decoder (scrambler(...)[0] is the importance-scaled logits)
_, scrambled_pwm, scrambled_sample, pwm_mask, _ = sampler(ae_scrambler_class, scrambler(ae_scrambler_input)[0])
#Define layer to deflate sample axis: (batch, samples, 1, len, 4) -> (batch*samples, 1, len, 4)
deflate_scrambled_sample = Lambda(lambda x: K.reshape(x, (batch_size * n_samples, 1, seq_length, 4)), name='deflate_scrambled_sample')
#Deflate sample axis
scrambled_sample_deflated = deflate_scrambled_sample(scrambled_sample)
def _make_prediction(inputs, predictor=predictor) :
    # Run the frozen predictor on a batch of (unscrambled) sequences;
    # the sequence is permuted into the channel-last layout the predictor expects.
    pred_seq_in, pred_lib_in, pred_distal_pas_in = inputs
    pred_seq_in_perm = K.expand_dims(pred_seq_in[:, 0, ...], axis=-1)
    return predictor([pred_seq_in_perm, pred_lib_in, pred_distal_pas_in])[0]
def _make_prediction_scrambled(inputs, predictor=predictor, n_samples=n_samples) :
    # Same as _make_prediction, but lib/distal inputs are tiled across the
    # deflated sample axis so their shapes match the (batch*samples) sequences.
    pred_seq_in, pred_lib_in, pred_distal_pas_in = inputs
    pred_seq_in_perm = K.expand_dims(pred_seq_in[:, 0, ...], axis=-1)
    return predictor([pred_seq_in_perm, K.tile(pred_lib_in, (n_samples, 1)), K.tile(pred_distal_pas_in, (n_samples, 1))])[0]
#Make reference prediction on non-scrambled input sequence
y_pred_non_scrambled = Lambda(_make_prediction, name='make_prediction_non_scrambled')([ae_scrambler_input, aparent_lib, aparent_distal_pas])
#Make prediction on scrambled sequence samples
y_pred_scrambled_deflated = Lambda(_make_prediction_scrambled, name='make_prediction_scrambled')([scrambled_sample_deflated, aparent_lib, aparent_distal_pas])
#Define layer to inflate sample axis back to (batch, samples)
inflate_scrambled_prediction = Lambda(lambda x: K.reshape(x, (batch_size, n_samples)), name='inflate_scrambled_prediction')
#Inflate sample axis
y_pred_scrambled = inflate_scrambled_prediction(y_pred_scrambled_deflated)
#Cost function parameters
pwm_start = 10
pwm_end = 201
target_bits = 0.5
#NLL cost — NOTE: despite the name, this uses the symmetric KL divergence
nll_loss_func = get_kl_divergence()
#Conservation cost
conservation_loss_func = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, target_bits=1.8)
#Entropy cost
entropy_loss_func = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, target_bits=target_bits)
#entropy_loss_func = get_margin_entropy_ame_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, max_bits=target_bits)
#Define annealing coefficient (set per-epoch by EpochVariableCallback / _anneal_func)
anneal_coeff = K.variable(1.0)
#Execute NLL cost (reference prediction is tiled across the sample axis)
nll_loss = Lambda(lambda x: nll_loss_func(K.tile(x[0], (1, K.shape(x[1])[1])), x[1]), name='nll')([y_pred_non_scrambled, y_pred_scrambled])
#Execute conservation cost (active while anneal_coeff == 1)
conservation_loss = Lambda(lambda x: anneal_coeff * conservation_loss_func(x[0], x[1]), name='conservation')([scrambled_pwm, pwm_mask])
#Execute entropy cost (takes over once anneal_coeff drops to 0)
entropy_loss = Lambda(lambda x: (1. - anneal_coeff) * entropy_loss_func(x[0], x[1]), name='entropy')([scrambled_pwm, pwm_mask])
loss_model = Model(
    [ae_scrambler_class, ae_scrambler_input, aparent_lib, aparent_distal_pas],
    [nll_loss, conservation_loss, entropy_loss]
)
#Initialize Sequence Templates and Masks
initialize_sequence_templates(loss_model, [sequence_template], [x_mean_logits])
#Each output already is a loss tensor, so the per-output "losses" just forward it
loss_model.compile(
    optimizer=keras.optimizers.Adam(lr=0.0001, beta_1=0.5, beta_2=0.9),
    loss={
        'nll' : get_weighted_loss(loss_coeff=1.0),
        'conservation' : get_weighted_loss(loss_coeff=1.0),
        'entropy' : get_weighted_loss(loss_coeff=1.0)
    }
)
scrambler_model.summary()
loss_model.summary()
#Training configuration
#Define number of training epochs
n_epochs = 50
#Define experiment suffix (optional; appended to the saved model name)
experiment_suffix = ""
#Define anneal function
def _anneal_func(val, epoch, n_epochs=n_epochs) :
if epoch in [0] :
return 1.0
return 0.0
#Encode the architecture hyperparameters into the saved-model name
architecture_str = "resnet_" + str(resnet_n_groups) + "_" + str(resnet_n_resblocks_per_group) + "_" + str(resnet_n_channels) + "_" + str(resnet_window_size) + "_" + str(resnet_drop_rate).replace(".", "")
model_name = "autoscrambler_dataset_" + dataset_name + "_sample_mode_" + sample_mode + "_n_samples_" + str(n_samples) + "_" + architecture_str + "_n_epochs_" + str(n_epochs) + "_target_bits_" + str(target_bits).replace(".", "") + experiment_suffix
print("Model save name = " + model_name)
#Execute training procedure
callbacks =[
    #ModelCheckpoint("model_checkpoints/" + model_name + "_epoch_{epoch:02d}.hdf5", monitor='val_loss', mode='min', period=10, save_weights_only=True),
    # Updates anneal_coeff at each epoch according to _anneal_func
    EpochVariableCallback(anneal_coeff, _anneal_func)
]
#All-zero scrambler-class inputs (single-class setup)
s_train = np.zeros((x_train.shape[0], 1))
s_test = np.zeros((x_test.shape[0], 1))
#One-hot APARENT library indicator; index 4 — TODO confirm which library this selects
aparent_l_train = np.zeros((x_train.shape[0], 13))
aparent_l_train[:, 4] = 1.
aparent_l_test = np.zeros((x_test.shape[0], 13))
aparent_l_test[:, 4] = 1.
#Distal-PAS indicator set to 1 for every example
aparent_d_train = np.ones((x_train.shape[0], 1))
aparent_d_test = np.ones((x_test.shape[0], 1))
# train the autoencoder
train_history = loss_model.fit(
    [s_train, x_train, aparent_l_train, aparent_d_train],
    # Dummy targets: each output already is a loss tensor (see get_weighted_loss)
    [s_train, s_train, s_train],
    shuffle=True,
    epochs=n_epochs,
    batch_size=batch_size,
    validation_data=(
        [s_test, x_test, aparent_l_test, aparent_d_test],
        [s_test, s_test, s_test]
    ),
    callbacks=callbacks
)
#Plot train (green) vs validation (orange) curves for the three loss components
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(3 * 4, 3))
n_epochs_actual = len(train_history.history['nll_loss'])
#NLL (symmetric KL) loss
ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['nll_loss'], linewidth=3, color='green')
ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_nll_loss'], linewidth=3, color='orange')
plt.sca(ax1)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("NLL", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
#Entropy loss
ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['entropy_loss'], linewidth=3, color='green')
ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_entropy_loss'], linewidth=3, color='orange')
plt.sca(ax2)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Entropy Loss", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
#Conservation loss
ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['conservation_loss'], linewidth=3, color='green')
ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_conservation_loss'], linewidth=3, color='orange')
plt.sca(ax3)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Conservation Loss", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.show()
# Save model and weights
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
scrambler_model.save(model_path)
print('Saved scrambler model at %s ' % (model_path))
#Load models
#NOTE(review): this cell and the next both reload from the same path — one
#full load_model (needs the custom st_sampled_softmax op) and one weights-only
#load; likely only one of the two is needed.
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
scrambler_model = load_model(model_path, custom_objects={
    'st_sampled_softmax' : st_sampled_softmax
})
print('Loaded scrambler model %s ' % (model_path))
#Load models
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
scrambler_model.load_weights(model_path, by_name=True)
print('Loaded scrambler model %s ' % (model_path))
#Visualize a few reconstructed sequence patterns
sequence_template = 'CTTCCGATCTNNNNNNNNNNNNNNNNNNNNCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNANTAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCTAC'
#Test-set indices whose logos are saved to disk
save_examples = [2, 3, 4, 5, 6, 7]
s_test = np.zeros((x_test.shape[0], 1))
aparent_l_test = np.zeros((x_test.shape[0], 13))
aparent_l_test[:, 4] = 1.
aparent_d_test = np.ones((x_test.shape[0], 1))
#Scramble one batch of test sequences
_, pwm_test, sample_test, _ = scrambler_model.predict_on_batch(x=[x_test[:32], s_test[:32]])
for plot_i in range(0, 10) :
    print("Test sequence " + str(plot_i) + ":")
    #Prediction on the original (non-scrambled) sequence
    y_test_hat_ref = predictor.predict(x=[np.expand_dims(np.expand_dims(x_test[plot_i, 0, :, :], axis=0), axis=-1), aparent_l_test[:1], aparent_d_test[:1]], batch_size=1)[0][0, 0]
    #Predictions on the first 10 scrambled samples of this sequence
    y_test_hat = predictor.predict(x=[np.expand_dims(sample_test[plot_i, :, 0, :, :], axis=-1), aparent_l_test[:32], aparent_d_test[:32]], batch_size=32)[0][:10, 0].tolist()
    print(" - Prediction (original) = " + str(round(y_test_hat_ref, 2))[:4])
    print(" - Predictions (scrambled) = " + str([float(str(round(y_test_hat[i], 2))[:4]) for i in range(len(y_test_hat))]))
    save_figs = False
    if save_examples is not None and plot_i in save_examples :
        save_figs = True
    plot_dna_logo(x_test[plot_i, 0, :, :], sequence_template=sequence_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_test_ix_" + str(plot_i))
    #Bug fix: the scrambled-PWM logo previously reused the same fig_name as the
    #original-sequence logo above, so the saved file was overwritten. Give it a
    #distinct name instead.
    plot_dna_logo(pwm_test[plot_i, 0, :, :], sequence_template=sequence_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_scrambled_pwm_test_ix_" + str(plot_i))
#Visualize a few reconstructed images
s_test = np.zeros((x_test.shape[0], 1))
#Scramble the whole test set
_, pwm_test, sample_test, importance_scores_test = scrambler_model.predict(x=[x_test, s_test], batch_size=32, verbose=True)
#Save predicted importance scores
np.save(model_name + "_importance_scores_test", importance_scores_test)
#Calculate original and scrambled predictions
aparent_l_test = np.zeros((x_test.shape[0], 13))
aparent_l_test[:, 4] = 1.
aparent_d_test = np.ones((x_test.shape[0], 1))
y_test_hats = []
y_test_hats_scrambled = []
for i in range(x_test.shape[0]) :
    #Original-sequence prediction vs mean prediction over the scrambled samples
    y_test_hat_ref = predictor.predict(x=[np.expand_dims(np.expand_dims(x_test[i, 0, :, :], axis=0), axis=-1), aparent_l_test[:1], aparent_d_test[:1]], batch_size=1)[0][0, 0]
    y_test_hat = np.mean(predictor.predict(x=[np.expand_dims(sample_test[i, :, 0, :, :], axis=-1), aparent_l_test[:32], aparent_d_test[:32]], batch_size=32)[0][:, 0])
    y_test_hats.append(y_test_hat_ref)
    y_test_hats_scrambled.append(y_test_hat)
y_test_hat = np.array(y_test_hats)
y_test_hat_scrambled = np.array(y_test_hats_scrambled)
from scipy.stats import pearsonr
save_figs = True
#Agreement between original and scrambled predictions: Pearson r and symmetric KL
r_val, _ = pearsonr(y_test_hat, y_test_hat_scrambled)
left_kl_divs = y_test_hat * np.log(y_test_hat / y_test_hat_scrambled) + (1. - y_test_hat) * np.log((1. - y_test_hat) / (1. - y_test_hat_scrambled))
right_kl_divs = y_test_hat_scrambled * np.log(y_test_hat_scrambled / y_test_hat) + (1. - y_test_hat_scrambled) * np.log((1. - y_test_hat_scrambled) / (1. - y_test_hat))
mean_kl_div = np.mean(left_kl_divs + right_kl_divs)
f = plt.figure(figsize=(4, 4))
plt.scatter(y_test_hat, y_test_hat_scrambled, color='black', s=5, alpha=0.25)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], [0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14)
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], [0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14)
plt.xlabel("Original Prediction", fontsize=14)
plt.ylabel("Scrambled Prediction", fontsize=14)
plt.title("R^2 = " + str(round(r_val**2, 2)) + ", KL = " + str(round(mean_kl_div, 2)), fontsize=14)
plt.tight_layout()
if save_figs :
    plt.savefig(model_name + "_test_scatter.png", transparent=True, dpi=300)
    plt.savefig(model_name + "_test_scatter.eps")
plt.show()
```
| github_jupyter |
```
import os
import json
import pickle
import random
from collections import defaultdict, Counter
from indra.literature.adeft_tools import universal_extract_text
from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id
from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator, load_disambiguator
from adeft_indra.ground.ground import AdeftGrounder
from adeft_indra.model_building.s3 import model_to_s3
from adeft_indra.model_building.escape import escape_filename
from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \
get_plaintexts_for_pmids
#Mine candidate longforms for the shortform(s) from PubMed full texts
adeft_grounder = AdeftGrounder()
shortforms = ['BP']
model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms))
results_path = os.path.abspath(os.path.join('../..', 'results', model_name))
miners = dict()
all_texts = {}
for shortform in shortforms:
    pmids = get_pmids_for_agent_text(shortform)
    text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms)
    # Drop near-empty texts before mining
    text_dict = {pmid: text for pmid, text in text_dict.items() if len(text) > 5}
    miners[shortform] = AdeftMiner(shortform)
    miners[shortform].process_texts(text_dict.values())
    all_texts.update(text_dict)
#Collect mined longforms per shortform, keeping only confidently-mined ones
longform_dict = {}
for shortform in shortforms:
    longforms = miners[shortform].get_longforms()
    # Heuristic filter: keep longforms whose count*score exceeds 2
    longforms = [(longform, count, score) for longform, count, score in longforms
                 if count*score > 2]
    longform_dict[shortform] = longforms
#Merge counts across all shortforms
combined_longforms = Counter()
for longform_rows in longform_dict.values():
    combined_longforms.update({longform: count for longform, count, score
                               in longform_rows})
#Automatic grounding pass; the top hit (if any) seeds grounding_map/names
grounding_map = {}
names = {}
for longform in combined_longforms:
    groundings = adeft_grounder.ground(longform)
    if groundings:
        grounding = groundings[0]['grounding']
        grounding_map[longform] = grounding
        names[grounding] = groundings[0]['name']
longforms, counts = zip(*combined_longforms.most_common())
pos_labels = []
# Notebook display of (longform, count) pairs
list(zip(longforms, counts))
#Seed groundings from a previously built disambiguator, if one exists.
#NOTE(review): the broad `except Exception: pass` is deliberate best-effort
#(no existing model is a normal case) but will also hide real errors.
try:
    disamb = load_disambiguator(shortforms[0])
    for shortform, gm in disamb.grounding_dict.items():
        for longform, grounding in gm.items():
            grounding_map[longform] = grounding
    for grounding, name in disamb.names.items():
        names[grounding] = name
    pos_labels = disamb.pos_labels
except Exception:
    pass
# Notebook display
names
#Interactive curation of groundings in the Adeft GUI (blocks until done)
grounding_map, names, pos_labels = ground_with_gui(longforms, counts,
                                                   grounding_map=grounding_map,
                                                   names=names, pos_labels=pos_labels, no_browser=True, port=8891)
result = [grounding_map, names, pos_labels]
# Notebook display of the curated result
result
#Curated grounding result for 'BP', pasted back from the GUI output above:
#longform -> grounding ID, grounding ID -> human-readable name, and the list
#of positive labels used to train the classifier.
grounding_map, names, pos_labels = [{'b phymatum stm815': 'ungrounded',
  'back pain': 'MESH:D001416',
  'back propagation': 'ungrounded',
  'backpropagation': 'ungrounded',
  'bacterial pneumonia': 'MESH:D018410',
  'bacterial production': 'ungrounded',
  'bacterioplankton production': 'ungrounded',
  'ball peptide': 'ungrounded',
  'banana peels': 'ungrounded',
  'band pass': 'ungrounded',
  'barometric pressure': 'ungrounded',
  'basal progenitor': 'ungrounded',
  'basic protein': 'ungrounded',
  'basilar papilla': 'ungrounded',
  'beat period': 'ungrounded',
  'bee pollen': 'ungrounded',
  'beet pulp': 'ungrounded',
  'bell s palsy': 'MESH:D020330',
  'bench press': 'ungrounded',
  'benzo a pyrene': 'CHEBI:CHEBI:29865',
  'benzo alpha pyrene': 'CHEBI:CHEBI:29865',
  'benzo α pyrene': 'CHEBI:CHEBI:29865',
  'benzophenone': 'ungrounded',
  'benzopyrene': 'ungrounded',
  'benzoyl peroxide': 'ungrounded',
  'benzphetamine': 'CHEBI:CHEBI:3044',
  'benzpyrene': 'CHEBI:CHEBI:29865',
  'benzylpenicillin': 'CHEBI:CHEBI:18208',
  'bereitschaftspotential': 'ungrounded',
  'biliopancreatic': 'ungrounded',
  'binding potential': 'ungrounded',
  'binding protein': 'ungrounded',
  'biodegradable polymer': 'ungrounded',
  'biological process': 'ungrounded',
  'biopharmaceutical': 'ungrounded',
  'biopolymer': 'CHEBI:CHEBI:33694',
  'biopterin': 'CHEBI:CHEBI:15373',
  'biparental': 'ungrounded',
  'biphenyl': 'CHEBI:CHEBI:17097',
  'bipolar': 'MESH:D001714',
  'bipolar disorder': 'MESH:D001714',
  'bipolar patients': 'MESH:D001714',
  'bipyridyl': 'CHEBI:CHEBI:35545',
  'birch pollen': 'ungrounded',
  'bisection point': 'ungrounded',
  'bisphenol': 'CHEBI:CHEBI:22901',
  'bisphosphonate': 'MESH:D004164',
  'black phosphorus': 'ungrounded',
  'blast phase': 'MESH:D001752',
  'blastic phase': 'ungrounded',
  'blocking peptide': 'ungrounded',
  'blood perfusion': 'MESH:D010477',
  'blood plasma': 'MESH:D010949',
  'blood pressure': 'MESH:D001794',
  'blueberry juice and probiotics': 'MESH:D019936',
  'bodily pain': 'MESH:D010146',
  'bodipy ® 665 676': 'ungrounded',
  'body project': 'ungrounded',
  'bovine pericardium': 'MESH:D010496',
  'bowenoid papulosis': 'ungrounded',
  'boysenberry polyphenol': 'CHEBI:CHEBI:26195',
  'brachial plexus': 'ungrounded',
  'branch point': 'ungrounded',
  'branchpoint': 'ungrounded',
  'brazilian propolis': 'MESH:D011429',
  'breaking point': 'ungrounded',
  'breakpoint': 'ungrounded',
  'brevipedicellus': 'UP:P46639',
  'bromopyruvate': 'MESH:C017092',
  'bronchopneumonia': 'MESH:D001996',
  'bruce peninsula': 'ungrounded',
  'buckypaper': 'ungrounded',
  'bullous pemphigoid': 'MESH:D010391',
  'bupropion': 'CHEBI:CHEBI:3219',
  'burkholderia pseudomallei': 'MESH:D016957',
  'butyl paraben': 'CHEBI:CHEBI:85122',
  'butylparaben': 'CHEBI:CHEBI:88542',
  'butylphenol': 'ungrounded'},
 {'MESH:D001416': 'Back Pain',
  'MESH:D018410': 'Pneumonia, Bacterial',
  'MESH:D020330': 'Bell Palsy',
  'CHEBI:CHEBI:29865': 'benzo[a]pyrene',
  'CHEBI:CHEBI:3044': 'benzphetamine',
  'CHEBI:CHEBI:18208': 'benzylpenicillin',
  'CHEBI:CHEBI:33694': 'biomacromolecule',
  'CHEBI:CHEBI:15373': 'biopterin',
  'CHEBI:CHEBI:17097': 'biphenyl',
  'MESH:D001714': 'Bipolar Disorder',
  'CHEBI:CHEBI:35545': 'bipyridine',
  'CHEBI:CHEBI:22901': 'bisphenol',
  'MESH:D004164': 'Diphosphonates',
  'MESH:D001752': 'Blast Crisis',
  'MESH:D010477': 'Perfusion',
  'MESH:D010949': 'Plasma',
  'MESH:D001794': 'Blood Pressure',
  'MESH:D019936': 'Probiotics',
  'MESH:D010146': 'Pain',
  'MESH:D010496': 'Pericardium',
  'CHEBI:CHEBI:26195': 'polyphenol',
  'MESH:D011429': 'Propolis',
  'UP:P46639': 'KNAT1',
  'MESH:C017092': 'bromopyruvate',
  'MESH:D001996': 'Bronchopneumonia',
  'MESH:D010391': 'Pemphigoid, Bullous',
  'CHEBI:CHEBI:3219': 'bupropion',
  'MESH:D016957': 'Burkholderia pseudomallei',
  'CHEBI:CHEBI:85122': 'paraben',
  'CHEBI:CHEBI:88542': 'Butylparaben'},
 ['CHEBI:CHEBI:29865', 'MESH:D001794', 'MESH:D004164']]
#Longforms excluded from the final grounding dict (self-referential matches)
excluded_longforms = ['bp']
#Per-shortform grounding dictionary restricted to curated, non-excluded longforms
grounding_dict = {shortform: {longform: grounding_map[longform]
                              for longform, _, _ in longforms if longform in grounding_map
                              and longform not in excluded_longforms}
                  for shortform, longforms in longform_dict.items()}
result = [grounding_dict, names, pos_labels]
#Persist the preliminary grounding info next to the model results
if not os.path.exists(results_path):
    os.mkdir(results_path)
with open(os.path.join(results_path, f'{model_name}_preliminary_grounding_info.json'), 'w') as f:
    json.dump(result, f)
#Build the labeled training corpus from the mined texts.
#additional_entities / unambiguous_agent_texts are empty here, so the
#corpus-expansion loops below are no-ops for this model.
additional_entities = {}
unambiguous_agent_texts = {}
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items())
agent_text_pmid_map = defaultdict(list)
for text, label, id_ in corpus:
    agent_text_pmid_map[label].append(id_)
entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1),
                                                    major_topic=True))for entity in additional_entities}
#Pairwise PMID overlaps, used to sanity-check entity separability
intersection1 = []
for entity1, pmids1 in entity_pmid_map.items():
    for entity2, pmids2 in entity_pmid_map.items():
        intersection1.append((entity1, entity2, len(pmids1 & pmids2)))
intersection2 = []
for entity1, pmids1 in agent_text_pmid_map.items():
    for entity2, pmids2 in entity_pmid_map.items():
        intersection2.append((entity1, entity2, len(set(pmids1) & pmids2)))
# Notebook displays
intersection1
intersection2
#Extend the corpus with texts for unambiguous agent texts (empty here)
all_used_pmids = set()
for entity, agent_texts in unambiguous_agent_texts.items():
    used_pmids = set()
    for agent_text in agent_texts[1]:
        pmids = set(get_pmids_for_agent_text(agent_text))
        new_pmids = list(pmids - all_texts.keys() - used_pmids)
        text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts)
        corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items() if len(text) >= 5])
        used_pmids.update(new_pmids)
    all_used_pmids.update(used_pmids)
#Extend the corpus with entity-linked texts (empty here), capped at 10000 PMIDs
for entity, pmids in entity_pmid_map.items():
    new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids)
    if len(new_pmids) > 10000:
        new_pmids = random.choices(new_pmids, k=10000)
    _, contains = additional_entities[entity]
    text_dict = get_plaintexts_for_pmids(new_pmids, contains=contains)
    corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items() if len(text) >= 5])
names.update({key: value[0] for key, value in additional_entities.items()})
names.update({key: value[0] for key, value in unambiguous_agent_texts.items()})
pos_labels = list(set(pos_labels) | additional_entities.keys() |
                  unambiguous_agent_texts.keys())
%%capture
#Train the disambiguation classifier with 5-fold CV (cell output suppressed by %%capture)
classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729)
param_grid = {'C': [100.0], 'max_features': [10000]}
texts, labels, pmids = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5)
classifier.stats
#Package, save locally, and upload the trained disambiguator
disamb = AdeftDisambiguator(classifier, grounding_dict, names)
disamb.dump(model_name, results_path)
print(disamb.info())
model_to_s3(disamb)
preds = [disamb.disambiguate(text) for text in all_texts.values()]
#NOTE(review): 'HGNC:10967' does not appear among the groundings defined above
#for this model — confirm this spot-check filter is intended here.
texts = [text for pred, text in zip(preds, all_texts.values()) if pred[0] == 'HGNC:10967']
texts[3]
```
| github_jupyter |
# Import the Fashion MNIST dataset
This guide uses the Fashion MNIST dataset, which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 × 28 pixels), as seen here:
Fashion MNIST sprite
Figure 1. Fashion-MNIST samples (by Zalando, MIT License).
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
</td></tr>
</table>
Fashion MNIST is intended as a drop-in replacement for the classic MNIST dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.
This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.
We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, using the Datasets API:
# Load the Libraries
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
%matplotlib inline
import os
# List every file available under the Kaggle input directory
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
```
# Load and Explore Dataset
```
# Load the Fashion-MNIST CSVs: column 0 is the class label, the remaining 784
# columns are the 28x28 pixel values (see the slicing in the next cell)
train_df = pd.read_csv('../input/fashionmnist/fashion-mnist_train.csv')
test_df = pd.read_csv('../input/fashionmnist/fashion-mnist_test.csv')
print('The shape of training dataset : ', train_df.shape)
print('The shape of testing dataset : ', test_df.shape)
train_df.head(5)
```
**Now we start with converting the pixel values into array format**
```
# Convert the DataFrames to float arrays
train = np.array(train_df, dtype = 'float32')
test = np.array(test_df, dtype = 'float32')
# Column 0 is the label; pixel columns are scaled from [0, 255] to [0, 1]
x_train = train[:,1:]/255
y_train = train[:,0]
x_test= test[:,1:]/255
y_test=test[:,0]
```
**Let us split the training and test datasets**
```
# Hold out 20% of the training data as a validation set
X_train, X_validate,y_train, y_validate = train_test_split(x_train, y_train, test_size = 0.2, random_state = 5000)
print('The size of training data after model selection : ', X_train.shape, y_train.shape)
print('The size of Validation data after model selection : ', X_validate.shape, y_validate.shape)
```
# Data Visualization
```
# Human-readable class names, indexed by the integer label (0-9)
class_names = ['T_shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure(figsize=(17, 17))
# Show the first 25 training images in a 5x5 grid with their class names
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.grid(False)
    plt.imshow(x_train[i].reshape((28,28)))
    plt.colorbar()
    label_index = int(y_train[i])
    plt.title(class_names[label_index])
plt.show()
```
# Model Development
**Before we start with model layout let's do some basic things**
```
#Assigning image dimensions for model
img_rows = 28
img_cols = 28
img_shape = (img_rows, img_cols,1)
# Reshape flat 784-vectors to (28, 28, 1) for the Flatten/Conv2D layers
X_train = X_train.reshape(X_train.shape[0],*img_shape)
x_test = x_test.reshape(x_test.shape[0],*img_shape)
X_validate = X_validate.reshape(X_validate.shape[0],*img_shape)
```
# Simple layered Neural Network
**With heavy Dropout (rate 0.8)**
```
# Dense classifier: Flatten -> 512 ReLU -> Dropout -> 10-way softmax.
# NOTE(review): the dropout rate here is 0.8, while the second model below
# uses 0.2 — confirm which variant was intended for the "without" baseline.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape = img_shape),
    tf.keras.layers.Dense(512, activation = 'relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(10, activation = 'softmax') #since we want a probability based output
])
model.compile(optimizer = 'adam',
              loss = tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics = ['accuracy'])
history = model.fit(X_train, y_train, epochs = 30, verbose=2, validation_data=(X_validate, y_validate))
```
**Lets check the plots**
```
# Plot training vs validation loss and accuracy curves
plt.figure(figsize=(17,17))
plt.subplot(2, 2, 1)
plt.plot(history.history['loss'], label='Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.legend()
plt.title('Training - Loss Function')
plt.subplot(2, 2, 2)
plt.plot(history.history['accuracy'], label='Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.legend()
plt.title('Train - Accuracy')
```
**With Dropout**
```
# Same dense architecture with a moderate 0.2 dropout rate
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape = img_shape),
    tf.keras.layers.Dense(512, activation = 'relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation = 'softmax') #since we want a probability based output
])
```
**Description of model is shown below**
```
model.summary()
# Integer labels -> sparse categorical cross-entropy
model.compile(optimizer = 'adam',
              loss = tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics = ['accuracy'])
history = model.fit(X_train, y_train, epochs = 30, verbose=2, validation_data=(X_validate, y_validate))
```
**Plots after using dropout**
```
# Plot training vs validation curves (same layout as the earlier plotting cell;
# a shared helper would remove this duplication)
plt.figure(figsize=(17,17))
plt.subplot(2, 2, 1)
plt.plot(history.history['loss'], label='Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.legend()
plt.title('Training - Loss Function')
plt.subplot(2, 2, 2)
plt.plot(history.history['accuracy'], label='Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.legend()
plt.title('Train - Accuracy')
```
# CNN Model
```
# CNN: two conv/pool stages followed by a dense classifier head
cnn_model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu,
                           input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D((2, 2), strides=2),
    tf.keras.layers.Conv2D(64, (3,3), padding='same', activation=tf.nn.relu),
    tf.keras.layers.MaxPooling2D((2, 2), strides=2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
cnn_model.summary()
cnn_model.compile(optimizer = 'adam',
                  loss = tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics = ['accuracy'])
history = cnn_model.fit(X_train, y_train, epochs = 30, verbose=2, validation_data=(X_validate, y_validate))
# Plot training vs validation curves
plt.figure(figsize=(17,17))
plt.subplot(2, 2, 1)
plt.plot(history.history['loss'], label='Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.legend()
plt.title('Training - Loss Function')
plt.subplot(2, 2, 2)
plt.plot(history.history['accuracy'], label='Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.legend()
plt.title('Train - Accuracy')
```
# Discussion
Though the accuracy obtained on the validation set using the CNN was good, this model still needs improvement and will be updated in future versions!
Do upvote my kernel if you liked it — this is my first CNN implementation.
| github_jupyter |
# Project 3: Implement SLAM
---
## Project Overview
In this project, you'll implement SLAM for a robot that moves and senses in a 2 dimensional, grid world!
SLAM gives us a way to both localize a robot and build up a map of its environment as a robot moves and senses in real-time. This is an active area of research in the fields of robotics and autonomous systems. Since this localization and map-building relies on the visual sensing of landmarks, this is a computer vision problem.
Using what you've learned about robot motion, representations of uncertainty in motion and sensing, and localization techniques, you will be tasked with defining a function, `slam`, which takes in six parameters as input and returns the vector `mu`.
> `mu` contains the (x,y) coordinate locations of the robot as it moves, and the positions of landmarks that it senses in the world
You can implement helper functions as you see fit, but your function must return `mu`. The vector, `mu`, should have (x, y) coordinates interlaced, for example, if there were 2 poses and 2 landmarks, `mu` will look like the following, where `P` is the robot position and `L` the landmark position:
```
mu = matrix([[Px0],
[Py0],
[Px1],
[Py1],
[Lx0],
[Ly0],
[Lx1],
[Ly1]])
```
You can see that `mu` holds the poses first `(x0, y0), (x1, y1), ...,` then the landmark locations at the end of the matrix; we consider a `nx1` matrix to be a vector.
## Generating an environment
In a real SLAM problem, you may be given a map that contains information about landmark locations, and in this example, we will make our own data using the `make_data` function, which generates a world grid with landmarks in it and then generates data by placing a robot in that world and moving and sensing over some number of time steps. The `make_data` function relies on a correct implementation of robot move/sense functions, which, at this point, should be complete and in the `robot_class.py` file. The data is collected as an instantiated robot moves and senses in a world. Your SLAM function will take in this data as input. So, let's first create this data and explore how it represents the movement and sensor measurements that our robot takes.
---
## Create the world
Use the code below to generate a world of a specified size with randomly generated landmark locations. You can change these parameters and see how your implementation of SLAM responds!
`data` holds the sensors measurements and motion of your robot over time. It stores the measurements as `data[i][0]` and the motion as `data[i][1]`.
#### Helper functions
You will be working with the `robot` class that may look familiar from the first notebook,
In fact, in the `helpers.py` file, you can read the details of how data is made with the `make_data` function. It should look very similar to the robot move/sense cycle you've seen in the first notebook.
```
import numpy as np
from helpers import make_data

# your implementation of slam should work with the following inputs
# feel free to change these input values and see how it responds!

# world parameters
num_landmarks = 5        # number of landmarks
N = 20                   # time steps
world_size = 100.0       # size of world (square)

# robot parameters
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0       # noise in robot motion
measurement_noise = 2.0  # noise in the measurements
distance = 20.0          # distance by which robot (intends to) move each iteration

# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
# data[i][0] holds the measurements and data[i][1] the motion at time step i
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
```
### A note on `make_data`
The function above, `make_data`, takes in so many world and robot motion/sensor parameters because it is responsible for:
1. Instantiating a robot (using the robot class)
2. Creating a grid world with landmarks in it
**This function also prints out the true location of landmarks and the *final* robot location, which you should refer back to when you test your implementation of SLAM.**
The `data` this returns is an array that holds information about **robot sensor measurements** and **robot motion** `(dx, dy)` that is collected over a number of time steps, `N`. You will have to use *only* these readings about motion and measurements to track a robot over time and determine the locations of the landmarks using SLAM. We only print out the true landmark locations for comparison, later.
In `data` the measurement and motion data can be accessed from the first and second index in the columns of the data array. See the following code for an example, where `i` is the time step:
```
measurement = data[i][0]
motion = data[i][1]
```
```
# print out some stats about the data
time_step = 0

# each measurement is [landmark_index, dx, dy]; motion is a (dx, dy) pair
print('Example measurements: \n', data[time_step][0])
print('\n')
print('Example motion: \n', data[time_step][1])
```
Try changing the value of `time_step`, you should see that the list of measurements varies based on what in the world the robot sees after it moves. As you know from the first notebook, the robot can only sense so far and with a certain amount of accuracy in the measure of distance between its location and the location of landmarks. The motion of the robot always is a vector with two values: one for x and one for y displacement. This structure will be useful to keep in mind as you traverse this data in your implementation of slam.
## Initialize Constraints
One of the most challenging tasks here will be to create and modify the constraint matrix and vector: omega and xi. In the second notebook, you saw an example of how omega and xi could hold all the values that define the relationships between robot poses `xi` and landmark positions `Li` in a 1D world, as seen below, where omega is the blue matrix and xi is the pink vector.
<img src='images/motion_constraint.png' width=50% height=50% />
In *this* project, you are tasked with implementing constraints for a 2D world. We are referring to robot poses as `Px, Py` and landmark positions as `Lx, Ly`, and one way to approach this challenge is to add *both* x and y locations in the constraint matrices.
<img src='images/constraints2D.png' width=50% height=50% />
You may also choose to create two of each omega and xi (one for x and one for y positions).
### TODO: Write a function that initializes omega and xi
Complete the function `initialize_constraints` so that it returns `omega` and `xi` constraints for the starting position of the robot. Any values that we do not yet know should be initialized with the value `0`. You may assume that our robot starts out in exactly the middle of the world with 100% confidence (no motion or measurement noise at this point). The inputs `N` time steps, `num_landmarks`, and `world_size` should give you all the information you need to construct initial constraints of the correct size and starting values.
*Depending on your approach you may choose to return one omega and one xi that hold all (x,y) positions *or* two of each (one for x values and one for y); choose whichever makes most sense to you!*
```
def initialize_constraints(N, num_landmarks, world_size):
    """Build the initial Graph SLAM constraint system.

    Parameters
    ----------
    N : int
        Number of robot poses (time steps) to track.
    num_landmarks : int
        Number of landmarks in the world.
    world_size : float
        Width/height of the square world; the robot starts at its center.

    Returns
    -------
    omega : numpy.ndarray of shape (2*(N+num_landmarks), 2*(N+num_landmarks))
        Constraint matrix with full confidence (1.0) on the initial
        x and y pose entries; every other cell starts at 0.
    xi : numpy.ndarray of shape (2*(N+num_landmarks), 1)
        Constraint vector seeded with the starting position
        (world_size / 2, world_size / 2).
    """
    # One row/column per x and per y coordinate of every pose and landmark.
    dim = 2 * (N + num_landmarks)

    omega = np.zeros((dim, dim))
    xi = np.zeros((dim, 1))

    # The robot is known to start dead-center with 100% confidence.
    center = world_size / 2
    omega[0, 0] = omega[1, 1] = 1.0
    xi[0, 0] = center
    xi[1, 0] = center

    return omega, xi
```
### Test as you go
It's good practice to test out your code, as you go. Since `slam` relies on creating and updating constraint matrices, `omega` and `xi` to account for robot sensor measurements and motion, let's check that they initialize as expected for any given parameters.
Below, you'll find some test code that allows you to visualize the results of your function `initialize_constraints`. We are using the [seaborn](https://seaborn.pydata.org/) library for visualization.
**Please change the test values of N, landmarks, and world_size and see the results**. Be careful not to use these values as input into your final slam function.
This code assumes that you have created one of each constraint: `omega` and `xi`, but you can change and add to this code, accordingly. The constraints should vary in size with the number of time steps and landmarks as these values affect the number of poses a robot will take `(Px0,Py0,...Pxn,Pyn)` and landmark locations `(Lx0,Ly0,...Lxn,Lyn)` whose relationships should be tracked in the constraint matrices. Recall that `omega` holds the weights of each variable and `xi` holds the value of the sum of these variables, as seen in Notebook 2. You'll need the `world_size` to determine the starting pose of the robot in the world and fill in the initial values for `xi`.
```
# import data viz resources
import matplotlib.pyplot as plt
from pandas import DataFrame
import seaborn as sns
%matplotlib inline

# define a small N and world_size (small for ease of visualization)
N_test = 5
num_landmarks_test = 2
small_world = 10

# initialize the constraints
# with these test values, omega should be 14x14 and xi 14x1
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)

# define figure size
plt.rcParams["figure.figsize"] = (10,7)

# display omega (only the top-left 2x2 entries should be non-zero: the initial pose)
sns.heatmap(DataFrame(initial_omega), cmap='Blues', annot=True, linewidths=.5)

# define figure size
plt.rcParams["figure.figsize"] = (1,7)

# display xi (only the first two entries should be non-zero: the world center)
sns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5)
```
---
## SLAM inputs
In addition to `data`, your slam function will also take in:
* N - The number of time steps that a robot will be moving and sensing
* num_landmarks - The number of landmarks in the world
* world_size - The size (w/h) of your world
* motion_noise - The noise associated with motion; the update confidence for motion should be `1.0/motion_noise`
* measurement_noise - The noise associated with measurement/sensing; the update weight for measurement should be `1.0/measurement_noise`
#### A note on noise
Recall that `omega` holds the relative "strengths" or weights for each position variable, and you can update these weights by accessing the correct index in omega `omega[row][col]` and *adding/subtracting* `1.0/noise` where `noise` is measurement or motion noise. `Xi` holds actual position values, and so to update `xi` you'll do a similar addition process only using the actual value of a motion or measurement. So for a vector index `xi[row][0]` you will end up adding/subtracting one measurement or motion divided by their respective `noise`.
### TODO: Implement Graph SLAM
Follow the TODO's below to help you complete this slam implementation (these TODO's are in the recommended order), then test out your implementation!
#### Updating with motion and measurements
With a 2D omega and xi structure as shown above (in earlier cells), you'll have to be mindful about how you update the values in these constraint matrices to account for motion and measurement constraints in the x and y directions. Recall that the solution to these matrices (which holds all values for robot poses `P` and landmark locations `L`) is the vector, `mu`, which can be computed at the end of the construction of omega and xi as the inverse of omega times xi: $\mu = \Omega^{-1}\xi$
**You may also choose to return the values of `omega` and `xi` if you want to visualize their final state!**
```
## TODO: Complete the code to implement SLAM
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
    """Run 2D Graph SLAM over recorded measurement/motion data.

    Parameters
    ----------
    data : list
        data[t][0] is the list of measurements at time step t, each entry
        being [landmark_index, dx, dy]; data[t][1] is the (dx, dy) motion
        taken at time step t.  len(data) is expected to be N - 1 so that
        every motion links an existing pose to the next one.
    N : int
        Number of robot poses (time steps).
    num_landmarks : int
        Number of landmarks in the world.
    world_size : float
        Side length of the square world; the robot starts at its center.
    motion_noise, measurement_noise : float
        Noise magnitudes; each constraint is weighted by 1/noise.

    Returns
    -------
    mu : numpy.ndarray of shape (2*(N + num_landmarks), 1)
        Interlaced (x, y) estimates: all N poses first, then all landmarks.
    """
    # Constraint system seeded with the known starting pose.
    omega, xi = initialize_constraints(N, num_landmarks, world_size)

    # Constraint strengths: higher noise -> weaker constraint.
    meas_weight = 1.0 / measurement_noise
    move_weight = 1.0 / motion_noise

    for t, (measurements, motion) in enumerate(data):
        # Rows/columns for pose t's x and y coordinates.
        px = 2 * t
        py = px + 1

        # --- measurement constraints (pose t <-> each seen landmark) ---
        for lm_index, mx, my in measurements:
            # Landmark rows live after all N pose rows, interlaced (x, y).
            lx = 2 * (N + lm_index)
            ly = lx + 1

            # Apply the same symmetric update to the x and y subsystems.
            for robot, landmark, measured in ((px, lx, mx), (py, ly, my)):
                omega[robot][robot] += meas_weight
                omega[landmark][landmark] += meas_weight
                omega[robot][landmark] -= meas_weight
                omega[landmark][robot] -= meas_weight
                xi[robot][0] -= measured * meas_weight
                xi[landmark][0] += measured * meas_weight

        # --- motion constraint (links pose t to pose t+1) ---
        for here, moved in ((px, motion[0]), (py, motion[1])):
            nxt = here + 2  # same coordinate of the next pose
            omega[here][here] += move_weight
            omega[nxt][nxt] += move_weight
            omega[here][nxt] -= move_weight
            omega[nxt][here] -= move_weight
            xi[here][0] -= moved * move_weight
            xi[nxt][0] += moved * move_weight

    # Solve omega * mu = xi directly; numerically preferable to forming
    # the explicit inverse and multiplying (omega is symmetric).
    mu = np.linalg.solve(omega, xi)
    return mu
```
## Helper functions
To check that your implementation of SLAM works for various inputs, we have provided two helper functions that will help display the estimated pose and landmark locations that your function has produced. First, given a result `mu` and number of time steps, `N`, we define a function that extracts the poses and landmarks locations and returns those as their own, separate lists.
Then, we define a function that nicely print out these lists; both of these we will call, in the next step.
```
# a helper function that creates a list of poses and of landmarks for ease of printing
# this only works for the suggested constraint architecture of interlaced x,y poses
def get_poses_landmarks(mu, N):
    """Split the SLAM solution vector into pose and landmark lists.

    Parameters
    ----------
    mu : (2*(N + num_landmarks), 1) matrix/array
        Interlaced (x, y) values: N poses first, landmarks after.
    N : int
        Number of robot poses at the front of mu.

    Returns
    -------
    (poses, landmarks) : two lists of (x, y) tuples.
    """
    # Everything after the first N (x, y) pairs is a landmark; deriving the
    # count from mu fixes the previous reliance on a global `num_landmarks`.
    num_landmarks = len(mu) // 2 - N

    # create a list of poses
    poses = [(mu[2 * i].item(), mu[2 * i + 1].item()) for i in range(N)]

    # create a list of landmarks
    landmarks = [(mu[2 * (N + i)].item(), mu[2 * (N + i) + 1].item())
                 for i in range(num_landmarks)]

    # return completed lists
    return poses, landmarks
def print_all(poses, landmarks):
print('\n')
print('Estimated Poses:')
for i in range(len(poses)):
print('['+', '.join('%.3f'%p for p in poses[i])+']')
print('\n')
print('Estimated Landmarks:')
for i in range(len(landmarks)):
print('['+', '.join('%.3f'%l for l in landmarks[i])+']')
```
## Run SLAM
Once you've completed your implementation of `slam`, see what `mu` it returns for different world sizes and different landmarks!
### What to Expect
The `data` that is generated is random, but you did specify the number, `N`, of time steps that the robot was expected to move and the `num_landmarks` in the world (which your implementation of `slam` should see and estimate a position for). Your robot should also start with an estimated pose in the very center of your square world, whose size is defined by `world_size`.
With these values in mind, you should expect to see a result that displays two lists:
1. **Estimated poses**, a list of (x, y) pairs that is exactly `N` in length since this is how many motions your robot has taken. The very first pose should be the center of your world, i.e. `[50.000, 50.000]` for a world that is 100.0 in square size.
2. **Estimated landmarks**, a list of landmark positions (x, y) that is exactly `num_landmarks` in length.
#### Landmark Locations
If you refer back to the printout of *exact* landmark locations when this data was created, you should see values that are very similar to those coordinates, but not quite (since `slam` must account for noise in motion and measurement).
```
# call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)

# print out the resulting landmarks and poses
if(mu is not None):
    # get the lists of poses and landmarks
    # and print them out
    poses, landmarks = get_poses_landmarks(mu, N)
    print_all(poses, landmarks)
```
## Visualize the constructed world
Finally, using the `display_world` code from the `helpers.py` file (which was also used in the first notebook), we can actually visualize what you have coded with `slam`: the final position of the robot and the positon of landmarks, created from only motion and measurement data!
**Note that these should be very similar to the printed *true* landmark locations and final pose from our call to `make_data` early in this notebook.**
```
# import the helper function
from helpers import display_world

# Display the final world!

# define figure size
plt.rcParams["figure.figsize"] = (20,20)

# check if poses has been created (i.e. the slam cell above has been run)
if 'poses' in locals():
    # print out the last pose
    print('Last pose: ', poses[-1])
    # display the last position of the robot *and* the landmark positions
    display_world(int(world_size), poses[-1], landmarks)
```
### Question: How far away is your final pose (as estimated by `slam`) compared to the *true* final pose? Why do you think these poses are different?
You can find the true value of the final pose in one of the first cells where `make_data` was called. You may also want to look at the true landmark locations and compare them to those that were estimated by `slam`. Ask yourself: what do you think would happen if we moved and sensed more (increased N)? Or if we had lower/higher noise parameters.
**Answer**: It is very close dx = 0.3 and dy = 0.25. This poses are different due to uncertainty in motion and sensing (noise). If we had lower noise parameters the differences would be even smaller and vice versa - higher noise - larger differences. Sensing and moving for more time should also give us more exact results.
## Testing
To confirm that your slam code works before submitting your project, it is suggested that you run it on some test data and cases. A few such cases have been provided for you, in the cells below. When you are ready, uncomment the test cases in the next cells (there are two test cases, total); your output should be **close-to or exactly** identical to the given results. If there are minor discrepancies it could be a matter of floating point accuracy or in the calculation of the inverse matrix.
### Submit your project
If you pass these tests, it is a good indication that your project will pass all the specifications in the project rubric. Follow the submission instructions to officially submit!
```
# Here is the data and estimated outputs for test case 1
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, 
-18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]
# # Test Case 1
# #
# Estimated Pose(s):
# [50.000, 50.000]
# [37.858, 33.921]
# [25.905, 18.268]
# [13.524, 2.224]
# [27.912, 16.886]
# [42.250, 30.994]
# [55.992, 44.886]
# [70.749, 59.867]
# [85.371, 75.230]
# [73.831, 92.354]
# [53.406, 96.465]
# [34.370, 100.134]
# [48.346, 83.952]
# [60.494, 68.338]
# [73.648, 53.082]
# [86.733, 38.197]
# [79.983, 20.324]
# [72.515, 2.837]
# [54.993, 13.221]
# [37.164, 22.283]
# Estimated Landmarks:
# [82.679, 13.435]
# [70.417, 74.203]
# [36.688, 61.431]
# [18.705, 66.136]
# [20.437, 16.983]
## Run test case 1 and compare the printed output to the expected values above ###
mu_1 = slam(test_data1, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_1, 20)
print_all(poses, landmarks)
# Here is the data and estimated outputs for test case 2
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 
10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]]
## Test Case 2
##
# Estimated Pose(s):
# [50.000, 50.000]
# [69.035, 45.061]
# [87.655, 38.971]
# [76.084, 55.541]
# [64.283, 71.684]
# [52.396, 87.887]
# [44.674, 68.948]
# [37.532, 49.680]
# [31.392, 30.893]
# [24.796, 12.012]
# [33.641, 26.440]
# [43.858, 43.560]
# [54.735, 60.659]
# [65.884, 77.791]
# [77.413, 94.554]
# [96.740, 98.020]
# [76.149, 99.586]
# [70.211, 80.580]
# [64.130, 61.270]
# [58.183, 42.175]
# Estimated Landmarks:
# [76.777, 42.415]
# [85.109, 76.850]
# [13.687, 95.386]
# [59.488, 39.149]
# [69.283, 93.654]
## Run test case 2 and compare the printed output to the expected values above ###
mu_2 = slam(test_data2, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_2, 20)
print_all(poses, landmarks)
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Classification of credit card fraudulent transactions with local run **_
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Train](#Train)
1. [Results](#Results)
1. [Test](#Tests)
1. [Explanation](#Explanation)
1. [Acknowledgements](#Acknowledgements)
## Introduction
In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.
This notebook is using the local machine compute to train the model.
If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
In this notebook you will learn how to:
1. Create an experiment using an existing workspace.
2. Configure AutoML using `AutoMLConfig`.
3. Train the model.
4. Explore the results.
5. Test the fitted model.
6. Explore any model's explanation and explore feature importance in azure portal.
7. Create an AKS cluster, deploy the webservice of AutoML scoring model and the explainer model to the AKS and consume the web service.
## Setup
As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
from matplotlib import pyplot as plt
import pandas as pd
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig
from azureml.interpret import ExplanationClient
```
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
```
print("This notebook was created using version 1.20.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")

# Connect to the workspace described by the local Azure ML config file.
ws = Workspace.from_config()

# choose a name for experiment
experiment_name = 'automl-classification-ccard-local'
experiment = Experiment(ws, experiment_name)

# Summarize the workspace/experiment details in a one-row table.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Experiment Name'] = experiment.name

# None (not -1) disables column-width truncation; -1 was deprecated in
# pandas 1.0 and raises an error in modern pandas versions.
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```
### Load Data
Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model.
```
# remote CSV containing both features and the 'Class' label column
data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv"
dataset = Dataset.Tabular.from_delimited_files(data)
# 80/20 train/validation split; the fixed seed keeps the split reproducible
training_data, validation_data = dataset.random_split(percentage=0.8, seed=223)
label_column_name = 'Class'
```
## Train
Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression|
|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|
|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|
|**n_cross_validations**|Number of cross validation splits.|
|**training_data**|Input dataset, containing both features and label column.|
|**label_column_name**|The name of the label column.|
**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)
```
automl_settings = {
    "n_cross_validations": 3,                               # number of cross-validation splits
    "primary_metric": 'average_precision_score_weighted',   # metric AutoML optimizes
    "experiment_timeout_hours": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ability to find the best model possible
    "verbosity": logging.INFO,
    "enable_stack_ensemble": False
}

# Combine the task/data settings with the dict above into one config object.
automl_config = AutoMLConfig(task = 'classification',
                             debug_log = 'automl_errors.log',
                             training_data = training_data,
                             label_column_name = label_column_name,
                             **automl_settings
                            )
```
Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while.
In this example, we specify `show_output = True` to print currently running iterations to the console.
```
# submit the experiment; show_output=True streams iteration progress to the console
local_run = experiment.submit(automl_config, show_output = True)

# If you need to retrieve a run that already started, use the following code
#from azureml.train.automl.run import AutoMLRun
#local_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')

local_run
```
## Results
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details
```
# interactive widget that monitors the run's child iterations as they complete
from azureml.widgets import RunDetails
RunDetails(local_run).show()
```
### Analyze results
#### Retrieve the Best Model
Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
```
# retrieve the best run and its fitted model from the completed AutoML run
best_run, fitted_model = local_run.get_output()
fitted_model
```
#### Print the properties of the model
The fitted_model is a python object and you can read the different properties of the object.
## Tests
Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values.
```
# convert the test data to dataframe
# features only (label column dropped) for prediction input
X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe()
# ground-truth labels kept separately for comparison
y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe()

# call the predict functions on the model
y_pred = fitted_model.predict(X_test_df)
y_pred
```
### Calculate metrics for the prediction
Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values
from the trained model that was returned.
```
from sklearn.metrics import confusion_matrix
import numpy as np
import itertools

# rows = actual class, columns = predicted class
cf =confusion_matrix(y_test_df.values,y_pred)
plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')
plt.colorbar()
plt.title('Confusion Matrix')
plt.xlabel('Predicted')
plt.ylabel('Actual')
class_labels = ['False','True']
tick_marks = np.arange(len(class_labels))
plt.xticks(tick_marks,class_labels)
# extra empty ticks pad the y axis so the two cells are vertically centered
plt.yticks([-0.5,0,1,1.5],['','False','True',''])

# plotting text value inside cells
# threshold picks white text on dark (high-count) cells, black on light ones
thresh = cf.max() / 2.
for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):
    plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')
plt.show()
```
## Explanation
In this section, we will show how to compute model explanations and visualize the explanations using azureml-interpret package. We will also show how to run the automl model and the explainer model through deploying an AKS web service.
Besides retrieving an existing model explanation for an AutoML model, you can also explain your AutoML model with different test data. The following steps will allow you to compute and visualize engineered feature importance based on your test data.
### Run the explanation
#### Download the engineered feature importance from artifact store
You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features.
```
# Download the engineered-feature explanations stored in the best run's artifacts.
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation(raw=False)
print(engineered_explanations.get_feature_importance_dict())
print("You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + best_run.get_portal_url())
```
#### Download the raw feature importance from artifact store
You can use ExplanationClient to download the raw feature explanations from the artifact store of the best_run. You can also use azure portal url to view the dash board visualization of the feature importance values of the raw features.
```
# Download explanations mapped back onto the raw (original) input features.
raw_explanations = client.download_model_explanation(raw=True)
print(raw_explanations.get_feature_importance_dict())
print("You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + best_run.get_portal_url())
```
#### Retrieve any other AutoML model from training
```
automl_run, fitted_model = local_run.get_output(metric='accuracy')
```
#### Setup the model explanations for AutoML models
The fitted_model can generate the following which will be used for getting the engineered explanations using automl_setup_model_explanations:-
1. Featurized data from train samples/test samples
2. Gather engineered name lists
3. Find the classes in your labeled column in classification scenarios
The automl_explainer_setup_obj contains all the structures from above list.
```
# Re-create the feature/label splits used to set up the model explanations.
X_train = training_data.drop_columns(columns=[label_column_name])
y_train = training_data.keep_columns(columns=[label_column_name], validate=True)
X_test = validation_data.drop_columns(columns=[label_column_name])
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations
# Produces featurized data, engineered feature names, and class labels for the explainer.
automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train,
                                                             X_test=X_test, y=y_train,
                                                             task='classification')
```
#### Initialize the Mimic Explainer for feature importance
For explaining the AutoML models, use the MimicWrapper from azureml-interpret package. The MimicWrapper can be initialized with fields in automl_explainer_setup_obj, your workspace and a surrogate model to explain the AutoML model (fitted_model here). The MimicWrapper also takes the automl_run object where engineered explanations will be uploaded.
```
from interpret.ext.glassbox import LGBMExplainableModel
from azureml.interpret.mimic_wrapper import MimicWrapper
# Wrap the AutoML estimator with a surrogate (mimic) model; explanation results
# will be uploaded to automl_run.
explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator,
                         explainable_model=automl_explainer_setup_obj.surrogate_model,
                         init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run,
                         features=automl_explainer_setup_obj.engineered_feature_names,
                         feature_maps=[automl_explainer_setup_obj.feature_map],
                         classes=automl_explainer_setup_obj.classes,
                         explainer_kwargs=automl_explainer_setup_obj.surrogate_model_params)
```
#### Use Mimic Explainer for computing and visualizing engineered feature importance
The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features.
```
# Compute the engineered explanations
# Local and global importances over the transformed (featurized) test samples.
engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform)
print(engineered_explanations.get_feature_importance_dict())
print("You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + automl_run.get_portal_url())
```
#### Use Mimic Explainer for computing and visualizing raw feature importance
The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the original features in your data. You can also use azure portal url to view the dash board visualization of the feature importance values of the original/raw features.
```
# Compute the raw explanations
# get_raw=True maps engineered-feature importances back onto the original columns.
raw_explanations = explainer.explain(['local', 'global'], get_raw=True,
                                     raw_feature_names=automl_explainer_setup_obj.raw_feature_names,
                                     eval_dataset=automl_explainer_setup_obj.X_test_transform,
                                     raw_eval_dataset=automl_explainer_setup_obj.X_test_raw)
print(raw_explanations.get_feature_importance_dict())
print("You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + automl_run.get_portal_url())
```
#### Initialize the scoring Explainer, save and upload it for later use in scoring explanation
```
# Create, pickle, and upload the scoring explainer used at inference time.
# (Restored: the with-block body had lost its indentation, which is a SyntaxError.)
from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
import joblib

# Initialize the ScoringExplainer
scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])
# Pickle scoring explainer locally to './scoring_explainer.pkl'
scoring_explainer_file_name = 'scoring_explainer.pkl'
with open(scoring_explainer_file_name, 'wb') as stream:
    joblib.dump(scoring_explainer, stream)
# Upload the scoring explainer to the automl run
automl_run.upload_file('outputs/scoring_explainer.pkl', scoring_explainer_file_name)
```
### Deploying the scoring and explainer models to a web service to Azure Kubernetes Service (AKS)
We use the TreeScoringExplainer from azureml.interpret package to create the scoring explainer which will be used to compute the raw and engineered feature importances at the inference time. In the cell below, we register the AutoML model and the scoring explainer with the Model Management Service.
```
# Register trained automl model present in the 'outputs' folder in the artifacts
# Both models are registered so the web service can load them by name in score.py's init().
original_model = automl_run.register_model(model_name='automl_model',
                                           model_path='outputs/model.pkl')
scoring_explainer_model = automl_run.register_model(model_name='scoring_explainer',
                                                    model_path='outputs/scoring_explainer.pkl')
```
#### Create the conda dependencies for setting up the service
We need to download the conda dependencies using the automl_run object.
```
from azureml.automl.core.shared import constants
from azureml.core.environment import Environment
# Download the conda spec the AutoML run trained with and build a matching Environment,
# so the deployed service uses the same package versions.
automl_run.download_file(constants.CONDA_ENV_FILE_PATH, 'myenv.yml')
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
myenv
```
#### Write the Entry Script
Write the script that will be used to predict on your model
```
%%writefile score.py
import joblib
import pandas as pd
from azureml.core.model import Model
from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations
def init():
global automl_model
global scoring_explainer
# Retrieve the path to the model file using the model name
# Assume original model is named original_prediction_model
automl_model_path = Model.get_model_path('automl_model')
scoring_explainer_path = Model.get_model_path('scoring_explainer')
automl_model = joblib.load(automl_model_path)
scoring_explainer = joblib.load(scoring_explainer_path)
def run(raw_data):
data = pd.read_json(raw_data, orient='records')
# Make prediction
predictions = automl_model.predict(data)
# Setup for inferencing explanations
automl_explainer_setup_obj = automl_setup_model_explanations(automl_model,
X_test=data, task='classification')
# Retrieve model explanations for engineered explanations
engineered_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform)
# Retrieve model explanations for raw explanations
raw_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform, get_raw=True)
# You can return any data type as long as it is JSON-serializable
return {'predictions': predictions.tolist(),
'engineered_local_importance_values': engineered_local_importance_values,
'raw_local_importance_values': raw_local_importance_values}
```
#### Create the InferenceConfig
Create the inference config that will be used when deploying the model
```
from azureml.core.model import InferenceConfig
# Pair the entry script with the AutoML conda environment for deployment.
inf_config = InferenceConfig(entry_script='score.py', environment=myenv)
```
#### Provision the AKS Cluster
This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.
```
# Attach to an existing AKS cluster by name, or provision a new one if absent.
# (Restored: the try/except bodies had lost their indentation, a SyntaxError.)
from azureml.core.compute import ComputeTarget, AksCompute
from azureml.core.compute_target import ComputeTargetException

# Choose a name for your cluster.
aks_name = 'scoring-explain'
# Verify that cluster does not exist already
try:
    aks_target = ComputeTarget(workspace=ws, name=aks_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # No cluster with this name yet: provision a new AKS cluster.
    prov_config = AksCompute.provisioning_configuration(vm_size='STANDARD_D3_V2')
    aks_target = ComputeTarget.create(workspace=ws,
                                      name=aks_name,
                                      provisioning_configuration=prov_config)
# Block until the cluster is ready (no-op for an already-provisioned cluster).
aks_target.wait_for_completion(show_output=True)
```
#### Deploy web service to AKS
```
# Set the web service configuration (using default here)
from azureml.core.webservice import AksWebservice
from azureml.core.model import Model
aks_config = AksWebservice.deploy_configuration()
aks_service_name ='model-scoring-local-aks'
# Deploy both the scoring explainer and the original model behind one AKS endpoint.
aks_service = Model.deploy(workspace=ws,
                           name=aks_service_name,
                           models=[scoring_explainer_model, original_model],
                           inference_config=inf_config,
                           deployment_config=aks_config,
                           deployment_target=aks_target)
aks_service.wait_for_deployment(show_output = True)
print(aks_service.state)
```
#### View the service logs
```
aks_service.get_logs()
```
#### Consume the web service using run method to do the scoring and explanation of scoring.
We test the web service by passing it data. The run() method retrieves API keys behind the scenes to make sure that the call is authenticated.
```
# Smoke-test the deployed endpoint with a single authenticated scoring call.
# Serialize the first row of the test data into json
X_test_json = X_test_df[:1].to_json(orient='records')
print(X_test_json)
# Call the service to get the predictions and the engineered and raw explanations
output = aks_service.run(X_test_json)
# Print the predicted value
print('predictions:\n{}\n'.format(output['predictions']))
# Print the engineered feature importances for the predicted value
print('engineered_local_importance_values:\n{}\n'.format(output['engineered_local_importance_values']))
# Print the raw feature importances for the predicted value
print('raw_local_importance_values:\n{}\n'.format(output['raw_local_importance_values']))
```
#### Clean up
Delete the service.
```
aks_service.delete()
```
## Acknowledgements
This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project
Please cite the following works:
• Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
• Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon
• Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE
o Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)
• Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier
• Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing
| github_jupyter |
## Common plotting pitfalls that get worse with large data
When working with large datasets, visualizations are often the only way available to understand the properties of that dataset -- there are simply too many data points to examine each one! Thus it is very important to be aware of some common plotting problems that are minor inconveniences with small datasets but very serious problems with larger ones.
We'll cover:
1. [Overplotting](#1.-Overplotting)
2. [Oversaturation](#2.-Oversaturation)
3. [Undersampling](#3.-Undersampling)
4. [Undersaturation](#4.-Undersaturation)
5. [Underutilized range](#5.-Underutilized-range)
6. [Nonuniform colormapping](#6.-Nonuniform-colormapping)
You can [skip to the end](#Summary) if you just want to see an illustration of these problems.
This notebook requires [HoloViews](http://holoviews.org), [colorcet](https://github.com/bokeh/colorcet), and matplotlib, and optionally scikit-image, which can be installed with:
```
conda install -c bokeh -c ioam holoviews colorcet matplotlib scikit-image
```
We'll first load the plotting libraries and set up some defaults:
```
import numpy as np
np.random.seed(42)
import holoviews as hv
from holoviews.operation.datashader import datashade
from holoviews import opts, dim
hv.extension('matplotlib')
from colorcet import fire
# Use only the upper (brighter) portion of the 'fire' colormap for datashaded plots.
datashade.cmap=fire[50:]
# Default styling for the element types used throughout this notebook.
opts.defaults(
    opts.Image(cmap="gray_r", axiswise=True),
    opts.Points(cmap="bwr", edgecolors='k', s=50, alpha=1.0), # Remove color_index=2
    opts.RGB(bgcolor="black", show_grid=False),
    opts.Scatter3D(color=dim('c'), fig_size=250, cmap='bwr', edgecolor='k', s=50, alpha=1.0)) #color_index=3
```
### 1. Overplotting
Let's consider plotting some 2D data points that come from two separate categories, here plotted as blue and red in **A** and **B** below. When the two categories are overlaid, the appearance of the result can be very different depending on which one is plotted first:
```
def blue_points(offset=0.5,pts=300):
    """Sample `pts` blue-category points from a 2D normal centered at
    (offset, offset), tagged with category value -1 in vdim 'c'."""
    x_vals = np.random.normal(offset, size=pts)
    y_vals = np.random.normal(offset, size=pts)
    category = -1 * np.ones((pts))
    return hv.Points((x_vals, y_vals, category), vdims=['c']).opts(color=dim('c'))
def red_points(offset=0.5,pts=300):
    """Sample `pts` red-category points from a 2D normal centered at
    (-offset, -offset), tagged with category value +1 in vdim 'c'."""
    x_vals = np.random.normal(-offset, size=pts)
    y_vals = np.random.normal(-offset, size=pts)
    category = np.ones((pts))
    return hv.Points((x_vals, y_vals, category), vdims=['c']).opts(color=dim('c'))
# A and B: each category alone; C and D: the same overlay in both z-orders,
# demonstrating how plot order changes the apparent category balance.
blues, reds = blue_points(), red_points()
blues + reds + (reds * blues) + (blues * reds)
```
Plots **C** and **D** show the same distribution of points, yet they give a very different impression of which category is more common, which can lead to incorrect decisions based on this data. Of course, both are equally common in this case, so neither **C** nor **D** accurately reflects the data. The cause for this problem is simply occlusion:
```
# Interleave the categories at fractional 'level' values and view them in 3D,
# making visible which points occlude which in the 2D overlays.
hmap = hv.HoloMap({0:blues,0.000001:reds,1:blues,2:reds}, kdims=['level'])
hv.Scatter3D(hmap.table(), kdims=['x','y','level'], vdims=['c'])
```
Occlusion of data by other data is called **overplotting** or **overdrawing**, and it occurs whenever a datapoint or curve is plotted on top of another datapoint or curve, obscuring it. It's thus a problem not just for scatterplots, as here, but for curve plots, 3D surface plots, 3D bar graphs, and any other plot type where data can be obscured.
### 2. Oversaturation
You can reduce problems with overplotting by using transparency/opacity, via the alpha parameter provided to control opacity in most plotting programs. E.g. if alpha is 0.1, full color saturation will be achieved only when 10 points overlap, reducing the effects of plot ordering but making it harder to see individual points:
```
# Same four plots with alpha=0.1: full saturation now needs ~10 overlapping points.
layout = blues + reds + (reds * blues) + (blues * reds)
layout.opts(opts.Points(s=50, alpha=0.1))
```
Here **C **and **D **look very similar (as they should, since the distributions are identical), but there are still a few locations with **oversaturation**, a problem that will occur when more than 10 points overlap. In this example the oversaturated points are located near the middle of the plot, but the only way to know whether they are there would be to plot both versions and compare, or to examine the pixel values to see if any have reached full saturation (a necessary but not sufficient condition for oversaturation). Locations where saturation has been reached have problems similar to overplotting, because only the last 10 points plotted will affect the final color (for alpha of 0.1).
Worse, even if one has set the alpha value to approximately or usually avoid oversaturation, as in the plot above, the correct value depends on the dataset. If there are more points overlapping in that particular region, a manually adjusted alpha setting that worked well for a previous dataset will systematically misrepresent the new dataset:
```
# Double the point count: a hand-tuned alpha does not transfer across datasets.
blues, reds = blue_points(pts=600), red_points(pts=600)
layout = blues + reds + (reds * blues) + (blues * reds)
layout.opts(opts.Points(alpha=0.1))
```
Here **C **and **D **again look qualitatively different, yet still represent the same distributions. Since we're assuming that the point of the visualization is to reveal the underlying dataset, having to tune visualization parameters manually based on the properties of the dataset itself is a serious problem.
To make it even more complicated, the correct alpha also depends on the dot size, because smaller dots have less overlap for the same dataset. With smaller dots, **C **and **D **look more similar, but the color of the dots is now difficult to see in all cases because the dots are too transparent for this size:
```
# Smaller dots (s=10) with the same alpha: less overlap, but individual dots
# become too transparent to read their color.
layout = blues + reds + (reds * blues) + (blues * reds)
layout.opts(opts.Points(s=10, alpha=0.1, edgecolor=None))
```
As you can see, it is very difficult to find settings for the dotsize and alpha parameters that correctly reveal the data, even for relatively small and obvious datasets like these. With larger datasets with unknown contents, it is difficult to detect that such problems are occurring, leading to false conclusions based on inappropriately visualized data.
### 3. Undersampling
With a single category instead of the multiple categories shown above, oversaturation simply obscures spatial differences in density. For instance, 10, 20, and 2000 single-category points overlapping will all look the same visually, for alpha=0.1. Let's again consider an example that has a sum of two normal distributions slightly offset from one another, but no longer using color to separate them into categories:
```
def gaussians(specs=[(1.5,0,1.0),(-1.5,0,1.0)],num=100):
    """
    Concatenate points drawn from one or more 2D Gaussian distributions.

    Each entry of `specs` is a tuple (x, y, s) giving the mean (x, y) and
    standard deviation s of one distribution; `num` points are drawn from
    each.  Returns (all_x, all_y) as two 1-D arrays.  The generator is
    re-seeded on every call, so repeated calls yield identical samples.
    Defaults to two horizontally offset unit-variance Gaussians.
    """
    np.random.seed(1)
    xs_parts, ys_parts = [], []
    for x, y, s in specs:
        # Draw x then y for each spec, preserving the original sampling order.
        xs_parts.append(np.random.normal(x, s, num))
        ys_parts.append(np.random.normal(y, s, num))
    return np.hstack(xs_parts), np.hstack(ys_parts)
# Compare dot size/alpha settings across dataset sizes (panels A-D in the figure).
points = (hv.Points(gaussians(num=600), label="600 points", group="Small dots") +
          hv.Points(gaussians(num=60000), label="60000 points", group="Small dots") +
          hv.Points(gaussians(num=600), label="600 points", group="Tiny dots") +
          hv.Points(gaussians(num=60000), label="60000 points", group="Tiny dots"))
points.opts(
    opts.Points('Small_dots', s=1, alpha=1),
    opts.Points('Tiny_dots', s=0.1, alpha=0.1))
```
Just as shown for the multiple-category case above, finding settings to avoid overplotting and oversaturation is difficult. The "Small dots" setting (size 0.1, full alpha) works fairly well for a sample of 600 points **A,** but it has serious overplotting issues for larger datasets, obscuring the shape and density of the distribution **B.** Using the "Tiny dots" setting (10 times smaller dots, alpha 0.1) works well for the larger dataset **D,** but not at all for the 600-point dataset **C.** Clearly, not all of these settings are accurately conveying the underlying distribution, as they all appear quite different from one another. Similar problems occur for the same size of dataset, but with greater or lesser levels of overlap between points, which of course varies with every new dataset.
In any case, as dataset size increases, at some point plotting a full scatterplot like any of these will become impractical with current plotting software. At this point, people often simply subsample their dataset, plotting 10,000 or perhaps 100,000 randomly selected datapoints. But as panel **A **shows, the shape of an **undersampled** distribution can be very difficult or impossible to make out, leading to incorrect conclusions about the distribution. Such problems can occur even when taking very large numbers of samples, if examining sparsely populated regions of the space, which will approximate panel **A **for some plot settings and panel **C **for others. The actual shape of the distribution is only visible if sufficient datapoints are available in that region *and* appropriate plot settings are used, as in **D,** but ensuring that both conditions are true is a quite difficult process of trial and error, making it very likely that important features of the dataset will be missed.
To avoid undersampling large datasets, researchers often use 2D histograms visualized as heatmaps, rather than scatterplots showing individual points. A heatmap has a fixed-size grid regardless of the dataset size, so that they can make use of all the data. Heatmaps effectively approximate a probability density function over the specified space, with coarser heatmaps averaging out noise or irrelevant variations to reveal an underlying distribution, and finer heatmaps able to represent more details in the distribution.
Let's look at some heatmaps with different numbers of bins for the same two-Gaussians distribution:
```
def heatmap(coords,bins=10,offset=0.0,transform=lambda d,m:d, label=None):
    """
    Bin a set of (x, y) coordinates into a 2D-histogram heatmap image.

    Parameters
    ----------
    coords : pair of arrays (xs, ys) to bin
    bins : grid resolution passed to np.histogram2d
    offset : float in [0, 1); nonzero bins are remapped into [offset, 1.0]
        so low counts stay visible (avoids undersaturation), while empty
        bins remain exactly 0 (background color)
    transform : callable (counts, nonzero_mask) -> array applied to the raw
        counts before normalization (e.g. log or histogram equalization)
    label : optional label forwarded to hv.Image

    Returns an hv.Image of the normalized, transformed counts.
    """
    hist, xs, ys = np.histogram2d(coords[0], coords[1], bins=bins)
    # Flip and transpose the histogram into image display orientation.
    counts = hist[:, ::-1].T
    nonzero = counts != 0
    transformed = transform(counts, nonzero)
    # Guard against a zero span (all transformed values equal), which would
    # otherwise divide by zero and fill the image with NaN/inf.
    span = transformed.max() - transformed.min()
    if span == 0:
        span = 1
    compressed = np.where(nonzero, offset + (1.0-offset)*transformed/span, 0)
    args = dict(label=label) if label else {}
    # NOTE(review): bounds use xs[1]/ys[1] rather than xs[0]/ys[0]; kept as-is
    # to preserve the original extents, but this looks off by one bin edge -- confirm.
    return hv.Image(compressed, bounds=(xs[-1], ys[-1], xs[1], ys[1]), **args)
hv.Layout([heatmap(gaussians(num=60000),bins) for bins in [8,20,200]])
```
As you can see, a too-coarse binning grid **A **cannot represent this distribution faithfully, but with enough bins **C,** the heatmap will approximate a tiny-dot scatterplot like plot **D **in the previous figure. For intermediate grid sizes **B **the heatmap can average out the effects of undersampling; **B **is actually a more faithful representation of the *distribution* than **C **is (which we know is two offset 2D Gaussians), while **C **more faithfully represents the *sampling* (i.e., the individual points drawn from this distribution). Thus choosing a good binning grid size for a heatmap does take some expertise and knowledge of the goals of the visualization, and it's always useful to look at multiple binning-grid spacings for comparison. Still, at least the binning parameter is something meaningful at the data level (how coarse a view of the data is desired?) rather than just a plotting detail (what size and transparency should I use for the points?) that must be determined arbitrarily.
In any case, at least in principle, the heatmap approach can entirely avoid the first three problems above: **overplotting** (since multiple data points sum arithmetically into the grid cell, without obscuring one another), **oversaturation** (because the minimum and maximum counts observed can automatically be mapped to the two ends of a visible color range), and **undersampling** (since the resulting plot size is independent of the number of data points, allowing it to use an unbounded amount of incoming data).
### 4. Undersaturation
Of course, heatmaps come with their own plotting pitfalls. One rarely appreciated issue common to both heatmaps and alpha-based scatterplots is **undersaturation**, where large numbers of data points can be missed entirely because they are spread over many different heatmap bins or many nearly transparent scatter points. To look at this problem, let's again consider a set of multiple 2D Gaussians, but this time with different amounts of spread (standard deviation):
```
# Five Gaussians with equal point counts (10000 each) but very different spreads;
# A-C show the same data with progressively smaller/more transparent dots.
dist = gaussians(specs=[(2,2,0.02), (2,-2,0.1), (-2,-2,0.5), (-2,2,1.0), (0,0,3)],num=10000)
hv.Points(dist) + hv.Points(dist).opts(s=0.1) + hv.Points(dist).opts(s=0.01, alpha=0.05)
```
Plots **A,** **B,** and **C **are all scatterplots for the same data, which is a sum of 5 Gaussian distributions at different locations and with different standard deviations:
1. Location (2,2): very narrow spread
2. Location (2,-2): narrow spread
3. Location (-2,-2): medium spread
4. Location (-2,2): large spread
5. Location (0,0): very large spread
In plot **A,** of course, the very large spread covers up everything else, completely obscuring the structure of this dataset by overplotting. Plots **B **and **C **reveal the structure better, but they required hand tuning and neither one is particularly satisfactory. In **B **there are four clearly visible Gaussians, but all but the largest appear to have the same density of points per pixel, which we know is not the case from how the dataset was constructed, and the smallest is nearly invisible. Each of the five Gaussians has the same number of data points (10000), but the second-largest looks like it has more than the others, and the narrowest one is likely to be overlooked altogether, which is thus a clear example of oversaturation obscuring important features. Yet if we try to combat the oversaturation by using transparency in **C,** we now get a clear problem with **undersaturation** -- the "very large spread" Gaussian is now essentially invisible. Again, there are just as many datapoints in that category, but we'd never even know they were there if only looking at **C.**
Similar problems occur for a heatmap view of the same data:
```
hv.Layout([heatmap(dist,bins) for bins in [8,20,200]])
```
Here the narrow-spread distributions lead to pixels with a very high count, and if the other pixels are linearly ramped into the available color range, from zero to that high count value, then the wider-spread values are obscured (as in **B **) or entirely invisible (as in **C **).
To avoid undersaturation, you can add an offset to ensure that low-count (but nonzero) bins are mapped into a visible color, with the remaining intensity scale used to indicate differences in counts:
```
hv.Layout([heatmap(dist,bins,offset=0.2) for bins in [8,20,200]]).cols(4)
```
Such mapping entirely avoids undersaturation, since all pixels are either clearly zero (in the background color, i.e. white in this case), or a non-background color taken from the colormap. The widest-spread Gaussian is now clearly visible in all cases.
However, the actual structure (5 Gaussians of different spreads) is still not visible. In **A **the problem is clearly too-coarse binning, but in **B **the binning is also somewhat too coarse for this data, since the "very narrow spread" and "narrow spread" Gaussians show up identically, each mapping entirely into a single bin (the two black pixels). **C **shouldn't suffer from too-coarse binning, yet it still looks more like a plot of the "very large spread" distribution alone, than a plot of these five distributions of different spreads, and it is thus still highly misleading despite the correction for undersaturation.
### 5. Underutilized range
So, what is the problem in plot **C **above? By construction, we've avoided the first four pitfalls: **overplotting**, **oversaturation**, **undersampling**, and **undersaturation**. But the problem is now more subtle: differences in datapoint density are not visible between the five Gaussians, because all or nearly all pixels end up being mapped into either the bottom end of the visible range (light gray), or the top end (black, used only for the single pixel holding the "very narrow spread" distribution). The entire rest of the visible colors in this gray colormap are unused, conveying no information to the viewer about the rich structure that we know this distribution contains. If the data were uniformly distributed over the range from minimum to maximum counts per pixel (0 to 10,000, in this case), then the above plot would work well, but that's not the case for this dataset or for most real-world datasets.
So, let's try transforming the data from its default linear representation (integer count values) into something that preserves relative differences in count values but maps them into visually distinct colors. A logarithmic transformation is one common choice:
```
hv.Layout([heatmap(dist,bins,offset=0.2,transform=lambda d,m: np.where(m,np.log1p(d),0)) for bins in [8,20,200]])
```
Aha! We can now see the full structure of the dataset, with all five Gaussians clearly visible in **B **and **C,** and the relative spreads also clearly visible in **C.**
We still have a problem, though. The choice of a logarithmic transform was fairly arbitrary, and it mainly works well because we happened to have used an approximately geometric progression of spread sizes when constructing the example. For large datasets with truly unknown structure, can we have a more principled approach to mapping the dataset values into a visible range?
Yes, if we think of the visualization problem in a different way. The underlying difficulty in plotting this dataset (as for very many real-world datasets) is that the values in each bin are numerically very different (ranging from 10,000, in the bin for the "very narrow spread" Gaussian, to 1 (for single datapoints from the "very large spread" Gaussian)). Given the 256 gray levels available in a normal monitor (and the similarly limited human ability to detect differences in gray values), numerically mapping the data values into the visible range is not going to work well. But given that we are already backing off from a direct numerical mapping in the above approaches for correcting undersaturation and for doing log transformations, what if we entirely abandon the numerical mapping approach, using the numbers only to form a partial ordering of the data values? Such an approach would be a rank-order plot, preserving order and not magnitudes. For 100 gray values, you can think of it as a percentile-based plot, with the lowest 1% of the data values mapping to the first visible gray value, the next 1% mapping to the next visible gray value, and so on to the top 1% of the data values mapping to the gray value 255 (black in this case). The actual data values would be ignored in such plots, but their relative magnitudes would still determine how they map onto colors on the screen, preserving the structure of the distribution rather than the numerical values.
We can approximate such a rank-order or percentile encoding using the histogram equalization function from an image-processing package, which makes sure that each gray level is used for about the same number of pixels in the plot:
```
# Histogram-equalize the counts when scikit-image is available; otherwise fall
# back to the identity transform so the heatmaps still render.
try:
    from skimage.exposure import equalize_hist

    def eq_hist(d, m):
        """Rank-order (histogram-equalized) transform of counts d over mask m."""
        return equalize_hist(1000*d, nbins=100000, mask=m)
except ImportError:
    def eq_hist(d, m):
        """Identity fallback used when scikit-image is not installed."""
        return d
    print("scikit-image not installed; skipping histogram equalization")
hv.Layout([heatmap(dist,bins,transform=eq_hist) for bins in [8,20,200]])
```
Plot **C** now reveals the full structure that we know was in this dataset, i.e. five Gaussians with different spreads, with no arbitrary parameter choices. (Well, there is a "number of bins" parameter for building the histogram for equalizing, but for integer data like this even that parameter can be eliminated entirely.) The differences in counts between pixels are now very clearly visible, across the full (and very wide) range of counts in the original data.
Of course, we've lost the actual counts themselves, and so we can no longer tell just how many datapoints are in the "very narrow spread" pixel in this case. So plot **C** is accurately conveying the structure, but additional information would need to be provided to show the actual counts, by adding a color key mapping from the visible gray values into the actual counts and/or by providing hovering value information.
At this point, one could also consider explicitly highlighting hotspots so that they cannot be overlooked. In plots B and C above, the two highest-density pixels are mapped to the two darkest pixel colors, which can reveal problems with your monitor settings if they were adjusted to make dark text appear blacker. Thus on those monitors, the highest values may not be clearly distinguishable from each other or from nearby grey values, which is a possible downside to fully utilizing the dynamic range available. But once the data is reliably and automatically mapped into a repeatable, reliable, fully utilized range for display, making explicit adjustments (e.g. based on wanting to make hotspots particularly clear) can be done in a principled way that doesn't depend on the actual data distribution (e.g. by just making the top few pixel values into a different color, or by stretching out those portions of the color map to show the extremes more safely across different monitors). Before getting into such specialized manipulations, there's a big pitfall to avoid first:
### 6. Nonuniform colormapping
Let's say you've managed to avoid pitfalls 1-5 somehow. However, there is one more problem waiting to catch you at the last stage, ruining all of your work eliminating the other issues: using a perceptually non-uniform colormap. A heatmap requires a colormap before it can be visualized, i.e., a lookup table from a data value (typically a normalized magnitude in the range 0 to 1) to a pixel color. The goal of a scientific visualization is to reveal the underlying properties of the data to your visual system, and to do so it is necessary to choose colors for each pixel that lead the viewer to perceive that data faithfully. Unfortunately, most of the colormaps in common use in plotting programs are highly *non*uniform.
For instance, in "jet" (the default colormap for matlab and matplotlib until 2015), a large range of data values will all appear in shades of green that are perceptually indistinguishable, and similarly for the yellow regions of their "hot" colormaps:

In this image, a good colormap would have "teeth" equally visible at all data values, as for the perceptually uniform equivalents from the [colorcet](https://github.com/bokeh/colorcet) package:

We can easily see these effects if we look at our example dataset after histogram equalization, where all the different data levels are known to be distributed evenly in the array of normalized magnitudes:
```
# Same 200-bin equalized heatmap rendered with a non-uniform ("hot") vs a
# perceptually uniform ("fire") colormap, side by side.
hv.Layout([heatmap(dist,200,transform=eq_hist,label=cmap).opts(cmap=cmap) for cmap in ["hot","fire"]]).cols(2)
```
Comparing **A** to **B**, it should be clear that the "fire" colormap is revealing much more of the data, accurately rendering the density differences between each of the different blobs. The unsuitable "hot" colormap is mapping all of the high density regions to perceptually indistinguishable shades of bright yellow/white, giving an "oversaturated" appearance even though we know the underlying heatmap array is *not* oversaturated (by construction). Luckily it is easy to avoid this problem; just use one of the 50 perceptually uniform colormaps available in the [colorcet](https://github.com/bokeh/colorcet) package, one of the four shipped with matplotlib [(viridis, plasma, inferno, or magma)](https://bids.github.io/colormap), or the Parula colormap shipped with Matlab.
## Summary
Starting with plots of specific datapoints, we showed how typical visualization techniques will systematically misrepresent the distribution of those points. Here's an example of each of those six problems, all for the same distribution:
```
# One panel per pitfall, all built from the same underlying distribution.
layout = (hv.Points(dist,label="1. Overplotting") +
          hv.Points(dist,label="2. Oversaturation").opts(s=0.1,alpha=0.5) +
          hv.Points((dist[0][::200],dist[1][::200]),label="3. Undersampling").opts(s=2,alpha=0.5) +
          hv.Points(dist,label="4. Undersaturation").opts(s=0.01,alpha=0.05) +
          heatmap(dist,200,offset=0.2,label="5. Underutilized dynamic range") +
          heatmap(dist,200,transform=eq_hist,label="6. Nonuniform colormapping").opts(cmap="hot"))
layout.opts(
    opts.Points(axiswise=False),
    opts.Layout(sublabel_format="", tight=True)).cols(3)
```
Here we could avoid each of these problems by hand, using trial and error based on our knowledge about the underlying dataset, since we created it. But for big data in general, these issues are major problems, because you don't know what the data *should* look like. Thus:
#### For big data, you don't know when the viz is lying
I.e., visualization is supposed to help you explore and understand your data, but if your visualizations are systematically misrepresenting your data because of **overplotting**, **oversaturation**, **undersampling**, **undersaturation**, **underutilized range**, and **nonuniform colormapping**, then you won't be able to discover the real qualities of your data and will be unable to make the right decisions.
Luckily, using the systematic approach outlined in this discussion, you can avoid *all* of these pitfalls, allowing you to render your data faithfully without requiring *any* "magic parameters" that depend on your dataset:
```
# The parameter-free pipeline: histogram-equalized counts rendered with a
# perceptually uniform colormap.
heatmap(dist,200,transform=eq_hist).opts(cmap="fire")
```
### [Datashader](https://github.com/bokeh/datashader)
The steps above show how to avoid the six main plotting pitfalls by hand, but it can be awkward and relatively slow to do so. Luckily there is a new Python library available to automate and optimize these steps, named [Datashader](https://github.com/bokeh/datashader). Datashader avoids users having to make dataset-dependent decisions and parameter settings when visualizing a new dataset. Datashader makes it practical to create accurate visualizations of datasets too large to understand directly, up to a billion points on a normal laptop and larger datasets on a compute cluster. As a simple teaser, the above steps can be expressed very concisely using the Datashader interface provided by [HoloViews](http://holoviews.org):
```
# Let Datashader do the aggregation + equalization + colormapping automatically.
hv.output(size=200)
datashade(hv.Points(dist))
```
Without any change to the settings, the same command will work with dataset sizes too large for most plotting programs, like this 50-million-point version of the distribution:
```
# Regenerate with 10M points per Gaussian (50M total); same command still works.
dist = gaussians(specs=[(2,2,0.02), (2,-2,0.1), (-2,-2,0.5), (-2,2,1.0), (0,0,3)], num=10000000)
datashade(hv.Points(dist))
```
See the [Datashader web site](https://raw.githubusercontent.com/bokeh/datashader/master/examples/README.md) for details and examples to help you get started.
| github_jupyter |
**Chapter 11 – Deep Learning**
_This notebook contains all the sample code and solutions to the exercises in chapter 11._
# Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
```
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
def reset_graph(seed=42):
    """Reset the default TF graph and seed TF + NumPy for reproducible runs.

    NOTE(review): relies on `tf` (tensorflow) being imported by a later
    cell before this function is first called.
    """
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
# Global font sizes for axis labels and tick labels.
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a 300-dpi PNG.

    The file is written to images/<CHAPTER_ID>/<fig_id>.png under
    PROJECT_ROOT_DIR; the directory is created if it does not exist.
    """
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    # Fix: plt.savefig fails on a fresh checkout where images/deep/ does
    # not yet exist, so create the target directory up front.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
```
# Vanishing/Exploding Gradients Problem
```
def logit(z):
    """Logistic sigmoid: 1 / (1 + exp(-z)), element-wise.

    NOTE(review): despite the name (kept because later cells call it),
    this computes the sigmoid, not its inverse (the true logit).
    """
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
# Plot the sigmoid with its asymptotes and the tangent at z=0, annotating
# the saturating and quasi-linear regions.
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')        # y = 0 asymptote
plt.plot([-5, 5], [1, 1], 'k--')       # y = 1 asymptote
plt.plot([0, 0], [-0.2, 1.2], 'k-')    # y axis
plt.plot([-5, 5], [-3/4, 7/4], 'g--')  # tangent at z=0 (slope 1/4, through (0, 0.5))
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("sigmoid_saturation_plot")
plt.show()
```
## Xavier and He Initialization
Note: the book uses `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function. The main differences relevant to this chapter are:
* several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc.
* the default `activation` is now `None` rather than `tf.nn.relu`.
* it does not support `tensorflow.contrib.framework.arg_scope()` (introduced later in chapter 11).
* it does not support regularizer params (introduced later in chapter 11).
```
import tensorflow as tf
# Dense layer with He (variance-scaling) initialization, suited to ReLU.
reset_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
he_init = tf.variance_scaling_initializer()
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                          kernel_initializer=he_init, name="hidden1")
```
## Nonsaturating Activation Functions
### Leaky ReLU
```
def leaky_relu(z, alpha=0.01):
    """Leaky ReLU: z for z >= 0, alpha*z for z < 0 (element-wise)."""
    return np.where(z < 0, alpha * z, z)
# Plot Leaky ReLU (alpha=0.05) and annotate the "leak" for z < 0.
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
save_fig("leaky_relu_plot")
plt.show()
```
Implementing Leaky ReLU in TensorFlow:
```
# Same leaky ReLU implemented with TF ops, usable as a layer activation.
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
def leaky_relu(z, name=None):
    """Leaky ReLU with a fixed 0.01 negative slope."""
    return tf.maximum(0.01 * z, z, name=name)
hidden1 = tf.layers.dense(X, n_hidden1, activation=leaky_relu, name="hidden1")
```
Let's train a neural network on MNIST using the Leaky ReLU. First let's create the graph:
```
# Two-hidden-layer MNIST classifier with leaky-ReLU activations.
reset_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=leaky_relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=leaky_relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
    # Cross-entropy computed directly from logits (no explicit softmax layer).
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    # Accuracy: fraction of samples whose top-1 logit matches the label.
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
Let's load the data:
**Warning**: `tf.examples.tutorials.mnist` is deprecated. We will use `tf.keras.datasets.mnist` instead.
```
# Load MNIST via Keras, flatten to 784-vectors scaled to [0, 1], and carve
# a 5,000-sample validation set off the front of the training set.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
def shuffle_batch(X, y, batch_size):
    """Yield (X_batch, y_batch) mini-batches in a fresh random order.

    Every sample appears exactly once per pass; batch sizes may differ by
    one when len(X) is not a multiple of batch_size.
    """
    rnd_idx = np.random.permutation(len(X))
    # Fix: guarantee at least one batch when len(X) < batch_size --
    # np.array_split raises ValueError for 0 sections.
    n_batches = max(1, len(X) // batch_size)
    for batch_idx in np.array_split(rnd_idx, n_batches):
        yield X[batch_idx], y[batch_idx]
n_epochs = 40
batch_size = 50
# Mini-batch SGD; report last-batch and validation accuracy every 5 epochs.
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if epoch % 5 == 0:
            acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
            acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
            print(epoch, "Batch accuracy:", acc_batch, "Validation accuracy:", acc_valid)
    save_path = saver.save(sess, "./my_model_final.ckpt")
```
### ELU
```
def elu(z, alpha=1):
    """ELU activation: z for z >= 0, alpha*(exp(z)-1) for z < 0 (element-wise)."""
    negative_branch = alpha * (np.exp(z) - 1)
    return np.where(z < 0, negative_branch, z)
# Plot ELU (alpha=1); it saturates toward -1 for large negative z.
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')   # lower asymptote at -alpha
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("elu_plot")
plt.show()
```
Implementing ELU in TensorFlow is trivial, just specify the activation function when building each layer:
```
# ELU is built into TF: pass tf.nn.elu as the layer activation.
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, name="hidden1")
```
### SELU
This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ<sub>1</sub> or ℓ<sub>2</sub> regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. If you break self-normalization, SELU will not necessarily outperform other activation functions.
```
from scipy.special import erfc
# alpha and scale to self normalize with mean 0 and standard deviation 1
# (see equation 14 in the paper):
# NOTE(review): these should evaluate to roughly alpha ~ 1.6733 and
# scale ~ 1.0507, the values reported in the SELU paper -- verify.
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
    """Scaled ELU (SELU): scale * elu(z, alpha), defaulting to the
    self-normalizing constants computed above."""
    activated = elu(z, alpha)
    return scale * activated
# Plot SELU; the dashed line marks the lower asymptote (about -1.758,
# i.e. -scale*alpha).
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("selu_plot")
plt.show()
```
By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
```
# Demonstrate self-normalization: push standardized noise through 1,000
# SELU layers with LeCun-initialized weights; mean/std stay near 0/1.
np.random.seed(42)
Z = np.random.normal(size=(500, 100)) # standardized inputs
for layer in range(1000):
    W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization
    Z = selu(np.dot(Z, W))
    means = np.mean(Z, axis=0).mean()
    stds = np.std(Z, axis=0).mean()
    if layer % 100 == 0:
        print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds))
```
The `tf.nn.selu()` function was added in TensorFlow 1.4. For earlier versions, you can use the following implementation:
```
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
    """SELU activation for TensorFlow < 1.4: scale * elu(z, alpha).

    Bug fix: the default values were swapped (scale defaulted to
    alpha_0_1 and alpha to scale_0_1), which breaks the self-normalizing
    property derived in Klambauer et al. (2017), eq. 14.
    """
    return scale * tf.where(z >= 0.0, z, alpha * tf.nn.elu(z))
```
However, the SELU activation function cannot be used along with regular Dropout (this would cancel the SELU activation function's self-normalizing property). Fortunately, there is a Dropout variant called Alpha Dropout proposed in the same paper. It is available in `tf.contrib.nn.alpha_dropout()` since TF 1.4 (or check out [this implementation](https://github.com/bioinf-jku/SNNs/blob/master/selu.py) by the Institute of Bioinformatics, Johannes Kepler University Linz).
Let's create a neural net for MNIST using the SELU activation function:
```
# MNIST classifier identical to the leaky-ReLU one, but with SELU
# activations in both hidden layers.
reset_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=selu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=selu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 40
batch_size = 50
```
Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
```
# Standardize inputs with the training-set mean/std (SELU assumes
# standardized inputs), then train as before.
means = X_train.mean(axis=0, keepdims=True)
stds = X_train.std(axis=0, keepdims=True) + 1e-10  # avoid divide-by-zero on constant pixels
X_val_scaled = (X_valid - means) / stds
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            X_batch_scaled = (X_batch - means) / stds
            sess.run(training_op, feed_dict={X: X_batch_scaled, y: y_batch})
        if epoch % 5 == 0:
            acc_batch = accuracy.eval(feed_dict={X: X_batch_scaled, y: y_batch})
            acc_valid = accuracy.eval(feed_dict={X: X_val_scaled, y: y_valid})
            print(epoch, "Batch accuracy:", acc_batch, "Validation accuracy:", acc_valid)
    save_path = saver.save(sess, "./my_model_final_selu.ckpt")
```
# Batch Normalization
Note: the book uses `tensorflow.contrib.layers.batch_norm()` rather than `tf.layers.batch_normalization()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.batch_normalization()`, because anything in the contrib module may change or be deleted without notice. Instead of using the `batch_norm()` function as a regularizer parameter to the `fully_connected()` function, we now use `batch_normalization()` and we explicitly create a distinct layer. The parameters are a bit different, in particular:
* `decay` is renamed to `momentum`,
* `is_training` is renamed to `training`,
* `updates_collections` is removed: the update operations needed by batch normalization are added to the `UPDATE_OPS` collection and you need to explicitly run these operations during training (see the execution phase below),
* we don't need to specify `scale=True`, as that is the default.
Also note that in order to run batch norm just _before_ each hidden layer's activation function, we apply the ELU activation function manually, right after the batch norm layer.
Note: since the `tf.layers.dense()` function is incompatible with `tf.contrib.layers.arg_scope()` (which is used in the book), we now use python's `functools.partial()` function instead. It makes it easy to create a `my_dense_layer()` function that just calls `tf.layers.dense()` with the desired parameters automatically set (unless they are overridden when calling `my_dense_layer()`). As you can see, the code remains very similar.
```
reset_graph()
import tensorflow as tf
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# `training` switches batch norm between batch statistics (True) and the
# moving averages (False, the default used at inference time).
training = tf.placeholder_with_default(False, shape=(), name='training')
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = tf.layers.batch_normalization(hidden1, training=training, momentum=0.9)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = tf.layers.batch_normalization(hidden2, training=training, momentum=0.9)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = tf.layers.batch_normalization(logits_before_bn, training=training,
                                       momentum=0.9)
# NOTE(review): the graph above is discarded -- it is reset here and
# rebuilt with functools.partial() in the next cell.
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
training = tf.placeholder_with_default(False, shape=(), name='training')
```
To avoid repeating the same parameters over and over again, we can use Python's `partial()` function:
```
from functools import partial
# Bind the shared batch-norm arguments once; per-layer calls stay concise.
my_batch_norm_layer = partial(tf.layers.batch_normalization,
                              training=training, momentum=0.9)
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = my_batch_norm_layer(hidden1)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = my_batch_norm_layer(hidden2)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = my_batch_norm_layer(logits_before_bn)
```
Let's build a neural net for MNIST, using the ELU activation function and Batch Normalization at each layer:
```
# MNIST net with He init, ELU activations, and batch norm at every layer
# (applied before each activation).
reset_graph()
batch_norm_momentum = 0.9
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
training = tf.placeholder_with_default(False, shape=(), name='training')
with tf.name_scope("dnn"):
    he_init = tf.variance_scaling_initializer()
    my_batch_norm_layer = partial(
        tf.layers.batch_normalization,
        training=training,
        momentum=batch_norm_momentum)
    my_dense_layer = partial(
        tf.layers.dense,
        kernel_initializer=he_init)
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    bn1 = tf.nn.elu(my_batch_norm_layer(hidden1))
    hidden2 = my_dense_layer(bn1, n_hidden2, name="hidden2")
    bn2 = tf.nn.elu(my_batch_norm_layer(hidden2))
    logits_before_bn = my_dense_layer(bn2, n_outputs, name="outputs")
    logits = my_batch_norm_layer(logits_before_bn)
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    # NOTE(review): `learning_rate` comes from an earlier cell.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
Note: since we are using `tf.layers.batch_normalization()` rather than `tf.contrib.layers.batch_norm()` (as in the book), we need to explicitly run the extra update operations needed by batch normalization (`sess.run([training_op, extra_update_ops],...`).
```
n_epochs = 20
batch_size = 200
# Batch-norm moving averages are updated by ops in the UPDATE_OPS
# collection; run them explicitly alongside the training op.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run([training_op, extra_update_ops],
                     feed_dict={training: True, X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
```
What!? That's not a great accuracy for MNIST. Of course, if you train for longer it will get much better accuracy, but with such a shallow network, Batch Norm and ELU are unlikely to have very positive impact: they shine mostly for much deeper nets.
Note that you could also make the training operation depend on the update operations:
```python
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
training_op = optimizer.minimize(loss)
```
This way, you would just have to evaluate the `training_op` during training, and TensorFlow would automatically run the update operations as well:
```python
sess.run(training_op, feed_dict={training: True, X: X_batch, y: y_batch})
```
One more thing: notice that the list of trainable variables is shorter than the list of all global variables. This is because the moving averages are non-trainable variables. If you want to reuse a pretrained neural network (see below), you must not forget these non-trainable variables.
```
# Moving averages are global but non-trainable, so the first list is shorter.
[v.name for v in tf.trainable_variables()]
[v.name for v in tf.global_variables()]
```
## Gradient Clipping
Let's create a simple neural net for MNIST and add gradient clipping. The first part is the same as earlier (except we added a few more layers to demonstrate reusing pretrained models, see below):
```
# Deeper 5-hidden-layer MNIST net; the extra layers are reused later in
# the pretrained-model section.
reset_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 50
n_hidden5 = 50
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.relu, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
```
Now we apply gradient clipping. For this, we need to get the gradients, use the `clip_by_value()` function to clip them, then apply them:
```
threshold = 1.0

optimizer = tf.train.GradientDescentOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(loss)
# Clip every gradient element into [-threshold, threshold].
# Robustness fix: compute_gradients() returns (None, var) for variables
# the loss does not depend on, and tf.clip_by_value crashes on None --
# skip those pairs (such variables receive no update either way).
capped_gvs = [(tf.clip_by_value(grad, -threshold, threshold), var)
              for grad, var in grads_and_vars if grad is not None]
training_op = optimizer.apply_gradients(capped_gvs)
```
The rest is the same as usual:
```
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    # Named so it can be retrieved later as "eval/accuracy:0".
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 20
batch_size = 200
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
```
## Reusing Pretrained Layers
## Reusing a TensorFlow Model
First you need to load the graph's structure. The `import_meta_graph()` function does just that, loading the graph's operations into the default graph, and returning a `Saver` that you can then use to restore the model's state. Note that by default, a `Saver` saves the structure of the graph into a `.meta` file, so that's the file you should load:
```
reset_graph()
# Load the saved graph structure from the .meta file; returns a Saver
# that can restore the variable values.
saver = tf.train.import_meta_graph("./my_model_final.ckpt.meta")
```
Next you need to get a handle on all the operations you will need for training. If you don't know the graph's structure, you can list all the operations:
```
# List every operation in the restored graph (verbose).
for op in tf.get_default_graph().get_operations():
    print(op.name)
```
Oops, that's a lot of operations! It's much easier to use TensorBoard to visualize the graph. The following hack will allow you to visualize the graph within Jupyter (if it does not work with your browser, you will need to use a `FileWriter` to save the graph and then visualize it in TensorBoard):
```
# Render the default graph inline in Jupyter (TensorBoard-style view).
from tensorflow_graph_in_jupyter import show_graph
show_graph(tf.get_default_graph())
```
Once you know which operations you need, you can get a handle on them using the graph's `get_operation_by_name()` or `get_tensor_by_name()` methods:
```
# Get handles on the restored graph's tensors and ops by name.
# ":0" selects the first output tensor of the op with that name.
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")

accuracy = tf.get_default_graph().get_tensor_by_name("eval/accuracy:0")

training_op = tf.get_default_graph().get_operation_by_name("GradientDescent")
```
If you are the author of the original model, you could make things easier for people who will reuse your model by giving operations very clear names and documenting them. Another approach is to create a collection containing all the important operations that people will want to get a handle on:
```
# Register the key ops in a named collection so reusers can fetch them
# without knowing the internal op names.
for op in (X, y, accuracy, training_op):
    tf.add_to_collection("my_important_ops", op)
```
This way people who reuse your model will be able to simply write:
```
# The collection preserves insertion order, so unpacking matches the order
# the ops were added in.
X, y, accuracy, training_op = tf.get_collection("my_important_ops")
```
Now you can start a session, restore the model's state and continue training on your data:
```
# Restore the trained weights into the imported graph.
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")
    # continue training the model...
```
Actually, let's test this for real!
```
# Restore the checkpoint (no init needed — restore sets all variables),
# then keep training and save under a new checkpoint name.
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")

    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_new_model_final.ckpt")
```
Alternatively, if you have access to the Python code that built the original graph, you can use it instead of `import_meta_graph()`:
```
# Rebuild the exact same graph in code (instead of import_meta_graph):
# 5 hidden ReLU layers, softmax cross-entropy loss, and the same
# gradient-clipped SGD training op as before.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 50
n_hidden5 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.relu, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

learning_rate = 0.01
threshold = 1.0

# Same clipped-gradient training op as in the original model.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -threshold, threshold), var)
              for grad, var in grads_and_vars]
training_op = optimizer.apply_gradients(capped_gvs)

saver = tf.train.Saver()
```
And continue training:
```
# Restore the original checkpoint into this freshly built (identical) graph
# and continue training.
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")

    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_new_model_final.ckpt")
```
In general you will want to reuse only the lower layers. If you are using `import_meta_graph()` it will load the whole graph, but you can simply ignore the parts you do not need. In this example, we add a new 4th hidden layer on top of the pretrained 3rd layer (ignoring the old 4th hidden layer). We also build a new output layer, the loss for this new output, and a new optimizer to minimize it. We also need another saver to save the whole graph (containing both the entire old graph plus the new operations), and an initialization operation to initialize all the new variables:
```
# Import the whole pretrained graph, then build NEW layers on top of the
# pretrained hidden3 output (the old hidden4/outputs stay in the graph but
# are simply unused).
reset_graph()

n_hidden4 = 20  # new layer
n_outputs = 10  # new layer

saver = tf.train.import_meta_graph("./my_model_final.ckpt.meta")

X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")

# Output of the pretrained 3rd hidden layer (after its ReLU).
hidden3 = tf.get_default_graph().get_tensor_by_name("dnn/hidden3/Relu:0")

new_hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="new_hidden4")
new_logits = tf.layers.dense(new_hidden4, n_outputs, name="new_outputs")

with tf.name_scope("new_loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=new_logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("new_eval"):
    correct = tf.nn.in_top_k(new_logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("new_train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

# init covers the new variables; new_saver saves the whole combined graph.
init = tf.global_variables_initializer()
new_saver = tf.train.Saver()
```
And we can train this new model:
```
# Initialize everything, then restore overwrites the pretrained variables;
# only the new layers keep their random initialization.
with tf.Session() as sess:
    init.run()
    saver.restore(sess, "./my_model_final.ckpt")

    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = new_saver.save(sess, "./my_new_model_final.ckpt")
```
If you have access to the Python code that built the original graph, you can just reuse the parts you need and drop the rest:
```
# Rebuild only what we need: hidden layers 1-3 match the pretrained model
# (same names, so their variables can be restored), hidden4 and the output
# layer are new.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300 # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new!
n_outputs = 10  # new!

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1") # reused
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2") # reused
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3") # reused
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4") # new!
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs") # new!

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
```
However, you must create one `Saver` to restore the pretrained model (giving it the list of variables to restore, or else it will complain that the graphs don't match), and another `Saver` to save the new model, once it is trained:
```
# Two savers: restore_saver knows only layers 1-3 (so restoring against the
# old checkpoint works), saver covers the full new graph for saving.
# init.run() first, then restore overwrites layers 1-3 with pretrained weights.
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope="hidden[123]") # regular expression
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")

    for epoch in range(n_epochs):                                      # not shown in the book
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size): # not shown
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})  # not shown
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid}) # not shown
        print(epoch, "Validation accuracy:", accuracy_val)             # not shown

    save_path = saver.save(sess, "./my_new_model_final.ckpt")
```
## Reusing Models from Other Frameworks
In this example, for each variable we want to reuse, we find its initializer's assignment operation, and we get its second input, which corresponds to the initialization value. When we run the initializer, we replace the initialization values with the ones we want, using a `feed_dict`:
```
# Inject externally trained weights by feeding the variable initializers:
# each variable's ".../Assign" op takes (variable, init_value); feeding the
# init_value input during init replaces the random initialization.
reset_graph()

n_inputs = 2
n_hidden1 = 3

original_w = [[1., 2., 3.], [4., 5., 6.]] # Load the weights from the other framework
original_b = [7., 8., 9.]                 # Load the biases from the other framework

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
# [...] Build the rest of the model

# Get a handle on the assignment nodes for the hidden1 variables
graph = tf.get_default_graph()
assign_kernel = graph.get_operation_by_name("hidden1/kernel/Assign")
assign_bias = graph.get_operation_by_name("hidden1/bias/Assign")
# inputs[1] of an Assign op is the value being assigned (the init value).
init_kernel = assign_kernel.inputs[1]
init_bias = assign_bias.inputs[1]

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init, feed_dict={init_kernel: original_w, init_bias: original_b})
    # [...] Train the model on your new task
    print(hidden1.eval(feed_dict={X: [[10.0, 11.0]]})) # not shown in the book
```
Note: the weights variable created by the `tf.layers.dense()` function is called `"kernel"` (instead of `"weights"`, as when using `tf.contrib.layers.fully_connected()` in the book), and the biases variable is called `"bias"` instead of `"biases"`.
Another approach (initially used in the book) would be to create dedicated assignment nodes and dedicated placeholders. This is more verbose and less efficient, but you may find this more explicit:
```
# Alternative: dedicated placeholders + assignment ops, run after init.
# More verbose, but the weight-loading mechanism is explicit in the graph.
reset_graph()

n_inputs = 2
n_hidden1 = 3

original_w = [[1., 2., 3.], [4., 5., 6.]] # Load the weights from the other framework
original_b = [7., 8., 9.]                 # Load the biases from the other framework

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
# [...] Build the rest of the model

# Get a handle on the variables of layer hidden1
with tf.variable_scope("", default_name="", reuse=True): # root scope
    hidden1_weights = tf.get_variable("hidden1/kernel")
    hidden1_biases = tf.get_variable("hidden1/bias")

# Create dedicated placeholders and assignment nodes
original_weights = tf.placeholder(tf.float32, shape=(n_inputs, n_hidden1))
original_biases = tf.placeholder(tf.float32, shape=n_hidden1)
assign_hidden1_weights = tf.assign(hidden1_weights, original_weights)
assign_hidden1_biases = tf.assign(hidden1_biases, original_biases)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    # Overwrite the random init with the externally loaded values.
    sess.run(assign_hidden1_weights, feed_dict={original_weights: original_w})
    sess.run(assign_hidden1_biases, feed_dict={original_biases: original_b})
    # [...] Train the model on your new task
    print(hidden1.eval(feed_dict={X: [[10.0, 11.0]]}))
```
Note that we could also get a handle on the variables using `get_collection()` and specifying the `scope`:
```
# All global variables whose name starts with the "hidden1" scope.
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden1")
```
Or we could use the graph's `get_tensor_by_name()` method:
```
# Direct lookup by tensor name (variable name + ":0" output index).
tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
tf.get_default_graph().get_tensor_by_name("hidden1/bias:0")
```
### Freezing the Lower Layers
```
# Freezing approach #1: pass var_list to minimize() so the optimizer only
# updates hidden3/hidden4/outputs; hidden1-2 keep their restored weights.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300 # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new!
n_outputs = 10  # new!

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1") # reused
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2") # reused
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3") # reused
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4") # new!
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs") # new!

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):                                         # not shown in the book
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)     # not shown
    # Only these variables will be trained; hidden1-2 are frozen.
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope="hidden[34]|outputs")
    training_op = optimizer.minimize(loss, var_list=train_vars)

reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope="hidden[123]") # regular expression
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")

    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_new_model_final.ckpt")

# Freezing approach #2: tf.stop_gradient() blocks backpropagation below
# hidden2, so hidden1-2 are frozen without touching the optimizer.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300 # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new!
n_outputs = 10  # new!

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                              name="hidden1") # reused frozen
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu,
                              name="hidden2") # reused frozen
    # Gradients do not flow past this point during backprop.
    hidden2_stop = tf.stop_gradient(hidden2)
    hidden3 = tf.layers.dense(hidden2_stop, n_hidden3, activation=tf.nn.relu,
                              name="hidden3") # reused, not frozen
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu,
                              name="hidden4") # new!
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs") # new!

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
```
The training code is exactly the same as earlier:
```
# Same training procedure: restore layers 1-3 from the old checkpoint,
# train (layers 1-2 stay frozen via stop_gradient), save the new model.
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope="hidden[123]") # regular expression
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")

    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_new_model_final.ckpt")
```
### Caching the Frozen Layers
```
# Since hidden1-2 are frozen, their output for each training instance never
# changes: compute hidden2's activations ONCE for the whole training set,
# then feed cached batches directly into the hidden2 tensor during training.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300 # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new!
n_outputs = 10  # new!

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                              name="hidden1") # reused frozen
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu,
                              name="hidden2") # reused frozen & cached
    hidden2_stop = tf.stop_gradient(hidden2)
    hidden3 = tf.layers.dense(hidden2_stop, n_hidden3, activation=tf.nn.relu,
                              name="hidden3") # reused, not frozen
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu,
                              name="hidden4") # new!
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs") # new!

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope="hidden[123]") # regular expression
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

import numpy as np

n_batches = len(X_train) // batch_size

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")

    # One forward pass over the whole training/validation sets to cache
    # the frozen layers' output.
    h2_cache = sess.run(hidden2, feed_dict={X: X_train})
    h2_cache_valid = sess.run(hidden2, feed_dict={X: X_valid}) # not shown in the book

    for epoch in range(n_epochs):
        # Shuffle the cached activations and labels with the same permutation.
        shuffled_idx = np.random.permutation(len(X_train))
        hidden2_batches = np.array_split(h2_cache[shuffled_idx], n_batches)
        y_batches = np.array_split(y_train[shuffled_idx], n_batches)
        for hidden2_batch, y_batch in zip(hidden2_batches, y_batches):
            # Feeding hidden2 directly skips recomputing layers 1-2.
            sess.run(training_op, feed_dict={hidden2:hidden2_batch, y:y_batch})

        accuracy_val = accuracy.eval(feed_dict={hidden2: h2_cache_valid, # not shown
                                                y: y_valid})             # not shown
        print(epoch, "Validation accuracy:", accuracy_val)               # not shown

    save_path = saver.save(sess, "./my_new_model_final.ckpt")
```
# Faster Optimizers
## Momentum optimization
```
# Momentum optimization: accumulates a velocity vector (beta = 0.9).
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                       momentum=0.9)
```
## Nesterov Accelerated Gradient
```
# Nesterov variant: gradient is measured ahead, in the direction of momentum.
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                       momentum=0.9, use_nesterov=True)
```
## AdaGrad
```
# AdaGrad: per-parameter learning rates scaled by accumulated squared gradients.
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
```
## RMSProp
```
# RMSProp: like AdaGrad but with an exponentially decaying accumulator (decay).
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                      momentum=0.9, decay=0.9, epsilon=1e-10)
```
## Adam Optimization
```
# Adam: combines momentum-style first-moment and RMSProp-style second-moment estimates.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
```
## Learning Rate Scheduling
```
# Exponential learning-rate decay: lr = initial * decay_rate^(step/decay_steps).
# global_step is incremented by minimize(), which drives the schedule.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):       # not shown in the book
    initial_learning_rate = 0.1
    decay_steps = 10000
    decay_rate = 1/10
    # Non-trainable counter; minimize(global_step=...) increments it each step.
    global_step = tf.Variable(0, trainable=False, name="global_step")
    learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step,
                                               decay_steps, decay_rate)
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    training_op = optimizer.minimize(loss, global_step=global_step)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 5
batch_size = 50

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_model_final.ckpt")
```
# Avoiding Overfitting Through Regularization
## $\ell_1$ and $\ell_2$ regularization
Let's implement $\ell_1$ regularization manually. First, we create the model, as usual (with just one hidden layer this time, for simplicity):
```
# One-hidden-layer net used to demonstrate manual l1 regularization.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    logits = tf.layers.dense(hidden1, n_outputs, name="outputs")
```
Next, we get a handle on the layer weights, and we compute the total loss, which is equal to the sum of the usual cross entropy loss and the $\ell_1$ loss (i.e., the absolute values of the weights):
```
# Manual l1 penalty: total loss = mean cross entropy + scale * sum|W|
# over both layers' kernels (biases are not regularized).
W1 = tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
W2 = tf.get_default_graph().get_tensor_by_name("outputs/kernel:0")

scale = 0.001 # l1 regularization hyperparameter

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                              logits=logits)
    base_loss = tf.reduce_mean(xentropy, name="avg_xentropy")
    reg_losses = tf.reduce_sum(tf.abs(W1)) + tf.reduce_sum(tf.abs(W2))
    loss = tf.add(base_loss, scale * reg_losses, name="loss")
```
The rest is just as usual:
```
# Standard eval ops and training loop, minimizing the regularized loss.
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 20
batch_size = 200

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_model_final.ckpt")
```
Alternatively, we can pass a regularization function to the `tf.layers.dense()` function, which will use it to create operations that will compute the regularization loss, and it adds these operations to the collection of regularization losses. The beginning is the same as above:
```
# Same placeholders as before; this time regularization will be attached
# per-layer via kernel_regularizer.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
```
Next, we will use Python's `partial()` function to avoid repeating the same arguments over and over again. Note that we set the `kernel_regularizer` argument:
```
# partial() pre-binds the shared arguments; each dense layer gets an l1
# kernel regularizer whose loss is collected in REGULARIZATION_LOSSES.
scale = 0.001

my_dense_layer = partial(
    tf.layers.dense, activation=tf.nn.relu,
    kernel_regularizer=tf.contrib.layers.l1_regularizer(scale))

with tf.name_scope("dnn"):
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    hidden2 = my_dense_layer(hidden1, n_hidden2, name="hidden2")
    # Output layer: no activation (raw logits), but still l1-regularized.
    logits = my_dense_layer(hidden2, n_outputs, activation=None,
                            name="outputs")
```
Next we must add the regularization losses to the base loss:
```
# Total loss = cross entropy + every loss the regularizers registered
# in the REGULARIZATION_LOSSES collection.
with tf.name_scope("loss"):                                     # not shown in the book
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(  # not shown
        labels=y, logits=logits)                                # not shown
    base_loss = tf.reduce_mean(xentropy, name="avg_xentropy")   # not shown
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    loss = tf.add_n([base_loss] + reg_losses, name="loss")
```
And the rest is the same as usual:
```
# Standard eval ops and training loop on the regularized loss.
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 20
batch_size = 200

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_model_final.ckpt")
```
## Dropout
Note: the book uses `tf.contrib.layers.dropout()` rather than `tf.layers.dropout()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dropout()`, because anything in the contrib module may change or be deleted without notice. The `tf.layers.dropout()` function is almost identical to the `tf.contrib.layers.dropout()` function, except for a few minor differences. Most importantly:
* you must specify the dropout rate (`rate`) rather than the keep probability (`keep_prob`), where `rate` is simply equal to `1 - keep_prob`,
* the `is_training` parameter is renamed to `training`.
```
# Dropout on the input and after each hidden layer. The `training`
# placeholder defaults to False, so dropout is only active when the feed
# dict sets training=True (i.e. during the training step, not evaluation).
reset_graph()

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

training = tf.placeholder_with_default(False, shape=(), name='training')

dropout_rate = 0.5  # == 1 - keep_prob
X_drop = tf.layers.dropout(X, dropout_rate, training=training)

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X_drop, n_hidden1, activation=tf.nn.relu,
                              name="hidden1")
    hidden1_drop = tf.layers.dropout(hidden1, dropout_rate, training=training)
    hidden2 = tf.layers.dense(hidden1_drop, n_hidden2, activation=tf.nn.relu,
                              name="hidden2")
    hidden2_drop = tf.layers.dropout(hidden2, dropout_rate, training=training)
    logits = tf.layers.dense(hidden2_drop, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 20
batch_size = 50

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            # training=True enables dropout for this step only.
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch, training: True})
        # Evaluation uses the default training=False, so dropout is off.
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_model_final.ckpt")
```
## Max norm
Let's go back to a plain and simple neural net for MNIST with just 2 hidden layers:
```
# Plain two-hidden-layer MNIST net, used to demonstrate max-norm clipping.
reset_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

learning_rate = 0.01
momentum = 0.9

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
```
Next, let's get a handle on the first hidden layer's weight and create an operation that will compute the clipped weights using the `clip_by_norm()` function. Then we create an assignment operation to assign the clipped weights to the weights variable:
```
# Max-norm for hidden1: clip each row of the kernel (axes=1) to norm <=
# threshold, then assign the clipped matrix back to the variable.
threshold = 1.0
weights = tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
clipped_weights = tf.clip_by_norm(weights, clip_norm=threshold, axes=1)
clip_weights = tf.assign(weights, clipped_weights)
```
We can do this as well for the second hidden layer:
```
# Same clip-and-assign op for the second hidden layer's kernel.
weights2 = tf.get_default_graph().get_tensor_by_name("hidden2/kernel:0")
clipped_weights2 = tf.clip_by_norm(weights2, clip_norm=threshold, axes=1)
clip_weights2 = tf.assign(weights2, clipped_weights2)
```
Let's add an initializer and a saver:
```
# Variable initializer and checkpoint saver for the max-norm model.
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
And now we can train the model. It's pretty much as usual, except that right after running the `training_op`, we run the `clip_weights` and `clip_weights2` operations:
```
# Training loop; the clip ops are run after every training step so the
# weights never exceed the max norm.
n_epochs = 20
batch_size = 50

with tf.Session() as sess:                                             # not shown in the book
    init.run()                                                         # not shown
    for epoch in range(n_epochs):                                      # not shown
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size): # not shown
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            clip_weights.eval()
            clip_weights2.eval()                                       # not shown
        acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})  # not shown
        print(epoch, "Validation accuracy:", acc_valid)                # not shown

    save_path = saver.save(sess, "./my_model_final.ckpt")              # not shown
```
The implementation above is straightforward and it works fine, but it is a bit messy. A better approach is to define a `max_norm_regularizer()` function:
```
def max_norm_regularizer(threshold, axes=1, name="max_norm",
                         collection="max_norm"):
    """Build a max-norm "regularizer" usable as a kernel_regularizer.

    Unlike l1/l2 regularizers, the returned callable contributes no loss
    term. Instead, for each weight tensor it is given, it creates a
    clip-to-norm assignment op and registers that op in the given graph
    collection, so the caller can fetch all clip ops and run them after
    each training step.
    """
    def max_norm(weights):
        # Clip along `axes` so each row's norm is at most `threshold`,
        # then overwrite the variable in place with the clipped values.
        renormed = tf.clip_by_norm(weights, clip_norm=threshold, axes=axes)
        update_op = tf.assign(weights, renormed, name=name)
        tf.add_to_collection(collection, update_op)
        # Max-norm works by clipping, not penalizing: no loss term.
        return None
    return max_norm
```
Then you can call this function to get a max norm regularizer (with the threshold you want). When you create a hidden layer, you can pass this regularizer to the `kernel_regularizer` argument:
```
# Same net, but max-norm is now attached declaratively: each hidden layer's
# kernel_regularizer registers its clip op in the "max_norm" collection.
reset_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

learning_rate = 0.01
momentum = 0.9

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

max_norm_reg = max_norm_regularizer(threshold=1.0)

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                              kernel_regularizer=max_norm_reg, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu,
                              kernel_regularizer=max_norm_reg, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
Training is as usual, except you must run the weights clipping operations after each training operation:
```
# Fetch all clip ops from the collection once, and run them in a single
# sess.run() after each training step.
n_epochs = 20
batch_size = 50

clip_all_weights = tf.get_collection("max_norm")

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            sess.run(clip_all_weights)
        acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})  # not shown
        print(epoch, "Validation accuracy:", acc_valid)                # not shown

    save_path = saver.save(sess, "./my_model_final.ckpt")              # not shown
```
# Exercise solutions
## 1. to 7.
See appendix A.
## 8. Deep Learning
### 8.1.
_Exercise: Build a DNN with five hidden layers of 100 neurons each, He initialization, and the ELU activation function._
We will need similar DNNs in the next exercises, so let's create a function to build this DNN:
```
# Variance-scaling ("He") initializer, used for all layers in the exercise.
he_init = tf.variance_scaling_initializer()
def dnn(inputs, n_hidden_layers=5, n_neurons=100, name=None,
activation=tf.nn.elu, initializer=he_init):
with tf.variable_scope(name, "dnn"):
for layer in range(n_hidden_layers):
inputs = tf.layers.dense(inputs, n_neurons, activation=activation,
kernel_initializer=initializer,
name="hidden%d" % (layer + 1))
return inputs
n_inputs = 28 * 28 # MNIST
n_outputs = 5
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
dnn_outputs = dnn(X)
logits = tf.layers.dense(dnn_outputs, n_outputs, kernel_initializer=he_init, name="logits")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
```
### 8.2.
_Exercise: Using Adam optimization and early stopping, try training it on MNIST but only on digits 0 to 4, as we will use transfer learning for digits 5 to 9 in the next exercise. You will need a softmax output layer with five neurons, and as always make sure to save checkpoints at regular intervals and save the final model so you can reuse it later._
Let's complete the graph with the cost function, the training op, and all the other usual components:
```
learning_rate = 0.01

# Loss: softmax cross entropy computed from the logits
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")

optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss, name="training_op")

# Evaluation: top-1 accuracy
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
Now let's create the training set, validation and test set (we need the validation set to implement early stopping):
```
# Keep only digits 0 to 4 in each set
X_train1 = X_train[y_train < 5]
y_train1 = y_train[y_train < 5]
X_valid1 = X_valid[y_valid < 5]
y_valid1 = y_valid[y_valid < 5]
X_test1 = X_test[y_test < 5]
y_test1 = y_test[y_test < 5]

n_epochs = 1000
batch_size = 20

# Early stopping: stop after 20 epochs without validation-loss improvement
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # One pass over the training set in shuffled mini-batches
        rnd_idx = np.random.permutation(len(X_train1))
        for rnd_indices in np.array_split(rnd_idx, len(X_train1) // batch_size):
            X_batch, y_batch = X_train1[rnd_indices], y_train1[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid1, y: y_valid1})
        if loss_val < best_loss:
            # Checkpoint the best model seen so far
            save_path = saver.save(sess, "./my_mnist_model_0_to_4.ckpt")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))

with tf.Session() as sess:
    # Evaluate the best checkpoint on the test set
    saver.restore(sess, "./my_mnist_model_0_to_4.ckpt")
    acc_test = accuracy.eval(feed_dict={X: X_test1, y: y_test1})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
```
This test accuracy is not too bad, but let's see if we can do better by tuning the hyperparameters.
### 8.3.
_Exercise: Tune the hyperparameters using cross-validation and see what precision you can achieve._
Let's create a `DNNClassifier` class, compatible with Scikit-Learn's `RandomizedSearchCV` class, to perform hyperparameter tuning. Here are the key points of this implementation:
* the `__init__()` method (constructor) does nothing more than create instance variables for each of the hyperparameters.
* the `fit()` method creates the graph, starts a session and trains the model:
* it calls the `_build_graph()` method to build the graph (much like the graph we defined earlier). Once this method is done creating the graph, it saves all the important operations as instance variables for easy access by other methods.
* the `_dnn()` method builds the hidden layers, just like the `dnn()` function above, but also with support for batch normalization and dropout (for the next exercises).
* if the `fit()` method is given a validation set (`X_valid` and `y_valid`), then it implements early stopping. This implementation does not save the best model to disk, but rather to memory: it uses the `_get_model_params()` method to get all the graph's variables and their values, and the `_restore_model_params()` method to restore the variable values (of the best model found). This trick helps speed up training.
* After the `fit()` method has finished training the model, it keeps the session open so that predictions can be made quickly, without having to save a model to disk and restore it for every prediction. You can close the session by calling the `close_session()` method.
* the `predict_proba()` method uses the trained model to predict the class probabilities.
* the `predict()` method calls `predict_proba()` and returns the class with the highest probability, for each instance.
```
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError
class DNNClassifier(BaseEstimator, ClassifierMixin):
    """Scikit-Learn-compatible DNN classifier built on low-level TensorFlow.

    Optionally applies dropout and/or batch normalization to every hidden
    layer, and performs early stopping when `fit()` is given a validation
    set (keeping the best parameters in memory rather than on disk).
    """

    def __init__(self, n_hidden_layers=5, n_neurons=100, optimizer_class=tf.train.AdamOptimizer,
                 learning_rate=0.01, batch_size=20, activation=tf.nn.elu, initializer=he_init,
                 batch_norm_momentum=None, dropout_rate=None, random_state=None):
        """Initialize the DNNClassifier by simply storing all the hyperparameters."""
        self.n_hidden_layers = n_hidden_layers
        self.n_neurons = n_neurons
        self.optimizer_class = optimizer_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.activation = activation
        self.initializer = initializer
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.random_state = random_state
        self._session = None  # created by fit(), released by close_session()

    def _dnn(self, inputs):
        """Build the hidden layers, with support for batch normalization and dropout."""
        for layer in range(self.n_hidden_layers):
            if self.dropout_rate:
                inputs = tf.layers.dropout(inputs, self.dropout_rate, training=self._training)
            inputs = tf.layers.dense(inputs, self.n_neurons,
                                     kernel_initializer=self.initializer,
                                     name="hidden%d" % (layer + 1))
            if self.batch_norm_momentum:
                # BN goes between the linear layer and its activation
                inputs = tf.layers.batch_normalization(inputs, momentum=self.batch_norm_momentum,
                                                       training=self._training)
            inputs = self.activation(inputs, name="hidden%d_out" % (layer + 1))
        return inputs

    def _build_graph(self, n_inputs, n_outputs):
        """Build the same model as earlier."""
        if self.random_state is not None:
            tf.set_random_seed(self.random_state)
            np.random.seed(self.random_state)

        X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
        y = tf.placeholder(tf.int32, shape=(None), name="y")

        if self.batch_norm_momentum or self.dropout_rate:
            # Switches BN/dropout between training and inference behavior
            self._training = tf.placeholder_with_default(False, shape=(), name='training')
        else:
            self._training = None

        dnn_outputs = self._dnn(X)

        logits = tf.layers.dense(dnn_outputs, n_outputs, kernel_initializer=he_init, name="logits")
        Y_proba = tf.nn.softmax(logits, name="Y_proba")

        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                                  logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")

        optimizer = self.optimizer_class(learning_rate=self.learning_rate)
        training_op = optimizer.minimize(loss)

        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        # Make the important operations available easily through instance variables
        self._X, self._y = X, y
        self._Y_proba, self._loss = Y_proba, loss
        self._training_op, self._accuracy = training_op, accuracy
        self._init, self._saver = init, saver

    def close_session(self):
        """Close the TensorFlow session, if one is open."""
        if self._session:
            self._session.close()

    def _get_model_params(self):
        """Get all variable values (used for early stopping, faster than saving to disk)"""
        with self._graph.as_default():
            gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))}

    def _restore_model_params(self, model_params):
        """Set all variables to the given values (for early stopping, faster than loading from disk)"""
        gvar_names = list(model_params.keys())
        # Each variable's Assign op takes the new value as its second input
        assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + "/Assign")
                      for gvar_name in gvar_names}
        init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()}
        feed_dict = {init_values[gvar_name]: model_params[gvar_name] for gvar_name in gvar_names}
        self._session.run(assign_ops, feed_dict=feed_dict)

    def fit(self, X, y, n_epochs=100, X_valid=None, y_valid=None):
        """Fit the model to the training set. If X_valid and y_valid are provided, use early stopping."""
        self.close_session()

        # infer n_inputs and n_outputs from the training set.
        n_inputs = X.shape[1]
        self.classes_ = np.unique(y)
        n_outputs = len(self.classes_)

        # Translate the labels vector to a vector of sorted class indices, containing
        # integers from 0 to n_outputs - 1.
        # For example, if y is equal to [8, 8, 9, 5, 7, 6, 6, 6], then the sorted class
        # labels (self.classes_) will be equal to [5, 6, 7, 8, 9], and the labels vector
        # will be translated to [3, 3, 4, 0, 2, 1, 1, 1]
        self.class_to_index_ = {label: index
                                for index, label in enumerate(self.classes_)}
        y = np.array([self.class_to_index_[label]
                      for label in y], dtype=np.int32)

        self._graph = tf.Graph()
        with self._graph.as_default():
            self._build_graph(n_inputs, n_outputs)
            # extra ops for batch normalization (moving-average updates)
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        # needed in case of early stopping
        max_checks_without_progress = 20
        checks_without_progress = 0
        best_loss = np.infty
        best_params = None

        # Now train the model!
        self._session = tf.Session(graph=self._graph)
        with self._session.as_default() as sess:
            self._init.run()
            for epoch in range(n_epochs):
                rnd_idx = np.random.permutation(len(X))
                for rnd_indices in np.array_split(rnd_idx, len(X) // self.batch_size):
                    X_batch, y_batch = X[rnd_indices], y[rnd_indices]
                    feed_dict = {self._X: X_batch, self._y: y_batch}
                    if self._training is not None:
                        feed_dict[self._training] = True  # enable dropout/BN train mode
                    sess.run(self._training_op, feed_dict=feed_dict)
                    if extra_update_ops:
                        sess.run(extra_update_ops, feed_dict=feed_dict)
                if X_valid is not None and y_valid is not None:
                    # Early stopping on the validation loss
                    loss_val, acc_val = sess.run([self._loss, self._accuracy],
                                                 feed_dict={self._X: X_valid,
                                                            self._y: y_valid})
                    if loss_val < best_loss:
                        best_params = self._get_model_params()
                        best_loss = loss_val
                        checks_without_progress = 0
                    else:
                        checks_without_progress += 1
                    print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
                        epoch, loss_val, best_loss, acc_val * 100))
                    if checks_without_progress > max_checks_without_progress:
                        print("Early stopping!")
                        break
                else:
                    # No validation set: just report progress on the last batch
                    loss_train, acc_train = sess.run([self._loss, self._accuracy],
                                                     feed_dict={self._X: X_batch,
                                                                self._y: y_batch})
                    print("{}\tLast training batch loss: {:.6f}\tAccuracy: {:.2f}%".format(
                        epoch, loss_train, acc_train * 100))
            # If we used early stopping then rollback to the best model found
            if best_params:
                self._restore_model_params(best_params)
            return self

    def predict_proba(self, X):
        """Return the predicted class probabilities, one row per instance."""
        if not self._session:
            raise NotFittedError("This %s instance is not fitted yet" % self.__class__.__name__)
        with self._session.as_default() as sess:
            return self._Y_proba.eval(feed_dict={self._X: X})

    def predict(self, X):
        """Return the most likely original class label for each instance (column vector)."""
        class_indices = np.argmax(self.predict_proba(X), axis=1)
        return np.array([[self.classes_[class_index]]
                         for class_index in class_indices], np.int32)

    def save(self, path):
        """Save the trained model's variables to `path` via a TF Saver."""
        self._saver.save(self._session, path)
```
Let's see if we get the exact same accuracy as earlier using this class (without dropout or batch norm):
```
# Train with early stopping on the digits-0-to-4 validation set
dnn_clf = DNNClassifier(random_state=42)
dnn_clf.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
```
The model is trained, let's see if it gets the same accuracy as earlier:
```
from sklearn.metrics import accuracy_score

# Accuracy on the digits-0-to-4 test set
y_pred = dnn_clf.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
Yep! Working fine. Now we can use Scikit-Learn's `RandomizedSearchCV` class to search for better hyperparameters (this may take over an hour, depending on your system):
```
from sklearn.model_selection import RandomizedSearchCV
def leaky_relu(alpha=0.01):
    """Return a leaky-ReLU activation function with negative slope `alpha`."""
    def parametrized_leaky_relu(z, name=None):
        # identity for z >= 0, slope `alpha` for z < 0
        scaled = alpha * z
        return tf.maximum(scaled, z, name=name)
    return parametrized_leaky_relu
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [10, 50, 100, 500],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
}

# 50 random hyperparameter combinations, 3-fold cross-validation
rnd_search = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
                                cv=3, random_state=42, verbose=2)
# The extra kwargs are forwarded to DNNClassifier.fit() (enables early stopping)
rnd_search.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)

# If you have Scikit-Learn 0.18 or earlier, you should upgrade, or use the fit_params argument:
# fit_params = dict(X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# rnd_search = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
#                                 fit_params=fit_params, random_state=42, verbose=2)
# rnd_search.fit(X_train1, y_train1)

rnd_search.best_params_

y_pred = rnd_search.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
Wonderful! Tuning the hyperparameters got us up to 98.91% accuracy! It may not sound like a great improvement to go from 97.26% to 98.91% accuracy, but consider the error rate: it went from roughly 2.6% to 1.1%. That's almost 60% reduction of the number of errors this model will produce!
It's a good idea to save this model:
```
# Persist the best estimator found by the random search
rnd_search.best_estimator_.save("./my_best_mnist_model_0_to_4")
```
### 8.4.
_Exercise: Now try adding Batch Normalization and compare the learning curves: is it converging faster than before? Does it produce a better model?_
Let's train the best model found, once again, to see how fast it converges (alternatively, you could tweak the code above to make it write summaries for TensorBoard, so you can visualize the learning curve):
```
# Retrain the best configuration found by the random search
dnn_clf = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                        n_neurons=140, random_state=42)
dnn_clf.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
```
The best loss is reached at epoch 5.
Let's check that we do indeed get 98.9% accuracy on the test set:
```
# Test accuracy of the retrained best model
y_pred = dnn_clf.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
Good, now let's use the exact same model, but this time with batch normalization:
```
# Same setup, but with batch normalization (momentum 0.95) on every hidden layer
dnn_clf_bn = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                           n_neurons=90, random_state=42,
                           batch_norm_momentum=0.95)
dnn_clf_bn.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
```
The best params are reached during epoch 20, that's actually a slower convergence than earlier. Let's check the accuracy:
```
# Test accuracy of the batch-normalized model
y_pred = dnn_clf_bn.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
Great, batch normalization improved accuracy! Let's see if we can find a good set of hyperparameters that will work even better with batch normalization:
```
from sklearn.model_selection import RandomizedSearchCV
# Same search space as before, plus the BN momentum hyperparameter
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [10, 50, 100, 500],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
    "batch_norm_momentum": [0.9, 0.95, 0.98, 0.99, 0.999],
}

rnd_search_bn = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50, cv=3,
                                   random_state=42, verbose=2)
rnd_search_bn.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)

# If you have Scikit-Learn 0.18 or earlier, you should upgrade, or use the fit_params argument:
# fit_params = dict(X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# rnd_search_bn = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
#                                    fit_params=fit_params, random_state=42, verbose=2)
# rnd_search_bn.fit(X_train1, y_train1)

rnd_search_bn.best_params_

y_pred = rnd_search_bn.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
Slightly better than earlier: 99.49% vs 99.42%. Let's see if dropout can do better.
### 8.5.
_Exercise: is the model overfitting the training set? Try adding dropout to every layer and try again. Does it help?_
Let's go back to the model we trained earlier and see how it performs on the training set:
```
# Training-set accuracy — compare with the test accuracy to gauge overfitting
y_pred = dnn_clf.predict(X_train1)
accuracy_score(y_train1, y_pred)
```
The model performs significantly better on the training set than on the test set (99.51% vs 99.00%), which means it is overfitting the training set. A bit of regularization may help. Let's try adding dropout with a 50% dropout rate:
```
# Same architecture, regularized with a 50% dropout rate on every hidden layer
dnn_clf_dropout = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                                n_neurons=90, random_state=42,
                                dropout_rate=0.5)
dnn_clf_dropout.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
```
The best params are reached during epoch 17. Dropout somewhat slowed down convergence.
Let's check the accuracy:
```
# Test accuracy of the dropout-regularized model
y_pred = dnn_clf_dropout.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
We are out of luck, dropout does not seem to help. Let's try tuning the hyperparameters, perhaps we can squeeze a bit more performance out of this model:
```
from sklearn.model_selection import RandomizedSearchCV
# Same search space as before, plus the dropout rate hyperparameter
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [10, 50, 100, 500],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
    "dropout_rate": [0.2, 0.3, 0.4, 0.5, 0.6],
}

rnd_search_dropout = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
                                        cv=3, random_state=42, verbose=2)
rnd_search_dropout.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)

# If you have Scikit-Learn 0.18 or earlier, you should upgrade, or use the fit_params argument:
# fit_params = dict(X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# rnd_search_dropout = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
#                                         fit_params=fit_params, random_state=42, verbose=2)
# rnd_search_dropout.fit(X_train1, y_train1)

rnd_search_dropout.best_params_

y_pred = rnd_search_dropout.predict(X_test1)
accuracy_score(y_test1, y_pred)
```
Oh well, dropout did not improve the model. Better luck next time! :)
But that's okay, we have ourselves a nice DNN that achieves 99.49% accuracy on the test set using Batch Normalization, or 98.91% without BN. Let's see if some of this expertise on digits 0 to 4 can be transferred to the task of classifying digits 5 to 9. For the sake of simplicity we will reuse the DNN without BN.
## 9. Transfer learning
### 9.1.
_Exercise: create a new DNN that reuses all the pretrained hidden layers of the previous model, freezes them, and replaces the softmax output layer with a new one._
Let's load the best model's graph and get a handle on all the important operations we will need. Note that instead of creating a new softmax output layer, we will just reuse the existing one (since it already has the right number of outputs for the new task, five). We will reinitialize its parameters before training.
```
reset_graph()

# Import the pretrained graph's structure and grab handles on the ops we need
restore_saver = tf.train.import_meta_graph("./my_best_mnist_model_0_to_4.meta")

X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
loss = tf.get_default_graph().get_tensor_by_name("loss:0")
Y_proba = tf.get_default_graph().get_tensor_by_name("Y_proba:0")
logits = Y_proba.op.inputs[0]  # the input of the softmax op, i.e. the logits tensor
accuracy = tf.get_default_graph().get_tensor_by_name("accuracy:0")
```
To freeze the lower layers, we will exclude their variables from the optimizer's list of trainable variables, keeping only the output layer's trainable variables:
```
learning_rate = 0.01

# Only the output layer's variables are passed to the optimizer,
# so all lower layers are effectively frozen
output_layer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam2")  # distinct name: the graph already has an Adam
training_op = optimizer.minimize(loss, var_list=output_layer_vars)

correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
five_frozen_saver = tf.train.Saver()
```
### 9.2.
_Exercise: train this new DNN on digits 5 to 9, using only 100 images per digit, and time how long it takes. Despite this small number of examples, can you achieve high precision?_
Let's create the training, validation and test sets. We need to subtract 5 from the labels because TensorFlow expects integers from 0 to `n_classes-1`.
```
# Digits 5 to 9, with labels shifted to the 0-4 range expected by TensorFlow
X_train2_full = X_train[y_train >= 5]
y_train2_full = y_train[y_train >= 5] - 5
X_valid2_full = X_valid[y_valid >= 5]
y_valid2_full = y_valid[y_valid >= 5] - 5
X_test2 = X_test[y_test >= 5]
y_test2 = y_test[y_test >= 5] - 5
```
Also, for the purpose of this exercise, we want to keep only 100 instances per class in the training set (and let's keep only 30 instances per class in the validation set). Let's create a small function to do that:
```
def sample_n_instances_per_class(X, y, n=100):
    """Keep at most the first `n` instances of each class found in (X, y).

    Classes are processed in sorted label order; the concatenated arrays
    are returned as a new (X, y) pair.
    """
    X_parts, y_parts = [], []
    for label in np.unique(y):
        mask = (y == label)
        X_parts.append(X[mask][:n])
        y_parts.append(y[mask][:n])
    return np.concatenate(X_parts), np.concatenate(y_parts)
# 100 training instances per digit, 30 validation instances per digit
X_train2, y_train2 = sample_n_instances_per_class(X_train2_full, y_train2_full, n=100)
X_valid2, y_valid2 = sample_n_instances_per_class(X_valid2_full, y_valid2_full, n=30)
```
Now let's train the model. This is the same training code as earlier, using early stopping, except for the initialization: we first initialize all the variables, then we restore the best model trained earlier (on digits 0 to 4), and finally we reinitialize the output layer variables.
```
import time

n_epochs = 1000
batch_size = 20

# Early stopping bookkeeping
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty

with tf.Session() as sess:
    init.run()
    # Restore the pretrained 0-to-4 weights, then train only the output layer
    restore_saver.restore(sess, "./my_best_mnist_model_0_to_4")
    t0 = time.time()

    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = five_frozen_saver.save(sess, "./my_mnist_model_5_to_9_five_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))

    t1 = time.time()
    print("Total training time: {:.1f}s".format(t1 - t0))

with tf.Session() as sess:
    five_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_five_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
```
Well that's not a great accuracy, is it? Of course with such a tiny training set, and with only one layer to tweak, we should not expect miracles.
### 9.3.
_Exercise: try caching the frozen layers, and train the model again: how much faster is it now?_
Let's start by getting a handle on the output of the last frozen layer:
```
# Handle on the activation output of the top (5th) frozen hidden layer
hidden5_out = tf.get_default_graph().get_tensor_by_name("hidden5_out:0")
```
Now let's train the model using roughly the same code as earlier. The difference is that we compute the output of the top frozen layer at the beginning (both for the training set and the validation set), and we cache it. This makes training roughly 1.5 to 3 times faster in this example (this may vary greatly, depending on your system):
```
import time

n_epochs = 1000
batch_size = 20

max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_best_mnist_model_0_to_4")
    t0 = time.time()

    # Cache the frozen layers' output for the full training and validation
    # sets: those layers never change, so this only needs to run once
    hidden5_train = hidden5_out.eval(feed_dict={X: X_train2, y: y_train2})
    hidden5_valid = hidden5_out.eval(feed_dict={X: X_valid2, y: y_valid2})

    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            # Feed the cached activations straight into the top of the network
            h5_batch, y_batch = hidden5_train[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={hidden5_out: h5_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={hidden5_out: hidden5_valid, y: y_valid2})
        if loss_val < best_loss:
            save_path = five_frozen_saver.save(sess, "./my_mnist_model_5_to_9_five_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))

    t1 = time.time()
    print("Total training time: {:.1f}s".format(t1 - t0))

with tf.Session() as sess:
    five_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_five_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
```
### 9.4.
_Exercise: try again reusing just four hidden layers instead of five. Can you achieve a higher precision?_
Let's load the best model again, but this time we will create a new softmax output layer on top of the 4th hidden layer:
```
reset_graph()

n_outputs = 5

restore_saver = tf.train.import_meta_graph("./my_best_mnist_model_0_to_4.meta")

X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")

# Build a fresh softmax output layer on top of the 4th hidden layer's activation
hidden4_out = tf.get_default_graph().get_tensor_by_name("hidden4_out:0")
logits = tf.layers.dense(hidden4_out, n_outputs, kernel_initializer=he_init, name="new_logits")
Y_proba = tf.nn.softmax(logits)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
```
And now let's create the training operation. We want to freeze all the layers except for the new output layer:
```
learning_rate = 0.01

# Train only the new output layer; the four reused hidden layers stay frozen
output_layer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="new_logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam2")
training_op = optimizer.minimize(loss, var_list=output_layer_vars)

init = tf.global_variables_initializer()
four_frozen_saver = tf.train.Saver()
```
And once again we train the model with the same code as earlier. Note: we could of course write a function once and use it multiple times, rather than copying almost the same training code over and over again, but as we keep tweaking the code slightly, the function would need multiple arguments and `if` statements, and it would have to be at the beginning of the notebook, where it would not make much sense to readers. In short it would be very confusing, so we're better off with copy & paste.
```
n_epochs = 1000
batch_size = 20

max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty

with tf.Session() as sess:
    init.run()
    # Restore the pretrained lower layers, then train the new output layer
    restore_saver.restore(sess, "./my_best_mnist_model_0_to_4")

    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = four_frozen_saver.save(sess, "./my_mnist_model_5_to_9_four_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))

with tf.Session() as sess:
    four_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_four_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
```
Still not fantastic, but much better.
### 9.5.
_Exercise: now unfreeze the top two hidden layers and continue training: can you get the model to perform even better?_
```
learning_rate = 0.01
unfrozen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="hidden[34]|new_logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam3")
training_op = optimizer.minimize(loss, var_list=unfrozen_vars)
init = tf.global_variables_initializer()
two_frozen_saver = tf.train.Saver()
n_epochs = 1000
batch_size = 20
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
init.run()
four_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_four_frozen")
for epoch in range(n_epochs):
rnd_idx = np.random.permutation(len(X_train2))
for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
if loss_val < best_loss:
save_path = two_frozen_saver.save(sess, "./my_mnist_model_5_to_9_two_frozen")
best_loss = loss_val
checks_without_progress = 0
else:
checks_without_progress += 1
if checks_without_progress > max_checks_without_progress:
print("Early stopping!")
break
print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
two_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_two_frozen")
acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
print("Final test accuracy: {:.2f}%".format(acc_test * 100))
```
Let's check what accuracy we can get by unfreezing all layers:
```
learning_rate = 0.01

# No var_list: all layers are trainable (nothing frozen)
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam4")
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
no_frozen_saver = tf.train.Saver()

n_epochs = 1000
batch_size = 20

max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty

with tf.Session() as sess:
    init.run()
    # Continue training from the two-frozen-layers checkpoint
    two_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_two_frozen")

    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = no_frozen_saver.save(sess, "./my_mnist_model_5_to_9_no_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))

with tf.Session() as sess:
    no_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_no_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
```
Let's compare that to a DNN trained from scratch:
```
# Baseline: the same 4-hidden-layer DNN trained from scratch on the 5-9 data,
# with no transfer learning, for comparison.
dnn_clf_5_to_9 = DNNClassifier(n_hidden_layers=4, random_state=42)
dnn_clf_5_to_9.fit(X_train2, y_train2, n_epochs=1000, X_valid=X_valid2, y_valid=y_valid2)

y_pred = dnn_clf_5_to_9.predict(X_test2)
accuracy_score(y_test2, y_pred)
```
Transfer learning allowed us to go from 84.8% accuracy to 91.3%. Not too bad!
## 10. Pretraining on an auxiliary task
In this exercise you will build a DNN that compares two MNIST digit images and predicts whether they represent the same digit or not. Then you will reuse the lower layers of this network to train an MNIST classifier using very little training data.
### 10.1.
Exercise: _Start by building two DNNs (let's call them DNN A and B), both similar to the one you built earlier but without the output layer: each DNN should have five hidden layers of 100 neurons each, He initialization, and ELU activation. Next, add one more hidden layer with 10 units on top of both DNNs. You should use TensorFlow's `concat()` function with `axis=1` to concatenate the outputs of both DNNs along the horizontal axis, then feed the result to the hidden layer. Finally, add an output layer with a single neuron using the logistic activation function._
**Warning**! There was an error in the book for this exercise: there was no instruction to add a top hidden layer. Without it, the neural network generally fails to start learning. If you have the latest version of the book, this error has been fixed.
You could have two input placeholders, `X1` and `X2`, one for the images that should be fed to the first DNN, and the other for the images that should be fed to the second DNN. It would work fine. However, another option is to have a single input placeholder to hold both sets of images (each row will hold a pair of images), and use `tf.unstack()` to split this tensor into two separate tensors, like this:
```
n_inputs = 28 * 28 # MNIST

reset_graph()

# One placeholder holds both images of each pair: shape (batch, 2, 784).
X = tf.placeholder(tf.float32, shape=(None, 2, n_inputs), name="X")
# Split along axis 1 into the two per-network image tensors.
X1, X2 = tf.unstack(X, axis=1)
```
We also need the labels placeholder. Each label will be 0 if the images represent different digits, or 1 if they represent the same digit:
```
y = tf.placeholder(tf.int32, shape=[None, 1])
```
Now let's feed these inputs through two separate DNNs:
```
dnn1 = dnn(X1, name="DNN_A")
dnn2 = dnn(X2, name="DNN_B")
```
And let's concatenate their outputs:
```
dnn_outputs = tf.concat([dnn1, dnn2], axis=1)
```
Each DNN outputs 100 activations (per instance), so the shape is `[None, 100]`:
```
dnn1.shape
dnn2.shape
```
And of course the concatenated outputs have a shape of `[None, 200]`:
```
dnn_outputs.shape
```
Now let's add an extra hidden layer with just 10 neurons, and the output layer, with a single neuron:
```
# Extra 10-unit hidden layer on top of the concatenated activations, then a
# single-logit output layer for binary "same digit?" classification.
hidden = tf.layers.dense(dnn_outputs, units=10, activation=tf.nn.elu, kernel_initializer=he_init)
logits = tf.layers.dense(hidden, units=1, kernel_initializer=he_init)
y_proba = tf.nn.sigmoid(logits)
```
The whole network predicts `1` if `y_proba >= 0.5` (i.e. the network predicts that the images represent the same digit), or `0` otherwise. We compute instead `logits >= 0`, which is equivalent but faster to compute:
```
y_pred = tf.cast(tf.greater_equal(logits, 0), tf.int32)
```
Now let's add the cost function:
```
# Sigmoid cross-entropy expects float labels; average over the batch.
y_as_float = tf.cast(y, tf.float32)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_as_float, logits=logits)
loss = tf.reduce_mean(xentropy)
```
And we can now create the training operation using an optimizer:
```
learning_rate = 0.01
momentum = 0.95
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss)
```
We will want to measure our classifier's accuracy.
```
y_pred_correct = tf.equal(y_pred, y)
accuracy = tf.reduce_mean(tf.cast(y_pred_correct, tf.float32))
```
And the usual `init` and `saver`:
```
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
### 10.2.
_Exercise: split the MNIST training set in two sets: split #1 should contain 55,000 images, and split #2 should contain 5,000 images. Create a function that generates a training batch where each instance is a pair of MNIST images picked from split #1. Half of the training instances should be pairs of images that belong to the same class, while the other half should be images from different classes._ (Note: the implementation below labels a pair 1 when the images are from the same class and 0 when they are from different classes — the opposite of the book's wording.)
The MNIST dataset returned by TensorFlow's `input_data()` function is already split into 3 parts: a training set (55,000 instances), a validation set (5,000 instances) and a test set (10,000 instances). Let's use the first set to generate the training set composed of image pairs, and we will use the second set for the second phase of the exercise (to train a regular MNIST classifier). We will use the third set as the test set for both phases.
```
# Phase 1 (pair comparison) trains on the full 55,000-image training set;
# phase 2 (the small-data classifier) trains on the 5,000-image validation set.
X_train1 = X_train
y_train1 = y_train

X_train2 = X_valid
y_train2 = y_valid

# NOTE(review): these two self-assignments are no-ops, kept only for naming symmetry.
X_test = X_test
y_test = y_test
```
Let's write a function that generates pairs of images: 50% representing the same digit, and 50% representing different digits. There are many ways to implement this. In this implementation, we first decide how many "same" pairs (i.e. pairs of images representing the same digit) we will generate, and how many "different" pairs (i.e. pairs of images representing different digits). We could just use `batch_size // 2` but we want to handle the case where it is odd (granted, that might be overkill!). Then we generate random pairs and we pick the right number of "same" pairs, then we generate the right number of "different" pairs. Finally we shuffle the batch and return it:
```
def generate_batch(images, labels, batch_size):
    """Build a shuffled batch of image pairs.

    Roughly half the pairs show the same digit (label 1) and half show
    different digits (label 0). Returns (pairs, pair_labels), where pairs
    has shape (batch_size, 2, n_features) and pair_labels (batch_size, 1).
    """
    n_same = batch_size // 2
    n_diff = batch_size - n_same
    # For odd batch sizes, randomly decide which half gets the extra pair.
    if n_same != n_diff and np.random.rand() > 0.5:
        n_same, n_diff = n_diff, n_same

    pairs, pair_labels = [], []
    # Rejection-sample "same digit" pairs: two distinct indices, equal labels.
    while len(pairs) < n_same:
        i, j = np.random.randint(0, len(images), 2)
        if i != j and labels[i] == labels[j]:
            pairs.append(np.array([images[i], images[j]]))
            pair_labels.append([1])
    # Then fill the rest with "different digit" pairs (unequal labels).
    while len(pairs) < batch_size:
        i, j = np.random.randint(0, len(images), 2)
        if labels[i] != labels[j]:
            pairs.append(np.array([images[i], images[j]]))
            pair_labels.append([0])

    # Shuffle so same/different pairs are interleaved within the batch.
    shuffle = np.random.permutation(batch_size)
    return np.array(pairs)[shuffle], np.array(pair_labels)[shuffle]
```
Let's test it to generate a small batch of 5 image pairs:
```
batch_size = 5
X_batch, y_batch = generate_batch(X_train1, y_train1, batch_size)
```
Each row in `X_batch` contains a pair of images:
```
X_batch.shape, X_batch.dtype
```
Let's look at these pairs:
```
# Display the pairs side by side: left column = first image of each pair,
# right column = second image, one batch row per vertical slot.
plt.figure(figsize=(3, 3 * batch_size))
plt.subplot(121)
plt.imshow(X_batch[:,0].reshape(28 * batch_size, 28), cmap="binary", interpolation="nearest")
plt.axis('off')
plt.subplot(122)
plt.imshow(X_batch[:,1].reshape(28 * batch_size, 28), cmap="binary", interpolation="nearest")
plt.axis('off')
plt.show()
```
And let's look at the labels (0 means "different", 1 means "same"):
```
y_batch
```
Perfect!
### 10.3.
_Exercise: train the DNN on this training set. For each image pair, you can simultaneously feed the first image to DNN A and the second image to DNN B. The whole network will gradually learn to tell whether two images belong to the same class or not._
Let's generate a test set composed of many pairs of images pulled from the MNIST test set:
```
X_test1, y_test1 = generate_batch(X_test, y_test, batch_size=len(X_test))
```
And now, let's train the model. There's really nothing special about this step, except for the fact that we need a fairly large `batch_size`, otherwise the model fails to learn anything and ends up with an accuracy of 50%:
```
n_epochs = 100
batch_size = 500  # large batches needed here, otherwise training stalls at ~50% accuracy

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(len(X_train1) // batch_size):
            # Each batch is generated on the fly: half "same", half "different" pairs.
            X_batch, y_batch = generate_batch(X_train1, y_train1, batch_size)
            loss_val, _ = sess.run([loss, training_op], feed_dict={X: X_batch, y: y_batch})
        print(epoch, "Train loss:", loss_val)
        if epoch % 5 == 0:
            # Periodic evaluation on the pre-generated pair test set.
            acc_test = accuracy.eval(feed_dict={X: X_test1, y: y_test1})
            print(epoch, "Test accuracy:", acc_test)

    save_path = saver.save(sess, "./my_digit_comparison_model.ckpt")
```
All right, we reach 97.6% accuracy on this digit comparison task. That's not too bad, this model knows a thing or two about comparing handwritten digits!
Let's see if some of that knowledge can be useful for the regular MNIST classification task.
### 10.4.
_Exercise: now create a new DNN by reusing and freezing the hidden layers of DNN A and adding a softmax output layer on top with 10 neurons. Train this network on split #2 and see if you can achieve high performance despite having only 500 images per class._
Let's create the model, it is pretty straightforward. There are many ways to freeze the lower layers, as explained in the book. In this example, we chose to use the `tf.stop_gradient()` function. Note that we need one `Saver` to restore the pretrained DNN A, and another `Saver` to save the final model:
```
reset_graph()

n_inputs = 28 * 28  # MNIST
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

# Rebuild DNN A's architecture; its pretrained weights are restored later.
dnn_outputs = dnn(X, name="DNN_A")

# Freeze the reused layers: gradients stop flowing here, so only the new
# softmax output layer below is trained.
frozen_outputs = tf.stop_gradient(dnn_outputs)

logits = tf.layers.dense(frozen_outputs, n_outputs, kernel_initializer=he_init)
Y_proba = tf.nn.softmax(logits)

xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")

optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss)

correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()

# One saver restores only DNN A's pretrained variables; the other saves
# the whole final model.
dnn_A_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="DNN_A")
restore_saver = tf.train.Saver(var_list={var.op.name: var for var in dnn_A_vars})
saver = tf.train.Saver()
```
Now on to training! We first initialize all variables (including the variables in the new output layer), then we restore the pretrained DNN A. Next, we just train the model on the small MNIST dataset (containing just 5,000 images):
```
n_epochs = 100
batch_size = 50

with tf.Session() as sess:
    init.run()
    # Restore the pretrained DNN A weights; the new output layer keeps its
    # fresh initialization from init.run() above.
    restore_saver.restore(sess, "./my_digit_comparison_model.ckpt")

    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if epoch % 10 == 0:
            acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
            print(epoch, "Test accuracy:", acc_test)

    save_path = saver.save(sess, "./my_mnist_model_final.ckpt")
```
Well, 96.5% accuracy, that's not the best MNIST model we have trained so far, but recall that we are only using a small training set (just 500 images per digit). Let's compare this result with the same DNN trained from scratch, without using transfer learning:
```
# Same architecture trained from scratch (no transfer learning), for comparison.
reset_graph()

n_inputs = 28 * 28  # MNIST
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

dnn_outputs = dnn(X, name="DNN_A")

logits = tf.layers.dense(dnn_outputs, n_outputs, kernel_initializer=he_init)
Y_proba = tf.nn.softmax(logits)

xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")

optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss)

correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()

# restore_saver is unused in this from-scratch run; kept for symmetry with
# the transfer-learning cell above.
dnn_A_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="DNN_A")
restore_saver = tf.train.Saver(var_list={var.op.name: var for var in dnn_A_vars})
saver = tf.train.Saver()

n_epochs = 150
batch_size = 50

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if epoch % 10 == 0:
            acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
            print(epoch, "Test accuracy:", acc_test)
    # NOTE(review): this overwrites the transfer-learning checkpoint saved
    # earlier under the same path — confirm that is intentional.
    save_path = saver.save(sess, "./my_mnist_model_final.ckpt")
```
Only 94.6% accuracy... So transfer learning helped us reduce the error rate from 5.4% to 3.5% (that's over 35% error reduction). Moreover, the model using transfer learning reached over 96% accuracy in less than 10 epochs.
Bottom line: transfer learning does not always work, but when it does it can make a big difference. So try it out!
| github_jupyter |
```
# GPU: 32*40 in 9.87s = 130/s
# CPU: 32*8 in 31.9s = 8/s
import os
import sys
import numpy as np
import mxnet as mx
from collections import namedtuple
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Numpy: ", np.__version__)
print("MXNet: ", mx.__version__)
!cat /proc/cpuinfo | grep processor | wc -l
!nvidia-smi --query-gpu=gpu_name --format=csv
# Inference is fed through MXNet's Module API, which expects a Batch namedtuple.
Batch = namedtuple('Batch', ['data'])

BATCH_SIZE = 32
RESNET_FEATURES = 2048  # width of ResNet-50's 'flatten0' feature layer
BATCHES_GPU = 40
BATCHES_CPU = 8

def give_fake_data(batches):
    """Create deterministic fake image data to run inference on.

    Returns the same tensor in two layouts: channels-last (N, 224, 224, 3)
    and channels-first (N, 3, 224, 224).
    """
    np.random.seed(0)  # deterministic so GPU and CPU runs see identical data
    shape_cl = (BATCH_SIZE * batches, 224, 224, 3)
    channels_last = np.random.rand(*shape_cl).astype(np.float32)
    channels_first = np.swapaxes(channels_last, 1, 3)
    return channels_last, channels_first

def yield_mb(X, batchsize):
    """Yield (index, mini-batch) pairs; any trailing partial batch is dropped."""
    n_batches = len(X) // batchsize
    for idx in range(n_batches):
        start = idx * batchsize
        yield idx, X[start:start + batchsize]
# Create batches of fake data
fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_GPU)
print(fake_input_data_cl.shape, fake_input_data_cf.shape)
# Download Resnet weights
path='http://data.mxnet.io/models/imagenet/'
[mx.test_utils.download(path+'resnet/50-layers/resnet-50-symbol.json'),
mx.test_utils.download(path+'resnet/50-layers/resnet-50-0000.params')]
# Load model
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0)
# List the last 10 layers
all_layers = sym.get_internals()
print(all_layers.list_outputs()[-10:])
def predict_fn(classifier, data, batchsize):
    """Run the feature extractor over `data` in mini-batches.

    Returns an (len(data), RESNET_FEATURES) float32 array of features; rows
    belonging to a dropped trailing partial batch remain zero.
    """
    features = np.zeros((len(data), RESNET_FEATURES), np.float32)
    for batch_idx, batch_data in yield_mb(data, batchsize):
        classifier.forward(Batch(data=[mx.nd.array(batch_data)]))
        start = batch_idx * batchsize
        features[start:start + batchsize] = classifier.get_outputs()[0].asnumpy().squeeze()
    return features
```
## 1. GPU
```
# Get last layer
fe_sym = all_layers['flatten0_output']
# Initialise GPU
fe_mod = mx.mod.Module(symbol=fe_sym, context=[mx.gpu(0)], label_names=None)
fe_mod.bind(for_training=False, inputs_need_grad=False,
data_shapes=[('data', (BATCH_SIZE,3,224,224))])
fe_mod.set_params(arg_params, aux_params)
cold_start = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE)
%%time
# GPU: 9.87s
features = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE)
```
## 2. CPU
```
# Kill all GPUs ...
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Get last layer
fe_sym = all_layers['flatten0_output']
# Initialise CPU
fe_mod = mx.mod.Module(symbol=fe_sym, context=mx.cpu(), label_names=None)
fe_mod.bind(for_training=False, inputs_need_grad=False,
data_shapes=[('data', (BATCH_SIZE,3,224,224))])
fe_mod.set_params(arg_params, aux_params)
# Create batches of fake data
fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_CPU)
print(fake_input_data_cl.shape, fake_input_data_cf.shape)
cold_start = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE)
%%time
# CPU: 31.9s
features = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import os
import json
import altair as alt
import numpy as np
import glob
DATA_DIR = "/Users/user/Documents/GeneInvestigator/results/BDNF/Recombinants"
RELAX_FILES = glob.glob(os.path.join(DATA_DIR, "*.RELAX.json"))
pvalue_threshold = 0.05
RELAX_FILES
def getRELAX_TR(JSON):
    """Load a RELAX result JSON file and return its 'test results' section."""
    with open(JSON, "r") as handle:
        contents = json.load(handle)
    return contents["test results"]
def getRELAX_fits(JSON):
    """Load a RELAX result JSON file and return its model 'fits' section."""
    with open(JSON, "r") as handle:
        contents = json.load(handle)
    return contents["fits"]
# Main: keep only the RELAX analyses whose relaxation test is significant.
passedThreshold = []
for result_file in RELAX_FILES:
    p_value = getRELAX_TR(result_file)["p-value"]
    if p_value > pvalue_threshold:
        continue
    passedThreshold.append(result_file)
    print("# Passed p-value threshold:", result_file.split("/")[-1])
# Process them separately
#TEST_FILE = "/Users/user/Documents/GeneInvestigator/results/BDNF/Recombinants/BDNF_codons_RDP_recombinationFree.fas.Carnivora.RELAX.json"
#TEST_FILE = "/Users/user/Documents/GeneInvestigator/results/BDNF/Recombinants/BDNF_codons_RDP_recombinationFree.fas.Primates.RELAX.json"
TEST_FILE = '/Users/user/Documents/GeneInvestigator/results/BDNF/Recombinants/BDNF_codons_RDP_recombinationFree.fas.Eulipotyphla.RELAX.json'

# The taxon label is the third dot-separated token of the file name
# (e.g. "....fas.Eulipotyphla.RELAX.json" -> "Eulipotyphla").
label = TEST_FILE.split("/")[-1].split(".")[2]
print("Label:", label)

fits = getRELAX_fits(TEST_FILE)
fits.keys()
# print(fits["RELAX partitioned descriptive"])

# Overview of all fitted models, one column per model after transposing
df = pd.DataFrame.from_dict(fits, orient='index')
df.T

# Omega rate distributions of the Partitioned Descriptive model,
# one data frame per branch set (Reference vs Test)
df2_Ref = pd.DataFrame.from_dict(fits["RELAX partitioned descriptive"]["Rate Distributions"]["Reference"], orient='index')
df2_Ref

df3_Test = pd.DataFrame.from_dict(fits["RELAX partitioned descriptive"]["Rate Distributions"]["Test"], orient='index')
df3_Test
# Plot the omega distributions of the partitioned descriptive model.
# NOTE: `source` aliases df2_Ref, so the columns added below are visible
# to the chart built from it.
source = df2_Ref
df2_Ref["log(omega)"] = np.log10(df2_Ref["omega"])
df2_Ref["Color"] = "Reference"
line = alt.Chart(source).mark_bar().encode(
    x='log(omega)',
    y='proportion',
    color="Color"
).properties(
    width=500,
    height=400,
    title=label
)
# NOTE(review): configure_title returns a new chart object that is not kept,
# so this styling does not affect `line` — confirm intent.
line.configure_title(
    fontSize=20,
    font='Courier',
    anchor='start',
    color='gray'
)

# Same transformation for the Test branches
df3_Test["log(omega)"] = np.log10(df3_Test["omega"])
df3_Test["Color"] = "Test"
source = df3_Test
line2 = alt.Chart(source).mark_bar().encode(
    x='log(omega)',
    y='proportion',
    color="Color"
).properties(
    width=500,
    height=400)

# Overlay Reference and Test distributions in one chart
line + line2
# Process them together as subplots
# RELAX Interpretation
# http://hyphy.org/methods/selection-methods/#relax
# method paper: https://academic.oup.com/mbe/article/32/3/820/981440
```
# Figure legend
Patterns of natural selection across taxonomic groups under the Partitioned Descriptive model of the RELAX method. Selection profiles for BDNF are shown along Reference and Test branches for each taxonomic group. Three omega parameters and the relative proportion of sites they represent are plotted for Test (orange) and Reference (blue) branches. Only omega categories representing nonzero proportions of sites are shown. Neutral selection corresponds to omega = 1.0 (i.e. log(omega) = 0) on this log10-scaled X-axis. These taxonomic groups represent datasets where significant (p ≤ 0.05) evidence for relaxed selection was detected between test and reference branches.
| github_jupyter |
# Residual Networks (ResNet)
:label:`sec_resnet`
As we design increasingly deeper networks it becomes imperative to understand how adding layers can increase the complexity and expressiveness of the network.
Even more important is the ability to design networks where adding layers makes networks strictly more expressive rather than just different.
To make some progress we need a bit of mathematics.
## Function Classes
Consider $\mathcal{F}$, the class of functions that a specific network architecture (together with learning rates and other hyperparameter settings) can reach.
That is, for all $f \in \mathcal{F}$ there exists some set of parameters (e.g., weights and biases) that can be obtained through training on a suitable dataset.
Let us assume that $f^*$ is the "truth" function that we really would like to find.
If it is in $\mathcal{F}$, we are in good shape but typically we will not be quite so lucky.
Instead, we will try to find some $f^*_\mathcal{F}$ which is our best bet within $\mathcal{F}$.
For instance,
given a dataset with features $\mathbf{X}$
and labels $\mathbf{y}$,
we might try finding it by solving the following optimization problem:
$$f^*_\mathcal{F} \stackrel{\mathrm{def}}{=} \mathop{\mathrm{argmin}}_f L(\mathbf{X}, \mathbf{y}, f) \text{ subject to } f \in \mathcal{F}.$$
It is only reasonable to assume that if we design a different and more powerful architecture $\mathcal{F}'$ we should arrive at a better outcome. In other words, we would expect that $f^*_{\mathcal{F}'}$ is "better" than $f^*_{\mathcal{F}}$. However, if $\mathcal{F} \not\subseteq \mathcal{F}'$ there is no guarantee that this should even happen. In fact, $f^*_{\mathcal{F}'}$ might well be worse.
As illustrated by :numref:`fig_functionclasses`,
for non-nested function classes, a larger function class does not always move closer to the "truth" function $f^*$. For instance,
on the left of :numref:`fig_functionclasses`,
though $\mathcal{F}_3$ is closer to $f^*$ than $\mathcal{F}_1$, $\mathcal{F}_6$ moves away and there is no guarantee that further increasing the complexity can reduce the distance from $f^*$.
With nested function classes
where $\mathcal{F}_1 \subseteq \ldots \subseteq \mathcal{F}_6$
on the right of :numref:`fig_functionclasses`,
we can avoid the aforementioned issue from the non-nested function classes.

:label:`fig_functionclasses`
Thus,
only if larger function classes contain the smaller ones are we guaranteed that increasing them strictly increases the expressive power of the network.
For deep neural networks,
if we can
train the newly-added layer into an identity function $f(\mathbf{x}) = \mathbf{x}$, the new model will be as effective as the original model. As the new model may get a better solution to fit the training dataset, the added layer might make it easier to reduce training errors.
This is the question that He et al. considered when working on very deep computer vision models :cite:`He.Zhang.Ren.ea.2016`.
At the heart of their proposed *residual network* (*ResNet*) is the idea that every additional layer should
more easily
contain the identity function as one of its elements.
These considerations are rather profound but they led to a surprisingly simple
solution, a *residual block*.
With it, ResNet won the ImageNet Large Scale Visual Recognition Challenge in 2015. The design had a profound influence on how to
build deep neural networks.
## Residual Blocks
Let us focus on a local part of a neural network, as depicted in :numref:`fig_residual_block`. Denote the input by $\mathbf{x}$.
We assume that the desired underlying mapping we want to obtain by learning is $f(\mathbf{x})$, to be used as the input to the activation function on the top.
On the left of :numref:`fig_residual_block`,
the portion within the dotted-line box
must directly learn the mapping $f(\mathbf{x})$.
On the right,
the portion within the dotted-line box
needs to
learn the *residual mapping* $f(\mathbf{x}) - \mathbf{x}$,
which is how the residual block derives its name.
If the identity mapping $f(\mathbf{x}) = \mathbf{x}$ is the desired underlying mapping,
the residual mapping is easier to learn:
we only need to push the weights and biases
of the
upper weight layer (e.g., fully-connected layer and convolutional layer)
within the dotted-line box
to zero.
The right figure in :numref:`fig_residual_block` illustrates the *residual block* of ResNet,
where the solid line carrying the layer input
$\mathbf{x}$ to the addition operator
is called a *residual connection* (or *shortcut connection*).
With residual blocks, inputs can
forward propagate faster through the residual connections across layers.

:label:`fig_residual_block`
ResNet follows VGG's full $3\times 3$ convolutional layer design. The residual block has two $3\times 3$ convolutional layers with the same number of output channels. Each convolutional layer is followed by a batch normalization layer and a ReLU activation function. Then, we skip these two convolution operations and add the input directly before the final ReLU activation function.
This kind of design requires that the output of the two convolutional layers has to be of the same shape as the input, so that they can be added together. If we want to change the number of channels, we need to introduce an additional $1\times 1$ convolutional layer to transform the input into the desired shape for the addition operation. Let us have a look at the code below.
```
from d2l import mxnet as d2l
from mxnet import np, npx
from mxnet.gluon import nn
npx.set_np()
class Residual(nn.Block):  #@save
    """The Residual block of ResNet.

    Two 3x3 convolutions (each followed by batch norm) on the main path,
    plus an identity shortcut that is added back in before the final ReLU.
    """
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super().__init__(**kwargs)
        # Main path: the first conv may downsample via `strides`.
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        # Optional 1x1 conv on the shortcut so its shape (channels and,
        # through `strides`, resolution) matches the main path's output.
        if use_1x1conv:
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def forward(self, X):
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        Y = npx.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        # Residual addition, then the final activation.
        return npx.relu(Y + X)
```
This code generates two types of networks: one where we add the input to the output before applying the ReLU nonlinearity whenever `use_1x1conv=False`, and one where we adjust channels and resolution by means of a $1 \times 1$ convolution before adding. :numref:`fig_resnet_block` illustrates this:

:label:`fig_resnet_block`
Now let us look at a situation where the input and output are of the same shape.
```
blk = Residual(3)
blk.initialize()
X = np.random.uniform(size=(4, 3, 6, 6))
blk(X).shape
```
We also have the option to halve the output height and width while increasing the number of output channels.
```
blk = Residual(6, use_1x1conv=True, strides=2)
blk.initialize()
blk(X).shape
```
## ResNet Model
The first two layers of ResNet are the same as those of the GoogLeNet we described before: the $7\times 7$ convolutional layer with 64 output channels and a stride of 2 is followed by the $3\times 3$ maximum pooling layer with a stride of 2. The difference is the batch normalization layer added after each convolutional layer in ResNet.
```
# Stem: 7x7 conv (stride 2) + BN + ReLU + 3x3 max-pool (stride 2) —
# the same opening as GoogLeNet, with batch normalization added.
net = nn.Sequential()
net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),
        nn.BatchNorm(), nn.Activation('relu'),
        nn.MaxPool2D(pool_size=3, strides=2, padding=1))
```
GoogLeNet uses four modules made up of Inception blocks.
However, ResNet uses four modules made up of residual blocks, each of which uses several residual blocks with the same number of output channels.
The number of channels in the first module is the same as the number of input channels. Since a maximum pooling layer with a stride of 2 has already been used, it is not necessary to reduce the height and width. In the first residual block for each of the subsequent modules, the number of channels is doubled compared with that of the previous module, and the height and width are halved.
Now, we implement this module. Note that special processing has been performed on the first module.
```
def resnet_block(num_channels, num_residuals, first_block=False):
    """Stack `num_residuals` residual blocks with `num_channels` channels.

    Unless this is the first module (which follows a pooling layer that has
    already downsampled the input), the first block halves height/width and
    uses a 1x1 convolution to reshape the shortcut.
    """
    blk = nn.Sequential()
    for idx in range(num_residuals):
        downsample = idx == 0 and not first_block
        blk.add(Residual(num_channels, use_1x1conv=True, strides=2)
                if downsample else Residual(num_channels))
    return blk
```
Then, we add all the modules to ResNet. Here, two residual blocks are used for each module.
```
net.add(resnet_block(64, 2, first_block=True),
resnet_block(128, 2),
resnet_block(256, 2),
resnet_block(512, 2))
```
Finally, just like GoogLeNet, we add a global average pooling layer, followed by the fully-connected layer output.
```
net.add(nn.GlobalAvgPool2D(), nn.Dense(10))
```
There are 4 convolutional layers in each module (excluding the $1\times 1$ convolutional layer). Together with the first $7\times 7$ convolutional layer and the final fully-connected layer, there are 18 layers in total. Therefore, this model is commonly known as ResNet-18.
By configuring different numbers of channels and residual blocks in the module, we can create different ResNet models, such as the deeper 152-layer ResNet-152. Although the main architecture of ResNet is similar to that of GoogLeNet, ResNet's structure is simpler and easier to modify. All these factors have resulted in the rapid and widespread use of ResNet. :numref:`fig_resnet18` depicts the full ResNet-18.

:label:`fig_resnet18`
Before training ResNet, let us observe how the input shape changes across different modules in ResNet. As in all the previous architectures, the resolution decreases while the number of channels increases up until the point where a global average pooling layer aggregates all features.
```
# Trace a dummy input through the network, printing each layer's output shape.
X = np.random.uniform(size=(1, 1, 224, 224))
net.initialize()
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape:\t', X.shape)
```
## Training
We train ResNet on the Fashion-MNIST dataset, just like before.
```
lr, num_epochs, batch_size = 0.05, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr)
```
## Summary
* Nested function classes are desirable. Learning an additional layer in deep neural networks as an identity function (though this is an extreme case) should be made easy.
* The residual mapping can learn the identity function more easily, such as pushing parameters in the weight layer to zero.
* We can train an effective deep neural network by having residual blocks. Inputs can forward propagate faster through the residual connections across layers.
* ResNet had a major influence on the design of subsequent deep neural networks, both for convolutional and sequential nature.
## Exercises
1. What are the major differences between the Inception block in :numref:`fig_inception` and the residual block? After removing some paths in the Inception block, how are they related to each other?
1. Refer to Table 1 in the ResNet paper :cite:`He.Zhang.Ren.ea.2016` to
implement different variants.
1. For deeper networks, ResNet introduces a "bottleneck" architecture to reduce
model complexity. Try to implement it.
1. In subsequent versions of ResNet, the authors changed the "convolution, batch
normalization, and activation" structure to the "batch normalization,
activation, and convolution" structure. Make this improvement
yourself. See Figure 1 in :cite:`He.Zhang.Ren.ea.2016*1`
for details.
1. Why can't we just increase the complexity of functions without bound, even if the function classes are nested?
[Discussions](https://discuss.d2l.ai/t/85)
| github_jupyter |
Perusprosessissa Data Wrangling
<br>
https://en.wikipedia.org/wiki/Data_wrangling
<br>
eli datan valmistelu (ETL putsaus jne) vie yleensä 80% työajasta. Datan valmistelu koneoppimisen malleja varten:
<br>
https://nbviewer.jupyter.org/github/taanila/tilastoapu/blob/master/datan_valmistelu.ipynb
<br>
Luokittele sen jälkeen pystyrivit (sarakkeet) joko kategorisiin tai määrällisiin. Ja esitä analytiikan osa-alueen (perinteisen neliportaisen tason) mukaisia, siis ihmismielelle mielekkäitä, kysymyksiä aineistolle:
#### Ennakoiva analytiikka: Koneoppiminen
https://tilastoapu.wordpress.com/2019/08/03/koneoppiminen-ja-scikit-learn-kirjasto/
#### Lähestyminen valitaan sen mukaan<br>1. onko jo etukäteen saatavilla ennustettavan muuttujan todellisia arvoja (label) vahvistettuina tietoina (training set eli opetusdata, supervised learning)<br>2. vai ei (unsupervised learning) ja kolmantena mahdollisuutena on<br>3. algoritmin palkitseminen tai rankaiseminen (reinforcement learning) sen suorittaessa analytiikkaa
Koneoppiminen opetusdatasta (supervised learning):
<br>
Lähestyminen valitaan sen mukaan onko ennustettavan muuttujan (target) arvot kategorisia (discrete label) vai määrällisiä (continuous label):
<br>
Kategoriselle muuttujalle luokittelumalli
<br>
Määrälliselle muuttujalle regressio-malli
<br>
Feature matrix tarkoittaa muuttujia, jotka selittävät ennustettavaa muuttujaa (target)
<br>
<font color="grey">Koneoppiminen ilman opetusdataa (unsupervised learning):
<br>
Klusterointi-malli (pyritään löytämään yhteen kuuluvat havainnot, esim. K-means)
<br>
Yksinkertaistamisen malli (core: pienin mahdollinen määrä muuttujia joka selittää riittävästi asiaa, esim. pääkomponenttianalyysi)
<br>
<i>Syvä oppiminen (deep learning) pohjautuu neuroverkkomenetelmiin, joilla pyritään myös jäljittelemään ihmisaivojenkin toimintaa</i>
</font>
SUPERVISED LEARNING (<b><font color="red">eri malleja pitää vaan työläästi kokeilla</font></b>)
<br>
<b>Luokittelumalleja kategoriselle target muuttujalle</b>:
<br>
K lähintä naapuria (K nearest neighbor), Päätöspuut (decision trees), Gaussian Naiivi Bayes, Logistinen regressio.
<br>
käyttötarkoituksena esim:
<br>
OCR (Optical Character Recognition) eli kuvapikseleistä esim. auton rekisterinumeron muuttaminen tekstiksi, kuvapikseleistä esineen tunnistaminen, röntgenkuvasta sairauden tunnistaminen, maksuhäiriöön ajautumisen todennäköisyys, vakuutuspetos, roskapostin suodatus jne
<br>
HUOM! Mikäli kategorista targettia selittävään feature matrixiin otetaan mukaan kategorisia muuttujia, tulee ne kategoriset feature matrixin selittävät muuttujat muuttaa dikotomisiksi dummy muuttujiksi eli nolliksi ja ykkösiksi pandas kirjaston 'get_dummies()' komennolla
<br>
df_dummies = pd.get_dummies(df)
<br>
HUOM! Yllä oleva komento muuttaa data framen kaikki tekstimuotoiset kategoriset muuttujat dummyiksi, mutta jos kategorinen muuttuja on muuta muotoa, niin silloin tulee antaa lisäparametreilla pandakselle tieto mitkä sarakkeet muutetaan
<br>
<br>
<b>Useimmiten regressiomalli on target määrälliselle muuttujalle tarkoituksenmukaisin</b> (selittävä feature matrix on määrällinen muuttuja, koska korrelaatio):
<br>
lineaarinen regressio (etsitään suora viiva joka parhaiten kulkee havaintojen kautta)
<br>
käyttötarkoituksena esim:
<br>
kysynnän ennustaminen, asunnon hinta (onko saunaa, parveketta tms), käytetyn auton hinta jne
LUOKITTELU
```
#Tuodaan käytettäväksi data-analytiikan kirjasto pandas ja
#lyhennetään sitä kutsuttavaksi aliaksella pd
## https://pandas.pydata.org/docs/user_guide/dsintro.html#dataframe
import pandas as pd
#Tuodaan graafiseen esittämiseen matplotlib ja sen käyttöliittymäksi pyplot
## https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html?highlight=pyplot#module-matplotlib.pyplot
import matplotlib.pyplot as plt
#Vaaditaan vanhoissa Juptter Notebook versioissa, jotta kaaviot tulostuvat
%matplotlib inline
#Katsotaan millaisia erilaisia muotoilutyylejä on saatavilla
plt.style.available
#Valitaan graafinen esittäminen muotoiltavaksi tietyllä tyylillä
plt.style.use('seaborn-whitegrid')
#Asetetaan näytettävien rivien rajoite (ei rajoitetta)
pd.options.display.max_rows = None
#Asetetaan näytettävien sarakkeiden rajoite (ei rajoitetta)
pd.options.display.max_columns = None
#Tuodaan graafisen esittämisen tyylimäärittelyt erilaisilla kaaviolajeilla
#ja käytetään tätä tarvittaessa matplotlib esitysten "ylikirjoittamiseen"
#(korvaamiseen silloin kun on silmälle mukavampia vaihtoehtoja tarjolla)
import seaborn as sns
# Harjoitteluun tarkoitettu datasetti Kurjenmiekat-kasveista (englanniksi iris) löytyy seaborn-kirjastosta
## https://fi.wikipedia.org/wiki/Kurjenmiekat
iris = sns.load_dataset('iris')
#Tarkistetaan lähdetiedoston metatiedoista monessako sarakkeessa on
#minkäkin verran arvoja ja mitä tyyppiä ne sisältää
iris.info()
#Kurkataan 5 vikaa riviä niinkuin Linuxissa ja nähdään sitenkin rivien määrä
iris.tail()
##tai 5 ekaa riviä
##iris.head()
#Näytetään kolme pienintä arvoa sarakkeesta 'petal_length'
iris.nsmallest(n=3,columns='petal_length')
#Näytetään kolme suurinta arvoa sarakkeesta 'petal_length'
iris.nlargest(n=3,columns='petal_length')
#Numeromuotoisen sarakkeen suodatus
##Noudetaan data framesta Pandas kirjaston toiminnolla ne rivt, joissa
#sarakkeen 'petal_length arvo on suurempi kuin 6.3 (suodatetaan rajoittimella > 6.3)
iris[ iris['petal_length'] > 6.3 ]
##Uloimmat hakasulut viittaavat muuttujan iris "alkioon" / sarakkeeseen ja
#niiden hakasulkujen sisällä suoritetaan suodatus
```
Yllä olevassa taulukossa
<br>
sepal = verholehti
<br>
https://fi.wikipedia.org/wiki/Verhi%C3%B6
<br>
petal = terälehti
<br>
https://fi.wikipedia.org/wiki/Teri%C3%B6
<br>
Lajikkeet ovat Setosa, Versicolor ja Virginica
```
#Check for possible missing values per column
iris.isnull().sum()
#In a scatter-plot matrix, the different values of a categorical variable
#can be shown in different colours via the hue parameter
sns.pairplot(iris, hue='species')
```
Tiedon esittäminen visualisoituna (datasetin aineistosta graafisessa muodossa) paljastaa hyvin havainnollistavasti alarivillä Y-akselin 'petal_width' ja X-akselin 'petal_length' leikkauspisteen ruudukossa kuinka Setosa-lajike erottautuu selkeästi muista Kurjenmiekoista (iris) pelkästään petal (terälehti) pituuden ja leveyden osalta.
<br>
<br>
Yhtä selkeää eroa ei ole nähtävissä Versicolor ja Virginica lajikkeiden toisistaan erotteluun
Ottamalla kaikki verho- ja terälehti muuttujat ('sepal_length', 'sepal_width', 'petal_length' ja 'petal_width') mukaan feature matrixiin voitaneen koulutusdatasta kouluttaa hyvin osuva koneoppimisen malli, joka oppii lajittelemaan Kurjenmiekan sen verho- ja terälehtien mittojen mukaan tarkoituksenmukaiseen targettiin (lajikkeeseen). <b><font color="red">Eri malleja pitää vaan työläästi kokeilla</font></b>
Datan valmistelua (Data Wrangling) sen jakamiseksi koulutusdataan ja testidataan
```
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, the function that splits the data
from sklearn.model_selection import train_test_split
#By convention the feature matrix (explanatory variables) is a capital X
##Drop the species label
X = iris.drop('species', axis=1)
#By convention the target (the variable to predict) is a lowercase y
y = iris['species']
#The random_state value controls how the data is divided into training and test sets
##use the same value across experiments (the value itself has no special meaning),
##because a differently split dataset can lead to a very different model
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
###In practice the 'random_state' value used to split the sample has
###some effect on the prediction accuracy
### X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5)
#NOTE! Before deploying to production, the split is repeated many times with different
#split values, and something like an averaged solution ends up in production
#Inspect the shapes of the resulting pieces
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
```
### Ennakoivan analytiikan mallin sovitus datasettiin (1/4)
Koneoppimisessa kokeillaan erilaisia algoritmeja samaan historiadataan ja katsotaan mikä niistä arvaa parhaiten, kun ennustetta verrataan jo tiedossa oleviin toteumiin
#### 1. algoritmi kokeilu (ensimmäinen sovitusyritys: Miten malli sopii toteumaan)
#### KNN eli K-nearest neighbors (K-lähimmät naapurit) -menetelmässä
<b><font color="red">eri "K"-arvoja pitää vaan työläästi kokeilla</font></b>, jotta luotavan koneoppimisen malli opetusdatasta (supervised learning) luokittelee riittävän tarkasti uudet havainnot tarkoituksenmukaisiin kategorioihin
<br>
<br>
Suora copy+paste
<br>
"Etsitään luokiteltavalle havainnolle K lähintä naapuria opetusdatasta.
Luokiteltava havainto sijoitetaan siihen luokkaan, joka on enemmistönä K:n lähimmän naapurin joukossa."
<br>
https://nbviewer.jupyter.org/github/taanila/tilastoapu/blob/master/iris_knn.ipynb
```
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, the KNN comparison
from sklearn.neighbors import KNeighborsClassifier
#Create a model object in which the 'n_neighbors' parameter is 'K', i.e.
#based on the petal/sepal lengths and widths of an input iris flower,
#it is compared against its, in this example five, 5 nearest
#neighbours with similar measurements
##("find the K nearest neighbours of the observation in the training data")
#AND
#from that neighbourhood (of five, 5, in this example) we look at which
#class of the categorical variable 'species' (the iris species) the majority
# of the neighbours belongs to, and conclude that the input iris flower
# belongs to that same class
##"The observation is assigned to the class that forms the majority among its K nearest neighbours."
malli = KNeighborsClassifier(n_neighbors=5)
#The model is given as parameters:
#the feature matrix (variables that explain the change) and
#the target (the variable to predict, which changes with the feature matrix)
malli.fit(X_train, y_train)
###fit() fits the model to the data; it is the command that trains the machine
#The methods of the object can be listed in an IDE by typing 'malli.'
#and pressing Tab (or CTRL+SPACE)
malli.get_params()
```
#### 1. algoritmin soveltuvuuden arviointi: Miten ennuste osuu toteumaan
```
#The model object's predict method computes a prediction for the training data
y_train_malli = malli.predict(X_train)
#The model object's predict method computes a prediction for the test data
y_test_malli = malli.predict(X_test)
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, comparing prediction accuracy
from sklearn.metrics import accuracy_score
#Compare the prediction computed for the training data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_train, y_train_malli)
#Compare the prediction computed for the test data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_test, y_test_malli)
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, revealing the failed predictions
from sklearn.metrics import confusion_matrix
#Highlight the mispredicted cases in the training data
print(confusion_matrix(y_train, y_train_malli))
```
Yllä olevassa Kurjenmiekat-kasvin taulukossa ovat lajikkeet Setosa, Versicolor ja Virginica. Taulukkoa luetaan siten, että luotu ennustemalli on onnistunut ennustamaan
* ensimmäisellä rivillä (ja ensimmäisessä sarakkeessa) kaikki Setosat oikein
* keskimmäisellä rivillä (ja keskimmäisessä sarakkeessa) Versicolorista lähes kaikki oikein, mutta osa oli mennyt vahingossa viimeiseen sarakkeeseen Virginicalle
* viimeisellä rivillä (ja viimeisessä sarakkeessa) Virginicasta lähes kaikki oikein, mutta osa oli mennyt vahingossa keskimmäiseen sarakkeeseen Versicolorille
```
#Highlight the mispredicted cases in the test data
print(confusion_matrix(y_test, y_test_malli))
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
```
Vaikuttaa siltä, että ei olla ylimallinnettu, koska algoritmiltä piilossa olleelle datalle saadaan ainoastaan yksi pieleen mennyt ennuste
```
#Load a completely new iris dataset that lacks the
#known-in-advance 'species' label
##By convention the feature matrix (explanatory variables) is a capital X
Xnew = pd.read_excel('http://taanila.fi/irisnew.xlsx')
# For CSV see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
#CHECK WHETHER THESE ARE NEEDED:
##sep : str, default ','
##usecols : list-like or callable, optional
#With the pandas library, txt files are read as if they were csv files
##the separator character must then be specified
### sep='\s+'
#By convention the feature matrix (explanatory variables) is a capital X
##The model object's predict method classifies the completely new
##input into a category
malli.predict(Xnew)
```
Uudessa datasetissä ei yksinkertaisuuden vuoksi ollut kuin kolme havaintoa. Yllä olevasta tulosteesta näkee, että juuri koulutettu koneoppimisen algoritmi jakoi taulukon
* ensimmäisen rivin osalta Setosaksi
* toisen rivin osalta Virginicaksi
* kolmannen rivin osalta Versicoloriksi
### Ennakoivan analytiikan mallin sovitus datasettiin (2/4)
Koneoppimisessa kokeillaan erilaisia algoritmeja samaan historiadataan ja katsotaan mikä niistä arvaa parhaiten, kun ennustetta verrataan jo tiedossa oleviin toteumiin
#### 2. algoritmi kokeilu (ensimmäinen sovitusyritys: Miten malli sopii toteumaan)
#### Päätöspuu-menetelmässä
<b><font color="red">puun haarautumisen syvyyden eri arvoja pitää vaan työläästi kokeilla</font></b>, jotta luotavan koneoppimisen malli opetusdatasta (supervised learning) luokittelee riittävän tarkasti uudet havainnot tarkoituksenmukaisiin kategorioihin
<br>
<br>
Suora copy+paste
<br>
"Jokaisessa haarautumisessa algoritmi valitsee parhaiten erottelevan selittävän muuttujan ja siihen liittyvän rajakohdan.
Gini = millä todennäköisyydellä tehdään väärä luokittelu? Toimivassa päätöspuussa on haarautumisten jälkeen vain pieniä gini-arvoja."
<br>
https://nbviewer.jupyter.org/github/taanila/tilastoapu/blob/master/iris_dectree.ipynb
```
#The random_state value controls how the data is divided into training and test sets
##use the same value across experiments (the value itself has no special meaning),
##because a differently split dataset can lead to a very different model
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
###In practice the 'random_state' value used to split the sample has
###some effect on the prediction accuracy
### X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5)
#NOTE! Before deploying to production, the split is repeated many times with different
#split values, and something like an averaged solution ends up in production
#Inspect the shapes of the resulting pieces
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, building the decision tree
from sklearn import tree
#The tree module's functions can be listed in an IDE by typing 'tree.'
#and pressing Tab (or CTRL+SPACE)
tree.plot_tree
#Take the basic model 'DecisionTreeClassifier' into use
##and set the maximum branching depth of the decision tree
##with the 'max_depth' parameter
malli_2 = tree.DecisionTreeClassifier(max_depth = 4)
#Adjusting the depth changes the model's accuracy, but do not
#overfit with large values: a decision tree can reach
#100% training accuracy, which then no longer works
#with unseen data
#The model is given as parameters:
#the feature matrix (variables that explain the change) and
#the target (the variable to predict, which changes with the feature matrix)
malli_2.fit(X_train, y_train)
###fit() fits the model to the data; it is the command that trains the machine
#The model object's predict method computes a prediction for the training data
y_train_malli_2 = malli_2.predict(X_train)
#The model object's predict method computes a prediction for the test data
y_test_malli_2 = malli_2.predict(X_test)
#Compare the prediction computed for the training data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_train, y_train_malli_2)
#Compare the prediction computed for the test data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_test, y_test_malli_2)
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
#Highlight the mispredicted cases in the training data
print(confusion_matrix(y_train, y_train_malli_2))
```
Yllä olevassa Kurjenmiekat-kasvin taulukossa ovat lajikkeet Setosa, Versicolor ja Virginica. Taulukkoa luetaan siten, että luotu ennustemalli on onnistunut ennustamaan
* ensimmäisellä rivillä (ja ensimmäisessä sarakkeessa) kaikki Setosat oikein
* keskimmäisellä rivillä (ja keskimmäisessä sarakkeessa) kaikki Versicolorista oikein
* viimeisellä rivillä (ja viimeisessä sarakkeessa) kaikki Virginicasta oikein
```
#Highlight the mispredicted cases in the test data
print(confusion_matrix(y_test, y_test_malli_2))
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
```
Yllä olevassa Kurjenmiekat-kasvin taulukossa ovat lajikkeet Setosa, Versicolor ja Virginica. Taulukkoa luetaan siten, että luotu ennustemalli on onnistunut ennustamaan
* ensimmäisellä rivillä (ja ensimmäisessä sarakkeessa) kaikki Setosat oikein
* keskimmäisellä rivillä (ja keskimmäisessä sarakkeessa) Versicolorista lähes kaikki oikein, mutta osa oli mennyt vahingossa viimeiseen sarakkeeseen Virginicalle
* viimeisellä rivillä (ja viimeisessä sarakkeessa) kaikki Virginicat oikein
```
#Enlarge the plotting area beyond its default size
plt.figure(figsize=(18, 10))
#Render the decision-tree model (built behind the scenes) as a graphical visualisation
tree.plot_tree(malli_2)
#The tree module's functions can be listed in an IDE by typing 'tree.'
#and pressing Tab (or CTRL+SPACE)
```
"Jokaisessa haarautumisessa algoritmi valitsee parhaiten erottelevan selittävän muuttujan ja siihen liittyvän rajakohdan.
<br>
Gini = millä todennäköisyydellä tehdään väärä luokittelu? Toimivassa päätöspuussa on haarautumisten jälkeen vain pieniä gini-arvoja."
<br>
<br>
Yllä olevassa päätöspuussa ylimmällä rivillä lähtötilanteessa X[2] viittaa terälehden pituuteen ('petal_length') If Then haaroituksessa booleania
* vasemmalle mennään jos 'petal_length' on yhtäsuuri tai pienempi kuin 2,45 ja muutoin mennään oikealle (aiemmin rivillä 8 huomattiin tiedon visualisoinnissa kuinka kaikki Setosat olivat eroteltävissa terälehden avulla)
```
iris.head()
#By convention the feature matrix (explanatory variables) is a capital X
##The model object's predict method classifies the completely new
##input into a category
malli_2.predict(Xnew)
```
Uudessa datasetissä ei yksinkertaisuuden vuoksi ollut kuin kolme havaintoa. Yllä olevasta tulosteesta näkee, että juuri koulutettu koneoppimisen algoritmi jakoi taulukon
* ensimmäisen rivin osalta Setosaksi
* toisen rivin osalta Virginicaksi
* kolmannen rivin osalta Versicoloriksi
### Ennakoivan analytiikan mallin sovitus datasettiin (3/4)
Koneoppimisessa kokeillaan erilaisia algoritmeja samaan historiadataan ja katsotaan mikä niistä arvaa parhaiten, kun ennustetta verrataan jo tiedossa oleviin toteumiin
#### 3. algoritmi kokeilu (ensimmäinen sovitusyritys: Miten malli sopii toteumaan)
#### Gaussian Naive Bayes -menetelmässä
<br>
eri target kategorioihin luokittelussa feature matrixin selittävät muuttujat arvioidaan normaalijakauma-olettamalla, josta lasketaan todennäköisyys kategoriaan kuulumisesta
<br>
<br>
Suora copy+paste
<br>
"todennäköisyyksien lukuarvot eivät sellaisenaan ole luotettavia. Olennaista on mallin toteuttama luokittelu."
<br>
ja
<br>
"Mallin oletuksena on, että selittävien muuttujien arvot ovat kussakin luokassa toisistaan riippumattomia. Käytännössä Gaussian Naive Bayes toimii hyvin monenlaisten datojen kohdalla vaikka riippumattomuusoletus ei toteutuisikaan."
<br>
https://nbviewer.jupyter.org/github/taanila/tilastoapu/blob/master/iris_naive_bayes.ipynb
```
#The random_state value controls how the data is divided into training and test sets
##use the same value across experiments (the value itself has no special meaning),
##because a differently split dataset can lead to a very different model
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
###In practice the 'random_state' value used to split the sample has
###some effect on the prediction accuracy
### X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5)
#NOTE! Before deploying to production, the split is repeated many times with different
#split values, and something like an averaged solution ends up in production
#Inspect the shapes of the resulting pieces
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, the Bayes-theorem-based Gaussian Naive Bayes method
from sklearn.naive_bayes import GaussianNB
#Create the model object
malli_3 = GaussianNB()
#The model is given as parameters:
#the feature matrix (variables that explain the change) and
#the target (the variable to predict, which changes with the feature matrix)
malli_3.fit(X_train, y_train)
###fit() fits the model to the data; it is the command that trains the machine
#The methods of the object can be listed in an IDE by typing 'malli.'
#and pressing Tab (or CTRL+SPACE)
malli_3.get_params()
#The model object's predict method computes a prediction for the training data
y_train_malli_3 = malli_3.predict(X_train)
#The model object's predict method computes a prediction for the test data
y_test_malli_3 = malli_3.predict(X_test)
#Compare the prediction computed for the training data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_train, y_train_malli_3)
#Compare the prediction computed for the test data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_test, y_test_malli_3)
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
#Highlight the mispredicted cases in the training data
print(confusion_matrix(y_train, y_train_malli_3))
```
Yllä olevassa Kurjenmiekat-kasvin taulukossa ovat lajikkeet Setosa, Versicolor ja Virginica. Taulukkoa luetaan siten, että luotu ennustemalli on onnistunut ennustamaan
* ensimmäisellä rivillä (ja ensimmäisessä sarakkeessa) kaikki Setosat oikein
* keskimmäisellä rivillä (ja keskimmäisessä sarakkeessa) Versicolorista lähes kaikki oikein, mutta osa oli mennyt vahingossa viimeiseen sarakkeeseen Virginicalle
* viimeisellä rivillä (ja viimeisessä sarakkeessa) Virginicasta lähes kaikki oikein, mutta osa oli mennyt vahingossa keskimmäiseen sarakkeeseen Versicolorille
```
#Highlight the mispredicted cases in the test data
print(confusion_matrix(y_test, y_test_malli_3))
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
```
Yllä olevassa Kurjenmiekat-kasvin taulukossa ovat lajikkeet Setosa, Versicolor ja Virginica. Taulukkoa luetaan siten, että luotu ennustemalli on onnistunut ennustamaan
* ensimmäisellä rivillä (ja ensimmäisessä sarakkeessa) kaikki Setosat oikein
* keskimmäisellä rivillä (ja keskimmäisessä sarakkeessa) kaikki Versicolorista oikein
* viimeisellä rivillä (ja viimeisessä sarakkeessa) Virginicasta lähes kaikki oikein, mutta osa oli mennyt vahingossa keskimmäiseen sarakkeeseen Versicolorille
```
#By convention the feature matrix (explanatory variables) is a capital X
##The model object's predict method classifies the completely new
##input into a category
malli_3.predict(Xnew)
```
Uudessa datasetissä ei yksinkertaisuuden vuoksi ollut kuin kolme havaintoa. Yllä olevasta tulosteesta näkee, että juuri koulutettu koneoppimisen algoritmi jakoi taulukon
* ensimmäisen rivin osalta Setosaksi
* toisen rivin osalta Virginicaksi
* kolmannen rivin osalta Versicoloriksi
### Ennakoivan analytiikan mallin sovitus datasettiin (4/4)
Koneoppimisessa kokeillaan erilaisia algoritmeja samaan historiadataan ja katsotaan mikä niistä arvaa parhaiten, kun ennustetta verrataan jo tiedossa oleviin toteumiin
#### 4. algoritmi kokeilu (ensimmäinen sovitusyritys: Miten malli sopii toteumaan)
#### Logistinen regressio -menetelmästä
voi lukea lisää sijainnissa https://tilastoapu.wordpress.com/2014/04/25/logistinen-regressio/
<br>
Logistisella regressiolla saadaan ennustavan luokittelun lisäksi myös hyviä todennäköisyyksiä luokittelun paikkaansapitävyydelle
<br>
<br>
Suora copy+paste
<br>
"Logistinen regressio on luokittelumenetelmä eikä sitä pidä sekoittaa tavalliseen regressioon."
<br>
https://nbviewer.jupyter.org/github/taanila/tilastoapu/blob/master/iris_logr.ipynb
```
#The random_state value controls how the data is divided into training and test sets
##use the same value across experiments (the value itself has no special meaning),
##because a differently split dataset can lead to a very different model
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
###In practice the 'random_state' value used to split the sample has
###some effect on the prediction accuracy
### X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5)
#NOTE! Before deploying to production, the split is repeated many times with different
#split values, and something like an averaged solution ends up in production
#Inspect the shapes of the resulting pieces
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
#Nothing is installed in Anaconda; "from foo.bar" only states where the
##imported functionality comes from — here, the logistic regression model
from sklearn.linear_model import LogisticRegression
#Create the model object; raise the iteration limit to silence the convergence warning
malli_4 = LogisticRegression(max_iter = 10000)
#The model is given as parameters:
#the feature matrix (variables that explain the change) and
#the target (the variable to predict, which changes with the feature matrix)
malli_4.fit(X_train, y_train)
###fit() fits the model to the data; it is the command that trains the machine
#The methods of the object can be listed in an IDE by typing 'malli.'
#and pressing Tab (or CTRL+SPACE)
malli_4.get_params()
#The model object's predict method computes a prediction for the training data
y_train_malli_4 = malli_4.predict(X_train)
#The model object's predict method computes a prediction for the test data
y_test_malli_4 = malli_4.predict(X_test)
#Compare the prediction computed for the training data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_train, y_train_malli_4)
#Compare the prediction computed for the test data against the known
#true values (labels): the prediction accuracy as a percentage
accuracy_score(y_test, y_test_malli_4)
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
#Highlight the mispredicted cases in the training data
print(confusion_matrix(y_train, y_train_malli_4))
#Highlight the mispredicted cases in the test data
print(confusion_matrix(y_test, y_test_malli_4))
##The algorithm has not seen the test data during training; the test data
##verifies that the model was not overfitted during training
```
Yllä olevassa Kurjenmiekat-kasvin taulukossa ovat lajikkeet Setosa, Versicolor ja Virginica. Taulukkoa luetaan siten, että luotu ennustemalli on onnistunut ennustamaan
* ensimmäisellä rivillä (ja ensimmäisessä sarakkeessa) kaikki Setosat oikein
* keskimmäisellä rivillä (ja keskimmäisessä sarakkeessa) Versicolorista lähes kaikki oikein, mutta osa oli mennyt vahingossa viimeiseen sarakkeeseen Virginicalle
* viimeisellä rivillä (ja viimeisessä sarakkeessa) kaikki Virginicat oikein
```
#By convention the feature matrix (explanatory variables) is a capital X
##The model object's predict method classifies the completely new
##input into a category
malli_4.predict(Xnew)
```
Uudessa datasetissä ei yksinkertaisuuden vuoksi ollut kuin kolme havaintoa. Yllä olevasta tulosteesta näkee, että juuri koulutettu koneoppimisen algoritmi jakoi taulukon
* ensimmäisen rivin osalta Setosaksi
* toisen rivin osalta Virginicaksi
* kolmannen rivin osalta Versicoloriksi
```
#In addition to predictive classification, logistic regression also
#gives good probabilities for the correctness of the classification
malli_4.predict_proba(Xnew)
```
Yllä olevaa taulukkoa luetaan siten, että taulukon ensimmäinen havainto on 96,63 % todennäköisyydellä Setosa ja 3,37 % todennäköisyydellä Versicolor sekä 0,00001 % todennäköisyydellä Virginica
```
# Record and print the time at which this notebook run finished.
from datetime import datetime

finished_at = datetime.now()
print(f'Lopeteltu {finished_at}')
```
| github_jupyter |
# Breast Cancer Diagnosis
In this notebook we will apply the LogitBoost algorithm to a toy dataset to classify cases of breast cancer as benign or malignant.
## Imports
```
# Numerics and plotting
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Apply one global seaborn theme so all figures in the notebook share a look
sns.set(style='darkgrid', palette='colorblind', color_codes=True)
# scikit-learn: toy dataset, train/test splitting, evaluation metrics, t-SNE embedding
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.manifold import TSNE
# Third-party LogitBoost classifier used throughout this notebook
from logitboost import LogitBoost
```
## Loading the Data
The breast cancer dataset imported from [scikit-learn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer.html) contains 569 samples with 30 real, positive features (including cancer mass attributes like mean radius, mean texture, mean perimeter, et cetera).
Of the samples, 212 are labeled "malignant" and 357 are labeled "benign".
We load this data into a 569-by-30 feature matrix and a 569-dimensional target vector.
Then we randomly shuffle the data and designate two thirds for training and one third for testing.
```
# Load the scikit-learn breast cancer dataset (569 samples, 30 features).
data = load_breast_cancer()
# Feature matrix: shape (569, 30).
X = data.data
# Target vector as string labels ('malignant'/'benign') instead of 0/1 codes.
y = data.target_names[data.target]
# Number of distinct classes (2).
# BUG FIX: the original read data.target.size, which is the number of
# SAMPLES (569), not the number of classes.
n_classes = data.target_names.size
# Shuffle data and split it into training/testing samples
# (stratify keeps the benign/malignant ratio equal in both splits).
test_size = 1 / 3
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
                                                    shuffle=True, stratify=y,
                                                    random_state=0)
```
## Visualizing the Training Set
Although the features are 30-dimensional, we can visualize the training set by using [t-distributed stochastic neighbor embedding](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) (t-SNE) to project the features onto a 2-dimensional space.
```
# Project the 30-dimensional training features onto 2 dimensions with t-SNE
# (fixed random_state for a reproducible embedding).
tsne = TSNE(n_components=2, random_state=0)
X_train_tsne = tsne.fit_transform(X_train)
plt.figure(figsize=(10, 8))
# Boolean masks that separate the two classes for colour-coded scatter plots.
mask_benign = (y_train == 'benign')
mask_malignant = (y_train == 'malignant')
# Benign samples: green squares; malignant samples: red circles.
plt.scatter(X_train_tsne[mask_benign, 0], X_train_tsne[mask_benign, 1],
            marker='s', c='g', label='benign', edgecolor='k', alpha=0.7)
plt.scatter(X_train_tsne[mask_malignant, 0], X_train_tsne[mask_malignant, 1],
            marker='o', c='r', label='malignant', edgecolor='k', alpha=0.7)
plt.title('t-SNE plot of the training data')
plt.xlabel('1st embedding axis')
plt.ylabel('2nd embedding axis')
plt.legend(loc='best', frameon=True, shadow=True)
plt.tight_layout()
plt.show()
plt.close()
```
## Fitting the LogitBoost Model
Next, we initialize a LogitBoost classifier and fit it to the training data.
By default, LogitBoost uses decision stumps (decision trees with depth 1, i.e., a single split) as its base estimator.
```
# Build a LogitBoost ensemble of 200 base estimators (decision stumps by
# default) and fit it to the training split; random_state fixes the run.
lboost = LogitBoost(n_estimators=200, random_state=0)
lboost.fit(X_train, y_train)
```
## Prediction Accuracy
As a first indicator of how well the model predicts the correct labels, we can check its accuracy score (number of correct predictions over the number of total predictions) on the training and test data.
If the classifier is good, then the accuracy score should be close to 1.
```
# Predict labels for both splits with the fitted ensemble.
y_pred_train = lboost.predict(X_train)
y_pred_test = lboost.predict(X_test)
# Fraction of correct predictions on each split; a large train/test gap
# would indicate overfitting.
accuracy_train = accuracy_score(y_train, y_pred_train)
accuracy_test = accuracy_score(y_test, y_pred_test)
print('Training accuracy: %.4f' % accuracy_train)
print('Test accuracy: %.4f' % accuracy_test)
```
## Precision and Recall
We can also report our LogitBoost model's [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall).
```
# Per-class precision/recall/F1 summaries for both splits.
report_train = classification_report(y_train, y_pred_train)
report_test = classification_report(y_test, y_pred_test)
print('Training\n%s' % report_train)
print('Testing\n%s' % report_test)
```
## Visualizing Accuracy During Boosting
```
# Boosting iteration numbers 1..n_estimators for the x-axis.
iterations = np.arange(1, lboost.n_estimators + 1)
# Accuracy of the partial ensemble after each boosting iteration,
# on the training and test splits respectively.
staged_accuracy_train = list(lboost.staged_score(X_train, y_train))
staged_accuracy_test = list(lboost.staged_score(X_test, y_test))
plt.figure(figsize=(10, 8))
plt.plot(iterations, staged_accuracy_train, label='Training', marker='.')
plt.plot(iterations, staged_accuracy_test, label='Test', marker='.')
plt.xlabel('Iteration')
plt.ylabel('Accuracy')
plt.title('Ensemble accuracy during each boosting iteration')
plt.legend(loc='best', shadow=True, frameon=True)
plt.tight_layout()
plt.show()
plt.close()
```
## Contribution of Each Estimator in the Ensemble
Like other ensemble models, the LogitBoost model can suffer from *over-specialization*: estimators added to the ensemble in later boosting iterations make relatively small or even negligible contributions toward improving the overall predictions on the training set.
This can be quantified by computing the mean of the absolute prediction of each estimator in the ensemble taken over the training set.
```
# Mean absolute prediction of each estimator over the training set;
# small late values indicate over-specialization of later estimators.
contrib_train = lboost.contributions(X_train)
plt.figure(figsize=(10, 8))
plt.plot(iterations, contrib_train, lw=2)
plt.xlabel('Estimator Number')
plt.ylabel('Average Absolute Contribution')
plt.title('Average absolute contribution of the estimators in the ensemble')
plt.show()
plt.close()
```
## Appendix: System Information
This is included for replicability.
```
# sys_info.py is a file in the same directory as these example notebooks:
# doc/source/examples
import sys_info
```
| github_jupyter |
Title: Are the Warriors better without Kevin Durant?
Date: 2019-06-10 12:00
Tags: python
Slug: ab_kd
In the media, there have been debates about whether or not the Golden State Warriors (GSW) are better without Kevin Durant (KD). From the eye-test, it's laughable to even suggest this, as he's one of the top 3 players in the league (Lebron, KD, Kawhi). Nonetheless, people argue that ball movement is better without him, and therefore make the GSW more lethal.
But, just because the Warriors won a title without KD, does not mean they don't need him more than ever. At the time of writing, the Toronto Raptors lead 3-1 in the Finals! #WeTheNorth 🦖🍁
Using Bayesian estimation, we can test this hypothesis, by comparing two treatment groups, games played with KD and without KD.
Bayesian statistics are an excellent tool to reach for when sample sizes are small, as we can introduce explicit assumptions into the model, when there aren't thousands of observations.
---
# Primer on Bayesian Statistics
<img src="images/dist.png" class="img-responsive">
$$P\left(model\;|\;data\right) = \frac{P\left(data\;|\;model\right)}{P(data)}\; P\left(model\right)$$
---
$$ \text{prior} = P\left(model\right) $$
> The **prior** is our belief in the model given no additional information. In our example, this is the mean win % with KD playing.
$$ \text{likelihood} = P\left(data\;|\;model\right) $$
> The **likelihood** is the probability of the data we observed occurring given the model.
$$ \text{marginal probability of data} = P(data) $$
> The **marginal probability** of the data is the probability that our data are observed regardless of what model we choose or believe in.
$$ \text{posterior} = P\left(model\;|\;data\right) $$
> The **posterior** is our _updated_ belief in the model given the new data we have observed. Bayesian statistics are all about updating a prior belief we have about the world with new data, so we're transforming our _prior_ belief into this new _posterior_ belief about the world. <br><br> In this example, this is the GSW mean winning % with KD playing, given the game logs from the past three seasons.
Note, a Bayesian approach is different from a Frequentist's. Rather than only testing whether two groups are different, we instead pursue an estimate of _how_ different they are, from the posterior distribution.
## Objective
To calculate the distribution of the posterior probability of GSW mean winning % with KD and without KD.
Moreover, we can calculate the _delta_ between both probabilities to determine if the mean is statistically different from zero (i.e. no difference with or without him).
---
# Observed Data
```
import pandas as pd
import numpy as np
import scipy.stats as stats
import pymc3 as pm
from IPython.display import HTML
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
from IPython.core.pylabtools import figsize
import matplotlib.pylab as pylab
# Enlarge the default matplotlib font sizes for readability.
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (15, 10),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
```
As the competition is much higher in the playoffs, let's analyze Playoff vs. Regular Season data separately. We can run one test on the regular season, and one test for the playoffs.
Data is from [Basketball Reference](https://www.basketball-reference.com/).
---
# Regular Season
<table class="table">
<thead class="table-responsive table-bordered">
<tr>
<th scope="col">Regular Season</th>
<th scope="col">With Kevin Durant</th>
<th scope="col">No Kevin Durant</th>
<th scope="col">Notes</th>
</tr>
</thead>
<tbody>
<tr>
<td>2019</td>
<td>0.69 <br> {'W': 54, 'L': 24} </td>
<td>0.75 <br> {'W': 3, 'L': 1} </td>
<td>Record is better when KD is out, but small sample size.</td>
</tr>
<tr>
<td>2018</td>
<td>0.72 <br> {'W': 49, 'L': 19} </td>
<td>0.64 <br> {'W': 9, 'L': 5} </td>
<td>Record is better when KD plays</td>
</tr>
<tr>
<td>2017</td>
<td>0.82 <br> {'W': 51, 'L': 11} </td>
<td>0.80 <br> {'W': 16, 'L': 4} </td>
<td>Record is better when KD plays</td>
</tr>
<tr>
<td>Total (3 seasons)</td>
<td>0.740 <br> {'W': 154, 'L': 54} </td>
<td>0.737 <br> {'W': 28, 'L': 10} </td>
<td>Record is better when KD plays</td>
</tr>
</tbody>
</table>
Over the last three seasons with the Warriors, KD has missed 38 regular season games, and played in 208.
```
def occurrences(year, kd=True):
    '''occurrences(2019, kd=True)

    Return win/loss indicators for GSW games in one season, split by
    whether Kevin Durant was active.

    Parameters
    ----------
    year : int or str
        Season identifier used to locate ``./data/{year}.txt``
        (e.g. ``2019`` or ``'2019_playoffs'``).
    kd : bool, default True
        True keeps games KD played in; False keeps games he missed.

    Returns
    -------
    list[int]
        1 for a win, 0 for a loss, one entry per qualifying game.
    '''
    # Load the Basketball Reference game log for the season.
    data = pd.read_csv(f'./data/{year}.txt', sep=',')
    new_columns = ['Rk', 'G', 'Date', 'Age', 'Tm', 'Away', 'Opp', 'Result', 'GS',
                   'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', 'FT', 'FTA', 'FT%', 'ORB',
                   'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS', 'GmSc', '+/-']
    data.columns = new_columns
    # 'Did Not Dress' and 'Inactive' both mean KD did not play.
    data.GS = np.where(data.GS == 'Did Not Dress', 'Inactive', data.GS)
    if not kd:  # idiomatic form of the original `kd == False`
        game_logs = list(data[data.GS == 'Inactive'].Result)
    else:
        game_logs = list(data[data.GS != 'Inactive'].Result)
    # Result strings look like 'W (+5)' / 'L (-3)'; the first token is W/L.
    results = [game.split(' ')[0] for game in game_logs]
    # Return directly instead of binding a local that shadowed the function name.
    return [1 if result == 'W' else 0 for result in results]
# Concatenate per-season 1/0 win indicators across the last three seasons.
regular_season_with_kd = occurrences(2019, kd=True)+occurrences(2018, kd=True)+occurrences(2017, kd=True)
regular_season_no_kd = occurrences(2019, kd=False)+occurrences(2018, kd=False)+occurrences(2017, kd=False)
# Mean of the 0/1 indicators is the observed win rate.
print(f'Observed win % when Kevin Durant plays: {round(np.mean(regular_season_with_kd),4)}')
print(f'Observed win % when Kevin Durant does not play: {round(np.mean(regular_season_no_kd),4)}')
```
* Note, we do not know the true win %, only the observed win %. We infer the true quantity from the observed data.
* Notice the unequal sample sizes (208 vs. 38), but this is not a problem in Bayesian analysis. We will see the uncertainty of the smaller sample size captured in the posterior distribution.
---
## Bayesian Tests with MCMC
* Markov Chain Monte Carlo (MCMC) is a method to find the posterior distribution of our parameter of interest.
> This type of algorithm generates Monte Carlo simulations in a way that relies on the Markov property, then accepts these simulations at a certain rate to get the posterior distribution.
* We will use [PyMC3](https://docs.pymc.io/), a probabilistic library for Python to generate MC simulations.
* Before seeing any of the data, my prior is that GSW will win between 50% - 90% of their games, because they are an above average basketball team, and no team has ever won more than 72 games.
```
# Instantiate
observations_A = regular_season_with_kd
observations_B = regular_season_no_kd
with pm.Model() as model:
    # Assume Uniform priors for p_A and p_B (prior belief: GSW win 50-90%)
    p_A = pm.Uniform("p_A", 0.5, .9)
    p_B = pm.Uniform("p_B", 0.5, .9)
    # Define the deterministic delta function. This is our unknown of interest.
    # Delta is deterministic, no uncertainty beyond p_A and p_B
    delta = pm.Deterministic("delta", p_A - p_B)
    # We have two observation datasets: A, B
    # Posterior distribution is Bernoulli
    obs_A = pm.Bernoulli("obs_A", p_A, observed=observations_A)
    obs_B = pm.Bernoulli("obs_B", p_B, observed=observations_B)
    # Draw samples from the posterior distribution
    trace = pm.sample(20000)
# Discard the first 1000 draws as burn-in so unconverged early samples
# do not bias the posterior summaries.
burned_trace=trace[1000:]
```
* Using PyMC3, we generated a trace, or chain of values from the posterior distribution
* Generated 20,000 samples from the posterior distribution (20,000 samples / chain / core)
Because this algorithm needs to converge, we set a number of tuning steps (1,000) to occur first and where the algorithm should "start exploring." It's good to see the Markov Chains overlap, which suggests convergence.
```
# Visual convergence check: overlapping chains suggest convergence.
pm.traceplot(trace);
# plt.savefig('trace.svg');
# Posterior summary table: mean, sd, and 95% credible interval bounds.
df = pm.summary(burned_trace).round(2)[['mean', 'sd', 'hpd_2.5', 'hpd_97.5']]
HTML(df.to_html(classes="table table-responsive table-striped table-bordered"))
```
* Unlike with confidence intervals (frequentist), there is a measure of probability with the credible interval.
* There is a 95% probability that the true win rate with KD is in the interval (0.68, 0.79).
* There is a 95% probability that the true win rate with no KD is in the interval (0.59, 0.85).
```
# Pull the posterior sample arrays out of the burned trace.
p_A_samples = burned_trace["p_A"]
p_B_samples = burned_trace["p_B"]
delta_samples = burned_trace["delta"]
figsize(15, 10)
# Three stacked panels: posterior of p_A, p_B, and delta, each with the
# posterior mean and 2.5%/97.5% credible-interval bounds marked.
ax = plt.subplot(311)
plt.xlim(0, 1)
plt.hist(p_A_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_A$", color="#006BB6", density=True)
plt.vlines(df.iloc[0]["mean"], 0, 12.5, color="white", alpha=0.5,linestyle="--",
           label=f'mean')
plt.vlines(df.iloc[0]["hpd_2.5"], 0, 1.3, color="black", alpha=0.5,linestyle="--",
           label='2.5%')
plt.vlines(df.iloc[0]["hpd_97.5"], 0, 1.3, color="black", alpha=0.5,linestyle="--",
           label='97.5%')
plt.legend(loc="upper right")
plt.title("Regular Season \n Posterior distributions of $p_A$, $p_B$, and delta unknowns \n\n $p_A$: Mean Win % with KD")
ax = plt.subplot(312)
plt.xlim(0, 1)
plt.hist(p_B_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_B$", color="#FDB927", density=True)
plt.vlines(df.iloc[1]["mean"], 0, 5.5, color="white", alpha=0.5,linestyle="--",
           label=f'mean')
plt.vlines(df.iloc[1]["hpd_2.5"], 0, .8, color="black", alpha=0.5,linestyle="--",
           label='2.5%')
plt.vlines(df.iloc[1]["hpd_97.5"], 0, .8, color="black", alpha=0.5,linestyle="--",
           label='97.5%')
plt.legend(loc="upper right")
plt.title("$p_B$: Mean Win % No KD")
ax = plt.subplot(313)
plt.xlim(-0.5, 0.5)
plt.hist(delta_samples, histtype='stepfilled', bins=30, alpha=0.85,
         label="posterior of delta", color="#BE3A34", density=True)
plt.vlines(df.iloc[2]["mean"], 0, 5, color="white", alpha=0.5,linestyle="--",
           label=f'mean delta')
plt.vlines(df.iloc[2]["hpd_2.5"], 0, 1, color="black", alpha=0.5,linestyle="--",
           label='2.5%')
plt.vlines(df.iloc[2]["hpd_97.5"], 0, 1, color="black", alpha=0.5,linestyle="--",
           label='97.5%')
plt.legend(loc="upper right");
plt.title("$delta$ = $p_A - p_B$")
plt.savefig('reg_season.svg');
```
Note, the 2.5% and 97.5% markers indicate the quantiles for the credible interval, similar to the confidence interval in frequentist statistics.
---
## Results
* In the third graph, the posterior win rate is 1.2% higher when KD plays in the regular season.
* Observe that because we have less data for when KD is out, our posterior distribution of 𝑝𝐵 is wider, implying we are less certain about the true value of 𝑝𝐵 than we are of 𝑝𝐴. The 95% credible interval is much wider for $p_B$, as there is a smaller sample size, for when KD did not play. We are less certain that the GSW wins 73% of the time without KD.
* The difference in sample sizes ($N_B$ < $N_A$) naturally fits into Bayesian analysis, whereas you need the same populations for frequentist approach!
```
# Count the number of samples less than 0, i.e. the area under the curve
# (fraction of posterior delta mass on each side of zero).
print("Probability that GSW is worse with Kevin Durant in the regular season: %.2f" % \
    np.mean(delta_samples < 0))
print("Probability that GSW is better with Kevin Durant in the regular season: %.2f" % \
    np.mean(delta_samples > 0))
```
The probabilities are pretty close, so we can chalk this up to the Warriors having an experienced supporting cast.
There is significant overlap between the distribution of the posterior of pA and the posterior of pB, so one is not better than the other with high probability. The majority of the distribution of delta is around 0, so there is no statistically significant difference between the groups in the regular season.
Ideally, we should perform more trials when KD is injured (as each data point for scenario B contributes more inferential power than each additional point for scenario A). One could do a similar analysis for when he played on the Oklahoma City Thunder.
---
# Playoffs
## Do superstars shine when the stakes are highest?
<table class="table">
<thead class="table-responsive table-bordered">
<tr>
<th scope="col">Playoffs</th>
<th scope="col">With Kevin Durant</th>
<th scope="col">No Kevin Durant</th>
<th scope="col">Notes</th>
</tr>
</thead>
<tbody>
<tr>
<td>2019</td>
<td>0.64 <br> {'W': 7, 'L': 4} </td>
<td>0.66 <br> {'W': 6, 'L': 3} </td>
<td>Record is marginally better when KD is out, but small sample size. Skewed by Portland series, which GSW won 4-0 with KD injured.</td>
</tr>
<tr>
<td>2018</td>
<td>0.76 <br> {'W': 16, 'L': 5} </td>
<td>n/a <br> {'W': 0, 'L': 0} </td>
<td>KD did not miss any games. Won Championship.</td>
</tr>
<tr>
<td>2017</td>
<td>0.82 <br> {'W': 14, 'L': 1} </td>
<td>1 <br> {'W': 2, 'L': 0}. Small sample size. </td>
<td>Won championship.</td>
</tr>
<td>Total (3 seasons)</td>
<td>0.79 <br> {'W': 37, 'L': 10} </td>
<td>0.73 <br> {'W': 8, 'L': 3} </td>
<td>Record is better when KD plays</td>
</tbody>
</table>
```
# Same aggregation as the regular season, but for the playoff game logs.
playoffs_with_kd = occurrences('2019_playoffs', kd=True)+occurrences('2018_playoffs', kd=True)+occurrences('2017_playoffs', kd=True)
playoffs_no_kd = occurrences('2019_playoffs', kd=False)+occurrences('2018_playoffs', kd=False)+occurrences('2017_playoffs', kd=False)
print(f'Observed win % when Kevin Durant plays: {round(np.mean(playoffs_with_kd),2)}')
print(f'Observed win % when Kevin Durant does not play: {round(np.mean(playoffs_no_kd),2)}')
```
Over the last three playoff runs with the Warriors, KD has missed 11, and played in 47.
See how the difference is much more pronounced with more data across three seasons. Let's test, in the same way, whether the GSW have a higher win % with KD in the playoffs.
```
playoff_obs_A = playoffs_with_kd
playoff_obs_B = playoffs_no_kd
with pm.Model() as playoff_model:
    # Uninformative Uniform(0, 1) priors this time — wider than the
    # Uniform(0.5, 0.9) used for the regular-season test.
    playoff_p_A = pm.Uniform("playoff_p_A", 0, 1)
    playoff_p_B = pm.Uniform("playoff_p_B", 0, 1)
    playoff_delta = pm.Deterministic("playoff_delta", playoff_p_A - playoff_p_B)
    # NOTE(review): these rebind the observed-data names to the random
    # variables; the data lists were already captured via `observed=`.
    playoff_obs_A = pm.Bernoulli("playoff_obs_A", playoff_p_A, observed=playoff_obs_A)
    playoff_obs_B = pm.Bernoulli("playoff_obs_B", playoff_p_B, observed=playoff_obs_B)
    playoff_trace = pm.sample(20000)
# Drop the first 1000 draws as burn-in.
playoff_burned_trace=playoff_trace[1000:]
df2 = pm.summary(playoff_burned_trace).round(2)[['mean', 'sd', 'hpd_2.5', 'hpd_97.5']]
HTML(df2.to_html(classes="table table-responsive table-striped table-bordered"))
playoff_p_A_samples = playoff_burned_trace['playoff_p_A']
playoff_p_B_samples = playoff_burned_trace["playoff_p_B"]
playoff_delta_samples = playoff_burned_trace["playoff_delta"]
figsize(15, 10)
#histogram of posteriors
ax = plt.subplot(311)
plt.xlim(0, 1)
plt.hist(playoff_p_A_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_A$", color="#006BB6", density=True)
plt.vlines(df2.iloc[0]["mean"], 0, 7.5, color="white", alpha=0.5,linestyle="--",
           label=f'mean delta')
plt.vlines(df2.iloc[0]["hpd_2.5"], 0, 1, color="black", alpha=0.5,linestyle="--",
           label='2.5%')
plt.vlines(df2.iloc[0]["hpd_97.5"], 0, 1, color="black", alpha=0.5,linestyle="--",
           label='97.5%')
plt.legend(loc="upper right")
plt.title("Playoffs \n Posterior distributions of $p_A$, $p_B$, and delta unknowns \n\n $p_A$: Mean Win % with KD")
ax = plt.subplot(312)
plt.xlim(0, 1)
plt.hist(playoff_p_B_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_B$", color="#FDB927", density=True)
plt.vlines(df2.iloc[1]["mean"], 0, 3, color="white", alpha=0.5,linestyle="--",
           label=f'mean delta')
plt.vlines(df2.iloc[1]["hpd_2.5"], 0, .8, color="black", alpha=0.5,linestyle="--",
           label='2.5%')
plt.vlines(df2.iloc[1]["hpd_97.5"], 0, .8, color="black", alpha=0.5,linestyle="--",
           label='97.5%')
plt.legend(loc="upper right")
plt.title("$p_B$: Mean Win % No KD")
ax = plt.subplot(313)
plt.xlim(-0.5, 0.5)
plt.hist(playoff_delta_samples, histtype='stepfilled', bins=30, alpha=0.85,
         label="posterior of delta", color="#BE3A34", density=True)
plt.vlines(df2.iloc[2]["mean"], 0, 3, color="white", alpha=0.5,linestyle="--",
           label=f'mean delta')
plt.vlines(df2.iloc[2]["hpd_2.5"], 0, 0.25, color="black", alpha=0.5,linestyle="--",
           label='2.5%')
plt.vlines(df2.iloc[2]["hpd_97.5"], 0, 0.25, color="black", alpha=0.5,linestyle="--",
           label='97.5%')
plt.legend(loc="upper right");
plt.title("$delta$: $p_A - p_B$")
plt.savefig('playoffs.svg');
# Count the number of samples less than 0, i.e. the area under the curve
print("Probability that GSW is worse with Kevin Durant in the playoffs: %.2f" % \
    np.mean(playoff_delta_samples < 0))
print("Probability that GSW is better with Kevin Durant in the playoffs: %.2f" % \
    np.mean(playoff_delta_samples > 0))
```
---
## Are the Warriors better without Kevin Durant? No.
By combining results from the past three seasons, we obtain a larger test group, which allows us to observe a real change vs. looking at the pure stats for a single year.
We can see that while delta=0 (i.e. no effect when KD plays) is in the credible interval at 95%, the majority of the distribution is above delta=0, implying the treatment group with KD is likely better than the group without KD. In fact, the probability that GSW is better with Kevin Durant in the playoffs is 71%, a significant improvement than 55% in the regular season!
Superstars make a significant difference. The regular season is where you make your name, but the postseason is where you make your fame. The delta is 8% higher with KD. That's the advantage you gain with a player of his caliber, as he can hit clutch shots when it matters most.
As a basketball fan, I hope to see Kevin Durant healthy and back in action soon.
# References
* https://multithreaded.stitchfix.com/blog/2015/05/26/significant-sample/
* https://multithreaded.stitchfix.com/blog/2015/02/12/may-bayes-theorem-be-with-you/
| github_jupyter |
it is a playground notebook for related data, such as track length, waypoint distance, car size
```
# DeepRacer reward-function playground: throwaway scratch cells exploring
# track length, step timing, and a "rabbit chase" pointing reward.
import math
7*15
100/105
1/105 *10
100/300
1/2
from race_utils import SampleGenerator
generator = SampleGenerator()
testing_param = generator.random_sample()
waypoints = testing_param['waypoints']
len(waypoints)
# Average spacing between consecutive waypoints.
testing_param['track_length']/len(waypoints)
step_time = 1/15
1.4 * step_time
4.0 * step_time
testing_param['closest_waypoints']
params = testing_param
x = params['x']
y = params['y']
heading = params['heading']
waypoints = params['waypoints']
waypoints_length = len(waypoints)
front_waypoint = params['closest_waypoints'][1]
# The "rabbit" is the waypoint one past the next waypoint; wrap around
# the circuit when the index runs off the end of the list.
rabbit_waypoint = front_waypoint + 1
if (rabbit_waypoint >= waypoints_length):
    rabbit_waypoint = rabbit_waypoint % waypoints_length
rabbit = [waypoints[rabbit_waypoint][0],waypoints[rabbit_waypoint][1]]
radius = math.hypot(x - rabbit[0], y - rabbit[1])
# Project the car's heading onto a circle of that radius to see where
# it points relative to the rabbit waypoint.
pointing = [0,0]
pointing[0] = x + (radius * math.cos(heading))
pointing[1] = y + (radius * math.sin(heading))
vector_delta = math.hypot(pointing[0] - rabbit[0], pointing[1] - rabbit[1])
reward = 0.001
reward += ( 1 - ( vector_delta / (radius * 2)))
reward
front_waypoint = params['closest_waypoints'][1]
rabbit_waypoint = front_waypoint + 1
rabbit = [waypoints[rabbit_waypoint][0],waypoints[rabbit_waypoint][1]]
rabbit
front_waypoint
rabbit_waypoint
# NOTE(review): `ribbit_waypoint` is a typo for `rabbit_waypoint` and
# raises NameError when this cell runs.
rabbit_waypoint_data = waypoints[ribbit_waypoint]
# NOTE(review): `closest_waypoints` is undefined in this scope (it lives
# in params['closest_waypoints']); this line raises NameError as written.
rabbit = [waypoints[rabbit_waypoint][0],waypoints[closest_waypoints[1]][1]]
radius = math.hypot(x - rabbit[0], y - rabbit[1])
pointing[0] = x + (radius * math.cos(heading))
pointing[1] = y + (radius * math.sin(heading))
vector_delta = math.hypot(pointing[0] - rabbit[0], pointing[1] - rabbit[1])
# Max distance for pointing away will be the radius * 2
# Min distance means we are pointing directly at the next waypoint
# We can setup a reward that is a ratio to this max.
if vector_delta == 0:
    reward += 1
else:
    reward += ( 1 - ( vector_delta / (radius * 2)))
len(waypoints)
waypoints[-1]
waypoints[0]
waypoints[118]
front_waypoint
# NOTE(review): `closest_waypoints` and `car_orientation` are undefined
# here — this snippet appears pasted from a reward-function draft.
rabbit = [waypoints[closest_waypoints+1][0],waypoints[closest_waypoints+1][1]]
radius = math.hypot(x - rabbit[0], y - rabbit[1])
pointing[0] = x + (radius * math.cos(car_orientation))
pointing[1] = y + (radius * math.sin(car_orientation))
vector_delta = math.hypot(pointing[0] - rabbit[0], pointing[1] - rabbit[1])
# zhouju/lunju: presumably wheelbase (轴距) and track width (轮距) in
# meters — TODO confirm.
zhouju= 0.17
lunju = 0.155
testing_param
def distance_between(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    delta_x = x1 - x2
    delta_y = y1 - y2
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
# Sum the distances between consecutive waypoints (closing the loop by
# starting from the last waypoint) to recompute the track length.
waypoints = testing_param['waypoints']
prev_waypoint = waypoints[-1]
total_distance = 0
for waypoint in testing_param['waypoints']:
    distance = distance_between(prev_waypoint[0], prev_waypoint[1], waypoint[0], waypoint[1])
    total_distance = total_distance + distance
    prev_waypoint = waypoint
total_distance
pow(2,3)
pow(0.98, 105)
17.709159380834848
| github_jupyter |
```
import requests
import arrow
import pprint
import json
from urllib.parse import urlencode
from functools import reduce
# Read the Notion integration token from a local, untracked file.
token = open("./NOTION_TOKEN", "r").readlines()[0]
notion_version = "2021-08-16"
# Query filter: only pages whose "标签" (tags) multi-select is non-empty.
extra_data = {"filter": {"and": [{"property": "标签",
                                  "multi_select": {"is_not_empty": True}},],},}
r_database = requests.post(
    url="https://api.notion.com/v1/databases/cecf4bb039dc46bca130a29a9db58906/query",
    headers={"Authorization": "Bearer " + token,
             "Notion-Version": notion_version,
             "Content-Type": "application/json",
             },
    data=json.dumps(extra_data),
)
respond = json.loads(r_database.text)
def take_page_plain_text(respond: dict):
    """Yield every plain-text fragment from the child blocks of each page
    listed in a Notion database query response.

    Uses the module-level ``token`` and ``notion_version`` for the
    per-page children requests.
    """
    for page in respond["results"]:
        # The page id is the trailing hyphen-separated token of the page URL.
        page_id = page["url"].rsplit("/", 1)[-1].rsplit("-", 1)[-1]
        page_response = requests.get(
            url=f"https://api.notion.com/v1/blocks/{page_id}/children",
            headers={"Authorization": f"Bearer {token}",
                     "Notion-Version": notion_version,
                     "Content-Type": "application/json",
                     },
        )
        # Each block may carry a type-specific payload holding rich text.
        for child in json.loads(page_response.text).get("results", []):
            for payload in child.values():
                if isinstance(payload, dict) and "text" in payload:
                    for rich_text in payload["text"]:
                        yield rich_text["plain_text"]
text_list = list(take_page_plain_text(respond))
text_list[:3]
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
import sys
from unicodedata import category
codepoints = range(sys.maxunicode + 1)
# Set of all Unicode punctuation characters (general category "P*").
punctuation = {c for k in codepoints if category(c := chr(k)).startswith("P")}
from functional import seq
# Tokenize each Chinese text fragment with jieba (HMM-based segmentation).
split_text_list = [jieba.lcut(text, HMM=True) for text in text_list]
from glob import glob
stopfiles = glob("./stopwords/*stopwords.txt")
# Union of all stopword lists found on disk.
stopwords = reduce(lambda x,y: x.union(y), [set([x.strip() for x in open(file, "r").readlines()]) for file in stopfiles])
def check_stopwords(word):
    """Return True if `word` should be dropped: a stopword, a punctuation
    character, or a pure digit string."""
    if word in stopwords:
        return True
    if word in punctuation:
        return True
    return word.isdigit()
# Drop stopwords/punctuation/digits from every tokenized sentence.
sequence = seq(split_text_list).map(lambda sent: [word for word in sent if not check_stopwords(word)])
# Vocabulary: union of all remaining tokens.
uniqueWords = (sequence
    .map(lambda sent: set(sent))
    .reduce(lambda x, y: x.union(y))
)
# Map each (lower-cased) word to the set of original texts containing it.
word2sents = {word.lower(): set() for word in uniqueWords}
for text in text_list:
    for word in uniqueWords:
        if word in text:
            word2sents[word.lower()].add(text)
```
## 现有库
```
# TF-IDF via scikit-learn on the space-joined token lists.
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(sequence.map(lambda x: " ".join(x)).to_list())
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
df = pd.DataFrame(denselist, columns=feature_names)
# Rank words by their maximum TF-IDF score across documents.
df.max(axis=0).sort_values(key=lambda x: -x).to_csv("./tf_idf_topic.csv")
for word in df.max(axis=0).sort_values(key=lambda x: -x).head(3).index:
    print(word)
    print(word2sents[word])
    print("-" * 10)
```
## 自定义(不是tf*idf)
```
# Recompute the vocabulary (union of all filtered token sets).
uniqueWords = (sequence
    .map(lambda sent: set(sent))
    .reduce(lambda x, y: x.union(y))
)
def computeTF(wordDict, bagOfWords):
    """Term frequency: each word's count divided by the document length.

    `wordDict` maps word -> count; `bagOfWords` is the token list whose
    length normalizes the counts.
    """
    total_terms = float(len(bagOfWords))
    return {word: count / total_terms for word, count in wordDict.items()}
def computeIDF(documents):
    """Inverse document frequency for a corpus of term-count dicts.

    Parameters
    ----------
    documents : list[dict]
        One dict per document mapping word -> count in that document.

    Returns
    -------
    dict
        word -> log(N / df), where df is the number of documents the
        word appears in (count > 0) and N is the corpus size.

    Notes
    -----
    Fixes two defects in the original: document frequencies were keyed
    only on ``documents[0]`` (raising KeyError for words that first
    appear in a later document), and a word with zero document
    frequency caused a ZeroDivisionError in ``log(N / 0)``.
    """
    import math
    N = len(documents)
    # Count document frequency over the union of all vocabularies.
    idfDict = {}
    for document in documents:
        for word, val in document.items():
            if val > 0:
                idfDict[word] = idfDict.get(word, 0) + 1
    # Only words with df > 0 are present, so the division is safe.
    for word, df in idfDict.items():
        idfDict[word] = math.log(N / float(df))
    return idfDict
```
| github_jupyter |

<div class = 'alert alert-block alert-info'
style = 'background-color:#4c1c84;
color:#eeebf1;
border-width:5px;
border-color:#4c1c84;
font-family:Comic Sans MS;
border-radius: 50px 50px'>
<p style = 'font-size:24px'>Exp 027</p>
<a href = "#Config"
style = "color:#eeebf1;
font-size:14px">1.Config</a><br>
<a href = "#Settings"
style = "color:#eeebf1;
font-size:14px">2.Settings</a><br>
<a href = "#Data-Load"
style = "color:#eeebf1;
font-size:14px">3.Data Load</a><br>
<a href = "#Pytorch-Settings"
style = "color:#eeebf1;
font-size:14px">4.Pytorch Settings</a><br>
<a href = "#Training"
style = "color:#eeebf1;
font-size:14px">5.Training</a><br>
</div>
<p style = 'font-size:24px;
color:#4c1c84'>
実施したこと
</p>
<li style = "color:#4c1c84;
font-size:14px">使用データ:Jigsaw2nd</li>
<li style = "color:#4c1c84;
font-size:14px">使用モデル:DeBERTa-Base</li>
<li style = "color:#4c1c84;
font-size:14px">New!! Attentionの可視化</li>
<br>
<h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
Config
</h1>
<br>
```
import sys
sys.path.append("../src/utils/iterative-stratification/")
sys.path.append("../src/utils/detoxify")
sys.path.append("../src/utils/coral-pytorch/")
import warnings
warnings.simplefilter('ignore')
import os
import gc
gc.enable()
import sys
import glob
import copy
import math
import time
import random
import string
import psutil
import pathlib
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict
from box import Box
from typing import Optional
from pprint import pprint
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import japanize_matplotlib
from tqdm.auto import tqdm as tqdmp
from tqdm.autonotebook import tqdm as tqdm
tqdmp.pandas()
## Model
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold, KFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AdamW
from transformers import RobertaModel, RobertaForSequenceClassification
from transformers import RobertaTokenizer
from transformers import LukeTokenizer, LukeModel, LukeConfig
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification
from transformers import DebertaTokenizer, DebertaModel
# Pytorch Lightning
import pytorch_lightning as pl
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning import callbacks
from pytorch_lightning.callbacks.progress import ProgressBarBase
from pytorch_lightning import LightningDataModule, LightningDataModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.loggers.csv_logs import CSVLogger
from pytorch_lightning.callbacks import RichProgressBar
from sklearn.linear_model import Ridge
from sklearn.svm import SVC, SVR
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.stats import rankdata
from cuml.svm import SVR as cuml_SVR
from cuml.linear_model import Ridge as cuml_Ridge
import cudf
from detoxify import Detoxify
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
import torch
# Experiment configuration; wrapped in Box below for attribute access.
config = {
    "exp_comment":"Wiki AttackデータをLukeで学習",
    "seed": 42,
    "root": "/content/drive/MyDrive/kaggle/Jigsaw/raw",
    "n_fold": 5,
    "epoch": 5,
    "max_length": 128,
    "environment": "AWS",
    "project": "Jigsaw",
    "entity": "dataskywalker",
    "exp_name": "027_exp",
    "margin": 0.5,
    "train_fold": [0, 1, 2, 3, 4],
    "trainer": {
        "gpus": 1,
        "accumulate_grad_batches": 8,
        "progress_bar_refresh_rate": 1,
        # NOTE(review): fast_dev_run=True makes Lightning run a single
        # debug batch — confirm this is intended for a full experiment.
        "fast_dev_run": True,
        "num_sanity_val_steps": 0,
    },
    "train_loader": {
        "batch_size": 4,
        "shuffle": True,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": True,
    },
    "valid_loader": {
        "batch_size": 4,
        "shuffle": False,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": False,
    },
    "test_loader": {
        "batch_size": 4,
        "shuffle": False,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": False,
    },
    "backbone": {
        "name": "microsoft/deberta-base",
        "output_dim": 1,
    },
    "optimizer": {
        "name": "torch.optim.AdamW",
        "params": {
            "lr": 1e-6,
        },
    },
    "scheduler": {
        "name": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
        "params": {
            "T_0": 20,
            "eta_min": 0,
        },
    },
    "loss": "nn.BCEWithLogitsLoss",
}
config = Box(config)
# Load tokenizer/model once and cache them locally for offline reuse.
config.tokenizer = DebertaTokenizer.from_pretrained(config.backbone.name)
config.model = DebertaModel.from_pretrained(config.backbone.name)
# pprint(config)
config.tokenizer.save_pretrained(f"../data/processed/{config.backbone.name}")
pretrain_model = DebertaModel.from_pretrained(config.backbone.name)
pretrain_model.save_pretrained(f"../data/processed/{config.backbone.name}")
# I personally switch between AWS, Kaggle, and Google Colab, so the
# per-environment paths are consolidated here.
import os
import sys
from pathlib import Path
if config.environment == 'AWS':
    INPUT_DIR = Path('/mnt/work/data/kaggle/Jigsaw/')
    MODEL_DIR = Path(f'../models/{config.exp_name}/')
    OUTPUT_DIR = Path(f'../data/interim/{config.exp_name}/')
    UTIL_DIR = Path('/mnt/work/shimizu/kaggle/PetFinder/src/utils')
    os.makedirs(MODEL_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    print(f"Your environment is 'AWS'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}\nUTIL_DIR is {UTIL_DIR}")
elif config.environment == 'Kaggle':
    INPUT_DIR = Path('../input/*****')
    MODEL_DIR = Path('./')
    OUTPUT_DIR = Path('./')
    print(f"Your environment is 'Kaggle'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}")
elif config.environment == 'Colab':
    INPUT_DIR = Path('/content/drive/MyDrive/kaggle/Jigsaw/raw')
    BASE_DIR = Path("/content/drive/MyDrive/kaggle/Jigsaw/interim")
    MODEL_DIR = BASE_DIR / f'{config.exp_name}'
    OUTPUT_DIR = BASE_DIR / f'{config.exp_name}/'
    os.makedirs(MODEL_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    # Colab requires Google Drive to be mounted before paths resolve.
    if not os.path.exists(INPUT_DIR):
        print('Please Mount your Google Drive.')
    else:
        print(f"Your environment is 'Colab'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}")
else:
    print("Please choose 'AWS' or 'Kaggle' or 'Colab'.\nINPUT_DIR is not found.")
# Fix the random seed for reproducibility.
seed_everything(config.seed)
## Elapsed-time and memory measurement helper.
@contextmanager
def timer(name:str, slack:bool=False):
    """Context manager that prints elapsed wall time and the RSS memory
    delta (in GiB) for the wrapped block.

    NOTE(review): `slack` is currently unused — presumably intended for
    Slack notifications; confirm before removing.
    """
    t0 = time.time()
    p = psutil.Process(os.getpid())
    m0 = p.memory_info()[0] / 2. ** 30  # resident set size in GiB
    print(f'<< {name} >> Start')
    yield
    m1 = p.memory_info()[0] / 2. ** 30
    delta = m1 - m0
    sign = '+' if delta >= 0 else '-'
    delta = math.fabs(delta)
    print(f"<< {name} >> {m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec", file=sys.stderr)
```
<br>
<h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
Data Load
</h1>
<br>
```
## Data Check
# Walk the raw input directory and list every file found.
for dirnames, _, filenames in os.walk(INPUT_DIR):
    for filename in filenames:
        # Fix: print the actual file name — the loop variable was unused
        # and a literal placeholder was printed instead.
        print(f'{dirnames}/{filename}')
val_df = pd.read_csv("/mnt/work/data/kaggle/Jigsaw/validation_data.csv")
test_df = pd.read_csv("/mnt/work/data/kaggle/Jigsaw/comments_to_score.csv")
display(val_df.head())
display(test_df.head())
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
Jigsaw Classification
</h2>
<br>
```
# Load the Jigsaw Unintended Bias training data and align its label
# column name with the rest of the notebook.
train_df = pd.read_csv("../data/external/jigsaw-unbiased/train.csv")
train_df = train_df.rename(columns={"target": "toxicity"})
train_df.iloc[:, :20].head()
target_cols = [
    "toxicity",
    "severe_toxicity",
    "identity_attack",
    "insult",
    "threat",
    "sexual_explicit"
]
# Label distribution sanity check.
plt.figure(figsize=(12, 5))
sns.histplot(train_df["toxicity"], color="#4c1c84")
plt.grid()
plt.show()
def sample_df(df: pd.DataFrame, frac=0.2):
    """Keep every toxic row and a `frac`-sized sample of the non-toxic rows.

    Adds a blended target column "y" (sum of the label columns, scaled so the
    maximum is 1), subsamples the rows with y == 0, then returns the shuffled,
    re-indexed frame. Uses config.seed for the non-toxic subsample only; the
    final shuffle is unseeded (matching the original behaviour).
    """
    print(f"Before: {df.shape}")
    label_cols = [
        "toxicity",
        "severe_toxicity",
        "identity_attack",
        "insult",
        "threat",
        "sexual_explicit"
    ]
    blended = df[label_cols].sum(axis=1)
    df["y"] = blended / blended.max()
    toxic_rows = df[df["y"] > 0].reset_index(drop=True)
    nontoxic_rows = df[df["y"] == 0].reset_index(drop=True)
    nontoxic_rows = nontoxic_rows.sample(frac=frac, random_state=config.seed)
    df = pd.concat([toxic_rows, nontoxic_rows], axis=0).sample(frac=1).reset_index(drop=True)
    print(f"After: {df.shape}")
    return df
# Downsample: keep all toxic rows plus 20% of the non-toxic rows.
with timer("sampling df"):
    train_df = sample_df(train_df, frac=0.2)
display(train_df.head())
```
<br>
<h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
Pytorch Dataset
</h1>
<br>
```
class JigsawDataset:
    """Map-style dataset for the Jigsaw toxicity task.

    Modes:
      * "train": yields tokenized ``comment_text`` plus the multi-label targets.
      * "valid": yields a tokenized (more_toxic, less_toxic) comment pair with
        a constant target of 1 (the more-toxic side should score higher).
      * anything else ("test"): yields tokenized ``text`` only.
    """
    def __init__(self, df, tokenizer, max_length, mode, target_cols):
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.mode = mode
        self.target_cols = target_cols
        if self.mode == "train":
            self.text = df["comment_text"].values
            self.target = df[target_cols].values
        elif self.mode == "valid":
            self.more_toxic = df["more_toxic"].values
            self.less_toxic = df["less_toxic"].values
        else:
            # BUG FIX: was `self.text == df["text"].values` (a comparison, not
            # an assignment), so the attribute was never set and test mode
            # crashed with AttributeError.
            self.text = df["text"].values
    def __len__(self):
        return len(self.df)
    def _encode(self, text):
        """Tokenize one string to fixed-length (ids, attention_mask, token_type_ids)."""
        enc = self.tokenizer.encode_plus(
            text,
            truncation=True,
            return_attention_mask=True,
            return_token_type_ids=True,
            max_length=self.max_len,
            padding="max_length",
        )
        return enc["input_ids"], enc["attention_mask"], enc["token_type_ids"]
    def __getitem__(self, index):
        if self.mode == "train":
            text_ids, text_mask, text_token_type_ids = self._encode(self.text[index])
            return {
                'text_ids': torch.tensor(text_ids, dtype=torch.long),
                'text_mask': torch.tensor(text_mask, dtype=torch.long),
                'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long),
                'target': torch.tensor(self.target[index], dtype=torch.float)
            }
        elif self.mode == "valid":
            more_toxic_ids, more_toxic_mask, more_token_type_ids = self._encode(self.more_toxic[index])
            less_toxic_ids, less_toxic_mask, less_token_type_ids = self._encode(self.less_toxic[index])
            return {
                'more_toxic_ids': torch.tensor(more_toxic_ids, dtype=torch.long),
                'more_toxic_mask': torch.tensor(more_toxic_mask, dtype=torch.long),
                'more_token_type_ids': torch.tensor(more_token_type_ids, dtype=torch.long),
                'less_toxic_ids': torch.tensor(less_toxic_ids, dtype=torch.long),
                'less_toxic_mask': torch.tensor(less_toxic_mask, dtype=torch.long),
                'less_token_type_ids': torch.tensor(less_token_type_ids, dtype=torch.long),
                # Constant 1: validation expects more_toxic to outrank less_toxic.
                'target': torch.tensor(1, dtype=torch.float)
            }
        else:
            # BUG FIX: this branch built `input_text` but then read the
            # undefined name `inputs_text`, raising NameError at runtime.
            text_ids, text_mask, text_token_type_ids = self._encode(self.text[index])
            return {
                'text_ids': torch.tensor(text_ids, dtype=torch.long),
                'text_mask': torch.tensor(text_mask, dtype=torch.long),
                'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long),
            }
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
DataModule
</h2>
<br>
```
class JigsawDataModule(LightningDataModule):
    """Bundles the train / validation / test dataloaders for one experiment.

    All dataloader kwargs and the tokenizer/max_length come from the config
    object; the label columns come from the module-level ``target_cols``.
    """
    def __init__(self, train_df, valid_df, test_df, cfg):
        super().__init__()
        self._train_df = train_df
        self._valid_df = valid_df
        self._test_df = test_df
        self._cfg = cfg
    def _build_dataset(self, frame, mode):
        # Shared constructor for all three modes; target_cols is module-level.
        return JigsawDataset(
            df=frame,
            tokenizer=self._cfg.tokenizer,
            max_length=self._cfg.max_length,
            mode=mode,
            target_cols=target_cols
        )
    def train_dataloader(self):
        return DataLoader(self._build_dataset(self._train_df, "train"), **self._cfg.train_loader)
    def val_dataloader(self):
        return DataLoader(self._build_dataset(self._valid_df, "valid"), **self._cfg.valid_loader)
    def test_dataloader(self):
        return DataLoader(self._build_dataset(self._test_df, "test"), **self._cfg.test_loader)
## DataCheck
seed_everything(config.seed)
# Pull one batch from the train dataloader to sanity-check tensor shapes.
sample_dataloader = JigsawDataModule(train_df, val_df, test_df, config).train_dataloader()
for data in sample_dataloader:
    break
print(data["text_ids"].size())
print(data["text_mask"].size())
print(data["text_token_type_ids"].size())
print(data["target"].size())
print(data["target"])
# Run the raw backbone once to inspect hidden-state / attention shapes.
output = config.model(
    data["text_ids"],
    data["text_mask"],
    data["text_token_type_ids"],
    output_attentions=True
)
print(output["last_hidden_state"].size(), output["attentions"][-1].size())
# First-token slice of the last hidden state — the pooled feature JigsawModel uses.
print(output["last_hidden_state"][:, 0, :].size(), output["attentions"][-1].size())
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
LightningModule
</h2>
<br>
```
class JigsawModel(pl.LightningModule):
    """Lightning wrapper around a DeBERTa backbone with a linear regression head.

    Trains on the multi-label toxicity targets and validates on the
    (less_toxic, more_toxic) comment pairs via a pairwise score comparison.
    """
    def __init__(self, cfg, fold_num):
        super().__init__()
        self.cfg = cfg
        self.__build_model()
        # The loss class name is taken from config (e.g. "nn.MSELoss") and
        # instantiated here via eval().
        self.criterion = eval(self.cfg.loss)()
        self.save_hyperparameters(cfg)
        self.fold_num = fold_num
    def __build_model(self):
        """Instantiate the pretrained backbone plus norm/dropout/linear head."""
        self.base_model = DebertaModel.from_pretrained(
            self.cfg.backbone.name
        )
        print(f"Use Model: {self.cfg.backbone.name}")
        # 768 is presumably the backbone's hidden size — confirm against the
        # configured model name.
        self.norm = nn.LayerNorm(768)
        self.drop = nn.Dropout(p=0.3)
        self.head = nn.Linear(768, self.cfg.backbone.output_dim)
    def forward(self, ids, mask, token_type_ids):
        """Return per-target logits plus the backbone attentions and the mask."""
        output = self.base_model(
            input_ids=ids,
            attention_mask=mask,
            token_type_ids=token_type_ids,
            output_attentions=True
        )
        # Use the first token's hidden state as the pooled sentence feature.
        feature = self.norm(output["last_hidden_state"][:, 0, :])
        out = self.drop(feature)
        out = self.head(out)
        return {
            "logits":out,
            "attention":output["attentions"],
            "mask":mask,
        }
    def training_step(self, batch, batch_idx):
        """One supervised regression step on the multi-label targets."""
        text_ids = batch["text_ids"]
        text_mask = batch['text_mask']
        text_token_type_ids = batch['text_token_type_ids']
        targets = batch['target']
        outputs = self.forward(text_ids, text_mask, text_token_type_ids)
        loss = self.criterion(outputs["logits"], targets)
        return {
            "loss":loss,
            "targets":targets,
        }
    def training_epoch_end(self, training_step_outputs):
        """Log the mean training loss for the epoch, tagged with the fold."""
        loss_list = []
        for out in training_step_outputs:
            loss_list.extend([out["loss"].cpu().detach().tolist()])
        meanloss = sum(loss_list)/len(loss_list)
        logs = {f"train_loss/fold{self.fold_num+1}": meanloss,}
        self.log_dict(
            logs,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True
        )
    def validation_step(self, batch, batch_idx):
        """Score both sides of a (more_toxic, less_toxic) pair and compare."""
        more_toxic_ids = batch['more_toxic_ids']
        more_toxic_mask = batch['more_toxic_mask']
        more_text_token_type_ids = batch['more_token_type_ids']
        less_toxic_ids = batch['less_toxic_ids']
        less_toxic_mask = batch['less_toxic_mask']
        less_text_token_type_ids = batch['less_token_type_ids']
        targets = batch['target']
        more_outputs = self.forward(
            more_toxic_ids,
            more_toxic_mask,
            more_text_token_type_ids
        )
        less_outputs = self.forward(
            less_toxic_ids,
            less_toxic_mask,
            less_text_token_type_ids
        )
        # Collapse the per-target logits to a single toxicity score per comment.
        more_outputs = torch.sum(more_outputs["logits"], 1)
        less_outputs = torch.sum(less_outputs["logits"], 1)
        # Positive difference means the pair is ranked correctly.
        outputs = more_outputs - less_outputs
        logits = outputs.clone()
        # NOTE(review): only positive differences are clamped to 1 before the
        # loss; negative values pass through unchanged — confirm this is the
        # intended validation loss.
        logits[logits > 0] = 1
        loss = self.criterion(logits, targets)
        return {
            "loss":loss,
            "pred":outputs,
            "targets":targets,
        }
    def validation_epoch_end(self, validation_step_outputs):
        """Log mean validation loss and pairwise ranking accuracy."""
        loss_list = []
        pred_list = []
        target_list = []
        for out in validation_step_outputs:
            loss_list.extend([out["loss"].cpu().detach().tolist()])
            pred_list.append(out["pred"].detach().cpu().numpy())
            target_list.append(out["targets"].detach().cpu().numpy())
        meanloss = sum(loss_list)/len(loss_list)
        pred_list = np.concatenate(pred_list)
        # Fraction of pairs whose score difference is positive = pairwise accuracy.
        pred_count = sum(x>0 for x in pred_list)/len(pred_list)
        logs = {
            f"valid_loss/fold{self.fold_num+1}":meanloss,
            f"valid_acc/fold{self.fold_num+1}":pred_count,
        }
        self.log_dict(
            logs,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True
        )
    def configure_optimizers(self):
        """Build the optimizer and a per-step scheduler from their config names."""
        optimizer = eval(self.cfg.optimizer.name)(
            self.parameters(), **self.cfg.optimizer.params
        )
        self.scheduler = eval(self.cfg.scheduler.name)(
            optimizer, **self.cfg.scheduler.params
        )
        # "interval": "step" -> the scheduler advances every optimizer step.
        scheduler = {"scheduler": self.scheduler, "interval": "step",}
        return [optimizer], [scheduler]
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
Training
</h2>
<br>
```
# Distribution of the blended target column.
sns.distplot(train_df["y"])
# NOTE(review): plain KFold ignores the y argument passed to split(); if
# stratification on "y" is actually intended, StratifiedKFold is needed.
skf = KFold(
    n_splits=config.n_fold,
    shuffle=True,
    random_state=config.seed
)
# Assign each row the index of the fold in which it is held out.
for fold, (_, val_idx) in enumerate(skf.split(X=train_df, y=train_df["y"])):
    train_df.loc[val_idx, "kfold"] = int(fold)
train_df["kfold"] = train_df["kfold"].astype(int)
train_df.head()
## Debug
# Dry-run of the full training loop: fast_dev_run makes Lightning execute a
# single batch per phase so configuration errors surface quickly.
config.trainer.fast_dev_run = True
config.backbone.output_dim = len(target_cols)
for fold in config.train_fold:
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    # Train on every row not held out for this fold.
    df_train = train_df[train_df.kfold != fold].reset_index(drop=True)
    datamodule = JigsawDataModule(df_train, val_df, test_df, config)
    # Throwaway dataloader used only to size the scheduler period
    # (T_0 suggests CosineAnnealingWarmRestarts — confirm in config).
    sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()
    config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)
    model = JigsawModel(config, fold)
    lr_monitor = callbacks.LearningRateMonitor()
    # Keep only the checkpoint with the best validation pairwise accuracy.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath=MODEL_DIR,
    )
    wandb_logger = WandbLogger(
        project=config.project,
        entity=config.entity,
        name = f"{config.exp_name}",
        tags = ['DeBERTa-Base', "Jigsaw-Unbiased"]
    )
    # NOTE(review): this re-assignment shadows the LearningRateMonitor above.
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer = pl.Trainer(
        max_epochs=config.epoch,
        callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],
        # deterministic=True,
        logger=[wandb_logger],
        **config.trainer
    )
    trainer.fit(model, datamodule=datamodule)
## Training
# Real run: identical to the debug loop above except fast_dev_run is off.
config.trainer.fast_dev_run = False
config.backbone.output_dim = len(target_cols)
for fold in config.train_fold:
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    # Train on every row not held out for this fold.
    df_train = train_df[train_df.kfold != fold].reset_index(drop=True)
    datamodule = JigsawDataModule(df_train, val_df, test_df, config)
    # Throwaway dataloader used only to size the scheduler period
    # (T_0 suggests CosineAnnealingWarmRestarts — confirm in config).
    sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()
    config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)
    model = JigsawModel(config, fold)
    lr_monitor = callbacks.LearningRateMonitor()
    # Keep only the checkpoint with the best validation pairwise accuracy.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath=MODEL_DIR,
    )
    wandb_logger = WandbLogger(
        project=config.project,
        entity=config.entity,
        name = f"{config.exp_name}",
        tags = ['DeBERTa-Base', "Jigsaw-Unbiased"]
    )
    # NOTE(review): this re-assignment shadows the LearningRateMonitor above.
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer = pl.Trainer(
        max_epochs=config.epoch,
        callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],
        # deterministic=True,
        logger=[wandb_logger],
        **config.trainer
    )
    trainer.fit(model, datamodule=datamodule)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device == {device}")
# Fold-averaged scores for the validation pairs (and, if enabled, the test set).
MORE = np.zeros(len(val_df))
LESS = np.zeros(len(val_df))
PRED = np.zeros(len(test_df))
# BUG FIX: these buffers were sized with an undefined name `df` (NameError);
# they hold per-token attention / mask info for the validation set.
# 256 presumably equals config.max_length — TODO confirm.
attention_array = np.zeros((len(val_df), 256)) # attention storage
mask_array = np.zeros((len(val_df), 256)) # token masks, multiplied with attention later
for fold in config.train_fold:
    pred_list = []
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    valid_dataloader = JigsawDataModule(train_df, val_df, test_df, config).val_dataloader()
    # (Removed an unused ModelCheckpoint instance that was created here but
    # never attached to any trainer.)
    model = JigsawModel(config, fold)
    model = model.load_from_checkpoint(MODEL_DIR/f"best_acc_fold{fold+1}.ckpt", cfg=config, fold_num=fold)
    model.to(device)
    model.eval()
    more_list = []
    less_list = []
    with torch.no_grad():  # inference only — no gradients needed
        for step, data in tqdm(enumerate(valid_dataloader), total=len(valid_dataloader)):
            more_toxic_ids = data['more_toxic_ids'].to(device)
            more_toxic_mask = data['more_toxic_mask'].to(device)
            more_text_token_type_ids = data['more_token_type_ids'].to(device)
            less_toxic_ids = data['less_toxic_ids'].to(device)
            less_toxic_mask = data['less_toxic_mask'].to(device)
            less_text_token_type_ids = data['less_token_type_ids'].to(device)
            more_outputs = model(
                more_toxic_ids,
                more_toxic_mask,
                more_text_token_type_ids,
            )
            less_outputs = model(
                less_toxic_ids,
                less_toxic_mask,
                less_text_token_type_ids
            )
            # BUG FIX: forward() returns a dict — score from its "logits"
            # entry (column 0 is the "toxicity" target).
            more_list.append(more_outputs["logits"][:, 0].detach().cpu().numpy())
            less_list.append(less_outputs["logits"][:, 0].detach().cpu().numpy())
    MORE += np.concatenate(more_list)/len(config.train_fold)
    LESS += np.concatenate(less_list)/len(config.train_fold)
    # PRED += pred_list/len(config.train_fold)
# Visualize less-toxic vs more-toxic scores for each validation pair.
plt.figure(figsize=(12, 5))
plt.scatter(LESS, MORE)
plt.xlabel("less-toxic")
plt.ylabel("more-toxic")
plt.grid()
plt.show()
val_df["less_attack"] = LESS
val_df["more_attack"] = MORE
val_df["diff_attack"] = val_df["more_attack"] - val_df["less_attack"]
# Pairwise accuracy: fraction of pairs where the more-toxic comment scored higher.
attack_score = val_df[val_df["diff_attack"]>0]["diff_attack"].count()/len(val_df)
print(f"Wiki Attack Score: {attack_score:.6f}")
```
| github_jupyter |
```
def create_simple_convnet_model(*, input_size, output_size, verbose=False, **kwargs):
    """Build and compile a small CNN image classifier.

    Architecture: Conv2D -> MaxPool -> Conv2D -> MaxPool -> Flatten ->
    Dense (+Dropout) -> Dense output. Every layer hyper-parameter is taken
    from **kwargs (keys like 'Conv2D_1__filters'); see the parameter grid.
    Returns the compiled keras model.
    """
    model = keras.Sequential([
        # ---- convolutional feature extractor ----
        keras.layers.Conv2D(
            filters=kwargs['Conv2D_1__filters'],
            kernel_size=kwargs['Conv2D_1__kernel_size'],
            strides=kwargs['Conv2D_1__stride'],
            activation=kwargs['Conv2D_1__activation'],
            input_shape=input_size
        ),
        keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_1__pool_size']),
        keras.layers.Conv2D(
            filters=kwargs['Conv2D_2__filters'],
            kernel_size=kwargs['Conv2D_2__kernel_size'],
            strides=kwargs['Conv2D_2__stride'],
            activation=kwargs['Conv2D_2__activation'],
        ),
        keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_2__pool_size']),
        # ---- dense classifier head ----
        keras.layers.Flatten(),
        Dense(
            units=kwargs['h1_unit_size'],
            activation=kwargs["h1_activation"],
            kernel_initializer=initializers.VarianceScaling(scale=2.0, seed=0)
        ),
        tf.keras.layers.Dropout(kwargs["h1_Dropout"]),
        Dense(
            units=output_size,
            activation=kwargs["out_activation"],
            kernel_regularizer=tf.keras.regularizers.l2(0.001),
            kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
        ),
    ])
    # Optionally print the network summary.
    if verbose == True:
        print(model.summary())
    # Define the loss function and training operation.
    model.compile(
        optimizer=kwargs["optimizer"],
        loss=losses.sparse_categorical_crossentropy,
        metrics=kwargs["metrics"]  # must be a list, even for a single metric
    )
    return model
# Hyper-parameter grid for the simple convnet. Each entry is a list so the
# grid can be widened into a real search; here every list holds one value.
grid = ParameterGrid({
    "random_state":[0], # used to divide train,valid datasets,
    "train_test_split__train_size":[0.7],
    # -- generators, ........................
    "generator__batch_size": [20],
    "generator__target_size" :[(299, 299)], # tuple, for each image x,y dimension in pixels,
    "generator__validation_split" : [0.3], # caution its opposite then in train_test_split__train_size"
    # -- conv model, ........................
    "model":["two_layers"],
    'Conv2D_1__filters': [64],
    'Conv2D_1__kernel_size': [5],
    'Conv2D_1__stride': [2],
    'Conv2D_1__activation' : ['relu'],
    'MaxPool2D_1__pool_size': [2],
    'Conv2D_2__filters': [64],
    'Conv2D_2__kernel_size': [3],
    'Conv2D_2__stride': [1],
    'Conv2D_2__activation' : ["relu"],
    'MaxPool2D_2__pool_size': [2],
    # -- dense nn, ...........................
    "h1_unit_size":[24],
    "h1_Dropout" : [0],
    "h1_activation": ["relu"],
    "out_activation":["softmax"],
    "optimizer":["Adam"],
    "metrics": [["acc"]],
    # -- training, ...........................
    "EarlyStopping__patience": [6],
    "fit_generator__epoch": [2]
})
# NOTE(review): `params` is not defined in this cell — presumably it should be
# `list(grid)` here, or a loop variable from a later `for params in grid:`.
params
```
| github_jupyter |
# Visualizations in Data
Data visualization is the presentation of data in graphical format. Data visualization is both an art and a science as it combines creating visualizations that are both engaging and accurate. In mathematical applications visualizations can help you better observe trends and patterns in data, or describe large datasets in a concise way. In this lesson we will focus on some of the most common graphs used to visualize data and describe some tools in Python that can help you create these visualizations!
## Matplotlib
To create our visualizations in Python we will be using the matplotlib library, which will give us the tools to easily create graphs and customize them. We will cover some of the matplotlib functionality in this lesson, but check out [this resource](https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39) if you want some more introduction to how to use this library.
## Dataset
In this lesson we will be using the drinks file from the [Starbucks nutrition dataset](https://www.kaggle.com/starbucks/starbucks-menu). This dataset includes the nutritional information for Starbucks’ food and drink menu items. All nutritional information for drinks are for a 12oz serving size.
## Using and storing data
Data is often stored in comma separated value files or .csv files. For many interesting datasets to try in your projects check out [Kaggle](https://www.kaggle.com/). In this lesson we will be using the starbucks drinks csv file as the basis for our visualizations. In python there is a library called csv that makes handling csv files easier. Take a look at the example below to learn more about how to use this library.
```
import csv #the csv library
with open('starbucks_drinkMenu_expanded.csv') as csvfile: #open the file
    #creates a csv reader object which stores the lines of the file in lists and lets us iterate over them
    drinksreader = csv.reader(csvfile)
    headers = next(drinksreader, None) #skip over the headers
    #each row is a list of column strings describing one menu item
    for row in drinksreader:
        print(row)#take a look at what is being printed out
```
## Bar Chart
Let's start by creating a visualization that you might already be familiar with: a bar chart. Bar charts are used to show comparisons between categories of data. A bar chart will have two axes, one will typically be numerical values while the other will be some sort of category. There are two types of bar charts: vertical and horizontal. Let's look at some examples of how to create a bar chart using our dataset!
In this example let's compare the sugar content of different types of drinks (lattes, mochas, and teas) using our dataset. Here are the steps we are going to perform to create this visualization:
1. Read in the data
2. Extract the headers
3. Find the index which corresponds to the beverage category and grams of sugar
4. Filter for the types of drinks we are interested in (lattes, mochas, and teas)
5. Store in a list
6. Average the amount of sugar per type
7. Use matplotlib to build a bar chart
The first axis of our bar chart will be the beverage type, the second will be the average sugar content in grams. More on bar charts in matplotlib [here](https://pythonspot.com/matplotlib-bar-chart/)
```
import csv #the csv library
import matplotlib.pyplot as plt #The visualization library
import numpy as np #provides math functions
with open('starbucks_drinkMenu_expanded.csv') as csvfile: #open the file
    #creates a csv reader object which stores the lines of the file in lists and lets us iterate over them
    drinksreader = csv.reader(csvfile)
    headers = next(drinksreader, None) #skip over the headers
    #get the index that corresponds to the information we are interested in
    drink_category_index = headers.index("Beverage")
    sugars_index = headers.index(" Sugars (g)")
    #This is where we will store the sugar info for our different beverage types
    sugar_in_lattes = []
    sugar_in_teas = []
    sugar_in_mochas = []
    #Bucket each drink's sugar grams by the beverage type named in its category
    for row in drinksreader:
        drink_category = row[drink_category_index]
        sugar_grams = row[sugars_index]
        if 'Latte' in drink_category:
            sugar_in_lattes.append(float(sugar_grams))
        if 'Tea' in drink_category:
            sugar_in_teas.append(float(sugar_grams))
        if 'Mocha' in drink_category:
            sugar_in_mochas.append(float(sugar_grams))
beverage_categories = ["Latte", 'Tea', 'Mocha']
#average the sugar content per beverage type
average_sugar_in_lattes = np.mean(sugar_in_lattes)
average_sugar_in_teas = np.mean(sugar_in_teas)
average_sugar_in_mochas = np.mean(sugar_in_mochas)
average_sugars = [average_sugar_in_lattes, average_sugar_in_teas, average_sugar_in_mochas]
vertical_bar_chart_figure = plt.figure() #The outer container
vertical_bar_chart_axes = vertical_bar_chart_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure
#For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39
#Create the bar chart using the bar() method
#The color argument lets us specify a list of colors for each of the bars
vertical_bar_chart_axes.bar(beverage_categories, average_sugars, color=["pink", "blue", "green"])
#Let's customize our chart!
#Give it a title
vertical_bar_chart_axes.set_title('Vertical bar chart of average sugar in grams for different types of beverages on the Starbucks menu')
#Always label your axes or no one will be able to understand what the chart is showing
vertical_bar_chart_axes.set_ylabel('Average sugar content in grams')
vertical_bar_chart_axes.set_xlabel('Beverage type')
#How would we create a horizontal bar chart? Use the barh() method!
horizontal_bar_chart_figure = plt.figure() #The outer container
horizontal_bar_chart_axes = horizontal_bar_chart_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure
#For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39
#Create the bar chart using the barh() method
#The color argument lets us specify a list of colors for each of the bars
horizontal_bar_chart_axes.barh(beverage_categories, average_sugars, color=["pink", "blue", "green"])
#Let's customize our chart!
#Give it a title (typo "Horixontal" fixed)
horizontal_bar_chart_axes.set_title('Horizontal bar chart of average sugar in grams for different types of beverages on the Starbucks menu')
#Always label your axes or no one will be able to understand what the chart is showing
#BUG FIX: barh() puts the categories on the y-axis and the values on the
#x-axis, so the two labels below were swapped relative to the plot.
horizontal_bar_chart_axes.set_ylabel('Beverage type')
horizontal_bar_chart_axes.set_xlabel('Average sugar content in grams')
```
## Class Discussion: what do these bar charts show you? Were you surprised by the results?
# Line Graph
Line graphs are a type of graph where each data point is connected by lines. This can help us understand how something changes in value. In this next example we will use the data we processed in the bar chart examples to create a line graph using the plot() method.
```
line_graph_figure = plt.figure() #The outer container
line_graph_axes = line_graph_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure
#For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39
#Create the line graph using the plot() method (reuses the averages computed above)
line_graph_axes.plot(beverage_categories, average_sugars)
#Let's customize our chart!
#Give it a title
line_graph_axes.set_title('Line graph of average sugar in grams for different types of beverages on the Starbucks menu')
#Always label your axes or no one will be able to understand what the chart is showing
line_graph_axes.set_ylabel('Average sugar content in grams')
line_graph_axes.set_xlabel('Beverage type')
```
## Activity: make a bar chart and line graph for the average protein in grams for Cappuccinos, Macchiatos, and Smoothies
```
#Make your charts here!
with open('starbucks_drinkMenu_expanded.csv') as csvfile: #open the file
    #creates a csv reader object which stores the lines of the file in lists and lets us iterate over them
    drinksreader = csv.reader(csvfile)
    headers = next(drinksreader, None)
    #BUG FIX: the loop below appended an undefined `protein_grams`; read the
    #protein column explicitly. NOTE(review): confirm the exact header text
    #(" Protein (g) ") against the csv file.
    protein_index = headers.index(" Protein (g) ")
    protein_in_cappuccinos = []
    protein_in_macchiatos = []
    protein_in_smoothies = []
    for row in drinksreader:
        drink_category = row[drink_category_index]
        protein_grams = row[protein_index]
        if 'Cappuccino' in drink_category:
            protein_in_cappuccinos.append(float(protein_grams))
        if 'Macchiato' in drink_category:
            protein_in_macchiatos.append(float(protein_grams))
        if 'Smoothie' in drink_category:
            protein_in_smoothies.append(float(protein_grams))
#BUG FIX: the labels must match the drinks averaged below (the original
#reused ["Latte", 'Tea', 'Mocha'] from the earlier sugar example).
beverage_categories = ["Cappuccino", "Macchiato", "Smoothie"]
#average the protein content
average_protein_in_capp = np.mean(protein_in_cappuccinos)
average_protein_in_mach = np.mean(protein_in_macchiatos)
average_protein_in_smoothies = np.mean(protein_in_smoothies)
average_protein = [average_protein_in_capp, average_protein_in_mach, average_protein_in_smoothies]
vertical_bar_chart_figure = plt.figure() #The outer container
vertical_bar_chart_axes = vertical_bar_chart_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure
#For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39
#Create the bar chart using the bar() method
#The color argument lets us specify a list of colors for each of the bars
vertical_bar_chart_axes.bar(beverage_categories, average_protein, color=["pink", "blue", "green"])
#Let's customize our chart!
#Give it a title
vertical_bar_chart_axes.set_title('Vertical bar chart of average protein in grams for different types of beverages on the Starbucks menu')
#Always label your axes or no one will be able to understand what the chart is showing
vertical_bar_chart_axes.set_ylabel('Average protein content in grams')
vertical_bar_chart_axes.set_xlabel('Beverage type')
```
## Histogram
Histograms are similar to bar charts, but a histogram groups numbers into ranges. The x axis of a histogram typically shows the value ranges and the y axis corresponds to the number of items in each range. Histograms help us better visualize and understand the distribution of the data for certain values.
If you have continuous numerical data, in order to group the data into ranges you need to split the data into intervals, also known as bins. Let's look at an example by creating histograms for the sugar content of each type of Starbucks beverage.
You might be wondering how do we decided how many bins to use? This is [an interesting topic](https://stats.stackexchange.com/questions/798/calculating-optimal-number-of-bins-in-a-histogram) and there are many ways to choose the bin number. For this class we don't need to worry too much about that and can just try out some different options and choose which one helps us visualize the data best.
```
#recall that sugar_in_lattes, sugar_in_teas, and sugar_in_mochas are all lists that store the sugar in grams data
#We will create histograms that help us better see the sugar content distributions for each type of beverage
number_of_bins = 10
def plot_sugar_histogram(sugar_values, beverage_label, bins):
    """Draw one histogram of sugar content and return its (figure, axes).

    Extracted helper: the original cell repeated the same five plotting
    calls for each beverage type.
    """
    figure = plt.figure()
    axes = figure.add_axes([0.1, 0.2, 0.8, 0.9])
    axes.hist(sugar_values, bins=bins)
    axes.set_title(f'Histogram of sugar content in {beverage_label}')
    axes.set_ylabel('Frequency')
    axes.set_xlabel('Sugar in grams')
    return figure, axes
latte_histogram_figure, latte_histogram_axes = plot_sugar_histogram(sugar_in_lattes, 'Lattes', number_of_bins)
tea_histogram_figure, tea_histogram_axes = plot_sugar_histogram(sugar_in_teas, 'Teas', number_of_bins)
mocha_histogram_figure, mocha_histogram_axes = plot_sugar_histogram(sugar_in_mochas, 'Mochas', number_of_bins)
```
## Class Discussion: What observations about the data can you make with these histograms?
## Activity: Create a histogram of the protein content in two different beverages of your choice!
| github_jupyter |
# Регрессия - последняя подготовка перед боем!
> 🚀 В этой практике нам понадобятся: `numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2, seaborn==0.11.2`
> 🚀 Установить вы их можете с помощью команды: `!pip install numpy==1.21.2 pandas==1.3.3 matplotlib==3.4.3 scikit-learn==0.24.2 seaborn==0.11.2`
# Содержание <a name="content"></a>
* [Лирическое вступление](#Liricheskoe_vstuplenie)
* [Первые реальные данные](#Pervye_real_nye_dannye)
* [Анализ одной переменной (унивариантный - univariate)](#Analiz_odnoj_peremennoj_(univariantnyj_-_univariate))
* [Анализ нескольких переменных (мультивариантный - multivariate)](#Analiz_neskol_kih_peremennyh_(mul_tivariantnyj_-_multivariate))
* [LSTAT - MEDV](#LSTAT_-_MEDV)
* [RM - MEDV](#RM_-_MEDV)
* [Подготовка кода предобработки](#Podgotovka_koda_predobrabotki)
* [fit()](#fit())
* [transform()](#transform())
* [Back to programming!](#Back_to_programming!)
* [Заключение](#Zakljuchenie)
* [Вопросы для закрепления](#Voprosy_dlja_zakreplenija)
* [Полезные ссылки](#Poleznye_ssylki)
```
# Visualization settings
# If a dark theme is used, it is better to make the text white
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
import random
TEXT_COLOR = 'black'
matplotlib.rcParams['figure.figsize'] = (15, 10)
matplotlib.rcParams['text.color'] = TEXT_COLOR
matplotlib.rcParams['font.size'] = 14
matplotlib.rcParams['lines.markersize'] = 15
matplotlib.rcParams['axes.labelcolor'] = TEXT_COLOR
matplotlib.rcParams['xtick.color'] = TEXT_COLOR
matplotlib.rcParams['ytick.color'] = TEXT_COLOR
sns.set_style('darkgrid')
# Pin the random number generator state for reproducibility
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
```
## Лирическое вступление <a name="intro"></a>
И снова привет!
К этому моменту мы многому научились и уже знаем немало! Тем не менее, много знаний не бывает, ведь мы приближаемся к первой боевой задаче!
Да-да, скоро вам предстоит самостоятельно провести работу с набором данных! Правда, мы немного считерим, потому что в этой практике с этими данными частично познакомимся, но сделаем это частично, чтобы не забирать у вас всё веселье!
Ранее мы много говорили о том, как учить модель машинного обучения, как разделять данные, как анализировать модель и т.д. В работе с данными эта часть зовётся "обучение и анализ модели". В этой практике мы поговорим о совершенно новой части в работе с данными и научимся данные анализировать.
Зачем это нужно? Ну, просто обучить модель на данных - это зовётся **baseline**. **Baseline** как правило - это самое быстрое и простое решение, которое даёт результат!
Вот, например, у нас есть данные о ценах на земли в городе. Задача - на основе этих данных предсказывать цены на другие участки земли. Самым простым решением будет взять сумму целевых значений (цен) и поделить на количество! Так мы получим среднее значение цены в данных и его можно постоянно предсказывать!
Вот таким простым способом мы получили модель, которая всё время предсказывает постоянное значение. Да, у неё есть какая-то ошибка, да, это вообще не будет похоже на зависимость в данных, но не это важно!
Важно то, что имея baseline, вы будете точно знать, относительно какого решения нужно улучшать вашу модель! Уже и MAE/RMSE есть с чем сравнить - одни плюсы!
> Обратите внимание, что показатель R2 как раз в этом случае будет равен 0, а значения больше нуля означают, что модель лучше, чем простое предсказание среднего!
> 🤓 **Baseline решение** - простое и быстро достижимое решение, используется для дальнейшей оценки улучшений предсказаний при работе с данными.
Так вот к чему всё это? Сейчас мы пока что с вами научились строить baseline модели.
А как научиться делать что-то лучше? Вот тут то и не хватает недостающей части, о которой мы с вами поговорим! И часть это зовется - **анализ данных**!
Но зачем он нужен, если модель делает всё за нас? Учится на данных, регуляризацией мы убираем оверфит, на всякий проверим показатели на тестовой выборке - куда лучше?
Поверьте, есть куда стремиться!
В работе с реальными данными есть простое правило - не сложность модели определяет, кто будет круче, а качество и количество данных!
> ⚠️ Ещё раз, данные важнее, чем модели!
То есть, важно понимать, что происходит с моделью, оверфит это или нужна сложность модели побольше (недообучение). Но хорошее качество и количество данных могут дать намного больший прирост точности, так как шума и выбросов в них будет меньше, а зависимости более выражены.
И как же тогда нам сделать данные качественнее, если вот у нас есть датасет, и сделать его больше мы не можем?
Ответ прост - как можно лучше понять данные и предобработать, а для этого - проанализировать их в первую очередь!
> ⚠️⚠️ Очень важный аспект - **понимание данных**. Если вы хорошо понимаете, что за данные вы имеете и что каждый признак означает, то высока вероятность, что вы лучше их обработаете и очистите!
В таком случае, подводим **итог**! Создавать baseline модели на тех данных, что мы имеем - полезный навык. Но если мы хотим сделать нашу модель ещё круче и эффективнее, то нужно данные проанализировать и подготовить.
> ⚠️ Все новые термины **обработка**, **очистка** и другие действия с данными относятся к общему понятию **подготовка данных** для модели. Baseline может строиться на неподготовленных данных и решать задачу (вероятнее всего плохо), подготовка данных нацелена на улучшение качества данных, чтобы модель, которая на них учится, выявила необходимые зависимости без влияния шума.
> ⚠️ Для реализации хорошей **подготовки данных** необходимо провести **анализ данных**, чтобы данные лучше понять.
Это всё слова, но пора к делу!
Вы ещё увидите, почему анализ данных иногда бывает намного интереснее простого обучения модельки!
## Первые реальные данные <a name="real_data"></a>
Настройтесь, сейчас мы с вами загрузим наши первые реальные данные и начнём с ними работать. Чувствуете это предвкушение?
<p align="center"><img src="https://vk.com/sticker/1-2920-512-9" width=300/></p>
Стоп, а где эти данные взять?
Не переживайте, сегодня не вы одни занимаете наукой о данных, поэтому есть очень много ресурсов с разными данными, а мы постучимся на [Kaggle](https://www.kaggle.com/)! Для начала вам нужно там зарегистрироваться, если вы этого ещё не сделали!
Дальше, нам нужно достать данные, которые нам сейчас нужны - мы воспользуемся [этим датасетом](https://www.kaggle.com/fedesoriano/the-boston-houseprice-data). После регистрации у вас будет возможность скачать CSV файл `boston.csv`.
После этого всё зависит от того, где вы работаете. Если вы проходите практики на Google Colab, то вам нужно загрузить файл с данными на сам Colab (для этого есть меню слева).
Если вы работаете локально, на своей машине (компьютере), то достаточно положить рядом с ноутбуком!
> ✨ Если вы всё выполнили верно, то код дальше будет выполняться без проблем. Если нет - обратитесь к преподавателю за помощью!
```
df_src = pd.read_csv('boston.csv')
```
Когда данные успешно загружены, то важно первым делом посмотреть на размер данных и на сами данные!
```
# Dataset dimensions: (number of rows, number of columns)
df_src.shape
# Peek at the first 10 records
df_src.head(10)
# Column names, dtypes and non-null counts
df_src.info()
# And of course, immediately check for missing values across the data
# NOTE(review): in a notebook only the last expression of a cell is displayed
# automatically - shape/head/info output above will not all render together
df_src.isnull().sum()
```
Смотрите, пара действий, а мы уже видим некоторую информацию о данных.
* Во-первых, у нас есть 14 переменных, из которых как минимум одну мы планируем предсказывать.
* Во-вторых, во всём наборе данных есть всего 506 записей (примеров). Это немного, но хватит, чтобы много обсудить!
Но здесь есть важная особенность, каждая колонка имеет название, но все они в виде аббревиатур! Это плохо, так как это затруднит разбор данных и может ухудшить понимание. Небольшой поиск по странице датасета и в интернете даёт как минимум два источника, в которых есть следующая информация о данных:
- https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html#:~:text=The%20Boston%20Housing%20Dataset,the%20area%20of%20Boston%20Mass
- https://scikit-learn.org/stable/datasets/toy_dataset.html#boston-house-prices-dataset
Информация о колонках:
- CRIM - per capita crime rate by town
- ZN - proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS - proportion of non-retail business acres per town
- CHAS - Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX - nitric oxides concentration (parts per 10 million)
- RM - average number of rooms per dwelling
- AGE - proportion of owner-occupied units built prior to 1940
- DIS - weighted distances to five Boston employment centres
- RAD - index of accessibility to radial highways
- TAX - full-value property-tax rate per $10,000
- PTRATIO - pupil-teacher ratio by town
- B - 1000(Bk - 0.63)^2 where Bk is the proportion of black people by town
- LSTAT - % lower status of the population
- MEDV - Median value of owner-occupied homes in $1000’s
Отлично, какая-то информация есть и её можно перевести с английского, что даёт нам:
- CRIM - уровень преступности на душу населения по городам
- ZN - доля жилой земли, зонированной для участков площадью более 25 000 кв. футов.
- INDUS - доля акров нетоварного бизнеса в городе
- CHAS - переменная-флаг приближенности к реке (= 1 если рядом с рекой; 0 в ином случае)
- NOX - концентрация оксидов азота (частей на 10 миллионов)
- RM - среднее количество комнат в одном жилом помещении
- AGE - доля квартир, занятых владельцами, построенных до 1940 года
- DIS - взвешенные расстояния до пяти бостонских центров занятости
- RAD - индекс доступности радиальных магистралей
- TAX - недвижимость с полной стоимостью-ставка налога за 10 000 долларов США
- PTRATIO - соотношение числа учащихся и учителей по городам
- B - 1000(Bk - 0.63)^2, где Bk - доля чернокожего населения по городам
- LSTAT - процент бедности населения
- MEDV - средняя стоимость домов, занятых владельцами, в 1000 долларов США
Шикарно, это пригодится нам в ходе анализа!
Уже сейчас мы можем сформировать постановку задачи предсказания - нам нужно предсказывать **цену дома (MEDV)** по 13-ти имеющимся признакам. Не факт, что мы всеми признаками воспользуемся, но всё-таки это то, что мы сейчас имеем.
> Не бойтесь, работа с 13 переменными, когда мы вот только работали всего с одной - не так страшна, как кажется. Более того, когда мы строили полиномиальную регрессию 15-го порядка, то там у нас было аж 15 признаков!
Так с чего же начинается анализ данных? Самое простое - с анализа каждой переменной!
Что мы хотим увидеть? В анализе одной переменной важно понять:
- что представляет из себя переменная
- есть ли у неё пропуски и как лучше их заполнить
- есть ли у переменной явные выбросы
- какое у переменной распределение и есть ли смещение
- и другие интересности, которые мы заметим =)
В этой практике мы пройдёмся по наиболее важным переменным, а вот в реальной задаче вам предстоит проанализировать каждую переменную! Так можно составить более полную картину данных!
> ⚠️ Этот список не исчерпывающий, но он сообщает, что любые странности и закономерности в данных важно выявить и проанализировать на предмет того, полезный ли эффект наблюдается или его лучше убрать, чтобы модели было проще искать базовые зависимости в данных.
## Анализ одной переменной (унивариантный - univariate) <a name="uni"></a>
Начнем с анализа под названием унивариантный. Он так называется, потому что мы анализируем каждую переменную по отдельности. Обычно, самым простым вариантом является построение распределения переменной, чтобы понять характер распределения.
Здесь для примера мы возьмем переменную RM (среднее количество комнат в одном жилом помещении).
```
sns.displot(df_src['RM'], kde=True, height=7, aspect=1.5)
```
Что мы видим на графике?
Распределение этой переменной близко к нормальному (Gauss-like - близко к Гауссовому).
Пределы значений в диапазоне около [3; 9] комнат.
Здесь важный акцент мы сделаем на "нормальности" распределения, так как бывают разные вариации нормальности. При анализе другой переменной мы это увидим.
Тогда по этой переменной мы можем заключить следующее:
* по таблице пропусков переменная пропусков не имеет
* распределение близкое к нормальному
* значения лежат в пределах, ожидаемых для описания этой переменной - количество комнат.
Не сложно, правда?
Другую переменную мы возьмём явно с интересным эффектом:
```
sns.displot(df_src['DIS'], kde=True, height=7, aspect=1.5)
```
Вот эту переменную уже сложнее назвать нормально распределённой. Она имеет явное **смещение влево**. Ещё это называют **правый хвост**, так как правая часть похожа на хвост.
Что делать с такими переменными?
Ну, есть разные способы. Тут мы уже с вами говорим про методы модификации данных, а значит начинаем строить план обработки данных!
Можно выделить два наиболее явных способа исправления распределения:
- исправление с помощью логарифма (он исправляет левое смещение)
- воспользоваться автоматизированными способами коррекции, например, [PowerTransformer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html)
Первый способ мы попробуем сейчас, а вот со вторым вы можете разобраться самостоятельно, когда в следующей практике ринетесь в бой!
```
# Log-transform DIS to correct the left-shifted (right-tailed) distribution
dis_log_col = np.log(df_src['DIS'])
# Re-plot to verify that the distribution is now closer to normal
sns.displot(dis_log_col, kde=True, height=7, aspect=1.5)
```
Как видите, центр распределения сместился ближе к середине и само распределение стало больше похоже на нормальное, результат - успех!
> 🔥 Не только в DS, но и в других областях, где вы модифицируете данные - всегда проверяйте результат и сравнивайте с ожиданиями! Это важно, так как без проверки промежуточного результата может появиться проблема, которая доставит много головной боли потом!
> ⚠️ Исправление распределения очень важно для линейных моделей. Мы сейчас не заостряем внимание на этом, но в следующей самостоятельной практике обязательно сравните результаты с исправлением и без него!
В результате, вывод по переменной:
* пропусков не имеет
* *распределение смещено, поэтому требуется исправление*
Последний вывод важно записать в список дел, так как по результатам мы будем делать всю обработку данных единым образом.
Давайте для примера возьмём ещё одну переменную, чтобы проанализировать нестандартное распределение:
```
sns.displot(df_src['CHAS'], kde=True, height=7, aspect=1.5)
```
Можно было бы сказать, что распределение смещено влево, но обратите внимание - в данных всего два значения: 0 и 1. Давайте это проверим:
```
df_src['CHAS'].unique()
```
Действительно, что же нам в таком случае делать?
Да ничего, это распределение бимодальное, поэтому мы не будем пытаться его исправить.
Вывод по переменной:
* пропусков нет
* распределение бимодальное
Делать с этой переменной пока ничего не будем!
Остальные переменные мы оставим за кадром, чтобы вам тоже было, с чем поработать!
По результату анализа одной переменной делается вывод об основных особенностях каждой переменной. Мы с вами ещё научимся другим подходам анализа и многому интересному, но пока достаточно понимать следующие вещи:
- имеет ли переменная пропуски (как их заполнять узнаем потом)?
- понимаем ли мы суть переменной, сходится ли с описанием и логичные ли значения?
- нужно ли корректировать распределение?
## Анализ нескольких переменных (мультивариантный - multivariate) <a name="multi"></a>
Вот мы переходим к более вкусному анализу - зависимости между переменными!
И начнем мы с определения **корреляций**!
Мы уже много говорили о том, что в данных есть зависимости, но наблюдали мы их только на графиках. Как и во всех методах - хорошо бы иметь метод, который численно подтвердит наличие зависимости в данных! Есть он у меня для вас!
Для примера мы возьмём пару переменных - полный анализ (все переменные) вы проведёте самостоятельно!
```
# Для примера выберем следующие признаки
# Мы специально включили целевую переменную, чтобы показать, как проводить вместе в ней анализ
features = ['CRIM', 'LSTAT', 'RM', 'MEDV']
correlation_mtrx = df_src[features].corr()
correlation_mtrx
```
Таблица - это хорошо, но, как обычно, график лучше воспринимается =)
```
sns.heatmap(correlation_mtrx, annot=True, fmt='.2f')
```
Корреляция - это способ численно показать наличие зависимости между двумя переменными.
Давайте попробуем проанализировать то, что мы видим здесь.
С целевой переменной (MEDV) имеют близкую к высокой корреляцию (считается, что высокая корреляция +/- 0.8-0.85 и выше по модулю) переменные RM и LSTAT. Это **может** означать, что эти переменные сильнее влияют на формирование цены, чем признак CRIM.
Почему **может**? Да потому, что коэффициент корреляции - это лишь число, которое может не полностью отражать картину, поэтому такие выводы должны лишь заставлять задуматься, но ни в коем случае не делать конечные выводы лишь на основе корреляции!
> 🤓 Корреляция всегда оценивается по модулю. Она может быть как высокой положительной, так и высокой отрицательной. Это для случая коэффициента Пирсона. Есть и другие коэффициенты, которые имеют диапазон [0; 1], но это уже совсем другая история =)
Поглядите, что такое корреляция на более общем представлении разных ситуаций:
<p align="center"><img src="https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/correlations.png" width=600/></p>
> ⚠️ Высокая корреляция переменных между собой является эффектом **мультиколлинеарности признаков**. Это плохой эффект для модели, так как в случае сильной взаимосвязи переменных между собой модель может запутаться в расставлении весов независимым переменным. Они ведь не просто так зовутся независимыми! Одна из практик - в данных для предсказания оставлять одну из пары зависимых между собой переменных, а другую убирать из данных.
По умолчанию, метод `.corr()` вычисляет коэффициент корреляции Пирсона. Этот тип коэффициента корреляции хорошо оценивает линейные зависимости. Попробуйте разобраться в документации, как оценить корреляцию по Спирману (Spearman) и выведите матрицу. Оцените, как изменились коэффициенты. Как изменился показатель на LSTAT-MEDV? Почему?
```
# TODO - выведите матрицу корреляции по Спирману и проанализируйте ее
```
Отлично, вот так незатейливо мы научились анализировать зависимости в данных без просмотра данных.
На основе этого мы можем построить первоначальные выводы, но не посмотреть на данные (визуализировать их) - это сродни очень серьезной ошибке. Всегда важно по максимуму визуализировать данные и просматривать их. Так можно тщательнее провести анализ и узнать больше полезной информации о данных!
Поэтому, давайте воспользуемся хитрым графиком для отображения зависимостей между данными:
```
sns.pairplot(df_src[features], diag_kind='auto', height=6)
```
Что мы видим на графике?
По главной диагонали отображается распределение самой переменной, так как на 2d графике показывать точки переменной самой с собой - это будет просто линия. В отличных от диагональных ячейках располагаются графики распределения в плоскости одной переменной против другой.
Здесь сразу можно сделать два вывода:
- LSTAT-MEDV имеет нелинейную зависимость (видите, как замедляется уменьшение MEDV при увеличении LSTAT?)
- На графике RM-MEDV видны точки, которые очень "странно" лежат. Явно видно, что с увеличением RM MEDV растёт, но есть несколько точек, которые лежат как бы на прямой, вне зависимости от RM. Их нужно проанализировать!
Давайте перейдем к конкретному разбору!
### LSTAT - MEDV <a name="lstat_medv"></a>
Попробуем вывести точечный график переменных:
```
sns.scatterplot(x='LSTAT', y='MEDV', data=df_src)
```
Здесь явно выделяется нелинейная зависимость, поэтому мы в ходе предобработки сформируем новый признак - вторая степень от LSTAT. Это обусловлено этой явной нелинейностью. Запишем в планы!
### RM - MEDV <a name="rm_medv"></a>
Аналогично более подробно смотрим точечный график переменных:
```
sns.scatterplot(x='RM', y='MEDV', data=df_src)
```
Смотрите, у нас есть два типа потенциальных **выбросов**.
* Одни выбросы - лежат на прямой на уровне около MEDV ~= 50.
* Другие - выбиваются от общей зависимости в диапазонах: RM < 4 и (RM > 8 & MEDV < 30).
При обработке выбросов важно смотреть, что из себя представляют данные, поэтому выведем примеры и глянем на них:
```
# Candidate outliers lying on the MEDV ceiling (price level of 50)
outliers_1 = df_src[df_src['MEDV'] >= 50]
# Candidate outliers deviating from the overall RM-MEDV trend
outliers_2 = df_src[(df_src['RM'] < 4) | ((df_src['RM'] > 8) & (df_src['MEDV'] < 30))]
outliers_1
# NOTE(review): only the last expression is auto-displayed in a notebook cell
outliers_2
```
Давайте посмотрим, выбросы по уровню цены = 50, которые очень нестандартно лежат на плоскости.
По данным явно не видно очевидной зависимости, поэтому трудно сразу сказать, что это явные выбросы. Как правило, выбросы имеют сильные искажения в данных, что видно и по другим переменным.
Если всмотреться, то выбиваются именно точки, которые имеют RM < 7, а у них значение TAX = 666. Если построить распределение переменной TAX (вы это проделаете сами), то можно заметить, что значение 666 отстоит от основных данных, но таких записей с этим значением - аж 130, что сложно назвать выбросом.
Тем не менее, это повторяется и в выбросах, которые отстают от основной группы точек, что наводит на мысль, что это всё-таки их объединяет.
Одно из предположений, которое можно сделать - **цензурирование данных**. Это подход, при котором в данных суммы и информация, которую важно закрыть, заменяется каким-то константным значением.
Поэтому, при обработке, мы удалим эти данные, так как цензурирование искажает зависимости и это может сказаться на результатах работы.
Давайте попробуем подчистить данные и посмотреть, как изменятся распределения точек на графиках:
> ⚠️ Очистка данных - процесс очень выборочный, поэтому важно ещё раз всё перепроверять, чтобы не совершить ошибки, так как в результате данных становится меньше.
> ⚠️ В ходе очистки удаляются записи данных - строки.
```
# Drop the suspected outliers found during the RM-MEDV analysis:
# censored prices (MEDV == 50), implausibly few rooms (RM < 4),
# and large houses with unexpectedly low prices (RM > 8 and MEDV < 30).
is_outlier = (
    (df_src['MEDV'] == 50)
    | (df_src['RM'] < 4)
    | ((df_src['RM'] > 8) & (df_src['MEDV'] < 30))
)
df_cleaned = df_src.loc[~is_outlier]
# Re-draw the pairwise plots to confirm the cleanup improved the picture
sns.pairplot(df_cleaned[features], diag_kind='auto', height=6)
```
Как видите, график стал почище, а зависимость RM-MEDV стала более выраженной. Можем даже по-новой проверить корреляцию:
> ⚠️ Если вы обратили внимание, что на графике CRIM-MEDV много точек лежит на значении CRIM=0 - молодцы! Внимательность - это отлично! В данном случае мы не рассматриваем их в качестве кандидатов на выбросы, так как их мало и нам ещё помогает **смысл переменной**: много домов с низким криминальным уровнем - это нормально.
```
sns.heatmap(df_cleaned[features].corr(), annot=True, fmt='.2f')
```
RM-MEDV ранее был 0.7, а теперь стал 0.73 и всё благодаря чистке данных!
Как видите, как анализ одной переменной, так и анализ нескольких переменных не отличается чем-то сверх-научным. Как правило, данные достаточно посмотреть, пропустить через пару вычислений (как, например, корреляция) и уже можно составлять определённую картину.
Также, в подготовке и очистке данных помогает понимание данных. Так, например, если бы в наших данных количество комнат (RM) имело бы значения -1, то мы понимали бы, что такого быть не может и тоже рассматривали бы это как выбросы.
В результате, мы научились базовому анализу нескольких переменных (multivariate), рассмотрели, как можно детектировать выбросы и как оценивать зависимости численно - отличный результат, мы молодцы!
## Подготовка кода предобработки <a name="preproc"></a>
Помимо того, что на каждом из этапов анализа проверяется своя подготовка, очистка и другая обработка данных - важно в конечном итоге сформировать единый код для предобработки данных, чтобы пользоваться было им удобно и он был более-менее универсален (была возможность применить его на новых данных).
Давайте выделим два этапа:
* очистка данных
* предобработка
Очистка делается для процесса обучения, чтобы модели предоставить более чистые данные без выбросов и лишнего шума.
Предобработка делается как для обучения, так и для обработки новых данных.
> ⚠️ Помним, что конечная цель модели машинного обучения не просто обучиться и показать высокую метрику, а давать предсказания на новых данных и делать это хорошо.
Так вот важно предобработку нормально оформить, чтобы потом не пришлось корячиться с кодом, когда надо будет его разворачивать в облаке =)
Для этого нам поможет парадигма классов в Python!
Но перед этим, мы быстренько оформим код очистки данных:
```
# TODO - write a clean_dataset() function that takes a DataFrame and returns it cleaned
# NOTE - inside the function, select the outliers using the method we worked out above and return the cleaned dataset
# TEST
_test_df = pd.DataFrame({
    'MEDV': [10, 20, 50, 50, 30, 10],
    'RM': [5, 6, 7, 7, 3, 8],
})
_test_result = clean_dataset(_test_df)
# Rows 2 and 3 (MEDV == 50) and row 4 (RM < 4) must be dropped
pd.testing.assert_index_equal(pd.Index([0, 1, 5]), _test_result.index)
print("Well done!")
```
Отлично, функция очистки написана и её мы применим только для нашего датасета, поэтому её универсальность не так важна!
А теперь приступим к проработке класса для нашей собственной предобработки!
Начнём с архитектуры, вот так будет выглядеть наш класс:
```
class DataPreprocessing:
    """Skeleton of the fit/transform preprocessing class discussed below."""

    def __init__(self):
        pass

    def fit(self, df):
        """Collect statistics from the training data (to be implemented)."""
        pass

    def transform(self, df):
        """Apply the collected statistics to transform df (currently a no-op)."""
        return df
```
Вот и весь класс, ничего страшного =)
Только, его методы (а-ля функции) ещё не реализованы, поэтому рано говорить о размерах кода =)
Давайте обсудим, что мы уже написали и зачем нужны эти методы:
### fit() <a name="fit"></a>
`.fit()` - это метод, который занимается сбором статистики с данных, чтобы их потом обработать. Собранную статистику мы будет хранить в атрибутах класса.
Что такое *сбор статистики*?
Всё просто. Давайте вспомним, как в прошлый раз масштабировали данные с помощью MinMaxScale. По сути, нам нужно вычислить минимум и максимум в данных и затем применить формулу с этими константами.
А теперь вспомним, что нам надо масштабировать на обучающей выборке и выборке для теста.
Давайте рассмотрим плохой вариант (*неправильный*): мы вычисляем мин-макс на обучающей выборке, допустим, получили (минимум = 10 и максимум = 100). Преобразовали обучающую выборку и всё ок.
Теперь, берём тестовую и вычисляем то же самое (получаем, минимум = 20 и максимум = 105). Преобразовали тестовую выборку.
А что дальше?
Ну, модель обучится, ведь обучение - простая математика и предсказания будут как-то работать, но будет **концептуальная** ошибка!
Именно в том, что модель учится на данных, ей приходит значение признака 1.0, а в исходных данных 1.0 ~ 100 (ведь максимум на обучающей = 100). Потом мы передаём тестовую и там тоже есть значение 1.0, но только на тестовой это означает 105.
К чему это приводит?
Модель ничего не заметит, сделает предсказание, а в нём будет ошибка! Ведь мы, хоть и не специально, начинаем модель путать, подавая данные, которые означают совсем другое, нежели на чём модель училась.
Что же мы можем сделать?
А что если, мы на обучающей выборке найдем минимум и максимум, запомним их и применим как к обучающей, так и тестовой выборке! Тогда, во всех данных (и даже в новых), 1.0 будет означать 100 и мы никого путать не будем!
> 🤓 Да, в нашем случае на тестовой будут значения больше 1.0, но это не страшно! Главное для масштабирования - привести к одинаковым порядкам, а для правильной обработки - собрать статистику на обучающей выборке (train) и дальше применять её для трансформации как на обучающей, так и на тестовой выборке!
Так вот мы и подошли к главному правилу в организации `fit()-transform()`: `fit()` всегда применяется только на train выборке! Эта функция собирает статистику, а её надо собирать только на обучающей выборке! На полной (train+test), не тестовой (test), а только на обучающей (train)!
### transform() <a name="transform"></a>
Ну тут уже все проще. Все этапы обработки данных, что требуют сбор статистики - собирают в `fit()`, ну а дальше просто применяем всю обработку в `transform()`! Все просто! =)
## Back to programming! <a name="prog"></a>
Отлично, мы разобрались, зачем нужен каждый метод! Давайте попробуем написать свой класс для предобработки!
Реализуем следующую предобработку:
- Выравнивание распределения для признака `DIS` с помощью логарифма
- Нужно создать новый признак `DIS_log`, а старый удалить
- Генерация полиномиального признака для `LSTAT` с названием `LSTAT_poly_2`
- MinMaxScale - посмотрите на класс [MinMaxScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)
- Сделайте масштабирование всех признаков
По сути, это небольшой набор того, как мы запланировали предобработать данные по результатам анализа!
> 🔥 Объекты трансформеров из `sklearn` работают по аналогичному принципу, как мы с вами обсудили. Поэтому, при работе с ними можно сами объекты трансформеров создавать прямо в конструкторе нашего класса. `fit()` трансформеров вызывать в нашем методе `fit()`, ну и `transform()`, соответственно.
```
# TODO - implement the preprocessing described above
class DataPreprocessing:
    def __init__(self):
        pass

    def fit(self, df):
        # Work on a copy so the caller's data is not modified
        df_copy = df.copy()
        # NOTE: generate the polynomial feature and apply the log transform here
        # as well, so that MinMaxScaler is fitted on those columns too
        pass

    def transform(self, df):
        # transform() must also return a DataFrame!
        return df

# TEST
_test_df = pd.DataFrame({'DIS': [2.3, 1.9, 0.4, 2.2], 'LSTAT': [0.1, 0.2, 0.3, 0.4], 'MORE_FEAT': [1, 2, 3, 4]}, index=[4, 6, 10, 12])
preproc = DataPreprocessing()
preproc.fit(_test_df)
_test_result = preproc.transform(_test_df)
_test_expected = pd.DataFrame({
    'DIS_log': [1.0, 0.8907756387942631, 0.0, 0.9745873735075969],
    'LSTAT': [0.0, 0.333, 0.666, 1.0],
    'LSTAT_poly_2': [0.0, 0.2, 0.5333, 1.],
    'MORE_FEAT': [0.0, 0.333, 0.666, 1.0]
}, index=_test_df.index)
pd.testing.assert_frame_equal(_test_result, _test_expected, check_like=True, atol=1e-3)
print("Well done!")
```
Если вы прошли тест - значит вы большие молодцы!!
В результате такой класс можно спокойно применять для подготовки данных для обучения модели и более того, для подготовки данных при поступлении новых!
А это значит, мы ещё не обучили, но уже готовы предсказывать и показывать, как круто наша модель работает! Стремимся к высоким целям!
## Заключение <a name="conclusion"></a>
В результате прохождения этой практики вы узнали очень важный факт (а может и несколько).
**Анализ данных нужен и важен!**
Конечно, мы только увидели пару приёмов, но в следующей практике, вы попробуете их в бою и увидите, что это действительно работает!
## Вопросы для закрепления <a name="qa"></a>
А теперь пара вопросов, чтобы закрепить материал!
1. Зачем нужны классы в DS?
2. Чем полезна предобработка данных?
3. Опасно ли удалять какие-то данные из исходных? Когда можно такое делать?
4. На какой выборке применяется метод-fit?
5. На какой выборке применяется метод-transform?
# Полезные ссылки <a name='links'></a>
* [Linear Discriminant Analysis (LDA) от StatQuest](https://www.youtube.com/watch?v=azXCzI57Yfc)
* [Basic Statistics for Data Science на Medium](https://medium.com/mlearning-ai/important-statistical-concepts-for-data-scientists-54e09106b75e)
* [Quartiles for Beginners in DS на Medium](https://medium.com/@vinitasilaparasetty/quartiles-for-beginners-in-data-science-2ca5a640b07b)
* [Understanding Value of Correlations in DS на Medium](https://medium.com/fintechexplained/did-you-know-the-importance-of-finding-correlations-in-data-science-1fa3943debc2)
* [Correlation](https://luminousmen.com/post/data-science-correlation)
* [Fundamentals of Statistics](https://towardsdatascience.com/fundamentals-of-statistics-for-data-scientists-and-data-analysts-69d93a05aae7)
| github_jupyter |
# Hypothesis Testing
From lecture, we know that hypothesis testing is a critical tool in determining what the value of a parameter could be.
We know that the basis of our testing has two attributes:
**Null Hypothesis: $H_0$**
**Alternative Hypothesis: $H_a$**
The tests we have discussed in lecture are:
* One Population Proportion
* Difference in Population Proportions
* One Population Mean
* Difference in Population Means
In this tutorial, I will introduce some functions that are extremely useful when calculating a t-statistic and p-value for a hypothesis test.
Let's quickly review the following ways to calculate a test statistic for the tests listed above.
The equation is:
$$\frac{Best\ Estimate - Hypothesized\ Estimate}{Standard\ Error\ of\ Estimate}$$
We will use the examples from our lectures and use python functions to streamline our tests.
```
import statsmodels.api as sm
import numpy as np
import pandas as pd
```
### One Population Proportion
#### Research Question
In previous years 52% of parents believed that electronics and social media was the cause of their teenager’s lack of sleep. Do more parents today believe that their teenager’s lack of sleep is caused due to electronics and social media?
**Population**: Parents with a teenager (age 13-18)
**Parameter of Interest**: p
**Null Hypothesis:** p = 0.52
**Alternative Hypothesis:** p > 0.52
1018 Parents
56% believe that their teenager’s lack of sleep is caused due to electronics and social media.
```
# One-proportion z-test: H0: p = 0.52 vs Ha: p > 0.52
help(sm.stats.proportions_ztest)

sample_size = 1018     # number of parents surveyed
null_prop = .52        # hypothesized proportion under H0
observed_prop = .56    # observed sample proportion

# proportions_ztest takes the observed count, the sample size, and the null value
sm.stats.proportions_ztest(observed_prop * sample_size, sample_size, null_prop)
```
### Difference in Population Proportions
#### Research Question
Is there a significant difference between the population proportions of parents of black children and parents of Hispanic children who report that their child has had some swimming lessons?
**Populations**: All parents of black children age 6-18 and all parents of Hispanic children age 6-18
**Parameter of Interest**: p1 - p2, where p1 = black and p2 = hispanic
**Null Hypothesis:** p1 - p2 = 0
**Alternative Hypothesis:** p1 - p2 $\neq$ 0
247 Parents of Black Children
36.8% of parents report that their child has had some swimming lessons.
308 Parents of Hispanic Children
38.9% of parents report that their child has had some swimming lessons.
```
help(sm.stats.ttest_ind)

# Simulate the two samples as Bernoulli draws.
# A seeded Generator makes the notebook reproducible run-to-run without
# touching numpy's global RNG state (the original draws were unseeded).
rng = np.random.default_rng(0)

n1 = 247   # parents of Black children
p1 = .37   # ~36.8% reported some swimming lessons
n2 = 308   # parents of Hispanic children
p2 = .39   # ~38.9% reported some swimming lessons

population1 = rng.binomial(1, p1, n1)
population2 = rng.binomial(1, p2, n2)

# Two-sample t-test of H0: p1 - p2 = 0
sm.stats.ttest_ind(population1, population2)
```
### One Population Mean
#### Research Question
Is the average cartwheel distance (in inches) for adults
more than 80 inches?
**Population**: All adults
**Parameter of Interest**: $\mu$, population mean cartwheel distance.
**Null Hypothesis:** $\mu$ = 80
**Alternative Hypothesis:** $\mu$ > 80
25 Adults
$\mu = 82.46$
$\sigma = 15.06$
```
df = pd.read_csv("Cartwheeldata.csv")
df.head()

# Sample summary statistics for the cartwheel distances
n = len(df)
mean = df["CWDistance"].mean()
sd = df["CWDistance"].std()
(n, mean, sd)

# Bug fix: the original displayed help() for proportions_ztest, but the
# test actually applied below is the one-sample z-test, sm.stats.ztest.
help(sm.stats.ztest)

# One-sided test of H0: mu = 80 vs Ha: mu > 80
sm.stats.ztest(df["CWDistance"], value = 80, alternative = "larger")
```
### Difference in Population Means
#### Research Question
Considering adults in the NHANES data, do males have a significantly higher mean Body Mass Index than females?
**Population**: Adults in the NHANES data.
**Parameter of Interest**: $\mu_1 - \mu_2$, Body Mass Index.
**Null Hypothesis:** $\mu_1 = \mu_2$
**Alternative Hypothesis:** $\mu_1 \neq \mu_2$
2976 Females
$\mu_1 = 29.94$
$\sigma_1 = 7.75$
2759 Male Adults
$\mu_2 = 28.78$
$\sigma_2 = 6.25$
$\mu_1 - \mu_2 = 1.16$
```
# Two-sample z-test: mean BMI of adult females vs males in NHANES.
url = "nhanes_2015_2016.csv"
da = pd.read_csv(url)
da.head()

# RIAGENDR codes: 1 = male, 2 = female
females = da[da["RIAGENDR"] == 2]
male = da[da["RIAGENDR"] == 1]

# Female sample: size, mean BMI, standard deviation
n1, mu1, sd1 = len(females), females["BMXBMI"].mean(), females["BMXBMI"].std()
(n1, mu1, sd1)

# Male sample: size, mean BMI, standard deviation
n2, mu2, sd2 = len(male), male["BMXBMI"].mean(), male["BMXBMI"].std()
(n2, mu2, sd2)

# Drop missing BMI values before testing H0: mu1 = mu2
sm.stats.ztest(females["BMXBMI"].dropna(), male["BMXBMI"].dropna())
```
| github_jupyter |
### University of Washington: Machine Learning and Statistics
# Lecture 6: Density Estimation 1
Andrew Connolly and Stephen Portillo
##### Resources for this notebook include:
- [Textbook](https://press.princeton.edu/books/hardcover/9780691198309/statistics-data-mining-and-machine-learning-in-astronomy) Chapter 8.
- [astroML website](https://www.astroml.org/index.html)
This notebook is developed based on material from A. Connolly, Z. Ivezic, M. Juric, S. Portillo, G. Richards, B. Sipocz, J. VanderPlas, D. Hogg, Killian Weinberger and many others.
The notebook and associated material are available from [github](https://github.com/uw-astro/astr-598a-win22).
Make sure you are using the latest version of astroML
> pip install --pre -U astroml
<a id='toc'></a>
## This notebook includes:
[Introduction to Clustering ](#basics)
[1-D hypothesis testing](#1Dht)
[K-means clustering algorithm](#kmeans)
[Kernel Density Estimation](#kde)
[K-nearest neighbors](#knn)
## Introduction to Clustering <a id='basics'></a>
[Go to top](#toc)
“Clustering” in astronomy refers to a number of different aspects of data analysis. Given a multivariate point data set, we can ask whether it displays any structure, that is, concentrations of points. Alternatively, when a density estimate is available we can search for “overdensities”. Another way to interpret clustering is to seek a partitioning or segmentation of data into smaller parts according to some criteria.
#### Unsupervised vs. Supervised Classification
In density estimation, we estimate joint probability distributions from multivariate data sets to identify the inherent clustering. This is essentially **unsupervised classification**. Here “unsupervised” means that there is no prior information about the number and properties of clusters. In other words, this method is a search for unknown structure in your (multi-dimensional) dataset.
If we have labels for some of these data points (e.g., an object is tall, short, red, or blue), we can develop a relationship between the label and the properties of a source. This is **supervised classification**. In other words, this method is finding objects in your (multi-dimensional) dataset that "look like" objects in your training set.
Classification, regression, and density estimation are all related. For example, the regression function $\hat{y} = f(y|\vec{x})$ is the best estimated value of $y$ given a value of $\vec{x}$. In classification $y$ is categorical and $f(y|\vec{x})$ is called the _discriminant function_
## 1-D hypothesis testing <a id='1Dht'></a>
[Go to top](#toc)
How do we decide about the existence of a cluster? Let's start with
the simplest but fundamental example: 1-D hypothesis testing.
**Motivating question:** You just measured x = 3, with a negligible measurement error.
You know that you could have drawn this value from one of two possible populations (e.g. stars and galaxies). One population can be described as N(0,2), and the other one as N(4,1).
Which population is more likely, given your x?
Naive answer: 3 is closer to 4 ("1 $\sigma$ away") than to 0
("1.5 $\sigma$ away") so the second population is more likely.
Let's see why this answer is wrong...
If the underlying distribution, h(x), is the sum of two populations
$$h(x) = (1-a) h_B (x) + a h_S (x) $$
with $a$ the normalization coefficient. Given ${x_i}$ we want to know $p_S(x_i)$ (which means $p_B(x_i) = 1 - p_S(x_i)$)

We can choose a classification boundary, $x_c$. From this we can defined the expected number of spurious sources (false positives or Type I errors)
$$n_{spurious} = N(1-a) \int_{x_c}^{\infty} h_B(x)dx $$
and the number of missed (false negative or Type II errors)
$$n_{missed} = N a \int_{0}^{x_c} h_S(x)dx $$
Number of sources will be
$$n_{sources} = N a - n_{missed} + n_{spurious} $$
The completeness of the sample (sometimes called the recall or sensitivity) is then
$$\eta = \frac{N a - n_{missed} }{N a} = 1 - \int_{0}^{x_c} h_S(x)dx $$
and the contamination of the sample is
$$\epsilon = \frac{n_{spurious}}{n_{source}} $$
and the decision boundary is the $x$ value at which each class is equally likely,
$$\pi_1 p_1(x) = \pi_2 p_2(x) $$
$\pi_i$ is the prior on the object being in class $i$ (estimated from the relative numbers of sources in each class). The form of $h_S$ and $h_B$ and the priors are needed in deciding the classification threshold
## K-means clustering algorithm <a id='kmeans'></a>
[Go to top](#toc)

Question is: how do we find clusters or estimate density efficiently?
<u> The _K-means_ algorithm </u>
The first approach for finding clusters that is always taught is $K$-means (simple and works well)
$K$-means partitions points into $K$ disjoint subsets ($C_k$) with each subset containing $N_k$
points
It minimizes the objective/cost/likelihood function,
$\sum_{k=1}^K \sum_{i \in C_k} || x_i - \mu_k ||^2$
$\mu_k = \frac{1}{N_k} \sum_{i \in C_k} x_i$ is the mean of the
points in set $C_k$
_Procedure:_
1. define the number of clusters $K$
2. choose the centroid, $\mu_k$, of each of the $K$ clusters
3. assign each point to the cluster that it is closest to
4. update the centroid of each cluster by recomputing $\mu_k$ according to the new assignments.
5. goto (3) until there are no new assignments.
Global optima are not guaranteed but the process never increases the sum-of-squares error.
Typically we run multiple times with different starting values for the
centroids of $C_k$.
We will start by looking at the density of stars as a function of metallicity, using scikit-learn's preprocessing. We use the StandardScaler function to normalize each feature
```
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import norm
from sklearn.cluster import KMeans
from sklearn import preprocessing
from astroML.datasets import fetch_sdss_sspp
#------------------------------------------------------------
# Get data
data = fetch_sdss_sspp(cleaned=True)
X = np.vstack([data['FeH'], data['alphFe']]).T
# truncate dataset for speed
X = X[::5]
#------------------------------------------------------------
# Compute a 2D histogram of the input
# Fe vs H
#O, Ne, Mg, Si, S, Ar, Ca, and Ti vs Fe
H, FeH_bins, alphFe_bins = np.histogram2d(data['FeH'], data['alphFe'], 50)
#------------------------------------------------------------
# Compute the KMeans clustering
n_clusters = 2
scaler = preprocessing.StandardScaler()
clf = KMeans(n_clusters)
clf.fit(scaler.fit_transform(X.astype("float")))
#------------------------------------------------------------
# Visualize the results
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot()
# plot density
ax = plt.axes()
ax.imshow(H.T, origin='lower', interpolation='nearest', aspect='auto',
extent=[FeH_bins[0], FeH_bins[-1],
alphFe_bins[0], alphFe_bins[-1]],
cmap=plt.cm.binary)
# plot cluster centers
cluster_centers = scaler.inverse_transform(clf.cluster_centers_)
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1],
s=40, c='w', edgecolors='k')
# plot cluster boundaries
FeH_centers = 0.5 * (FeH_bins[1:] + FeH_bins[:-1])
alphFe_centers = 0.5 * (alphFe_bins[1:] + alphFe_bins[:-1])
Xgrid = np.meshgrid(FeH_centers, alphFe_centers)
Xgrid = np.array(Xgrid).reshape((2, 50 * 50)).T
H = clf.predict(scaler.transform(Xgrid)).reshape((50, 50))
for i in range(n_clusters):
Hcp = H.copy()
flag = (Hcp == i)
Hcp[flag] = 1
Hcp[~flag] = 0
ax.contour(FeH_centers, alphFe_centers, Hcp, [-0.5, 0.5],
linewidths=2, colors='k')
ax.xaxis.set_major_locator(plt.MultipleLocator(0.3))
ax.set_xlim(-1.101, 0.101)
ax.set_ylim(alphFe_bins[0], alphFe_bins[-1])
ax.set_xlabel(r'$\rm [Fe/H]$')
ax.set_ylabel(r'$\rm [\alpha/Fe]$')
plt.show()
```
***How do you choose the number of clusters?***
## Kernel Density Estimation <a id='kde'></a>
[Go to top](#toc)
$N(x) = \frac{1}{Nh^D} \sum_{i=1}^N K\left( \frac{d(x,x_i)}{h} \right),$
K: kernel (defined by the bandwidth h) is any smooth function which is positive at all values
Too narrow a kernel, too spiky the results (high variance)
Too broad a kernel, too smooth or washed out the results (bias)
_Common kernels_
Squared exponential (Normal): $ K(u) = \frac{1}{(2\pi)^{D/2}} e^{- u^2 / 2}$ D: dimension
Tophat: $ K(u) = \left\{
\begin{array}{ll}
\frac{1}{V_D(r)} & {\rm if}\ u \le r,\\
0 & {\rm if}\ u > r,
\end{array}
\right.$
Exponential: $ K(u) = \frac{1}{D!\, V_D(r)}e^{-|u|}$
with $V_D(r)$ the volume of a hypersphere radius $r$; $V_D(r) = \frac{2r^D\pi^{D/2}}{D\ \Gamma(D/2)}$
<img src="figures/funcs.png">
Perhaps surprisingly the primary feature is the bandwidth of these distributions not the exact shape. Choosing the bandwidth is usually done through cross-validation
To demonstrate this, the plot projects galaxies in the SDSS "Great Wall" as scattered points by their spatial locations onto the equatorial plane (declination ~ $0^o$). The graph below shows the location of each point, but it is hard to extract "clustered information" from it.
```
from matplotlib.colors import LogNorm
from sklearn.neighbors import KernelDensity
from astroML.datasets import fetch_great_wall

#------------------------------------------------------------
# Fetch the great wall data
X = fetch_great_wall()

#------------------------------------------------------------
# Create the grid on which to evaluate the results
Nx = 50
Ny = 125
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)

#------------------------------------------------------------
# Evaluate the Gaussian KDE on the grid
# (the unused `kernels` list and the dead `dens = []` pre-assignment
# from the original cell are removed)
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
                                            np.linspace(ymin, ymax, Ny)))).T

bandwidth = 5
kde = KernelDensity(bandwidth=bandwidth, kernel='gaussian')
log_dens = kde.fit(X).score_samples(Xgrid)
# score_samples returns log-density; scale by N to get expected counts
dens = X.shape[0] * np.exp(log_dens).reshape((Ny, Nx))

#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(15, 8))
fig.subplots_adjust(left=0.12, right=0.95, bottom=0.2, top=0.9,
                    hspace=0.01, wspace=0.01)

# First plot: scatter the points
ax1 = plt.subplot(221, aspect='equal')
ax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax1.text(0.95, 0.9, "input", ha='right', va='top',
         transform=ax1.transAxes,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Second plot: gaussian kernel density estimate
ax2 = plt.subplot(222, aspect='equal')
ax2.imshow(dens.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax2.text(0.95, 0.9, "Gaussian h={}".format(bandwidth), ha='right', va='top',
         transform=ax2.transAxes,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Shared limits and labels; inner tick labels suppressed
for ax in [ax1, ax2]:
    ax.set_xlim(ymin, ymax - 0.01)
    ax.set_ylim(xmin, xmax)

for ax in [ax1, ax2]:
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.set_xlabel('$y$ (Mpc)')

for ax in [ax2]:
    ax.yaxis.set_major_formatter(plt.NullFormatter())

for ax in [ax1]:
    ax.set_ylabel('$x$ (Mpc)')

plt.show()
```
##### Exercise: Use Kernel Density Estimation with any kernel you choose on the color-magnitude diagrams (CMDs) of the two data sets Field A and Field B. Plot the density for each CMD in each panel (i.e., $g-r$ on the x axis and $g$ on the y axis) - a Hess diagrams.
Experiment with different kernel bandwidths, plotting one that visually seems "best" (i.e., a good balance of bias vs. variance) for each kernel.
Don't forget to change the figure size so that individual panels have aspect ratios closer to what is common for color-magnitude diagrams (i.e., x:y ~ 4:6 or so).
Subtract the "best" density for Field B from A to see if there are structures present in the CMD. What are they?
```
# Hess diagrams with SDSS data
import pandas as pd

fieldA = pd.read_csv('data/fieldA.csv')
fieldB = pd.read_csv('data/fieldB.csv')

# Add a g-r color column to each field
for field in (fieldA, fieldB):
    field['g-r'] = field.g - field.r

# Scatter the CMD of each field side by side (magnitude axis inverted:
# bright at the top, as is conventional for color-magnitude diagrams)
fig = plt.figure(figsize=(8, 8))
for panel, field in zip((121, 122), (fieldA, fieldB)):
    ax = fig.add_subplot(panel, aspect='equal')
    ax.scatter(field['g-r'], field['g'], s=1, alpha=0.5)
    ax.set_xlim(-0.5, 2)
    ax.set_ylim(22, 11)
    ax.set_xlabel('g-r')
    ax.set_ylabel('g')

plt.show()
```
## Nearest neighbor estimation <a id='knn'></a>
[Go to top](#toc)
Simple (simplest?) density estimator heavily used in astrophysics (cluster detection, large scale structure measures), originally proposed by [Dressler et al. 1980](https://ui.adsabs.harvard.edu/abs/1980ApJ...236..351D/abstract) .
For each point we find the distance to the $K$th-nearest neighbor, $d_K$. **Note: we are not choosing clusters here** In this method, the implied point density at an arbitrary position x is estimated as
$$\hat{f_K}(x) = \frac{K}{V_D(d_K)}$$
where $V_D$ is evaluated volume, and D is the problem dimensionality.
By taking the assumption that the underlying density field is locally constant, we can further simplify this method as
$$\hat{f_K}(x) = \frac{C}{d_K^D}$$
where C is a scaling factor evaluated by requiring that the sum of the product of $\hat{f_K}(x)$ and
pixel volume is equal to the total number of data points.
The error on $\hat{f}_K(x)$ is
$$\sigma_f = K^{1/2}/V_D (d_K)$$
The fractional (relative) error is
$$\sigma_f/\hat{f} = 1/K^{1/2}$$.
We can see that the
* fractional accuracy increases with $K$ at expense of the spatial resolution (bias-variance trade-off)
* effective resolution scales with $K^{1/D}$
The method can be improved by considering distances to _all_ $K$ nearest neighbors
$$\hat{f}_K(x) = {C \over \sum_{i=1}^K d_i^D}$$
The normalization when computing local density without regard to overall mean density is
$$C = \frac{K\, (K + 1)}{2 V_D(r)}$$
In this method, we can change parameter k to get different estimation result. k should be at least 5 because the estimator is biased and has a large variance for smaller k; see [Casertano, S. and Hut, P.](https://ui.adsabs.harvard.edu/abs/1985ApJ...298...80C/abstract)
```
from sklearn.neighbors import KernelDensity
from astroML.density_estimation import KNeighborsDensity

#------------------------------------------------------------
# Create the grid on which to evaluate the results
Nx = 50
Ny = 125
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)

#------------------------------------------------------------
# Evaluate for several models
# NOTE(review): X, np, plt and LogNorm come from earlier cells of this
# notebook (X is the great-wall dataset fetched above).
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
                                            np.linspace(ymin, ymax, Ny)))).T

# Gaussian KDE with bandwidth h = 5 (same model as the previous figure)
kde = KernelDensity(kernel='gaussian', bandwidth=5)
log_pdf_kde = kde.fit(X).score_samples(Xgrid).reshape((Ny, Nx))
dens_KDE = np.exp(log_pdf_kde)

# Nearest-neighbor density estimates for k = 5 and k = 40
knn5 = KNeighborsDensity('bayesian', 5)
dens_k5 = knn5.fit(X).eval(Xgrid).reshape((Ny, Nx))

knn40 = KNeighborsDensity('bayesian', 40)
dens_k40 = knn40.fit(X).eval(Xgrid).reshape((Ny, Nx))

#------------------------------------------------------------
# Plot the results: input scatter plus the three density estimates
fig = plt.figure(figsize=(9, 4.0))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.14, top=0.9,
                    hspace=0.01, wspace=0.01)

# First plot: scatter the points
ax1 = plt.subplot(221, aspect='equal')
ax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax1.text(0.98, 0.95, "input", ha='right', va='top',
         transform=ax1.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Second plot: KDE (log-scaled color map)
ax2 = plt.subplot(222, aspect='equal')
ax2.imshow(dens_KDE.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax2.text(0.98, 0.95, "KDE: gaussian $(h=5)$", ha='right', va='top',
         transform=ax2.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Third plot: KNN, k=5 (high resolution, high variance)
ax3 = plt.subplot(223, aspect='equal')
ax3.imshow(dens_k5.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax3.text(0.98, 0.95, "KNN $(k=5)$", ha='right', va='top',
         transform=ax3.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Fourth plot: KNN, k=40 (smoother, lower resolution)
ax4 = plt.subplot(224, aspect='equal')
ax4.imshow(dens_k40.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax4.text(0.98, 0.95, "KNN $(k=40)$", ha='right', va='top',
         transform=ax4.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Shared axis limits; tick labels are suppressed on the inner edges
for ax in [ax1, ax2, ax3, ax4]:
    ax.set_xlim(ymin, ymax - 0.01)
    ax.set_ylim(xmin, xmax)

for ax in [ax1, ax2]:
    ax.xaxis.set_major_formatter(plt.NullFormatter())

for ax in [ax3, ax4]:
    ax.set_xlabel('$y$ (Mpc)')

for ax in [ax2, ax4]:
    ax.yaxis.set_major_formatter(plt.NullFormatter())

for ax in [ax1, ax3]:
    ax.set_ylabel('$x$ (Mpc)')

plt.show()
```
| github_jupyter |
### How does Python import Modules?
When we run a statement such as
`import fractions`
what is Python actually doing?
The first thing to note is that Python is doing the import at **run time**, i.e. while your code is actually running.
This is different from traditional compiled languages such as C where modules are compiled and linked at compile time.
In both cases though, the system needs to know **where** those code files exist.
Python uses a relatively complex system of how to find and load modules. I'm not going to even attempt to describe this in detail, but we'll take a brief look at the main points.
The `sys` module has a few properties that define where Python is going to look for modules (either built-in or standard library as well as our own or 3rd party):
```
import sys
```
Where is Python installed?
```
sys.prefix
```
Where are the compiled C binaries located?
```
sys.exec_prefix
```
These two properties are how virtual environments are basically able to work with different environments. Python is installed to a different set of directories, and these prefixes are manipulated to reflect the current Python location.
Where does Python look for imports?
```
sys.path
```
Basically when we import a module, Python will search for the module in the paths contained in `sys.path`.
If it does not find the module in one of those paths, the import will fail.
So if you ever run into a problem where Python is not able to import a module or package, you should check this first to make sure the path to your module/package is in that list.
At a high level, this is how Python imports a module from file:
* checks the `sys.modules` cache to see if the module has already been imported - if so it simply uses the reference in there, otherwise:
* creates a new module object (`types.ModuleType`)
* loads the source code from file
* adds an entry to `sys.modules` with the name as key and the newly created module object as the value
* compiles and executes the source code
One thing that's really important to note is that when a module is imported, the module code is **executed**.
Let's switch over to PyCharm (or your favorite IDE, which may well be VI/emacs and the command line!). All the files are included in the lecture resources or my github repository.
#### Example 1
This example shows that when we import a module, the module code is actually **executed**.
Furthermore, that module now has its own namespace that can be seen in `__dict__`.
#### Example 2
In this example, we can see that when we `import` a module, Python first looks for it in `sys.modules`.
To make the point, we put a key/value pair in `sys.modules` ourselves, and then import it.
In fact we put a function in there instead of a module, and import that.
Please **DO NOT** do this; I'm just making the point that `import` will first look in the cache and immediately just return the object if the name is found, basically just as if we had written:
`
module = sys.modules['module']
`
```
# Pre-seed the import cache: `import` consults sys.modules first, so the
# object stored under 'test' is returned as-is and no file lookup happens.
# (Demonstration only — never do this in real code.)
sys.modules['test'] = lambda: 'Testing module caching'
import test
```
See, it got the "module" from sys...
```
test    # the "module" is actually the lambda we cached in sys.modules
test()  # ...so it is callable and returns the string
```
#### Example 3a
In this example we look at a simplified view of how Python imports a module.
We use two built-in functions, `compile` and `exec`.
The `compile` function compiles source (e.g. text) into a code object.
The `exec` function is used to execute a code object. Optionally we can specify what dictionary should be used to store global symbols.
In our case we are going to want to use our module's `__dict__`.
#### Example 3b
This is essentially the same as example 3a, except we make our importer into a function and use it to show how we technically should look for a cached version of the module first.
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Deep Learning
## Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
---
## Step 0: Load The Data
```
# Load pickled data
import pickle

# Paths to the pickled German Traffic Sign data splits.
training_file = '/home/workspace/data/train.p'
validation_file='/home/workspace/data/valid.p'
testing_file = '/home/workspace/data/test.p'

def _load_pickle(path):
    """Read one pickled data split from disk and return the stored dict."""
    with open(path, mode='rb') as f:
        return pickle.load(f)

train = _load_pickle(training_file)
valid = _load_pickle(validation_file)
test = _load_pickle(testing_file)

# Unpack image arrays and class labels for each split.
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
```
---
## Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
```
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
import numpy as np

# Number of training examples
n_train = X_train.shape[0]

# Number of validation examples (the original left this TODO unfilled)
n_validation = X_valid.shape[0]

# Number of testing examples.
n_test = X_test.shape[0]

# Shape of a traffic sign image: (width, height, channels)
image_shape = X_train.shape[1:]

# Number of unique classes/labels in the dataset.
n_classes = len(np.unique(y_train))

print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
```
### Include an exploratory visualization of the dataset
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
```
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import random
import csv
# Visualizations will be shown in the notebook.
%matplotlib inline
def figures_plotting(figures, nrows = 1, ncols = 1, labels = None):
fig, axs = plt.subplots(ncols, nrows, figsize = (12, 14))
axs = axs.ravel()
for index, title in zip(range(len(figures)), figures):
axs[index].imshow(figures[title], plt.gray())
if(labels != None):
axs[index].set_title(labels[index])
else:
axs[index].set_title(title)
axs[index].set_axis_off()
plt.tight_layout()
name_values = np.genfromtxt('signnames.csv', skip_header = 1, dtype = [('myint', 'i8'), ('mystring', 'S55')], delimiter = ',')
number_of_images = 8
figures = {}
labels = {}
for i in range(number_of_images):
index = random.randint(0, n_train -1)
labels[i] = name_values[y_train[index]][1].decode('ascii')
# print(name_values[y_train[index]][1].decode('ascii'))
figures[i] = X_train[index]
figures_plotting(figures, 4, 2, labels)
```
#### Dataset Sign Counts check
```
## This code block checks the class distribution of the training,
## validation and test datasets.
## Bug fix: each figure is now saved *before* plt.show(); the original
## called plt.savefig() three times at the very end, after show() had
## already finished the figures, so it wrote blank images.

unique_train, counts_train = np.unique(y_train, return_counts=True)
plt.bar(unique_train, counts_train)
plt.grid()
plt.title("Train Dataset Sign Counts")
plt.savefig('Train Dataset Sign Counts')
plt.show()

unique_test, counts_test = np.unique(y_test, return_counts=True)
plt.bar(unique_test, counts_test)
plt.grid()
plt.title("Test Dataset Sign Counts")
plt.savefig('Test Dataset Sign Counts')
plt.show()

unique_valid, counts_valid = np.unique(y_valid, return_counts=True)
plt.bar(unique_valid, counts_valid)
plt.grid()
plt.title("Valid Dataset Sign Counts")
plt.savefig('Valid Dataset Sign Counts')
plt.show()
```
----
## Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
There are various aspects to consider when thinking about this problem:
- Neural network architecture (is the network over or underfitting?)
- Play around preprocessing techniques (normalization, rgb to grayscale, etc)
- Number of examples per label (some have more than others).
- Generate fake data.
Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
### Pre-process the Data Set (normalization, grayscale, etc.)
Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
```
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from math import ceil
from sklearn.utils import shuffle
## Converting RGB image to grayscale image
# Grayscale = mean of the three colour channels; keepdims preserves the
# trailing channel axis so the arrays stay 4-D: (N, 32, 32, 1).
# NOTE(review): X_train / X_test / X_valid are assumed to be loaded by an
# earlier notebook cell as (N, 32, 32, 3) uint8 arrays — confirm.
X_train_rgb = X_train
X_train_gray = np.sum(X_train/3, axis=3, keepdims=True)
X_test_rgb = X_test
X_test_gray = np.sum(X_test/3, axis=3, keepdims=True)
X_valid_rgb = X_valid
X_valid_gray = np.sum(X_valid/3, axis=3, keepdims=True)
# Sanity-check shapes: RGB (N, 32, 32, 3) vs grayscale (N, 32, 32, 1).
print(X_train_rgb.shape)
print(X_train_gray.shape)
print(X_test_rgb.shape)
print(X_test_gray.shape)
print(X_valid_rgb.shape)
print(X_valid_gray.shape)
## As we have now converted all the rgb images to grayscale, here onwards we will be using this processed image to give it as an input to our network.
X_train = X_train_gray
X_test = X_test_gray
X_valid = X_valid_gray
# Channel count fed to the network (1 after grayscaling).
image_depth_channels = X_train.shape[3]
print(image_depth_channels)
# Display a handful of random training images with their sign names.
# NOTE(review): `labels`, `name_values`, `n_train`, `random` and
# `figures_plotting` come from earlier notebook cells — confirm they exist
# before running this cell in isolation.
number_of_images = 8
figures = {}
random_signs = []
for i in range(number_of_images):
    # Pick a random training index and remember it for later visual checks.
    index = random.randint(0, n_train-1)
    labels[i] = name_values[y_train[index]][1].decode('ascii')
    figures[i] = X_train[index].squeeze()
    random_signs.append(index)
print(random_signs)
figures_plotting(figures, 4, 2, labels)
import cv2
# Augment under-represented classes (fewer than 3000 samples) with three
# random translations, one random perspective warp and one random rotation
# per original image, then merge everything back into the training set.
X_train_1 = []
y_train_1 = []
X_train_2 = []
y_train_2 = []
new_counts_train = counts_train
for i in range(n_train):
    if(new_counts_train[y_train[i]] < 3000):
        # Three small random pixel translations.
        # NOTE(review): float bounds passed to randint truncate to (-1, 2)
        # — confirm this matches the intended jitter range.
        for j in range(3):
            dx, dy = np.random.randint(-1.7, 1.8, 2)
            M = np.float32([[1,0,dx], [0, 1, dy]])
            dst = cv2.warpAffine(X_train[i], M, (X_train[i].shape[0], X_train[i].shape[1]))
            dst = dst[:,:, None]
            X_train_1.append(dst)
            y_train_1.append(y_train[i])
        # One random perspective warp pulling two corners inwards.
        random_higher_bound = random.randint(27, 32)
        random_lower_bound = random.randint(0, 5)
        points_one = np.float32([[0,0],[32,0],[0,32],[32,32]])
        points_two = np.float32([[0, 0], [random_higher_bound, random_lower_bound], [random_lower_bound, 32],[32, random_higher_bound]])
        M = cv2.getPerspectiveTransform(points_one, points_two)
        dst = cv2.warpPerspective(X_train[i], M, (32,32))
        X_train_2.append(dst)
        y_train_2.append(y_train[i])
        # One random rotation of up to +/- 12 degrees about the image centre.
        tilt = random.randint(-12, 12)
        M = cv2.getRotationMatrix2D((X_train[i].shape[0]/2, X_train[i].shape[1]/2), tilt, 1)
        dst = cv2.warpAffine(X_train[i], M, (X_train[i].shape[0], X_train[i].shape[1]))
        X_train_2.append(dst)
        y_train_2.append(y_train[i])
        # NOTE(review): five images are added per original but the counter
        # only advances by 2 — confirm whether translations should count.
        new_counts_train[y_train[i]] += 2
# Append translated images (already have a channel axis) to the train set.
X_train_1 = np.array(X_train_1)
y_train_1 = np.array(y_train_1)
X_train = np.concatenate((X_train, X_train_1), axis=0)
y_train = np.concatenate((y_train, y_train_1), axis=0)
# Warped/rotated images come back 2-D from OpenCV; restore the channel axis.
X_train_2 = np.array(X_train_2)
y_train_2 = np.array(y_train_2)
X_train_2 = np.reshape(X_train_2, (np.shape(X_train_2)[0], 32, 32, 1))
X_train = np.concatenate((X_train, X_train_2), axis=0)
y_train = np.concatenate((y_train, y_train_2), axis=0)
# Pool the old validation split into the train set (re-split later).
X_train = np.concatenate((X_train, X_valid), axis=0)
y_train = np.concatenate((y_train, y_valid), axis=0)
# Show one augmented image (index n_train+1 lies in the appended region)
# next to an original one as a visual sanity check.
figures1 = {}
labels = {}
figures1[0] = X_train[n_train+1].squeeze()
labels[0] = y_train[n_train+1]
figures1[1] = X_train[0].squeeze()
labels[1] = y_train[0]
figures_plotting(figures1, 1, 2, labels)
# Re-split the pooled (train + old validation) data 80/20 into fresh
# train/validation sets with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size = 0.2, random_state=0)
print("Modified Dataset Size : {}".format(X_train.shape[0]))
def _plot_sign_counts(label_array, title):
    """Bar-plot per-class sample counts for one split and save the figure.

    Args:
        label_array: 1-D array of integer class labels.
        title: plot title, also used as the saved image filename.
    """
    unique, counts = np.unique(label_array, return_counts=True)
    plt.bar(unique, counts)
    plt.grid()
    plt.title(title)
    # Bug fix: savefig() must run before show() — show() flushes the current
    # figure, so the original code (which saved afterwards, and with
    # mismatched titles/filenames) wrote blank images.
    plt.savefig(title)
    plt.show()

# Class distribution of each split after augmentation and re-splitting.
_plot_sign_counts(y_train, "Modified Train Dataset Sign Counts")
_plot_sign_counts(y_test, "Modified Test Dataset Sign Counts")
_plot_sign_counts(y_valid, "Modified Valid Dataset Sign Counts")
def normalize(a):
    """Map pixel values onto the real line via an inverse sigmoid (logit).

    The input is first squeezed into the open interval (0, 1) as
    (1 + a) / 257 — so raw pixel values 0..255 never hit the boundaries —
    and the logit -log(1/p - 1) then spreads that interval over all of R.
    """
    squeezed = (1 + a) / 257
    return -np.log(1 / squeezed - 1)
# Scale pixels from [0, 255] to [-1, 1] so network inputs are zero-centred.
X_train_normalized = X_train/127.5 - 1
X_test_normalized = X_test/127.5 - 1
# Bug fix: the validation set was never normalized in the original code,
# so validation accuracy was measured on a different input distribution
# than the one the network is trained on.
X_valid_normalized = X_valid/127.5 - 1
stop = 8
# Re-plot the previously sampled signs after normalization.
# NOTE(review): `random_signs` holds pre-split indices; after the
# train/valid re-split they may point at different images — confirm.
figures = {}
count = 0
for i in random_signs:
    labels[count] = name_values[y_train[i]][1].decode('ascii')
    figures[count] = X_train_normalized[i].squeeze()
    count += 1
figures_plotting(figures, 4, 2, labels)
X_train = X_train_normalized
X_test = X_test_normalized
X_valid = X_valid_normalized
```
### Model Architecture
```
### Define your architecture here.
### Feel free to use as many code cells as needed.
def conv2d(x, W, b, strides = 1):
    """2-D convolution + bias + ReLU with VALID padding.

    Args:
        x: input 4-D tensor (batch, height, width, channels).
        W: filter weights (fh, fw, in_channels, out_channels).
        b: per-output-channel bias vector.
        strides: spatial stride, applied in both dimensions.

    Returns:
        The ReLU-activated feature map.
    """
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding = 'VALID')
    # Bug fix: the bias argument was accepted but never applied, so every
    # conv layer in LeNet() silently trained without its bias term.
    x = tf.nn.bias_add(x, b)
    print(x.shape)
    return tf.nn.relu(x)
def LeNet(x):
    """LeNet-style CNN producing logits over the 43 traffic-sign classes.

    Args:
        x: batch of 32x32 images with `image_depth_channels` channels.

    Returns:
        Unscaled logits tensor of shape (batch, 43).

    Uses the module-level `keep_prob` placeholder for dropout on the two
    fully-connected hidden layers (set to 1.0 at evaluation time).
    """
    # Truncated-normal initialisation parameters for all weight tensors.
    mu = 0
    sigma = 0.1
    # Conv 1: 5x5 -> 6 maps (28x28 after VALID conv), 2x2 max-pool -> 14x14x6.
    W_1 = tf.Variable(tf.truncated_normal(shape = (5, 5, image_depth_channels, 6), mean = mu, stddev = sigma))
    b_1 = tf.Variable(tf.zeros(6))
    layer_1 = conv2d(x, W_1, b_1, 1)
    layer_1 = tf.nn.max_pool(layer_1, ksize=[1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'VALID')
    print(layer_1.shape)
    print()
    # Conv 2: 5x5x6 -> 16 maps (10x10), 2x2 max-pool -> 5x5x16.
    W_2 = tf.Variable(tf.truncated_normal(shape = (5, 5, 6, 16), mean = mu, stddev = sigma))
    b_2 = tf.Variable(tf.zeros(16))
    layer_2 = conv2d(layer_1, W_2, b_2, 1)
    layer_2 = tf.nn.max_pool(layer_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')
    print(layer_2.shape)
    print()
    # Conv 2a: a 5x5 VALID conv over the 5x5x16 input collapses the spatial
    # dims to 1x1x412, so flatten() below yields a 412-feature vector.
    W_2_a = tf.Variable(tf.truncated_normal(shape = (5, 5, 16, 412), mean = mu, stddev = sigma))
    b_2_a = tf.Variable(tf.zeros(412))
    layer_2_a = conv2d(layer_2, W_2_a, b_2_a, 1)
    print(layer_2_a.shape)
    print()
    flat = flatten(layer_2_a)
    # FC 1: 412 -> 122, ReLU + dropout.
    W_3 = tf.Variable(tf.truncated_normal(shape = (412, 122), mean = mu, stddev = sigma))
    b_3 = tf.Variable(tf.zeros(122))
    layer_3 = tf.nn.relu(tf.nn.bias_add(tf.matmul(flat, W_3), b_3))
    layer_3 = tf.nn.dropout(layer_3, keep_prob)
    # FC 2: 122 -> 84, ReLU + dropout.
    W_4 = tf.Variable(tf.truncated_normal(shape = (122, 84), mean = mu, stddev = sigma))
    b_4 = tf.Variable(tf.zeros(84))
    layer_4 = tf.nn.relu(tf.nn.bias_add(tf.matmul(layer_3, W_4), b_4))
    layer_4 = tf.nn.dropout(layer_4, keep_prob)
    # Output layer: 84 -> 43 raw logits (softmax is applied by the loss).
    W_5 = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    b_5 = tf.Variable(tf.zeros(43))
    layer_5 = tf.nn.bias_add(tf.matmul(layer_4, W_5), b_5)
    return layer_5
# Graph input placeholders.
x = tf.placeholder(tf.float32, (None, 32, 32, image_depth_channels))  # image batch
y = tf.placeholder(tf.int32, (None))  # integer class labels
one_hot_y = tf.one_hot(y, 43)  # one-hot labels over the 43 sign classes
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability (1.0 at eval time)
```
### Train, Validate and Test the Model
A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
```
# Training hyper-parameters.
EPOCHS = 45
batch_size = 120
rate = 0.00097  # Adam learning rate
logits = LeNet(x)
# NOTE(review): softmax_cross_entropy_with_logits is deprecated in favour of
# the _v2 variant in later TF 1.x releases — confirm the installed version.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
### Evaluation model
# Accuracy = fraction of samples whose arg-max logit matches the label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Mean accuracy of the current model over a dataset, computed batch-wise.

    Runs `accuracy_operation` in the default session with dropout disabled
    (keep_prob = 1.0) and length-weights each batch so a short final batch
    does not skew the average.
    """
    sess = tf.get_default_session()
    total = len(X_data)
    weighted_correct = 0
    for start in range(0, total, batch_size):
        batch_x = X_data[start:start + batch_size]
        batch_y = y_data[start:start + batch_size]
        batch_accuracy = sess.run(
            accuracy_operation,
            feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
        weighted_correct += batch_accuracy * len(batch_x)
    return weighted_correct / total
# Train the network, tracking accuracy on the held-out validation set and on
# the training set itself after every epoch.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    validation_accuracy_figure = []
    train_accuracy_figure = []
    for i in range(EPOCHS):
        # Reshuffle each epoch so mini-batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, batch_size):
            end = offset + batch_size
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            # keep_prob 0.5 enables dropout during training only.
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
        validation_accuracy = evaluate(X_valid, y_valid)
        validation_accuracy_figure.append(validation_accuracy)
        # Bug fix: this figure is measured on the *training* set but was
        # previously named and printed as "Test Accuracy".
        train_accuracy = evaluate(X_train, y_train)
        train_accuracy_figure.append(train_accuracy)
        print("EPOCH {} ...".format(i+1))
        print("Train Accuracy = {:.3f}".format(train_accuracy))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")

# Plot both accuracy curves per epoch on one labelled figure.
plt.plot(train_accuracy_figure, label = 'train')
plt.plot(validation_accuracy_figure, label = 'validation')
plt.title("Accuracy per epoch")
plt.legend()
plt.show()
# Restore the latest checkpoint and report final accuracy on all three splits.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    train_accuracy = evaluate(X_train, y_train)
    print("Train Accuracy = {:.3f}".format(train_accuracy))
    valid_accuracy = evaluate(X_valid, y_valid)
    print("Valid Accuracy = {:.3f}".format(valid_accuracy))
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
```
---
## Step 3: Test a Model on New Images
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
### Load and Output the Images
```
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import glob
import cv2
# Load the web-downloaded test images (sorted for a stable ordering) together
# with their hand-assigned ground-truth class ids.
all_images = sorted(glob.glob('./test_images/*.png'))
all_labels = np.array([1, 22, 35, 15, 37, 18])
figures = {}
labels = {}
signs = []
index = 0
for image in all_images:
    # OpenCV loads BGR; convert to RGB for correct on-screen colours.
    img = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
    signs.append(img)
    figures[index] = img
    labels[index] = name_values[all_labels[index]][1].decode('ascii')
    index += 1
figures_plotting(figures, 3, 2, labels)
# Apply the same preprocessing pipeline used for training:
# channel-mean grayscale, then scale to [-1, 1].
signs = np.array(signs)
gray_signs = np.sum(signs/3, axis = 3, keepdims=True)
normalized_signs = gray_signs/127.5-1
number_of_images = 6
figures = {}
labels = {}
for i in range(number_of_images):
    labels[i] = name_values[all_labels[i]][1].decode('ascii')
    figures[i] = gray_signs[i].squeeze()
figures_plotting(figures, 3, 2, labels)
```
### Predict the Sign Type for Each Image
```
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
# Restore the trained model and measure accuracy over the six web images.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.import_meta_graph('./lenet.meta')
    saver.restore(sess, "./lenet")
    my_accuracy = evaluate(normalized_signs, all_labels)
    print("My Data Set Accuracy = {:.3f}".format(my_accuracy))
```
### Analyze Performance
```
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
# Evaluate the model on each downloaded image individually so we can see
# which of the six signs are classified correctly.
# Bug fix: the original accumulated all six images into one array, computed
# a single aggregate accuracy, and printed it once under "Image {}" using
# the stale loop index — so it never reported per-image results.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "./lenet")
    for i in range(6):
        single_image = normalized_signs[i:i + 1]
        single_label = all_labels[i:i + 1]
        image_accuracy = evaluate(single_image, single_label)
        print('Image {}'.format(i + 1))
        print("Image Accuracy = {:.3f}".format(image_accuracy))
        print()
```
### Output Top 5 Softmax Probabilities For Each Image Found on the Web
For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
```
# (5, 6) array
a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
0.12789202],
[ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
0.15899337],
[ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
0.23892179],
[ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
0.16505091],
[ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
0.09155967]])
```
Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
```
TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
[ 0.28086119, 0.27569815, 0.18063401],
[ 0.26076848, 0.23892179, 0.23664738],
[ 0.29198961, 0.26234032, 0.16505091],
[ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
[0, 1, 4],
[0, 5, 1],
[1, 3, 5],
[1, 4, 3]], dtype=int32))
```
Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
```
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
# Show the top-5 softmax probabilities for each web image, illustrating each
# guess with a validation-set example of the predicted class.
k_size = 5
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=k_size)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "./lenet")
    my_softmax_logits = sess.run(softmax_logits, feed_dict={x: normalized_signs, keep_prob: 1.0})
    my_top_k = sess.run(top_k, feed_dict={x: normalized_signs, keep_prob: 1.0})
    for i in range(6):
        figures = {}
        labels = {}
        figures[0] = signs[i]
        labels[0] = "Original"
        for j in range(k_size):
            # my_top_k[0] holds the probabilities, my_top_k[1] the class ids.
            labels[j+1] = 'Guess {} : ({:.0f}%)'.format(j+1, 100*my_top_k[0][i][j])
            figures[j+1] = X_valid[np.argwhere(y_valid == my_top_k[1][i][j])[0]].squeeze()
        figures_plotting(figures, 1, 6, labels)
```
### Project Writeup
Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| github_jupyter |
```
from systemtools.hayj import *
from systemtools.basics import *
from systemtools.file import *
from systemtools.printer import *
from systemtools.logger import *
from annotator.annot import *
from datatools.jsonutils import *
from nlptools.tokenizer import *
from datatools.htmltools import *
from newssource.goodarticle.utils import *
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR, LinearSVC
from sklearn import linear_model
from sklearn.model_selection import StratifiedKFold
# Load all annotated "goodarticle" JSON files into one list of dicts.
data = []
for file in sortedGlob("goodarticle*.json"):
    data += fromJsonFile(file)
# Resource word lists used by the feature extraction below.
stopwords = set(fileToStrList("stopwords.txt"))
startswithExcludes = set(fileToStrList("startswith-excludes.txt"))
# Pre-clean every article and drop those that become empty.
newData = []
for i in range(len(data)):
    data[i]["text"] = newsPreclean(data[i]["text"], startswithExcludes=startswithExcludes)
    if len(data[i]["text"]) > 0:
        newData.append(data[i])
data = newData
bp(data, 2)
print(len(data))
def basicFeatures(
    text,
    longLine=140,
    shortLine=20,
    tooLongDocument=60000,
    stopwords={},
    punct={',', ')', '...', "'", ';', '-', '!', ':', '?', '"', '.', '('},
    logger=None,
    verbose=True,
    asDict=False,
    asNpArray=True,
):
    """Extract simple surface-level features from a raw article text.

    Args:
        text: the article body (non-empty string expected).
        longLine / shortLine: character thresholds for long/short lines.
        tooLongDocument: character threshold for the tooLongDocument flag.
        stopwords: collection of lowercase stopwords.
        punct: collection of punctuation tokens.
        logger, verbose: passed to logWarning for the missing-resource warning.
        asDict: return an OrderedDict of named features instead of values.
        asNpArray: when not asDict, return a numpy array instead of a list.

    Returns:
        OrderedDict, numpy array or list of features — always in the same
        insertion order, so downstream matrices stay column-consistent.
    """
    from collections import Counter, OrderedDict
    # Checking vars:
    if stopwords is None or len(stopwords) == 0 or punct is None or len(punct) == 0:
        logWarning("Please give a stopwords list and a punct list", logger, verbose=verbose)
    features = OrderedDict()
    # Too long document ?
    features["tooLongDocument"] = len(text) >= tooLongDocument
    # Len of the text:
    features["length"] = len(text)
    # The count of non-blank lines:
    lines = [line for line in text.split("\n") if line != '']
    features["linesCount"] = len(lines)
    # The count of tokens:
    loweredText = text.lower()
    tokens = [tok for tok in text.split() if tok != '']
    loweredTokens = [tok for tok in loweredText.split() if tok != '']
    features["tokensCount"] = len(tokens)
    # Robustness fix: clamp the denominators so a whitespace-only document
    # cannot crash with ZeroDivisionError (the original divided by
    # len(lines) / len(tokens) directly).
    linesDen = max(len(lines), 1)
    tokensDen = max(len(tokens), 1)
    loweredDen = max(len(loweredTokens), 1)
    # Count of long lines, mean lines length, count of short lines:
    longLinesCount = sum(1 for line in lines if len(line) >= longLine)
    shortLinesCount = sum(1 for line in lines if len(line) <= shortLine)
    meanLinesLength = sum(len(line) for line in lines) / linesDen
    features["longLinesCount"] = longLinesCount
    features["shortLinesCount"] = shortLinesCount
    features["meanLinesLength"] = meanLinesLength
    features["longLinesRatio"] = longLinesCount / linesDen
    features["shortLinesRatio"] = shortLinesCount / linesDen
    # The ratio of stopwords / punct. Robustness fix: build the union with
    # set() so the (dict) default or a list argument cannot break `.union`.
    stopwordsAndPunct = set(stopwords) | set(punct)
    swpCount = sum(1 for tok in loweredTokens if tok in stopwordsAndPunct)
    features["stopwordsPunctRatio"] = swpCount / loweredDen
    # The mean overlap (average occurrence count of non-stopword tokens):
    overlap = Counter(tok for tok in loweredTokens if tok not in stopwordsAndPunct)
    features["nonSWPMeanOverlap"] = (sum(overlap.values()) / len(overlap)) if overlap else 0
    # Ratio of only uppercased words:
    upperWordCount = sum(1 for tok in tokens if hasLetter(tok) and not hasLowerLetter(tok))
    features["upperWordCount"] = upperWordCount
    features["upperWordRatio"] = upperWordCount / tokensDen
    # Ratio of non words:
    nonWordCount = sum(1 for tok in tokens if not hasLetter(tok))
    features["nonWordCount"] = nonWordCount
    features["nonWordRatio"] = nonWordCount / tokensDen
    # Ratio of html (characters removed by html2Text, clamped at 0):
    htmlCharCount = max(len(text) - len(html2Text(text)), 0)
    features["htmlCharCount"] = htmlCharCount
    features["htmlCharRatio"] = htmlCharCount / max(len(text), 1)
    # Ratio of words that have at least one upper case letter:
    features["hasUpperRatio"] = sum(1 for tok in tokens if hasUpperLetter(tok)) / tokensDen
    # Ratio of lines that start with a non word:
    nonWordStartLines = 0
    for line in lines:
        parts = line.split()
        if len(parts) > 0 and not hasLetter(parts[0]):
            nonWordStartLines += 1
    features["lineStartWithNonWordRatio"] = nonWordStartLines / linesDen
    # Encoding prob count (markers kept verbatim from the original):
    encCount = 0
    encCount += text.count("â")
    encCount += text.count("ï")
    encCount += text.count("U+")
    encCount += text.count("Ï")
    encCount += text.count("À")
    encCount += text.count("Á")
    encCount += text.count("Ã")
    encCount += text.count("�")
    encCount += text.count("")
    features["encodingProbCount"] = encCount
    # Finally we return all features:
    if asDict:
        return features
    result = list(features.values())
    if asNpArray:
        return np.array(result)
    return result
def accuracy(predictions, y, thresholds=[0.25, 0.75]):
    """Fraction of samples whose discretized prediction matches the
    discretized gold score, using `continuous2discret` with the given
    ascending thresholds."""
    assert len(predictions) == len(y)
    wellClassified = sum(
        1
        for predicted, gold in zip(predictions, y)
        if continuous2discret(predicted, thresholds) == continuous2discret(gold, thresholds)
    )
    return wellClassified / len(y)
def continuous2discret(y, thresholds):
    """Map a continuous score to a class index.

    `thresholds` must be sorted ascending; the result is the index of the
    first threshold that y does not exceed, or len(thresholds) when y lies
    above all of them.
    """
    for index, threshold in enumerate(thresholds):
        if y <= threshold:
            return index
    return len(thresholds)
# Eyeball a few zero-relevance articles together with their extracted
# features, then build the design matrix.
for i, current in enumerate(data):
    if current["relevance"] == 0.0:
        text = current["text"]
        text = newsPreclean(text, startswithExcludes=startswithExcludes)
        bp(basicFeatures(text, stopwords=stopwords, asDict=True), 5)
        print()
        print(text)
        print()
        print()
        print()
    # NOTE(review): indentation was lost in extraction — this break may have
    # belonged inside the `if` above; confirm against the original notebook.
    if i >= 3:
        break
# Feature matrix X and labels y; relevance is binarized at 0.51.
X = np.array([basicFeatures(current["text"], stopwords=stopwords) for current in data])
y = np.array([continuous2discret(current["relevance"], [0.51]) for current in data])
bp(X)
bp(y)
print(len(y))
# One-off hyper-parameter search for the LinearSVC (disabled once the best
# parameters were found; they are hard-coded in a later cell).
if False:
    from sklearn.metrics import make_scorer
    scorer = make_scorer(accuracy, greater_is_better=True) # scorer(clf, X, y)
    param_grid = \
    {
        'loss': ['squared_hinge'], # 'hinge'
        'penalty': ['l1', 'l2',], # l1, l2
        'C': list(range(0, 20 + 1)),
        'multi_class': ['ovr'], # , 'crammer_singer'
        'dual': [False, True],
        'random_state': [0],
    }
    clf = GridSearchCV(LinearSVC(), param_grid=param_grid, scoring='accuracy',
                       cv=StratifiedKFold(n_splits=5, random_state=0, shuffle=True), n_jobs=cpuCount(),
                       error_score=0.0)
    clf.fit(X, y)
    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print(clf.best_score_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    best = clf.best_estimator_
# NOTE(review): `best` is only defined when the grid search above has run
# (or by the re-fit cell below); this inspection relies on notebook
# execution order — confirm before running top-to-bottom.
predictions = best.predict(X)
predictions
# Report resubstitution accuracy and dump every misclassified article.
wellClassified = 0
for i in range(len(predictions)):
    if predictions[i] == y[i]:
        wellClassified += 1
    else:
        print("prediction: " + str(predictions[i]))
        print("true label: " + str(y[i]))
        print(data[i]["text"])
        print()
        print()
        print()
        print()
print(wellClassified / len(y))
# Refit the classifier on all data with the best hyper-parameters found by
# the grid search, then persist it to disk.
bestParams = {'C': 6, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}
clf = LinearSVC(**bestParams)
clf.fit(X, y)
best = clf
import pickle
serialize(clf, "best.pickle")
# Round-trip the pickle to make sure the stored model loads back cleanly.
best = deserialize("best.pickle")
# Bug fix: removed a stray `s` statement here that raised NameError at runtime.
```
| github_jupyter |
ERROR: type should be string, got "https://github.com/kikocorreoso/brythonmagic\n\nhttp://nbviewer.ipython.org/github/kikocorreoso/brythonmagic/blob/master/notebooks/Brython%20usage%20in%20the%20IPython%20notebook.ipynb\n\n```\nimport IPython\nIPython.version_info\n%install_ext https://raw.github.com/kikocorreoso/brythonmagic/master/brythonmagic.py\n%load_ext brythonmagic\n%%HTML\n<script type=\"text/javascript\" src=\"https://brython.info/src/brython_dist.js\"></script>\n%%brython -c my_container\n# 假如要列出所產生的 html 則使用 -p\nfrom browser import doc, html\n\n# This will be printed in the js console of your browser\nprint('Hello world!')\n\n# This will be printed in the container div on the output below\ndoc[\"my_container\"] <= html.P(\"文字位於 div 標註內\", \n style = {\"backgroundColor\": \"cyan\"})\n%%brython\nfrom browser import alert\n\nalert('Hello world!, Welcome to the brythonmagic!')\n%%brython -c simple_example\nfrom browser import doc, html\n\nfor i in range(10):\n doc[\"simple_example\"] <= html.P(i)\n%%brython -c table\nfrom browser import doc, html\n\ntable = html.TABLE()\n\nfor i in range(10):\n color = ['cyan','#dddddd'] * 5\n table <= html.TR(\n html.TD(str(i+1) + ' x 2 =', style = {'backgroundColor':color[i]}) + \n html.TD((i+1)*2, style = {'backgroundColor':color[i]}))\ndoc['table'] <= table\n%%brython -c canvas_example\nfrom browser.timer import request_animation_frame as raf\nfrom browser.timer import cancel_animation_frame as caf\nfrom browser import doc, html\nfrom time import time\nimport math\n\n# First we create a table to insert the elements\ntable = html.TABLE(cellpadding = 10)\nbtn_anim = html.BUTTON('Animate', Id=\"btn-anim\", type=\"button\")\nbtn_stop = html.BUTTON('Stop', Id=\"btn-stop\", type=\"button\")\ncnvs = html.CANVAS(Id=\"raf-canvas\", width=256, height=256)\n\ntable <= html.TR(html.TD(btn_anim + btn_stop) +\n html.TD(cnvs))\n\ndoc['canvas_example'] <= table\n# Now we access the canvas context\nctx = doc['raf-canvas'].getContext( 
'2d' ) \n\n# And we create several functions in charge to animate and stop the draw animation\ntoggle = True\n\ndef draw():\n t = time() * 3\n x = math.sin(t) * 96 + 128\n y = math.cos(t * 0.9) * 96 + 128\n global toggle\n if toggle:\n toggle = False\n else:\n toggle = True\n ctx.fillStyle = 'rgb(200,200,20)' if toggle else 'rgb(20,20,200)'\n ctx.beginPath()\n ctx.arc( x, y, 6, 0, math.pi * 2, True)\n ctx.closePath()\n ctx.fill()\n\ndef animate(i):\n global id\n id = raf(animate)\n draw()\n\ndef stop(i):\n global id\n print(id)\n caf(id)\n\ndoc[\"btn-anim\"].bind(\"click\", animate)\ndoc[\"btn-stop\"].bind(\"click\", stop)\n%%HTML\n<script type=\"text/javascript\" src=\"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.js\"></script>\n%%brython -c simple_d3\nfrom browser import window, document, html\nfrom javascript import JSObject\n\nd3 = window.d3\n\ncontainer = JSObject(d3.select(\"#simple_d3\"))\nsvg = container.append(\"svg\").attr(\"width\", 100).attr(\"height\", 100)\ncircle1 = svg.append(\"circle\").style(\"stroke\", \"gray\").style(\"fill\", \"gray\").attr(\"r\", 40)\ncircle1.attr(\"cx\", 50).attr(\"cy\", 50).attr(\"id\", \"mycircle\")\n\ncircle2 = svg.append(\"circle\").style(\"stroke\", \"gray\").style(\"fill\", \"white\").attr(\"r\", 20)\ncircle2.attr(\"cx\", 50).attr(\"cy\", 50)\n\ndef over(ev):\n document[\"mycircle\"].style.fill = \"blue\"\n\ndef out(ev):\n document[\"mycircle\"].style.fill = \"gray\"\n\ndocument[\"mycircle\"].bind(\"mouseover\", over)\ndocument[\"mycircle\"].bind(\"mouseout\", out)\n%%brython -c manipulating\nfrom browser import document, html\n\ndef hide(ev):\n divs = document.get(selector = 'div.input')\n for div in divs:\n div.style.display = \"none\"\n\ndef show(ev):\n divs = document.get(selector = 'div.input')\n for div in divs:\n div.style.display = \"inherit\"\n\ndocument[\"manipulating\"] <= html.BUTTON('Hide code cells', Id=\"btn-hide\")\ndocument[\"btn-hide\"].bind(\"click\", hide)\n\ndocument[\"manipulating\"] <= 
html.BUTTON('Show code cells', Id=\"btn-show\")\ndocument[\"btn-show\"].bind(\"click\", show)\nfrom random import randint\n\nn = 100\nx = [randint(0,800) for i in range(n)]\ny = [randint(0,600) for i in range(n)]\nr = [randint(25,50) for i in range(n)]\nred = [randint(0,255) for i in range(n)]\ngreen = [randint(0,255) for i in range(n)]\nblue = [randint(0,255) for i in range(n)]\n%%brython -c other_d3 -i x y r red green blue\nfrom browser import window, document, html\n\nd3 = window.d3\n\nWIDTH = 800\nHEIGHT = 600\n\ncontainer = d3.select(\"#other_d3\")\nsvg = container.append(\"svg\").attr(\"width\", WIDTH).attr(\"height\", HEIGHT)\n\nclass AddShapes:\n def __init__(self, x, y, r, red, green, blue, shape = \"circle\", interactive = True):\n self.shape = shape\n self.interactive = interactive\n self._color = \"gray\"\n self.add(x, y, r, red, green, blue)\n\n def over(self, ev):\n self._color = ev.target.style.fill\n document[ev.target.id].style.fill = \"white\"\n \n def out(self, ev):\n document[ev.target.id].style.fill = self._color\n \n def add(self, x, y, r, red, green, blue):\n for i in range(len(x)):\n self.idx = self.shape + '_' + str(i) \n self._color = \"rgb(%s,%s,%s)\" % (red[i], green[i], blue[i])\n shaped = svg.append(self.shape).style(\"stroke\", \"gray\").style(\"fill\", self._color).attr(\"r\", r[i])\n shaped.attr(\"cx\", x[i]).attr(\"cy\", y[i]).attr(\"id\", self.idx)\n if self.interactive:\n document[self.idx].bind(\"mouseover\", self.over)\n document[self.idx].bind(\"mouseout\", self.out)\n\nplot = AddShapes(x, y, r, red, green, blue, interactive = True)\n%%HTML\n<script type=\"text/javascript\" src=\"https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js\"></script>\n%%brython -c ol_map\n# we need to get map png in SSL\n# take a look at http://gis.stackexchange.com/questions/83953/openlayer-maps-issue-with-ssl\nfrom browser import document, window\nfrom javascript import JSConstructor, JSObject\n\n## Div 
layout\ndocument['ol_map'].style.width = \"800px\"\ndocument['ol_map'].style.height = \"400px\"\ndocument['ol_map'].style.border = \"1px solid black\"\n\nOpenLayers = window.OpenLayers\n\n## Map\n_map = JSConstructor(OpenLayers.Map)('ol_map')\n\n## Addition of a OpenStreetMap layer\n_layer = JSConstructor(OpenLayers.Layer.OSM)( 'Simple OSM map')\n_map.addLayer(_layer)\n\n## Map centered on Lon, Lat = (-3.671416, 40.435897) and a zoom = 14\n## with a projection = \"EPSG:4326\" (Lat-Lon WGS84)\n_proj = JSConstructor(OpenLayers.Projection)(\"EPSG:4326\")\n_center = JSConstructor(OpenLayers.LonLat)(-3.671416, 40.435897)\n_center.transform(_proj, _map.getProjectionObject())\n_map.setCenter(_center, 10)\n\n## Addition of some points around the defined location\nlons = [-3.670, -3.671, -3.672, -3.672, -3.672,\n -3.671, -3.670, -3.670]\nlats = [40.435, 40.435, 40.435, 40.436, 40.437,\n 40.437, 40.437, 40.436]\n\nsite_points = []\nsite_style = {}\n\npoints_layer = JSConstructor(OpenLayers.Layer.Vector)(\"Point Layer\")\n_map.addLayer(points_layer)\n\nfor lon, lat in zip(lons, lats):\n point = JSConstructor(OpenLayers.Geometry.Point)(lon, lat)\n point.transform(_proj, _map.getProjectionObject())\n _feat = JSConstructor(OpenLayers.Feature.Vector)(point)\n points_layer.addFeatures(_feat)\n%%brython -s styling\nfrom browser import doc, html\n\n# Changing the background color\nbody = doc[html.BODY][0]\nbody.style = {\"backgroundColor\": \"#99EEFF\"}\n \n# Changing the color of the imput prompt\ninps = body.get(selector = \".input_prompt\")\nfor inp in inps:\n inp.style = {\"color\": \"blue\"}\n \n# Changin the color of the output cells\nouts = body.get(selector = \".output_wrapper\")\nfor out in outs:\n out.style = {\"backgroundColor\": \"#E0E0E0\"}\n \n# Changing the font of the text cells\ntext_cells = body.get(selector = \".text_cell\")\nfor cell in text_cells:\n cell.style = {\"fontFamily\": \"\"\"\"Courier New\", Courier, monospace\"\"\",\n \"fontSize\": \"20px\"}\n \n# 
Changing the color of the code cells.\ncode_cells = body.get(selector = \".CodeMirror\")\nfor cell in code_cells:\n cell.style = {\"backgroundColor\": \"#D0D0D0\"}\n```\n\n" | github_jupyter |
```
%pylab inline
import numpy as np
import matplotlib.pyplot as plt
# PyTorch imports
import torch
# This has neural network layer primitives that you can use to build things quickly
import torch.nn as nn
# This has things like activation functions and other useful nonlinearities
from torch.nn import functional as F
# This has various gradient descent algorithms
import torch.optim
# In order to take derivatives, we have to wrap things as a Variable or a Parameter.
# Variables are things like inputs to the model
# Parameters are things like weights
# If you make a child class of nn.Module, it automatically keeps tracks of all parameters declared during
# __init__ for you - really handy!
from torch.autograd import Variable
from torch.nn import Parameter
from IPython import display
import time
```
## Generative Adversarial Networks
Generative adversarial networks (GANs) are a method to learn to produce samples from high-dimensional distributions based only on a set of samples from that distribution. The basic idea is that you have two networks which are competing with eachother on a shared game. One network (the Generator) must create samples from the target distribution, while the other network (the Discriminator) must correctly predict whether a given sample came from the Generator or from the actual data set.
For this game, the Nash equilibrium is for the Generator to produce samples exactly according to the probability density of the data distribution, and for the Discriminator to return the probability density of a given input sample. So a trained GAN in principle gives you both a way to sample from a distribution as well as a way to evaluate the local probability density around a sample.
In practice, the Generator and Discriminator may not converge to the Nash equilibrium, but will often oscillate around it, overspecialize to sub-regions of the distribution ('mode collapse'), etc. As such, there are a large family of algorithms designed to improve the convergence properties of the basic setup.
In this example, we'll just implement a basic GAN to reproduce some 2d distributions (so that the quality of the reconstruction can be easily checked).
```
# Some utility functions
def toFloatVar(x):
    """Wrap *x* as a float32 autograd Variable that does not require gradients."""
    tensor = torch.FloatTensor(x)
    return Variable(tensor, requires_grad=False)

def toLongVar(x):
    """Wrap *x* as an int64 (long) autograd Variable that does not require gradients."""
    tensor = torch.LongTensor(x)
    return Variable(tensor, requires_grad=False)
```
## Generator network
First we'll specify the Generator. This network needs to produce a distribution of outcomes, not just an input-output relationship or single output, so we need to provide it a source of noise that it will transform into the target distribution. In essence, the Generator implements a transform from one probability distribution $p(z)$ to a target distribution (in a different set of variables) $q(x)$ - one sample at a time.
So basically the procedure is, we sample a random $z$ from $p(z)$ (which will just be a high-dimensional Gaussian), then apply the network to get $x = G(z)$.
```
class Generator(nn.Module):
    """Transforms Gaussian noise vectors into samples in the target space.

    A simple four-layer MLP with ReLU activations; the final layer is
    linear so the generated samples are unbounded.
    """

    def __init__(self, noiseDimension=16, hiddenDimension=64, targetDimension=2):
        super(Generator, self).__init__()
        self.layer1 = nn.Linear(noiseDimension, hiddenDimension)
        self.layer2 = nn.Linear(hiddenDimension, hiddenDimension)
        self.layer3 = nn.Linear(hiddenDimension, hiddenDimension)
        self.layer4 = nn.Linear(hiddenDimension, targetDimension)
        self.noiseDimension = noiseDimension
        # Each network owns its optimizer so the two can be trained at
        # cross purposes to each other.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)

    def forward(self, x):
        """Map a batch of noise vectors to generated samples."""
        hidden = x
        for hidden_layer in (self.layer1, self.layer2, self.layer3):
            hidden = F.relu(hidden_layer(hidden))
        return self.layer4(hidden)

    def sample(self, N=100):
        """Draw N standard-normal noise vectors and push them through the net."""
        noise = toFloatVar(np.random.randn(N, self.noiseDimension))
        return self.forward(noise)
```
## Discriminator Network
The Discriminator network takes a sample either from the true dataset or from fakes made by the Generator, and should return a probability that the sample is real or fake.
```
class Discriminator(nn.Module):
    """Scores samples with the probability that they come from the real data.

    A four-layer MLP with ReLU activations ending in a sigmoid, so the
    output is a probability in (0, 1).
    """

    def __init__(self, hiddenDimension=64, targetDimension=2):
        super(Discriminator, self).__init__()
        self.layer1 = nn.Linear(targetDimension, hiddenDimension)
        self.layer2 = nn.Linear(hiddenDimension, hiddenDimension)
        self.layer3 = nn.Linear(hiddenDimension, hiddenDimension)
        self.layer4 = nn.Linear(hiddenDimension, 1)
        # Each network owns its optimizer so the two can be trained at
        # cross purposes to each other.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)

    def forward(self, x):
        """Return P(real) for each row of x.

        Uses torch.sigmoid (torch.nn.functional.sigmoid is deprecated) and
        clamps away from exactly 0/1 so log() in the losses never hits -inf.
        """
        z = F.relu(self.layer1(x))
        z = F.relu(self.layer2(z))
        z = F.relu(self.layer3(z))
        z = torch.clamp(torch.sigmoid(self.layer4(z)), 1e-6, 1 - 1e-6)
        return z
```
## Training
The training procedure involves two steps: training the Discriminator and training the Generator. We'll do these separately for clarity, despite that introducing a bit of redundancy.
Training the discriminator:
- Form a batch which contains 50% samples from true distribution and 50% samples from the generator
- If $D()$ is the output of the discriminator and $x$ the true data, minimize the logistic loss: $L = -\log(D(x)) - \log(1-D(G(z)))$
- Update the discriminator weights only
Training the generator:
- Form a batch containing 100% samples from the generator
- Apply the discriminator to get $D(G(z))$
- Update the generator to maximize the discriminator's loss: $L = \log(1-D(G(z)))$.
```
def trainDiscriminator(data, generator, discriminator):
    """Run one discriminator update on a 50/50 real-vs-fake batch.

    Minimizes the logistic loss -log D(x) - log(1 - D(G(z))) and steps
    only the discriminator's optimizer.

    Returns the loss as a numpy scalar, for tracking training progress.
    """
    # Detach the fakes: this step updates the discriminator only, so there
    # is no need to build a graph through the generator or to accumulate
    # stale gradients in the generator's parameters.
    fakes = generator.sample(N=data.shape[0]).detach()
    # Zero the discriminator gradient
    discriminator.zero_grad()
    # Score the fake batch and the true batch
    p_fakes = discriminator.forward(fakes)
    p_true = discriminator.forward(data)
    # Logistic loss: -log D(x) - log(1 - D(G(z)))
    loss = torch.mean(-torch.log(p_true)) + torch.mean(-torch.log(1 - p_fakes))
    # Update the discriminator weights only
    loss.backward()
    discriminator.optimizer.step()
    return loss.data.numpy().copy()
# Training the generator doesn't require access to the dataset
# Careful though - training to completion on a fixed discriminator leads to mode collapse
# We have to train them together dynamically
def trainGenerator(generator, discriminator):
# Zero generator gradient
generator.zero_grad()
fakes = generator.sample(N=250)
p_fakes = discriminator.forward(fakes)
# Get the generator loss
loss = torch.mean(torch.log(1-p_fakes))
# Update generator weights
loss.backward()
generator.optimizer.step()
# Track generator loss for training
return loss.data.numpy().copy()
```
## Data distribution
We'll learn a simple bimodal distribution to test the GAN
```
def generateData(N):
    """Draw N points from a two-mode 2-d Gaussian mixture."""
    # Pick a mode (0 or 1) for every point, then add Gaussian noise
    # (std 0.5) around that mode's center.
    mode = np.random.randint(2, size=(N, 1))
    noise = np.random.randn(N, 2) * 0.5
    mode_centers = np.array([[-1.5, 0.5], [0.6, 1.3]])
    return mode_centers[mode[:, 0]] + noise
# Draw a fixed reference dataset of 250 points and visualize the two modes.
data = generateData(250)
plt.scatter(data[:,0],data[:,1])
plt.show()
```
## Training the GAN
```
# Build the two networks and train them in alternation, redrawing the
# diagnostic plots every few epochs.
generator = Generator()
discriminator = Discriminator()
gen_loss = []
disc_loss = []
for epoch in range(1000):
    # It's often better for the discriminator to be slightly better than
    # the generator for stability, so take two discriminator steps per
    # generator step.
    dl = trainDiscriminator(toFloatVar(data), generator, discriminator)
    dl = trainDiscriminator(toFloatVar(data), generator, discriminator)
    gl = trainGenerator(generator, discriminator)
    gen_loss.append(gl)
    disc_loss.append(dl)
    if epoch % 5 == 0:
        # Convert to a plain numpy array so matplotlib is not handed an
        # autograd-tracked tensor (consistent with loss.data.numpy() above).
        samples = generator.sample(N=250).data.numpy()
        plt.clf()
        plt.subplot(1, 2, 1)
        plt.title("Generated Distribution")
        plt.scatter(data[:, 0], data[:, 1])
        plt.scatter(samples[:, 0], samples[:, 1])
        plt.xlim(-4, 2.5)
        plt.ylim(-1.5, 4)
        plt.subplot(1, 2, 2)
        plt.title("Training Loss")
        plt.plot(disc_loss, label="Discriminator")
        plt.plot(gen_loss, label="Generator")
        plt.legend()
        plt.gcf().set_size_inches((12, 6))
        # Replace the previous figure in the notebook output in-place.
        display.clear_output(wait=True)
        display.display(plt.gcf())
        time.sleep(0.01)
```
| github_jupyter |
## Include the script for your app below. Be sure to include the instructions!
```
import os
import ee
import geemap
import ipywidgets as widgets
from bqplot import pyplot as plt
from ipyleaflet import WidgetControl
ee.Authenticate()
ee.Initialize()
# Create an interactive map
Map = geemap.Map(center=[40, -100], zoom=4, add_google_map=False)
Map.add_basemap('HYBRID')
Map.add_basemap('ROADMAP')
# Add Earth Engine data
fc = ee.FeatureCollection('TIGER/2018/Counties')
Map.addLayer(fc, {}, 'US Counties')
states = ee.FeatureCollection('TIGER/2018/States')
Map.addLayer(states, {}, 'US States')
Map
# Design the interactive widgets
style = {'description_width': 'initial'}
output_widget = widgets.Output(layout={'border': '1px solid black'})
output_control = WidgetControl(widget=output_widget, position='bottomright')
Map.add_control(output_control)
admin1_widget = widgets.Text(
description='State:',
value='Tennessee',
width=200,
style=style
)
admin2_widget = widgets.Text(
description='County:',
value='Knox',
width=300,
style=style
)
aoi_widget = widgets.Checkbox(
value=False,
description='Use user-drawn AOI',
style=style
)
download_widget = widgets.Checkbox(
value=False,
description='Download chart data',
style=style
)
def aoi_change(change):
    """Reset the map and pickers when the 'Use user-drawn AOI' box toggles."""
    # Keep only the first four (base) layers; drop any result layers.
    Map.layers = Map.layers[:4]
    # Discard any geometry the user drew on the map.
    Map.user_roi = None
    Map.user_rois = None
    Map.draw_count = 0
    # Blank the state/county pickers so they can't conflict with the AOI.
    admin1_widget.value = ''
    admin2_widget.value = ''
    output_widget.clear_output()
aoi_widget.observe(aoi_change, names='value')
band_combo = widgets.Dropdown(
description='Band combo:',
options=['Red/Green/Blue', 'NIR/Red/Green', 'SWIR2/SWIR1/NIR', 'NIR/SWIR1/Red','SWIR2/NIR/Red',
'SWIR2/SWIR1/Red', 'SWIR1/NIR/Blue', 'NIR/SWIR1/Blue', 'SWIR2/NIR/Green', 'SWIR1/NIR/Red'],
value='NIR/Red/Green',
style=style
)
year_widget = widgets.IntSlider(min=1984, max=2020, value=2010, description='Selected year:', width=400, style=style)
fmask_widget = widgets.Checkbox(
value=True,
description='Apply fmask(remove cloud, shadow, snow)',
style=style
)
# Normalized Satellite Indices: https://www.usna.edu/Users/oceano/pguth/md_help/html/norm_sat.htm
nd_options = ['Vegetation Index (NDVI)',
'Water Index (NDWI)',
'Modified Water Index (MNDWI)',
'Snow Index (NDSI)',
'Soil Index (NDSI)',
'Burn Ratio (NBR)',
'Customized']
nd_indices = widgets.Dropdown(options=nd_options, value='Modified Water Index (MNDWI)', description='Normalized Difference Indes:', style=style)
first_band = widgets.Dropdown(
description='1st band:',
options=['Blue', 'Green','Red','NIR', 'SWIR1', 'SWIR2'],
value='Green',
style=style
)
second_band = widgets.Dropdown(
description='2nd band:',
options=['Blue', 'Green','Red','NIR', 'SWIR1', 'SWIR2'],
value='SWIR1',
style=style
)
nd_threshold = widgets.FloatSlider(
value=0,
min=-1,
max=1,
step=0.01,
description='Threshold:',
orientation='horizontal',
style=style
)
nd_color = widgets.ColorPicker(
concise=False,
description='Color:',
value='blue',
style=style
)
def nd_index_change(change):
    """Sync the band dropdowns with the chosen normalized-difference index."""
    # Preset (first band, second band) pairs for each named index; the
    # 'Customized' entry clears both so the user can pick bands freely.
    presets = {
        'Vegetation Index (NDVI)': ('NIR', 'Red'),
        'Water Index (NDWI)': ('NIR', 'SWIR1'),
        'Modified Water Index (MNDWI)': ('Green', 'SWIR1'),
        'Snow Index (NDSI)': ('Green', 'SWIR1'),
        'Soil Index (NDSI)': ('SWIR1', 'NIR'),
        'Burn Ratio (NBR)': ('NIR', 'SWIR2'),
        'Customized': (None, None),
    }
    if nd_indices.value in presets:
        first_band.value, second_band.value = presets[nd_indices.value]
nd_indices.observe(nd_index_change, names='value')
submit = widgets.Button(
description='Submit',
button_style='primary',
tooltip='Click me',
style=style
)
full_widget = widgets.VBox([
widgets.HBox([admin1_widget, admin2_widget, aoi_widget, download_widget]),
widgets.HBox([band_combo, year_widget, fmask_widget]),
widgets.HBox([nd_indices, first_band, second_band, nd_threshold, nd_color]),
submit
])
full_widget
# Capture user interaction with the map
def handle_interaction(**kwargs):
    """Map click handler: select the county under the cursor and outline it."""
    latlon = kwargs.get('coordinates')
    # Only react to clicks, and only when the user is not drawing an AOI.
    if kwargs.get('type') == 'click' and not aoi_widget.value:
        Map.default_style = {'cursor': 'wait'}
        # ipyleaflet reports (lat, lon); Earth Engine points take (lon, lat).
        xy = ee.Geometry.Point(latlon[::-1])
        selected_fc = fc.filterBounds(xy)
        with output_widget:
            output_widget.clear_output()
            try:
                feature = selected_fc.first()
                admin2_id = feature.get('NAME').getInfo()
                statefp = feature.get('STATEFP')
                admin1_fc = ee.Feature(states.filter(ee.Filter.eq('STATEFP', statefp)).first())
                admin1_id = admin1_fc.get('NAME').getInfo()
                admin1_widget.value = admin1_id
                admin2_widget.value = admin2_id
                # Keep only the base layers, then outline the selection.
                Map.layers = Map.layers[:4]
                geom = selected_fc.geometry()
                layer_name = admin1_id + '-' + admin2_id
                Map.addLayer(ee.Image().paint(geom, 0, 2), {'palette': 'red'}, layer_name)
                print(layer_name)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                print('No feature could be found')
                Map.layers = Map.layers[:4]
        Map.default_style = {'cursor': 'pointer'}
    else:
        Map.draw_count = 0
Map.on_interaction(handle_interaction)
# Click event handler
def submit_clicked(b):
    """Handle the Submit button: compute and chart the ND-index area time series.

    Reads every control's current value, resolves the region of interest
    (user-drawn AOI or the selected state/county), builds annual Landsat
    composites, thresholds the chosen normalized-difference index, charts
    the resulting area per year, and optionally writes the chart data to a
    CSV in ~/Downloads.
    """
    with output_widget:
        output_widget.clear_output()
        print('Computing...')
        Map.default_style = {'cursor': 'wait'}
        try:
            # Snapshot all widget values up front.
            admin1_id = admin1_widget.value
            admin2_id = admin2_widget.value
            band1 = first_band.value
            band2 = second_band.value
            selected_year = year_widget.value
            threshold = nd_threshold.value
            bands = band_combo.value.split('/')
            apply_fmask = fmask_widget.value
            palette = nd_color.value
            use_aoi = aoi_widget.value
            download = download_widget.value
            if use_aoi:
                if Map.user_roi is not None:
                    roi = Map.user_roi
                    layer_name = 'User drawn AOI'
                    geom = roi
                else:
                    output_widget.clear_output()
                    print('No user AOI could be found.')
                    return
            else:
                # Look up the county by name within the selected state.
                statefp = ee.Feature(states.filter(ee.Filter.eq('NAME', admin1_id)).first()).get('STATEFP')
                roi = fc.filter(ee.Filter.And(ee.Filter.eq('NAME', admin2_id), ee.Filter.eq('STATEFP', statefp)))
                layer_name = admin1_id + '-' + admin2_id
                geom = roi.geometry()
            # Reset to the base layers, then outline the region of interest.
            Map.layers = Map.layers[:4]
            Map.addLayer(ee.Image().paint(geom, 0, 2), {'palette': 'red'}, layer_name)
            # Annual Landsat composites over 1984-2020, then the normalized
            # difference of the two chosen bands thresholded to a binary mask.
            images = geemap.landsat_timeseries(roi=roi, start_year=1984, end_year=2020, start_date='01-01', end_date='12-31', apply_fmask=apply_fmask)
            nd_images = images.map(lambda img: img.normalizedDifference([band1, band2]))
            result_images = nd_images.map(lambda img: img.gt(threshold))
            # Pull out the single year chosen on the slider (offset from 1984).
            selected_image = ee.Image(images.toList(images.size()).get(selected_year - 1984))
            selected_result_image = ee.Image(result_images.toList(result_images.size()).get(selected_year - 1984)).selfMask()
            vis_params = {
                'bands': bands,
                'min': 0,
                'max': 3000
            }
            Map.addLayer(selected_image, vis_params, 'Landsat ' + str(selected_year))
            Map.addLayer(selected_result_image, {'palette': palette}, 'Result ' + str(selected_year))
            def cal_area(img):
                # Area of the binary mask in hectares (pixelArea is m^2; /1e4 -> ha).
                pixel_area = img.multiply(ee.Image.pixelArea()).divide(1e4)
                img_area = pixel_area.reduceRegion(**{
                    'geometry': geom,
                    'reducer': ee.Reducer.sum(),
                    'scale': 1000,
                    'maxPixels': 1e12,
                    'bestEffort': True
                })
                return img.set({'area': img_area})
            areas = result_images.map(cal_area)
            stats = areas.aggregate_array('area').getInfo()
            x = list(range(1984, 2021))
            # Each stat is a dict keyed by the normalizedDifference band name 'nd'.
            y = [item.get('nd') for item in stats]
            # NOTE: `plt` here is bqplot.pyplot (see the imports at the top of
            # this script), not matplotlib - that is why plt.clear() exists.
            fig = plt.figure(1)
            fig.layout.height = '270px'
            plt.clear()
            plt.plot(x, y)
            plt.title('Temporal trend (1984-2020)')
            plt.xlabel('Year')
            plt.ylabel('Area (ha)')
            output_widget.clear_output()
            plt.show()
            if download:
                # Write the chart data to ~/Downloads and show a download link.
                out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
                out_name = 'chart_' + geemap.random_string() + '.csv'
                out_csv = os.path.join(out_dir, out_name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                with open(out_csv, 'w') as f:
                    f.write('year, area (ha)\n')
                    for index, item in enumerate(x):
                        line = '{},{:.2f}\n'.format(item, y[index])
                        f.write(line)
                link = geemap.create_download_link(
                    out_csv, title="Click here to download the chart data: ")
                display(link)
        except Exception as e:
            print(e)
            print('An error occurred during computation.')
        Map.default_style = {'cursor': 'default'}
submit.on_click(submit_clicked)
```
| github_jupyter |
[Index](Index.ipynb) - [Back](Widget Basics.ipynb) - [Next](Output Widget.ipynb)
# Widget List
```
import ipywidgets as widgets
```
## Numeric widgets
There are many widgets distributed with IPython that are designed to display numeric values. Widgets exist for displaying integers and floats, both bounded and unbounded. The integer widgets share a similar naming scheme to their floating point counterparts. By replacing `Float` with `Int` in the widget name, you can find the Integer equivalent.
### IntSlider
```
widgets.IntSlider(
value=7,
min=0,
max=10,
step=1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
```
### FloatSlider
```
widgets.FloatSlider(
value=7.5,
min=0,
max=10.0,
step=0.1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
```
Sliders can also be **displayed vertically**.
```
widgets.FloatSlider(
value=7.5,
min=0,
max=10.0,
step=0.1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='vertical',
readout=True,
readout_format='.1f',
)
```
### FloatLogSlider
The `FloatLogSlider` has a log scale, which makes it easy to have a slider that covers a wide range of positive magnitudes. The `min` and `max` refer to the minimum and maximum exponents of the base, and the `value` refers to the actual value of the slider.
```
widgets.FloatLogSlider(
    value=10,
    base=10,
    min=-10, # min exponent of base
    max=10, # max exponent of base
    step=0.2, # exponent step
    description='Log Slider'
)
```
### IntRangeSlider
```
widgets.IntRangeSlider(
value=[5, 7],
min=0,
max=10,
step=1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
)
```
### FloatRangeSlider
```
widgets.FloatRangeSlider(
value=[5, 7.5],
min=0,
max=10.0,
step=0.1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
```
### IntProgress
```
widgets.IntProgress(
value=7,
min=0,
max=10,
step=1,
description='Loading:',
bar_style='', # 'success', 'info', 'warning', 'danger' or ''
orientation='horizontal'
)
```
### FloatProgress
```
widgets.FloatProgress(
value=7.5,
min=0,
max=10.0,
step=0.1,
description='Loading:',
bar_style='info',
orientation='horizontal'
)
```
The numerical text boxes that impose some limit on the data (range, integer-only) impose that restriction when the user presses enter.
### BoundedIntText
```
widgets.BoundedIntText(
value=7,
min=0,
max=10,
step=1,
description='Text:',
disabled=False
)
```
### BoundedFloatText
```
widgets.BoundedFloatText(
value=7.5,
min=0,
max=10.0,
step=0.1,
description='Text:',
disabled=False
)
```
### IntText
```
widgets.IntText(
value=7,
description='Any:',
disabled=False
)
```
### FloatText
```
widgets.FloatText(
value=7.5,
description='Any:',
disabled=False
)
```
## Boolean widgets
There are three widgets that are designed to display a boolean value.
### ToggleButton
```
widgets.ToggleButton(
value=False,
description='Click me',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
)
```
### Checkbox
```
widgets.Checkbox(
value=False,
description='Check me',
disabled=False
)
```
### Valid
The valid widget provides a read-only indicator.
```
widgets.Valid(
value=False,
description='Valid!',
)
```
## Selection widgets
There are several widgets that can be used to display single selection lists, and two that can be used to select multiple values. All inherit from the same base class. You can specify the **enumeration of selectable options by passing a list** (options are either (label, value) pairs, or simply values for which the labels are derived by calling `str`). You can **also specify the enumeration as a dictionary**, in which case the **keys will be used as the item displayed** in the list and the corresponding **value will be used** when an item is selected (in this case, since dictionaries are unordered, the displayed order of items in the widget is unspecified).
### Dropdown
```
widgets.Dropdown(
options=['1', '2', '3'],
value='2',
description='Number:',
disabled=False,
)
```
The following is also valid:
```
widgets.Dropdown(
options={'One': 1, 'Two': 2, 'Three': 3},
value=2,
description='Number:',
)
```
### RadioButtons
```
widgets.RadioButtons(
options=['pepperoni', 'pineapple', 'anchovies'],
# value='pineapple',
description='Pizza topping:',
disabled=False
)
```
### Select
```
widgets.Select(
options=['Linux', 'Windows', 'OSX'],
value='OSX',
# rows=10,
description='OS:',
disabled=False
)
```
### SelectionSlider
```
widgets.SelectionSlider(
options=['scrambled', 'sunny side up', 'poached', 'over easy'],
value='sunny side up',
description='I like my eggs ...',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True
)
```
### SelectionRangeSlider
The value, index, and label keys are 2-tuples of the min and max values selected. The options must be nonempty.
```
import datetime
dates = [datetime.date(2015,i,1) for i in range(1,13)]
options = [(i.strftime('%b'), i) for i in dates]
widgets.SelectionRangeSlider(
options=options,
index=(0,11),
description='Months (2015)',
disabled=False
)
```
### ToggleButtons
```
widgets.ToggleButtons(
options=['Slow', 'Regular', 'Fast'],
description='Speed:',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Description of slow', 'Description of regular', 'Description of fast'],
# icons=['check'] * 3
)
```
### SelectMultiple
Multiple values can be selected with <kbd>shift</kbd> and/or <kbd>ctrl</kbd> (or <kbd>command</kbd>) pressed and mouse clicks or arrow keys.
```
widgets.SelectMultiple(
options=['Apples', 'Oranges', 'Pears'],
value=['Oranges'],
#rows=10,
description='Fruits',
disabled=False
)
```
## String widgets
There are several widgets that can be used to display a string value. The `Text` and `Textarea` widgets accept input. The `HTML` and `HTMLMath` widgets display a string as HTML (`HTMLMath` also renders math). The `Label` widget can be used to construct a custom control label.
### Text
```
widgets.Text(
value='Hello World',
placeholder='Type something',
description='String:',
disabled=False
)
```
### Textarea
```
widgets.Textarea(
value='Hello World',
placeholder='Type something',
description='String:',
disabled=False
)
```
### Label
The `Label` widget is useful if you need to build a custom description next to a control using similar styling to the built-in control descriptions.
```
widgets.HBox([widgets.Label(value="The $m$ in $E=mc^2$:"), widgets.FloatSlider()])
```
### HTML
```
widgets.HTML(
value="Hello <b>World</b>",
placeholder='Some HTML',
description='Some HTML',
)
```
### HTML Math
```
widgets.HTMLMath(
value=r"Some math and <i>HTML</i>: \(x^2\) and $$\frac{x+1}{x-1}$$",
placeholder='Some HTML',
description='Some HTML',
)
```
## Image
```
file = open("images/WidgetArch.png", "rb")
image = file.read()
widgets.Image(
value=image,
format='png',
width=300,
height=400,
)
```
## Button
```
widgets.Button(
description='Click me',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',
icon='check'
)
```
## Output
The `Output` widget can capture and display stdout, stderr and [rich output generated by IPython](http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#module-IPython.display). For detailed documentation, see the [output widget examples](/examples/Output Widget.html).
## Play (Animation) widget
The `Play` widget is useful to perform animations by iterating on a sequence of integers with a certain speed. The value of the slider below is linked to the player.
```
play = widgets.Play(
# interval=10,
value=50,
min=0,
max=100,
step=1,
description="Press play",
disabled=False
)
slider = widgets.IntSlider()
widgets.jslink((play, 'value'), (slider, 'value'))
widgets.HBox([play, slider])
```
## Date picker
The date picker widget works in Chrome and IE Edge, but does not currently work in Firefox or Safari because they do not support the HTML date input field.
```
widgets.DatePicker(
description='Pick a Date',
disabled=False
)
```
## Color picker
```
widgets.ColorPicker(
concise=False,
description='Pick a color',
value='blue',
disabled=False
)
```
## Controller
The `Controller` allows a game controller to be used as an input device.
```
widgets.Controller(
index=0,
)
```
## Container/Layout widgets
These widgets are used to hold other widgets, called children. Each has a `children` property that may be set either when the widget is created or later.
### Box
```
items = [widgets.Label(str(i)) for i in range(4)]
widgets.Box(items)
```
### HBox
```
items = [widgets.Label(str(i)) for i in range(4)]
widgets.HBox(items)
```
### VBox
```
items = [widgets.Label(str(i)) for i in range(4)]
left_box = widgets.VBox([items[0], items[1]])
right_box = widgets.VBox([items[2], items[3]])
widgets.HBox([left_box, right_box])
```
### Accordion
```
accordion = widgets.Accordion(children=[widgets.IntSlider(), widgets.Text()])
accordion.set_title(0, 'Slider')
accordion.set_title(1, 'Text')
accordion
```
### Tabs
In this example the children are set after the tab is created. Titles for the tabs are set in the same way they are for `Accordion`.
```
tab_contents = ['P0', 'P1', 'P2', 'P3', 'P4']
children = [widgets.Text(description=name) for name in tab_contents]
tab = widgets.Tab()
tab.children = children
for i in range(len(children)):
tab.set_title(i, str(i))
tab
```
### Accordion and Tab use `selected_index`, not value
Unlike the rest of the widgets discussed earlier, the container widgets `Accordion` and `Tab` update their `selected_index` attribute when the user changes which accordion or tab is selected. That means that you can both see what the user is doing *and* programmatically set what the user sees by setting the value of `selected_index`.
Setting `selected_index = None` closes all of the accordions or deselects all tabs.
In the cells below try displaying or setting the `selected_index` of the `tab` and/or `accordion`.
```
tab.selected_index = 3
accordion.selected_index = None
```
### Nesting tabs and accordions
Tabs and accordions can be nested as deeply as you want. If you have a few minutes, try nesting a few accordions or putting an accordion inside a tab or a tab inside an accordion.
The example below makes a couple of tabs with an accordion children in one of them
```
tab_nest = widgets.Tab()
tab_nest.children = [accordion, accordion]
tab_nest.set_title(0, 'An accordion')
tab_nest.set_title(1, 'Copy of the accordion')
tab_nest
```
[Index](Index.ipynb) - [Back](Widget Basics.ipynb) - [Next](Output Widget.ipynb)
| github_jupyter |
# Advanced Tutorial: Creating Gold Annotation Labels with BRAT
This is a short tutorial on how to use BRAT (Brat Rapid Annotation Tool), an
online environment for collaborative text annotation.
http://brat.nlplab.org/
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
# TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE
# Note that this is necessary for parallel execution amongst other things...
# os.environ['SNORKELDB'] = 'postgres:///snorkel-intro'
from snorkel import SnorkelSession
session = SnorkelSession()
```
## Step 1: Define a `Candidate` Type
We repeat our definition of the `Spouse` `Candidate` subclass from Parts II and III.
```
from snorkel.models import candidate_subclass, Document, Candidate
Spouse = candidate_subclass('Spouse', ['person1', 'person2'])
```
### a) Select an example `Candidate` and `Document`
Candidates are divided into 3 splits, each mapped to a unique integer id:
- 0: _training_
- 1: _development_
- 2: _testing_
In this tutorial, we'll load our training set candidates and create gold labels for a document using the BRAT interface
## Step 2: Launching BRAT
BRAT runs as a separate server application. Snorkel will automatically download and configure a BRAT instance for you. When you first initialize this server, you need to provide your application's `Candidate` type. For this tutorial, we use the `Spouse` relation defined above, which consists of a pair of `PERSON` named entities connected by marriage.
Currently, we only support 1 relation type per-application.
```
from snorkel.contrib.brat import BratAnnotator
brat = BratAnnotator(session, Spouse, encoding='utf-8')
```
### a) Initialize our document collection
BRAT creates a local copy of all the documents and annotations found in a `split` set. We initialize a document collection by defining a unique set name, _spouse/train_, and then passing in our training set candidates via the `split` id. Annotations are stored as plain text files in [standoff](http://brat.nlplab.org/standoff.html) format.
<img align="left" src="imgs/brat-login.jpg" width="200px" style="margin-right:50px">
After launching the BRAT annotator for the first time, you will need to login to begin editing annotations. Navigate your mouse to the upper right-hand corner of the BRAT interface (see Fig. 1) click 'login' and enter the following information:
- **login**: _brat_
- **password**: _brat_
Advanced BRAT users can setup multiple annotator accounts by adding USER/PASSWORD key pairs to the `USER_PASSWORD` dictionary found in `snokel/contrib/brat/brat-v1.3_Crunchy_Frog/config.py`. This is useful if you would like to keep track of multiple annotator judgements for later adjudication or use as labeling functions as per our tutorial on using [Snorkel for Crowdsourcing](https://github.com/HazyResearch/snorkel/blob/master/tutorials/crowdsourcing/Crowdsourced_Sentiment_Analysis.ipynb).
```
brat.init_collection("spouse/train", split=0)
```
We've already generated some BRAT annotations for you, so let's import an existing collection for purposes of this tutorial.
```
brat.import_collection("data/brat_spouse.zip", overwrite=True)
```
### b) Launch BRAT Interface in a New Window
Once our collection is initialized, we can view specific documents for annotation. The default mode is to generate a HTML link to a new BRAT browser window. Click this link to connect to launch the annotator editor.
Optionally, you can launch BRAT in an embedded window by calling:
brat.view("spouse/train", doc, new_window=False)
```
doc_name = '5ede8912-59c9-4ba9-93df-c58cebb542b7'
doc = session.query(Document).filter(Document.name==doc_name).one()
brat.view("spouse/train", doc)
```
If you do not have a specific document to edit, you can optionally launch BRAT and use their file browser to navigate through all files found in the target collection.
```
brat.view("spouse/train")
```
## Step 3: Creating Gold Label Annotations
### a) Annotating Named Entities
`Spouse` relations consist of 2 `PERSON` named entities. When annotating our validation documents,
the first task is to identify our target entities. In this tutorial, we will annotate all `PERSON`
mentions found in our example document, though for your application you may choose to only label
those that participate in a true relation.
<img align="right" src="imgs/brat-anno-dialog.jpg" width="400px" style="margin-left:50px">
Begin by selecting and highlighting the text corresponding to a `PERSON` entity. Once highlighted, an annotation dialog will appear on your screen (see image of the BRAT Annotation Dialog Window to the right). If this is correct, click ok. Repeat this for every entity you find in the document.
**Annotation Guidelines**
When developing gold label annotations, you should always discuss and agree on a set of _annotator guidelines_ to share with human labelers. These are the guidelines we used to label the `Spouse` relation:
- **<span style="color:red">Do not</span>** include formal titles associated with professional roles e.g., _**Pastor** Jeff_, _**Prime Minister** Prayut Chan-O-Cha_
- Do include English honorifics unrelated to a professional role, e.g., _**Mr.** John Cleese_.
- **<span style="color:red">Do not</span>** include family names/surnames that do not reference a single individual, e.g., _the Duggar family_.
- Do include informal titles, stage names, fictional characters, and nicknames, e.g., _**Dog the Bounty Hunter**_
- Include possessive's, e.g., _Anna**'s**_.
### b) Annotating Relations
To annotate `Spouse` relations, we look through all pairs of `PERSON` entities found within a single sentence. BRAT identifies the bounds of each sentence and renders a numbered row in the annotation window (see the left-most column in the image below).
<img align="right" src="imgs/brat-relation.jpg" width="500px" style="margin-left:50px">
Annotating relations is done through simple drag and drop. Begin by clicking and holding on a single `PERSON` entity and then drag that entity to its corresponding spouse entity. That is it!
**Annotation Guidelines**
- Restrict `PERSON` pairs to those found in the same sentence.
- The order of `PERSON` arguments does not matter in this application.
- **<span style="color:red">Do not</span>** include relations where a `PERSON` argument is wrong or otherwise incomplete.
## Step 4: Scoring Models using BRAT Labels
### a) Evaluating System Recall
Creating gold validation data with BRAT is a critical evaluation step because it allows us to compute an estimate of our model's _true recall_. When we create labeled data over a candidate set created by Snorkel, we miss mentions of relations that our candidate extraction step misses. This causes us to overestimate the system's true recall.
In the code below, we show how to map BRAT annotations to an existing set of Snorkel candidates and compute some associated metrics.
```
train_cands = session.query(Spouse).filter(Spouse.split == 0).order_by(Spouse.id).all()
```
### b) Mapping BRAT Annotations to Snorkel Candidates
We annotated a single document using BRAT to illustrate the difference in scores when we factor in the effects of candidate generation.
```
%time brat.import_gold_labels(session, "spouse/train", train_cands)
```
Our candidate extractor only captures 7/14 (50%) of true mentions in this document. Our real system's recall is likely even worse, since we won't correctly predict the label for all true candidates.
### c) Re-loading the Trained LSTM
We'll load the LSTM model we trained in [Workshop_4_Discriminative_Model_Training.ipynb](Workshop_4_Discriminative_Model_Training.ipynb) and use to to predict marginals for our test candidates.
```
test_cands = session.query(Spouse).filter(Spouse.split == 2).order_by(Spouse.id).all()
from snorkel.learning.disc_models.rnn import reRNN
lstm = reRNN(seed=1701, n_threads=None)
lstm.load("spouse.lstm")
marginals = lstm.marginals(test_cands)
```
### d) Create a Subset of Test for Evaluation
Our measures assume BRAT annotations are complete for the given set of documents! Rather than manually annotating the entire test set, we define a small subset of 10 test documents for hand labeling. We'll then compute the full, recall-corrected metrics for this subset.
First, let's build a query to initialize this candidate collection.
```
doc_ids = set(open("data/brat_test_docs.tsv","rb").read().splitlines())
cid_query = [c.id for c in test_cands if c.get_parent().document.name in doc_ids]
brat.init_collection("spouse/test-subset", cid_query=cid_query)
brat.view("spouse/test-subset")
```
### e) Comparing Unadjusted vs. Adjusted Scores
```
import matplotlib.pyplot as plt
plt.hist(marginals, bins=20)
plt.show()
from snorkel.annotations import load_gold_labels
L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1, load_as_array=True, zero_one=True)
L_gold_test = load_gold_labels(session, annotator_name='gold', split=2, zero_one=True)
```
**Recall-uncorrected Score** If we don't account for candidates missed during extraction, our model score will overestimate real performance, as is the case for the model evaluation below.
```
brat.score(session, test_cands, marginals, "spouse/test-subset", recall_correction=False)
```
**Recall-corrected Score** Though this is a small sample of documents, we see how missing candidates can impact our real system score.
```
brat.score(session, test_cands, marginals, "spouse/test-subset")
```
This is the full model, evaluated on all our gold candidate labels.
```
tp, fp, tn, fn = lstm.error_analysis(session, test_cands, L_gold_test)
```
| github_jupyter |
```
import copy
import numpy as np
from dm_control import suite
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
def display_video(frames, framerate=30):
    """Build a matplotlib animation that plays *frames* at *framerate* fps.

    Parameters
    ----------
    frames : sequence of (H, W, C) image arrays; the first frame fixes the
        figure size.
    framerate : frames per second, used to derive the inter-frame delay.

    Returns
    -------
    matplotlib.animation.FuncAnimation
    """
    height, width = frames[0].shape[0], frames[0].shape[1]
    dots_per_inch = 70
    # Create the figure under the headless 'Agg' backend so no window is
    # rendered, then restore whatever backend was active before.
    previous_backend = matplotlib.get_backend()
    matplotlib.use('Agg')
    fig, ax = plt.subplots(1, 1,
                           figsize=(width / dots_per_inch, height / dots_per_inch),
                           dpi=dots_per_inch)
    matplotlib.use(previous_backend)
    ax.set_axis_off()
    ax.set_aspect('equal')
    ax.set_position([0, 0, 1, 1])
    image = ax.imshow(frames[0])

    def _draw(frame):
        # Reuse the single AxesImage and just swap its pixel data.
        image.set_data(frame)
        return [image]

    return animation.FuncAnimation(fig=fig, func=_draw, frames=frames,
                                   interval=1000 / framerate,
                                   blit=True, repeat=False)
random_state = np.random.RandomState(42)
env = suite.load("cartpole", "balance")
spec = env.action_spec()
duration = 4 # Seconds
frames = []
ticks = []
rewards = []
observations = []
while env.physics.data.time < duration:
action = random_state.uniform(spec.minimum, spec.maximum, spec.shape)
time_step = env.step(action)
camera0 = env.physics.render(camera_id=0, height=200, width=200)
camera1 = env.physics.render(camera_id=1, height=200, width=200)
frames.append(np.hstack((camera0, camera1)))
rewards.append(time_step.reward)
observations.append(copy.deepcopy(time_step.observation))
ticks.append(env.physics.data.time)
#print(env.physics.data.time, time_step)
anim = display_video(frames, framerate=1./env.control_timestep())
num_sensors = len(time_step.observation)
_, ax = plt.subplots(1 + num_sensors, 1, sharex=True, figsize=(4, 8))
ax[0].plot(ticks, rewards)
ax[0].set_ylabel('reward')
ax[-1].set_xlabel('time')
for i, key in enumerate(time_step.observation):
data = np.asarray([observations[j][key] for j in range(len(observations))])
ax[i+1].plot(ticks, data, label=key)
ax[i+1].set_ylabel(key)
writervideo = animation.FFMpegWriter(fps=1./env.control_timestep())
anim.save('cartpole.mp4', writer=writervideo)
plt.figure()
plt.imshow(camera1)
print(time_step)
print(np.concatenate(list(time_step.observation.values())))
from util.util import DMC2GymWrapper
env_dmc = suite.load("cartpole", "balance")
env_gym = DMC2GymWrapper(env_dmc, max_step=100)
obs = env_gym.reset()
for i in range(2000):
obs, r, done, info = env_gym.step(env_gym.action_space.sample())
print(env_gym._step, obs, r, done, info)
if done:
print("Done!")
break
```
| github_jupyter |
## Observations and Insights
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
from scipy.stats import sem
from scipy.stats import linregress
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
study_data_complete = pd.merge(study_results, mouse_metadata, how="left", on="Mouse ID")
# Display the data table for preview
study_data_complete.head()
# Checking the number of mice.
len(study_data_complete["Mouse ID"].unique())
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_mouse_ids = study_data_complete.loc[study_data_complete.duplicated(subset=['Mouse ID', 'Timepoint']),'Mouse ID'].unique()
duplicate_mouse_ids
# Optional: Get all the data for the duplicate mouse ID.
duplicate_mouse_data = study_data_complete.loc[study_data_complete["Mouse ID"] == "g989"]
duplicate_mouse_data
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_study_data = study_data_complete[study_data_complete['Mouse ID'].isin(duplicate_mouse_ids)==False]
clean_study_data.head()
# Checking the number of mice in the clean DataFrame.
len(clean_study_data["Mouse ID"].unique())
```
## Summary Statistics
```
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# NOTE(review): despite the comment above, this cell computes ONE set of
# statistics over the entire cleaned dataset, not one per regimen; the
# per-regimen version follows in the groupby cell below.
mean = np.mean(clean_study_data["Tumor Volume (mm3)"])
median = np.median(clean_study_data["Tumor Volume (mm3)"])
# Population (ddof=0) variance and standard deviation, not the sample
# (ddof=1) estimators.
variance = np.var(clean_study_data["Tumor Volume (mm3)"], ddof = 0)
sd = np.std(clean_study_data["Tumor Volume (mm3)"], ddof = 0)
# Standard error of the mean computed from a random 75-row sample.
# NOTE(review): .sample() is unseeded, so this value changes on every run —
# pass random_state=... (or use the full column) for reproducibility.
sample_volume = clean_study_data.sample(75)
volume = sem(sample_volume["Tumor Volume (mm3)"])
# Assemble the single-row summary table.
summary_statistics = pd.DataFrame({"Mean":[mean],
"Median":[median],
"Variance":[variance],
"Standard Deviation":[sd],
"SEM":[volume],
})
summary_statistics.head()
# This method is the most straightforward, creating multiple series and putting them all together at the end.
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
regimen = clean_study_data.groupby('Drug Regimen')
regimen_mean = regimen.mean()
regimen_median = regimen.median()
regimen_variance = regimen.var()
regimen_sd = regimen.std()
regimen_sem = regimen.sem()
summary_statistics2 = pd.DataFrame({"Mean": regimen_mean["Tumor Volume (mm3)"],
"Median": regimen_median["Tumor Volume (mm3)"],
"Variance": regimen_variance["Tumor Volume (mm3)"],
"Standard Deviation": regimen_sd["Tumor Volume (mm3)"],
"SEM": regimen_sem["Tumor Volume (mm3)"]
})
summary_statistics2
# This method produces everything in a single groupby function.
```
## Bar Plots
```
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pandas.
mice_df = clean_study_data.groupby("Drug Regimen")
var = mice_df['Mouse ID'].count()
var.plot(kind = 'bar',color ='r',title = "Total Mice per Treatment", alpha = .75, edgecolor = 'k')
plt.ylabel('Number of Mice')
plt.show()
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pyplot.
plt.bar(var.index,var,color='r',alpha=.75,edgecolor='k')
plt.xticks(rotation=90)
plt.ylabel('Number of Mice')
plt.xlabel('Regimen')
plt.show()
```
## Pie Plots
```
# Generate a pie plot showing the distribution of female versus male mice using pandas
gender = mouse_metadata.loc[mouse_metadata['Mouse ID'] != 'g989']
gender_plot = gender['Sex'].value_counts()
gender_plot.plot(kind='pie', shadow = True, autopct = '%1.2f%%')
plt.title('Number of Mice by Gender')
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pyplot
labels = gender_plot.index
sizes = gender_plot
chart = plt.pie(sizes,autopct='%1.2f%%',labels=labels, shadow=True)
plt.ylabel('Sex')
plt.show()
```
## Quartiles, Outliers and Boxplots
```
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens.
treatment = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
#start by getting the last (greatest) timepoint for each mouse
timepoint_df = clean_study_data[['Mouse ID', 'Timepoint', 'Drug Regimen']]
filtered_df=timepoint_df[timepoint_df['Drug Regimen'].isin(treatment)]
grouped_df = filtered_df.groupby('Mouse ID')['Timepoint'].max()
# merge this group df with the original dataframe to get the tumor volume at the last timepoint
merged_df = pd.merge(grouped_df,clean_study_data,on=['Mouse ID','Timepoint'],how = 'left')
merged_df.head()
# Put treatments into a list for a for loop (and later for plot labels)
# Create empty list to fill with tumor vol data (for plotting)
tumor_vol_list = []
for drug in treatment:
    # BUG FIX: the original indexed merged_df[drug], but the drug names are
    # *row values* of the 'Drug Regimen' column, not columns of merged_df,
    # so that raised KeyError. Select this regimen's final tumor volumes
    # instead.
    drug_volumes = merged_df.loc[merged_df['Drug Regimen'] == drug,
                                 'Tumor Volume (mm3)']
    tumor_vol_list.append(drug_volumes)
    # Calculate the IQR and quantitatively determine any potential outliers.
    quartiles = drug_volumes.quantile([.25, .5, .75]).round(2)
    lowerq = quartiles[.25].round(2)
    upperq = quartiles[.75].round(2)
    iqr = round(upperq - lowerq, 2)
    # Determine outliers using upper and lower bounds (1.5 * IQR rule).
    lower_bound = round(lowerq - (1.5 * iqr), 2)
    upper_bound = round(upperq + (1.5 * iqr), 2)
    outliers = drug_volumes[(drug_volumes < lower_bound) |
                            (drug_volumes > upper_bound)]
    print(f"{drug}: IQR = {iqr}, bounds = ({lower_bound}, {upper_bound}), "
          f"potential outliers = {outliers.values}")
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
plt.boxplot(tumor_vol_list, labels=treatment)
plt.ylabel('Final Tumor Volume (mm3)')
plt.show()
```
## Line and Scatter Plots
```
#capomulin df
capomulin_df = clean_study_data.loc[clean_study_data['Drug Regimen']=='Capomulin']
print(len(capomulin_df['Mouse ID'].unique()))
capomulin_df.head()
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
capomulin_mouse = clean_study_data.loc[clean_study_data['Mouse ID']=='u364']
x_axis=capomulin_mouse['Timepoint']
y_axis=capomulin_mouse['Tumor Volume (mm3)']
plt.ylabel('Tumor Volume')
plt.xlabel('Timepoint')
plt.title('Timepoint vs. Tumor Volume')
plt.plot(x_axis,y_axis)
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
capomulin_mouse = clean_study_data.loc[clean_study_data['Drug Regimen']=='Capomulin']
capomulin_df = capomulin_mouse.groupby('Weight (g)')
mean_tumor= capomulin_df['Tumor Volume (mm3)'].mean()
weight_tumor=pd.DataFrame(mean_tumor).reset_index()
weight_tumor.plot(kind='scatter',x='Weight (g)',y = 'Tumor Volume (mm3)')
plt.title('Weight (g) vs. Tumor Volume (mm3)')
plt.show()
```
## Correlation and Regression
```
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
var1 = weight_tumor['Weight (g)']
var2 = weight_tumor['Tumor Volume (mm3)']
corr = st.pearsonr(var1,var2)
print(f"The correlation coefficient of weight and average tumor volume is {corr[0]}")
(slope, intercept, rvalue, pvalue, stderr) = linregress(var1,var2)
regress_vals = var1*slope+intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(var1,var2)
plt.plot(var1, regress_vals,'r-')
plt.annotate(line_eq,(20,37), fontsize= 15,color ='r')
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import os.path
import scipy,scipy.spatial
import matplotlib
matplotlib.rcParams['figure.dpi'] = 100
from data_utilities import *
# from definitions import *
# from run_train_eval_net import run_train_eval_net,run_eval_net
import os
GPU = "1"
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=GPU
dataset_name = 'ManySig'
dataset_path='../../orbit_rf_dataset/data/compact_pkl_datasets/'
compact_dataset = load_compact_pkl_dataset(dataset_path,dataset_name)
tx_list = compact_dataset['tx_list']
rx_list = [compact_dataset['rx_list'][0]]
equalized = 0
capture_date_list = compact_dataset['capture_date_list']
n_tx = len(tx_list)
n_rx = len(rx_list)
print(n_tx,n_rx)
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
def create_net():
    """Assemble and compile the CNN transmitter classifier.

    The network takes a (256, 2) I/Q sample window, adds a channel axis,
    applies a stack of small 2-D convolutions with max-pooling, and ends
    in an L2-regularised dense head with a softmax over the ``n_tx``
    transmitter classes (module-level global).

    Returns
    -------
    A compiled ``keras.Model``.
    """
    iq_input = Input(shape=(256, 2))
    # Trailing channel axis so Conv2D can treat the window as an image.
    net = Reshape((256, 2, 1))(iq_input)
    net = Conv2D(8, (3, 2), activation='relu', padding='same')(net)
    net = MaxPool2D((2, 1))(net)
    net = Conv2D(16, (3, 2), activation='relu', padding='same')(net)
    net = MaxPool2D((2, 1))(net)
    net = Conv2D(16, (3, 2), activation='relu', padding='same')(net)
    net = MaxPool2D((2, 2))(net)
    net = Conv2D(32, (3, 1), activation='relu', padding='same')(net)
    net = MaxPool2D((2, 1))(net)
    net = Conv2D(16, (3, 1), activation='relu', padding='same')(net)
    net = Flatten()(net)
    net = Dense(100, activation='relu',
                kernel_regularizer=keras.regularizers.l2(0.0001))(net)
    net = Dense(80, activation='relu',
                kernel_regularizer=keras.regularizers.l2(0.0001))(net)
    net = Dropout(0.5)(net)
    class_probs = Dense(n_tx, activation='softmax',
                        kernel_regularizer=keras.regularizers.l2(0.0001))(net)
    model = Model(iq_input, class_probs)
    model.compile(loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'],
                  optimizer=keras.optimizers.Adam(0.0005))
    return model
classifier = create_net()
classifier.summary()
def evaluate_test(classifier):
    """Score *classifier* on the different-day test set.

    Returns a tuple ``(acc, acc_bal)``: raw accuracy over every test
    example, and accuracy over a class-balanced subset keeping at most
    ``n_test_samples`` examples per transmitter class.

    NOTE(review): relies on the module-level globals ``sig_dfTest``,
    ``txidNum_dfTest``, ``tx_list`` and ``n_test_samples``.
    """
    probs = classifier.predict(sig_dfTest)
    predicted = np.argmax(probs, 1)
    acc = np.mean(predicted == txidNum_dfTest)
    # Gather up to n_test_samples indices for each class, then stack them.
    per_class = [np.where(txidNum_dfTest == cls)[0][:n_test_samples]
                 for cls in range(len(tx_list))]
    balanced = np.concatenate(per_class)
    acc_bal = np.mean(predicted[balanced] == txidNum_dfTest[balanced])
    return acc, acc_bal
TRAIN = True
continue_training = True
nreal = 5
real_list = list(range(nreal))
patience = 5
n_epochs = 100
capture_date_test_list = capture_date_list[-1]
dataset_test = merge_compact_dataset(compact_dataset,capture_date_test_list,tx_list,rx_list, equalized=equalized)
test_augset_dfDay,_,_ = prepare_dataset(dataset_test,tx_list,
val_frac=0, test_frac=0)
[sig_dfTest,txidNum_dfTest,txid_dfTest,cls_weights] = test_augset_dfDay
smTest_results_real = []
dfTest_results_real = []
for nday in range(3):
print("");print("")
print("nday: {} ".format(nday))
fname_w = 'weights/d007_{:04d}.hd5'.format(nday)
rx_train_list= rx_list
dataset = merge_compact_dataset(compact_dataset,capture_date_list[:nday+1],tx_list,rx_list, equalized=equalized)
train_augset,val_augset,test_augset_smRx = prepare_dataset(dataset,tx_list,
val_frac=0.1, test_frac=0.1)
[sig_train,txidNum_train,txid_train,cls_weights] = train_augset
[sig_valid,txidNum_valid,txid_valid,_] = val_augset
[sig_smTest,txidNum_smTest,txid_smTest,cls_weights] = test_augset_smRx
if continue_training:
skip = os.path.isfile(fname_w)
else:
skip = False
classifier = create_net()
if TRAIN and not skip:
filepath = 't_weights_'+GPU
c=[ keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True),
keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)]
history = classifier.fit(sig_train,txid_train,class_weight=cls_weights,
validation_data=(sig_valid , txid_valid),callbacks=c, epochs=n_epochs)
classifier.load_weights(filepath)
classifier.save_weights(fname_w,save_format="h5")
else:
classifier.load_weights(fname_w)
smTest_r = classifier.evaluate(sig_smTest,txid_smTest,verbose=0)[1]
dfTest_r = classifier.evaluate(sig_dfTest,txid_dfTest,verbose=0)[1]
print(smTest_r,dfTest_r)
smTest_results_real.append(smTest_r)
dfTest_results_real.append(dfTest_r)
K.clear_session()
plt.plot(range(1,4),smTest_results_real)
plt.plot(range(1,4),dfTest_results_real)
plt.xlabel('No of Training Days')
plt.ylabel('Accuracy')
plt.legend(['Same Day','Diff Day'])
print(range(1,4))
print(smTest_results_real)
print(dfTest_results_real)
```
| github_jupyter |
... ***CURRENTLY UNDER DEVELOPMENT*** ...
## Validation of the total water level
inputs required:
* historical wave conditions
* emulator output - synthetic wave conditions of TWL
* emulator output - synthetic wave conditions of TWL with 3 scenarios of SLR
in this notebook:
* Comparison of the extreme distributions
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# common
import os
import os.path as op
# pip
import numpy as np
import xarray as xr
from datetime import datetime
import matplotlib.pyplot as plt
# DEV: override installed teslakit
import sys
sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..', '..'))
# teslakit
from teslakit.database import Database
from teslakit.climate_emulator import Climate_Emulator
from teslakit.extremes import Peaks_Over_Threshold as POT
from teslakit.util.time_operations import xds_reindex_daily
from teslakit.plotting.extremes import Plot_ReturnPeriodValidation_CC
from teslakit.plotting.estela import Plot_DWTs_Probs
from teslakit.plotting.wts import Plot_Probs_WT_WT
from teslakit.plotting.outputs import Plot_LevelVariables_Histograms
```
## Database and Site parameters
```
# --------------------------------------
# Teslakit database
p_data = r'/Users/albacid/Projects/TeslaKit_projects'
# offshore
db = Database(p_data)
db.SetSite('ROI')
# climate change - S1
db_S1 = Database(p_data)
db_S1.SetSite('ROI_CC_S1')
# climate change - S2
db_S2 = Database(p_data)
db_S2.SetSite('ROI_CC_S2')
# climate change - S3
db_S3 = Database(p_data)
db_S3.SetSite('ROI_CC_S3')
# --------------------------------------
# Load complete hourly data for extremes analysis
# Historical
HIST_C_h = db.Load_HIST_OFFSHORE(vns=['TWL'],decode_times=True)
# Simulation (1000 yrs)
SIM_C_h = db.Load_SIM_OFFSHORE_all(vns=['TWL'], decode_times=True, use_cftime=True)
# Simulation climate change S1 (100 yrs)
SIM_C_h_CChange_S1 = db_S1.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True)
# Simulation climate change S2 (100 yrs)
SIM_C_h_CChange_S2 = db_S2.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True)
# Simulation climate change S3 (100 yrs)
SIM_C_h_CChange_S3 = db_S3.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True)
# Keep first 100 years of simulation without climate change
SIM_C_h = SIM_C_h.isel(time=slice(0, len(SIM_C_h_CChange_S1.time))) # 100 years
```
## Level Variables (TWL) - Histograms
```
from teslakit.plotting.outputs import axplot_compare_histograms
from teslakit.plotting.config import _faspect, _fsize
import matplotlib.gridspec as gridspec
# Plot TWL histogram comparison between historical and simulated data for different SLR scenarios
data_fit = HIST_C_h['TWL'].values[:]; data_fit = data_fit[~np.isnan(data_fit)]
data_sim = SIM_C_h['TWL'].sel(n_sim = 0).values[:]; data_sim = data_sim[~np.isnan(data_sim)]
data_sim_1 = SIM_C_h_CChange_S1['TWL'].sel(n_sim = 0).values[:]; data_sim_1 = data_sim_1[~np.isnan(data_sim_1)]
data_sim_2 = SIM_C_h_CChange_S2['TWL'].sel(n_sim = 0).values[:]; data_sim_2 = data_sim_2[~np.isnan(data_sim_2)]
data_sim_3 = SIM_C_h_CChange_S3['TWL'].sel(n_sim = 0).values[:]; data_sim_3 = data_sim_3[~np.isnan(data_sim_3)]
# plot figure
fig = plt.figure(figsize=(_faspect*_fsize, _fsize*2/2.3))
gs = gridspec.GridSpec(2, 2)
n_bins = np.linspace(np.nanmin([np.nanmin(data_fit), np.nanmin(data_sim_3)]),np.nanmax([np.nanmax(data_fit), np.nanmax(data_sim_3)]), 40)
ax = plt.subplot(gs[0, 0])
axplot_compare_histograms(ax, data_fit, data_sim, ttl='TWL', n_bins=n_bins,
color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,
label_1='Historical', label_2='Simulation')
ax = plt.subplot(gs[0, 1])
axplot_compare_histograms(ax, data_sim, data_sim_1, ttl='TWL', n_bins=n_bins,
color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,
label_1='Simulation', label_2='Simulation Climate Change S1')
ax = plt.subplot(gs[1, 0])
axplot_compare_histograms(ax, data_sim, data_sim_2, ttl='TWL', n_bins=n_bins,
color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,
label_1='Simulation', label_2='Simulation Climate Change S2')
ax = plt.subplot(gs[1, 1])
axplot_compare_histograms(ax, data_sim, data_sim_3, ttl='TWL', n_bins=n_bins,
color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,
label_1='Simulation', label_2='Simulation Climate Change S3')
```
## TWL - Annual Maxima for different SLR scenarios
```
# Plot TWL annual maxima
# calculate Annual Maxima values for historical and simulated data
hist_A = HIST_C_h['TWL'].groupby('time.year').max(dim='time')
sim_A = SIM_C_h['TWL'].groupby('time.year').max(dim='time')
```
### SLR S1 (intermediate low, +0.5m)
```
sim_B = SIM_C_h_CChange_S1['TWL'].groupby('time.year').max(dim='time')
# Return Period historical vs. simulations
Plot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose());
```
### SLR S2 (intermediate, +1m)
```
sim_B = SIM_C_h_CChange_S2['TWL'].groupby('time.year').max(dim='time')
# Return Period historical vs. simulations
Plot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose());
```
### SLR S3 (intermediate high, +1.5m)
```
sim_B = SIM_C_h_CChange_S3['TWL'].groupby('time.year').max(dim='time')
# Return Period historical vs. simulations
Plot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose());
```
| github_jupyter |
<small><small><i>
All the IPython Notebooks in this lecture series by Dr. Milan Parmar are available @ **[GitHub](https://github.com/milaan9/02_Python_Datatypes)**
</i></small></small>
# Python Strings
In this class you will learn to create, format, modify and delete strings in Python. Also, you will be introduced to various string operations and functions.
## What is String in Python?
A string is a built-in sequence type made up of characters. It is used to handle **textual data** in Python. Python **strings are immutable sequences** of **Unicode** code points. Creating strings is simple and easy in Python.
A character is simply a symbol. For example, the English language has 26 characters.
Computers do not deal with characters, they deal with numbers (binary). Even though you may see characters on your screen, internally it is stored and manipulated as a combination of 0s and 1s.
This conversion of character to a number is called encoding, and the reverse process is decoding. ASCII and Unicode are some of the popular encodings used.
In Python, a string is a sequence of Unicode characters. Unicode was introduced to include every character in all languages and bring uniformity in encoding. These Unicodes range from **$0_{hex}$** to **$10FFFF_{hex}$**. Normally, a Unicode is referred to by writing **"U+"** followed by its **hexadecimal** number. Thus strings in Python are a sequence of Unicode values. You can learn about Unicode from **[Python Unicode](https://docs.python.org/3.3/howto/unicode.html)**.
<div>
<img src="img/s0.png" width="600"/>
</div>
## How to create a string in Python?
Strings can be created by enclosing characters inside a **single quote** or **double-quotes**. Even **triple quotes** can be used in Python but generally used to represent multiline strings and docstrings.
```
# Example:
# defining strings in Python
# all of the following are equivalent
my_string = 'Hello'
print(my_string)
my_string = "Hello"
print(my_string)
my_string = '''Hello'''
print(my_string)
# triple quotes string can extend multiple lines
my_string = """Hello, welcome to
the world of Python"""
print(my_string)
a = "Hello,"
b= 'World!'
print(a+b)
print(a+" "+b)
string1='World'
string2='!'
print('Hello,' + " " + string1 + string2)
```
## How to access characters in a string?
* In Python, Strings are stored as individual characters in a **contiguous memory location**.
* The benefit of using String is that it can be accessed from both the **directions** (forward and backward).
* Both forward as well as backward indexing are provided using Strings in Python.
* Forward indexing starts with **`0,1,2,3,.... `**
* Backward indexing starts with **`-1,-2,-3,-4,.... `**
* Trying to access a character out of index range will raise an **`IndexError`**. The index must be an integer. We can't use floats or other types, this will result into **`IndexError`**.
* Strings can be indexed with square brackets. Indexing starts from zero in Python.
* We can access a range of items in a string by using the slicing operator **`:`**(colon).
* And the **`len()`** function provides the length of a string
```python
str[0] = 'P' = str[-6] ,
str[1] = 'Y' = str[-5] ,
str[2] = 'T' = str[-4] ,
str[3] = 'H' = str[-3] ,
str[4] = 'O' = str[-2] , # refers to the second last item
str[5] = 'N' = str[-1]. # refers to the last item
```
<div>
<img src="img/s3.png" width="300"/>
</div>
```
# Accessing string characters in Python
str = 'PYTHON'
print('str = ', str)
#first character
print('str[0] = ', str[0])
#last character
print('str[-1] = ', str[-1])
#slicing 2nd to 5th character
print('str[1:5] = ', str[1:5])
#slicing 6th to 2nd last character
print('str[5:-2] = ', str[3:-1])
```
If we try to access an index out of the range or use numbers other than an integer, we will get errors.
```
# Accessing string characters in Python
str = 'PYTHON'
print('str = ', str)
# index must be in range
print('str[15] = ', str[15])
# Accessing string characters in Python
str = 'PYTHON'
print('str = ', str)
# index must be an integer
print('str[1.50] = ', str[1.5])
s = '123456789' #Indexing strats from 0 to 8
print("The string '%s' string is %d characters long" %(s, len(s)) )
print('First character of',s,'is',s[0])
print('Last character of',s,'is',s[8])
print('Last character of',s,'is',s[len(s)-1]) # [9-1] = [8] is 9
```
Negative indices can be used to start counting from the back
```
print('First character of',s,'is',s[-len(s)])
print('First character of',s,'is',s[(-9)])
print('Second character of',s,'is',s[(-8)])
print('Last character of',s,'is',s[-1])
```
Finally, a substring (range of characters) can be specified using $a:b$ to select the characters at indices $a,a+1,\ldots,b-1$. Note that the last character is *not* included.
```
print("First three characters",s[0:3])
print("Next three characters",s[3:6])
```
An empty beginning and end of the range denotes the beginning/end of the string:
```
s = '123456789' #Indexing strats from 0 to 8
print("First three characters", s[:3])
print("Last three characters", s[-3:])
# Here, we are creating a simple program to retrieve String in reverse as well as normal form.
name="Milan"
length=len(name)
i=0
for n in range(-1,(-length-1),-1):
print(name[i],"\t",name[n])
i+=1
```
## How to slice a string in Python?
Python String **slice** can be defined as a **substring** which is the part of the string. Therefore further substring can be obtained from a string.
There can be many forms to slice a string, as string can be accessed or indexed from both the direction and hence string can also be sliced from both the directions.
Slicing can be best visualized by considering the index to be between the elements as shown below.
If we want to access a range, we need the index that will slice the portion from the string.
<div>
<img src="img/s16.png" width="300"/>
</div>
**Syntax** of Slice Operator :
```python
str[start : stop : step ]
```
other syntax of slice:
```python
str[start : stop] # items start through stop-1
str[start : ] # items start through the rest of the array
str[ : stop] # items from the beginning through stop-1
str[ : ] # a copy of the whole array
```
```
# Example:
s="Milan Python"
print(s[6:10])
print(s[-12:-7])
print(s[-1: :-1]) #reversed all string
print(s[2: 10: 2]) #step = 2
print(s[ : : -1]) #reversed all string
print(s[ : 5]) #from 0 to 4
print(s[3 : ]) #from 3 to end of the string
print(s[ : ]) #copy all string
```
**NOTE**: Both the operands passed for concatenation must be of same type, else it will show an error.
## Breaking apart strings
When processing text, the ability to split strings appart is particularly useful.
* `partition(separator)`: breaks a string into three parts based on a separator
* `split()`: breaks string into words separated by white-space (optionally takes a separator as argument)
* `join()`: joins the result of a split using string as separator
```
s = "one -> two -> three"
print( s.partition("->") )
print( s.split() )
print( s.split(" -> ") )
print( ";".join( s.split(" -> ") ) )
"This will split all words into a list".split()
' '.join(['This', 'will', 'join', 'all', 'words', 'into', 'a', 'string'])
'Happy New Year'.find('ew')
'Happy New Year'.replace('Happy','Brilliant')
```
## How to change or delete a string?
Strings are immutable. This means that elements of a string cannot be changed once they have been assigned. We can simply reassign different strings to the same name.
```
my_string = 'python'
my_string[5] = 'a'
s='012345'
sX=s[:2]+'X'+s[3:] # this creates a new string with 2 replaced by X
print("creating new string",sX,"OK")
sX=s.replace('2','X') # the same thing
print(sX,"still OK")
s[2] = 'X' # an error!!!
```
We cannot delete or remove characters from a string. But deleting the string entirely is possible using the **`del`** keyword.
```
my_string = 'python'
del my_string[1] # deleting element of string generates error!
my_string = 'python'
del my_string # deleting whole string using 'del' keyword can delete it.
my_string
```
## Python Strings Operations
There are many operations that can be performed with strings which makes it one of the most used data types in Python.
To learn more about the data types available in Python visit: **[Python Data Types](https://github.com/milaan9/01_Python_Introduction/blob/main/009_Python_Data_Types.ipynb)**.
To perform operation on string, Python provides basically 3 types of Operators that are given below.
* Basic Operators/Concatenation of Two or More Strings.
* Membership Operators.
* Relational Operators.
### 1. Basic Operators for concatenation of two or more strings
There are two types of basic operators in String **`+`** and **`*`**.
The **`+`** (concatenation) operator can be used to concatenates two or more string literals together.
The **`*`** (Replication) operator can be used to repeat the string for a given number of times.
#### String Concatenation Operator (**`+`**)
Joining of two or more strings into a single one is called concatenation.
```
# Example:
str1="Hello"
str2="World!"
print(str1+str2)
```
| Expression | Output |
|:----| :--- |
| **`"10" + "50"`** | **"1050"** |
| **`"hello" + "009"`** | **"hello009"** |
| **`"hello99" + "world66" `** | **"hello99world66"** |
>**Note:** Both the operands passed for concatenation must be of same type, else it will show an error.
```
# Example:
print("HelloWorld"+99)
```
#### Python String Replication Operator (**`*`**)
**Replication operator** uses two parameters for operation, One is the integer value and the other one is the String argument.
The Replication operator is used to **repeat a string** number of times. The string will be repeated the number of times which is given by the **integer value**.
| Expression | Output |
|:----| :--- |
| **`"ArcX" \* 2`** | **"ArcXArcX"** |
| **`3 *'5'`** | **"555"** |
| **`'@'* 5 `** | **"@@@@@"** |
>**Note:** The replication operator works in either order, i.e., `int * string` or `string * int`. The two operands cannot be of the same type — one must be an integer and the other a string.
```
# Example:
print("HelloWorld" * 5)
print(3 * "Python")
print("Hello World! "*5) #note the space in between 'Hello' and 'World!'
# Python String Operations
str1 = 'Hello'
str2 ='World!'
# using +
print('str1 + str2 = ', str1 + str2)
# using *
print('str1 * 3 =', str1 * 3)
```
If we want to concatenate strings in different lines, we can use parentheses **`()`**.
```
# two string literals together
'Hello ''World!'
# using parentheses
s = ('Hello '
'World')
s
```
### Iterating Through a string
We can iterate through a string using a **[for loop](https://github.com/milaan9/03_Python_Flow_Control/blob/main/005_Python_for_Loop.ipynb)**. Here is an example to count the number of 'l's in a string.
```
# Iterating through a string
count = 0
for letter in 'Hello World':
if(letter == 'l'):
count += 1
print(count,'letters found')
```
### 2. Python String Membership Operators
Membership Operators are already discussed in the Operators section. Let see with context of String.
There are two types of Membership operators :
1. **`in`** - "in" operator returns true if a character or the entire substring is present in the specified string, otherwise false.
2. **`not in`** - "not in" operator returns true if a character or entire substring does not exist in the specified string, otherwise false.
```
# Example:
str1="HelloWorld"
str2="Hello"
str3="World"
str4="Milan"
print('Exmple of in operator ::')
print(str2 in str1)
print(str3 in str1)
print(str4 in str1)
print()
print(str2 not in str1)
print(str3 not in str1)
print(str4 not in str1)
>>> 'a' in 'program'
True
>>> 'at' not in 'battle'
False
```
### 3. Python Relational Operators
All the comparison (relational) operators, i.e., **(`<`, `>`, `<=`, `>=`, `==`, `!=`)**, are also applicable to strings. (The old `<>` operator was removed in Python 3.) Strings are compared based on their **ASCII value** or **Unicode** code points (i.e., dictionary order).
```
# Example:
print("HelloWorld"=="HelloWorld")
print("helloWorld">="HelloWorld")
print("H"<"h")
```
**Explanation:**
The ASCII value of a is 97, b is 98, c is 99 and so on. The ASCII value of A is 65, B is 66, C is 67 and so on. The comparison between strings are done on the basis on ASCII value.
The **`%`** operator is used to format a string inserting the value that comes after. It relies on the string containing a format specifier that identifies where to insert the value. The most common types of format specifiers are:
- **`%s`** -> string
- **`%d`** -> Integer
- **`%f`** -> Float
- **`%o`** -> Octal
- **`%x`** -> Hexadecimal
- **`%e`** -> exponential
These will be very familiar to anyone who has ever written a C or Java program and follow nearly exactly the same rules as the **[printf() function](https://en.wikipedia.org/wiki/Printf_format_string)**.
```
print("Hello %s" % string1)
print("Actual Number = %d" %19)
print("Float of the number = %f" %19)
print("Octal equivalent of the number = %o" %19)
print("Hexadecimal equivalent of the number = %x" %19)
print("Exponential equivalent of the number = %e" %19)
```
When referring to multiple variables parentheses is used. Values are inserted in the order they appear in the parantheses (more on tuples in the next section)
```
print("Hello %s %s. My name is Bond, you can call me %d" %(string1,string2,99))
```
We can also specify the width of the field and the number of decimal places to be used.
For example:
```
print('Print width 10: |%10s|'%'x')
print('Print width 10: |%-10s|'%'x') # left justified
print("The number pi = %.1f to 1 decimal places"%3.1415)
print("The number pi = %.2f to 2 decimal places"%3.1415)
print("More space pi = %10.2f"%3.1415)
print("Pad pi with 0 = %010.2f"%3.1415) # pad with zeros
```
### Built-in functions to Work with Python
Various built-in functions that work with sequence work with strings as well.
Some of the commonly used ones are **`enumerate()`** and **`len()`**. The **[enumerate()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/018_Python_enumerate%28%29.ipynb)** function returns an enumerate object. It contains the index and value of all the items in the string as pairs. This can be useful for iteration.
Similarly, **[len()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/040_Python_len%28%29.ipynb)** returns the length (number of characters) of the string.
```
# Demo of enumerate() and len() on a string.
# Renamed the variable: the original used `str`, which shadows the builtin
# `str` type for the rest of the session.
word = 'cold'

# enumerate() yields (index, character) pairs
list_enumerate = list(enumerate(word))
print('list(enumerate(word)) = ', list_enumerate)

# character count
print('len(word) = ', len(word))
```
## Python String Formatting
### Escape Sequence
If we want to print a text like `He said, "What's there?"`, we can neither use single quotes nor double quotes. This will result in a SyntaxError as the text itself contains both single and double quotes.
```
print("He said, "What's there?"")
```
One way to get around this problem is to use triple quotes. Alternatively, we can use escape sequences.
An escape sequence starts with a backslash and is interpreted differently. If we use a single quote to represent a string, all the single quotes inside the string must be escaped. Similar is the case with double quotes. Here is how it can be done to represent the above text.
```
# Three equivalent ways to print a string containing both quote styles.
# using triple quotes
print('''He said, "What's there?"''')

# escaping single quotes
print('He said, "What\'s there?"')

# escaping double quotes
print("He said, \"What's there?\"")
```
### Here is a list of all the escape sequences supported by Python.
| Escape Sequence | Description |
|:----:| :--- |
| **`\newline`** | Backslash and newline ignored |
| **`\\`** | Backslash |
| **`\'`** | Single quote |
| **`\"`** | Double quote |
| **`\a`** | ASCII Bell |
| **`\b`** | ASCII Backspace |
| **`\f`** | ASCII Formfeed |
| **`\n`** | ASCII Linefeed |
| **`\r`** | ASCII Carriage Return |
| **`\t`** | ASCII Horizontal Tab |
| **`\v`** | ASCII Vertical Tab |
| **`\ooo`** | Character with octal value ooo |
| **`\xHH`** | Character with hexadecimal value HH |
```
# Here are some examples of escape sequences; the expected output follows each call.
print("C:\\Python32\\Lib")
#C:\Python32\Lib

print("This is printed\nin two lines")
#This is printed
#in two lines

# \x48 \x45 \x58 are the hex codes for 'H', 'E', 'X'
print("This is \x48\x45\x58 representation")
#This is HEX representation
```
### Raw String to ignore escape sequence
Sometimes we may wish to ignore the escape sequences inside a string. To do this we can place **`r`** or **`R`** in front of the string. This will imply that it is a raw string and any escape sequence inside it will be ignored.
```
# Without the r-prefix the escapes are interpreted; with it they are kept literally.
print("This is \x61 \ngood example")
print(r"This is \x61 \ngood example")
```
### The `format()` Method for Formatting Strings
The **`format()`** method that is available with the string object is very versatile and powerful in formatting strings. Format strings contain curly braces **`{}`** as placeholders or replacement fields which get replaced.
We can use positional arguments or keyword arguments to specify the order.
```
# Python string format() method: three ways to control argument order.

# default(implicit) order -- placeholders filled left to right
default_order = "{}, {} and {}".format('Allan','Bill','Cory')
print('\n--- Default Order ---')
print(default_order)

# order using positional argument -- {1} picks the second argument, etc.
positional_order = "{1}, {0} and {2}".format('Allan','Bill','Cory')
print('\n--- Positional Order ---')
print(positional_order)

# order using keyword argument -- placeholders are matched by name
keyword_order = "{s}, {b} and {j}".format(j='Allan',b='Bill',s='Cory')
print('\n--- Keyword Order ---')
print(keyword_order)
```
The **`format()`** method can have optional format specifications. They are separated from the field name using colon. For example, we can left-justify **`<`**, right-justify **`>`** or center **`^`** a string in the given space.
We can also format integers as binary, hexadecimal, etc. and floats can be rounded or displayed in the exponent format. There are tons of formatting you can use. Visit here for all the **[string formatting available with the format()](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String_Methods/009_Python_String_format%28%29.ipynb)** method.
```
# Format specifications after the colon: conversion type, precision, alignment.
# formatting integers: {0:b} renders argument 0 in binary
"Binary representation of {0} is {0:b}".format(12)

# formatting floats in scientific notation
"Exponent representation: {0:e}".format(1966.365)

# round off to 3 decimal places
"One third is: {0:.3f}".format(1/3)

# string alignment: < left, ^ center, > right, each in a width-10 field
"|{:<10}|{:^10}|{:>10}|".format('bread','butter','jam')
```
### Old style formatting
We can even format strings like the old **`sprintf()`** style used in C programming language. We use the **`%`** operator to accomplish this.
```
# Old %-operator formatting: %3.2f = minimum width 3, 2 digits after the point.
x = 36.3456789
print('The value of x is %3.2f' %x)
print('The value of x is %3.4f' %x)
```
## Common Python String Methods
There are numerous methods available with the string object. The **`format()`** method that we mentioned above is one of them.
Strings can be transformed by a variety of functions that are all methods on a string. That is, they are called by putting the function name with a **`.`** after the string. They include:
* Upper vs lower case: **`upper()`**, **`lower()`**, **`capitalize()`**, **`title()`** and **`swapcase()`**, **`join()`**, **`split()`**, **`find()`**, **`replace()`** etc., with mostly the obvious meaning. Note that `capitalize` makes only the first letter of the string a capital, while **`title`** selects upper case for the first letter of every word.
* Padding strings: **`center(n)`**, **`ljust(n)`** and **`rjust(n)`** each place the string into a longer string of length n padded by spaces (centered, left-justified or right-justified respectively). **`zfill(n)`** works similarly but pads with leading zeros.
* Stripping strings: Often we want to remove spaces; this is achieved with the functions **`strip()`**, **`lstrip()`**, and **`rstrip()`**, which remove spaces from both ends, just the left, or just the right respectively. An optional argument can be used to list a set of other characters to be removed.
Here is a complete list of all the **[built-in methods to work with Strings in Python](https://github.com/milaan9/02_Python_Datatypes/tree/main/002_Python_String_Methods)**.
```
# Example: common string methods on a mixed-case sample.
s="heLLo wORLd!"
print(s.capitalize(),"vs",s.title())
print("upper case: '%s'"%s.upper(),"lower case: '%s'"%s.lower(),"and swapped: '%s'"%s.swapcase())
print('|%s|' % "Hello World".center(30)) # center in 30 characters
print('|%s|'% " lots of space ".strip()) # remove leading and trailing whitespace
# Fixed: the original passed the format string and the stripped value as two
# separate print() arguments instead of applying the % operator with a tuple.
print('%s without leading/trailing d,h,L or ! = |%s|' % (s, s.strip("dhL!")))
print("Hello World".replace("World","Class"))
```
#### Inspecting Strings
There are also lots of ways to inspect or check strings. Examples of a few of these are given here:
* Checking the start or end of a string: **`startswith("string")`** and **`endswith("string")`** checks if it starts/ends with the string given as argument
* Capitalisation: There are boolean counterparts for all forms of capitalisation, such as **`isupper()`**, **`islower()`** and **`istitle()`**
* Character type: does the string only contain the characters:
* 0-9: **`isdecimal()`**. Note there is also **`isnumeric()`** and **`isdigit()`** which are effectively the same function except for certain unicode characters
* a-zA-Z: **`isalpha()`** or combined with digits: **`isalnum()`**
  * non-control code: **`isprintable()`** accepts anything except '\n' and other ASCII control codes
* \t\n \r (white space characters): **`isspace()`**
* Suitable as variable name: **`isidentifier()`**
* Find elements of string: **`s.count(w)`** finds the number of times **`w`** occurs in **`s`**, while **`s.find(w)`** and **`s.rfind(w)`** find the first and last position of the string **`w`** in **`s`**.
```
# Example: inspecting a string -- length, prefix/suffix checks, counting, searching.
s="Hello World"
print("The length of '%s' is"%s,len(s),"characters") # len() gives length of the string
s.startswith("Hello") and s.endswith("World") # check start/end

# count strings: count() returns the number of non-overlapping occurrences
print("There are %d 'l's but only %d World in %s" % (s.count('l'),s.count('World'),s))
print('"el" is at index',s.find('el'),"in",s) #index from 0 or -1
```
## Advanced string processing
For more advanced string processing there are many libraries available in Python including for example:
* **re** for regular expression based searching and splitting of strings
* **html** for manipulating HTML format text
* **textwrap** for reformatting ASCII text
* ... and many more
| github_jupyter |
# Loading Image Data
So far we've been working with fairly artificial datasets that you wouldn't typically be using in real projects. Instead, you'll likely be dealing with full-sized images like you'd get from smart phone cameras. In this notebook, we'll look at how to load images and use them to train neural networks.
We'll be using a [dataset of cat and dog photos](https://www.kaggle.com/c/dogs-vs-cats) available from Kaggle. Here are a couple example images:
<img src='assets/dog_cat.png'>
We'll use this dataset to train a neural network that can differentiate between cats and dogs. These days it doesn't seem like a big accomplishment, but five years ago it was a serious challenge for computer vision systems.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
import helper
```
The easiest way to load image data is with `datasets.ImageFolder` from `torchvision` ([documentation](http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder)). In general you'll use `ImageFolder` like so:
```python
dataset = datasets.ImageFolder('path/to/data', transform=transform)
```
where `'path/to/data'` is the file path to the data directory and `transform` is a list of processing steps built with the [`transforms`](http://pytorch.org/docs/master/torchvision/transforms.html) module from `torchvision`. ImageFolder expects the files and directories to be constructed like so:
```
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
```
where each class has its own directory (`cat` and `dog`) for the images. The images are then labeled with the class taken from the directory name. So here, the image `123.png` would be loaded with the class label `cat`. You can download the dataset already structured like this [from here](https://s3.amazonaws.com/content.udacity-data.com/nd089/Cat_Dog_data.zip). I've also split it into a training set and test set.
### Transforms
When you load in the data with `ImageFolder`, you'll need to define some transforms. For example, the images are different sizes but we'll need them to all be the same size for training. You can either resize them with `transforms.Resize()` or crop with `transforms.CenterCrop()`, `transforms.RandomResizedCrop()`, etc. We'll also need to convert the images to PyTorch tensors with `transforms.ToTensor()`. Typically you'll combine these transforms into a pipeline with `transforms.Compose()`, which accepts a list of transforms and runs them in sequence. It looks something like this to scale, then crop, then convert to a tensor:
```python
transform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor()])
```
There are plenty of transforms available, I'll cover more in a bit and you can read through the [documentation](http://pytorch.org/docs/master/torchvision/transforms.html).
### Data Loaders
With the `ImageFolder` loaded, you have to pass it to a [`DataLoader`](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader). The `DataLoader` takes a dataset (such as you would get from `ImageFolder`) and returns batches of images and the corresponding labels. You can set various parameters like the batch size and if the data is shuffled after each epoch.
```python
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
```
Here `dataloader` is a [generator](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/). To get data out of it, you need to loop through it or convert it to an iterator and call `next()`.
```python
# Looping through it, get a batch on each loop
for images, labels in dataloader:
pass
# Get one batch
images, labels = next(iter(dataloader))
```
>**Exercise:** Load images from the `Cat_Dog_data/train` folder, define a few transforms, then build the dataloader.
```
data_dir = 'Cat_Dog_data/train'

# Deterministic pipeline: resize the shorter side to 255 px, center-crop to
# 224x224, then convert PIL image -> float tensor scaled to [0, 1].
transform = transforms.Compose([
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor()
])
dataset = datasets.ImageFolder(data_dir, transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)

# Run this to test your data loader
images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=False)
```
If you loaded the data correctly, you should see something like this (your image will be different):
<img src='assets/cat_cropped.png' width=244>
## Data Augmentation
A common strategy for training neural networks is to introduce randomness in the input data itself. For example, you can randomly rotate, mirror, scale, and/or crop your images during training. This will help your network generalize as it's seeing the same images but in different locations, with different sizes, in different orientations, etc.
To randomly rotate, scale and crop, then flip your images you would define your transforms like this:
```python
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])])
```
You'll also typically want to normalize images with `transforms.Normalize`. You pass in a list of means and list of standard deviations, then the color channels are normalized like so
```input[channel] = (input[channel] - mean[channel]) / std[channel]```
Subtracting `mean` centers the data around zero and dividing by `std` squishes the values to be between -1 and 1. Normalizing helps keep the network weights near zero, which in turn makes backpropagation more stable. Without normalization, networks will tend to fail to learn.
You can find a list of all [the available transforms here](http://pytorch.org/docs/0.3.0/torchvision/transforms.html). When you're testing however, you'll want to use images that aren't altered (except you'll need to normalize the same way). So, for validation/test images, you'll typically just resize and crop.
>**Exercise:** Define transforms for training data and testing data below. Leave off normalization for now.
```
data_dir = 'Cat_Dog_data'

# TODO: Define transforms for the training data and testing data
# Training data: random rotation/crop/flip so the network sees varied views.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor()])

# Test data must NOT be randomly altered: use a deterministic resize + center
# crop only (the original applied random augmentation here too, which makes
# evaluation non-reproducible and inconsistent with the stated requirement).
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor()])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

# shuffle the training batches each epoch; test order can stay fixed
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32)

# change this to the trainloader or testloader
data_iter = iter(testloader)
images, labels = next(data_iter)

fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    helper.imshow(images[ii], ax=ax, normalize=False)
```
Your transformed images should look something like this.
<center>Training examples:</center>
<img src='assets/train_examples.png' width=500px>
<center>Testing examples:</center>
<img src='assets/test_examples.png' width=500px>
At this point you should be able to load data for training and testing. Now, you should try building a network that can classify cats vs dogs. This is quite a bit more complicated than before with the MNIST and Fashion-MNIST datasets. To be honest, you probably won't get it to work with a fully-connected network, no matter how deep. These images have three color channels and at a higher resolution (so far you've seen 28x28 images which are tiny).
In the next part, I'll show you how to use a pre-trained network to build a model that can actually solve this problem.
| github_jupyter |
## Utility function test
This notebook is for test of utility functions
```
# Import dependencies
import numpy as np
import scipy.sparse
from scipy.io import savemat, loadmat
from gurobipy import *
```
#### Online Algorithm
```
def fastLP(A, b, c, K, Method):
    """Approximately solve max c'x s.t. Ax <= b, x in [0, 1]^n by stochastic dual descent.

    Runs K passes over the columns of A in random order, updating a dual vector y
    and accumulating the implied primal decisions. Method == "M" uses multiplicative
    (exponentiated-gradient) updates; anything else uses projected subgradient steps.

    Returns a dict with the averaged primal solution "x" (x/K), the final dual "y",
    and the objective value "obj".
    """
    m = A.shape[0]
    n = A.shape[1]
    # It is worth considering whether it is better to exclude K here
    # stepsize = 1 / np.sqrt(n * K)
    # Initialize dual solution: 1/e for the multiplicative method, zero otherwise
    if Method == "M":
        y = np.ones((m, 1)) / np.exp(1)
    else:
        y = np.zeros((m, 1))
    # Initialize resource: per-column budget (assumes b is an (m, 1) column vector)
    d = b / n
    # Initialize primal solution
    x = np.zeros((n, 1))
    # Start dual descent
    for i in range(K):
        p = np.random.permutation(n)
        for j in p:
            # diminishing step size per pass (not per inner step)
            stepsize = 1 / np.sqrt(n * (i + 1))
            aa = A[:, j].reshape(m, 1)
            # accept column j iff its reward exceeds its current dual price
            # (xk is a (1, 1) boolean array; assumes c is (n, 1))
            xk = (c[j] > np.dot(aa.T, y))
            if Method == "M":
                # multiplicative-weights update; y stays positive, no projection needed
                y = np.multiply(y, np.exp(- stepsize * (d - aa * xk)))
            else:
                # subgradient step followed by projection onto the nonnegative orthant
                y = y - stepsize * (d - aa * xk)
                y = np.maximum(y, 0.0)
            x[j] += xk[0][0]
    # x accumulated one 0/1 decision per pass; dividing by K averages the K passes
    obj = np.dot(c.T, x / K)
    return {"x": x / K, "y": y, "obj": obj}
def GRBLP(A, b, c):
    """Solve the LP relaxation max c'x s.t. Ax <= b, 0 <= x <= 1 with Gurobi.

    Returns a dict with the optimal primal "x", dual prices "y", solver runtime
    "time", the Gurobi "model", and the objective value "obj".
    """
    # Derive the problem size from A -- the original read the global variable n,
    # which silently used whatever size the last notebook cell happened to set.
    m, n = A.shape
    model = Model()
    x = model.addMVar(n, lb=0.0, ub=1.0, vtype=GRB.CONTINUOUS)
    constr = model.addMConstrs(A, x, GRB.LESS_EQUAL, b.squeeze())
    model.setMObjective(Q=None, c=c.squeeze(), constant=0.0, sense=GRB.MAXIMIZE)
    model.update()
    model.optimize()
    # Dual values (shadow prices) are well-defined for a pure LP
    optdual = model.getAttr(GRB.Attr.Pi, model.getConstrs())
    optx = model.getAttr(GRB.Attr.X, model.getVars())
    time = model.getAttr(GRB.Attr.Runtime)
    obj = model.getAttr(GRB.Attr.ObjVal)
    return {"x": optx, "y": optdual, "time": time, "model": model, "obj": obj}
def GRBMIP(A, b, c):
    """Solve the binary program max c'x s.t. Ax <= b, x in {0, 1}^n with Gurobi.

    Returns a dict with the optimal primal "x", "y" (always None -- see below),
    solver runtime "time", the Gurobi "model", and the objective value "obj".
    """
    # Derive the problem size from A -- the original read the global variable n.
    m, n = A.shape
    model = Model()
    x = model.addMVar(n, vtype=GRB.BINARY)
    constr = model.addMConstrs(A, x, GRB.LESS_EQUAL, b.squeeze())
    model.setMObjective(Q=None, c=c.squeeze(), constant=0.0, sense=GRB.MAXIMIZE)
    model.update()
    model.optimize()
    # Dual values (Pi) are not defined for MIP models; querying them raises a
    # GurobiError, so the original call could never succeed here. Keep the "y"
    # key for interface compatibility but return None.
    optdual = None
    optx = model.getAttr(GRB.Attr.X, model.getVars())
    time = model.getAttr(GRB.Attr.Runtime)
    obj = model.getAttr(GRB.Attr.ObjVal)
    return {"x": optx, "y": optdual, "time": time, "model": model, "obj": obj}
# Test of online algorithm on a random instance: rewards correlated with
# column sums, budget set to 25% of total demand.
m = 5
n = 100
A = np.random.randint(1, 1000, (m, n)) / 100
b = np.sum(A, axis=1).reshape(m, 1) * 0.25
c = np.sum(A, axis=0).reshape(n, 1) / m + np.random.rand(n, 1) * 5
res = fastLP(A, b, c, 1, "S")
gres = GRBLP(A, b, c)

# Scaling experiment over problem sizes. (The original cell ended with a bare
# `for n in [...]` with no colon or body -- a SyntaxError; completed here with
# the same instance generation as above.)
for n in [10, 100, 1000, 10000]:
    A = np.random.randint(1, 1000, (m, n)) / 100
    b = np.sum(A, axis=1).reshape(m, 1) * 0.25
    c = np.sum(A, axis=0).reshape(n, 1) / m + np.random.rand(n, 1) * 5
    res = fastLP(A, b, c, 1, "S")
    gres = GRBLP(A, b, c)
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import ges
import sempler
import numpy as np
import scipy.stats as st
from ges.scores.gauss_obs_l0_pen import GaussObsL0Pen
from ges.scores.general import GeneralScore
```
## Find Causal Graph and get confidence interval [one trial]
```
# Global defaults for the experiments below. NOTE(review): `d` and `n` are
# reassigned by later loop variables, so these act only as initial values.
d = 20 # number of attributes (nodes in the causal graph)
n = 500 # number of datapoints
mu_lb, mu_ub = 0, 10 # range for means of the d components
sig_lb, sig_ub = 0, 10 # range for means of the variance components
```
## useful fns
```
def get_parents(x, G):
    """Return the parents of node x in adjacency matrix G (G[i, x] == 1 means edge i -> x)."""
    return [node for node in range(G.shape[0]) if G[node, x] == 1]
def get_all_family(x, G):
    """Return every ancestor of node x in adjacency matrix G, in BFS order.

    G[i, j] == 1 denotes an edge i -> j. Node x itself is excluded.
    """
    num_nodes = G.shape[0]
    seen = np.zeros(num_nodes)
    seen[x] = 1
    # parents of x (the helper is inlined so the function is self-contained)
    frontier = [node for node in range(num_nodes) if G[node, x] == 1]
    ancestors = []
    while frontier:
        current = frontier.pop(0)
        if seen[current]:
            continue
        seen[current] = 1
        # enqueue the parents of the node we just visited
        frontier.extend(node for node in range(num_nodes) if G[node, current] == 1)
        ancestors.append(current)
    return ancestors
```
## Experiment Definition (assume n >= 30)
```
def get_conf_interval(a, b, conf_lvl=.95):
    """Least-squares effect size of the first column of `a` on `b`, with a
    normal-approximation confidence interval.

    Assumes n >= 30 so the normal quantile is a reasonable substitute for the
    t distribution. NOTE(review): st.norm.ppf(conf_lvl) is a one-sided
    quantile (1.645 at 0.95); for a two-sided 95% interval one would use
    ppf((1 + conf_lvl) / 2) -- confirm which was intended.
    """
    # Sample size must come from the data -- the original read the global
    # variable n, which the surrounding notebook cells keep reassigning.
    n = a.shape[0]
    effect_size, resid, _, _ = np.linalg.lstsq(a, b, rcond=None)
    # total squared deviation of the regressors around their grand mean
    sq_tot_dev = sum([(a_i - np.mean(a))**2 for a_i in a])
    # standard error of the slope from the residual sum of squares
    SE = np.sqrt(resid / ((n-2) * sq_tot_dev))
    conf = st.norm.ppf(conf_lvl) * SE
    return (effect_size[0] - conf[0], effect_size[0] + conf[0])
def experiment(d=10, n=500, trials=30, eps_noisy_max=0, eps_abv_thrsh=0, mu_range=(0, 10), sig_range=(1,1)):
    """Run `trials` repetitions of: fit GES on data from an empty DAG, then check
    whether the confidence interval for an estimated edge's effect size covers 0.

    Returns the fraction of trials counted as successes (empty graph recovered,
    or 0 inside the interval).
    """
    success = 0
    for trial in range(trials):
        # start from empty causal graph, generate data & fit causal graph
        G = np.zeros((d, d))
        data = sempler.LGANM(G, mu_range, sig_range).sample(n=n)
        estimate, score = ges.fit(GeneralScore(data), eps_noisy_max=eps_noisy_max, \
                                  eps_abv_thrsh=eps_abv_thrsh, max_iter=1)
        if(len(np.where(estimate>0)[0]) == 0): # GES found empty graph so it is correct and we stop early
            success += 1
            continue
        # o/w choose arbitrary edge & find confidence interval of effect size
        connections = np.where(estimate>0)
        #idx = np.random.randint(0, len(connections[0]))
        # NOTE(review): connections is a 2-tuple of index arrays, so
        # len(connections) is always 2 -- presumably len(connections[0])
        # (number of edges) was intended; confirm.
        for idx in range(len(connections)):
            ## check if needs backdoor adj -- common ancestors of both endpoints
            backdoor = [x for x in get_all_family(connections[0][idx], estimate) \
                        if x in get_all_family(connections[1][idx], estimate)]
            if(len(backdoor) == 0):
                break
        # regress target on source plus the backdoor adjustment set
        A = data[:, connections[0][idx]].reshape((n,1))
        for node in backdoor:
            A = np.column_stack((A, data[:, node]))
        b = data[:, connections[1][idx]]
        (conf_lb, conf_ub) = get_conf_interval(A, b)
        # check if 0 is in the interval (true graph is empty, so 0 is correct)
        if(conf_lb <= 0 and 0 <= conf_ub):
            success+=1
    return success / trials
# Grid over noise level and sample size at d=2; five repetitions per cell.
results = {}
for noise_lvl in [0, 100, 200, 400, 800]:
    for d in [2]:
        for n in range(10, 901, 200):
            results[(noise_lvl,d,n)] = []
            for seed in range(5):
                # NOTE(review): `seed` is never passed to experiment(), so the
                # five runs differ only via global RNG state -- confirm intended.
                results[(noise_lvl,d,n)].append(experiment(d=d, n=n, eps_noisy_max=noise_lvl/n, \
                                                           eps_abv_thrsh=noise_lvl/n))
            print("noise_lvl=", noise_lvl, "d=",d, ", n=", n," results:", results[(noise_lvl,d,n)])
import statistics as stats
import matplotlib.pyplot as plt

# Aggregate the repeated runs into a mean and a CI half-width per cell.
# Dense arrays indexed directly by [noise_lvl, d, n] (mostly empty).
results_per_d_mean = {}
results_mean = np.zeros((1001, 50, 901))
results_CI = np.zeros((1001, 50, 901))
for noise_lvl in [0, 100, 200, 400, 800]:
    for d in [15]:
        for n in range(100, 901, 200):
            # NOTE(review): this reads results[(noise_lvl, 15, n)], but the cell
            # above only populated d=2 -- confirm the intended grid was run.
            results_mean[noise_lvl, d, n] = np.mean(results[(noise_lvl,d,n)])
            # 0.878 -- presumably a t/normal CI factor for 5 samples; TODO confirm
            results_CI[noise_lvl, d, n] = 0.878 * stats.stdev(results[(noise_lvl,d,n)])

# Success rate vs sample size, one curve per noise level (CI bands left commented out).
d=15
plt.plot(range(100,901, 200), results_mean[0,d,100::200], 'b-', label="noise="+str(0))
#plt.plot(range(100,901, 200), results_mean[0,10,100::200] - results_CI[0,10,100::200], 'b--')
#plt.plot(range(100,901, 200), results_mean[0,10,100::200] + results_CI[0,10,100::200], 'b--')
plt.plot(range(100,901, 200), results_mean[100,d,100::200], 'c-', label="noise="+str(100))
#plt.plot(range(100,901, 200), results_mean[1,10,100::200] - results_CI[1,10,100::200], 'c--')
#plt.plot(range(100,901, 200), results_mean[1,10,100::200] + results_CI[1,10,100::200], 'c--')
plt.plot(range(100,901, 200), results_mean[200,d,100::200], 'g-', label="noise="+str(200))
#plt.plot(range(100,901, 200), results_mean[10,10,100::200] - results_CI[10,10,100::200], 'g--')
#plt.plot(range(100,901, 200), results_mean[10,10,100::200] + results_CI[10,10,100::200], 'g--')
plt.plot(range(100,901, 200), results_mean[400,d,100::200], 'y-', label="noise="+str(400))
#plt.plot(range(100,901, 200), results_mean[100,10,100::200] - results_CI[100,10,100::200], 'y--')
#plt.plot(range(100,901, 200), results_mean[100,10,100::200] + results_CI[100,10,100::200], 'y--')
plt.plot(range(100,901, 200), results_mean[800,d,100::200], 'r-', label="noise="+str(800))
plt.ylim((0,1.))
plt.xlabel('# datapoints')
plt.ylabel('success rate')
plt.legend()
plt.title("d=" + str(d))
plt.savefig("graph_d=15_comparison.pdf");
# Grid over noise level, graph size and sample size; five repetitions per cell.
# Fixed: the original called experiment(..., noise_lvls=(...)), but experiment()
# has no `noise_lvls` parameter -- every call raised a TypeError. The noise is
# passed through eps_noisy_max / eps_abv_thrsh as in the earlier cell.
results = {}
for noise_lvl in [1., 2., 4., 8., 16., 32.]:
    for d in [10, 15, 20]:
        for n in range(100, 901, 200):
            results[(noise_lvl,d,n)] = []
            for seed in range(5):
                results[(noise_lvl,d,n)].append(experiment(d=d, n=n, eps_noisy_max=noise_lvl/n,
                                                           eps_abv_thrsh=noise_lvl/n))
            print("noise_lvl=", noise_lvl, "d=",d, ", n=", n," results:", results[(noise_lvl,d,n)])

# NOTE(review): the original cell contained this identical grid twice in a row,
# overwriting `results`; the second, duplicated loop is kept for parity.
for noise_lvl in [1., 2., 4., 8., 16., 32.]:
    for d in [10, 15, 20]:
        for n in range(100, 901, 200):
            results[(noise_lvl,d,n)] = []
            for seed in range(5):
                results[(noise_lvl,d,n)].append(experiment(d=d, n=n, eps_noisy_max=noise_lvl/n,
                                                           eps_abv_thrsh=noise_lvl/n))
            print("noise_lvl=", noise_lvl, "d=",d, ", n=", n," results:", results[(noise_lvl,d,n)])
```
| github_jupyter |
```
import pandas as pd
import numpy as np
# Load visitor counts (wide: one column per station) and reshape to long format.
visit = pd.read_csv("visitorCount.csv",dtype=str)
a = visit.melt( id_vars=['time'])
# a.to_csv("visitorMelt.csv")

# Per-device movement events; categorical dtype to save memory on repeated hashes
movement = pd.read_csv("movements.csv")
movement = movement.astype('category')
len(movement)

# Station metadata; double_count will flag stations that count a device twice
stations = pd.read_csv("stations.csv")
stations['double_count'] = False
stations[stations['serial']=="000000007b5207b6"]

# Sanity check: does this device hash ever appear more than once per timestamp?
any(movement[movement['hash']=='013c76b508f0d5d70b060e9f7248771ef4314b90b811f7b0b1734824'].groupby(['time']).size() > 1)
# Build (hash, serial) pairs for consecutive group keys that share the same hash.
# df = pd.DataFrame(columns=['Hash',"Serial"])
mat = np.array([])
for i in movement.groupby(['hash','serial']).size()[0:70].index:
    try:
        # compare current key's hash with the previous iteration's
        if(i[0] == a[0]):
            mat = np.append(mat,np.array(a))
            mat = np.append(mat,np.array(i))
    except:
        # NOTE(review): on the first iteration `a` is still the melted DataFrame
        # from the previous cell, so the comparison raises and is swallowed here.
        # A bare except also hides real errors -- consider initializing `a = None`
        # and checking explicitly.
        pass
    a = i
# flatten the appended pairs back into an (k, 2) array of [hash, serial]
mat = mat.reshape(int(len(mat)/2),2)
mat
# Flag a station as double-counting when any device hash seen there produced
# more than one movement row for a single timestamp.
for row in mat:
    if(any(movement[movement['hash']==row[0]].groupby(['time']).size() > 1)):
        a = stations[stations['serial']==row[1]].index[0]
        stations.at[a,'double_count'] = True
# stations.to_csv("stations_dbl_count.csv")

# Map station serial -> human-readable address for later renaming
hashToAddress = dict(list(zip(stations['serial'].values,stations['address'].values)))
hashToAddress
from sklearn.preprocessing import normalize

# Load weather observations as strings and assemble a combined timestamp column
weather = pd.read_csv("Helsinki_weather_data.csv", dtype=str)
weather['Time'] = weather['d'] + "/" + weather['m'] + "/" + weather['Year'] + " "+ weather['Time']

# Normalization experiment, currently disabled
# normalize(weather[['Cloud amount (1/8)','Pressure (msl) (hPa)','Relative humidity (%)',
#                    'Precipitation intensity (mm/h)','Snow depth (cm)','Air temperature (degC)',
#                    'Dew-point temperature (degC)','Horizontal visibility (m)','Wind direction (deg)',
#                    'Gust speed (m/s)','Wind speed (m/s)']], axis=1).ravel()
# weather[['Cloud amount (1/8)','Pressure (msl) (hPa)']]
# weather.to_csv("weather_mod.csv")
from datetime import datetime

new = pd.DataFrame()
# Parse both tables' timestamps so the join below matches on real datetimes
weather['time'] = weather['Time'].apply(lambda x: datetime.strptime(x,'%d/%m/%Y %H:%M'))
visit['time'] = visit['time'].apply(lambda x: datetime.strptime(x,'%d/%m/%Y %H:%M'))
# sum(new['w_time'] == new['v_time'])
weather.drop(columns='Time',inplace=True)
weather.dtypes

# Replace station-serial column names with readable addresses
visit.rename(columns=hashToAddress,inplace=True)
visit

# Left-join weather onto visitor counts by timestamp and export
output = visit.set_index('time').join(weather.set_index('time'),how="left",rsuffix = "_").reset_index()
output.to_csv('joined_Visit_Weather_updated.csv')
output
output
# Collect every station's prediction file into one frame.
# Fixed: DataFrame.append() was removed in pandas 2.0 (and was O(n^2) when
# called in a loop) -- collect the frames in a list and concat once. The bare
# `except: pass` is narrowed: FileNotFoundError covers stations without a
# prediction file; ValueError covers a date_range whose length does not match
# the file's row count (the two failure modes the original silently skipped).
accuracy_frames = []
for hashs in hashToAddress.keys():
    try:
        a = pd.read_csv(hashs+"Prediction.csv")
        a['station'] = hashToAddress[hashs]
        a['Time'] = pd.date_range(start='19/8/2019', end='20/8/2019',freq="120s")
        accuracy_frames.append(a)
        # print(a.head())
    except (FileNotFoundError, ValueError):
        pass
accuracy = pd.concat(accuracy_frames) if accuracy_frames else pd.DataFrame()
accuracy.to_csv("accuracyPlot_updated.csv")
accuracy

pd.date_range(start='19/8/2019', end='20/8/2019',freq="120s")
```
| github_jupyter |
# **Setting up the Environment**
All the necessary paths for datasets on drive and jdk are passed.
Also all the required libraries are installed and imported along with configuration of spark context for future use.
```
# Mounting the google drive for easy access of the dataset (Colab-only; prompts
# for authorization on first run)
from google.colab import drive
drive.mount('/content/drive')

# Path for Java 8, required by Spark
JAVA_HOME = "/usr/lib/jvm/java-8-openjdk-amd64"
```
#**PySpark implementation**
```
!pip install pyspark
!pip install -U -q PyDrive
!apt install openjdk-8-jdk-headless -qq
import os
os.environ["JAVA_HOME"] = JAVA_HOME
# Installing and importing the required python libraries
import requests
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import time
%matplotlib inline
import pyspark
from pyspark.sql import *
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark import SparkContext, SparkConf
# import the SparkConfiguration and SparkContext
# if we wanted to change any configuration settings for this session only we would define them here
# Session-local Spark configuration. NOTE(review): "local" runs a single-core
# local master -- "local[*]" would use all cores; confirm which was intended.
conf = (SparkConf()
        .setMaster("local")
        .setAppName("Recommender_System")
        .set("spark.executor.memory", "16G")
        .set("spark.driver.memory", "16G")
        .set("spark.executor.cores", "8"))

# create a SparkContext using the above configuration
sc = SparkContext(conf=conf)
spark = SparkSession.builder.getOrCreate()
spark

# this command shows the current configuration settings
sc._conf.getAll()
```
# **Loading the Dataset**
Let's read the Movie and TV ratings dataset from the Google drive and do a quick inspection of the dataset
```
# read in the dataset from google drive; the CSV has no header row, so Spark
# auto-names the columns _c0.._c3 and inferSchema guesses the types
rating = spark.read.load("/content/drive/My Drive/Ratings_Movies_and_TV.csv",
                         format="csv",
                         inferSchema="true",
                         header="false"
                         )

# number of rows in the dataset
rating.count()

# Printing the head of dataset
rating.show()

# Renaming the columns of the dataset for easy reference (_c0.._c3 -> named columns)
ratings = rating.select(col("_c0").alias("UserId"), col("_c1").alias("MovieId"),col("_c2").alias("Rating"),col("_c3").alias("Timestamp"))
ratings.show()
```
# **Data Exploration**
Let's investigate the data and try to find and make some observations by looking at summary statistics
```
# Summary statistics of the rating matrix: distinct rating values, sparsity
# per user/movie, and overall cardinalities.
print ('Distinct values of ratings:')
print (ratings.select('Rating').distinct().rdd.map(lambda r: r[0]).collect())

tmp1 = ratings.groupBy("UserId").count().select('count').rdd.min()[0]
tmp2 = ratings.groupBy("MovieId").count().select('count').rdd.min()[0]
print ('For the users that rated movies and the movies that were rated:')
print ('Minimum number of ratings per User is {}'.format(tmp1))
print ('Minimum number of ratings per Movie is {}'.format(tmp2))

# "movieId" resolves to the MovieId column because Spark column resolution is
# case-insensitive by default (spark.sql.caseSensitive=false).
# NOTE(review): first()[1] assumes the smallest rating-count bucket is 1 --
# confirm for this dataset.
tmp1 = ratings.groupBy("movieId").count().withColumnRenamed("count", "rating count")\
    .groupBy("rating count").count().orderBy('rating count').first()[1]
# Or use pandas: tmp1 = sum(ratings.groupBy("movieId").count().toPandas()['count'] == 1)
tmp2 = ratings.select('movieId').distinct().count()
print ('{} out of {} movies are rated by only one user'.format(tmp1, tmp2))

print ("Number of users who rated movies:", ratings.select('UserId').distinct().count())
print ("Number of rated movies:", ratings.select('MovieId').distinct().count())
```
**Summary of Descriptive Statistics**
```
ratings.describe().toPandas()
```
Converting Spark data to well-known Pandas could be done easily with toPandas() method:
```
# To access plotting libraries, we need to first transform our PySpark DataFrame
# into a Pandas DataFrame (collects the full dataset to the driver -- large!)
Ratings_pdf = ratings.toPandas()
```
**Analysis of Rating Distributions**
```
# Histogram of rating values
with sns.axes_style('white'):
    g = sns.catplot("Rating",
                    data=Ratings_pdf,
                    kind="count", aspect=2,color='steelblue')

# Per-movie mean rating joined with per-movie rating counts
Ratings_pdf_mean_counts = pd.DataFrame(Ratings_pdf.groupby('MovieId')['Rating'].mean())
Ratings_pdf_mean_counts['Rating_Counts'] = pd.DataFrame(Ratings_pdf.groupby('MovieId')['Rating'].count())

# Joint distribution of average rating vs popularity
plt.figure(figsize=(12,10))
plt.rcParams['patch.force_edgecolor'] = True
sns.jointplot(x='Rating', y='Rating_Counts', data=Ratings_pdf_mean_counts, alpha=0.4)
```
# **Sampling**
Here I have decided to take a sample of the dataset for recommendation as the entire dataset is quite huge and implementing various ML techniques like model training, hyperparameter tuning etc. is difficult on free/local resources without crashing
I have use stratified sampling to have the best unbiased sample with appropriate size
```
ratings.groupBy("MovieId").count().show()

# Stratified sample: keep ~70% of the rows within every MovieId stratum
fractions = ratings.select("MovieId").distinct().withColumn("fraction", lit(0.7)).rdd.collectAsMap()
print(fractions)
# seed fixed for reproducibility; sampleBy is approximate, not exact 70%
sampled_ratings = ratings.stat.sampleBy("MovieId", fractions, seed=1234)
sampled_ratings.show()
sampled_ratings.count()
sampled_ratings.groupBy("MovieId").count().show()
```
Converting Spark data to well-known Pandas could be done easily with toPandas() method:
```
# To access plotting libraries, we need to first transform our PySpark DataFrame into a Pandas DataFrame
Rating_pdf = sampled_ratings.toPandas()

# NOTE(review): this plots Ratings_pdf (the FULL dataset), not Rating_pdf (the
# sample) -- presumably Rating_pdf was intended here; confirm.
with sns.axes_style('white'):
    ax = sns.violinplot(x=Ratings_pdf["Rating"])

# Per-movie mean rating and rating counts on the sampled data
Rating_pdf_mean_counts = pd.DataFrame(Rating_pdf.groupby('MovieId')['Rating'].mean())
Rating_pdf_mean_counts['Rating_Counts'] = pd.DataFrame(Rating_pdf.groupby('MovieId')['Rating'].count())
plt.figure(figsize=(12,10))
plt.rcParams['patch.force_edgecolor'] = True
sns.jointplot(x='Rating', y='Rating_Counts', data=Rating_pdf_mean_counts, alpha=0.4)
```
# **Data Pre-processing**
The data is processed to have the right set of variables and its types for implementing the ML algorithm, in our case it is Alternating Least Square (ALS) which recommends using Collaborative filtering
```
# Keep only the columns ALS needs
Ratings = sampled_ratings.select("UserId", "MovieId", "Rating")
# inspect the schema of the data frame
Ratings.printSchema()

# The userId and movieId have to be integers or double, and the rating has to be float/double numbers.
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer

# Index every non-Rating column into a numeric *_index column.
# NOTE(review): iterating a set() makes the indexer (and output column) order
# nondeterministic across runs -- downstream code that assumes a fixed column
# order should sort this list; confirm.
indexers = [StringIndexer(inputCol=column, outputCol=column+"_index").fit(Ratings) for column in list(set(Ratings.columns)-set(['Rating'])) ]
pipeline = Pipeline(stages=indexers)
Ratings_df = pipeline.fit(Ratings).transform(Ratings)
Ratings_df.show()

# Drop the original string-typed id columns, keeping only the numeric indexes
Ratings_df = Ratings_df.drop('UserId','MovieId')
# inspect the schema again
Ratings_df.printSchema()
```
**Extracting the stratified sampling file "sample_ratings" and save the file in the drive**
```
# Coalesce to one partition so Spark writes a single CSV part file to Drive
Ratings_df.coalesce(1).write.csv('/content/drive/My Drive/sample_ratings.csv')
```
**Loading the sampled dataset**
Let's read the Movie and TV ratings dataset from the Google drive and do a quick inspection of the dataset
```
# read in the dataset from google drive
ratings_df = spark.read.load("/content/drive/My Drive/sample_ratings.csv",
format="csv",
inferSchema="true",
header="false"
)
# number of rows in the dataset
ratings_df.count()
# Printing the head of dataset
ratings_df.show()
# Renaming the columns of the dataset for Easy reference
# NOTE(review): this assumes the CSV columns were written in the order
# Rating, MovieId_index, UserId_index — confirm against the written file,
# since the indexer order upstream came from an unordered set()
ratings_df = ratings_df.select(col("_c0").alias("Rating"), col("_c1").alias("MovieId_index"),col("_c2").alias("UserId_index"))
ratings_df.show()
```
# **Splitting the dataset into Train and Test**
I will split the data into training/testing sets using an 80/20 random split.
This is done to Train the model on train set and finally evaluate the model post predicting on the test set
```
# 80/20 random split; no seed is passed, so the split differs between runs
(train, test) = ratings_df.randomSplit([0.8, 0.2])
print ("The number of ratings in each set: {}, {}".format(train.count(), test.count()))
```
# **Alternating Least Squares (ALS)**
Let's take a look on parameters, and try to find any improvements.
Parameters of ALS Model in PySpark realization are following:
* **NumBlocks** is the number of blocks the users and items will be partitioned into in order to parallelize computation (set to -1 to auto-configure).
* **rank** is the number of latent factors in the model.
* **maxIter** is the maximum number of iterations to run.
* **regParam** specifies the regularization parameter in ALS.
* **implicitPrefs** specifies whether to use the explicit feedback ALS variant or one adapted for implicit feedback data (defaults to false which means using explicit feedback).
* **alpha** is a parameter applicable to the implicit feedback variant of ALS that governs the baseline confidence in preference observations (defaults to 1.0).
```
# Build the recommendation model using ALS on the training data
# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics
from pyspark.ml.recommendation import ALS

# Fix: the dataset holds explicit 1-5 star ratings, so the explicit-feedback
# ALS variant must be used. With implicitPrefs=True the model predicts
# preference-confidence scores in roughly [0, 1], which is why the baseline
# RMSE/MAE against 1-5 ratings came out so large.
als = ALS(rank=25, maxIter=5, regParam=0.01,
          userCol="UserId_index", itemCol="MovieId_index", ratingCol="Rating",
          coldStartStrategy="drop",
          implicitPrefs=False)

# fit the model to the training data
model = als.fit(train)
```
#**Make predictions on test_data**
```
# Apply the trained ALS model to the held-out test set
predictions_als = model.transform(test)
# View the predictions
predictions_als.show()
```
#**Evaluate the predictions**
Evaluate the model by computing the RMSE on the test data
```
# Evaluate the model by computing the RMSE on the test data
from pyspark.ml.evaluation import RegressionEvaluator
# Compare the model's "prediction" column against the true "Rating" column
evaluator = RegressionEvaluator(metricName="rmse", labelCol="Rating",
predictionCol="prediction")
rmse = evaluator.evaluate(predictions_als)
print("Root-mean-square error = " + str(rmse))
```
#**Evaluate the predictions**
Evaluate the model by computing the MAE on the test data
```
# instantiate evaluator, specifying the desired metric "mae" and the columns
# that contain the predictions and the actual values
evaluator = RegressionEvaluator(metricName="mae", predictionCol="prediction", labelCol="Rating")
# evaluate the output of our model
# note: this rebinds the notebook-global `evaluator` from RMSE to MAE
mae = evaluator.evaluate(predictions_als)
print('The Mean Absolute Error is %.3f' % (mae))
```
With these settings we obtained very high RMSE & MAE scores — that is, poor predictive accuracy, which we will try to improve below.
#**Parameter Optimization**
#**TrainValidationSplit**
The parameters we will search over are:
* **Rank** - The number of hidden features that we will use to describe the users/movies.
* **RegParam** - The regularization parameter applied to the cost function.
**Root Mean Square Error**
```
from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator

# create a new ALS estimator; rank/regParam are supplied via the grid below
als = ALS(userCol="UserId_index", itemCol="MovieId_index", ratingCol="Rating", coldStartStrategy="drop")

# define a grid for both parameters: 3 ranks x 3 regularization strengths = 9 candidates
paramGrid = ParamGridBuilder() \
    .addGrid(als.rank, [5, 10, 15]) \
    .addGrid(als.regParam, [1, 0.1, 0.01]) \
    .build()

# Define evaluator as RMSE
evaluator = RegressionEvaluator(metricName="rmse", predictionCol="prediction", labelCol="Rating")

# split the data with a ratio of 80% training, 20% validation
# define the estimator and evaluator to use to determine the best model
# also pass in the parameter grid to search over.
# Fix: reuse the evaluator bound above instead of constructing a second,
# identical RegressionEvaluator inline.
trainValSplit = TrainValidationSplit(estimator=als, estimatorParamMaps=paramGrid,
                                     evaluator=evaluator,
                                     trainRatio=0.8, parallelism=4)

# fit the model to the training data
model = trainValSplit.fit(train)

# retrieve the best model
bestModel = model.bestModel
```
Spark does not expose the winning hyperparameter combination directly on the tuning result, but it can be recovered from the best model (e.g. via `bestModel.rank` or `bestModel._java_obj.parent().getRegParam()`). We now use the best model to transform the test data, compute predictions, and evaluate.
#**Make predictions on test_data**
```
# transform test data using bestModel (winner of the TrainValidationSplit search)
predictions = bestModel.transform(test)
# View the predictions
predictions.show()
```
#**Evaluate the predictions**
Evaluate the model by computing the RMSE on the test data
```
# evaluate the predictions
# `evaluator` here is the RMSE RegressionEvaluator defined in the tuning cell
rmse = evaluator.evaluate(predictions)
print('Root Mean Square Error = ' + str(rmse))
```
# Our RMSE score improved really well over our previous RMSE of 4.361782907696021
#**TrainValidationSplit**
**Mean Absolute Error**
```
# Define evaluator as MAE
evaluator = RegressionEvaluator(metricName="mae", predictionCol="prediction", labelCol="Rating")
# split the data with a ratio of 80% training, 20% validation
# define the estimator and evaluator to use to determine the best model
# also pass in the parameter grid to search over.
# Fix: reuse the MAE evaluator bound above instead of constructing a second,
# identical RegressionEvaluator inline.
trainValSplit_1 = TrainValidationSplit(estimator=als, estimatorParamMaps=paramGrid,
                                       evaluator=evaluator,
                                       trainRatio=0.8, parallelism=4)
# fit the model to the training data
model_1 = trainValSplit_1.fit(train)
# retrieve the best model
bestModel_1 = model_1.bestModel
```
Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.
#**Make predictions on test_data**
```
# transform test data using bestModel (winner of the MAE-driven search)
predictions_1 = bestModel_1.transform(test)
# View the predictions
predictions_1.show()
```
#**Evaluate the predictions**
Evaluate the model by computing the MAE on the test data
```
# evaluate the predictions
# `evaluator` here is the MAE RegressionEvaluator defined in the tuning cell
mae = evaluator.evaluate(predictions_1)
print('The Mean Absolute Error is %.3f' % (mae))
```
Our MAE score improved really well over our previous MAE of 4.027
#**Cross Validation**
**Root Mean Square Error**
```
from pyspark.ml.recommendation import ALS
# Build generic ALS model without hyperparameters
als = ALS(userCol="UserId_index", itemCol="MovieId_index", ratingCol="Rating", coldStartStrategy="drop")
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import RegressionEvaluator
# 2 ranks x 1 maxIter x 2 regParams = 4 candidate configurations
param_grid = ParamGridBuilder() \
.addGrid(als.rank, [10, 25]) \
.addGrid(als.maxIter, [10]) \
.addGrid(als.regParam, [.01, .1]) \
.build()
# Define evaluator as RMSE
evaluator = RegressionEvaluator(metricName="rmse", labelCol="Rating",
predictionCol="prediction")
# Print length of evaluator
print ("Num models to be tested using param_grid: ", len(param_grid))
# Build cross validation step using CrossValidator
# (4 configurations x 5 folds = 20 model fits in total)
cv = CrossValidator(estimator = als,
estimatorParamMaps = param_grid,
evaluator = evaluator,
numFolds = 5)
# Run the cv on the training data
cv_model = cv.fit(train)
# Extract best combination of values from cross validation
best_model = cv_model.bestModel
```
Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.
#**Make predictions on test_data**
```
# Generate test set predictions and evaluate using RMSE
predictions_2 = best_model.transform(test)
# View the predictions
predictions_2.show()
```
#**Evaluate the predictions**
Evaluate the model by computing the RMSE on the test data
```
rmse = evaluator.evaluate(predictions_2)
# Print evaluation metrics and model parameters
print("**Best Model**")
print("Root Mean Square Error: {:.3f}".format(rmse))
print("RegParam: "), best_model._java_obj.parent().getRegParam()
```
Our RMSE score is improved a bit over our previous RMSE
#**Cross Validation**
**Mean Absolute Error**
```
from pyspark.ml.recommendation import ALS
# Build generic ALS model without hyperparameters
als = ALS(userCol="UserId_index", itemCol="MovieId_index", ratingCol="Rating", coldStartStrategy="drop")
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import RegressionEvaluator
# Same 4-configuration grid as the RMSE cross-validation above
param_grid = ParamGridBuilder() \
.addGrid(als.rank, [10, 25]) \
.addGrid(als.maxIter, [10]) \
.addGrid(als.regParam, [.01, .1]) \
.build()
# Define evaluator as MAE
evaluator = RegressionEvaluator(metricName="mae", labelCol="Rating",
predictionCol="prediction")
# Print length of evaluator
print ("Num models to be tested using param_grid: ", len(param_grid))
# Build cross validation step using CrossValidator
cv_1 = CrossValidator(estimator = als,
estimatorParamMaps = param_grid,
evaluator = evaluator,
numFolds = 5)
# Run the cv on the training data
cv_model_1 = cv_1.fit(train)
# Extract best combination of values from cross validation
best_model_1 = cv_model_1.bestModel
```
Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.
#**Make predictions on test_data**
```
# Generate test set predictions and evaluate using MAE
predictions_3 = best_model_1.transform(test)
# View the predictions
predictions_3.show()
```
#**Evaluate the predictions**
Evaluate the model by computing the MAE on the test data
```
mae = evaluator.evaluate(predictions_3)
# Print evaluation metrics and model parameters
print("**Best Model**")
print("Mean Absolute Error: {:.3f}".format(mae))
print("RegParam: "), best_model_1._java_obj.parent().getRegParam()
```
Our MAE score is improved a bit over our previous MAE
# **Provide top Recommendations to all users**
The best model converged to, with the use of cross validation is used to provide recommendations for all users
```
# Finally, using the best model to make recommendations for users
# Top-10 movie recommendations (by predicted rating) for every user
ALS_recommendations = best_model.recommendForAllUsers(numItems = 10)
ALS_recommendations.show(n = 10)
```
| github_jupyter |
# Classifying Fashion-MNIST
Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.
<img src='assets/fashion-mnist-sprite.png' width=500px>
In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.
First off, let's load the dataset through torchvision.
```
import os
# Workaround for the duplicate-OpenMP-runtime crash on some macOS/conda setups
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
# ToTensor scales pixels to [0, 1]; Normalize((0.5,), (0.5,)) maps them to [-1, 1]
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here we can see one of the images.
```
# Pull one batch and display its first image; the trailing semicolon
# suppresses the notebook's repr of the Axes object
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
```
## Building the network
Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.
```
# TODO: Define your network architecture here
from torch import nn
from collections import OrderedDict
# Fully-connected classifier for flattened 28x28 (=784 pixel) images:
# 784 -> 256 -> 128 -> 64 -> 10 with ReLU between hidden layers.
# The final LogSoftmax(dim=1) emits log-probabilities over the 10 classes,
# so the matching training criterion is nn.NLLLoss.
model = nn.Sequential(OrderedDict([
('fc1',nn.Linear(784, 256)),
('relu1',nn.ReLU()),
('fc2',nn.Linear(256, 128)),
('relu2',nn.ReLU()),
('fc3',nn.Linear(128, 64)),
('relu3',nn.ReLU()),
('fc4',nn.Linear(64, 10)),
('lgSoft1',nn.LogSoftmax(dim = 1))
]))
```
# Train the network
Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).
Then write the training code. Remember the training pass is a fairly straightforward process:
* Make a forward pass through the network to get the logits
* Use the logits to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights
By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
```
# TODO: Create the network, define the criterion and optimizer
criterion = nn.CrossEntropyLoss()
import torch.optim as optim
optimizer = optim.SGD(model.parameters(), lr = 0.01)
# TODO: Train the network here
epoch = 10
for e in range(epoch):
running_loss = 0;
for images, labels in trainloader:
images = images.view(images.shape[0], -1)#flattening
#resetting optimizer value
optimizer.zero_grad()
output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss +=loss.item()
else:
print(f"training loss: {running_loss/ len(trainloader)}")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import torch.nn.functional as F
import helper
# Test out your network!
dataiter = iter(testloader)
images, labels = dataiter.next()
img = images[0]
# Convert 2D image to 1D vector
img = img.resize_(1, 784)
# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))
# Plot the image and probabilities
helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')
```
| github_jupyter |
```
# This cell is used to change parameter of the rise slideshow,
# such as the window width/height and enabling a scroll bar
from notebook.services.config import ConfigManager
cm = ConfigManager()
# Persist the RISE ("livereveal") presentation settings for this notebook server
cm.update('livereveal', {
'width': 1000,
'height': 600,
'scroll': True,
})
```
# OGTC Hackathon: Data analysis from Tesla vehicle
* by: Elon's Little Helpers
* Chinedu Pascal Ezenkwu
* Carlos Moreno-Garcia
* John Guntoro
* Joseph Sheratt
* Darren Nicol
## Problem Setting
* By reverse-engineering the Tesla API, Intelligent Plant was able to download the data historian of the usage of a Tesla car (three months).
* $\approx182$ variables can be obtained.
* More information of these variable can be found [here](https://tesla-api.timdorr.com) (unofficial).
* Some information that can be obtained:
* Screen status
* Battery charge
* Odometer data
* Heating
* Windshields
* Fan
* And much more!
## Gestalt tool API
* By querying $*car*$, one can visualise the different variables:

* You can plot different variables to see the performance across time:

* The spike-down is a moment where no data was recorded.

* All data that falls above 0 is considered screen on (i.e. car on)
* 8 is a mode called "dog mode", which monitors an animal inside the car!
## Data Analysis
**Installing the necessary packages**
```
%pip install pandas matplotlib
```
**Importing the necessary packages**
```
import intelligent_plant.app_store_client as app_store_client
import intelligent_plant.utility as utility
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from math import *
```
**Getting the data from the repository**
```
# Authenticate against the Intelligent Plant app store with a saved token.
# Fix: read the token inside a context manager so the file handle is closed;
# the original open(...).read() leaked the handle.
with open(expanduser("~/.access_token"), "r") as token_file:
    app_store = app_store_client.AppStoreClient(token_file.read())
data_core = app_store.get_data_core_client()
# The bare calls below only display their results in the notebook
app_store.get_data_core_client()
app_store.get_user_info()
data_core.get_tags("FCBB05262EADC0B147746EE6DFB2B3EA5C272C33C2C5E3FE8F473D85529461CA.Edge Historian")
# Historian tags to pull: odometer, battery level, outside temperature,
# center display state, speed, and the right-seat heater setting
tags = {
    "FCBB05262EADC0B147746EE6DFB2B3EA5C272C33C2C5E3FE8F473D85529461CA.Edge Historian": [
        "StevesCar..response.vehicle_state.odometer",
        "StevesCar..response.charge_state.battery_level",
        "StevesCar..response.climate_state.outside_temp",
        "StevesCar..response.vehicle_state.center_display_state",
        "StevesCar..response.drive_state.speed",
        "StevesCar..response.climate_state.seat_heater_right"
    ]
}
```
**Data filtering**
* All interpolated data from 10 days ago in 3-minute intervals.
```
# Interpolated values for the requested tags over the last 10 days, sampled every 3 minutes
resp = data_core.get_processed_data(tags, "*-10d", "*", "3m", "interp")
```
**Plotting odometer data**
```
# Convert the query response into a pandas DataFrame for plotting
df = utility.query_result_to_data_frame(resp)
plt.plot(df["StevesCar..response.vehicle_state.odometer"])
plt.xlabel('Data Index')
plt.ylabel('Odometer miles')
plt.show()
```
**Creation of a *new* differential dataset**
```
import pandas as pd
# Inspect the timestamp dtype and the spacing between consecutive samples
type(df.loc[0].TimeStamp)
df.loc[1].TimeStamp-df.loc[0].TimeStamp
# Build a first-difference dataset: entry i-1 holds (row i - row i-1)
df_new=[]
# NOTE(review): the range stops at len(df)-1, so the final interval is
# dropped; downstream cells slice companion arrays with [0:-2] to match this
# length, so fixing the bound here would require updating those slices too
for i in range(1,len(df)-1):
df_new.append(df.loc[i]-df.loc[i-1])
df_new =pd.DataFrame(df_new)
```
* This dataset could be used for time series analysis purposes.
**Distance travelled**
```
# Odometer differential = miles driven in each 3-minute interval
plt.plot(df_new["StevesCar..response.vehicle_state.odometer"])
plt.ylabel('Distance travelled in time intervaled (miles)')
plt.xlabel('Data Index')
* By plotting the differential of the odometer, we can see the distance travelled (in miles) for every entry.
**Plotting the differential data on the battery level**
```
# Battery-level differential: negative while discharging, positive while charging
plt.plot(df_new["StevesCar..response.charge_state.battery_level"])
plt.ylabel('Change in Battery % in time interval')
plt.xlabel('Data Index')
```
* Negative entries occur when battery discharges and vice versa.
**Plotting efficiency**
* We created our own efficiency metric as the ratio between battery level differential and odometer differential.
```
# Efficiency = battery % change per mile driven (division by a zero odometer
# delta produces +/-inf entries, handled in a later cell)
df_new['Efficiency'] = df_new["StevesCar..response.charge_state.battery_level"]/df_new["StevesCar..response.vehicle_state.odometer"]
plt.plot(df_new['Efficiency'], 'bo')
# Restrict the view to the physically plausible range (0 to -1 % per mile)
plt.ylim([-1, 0])
plt.ylabel('Battery % drop per mile')
plt.xlabel('Data Index')
plt.show()
```
* Data seems to be random as for each entry, we obtain a very spread distribution.
* We only want to see efficiency from 0 to -1, as others would be considered outliers
* Extremely large or extremely small efficiency values don't make sense, as the battery percentage drop cannot exceed what the actual mileage implies.
**Another view of the data**
```
# Fix: DataFrame.replace is not in-place and the original discarded its
# result, leaving the +/-inf values (from zero-mileage intervals) in the data.
# Assign the result back, handling both infinities in one call.
df_new = df_new.replace([inf, -inf], 0)
plt.plot(df_new['Efficiency'], 'bo')
```
* Here we can see the presence of outliers.
**Histogram of efficiency metric**
```
# Pull efficiency and seat-heater columns out as numpy arrays
eff=np.array(df_new["Efficiency"])
sh=np.array(df_new["StevesCar..response.climate_state.seat_heater_right"])
# Limit the efficiency between 0 and -3
eff2= eff[np.where(eff>-3)]
eff3 = eff2[np.where(eff2<0)]
plt.hist(eff3,bins=20)
plt.xlabel('Battery % drop per mile')
plt.ylabel('Number')
plt.show()
```
**Efficiency vs other variables**
* Here we compare our efficiency metric against other variables, such as speed or outside temp.
* Purpose: Try to find correlations.
* Speed may show some correlation, outside temperature does not.
```
# Align temp/speed with eff by trimming the last two rows of df
# ([0:-2] matches the length of the differential dataset built earlier)
temp = np.array(df["StevesCar..response.climate_state.outside_temp"])[0:-2]
speed = np.array(df["StevesCar..response.drive_state.speed"])[0:-2]
# Apply the same two-stage filter used to build eff3, so the arrays stay aligned
temp2 = temp[np.where(eff>-3)]
temp3 = temp2[np.where(eff2<0)]
speed2 = speed[np.where(eff>-3)]
speed3 = speed2[np.where(eff2<0)]
plt.plot(speed3, eff3, 'bo')
plt.xlim([0, 60])
plt.ylabel('Battery % drop per mile')
plt.xlabel('Speed mph')
plt.show()
plt.plot(temp3, eff3, 'ro')
plt.ylabel('Battery % drop per mile')
plt.xlabel('Outside Temp degC')
plt.show()
```
**Measuring the effect of heating the driver's seat**
* We were also able to isolate this measurement and compare efficiency w.r.t. right seat heated/unheated.
* There does not seem to be a change!
```
sh = np.array(df["StevesCar..response.climate_state.seat_heater_right"])[0:-2]
# Split efficiency samples by whether the right seat heater was on (sh > 0)
eff_without_sh = eff[np.where(sh==0)]
eff_with_sh = eff[np.where(sh>0)]
print(len(eff_without_sh))
print(len(eff_with_sh))
# NOTE(review): this section clips efficiency at -2, while the earlier
# histogram used -3 — confirm the threshold difference is intentional
eff2_without= eff_without_sh[np.where(eff_without_sh>-2)]
eff3_without = eff2_without[np.where(eff2_without<0)]
speed_without = speed[np.where(sh==0)]
speed2_without = speed_without[np.where(eff_without_sh>-2)]
speed3_without = speed2_without[np.where(eff2_without<0)]
eff2_with= eff_with_sh[np.where(eff_with_sh>-2)]
eff3_with = eff2_with[np.where(eff2_with<0)]
speed_with = speed[np.where(sh>0)]
speed2_with = speed_with[np.where(eff_with_sh>-2)]
speed3_with = speed2_with[np.where(eff2_with<0)]
# Overlay the two populations on a speed-vs-efficiency scatter plot
plt.plot(speed3_without, eff3_without, 'bo', label='Without Seat Heater')
plt.plot(speed3_with, eff3_with, 'ro',label='With Seat Heater')
plt.xlim([0, 60])
plt.xlabel('Speed mph')
plt.ylabel('Battery % drop per mile')
plt.legend()
plt.show()
```
# If we had more time...
* Investigate the dataset in-depth
* Discover new relations
* Train a regression model to predict efficiency
| github_jupyter |
# Introduction #
In this lesson we're going to see how we can build neural networks capable of learning the complex kinds of relationships deep neural nets are famous for.
The key idea here is *modularity*, building up a complex network from simpler functional units. We've seen how a linear unit computes a linear function -- now we'll see how to combine and modify these single units to model more complex relationships.
# Layers #
Neural networks typically organize their neurons into **layers**. When we collect together linear units having a common set of inputs we get a **dense** layer.
<figure style="padding: 1em;">
<img src="https://i.imgur.com/2MA4iMV.png" width="300" alt="A stack of three circles in an input layer connected to two circles in a dense layer.">
<figcaption style="textalign: center; font-style: italic"><center>A dense layer of two linear units receiving two inputs and a bias.
</center></figcaption>
</figure>
You could think of each layer in a neural network as performing some kind of relatively simple transformation. Through a deep stack of layers, a neural network can transform its inputs in more and more complex ways. In a well-trained neural network, each layer is a transformation getting us a little bit closer to a solution.
<blockquote style="margin-right:auto; margin-left:auto; background-color: #ebf9ff; padding: 1em; margin:24px;">
<strong>Many Kinds of Layers</strong><br>
A "layer" in Keras is a very general kind of thing. A layer can be, essentially, any kind of <em>data transformation</em>. Many layers, like the <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D">convolutional</a> and <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN">recurrent</a> layers, transform data through use of neurons and differ primarily in the pattern of connections they form. Others though are used for <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding">feature engineering</a> or just <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add">simple arithmetic</a>. There's a whole world of layers to discover -- <a href="https://www.tensorflow.org/api_docs/python/tf/keras/layers">check them out</a>!
</blockquote>
# The Activation Function #
It turns out, however, that two dense layers with nothing in between are no better than a single dense layer by itself. Dense layers by themselves can never move us out of the world of lines and planes. What we need is something *nonlinear*. What we need are activation functions.
<figure style="padding: 1em;">
<img src="https://i.imgur.com/OLSUEYT.png" width="400" alt=" ">
<figcaption style="textalign: center; font-style: italic"><center>Without activation functions, neural networks can only learn linear relationships. In order to fit curves, we'll need to use activation functions.
</center></figcaption>
</figure>
An **activation function** is simply some function we apply to each of a layer's outputs (its *activations*). The most common is the *rectifier* function $max(0, x)$.
<figure style="padding: 1em;">
<img src="https://i.imgur.com/aeIyAlF.png" width="400" alt="A graph of the rectifier function. The line y=x when x>0 and y=0 when x<0, making a 'hinge' shape like '_/'.">
<figcaption style="textalign: center; font-style: italic"><center>
</center></figcaption>
</figure>
The rectifier function has a graph that's a line with the negative part "rectified" to zero. Applying the function to the outputs of a neuron will put a *bend* in the data, moving us away from simple lines.
When we attach the rectifier to a linear unit, we get a **rectified linear unit** or **ReLU**. (For this reason, it's common to call the rectifier function the "ReLU function".) Applying a ReLU activation to a linear unit means the output becomes `max(0, w * x + b)`, which we might draw in a diagram like:
<figure style="padding: 1em;">
<img src="https://i.imgur.com/eFry7Yu.png" width="250" alt="Diagram of a single ReLU. Like a linear unit, but instead of a '+' symbol we now have a hinge '_/'. ">
<figcaption style="textalign: center; font-style: italic"><center>A rectified linear unit.
</center></figcaption>
</figure>
# Stacking Dense Layers #
Now that we have some nonlinearity, let's see how we can stack layers to get complex data transformations.
<figure style="padding: 1em;">
<img src="https://i.imgur.com/Y5iwFQZ.png" width="450" alt="An input layer, two hidden layers, and a final linear layer.">
<figcaption style="textalign: center; font-style: italic"><center>A stack of dense layers makes a "fully-connected" network.
</center></figcaption>
</figure>
The layers before the output layer are sometimes called **hidden** since we never see their outputs directly. And though we haven't shown them in this diagram each of these neurons would also be receiving a bias (one bias for each neuron).
Now, notice that the final (output) layer is a linear unit (meaning, no activation function). That makes this network appropriate to a regression task, where we are trying to predict some arbitrary numeric value. Other tasks (like classification) might require an activation function on the output.
## Building Sequential Models ##
The `Sequential` model we've been using will connect together a list of layers in order from first to last: the first layer gets the input, the last layer produces the output. This creates the model in the figure above:
```
from tensorflow import keras
from tensorflow.keras import layers
# 2 inputs -> two hidden ReLU layers (4 and 3 units) -> 1 linear output,
# matching the fully-connected regression network in the figure above
model = keras.Sequential([
# the hidden ReLU layers
layers.Dense(units=4, activation='relu', input_shape=[2]),
layers.Dense(units=3, activation='relu'),
# the linear output layer
layers.Dense(units=1),
])
```
Be sure to pass all the layers together in a list, like `[layer, layer, layer, ...]`, instead of as separate arguments. To add an activation function to a layer, just give its name in the `activation` argument.
# Your Turn #
Now, [**create a deep neural network**](#$NEXT_NOTEBOOK_URL$) for the *Concrete* dataset.
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Load-data" data-toc-modified-id="Load-data-1"><span class="toc-item-num">1 </span>Load data</a></span></li><li><span><a href="#Data-Growth" data-toc-modified-id="Data-Growth-2"><span class="toc-item-num">2 </span>Data Growth</a></span></li><li><span><a href="#Hierarchical-Clustering" data-toc-modified-id="Hierarchical-Clustering-3"><span class="toc-item-num">3 </span>Hierarchical Clustering</a></span></li><li><span><a href="#PCA" data-toc-modified-id="PCA-4"><span class="toc-item-num">4 </span>PCA</a></span></li><li><span><a href="#Normalize-to-reference-conditions" data-toc-modified-id="Normalize-to-reference-conditions-5"><span class="toc-item-num">5 </span>Normalize to reference conditions</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#ONLY-FOR-PRECISE-DATA" data-toc-modified-id="ONLY-FOR-PRECISE-DATA-5.0.1"><span class="toc-item-num">5.0.1 </span>ONLY FOR PRECISE DATA</a></span></li></ul></li></ul></li></ul></div>
<font size="4">This is a template notebook for exploratory analysis on your organism's QC'ed dataset.</font>
```
import pandas as pd
import os
from os import path
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style('ticks')
```
# Load data
```
organism = "Synechococcus_elongatus"
# path.join with a single argument simply returns the string unchanged
data_dir = path.join('/home/tahani/Documents/elongatus/data/1_iModulon')
os.listdir(data_dir)
# QC'ed metadata and log-TPM expression matrices, indexed by first column
DF_metadata = pd.read_csv(os.path.join(data_dir,'3_metadata_qc_ICA.csv'),index_col=0)
DF_log_tpm = pd.read_csv(os.path.join(data_dir,'3_log_tpm_qc_ICA.csv'),index_col=0)
DF_metadata.shape
DF_log_tpm.shape
DF_metadata['ref_condition'][100]
```
# Data Growth
```
DF_metadata['ReleaseDate'] = pd.to_datetime(DF_metadata['ReleaseDate'])
# NOTE(review): the 'Paeru' prefix (P. aeruginosa?) looks like a leftover from
# the template this notebook was copied from — for a S. elongatus dataset this
# filter may select nothing; confirm against the actual sample index
local = DF_metadata.loc[[x for x in DF_metadata.index if x.startswith('Paeru')]]
first_date = min(DF_metadata['ReleaseDate'])
last_date = max(DF_metadata['ReleaseDate'])
date_range = pd.date_range(start=first_date,end=last_date,freq='YS')
fig,ax = plt.subplots(figsize=(5,5))
# Cumulative sample counts over time, for all samples and the local subset
growth = DF_metadata['ReleaseDate'].value_counts().sort_index().cumsum()
growth2 = local['ReleaseDate'].value_counts().sort_index().cumsum()
# Anchor the local curve at the plot's start and end dates
growth2.loc[pd.Timestamp('2013-02-01 00:00:00')] = 0
growth2.loc[pd.Timestamp('2020-03-01 00:00:00')] = growth2.max()
growth.plot(ax=ax,label='All samples')
growth2.plot(ax=ax,label='PRECISE samples')
plt.xticks(date_range,date_range.strftime('%Y'),rotation=0,ha='center')
ax.tick_params(labelsize=12)
ax.set_xlabel('Year',fontsize=14)
ax.set_ylabel('Number of Samples',fontsize=14)
plt.legend(fontsize=12)
```
# Hierarchical Clustering
<font size=4> A clustermap is a great way to visualize the global correlations between one sample and all others. The following code uses hierarchical clustering to identify specific clusters in the clustermap <font size=4>
<font size=4> To increase the number of clusters, decrease the value of `thresh`. To decrease the number of clusters, increase the value of `thresh` <font size=4>
```
import scipy.cluster.hierarchy as sch
import matplotlib.patches as patches
# change this to get different number of clusters
thresh = .4
# retrieve clusters using fcluster
# Sample-sample correlation matrix; NaNs (zero-variance samples) set to 0
corr = DF_log_tpm.corr()
corr.fillna(0,inplace=True)
dist = sch.distance.pdist(corr)
link = sch.linkage(dist, method='complete')
clst = pd.DataFrame(index=DF_log_tpm.columns)
# Cut the dendrogram at thresh * max distance to assign flat cluster labels
clst['cluster'] = sch.fcluster(link, thresh * dist.max(), 'distance')
#get colors for each cluster
# NOTE(review): tab20 has 20 colors; more than 20 clusters would exhaust the
# zip and leave clusters unmapped — lower thresh with care
cm = plt.cm.get_cmap('tab20')
clr = dict(zip(clst.cluster.unique(), cm.colors))
clst['color'] = clst.cluster.map(clr)
print('Number of cluster: ', len(clr))
```
<font size="4">To view sample IDs in the clustermap, set `xticklabels` and `yticklabels` to `True`. You can increase the `size` variable to improve readability of sample IDs<font>
```
# Figure size in inches; increase for readable tick labels.
size = 9
# One legend patch per cluster, coloured to match the column colour strip.
legend_TN = [patches.Patch(color=c, label=l) for l,c in clr.items()]
sns.set(rc={'figure.facecolor':'white'})
# Reuse the linkage computed above so the heatmap ordering matches the
# fcluster assignments; col_colors shows each sample's cluster.
g = sns.clustermap(DF_log_tpm.corr(), figsize=(size,size),
row_linkage=link, col_linkage=link, col_colors=clst.color,
yticklabels=False, xticklabels=False, vmin=0, vmax=1)
# Place the cluster legend just outside the heatmap axes.
l2=g.ax_heatmap.legend(loc='upper left', bbox_to_anchor=(1.01,0.85), handles=legend_TN,frameon=True)
l2.set_title(title='Clusters',prop={'size':10})
```
# PCA
```
from sklearn.decomposition import PCA
import numpy as np
```
<font size="4"> First compute principal components.</font>
```
# Fit PCA over samples: transpose so rows = samples, columns = genes.
pca = PCA()
# DF_weights: per-sample coordinates in PC space (index = sample ids).
DF_weights = pd.DataFrame(pca.fit_transform(DF_log_tpm.T),index=DF_log_tpm.columns)
# DF_components: per-gene loadings for each component (index = gene ids).
DF_components = pd.DataFrame(pca.components_.T,index=DF_log_tpm.index)
```
<font size="4"> Next, plot the cumulative explained variance</font>
```
# Set the explained variance threshold
var_cutoff = 0.99
fig,ax = plt.subplots(figsize=(5,3.5))
# Cumulative fraction of variance explained by the first k components.
pca_var = np.cumsum(pca.explained_variance_ratio_)
ax.plot(pca_var)
# First number of components whose cumulative variance exceeds the cutoff.
# NOTE(review): raises IndexError if the cutoff is never reached -- cannot
# happen for var_cutoff < 1 with a full PCA, but confirm if cutoff changes.
dims = np.where(pca_var > var_cutoff)[0][0] + 1
# Mark the crossing point with dotted guide lines.
ax.vlines(dims,0,1,linestyles='dotted')
ax.hlines(var_cutoff,0,len(DF_log_tpm.columns),linestyles='dotted')
ax.set_ylim(0,1)
ax.set_xlim(0,len(DF_log_tpm.columns))
ax.set_ylabel('Fraction of Explained Variance',fontsize=12)
ax.set_xlabel('Number of Dimensions',fontsize=12)
ax.set_title('Cumulative Explained Variance',fontsize=16)
print('Number of dimensions for 99% of variance:',dims)
```
<font size="4">Finally, plot the first two principal components. The following code colors data by Project Name.</font>
```
# Scatter the first two principal components, one colour per project.
fig,ax = plt.subplots(figsize=(7,5))
for name,group in DF_metadata.groupby('project'):
# Sample ids belonging to this project, in expression-matrix column order.
idx = DF_log_tpm.loc[:,group.index.tolist()].columns.tolist()
ax.scatter(DF_weights.loc[idx,0],
DF_weights.loc[idx,1],
label=name,alpha=0.8)
# Axis labels report the % variance explained by each component.
ax.set_xlabel('Component 1: %.1f%%'%(pca.explained_variance_ratio_[0]*100),fontsize=14)
ax.set_ylabel('Component 2: %.1f%%'%(pca.explained_variance_ratio_[1]*100),fontsize=14)
ax.set_title('Principal Component Plot',fontsize=18)
plt.legend(bbox_to_anchor=(1,1),fontsize=12,ncol=2)
```
# Normalize to reference conditions
```
# Center each project's expression on its own reference condition, so that
# values become log fold-changes relative to the project reference.
DF_metadata.project
project_exprs = []
for name,group in DF_metadata.groupby('project'):
ref_cond = group.ref_condition.unique()
# Ensure that there is only one reference condition per project
assert(len(ref_cond) == 1)
ref_cond = ref_cond[0]
# Ensure the reference condition is in fact in the project
assert(ref_cond in group.condition.tolist())
# Get reference condition sample ids
ref_samples = group[group.condition == ref_cond].index
# Get reference condition expression
ref_expr = DF_log_tpm[ref_samples].mean(axis=1)
# Subtract reference expression from project
project_exprs.append(DF_log_tpm[group.index].sub(ref_expr,axis=0))
# Reassemble all projects into a single normalized matrix.
DF_log_tpm_norm = pd.concat(project_exprs,axis=1)
DF_log_tpm_norm.head()
```
Tahani Tuesday Oct 6, 4pm
<font size=4>Uncomment this code to save the log_tpm_norm file</font>
```
# Save the normalized log-TPM matrix next to the other processed files.
# Bug fix: `path` was undefined here -- the rest of the file uses
# `os.path.join`, so use that (os is already imported above).
DF_log_tpm_norm.to_csv(os.path.join(data_dir,'3_log_tpm_normalized_ICA_second_run.csv'))
DF_log_tpm_norm.shape
```
### ONLY FOR PRECISE DATA
```
# Illustrative bar chart of activity values for two groups (Ctrl vs Izd);
# values are hard-coded for the figure.
fig,ax = plt.subplots(figsize=(2,3))
plt.bar(range(5),[-2,-3,-2.6,-20,-19],width=1,linewidth=0,color=['tab:orange']*3+['tab:blue']*2)
plt.xticks([1,3.5],labels=('Ctrl','Izd'),fontsize=16)
plt.yticks([])
plt.ylabel('sigD Activity',fontsize=16)
# NOTE(review): the paths below are hard-coded to a local machine -- this
# section only runs on the original author's setup.
DF_metadata = pd.read_csv('/home/anand/Downloads/metadata_all.csv',index_col=0)
DF_metadata['ReleaseDate'] = pd.to_datetime(DF_metadata['ReleaseDate'])
local = pd.read_csv('/home/anand/Downloads/local_metadata.csv',index_col=0)
local['ReleaseDate'] = pd.to_datetime(local['ReleaseDate'])
# Combine public metadata with the local samples before plotting growth.
DF_metadata = pd.concat([DF_metadata,local])
first_date = min(DF_metadata['ReleaseDate'])
last_date = max(DF_metadata['ReleaseDate'])
date_range = pd.date_range(start=first_date,end=last_date,freq='YS')
fig,ax = plt.subplots(figsize=(5,5))
# Cumulative sample counts over time: all samples vs local samples.
growth = DF_metadata['ReleaseDate'].value_counts().sort_index().cumsum()
growth2 = local['ReleaseDate'].value_counts().sort_index().cumsum()
# Anchor the local curve's start so it spans the plotting window.
growth2.loc[pd.Timestamp('2016-05-01 00:00:00')] = 0
#growth2.loc[pd.Timestamp('2020-03-01 00:00:00')] = growth2.max()
growth.plot(ax=ax,label='All samples')
growth2.plot(ax=ax,label='PRECISE samples')
plt.xticks(date_range,date_range.strftime('%Y'),rotation=0,ha='center')
ax.tick_params(labelsize=12)
ax.set_xlabel('Year',fontsize=14)
ax.set_ylabel('Number of Samples',fontsize=14)
plt.legend(fontsize=12)
len(local)
# Repeat the growth plot for a second dataset (Abaum), whose release date
# is taken from the 'harvest-date' column instead.
DF_metadata['ReleaseDate'] = pd.to_datetime(DF_metadata['ReleaseDate'])
local = pd.read_csv('/home/anand/Downloads/Abaum_metadata.csv',index_col=0)
local['ReleaseDate'] = pd.to_datetime(local['harvest-date'])
DF_metadata = pd.concat([DF_metadata,local])
first_date = min(DF_metadata['ReleaseDate'])
last_date = max(DF_metadata['ReleaseDate'])
date_range = pd.date_range(start=first_date,end=last_date,freq='YS')
fig,ax = plt.subplots(figsize=(5,5))
growth = DF_metadata['ReleaseDate'].value_counts().sort_index().cumsum()
growth2 = local['ReleaseDate'].value_counts().sort_index().cumsum()
# Anchor points so the local curve spans the same window as the total.
growth2.loc[pd.Timestamp('2014-06-01 00:00:00')] = 0
growth2.loc[pd.Timestamp('2020-03-01 00:00:00')] = growth2.max()
growth.plot(ax=ax,label='All samples')
growth2.plot(ax=ax,label='PRECISE samples')
plt.xticks(date_range,date_range.strftime('%Y'),rotation=0,ha='center')
ax.tick_params(labelsize=12)
ax.set_xlabel('Year',fontsize=14)
ax.set_ylabel('Number of Samples',fontsize=14)
plt.legend(fontsize=12)
```
| github_jupyter |
# Time series in Pastas
*R.A. Collenteur, University of Graz, 2020*
Time series are at the heart of time series analysis, and therefore need to be considered carefully when dealing with time series models. In this notebook more background information is provided on important characteristics of time series and how these may influence your modeling results. In general, Pastas depends heavily on Pandas for dealing with time series, but adds capabilities to deal with irregular time series and missing data.
All time series should be provided to Pastas as `pandas.Series` with a `pandas.DatetimeIndex`. Internally these time series are stored in a `pastas.TimeSeries` object. The goal of this object is to validate the user-provided time series and enable resampling (changing frequencies) of the independent time series. The TimeSeries object also has capabilities to deal with missing data in the user-provided time series. As much of these operations occur internally, this notebook is meant to explain users what is happening and how to check for this.
<div class="alert alert-info">
<b>Note</b>
* The standard Pastas data type for a date is the `pandas.Timestamp`.
* The standard Pastas data type for a sequence of dates is the `pandas.DatetimeIndex` with `pandas.Timestamp`.
* The standard Pastas data type for a time series is a `pandas.Series` with a `pandas.DatetimeIndex`
</div>
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pastas as ps
# Print installed package versions for reproducibility.
ps.show_versions()
```
## Different types of time series
Time series data may generally be defined as a set of data values observed at certain times, ordered in a way that the time indices are increasing. Many time series analysis methods assume that the time step between the observations is regular, i.e. that the time series has evenly-spaced observations. These evenly spaced time series may have missing data, but it will still be possible to lay the values on a time-grid with constant time steps.
This is generally also assumed to be the case for the independent time series in hydrological studies. For example, the precipitation records may have some missing data but the precipitation is reported as the total rainfall over one day. In the case of missing data, we may impute a zero (no rain) or the rainfall amount from a nearby measurement station.
Groundwater level time series do generally not share these characteristics with other hydrological time series, and are measured at irregular time intervals. This is especially true for historic time series that were measured by hand. The result is that the measurements can not be laid on a regular time grid. The figure below graphically shows the difference between the three types of time series.
```
# Build three toy series illustrating (a) regular time steps,
# (b) regular steps with missing data, and (c) irregular time steps.
regular = pd.Series(index=pd.date_range("2000-01-01", "2000-01-10", freq="D"),
data=np.ones(10))
missing_data = regular.copy()
missing_data.loc[["2000-01-03", "2000-01-08"]] = np.nan
# Shift every timestamp by a random number of hours to make it irregular.
# NOTE(review): unit="H" is deprecated in newer pandas in favour of "h".
index = [t + pd.Timedelta(np.random.rand()*24, unit="H") for t in missing_data.index]
irregular = missing_data.copy()
irregular.index = index
# Plot the three series in stacked panels sharing both axes.
fig, axes = plt.subplots(3,1, figsize=(6, 5), sharex=True, sharey=True)
regular.plot(ax=axes[0], linestyle=" ", marker="o", x_compat=True)
missing_data.plot(ax=axes[1], linestyle=" ", marker="o", x_compat=True)
irregular.plot(ax=axes[2], linestyle=" ", marker="o", x_compat=True)
for i, name in enumerate(["(a) Regular time steps", "(b) Missing Data", "(c) Irregular time steps"]):
axes[i].grid()
axes[i].set_title(name)
plt.tight_layout()
```
## Independent and dependent time series
We can differentiate between two types of input time series for Pastas models: the dependent and independent time series. The dependent time series are those that we want to explain (e.g., the groundwater levels) and the independent time series are those that we use to explain the dependent time series (e.g., precipitation or evaporation). The requirements for these time series are different:
- The dependent time series may be of any kind: regular, missing data or irregular.
- The independent time series has to have regular time steps.
In practice, this means that the time series provided to `pastas.Model` may be of any kind, and that the time series used by the stressmodels (e.g., `pastas.RechargeModel`) need to have regular time steps. The regular time steps are required to simulate contributions to the groundwater level fluctuations. As there are virtually no restrictions on the dependent time series, the remainder of this notebook will discuss primarily the independent time series.
## How does the TimeSeries object validate a time series?
To ensure that a time series can be used for simulation a number of things are checked and changed:
1. Make sure the values are floats. Values are changed to dtype=float if not.
2. Make sure the index is a `pandas.DatetimeIndex`. Index is changed if not.
3. Make sure the timestamps in the index are increasing. Index is sorted if not.
4. Make sure there are no nan-values at the start and end of a time series.
5. Determine the frequency of the time series.
6. Make sure there are no duplicate indices. Values are averaged if this is the case.
7. Remove or fill up nan-values, depending on the settings.
For each of these steps an INFO message will be returned by Pastas to inform the user if a change is made. The first four steps generally do not have a large impact and are there to prevent some basic issues. Preferably, no changes are reported.
### Frequency of the input data
Pastas tries to determine the frequency in step 5, and will **always** report the result. It is generally good practice to double-check if the reported frequency agrees with what you know about the time series. Pastas will also report if no frequency can be inferred. If no frequency is reported, something is probably wrong and the user should either fix the input time series or provide Pastas with more information.
Below we consider a time series with precipitation data, measured every day. We will use `settings="prec"` as a shortcut for the settings to fill nans and resample. We will come back to those settings later.
```
# Load daily precipitation as a Series (date index, one value column).
# NOTE(review): the squeeze= keyword was removed in pandas 2.0; with newer
# pandas use .squeeze("columns") on the result instead.
rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
# Validate through Pastas; "prec" presets the nan-fill/resample settings.
ps.TimeSeries(rain, settings="prec")
```
Pastas correctly reports the frequency and we can continue with this time series. Note that the input time series thus agrees with all the checks for the time series validation. Let's now introduce a nan-value and see what happens.
```
# Introduce a single missing value; the "prec" settings fill it with 0.0.
rain["1989-01-01"] = np.nan
ps.TimeSeries(rain, settings="prec")
```
This also works fine. The frequency was inferred (stored as freq_original) and one nan-value was filled up with 0.0. Now we take the same time series, but drop the nan-value.
```
# Dropping the NaN leaves a gap in the index, so no frequency can be inferred.
ps.TimeSeries(rain.dropna(), settings="prec")
```
The above result is probably not what we want. Pastas could not infer the frequency and therefore resorts to the `timestep_weighted_resample` method. Documentation for this method is available in utils.py.
If we know the original frequency of the time series, we can tell this to Pastas through the `freq_original` argument. As we can see below, the user-provided frequency is used.
```
# Reload the series, drop a value again, and this time tell Pastas the
# original frequency explicitly via freq_original.
rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
rain["1989-01-01"] = np.nan
ps.TimeSeries(rain.dropna(), settings="prec", freq_original="D")
```
The above example shows how to obtain the same or different result with four different methods. Some of these methods require good knowledge about the TimeSeries object and how it processes your time series. It is often preferred to provide Pastas with a better initial time series by resampling it yourself. This has the additional benefit that you are interacting more closely with the data. Most of the examples also follow this pattern.
<div class="alert alert-info">
<b>Best practice</b>
Try and modify your original time series such that Pastas returns a message that it was able to infer the frequency from the time series itself: **INFO: Inferred frequency for time series rain: freq=D**
</div>
## Time series settings
In the examples above we used the `settings` keyword when creating the TimeSeries. This is a shortcut method to select a number of settings from a predefined set of options. These predefined options can accessed through `ps.rcParams["timeseries"]`:
```
# Show the predefined settings presets as a table (one column per preset).
pd.DataFrame.from_dict(ps.rcParams["timeseries"])
```
Each column name is a valid option for the `settings` argument. The rows shows the settings that may be chosen for changing the original time series. Once a TimeSeries is created, we can access the existing settings as follows:
```
# Create a TimeSeries with the precipitation preset and inspect its settings.
ts = ps.TimeSeries(rain, settings="prec")
ts.settings
```
This settings dictionary now includes both settings used to resample (sample_up, sample_down), extend (fill_before, fill_after), normalize (norm), and fill nans in the time series, but also dynamic settings such as the start and end date (tmin, tmax), the frequency (freq) and the time offset.
To update these settings, the `update_series` method is available. For example, if we want to resample the above time series to a 7-day frequency and sum up the values we can use:
```
# Resample to 7-day steps, summing the daily values within each step.
ts.update_series(freq="7D", sample_down="sum")
```
Because the original series are stored in the TimeSeries object as well, it is also possible to go back again. Changes made to the time series always start from the original validated time series again. For more information on the possible settings see the API-docs for the [TimeSeries and update_series method](https://pastas.readthedocs.io/en/latest/api/timeseries.html) on the documentation website.
## An example with a Pastas Model
By now you may be wondering why all these settings exist in the first place. The main reason (apart from validating the user-provided time series) is to change the time step of the simulation of the independent time series. It may also be used to extend the time series in time.
Below we load some time series, visualize them and create a Pastas model with precipitation and evaporation to explain the groundwater level fluctuations. It is generally recommended to plot your time series for a quick visual check of the input data.
```
# Load the head (groundwater level), rain and evaporation series.
head = pd.read_csv("../examples/data/B32C0639001.csv", parse_dates=['date'],
index_col='date', squeeze=True)
rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
evap = pd.read_csv('../examples/data/evap_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
# Visual check of the three input series in stacked panels.
fig, axes = plt.subplots(3,1, figsize=(10,6), sharex=True)
head.plot(ax=axes[0], x_compat=True, linestyle=" ", marker=".")
evap.plot(ax=axes[1], x_compat=True)
rain.plot(ax=axes[2], x_compat=True)
axes[0].set_ylabel("Head [m]")
axes[1].set_ylabel("Evap [mm/d]")
axes[2].set_ylabel("Rain [mm/d]")
plt.xlim("1985", "2005");
# Build a model explaining the head from linear recharge (rain - f*evap)
# convolved with a Gamma response function, then calibrate it.
ml = ps.Model(head)
rch = ps.rch.Linear()
rm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=ps.Gamma, name="rch")
ml.add_stressmodel(rm)
ml.solve(noise=True, tmin="1990", report="basic")
```
## What is the model freq?
The output below shows that the time series have frequencies of `freq=D`. The fit report also shows a frequency of `freq=D`. The frequency reported in the fit_report is the time step of the simulation for the independent time series, and is internally passed on to the stressmodels. The user-provided independent time series are stored in the stressmodel object and can be accessed as follows.
```
# The stress series (rain, evap) stored inside the recharge stress model.
ml.stressmodels["rch"].stress
```
If we want to change the resample method, for example we want to sum the precipitation and evaporation when sampling down (e.g., daily to weekly) we may do the following:
```
# Switch both stresses to summing when downsampling (e.g. daily -> weekly).
for stress in ml.stressmodels["rch"].stress:
stress.update_series(sample_down="sum")
```
After changing the methods for sampling down, we now solve the model with a simulation time step of 14 days. The precipitation and evaporation are then summed up over 14 day intervals, before being translated to a groundwater fluctuation using a response function.
```
# Inspect the model settings, then re-solve with a 14-day simulation step;
# the stresses are summed over each 14-day interval first.
ml.settings
ml.solve(freq="14D", tmin="1980", report="basic")
ml.plots.results(figsize=(10,6), tmin="1970");
# Extend the evaporation stress further back in time and check its settings.
ml.stressmodels["rch"].stress[1].update_series(tmin="1960")
ml.stressmodels["rch"].stress[1].settings
```
Another method to obtain the settings of the time series used in a stressmodel is as follows:
```
# Convenience accessor for the settings of all stresses in one stress model.
ml.get_stressmodel_settings("rch")
```
## Warnings
Because the TimeSeries object is a relatively complicated object that can potentially change model results extra care has to be taken in some cases. Below is a number of outstanding warnings and the related GitHub issues.
<div class="alert alert-warning">
<b>A note on dependent time series</b>
The dependent time series (stored as `ml.oseries`) are also stored in a TimeSeries object and therefore have the same capabilities. Usage of these methods on the dependent time series is however experimental and not recommended for real world use cases. See also [Issue #68](https://github.com/pastas/pastas/issues/68) and [Discussion #199](https://github.com/pastas/pastas/discussions/199)
</div>
<div class="alert alert-warning">
<b>A note on monthly data</b>
Monthly data is strictly irregular data, and poses additional challenges when resampling to regular frequencies. Pastas does not differentiate between monthly data reported at months end (`freq=M`) or months beginning (`freq=MS`) and the default settings are selected for `freq=M`. There may also be issues with extending the time series. See also [Issue #239](https://github.com/pastas/pastas/issues/239)
</div>
| github_jupyter |
<h3><center><span style="font-size: 200%;">bTwin</span><sup>β</sup> Find your Bollywood Twin </center></h3>
bTwin is an acronym for Bollywood Twin. The idea is to let the user find his celebrity twin by using the technique of computer vision with convolutional neural networks (CNNs).
The current dataset is a collection of pictures of top 100 celebrities of bollywood listed by hungama.com.
## 1. Data Collection:
Importing the necessary libraries for data collection from the csv file generated by using the support modules.<br>
The <b>Pandas</b> library will help in doing all operations on the data stored in form of a dataframe. <br>
The <b>Numpy</b> library is for all linear algebra and mathematical operations.
```
import pandas as pd
import numpy as np
```
The following lines of code restricts the GPU from using complete memory.
```
import tensorflow as tf
# Bug fix: the device-type filter is 'GPU' (TensorFlow's device types are
# upper case); 'gpu' returned an empty list, so the GPU branch never ran.
physical_devices = tf.config.list_physical_devices('GPU')
if len(physical_devices) > 0:
    print("Using GPU. ")
    # Grow GPU memory on demand instead of reserving it all up front.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
else:
    print("No GPU found. ")
```
Importing the csv file which contains the value of each pixel of 100 X 100 pixel grayscale image of each celebrity.
```
# Load the pixel table: one row per image, 10000 pixel columns + a Label column.
df = pd.read_csv('../data.csv')
print(df)
```
The data comprises 12167 images of 100 Bollywood celebrities. The images are grayscaled and are of 100 X 100 pixel resolution, i.e. a total of 10000 pixels.
<br>
The column indexed as Label stores a number/index representing the name of the celebrity based on the following list [in Alphabetical order]: <br>
<details>
<summary>
The Label number corresponding to the name of the celebrity <i>[Click the arrow for details]</i>
</summary>
1. Aamir_Khan<br>
2. Abhay_Deol<br>
3. Abhishek_Bachchan<br>
4. Aftab_Shivdasani<br>
5. Aishwarya_Rai<br>
6. Ajay_Devgn<br>
7. Akshaye_Khanna<br>
8. Akshay_Kumar<br>
9. Alia_Bhatt<br>
10. Ameesha_Patel<br>
11. Amitabh_Bachchan<br>
12. Amrita_Rao<br>
13. Amy_Jackson<br>
14. Anil_Kapoor<br>
15. Anushka_Sharma<br>
16. Anushka_Shetty<br>
17. Arjun_Kapoor<br>
18. Arjun_Rampal<br>
19. Arshad_Warsi<br>
20. Asin<br>
21. Ayushmann_Khurrana<br>
22. Bhumi_Pednekar<br>
23. Bipasha_Basu<br>
24. Bobby_Deol<br>
25. Deepika_Padukone<br>
26. Disha_Patani<br>
27. Emraan_Hashmi<br>
28. Esha_Gupta<br>
29. Farhan_Akhtar<br>
30. Govinda<br>
31. Hrithik_Roshan<br>
32. Huma_Qureshi<br>
33. Ileana_DCruz<br>
34. Irrfan_Khan<br>
35. Jacqueline_Fernandez<br>
36. John_Abraham<br>
37. Juhi_Chawla<br>
38. Kajal_Aggarwal<br>
39. Kajol<br>
40. Kangana_Ranaut<br>
41. Kareena_Kapoor<br>
42. Karisma_Kapoor<br>
43. Kartik_Aaryan<br>
44. Katrina_Kaif<br>
45. Kiara_Advani<br>
46. Kriti_Kharbanda<br>
47. Kriti_Sanon<br>
48. Kunal_Khemu<br>
49. Lara_Dutta<br>
50. Madhuri_Dixit<br>
51. Manoj_Bajpayee<br>
52. Mrunal_Thakur<br>
53. Nana_Patekar<br>
54. Nargis_Fakhri<br>
55. Naseeruddin_Shah<br>
56. Nushrat_Bharucha<br>
57. Paresh_Rawal<br>
58. Parineeti_Chopra<br>
59. Pooja_Hegde<br>
60. Prabhas<br>
61. Prachi_Desai<br>
62. Preity_Zinta<br>
63. Priyanka_Chopra<br>
64. Rajkummar_Rao<br>
65. Ranbir_Kapoor<br>
66. Randeep_Hooda<br>
67. Rani_Mukerji<br>
68. Ranveer_Singh<br>
69. Richa_Chadda<br>
70. Riteish_Deshmukh<br>
71. R_Madhavan<br>
72. Saif_Ali_Khan<br>
73. Salman_Khan<br>
74. Sanjay_Dutt<br>
75. Sara_Ali_Khan<br>
76. Shahid_Kapoor<br>
77. Shah_Rukh_Khan<br>
78. Shilpa_Shetty<br>
79. Shraddha_Kapoor<br>
80. Shreyas_Talpade<br>
81. Shruti_Haasan<br>
82. Sidharth_Malhotra<br>
83. Sonakshi_Sinha<br>
84. Sonam_Kapoor<br>
85. Suniel_Shetty<br>
86. Sunny_Deol<br>
87. Sushant_Singh_Rajput<br>
88. Taapsee_Pannu<br>
89. Tabu<br>
90. Tamannaah_Bhatia<br>
91. Tiger_Shroff<br>
92. Tusshar_Kapoor<br>
93. Uday_Chopra<br>
94. Vaani_Kapoor<br>
95. Varun_Dhawan<br>
96. Vicky_Kaushal<br>
97. Vidya_Balan<br>
98. Vivek_Oberoi<br>
99. Yami_Gautam<br>
100. Zareen_Khan<br>
</details>
### The best practice to prepare any dataset is to split it into 3 parts:
<u><b>Training Set:</b></u> This set will be used to train the model and develop a relation between the labels and the training data. *[60% of total data]*<br>
<u><b>Validation Set:</b></u> This set will be used to validate the reletationship developed by the model on the training data to check if the model is overfitting or underfitting or is perfect for the given data. With the result of this data we decide if we should show the test data to the model or to tune the hyperparameters or model architecture again. *[20% of total data]*<br>
<u><b>Test Set:</b></u> This set is the final test to check if the model is perfect and is ready to go for the deployment. *[20% of total data]*
```
# Shuffle once (fixed seed for reproducibility) and cut at the 60% and 80%
# marks, yielding a 60/20/20 train/validation/test split.
train, validate, test = np.split(df.sample(frac=1, random_state=42), [int(.6*len(df)), int(.8*len(df))])
print("Training Data: ")
print(train)
print("Validation Data: ")
print(validate)
print("Test Data: ")
print(test)
```
Separating the labels from the training, validation and testing data, and converting to numpy arrays.
```
# Separate the pixel-feature matrix (X_*) from the target labels (Y_*)
# for each of the three subsets; the source frames are left untouched.
X_train, Y_train = train.drop(columns=['Label']), train['Label']
X_validate, Y_validate = validate.drop(columns=['Label']), validate['Label']
X_test, Y_test = test.drop(columns=['Label']), test['Label']
```
## 2. Data preprocessing:
Converting the input dataframes (X_train, X_validate, X_test) to numpy arrays for better processing and easier operations.
```
# Convert the feature DataFrames to NumPy arrays for the numeric pipeline.
X_train = X_train.to_numpy()
X_validate = X_validate.to_numpy()
X_test = X_test.to_numpy()
```
The values of pixel range from 0 to 255 based on the intensity of the colour, for easier and faster processing, we'll scale the values to be in between 0 and 1.
```
# Scale 8-bit pixel intensities from [0, 255] to [0, 1].
X_train = X_train / 255.0
X_validate = X_validate / 255.0
X_test = X_test / 255.0
```
Reshaping the values of the 3 arrays to (size X 100 X 100 X 1) where size is the number of images in the array, 100 X 100 represents the resolution of the image and 1 represents the channel i.e. the image is a grayscale image.
```
# Reshape flat pixel rows to (n, 100, 100, 1): 100x100 grayscale images
# with one channel, as expected by Conv2D layers.
X_train = X_train.reshape(-1, 100, 100, 1)
X_validate = X_validate.reshape(-1, 100, 100, 1)
X_test = X_test.reshape(-1, 100, 100, 1)
print(Y_test.iloc[0])
```
Now the training input is preprocessed for better processing by the planned deep learning architecture.
Now, we'll convert the labels to one-hot encoded vectors by using to_categorical() function from the utils module of keras package.
```
from keras.utils import to_categorical
# One-hot encode the 100 class labels for categorical cross-entropy.
Y_train = to_categorical(Y_train, num_classes=100)
Y_validate = to_categorical(Y_validate, num_classes=100)
Y_test = to_categorical(Y_test, num_classes=100)
print(Y_test[1,:])
```
## 3. Model Creation and Training:
```
from keras import models
from keras.models import Sequential
from keras import layers
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from keras.optimizers import Adam
# VGG16-style stack: five conv blocks (64 -> 64 -> 128x2 -> 256x3 -> 512x3
# -> 512x3 filters), each followed by 2x2 max-pooling, then two 4096-unit
# dense layers and a 100-way softmax (one unit per celebrity class).
model = Sequential()
model.add(Conv2D(input_shape=(100,100,1),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=100, activation="softmax"))
model.summary()
# Categorical cross-entropy matches the one-hot labels above.
model.compile(optimizer=Adam(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
```
These callbacks will help us in the model training
```
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
# Bug fixes: with metrics=['accuracy'] the validation metric key is
# 'val_accuracy' ('val_acc' is the legacy name and is never populated, so
# these callbacks would monitor nothing), and save_freq=1 meant "save every
# batch" -- 'epoch' saves once per epoch as intended.
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_accuracy',
verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')
# Stop training when validation accuracy stalls for 20 epochs.
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
# Quarter the learning rate when validation loss plateaus for 5 epochs.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience= 5, verbose=1, factor=0.25, min_lr=0.00001)
```
The following callback is for tensorboard to give a visualization of the training.
```
import os
import datetime
# Timestamped run directory so each training run gets its own logs.
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# Bug fix: os.mkdir fails when logs/fit does not exist yet; makedirs
# creates the whole path (and tolerates an existing directory).
os.makedirs(log_dir, exist_ok=True)
# Bug fix: the literal string 'log_dir' was passed instead of the variable,
# so TensorBoard wrote to a directory called "log_dir" and the timestamped
# directory stayed empty.
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, embeddings_freq=1)
# NOTE(review): steps_per_epoch=100 and batch_size=32 together cap each
# epoch at 3200 samples -- confirm this subsampling is intentional.
model.fit(X_train, Y_train, steps_per_epoch=100, epochs = 10, batch_size=32,
validation_data=(X_validate, Y_validate), verbose=1,
callbacks=[learning_rate_reduction, tensorboard, early], shuffle=False)
```
| github_jupyter |
# Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
- **Twitter**: @iamtrask
- **Blog**: http://iamtrask.github.io
### What You Should Already Know
- neural networks, forward and back-propagation
- stochastic gradient descent
- mean squared error
- and train/test splits
### Where to Get Help if You Need it
- Re-watch previous Udacity Lectures
- Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code)
- Shoot me a tweet @iamtrask
### Tutorial Outline:
- Intro: The Importance of "Framing a Problem" (this lesson)
- [Curate a Dataset](#lesson_1)
- [Developing a "Predictive Theory"](#lesson_2)
- [**PROJECT 1**: Quick Theory Validation](#project_1)
- [Transforming Text to Numbers](#lesson_3)
- [**PROJECT 2**: Creating the Input/Output Data](#project_2)
- Putting it all together in a Neural Network (video only - nothing in notebook)
- [**PROJECT 3**: Building our Neural Network](#project_3)
- [Understanding Neural Noise](#lesson_4)
- [**PROJECT 4**: Making Learning Faster by Reducing Noise](#project_4)
- [Analyzing Inefficiencies in our Network](#lesson_5)
- [**PROJECT 5**: Making our Network Train and Run Faster](#project_5)
- [Further Noise Reduction](#lesson_6)
- [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6)
- [Analysis: What's going on in the weights?](#lesson_7)
# Lesson: Curate a Dataset<a id='lesson_1'></a>
The cells from here until Project 1 include code Andrew shows in the videos leading up to mini project 1. We've included them so you can run the code along with the videos without having to type in everything.
```
def pretty_print_review_and_label(i):
    """Print label i and the first 80 characters of review i, tab-separated."""
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
# Read the reviews and labels, stripping the trailing newline from each line;
# labels are upper-cased so comparisons are case-insensitive.
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
```
**Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.
```
# Dataset size and a peek at the first review/label pair.
len(reviews)
reviews[0]
labels[0]
```
# Lesson: Develop a Predictive Theory<a id='lesson_2'></a>
```
# Print a handful of labelled review snippets to eyeball patterns by hand.
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
```
# Project 1: Quick Theory Validation<a id='project_1'></a>
There are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.
You'll find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.
```
from collections import Counter
import numpy as np
```
We'll create three `Counter` objects, one for words from positive reviews, one for words from negative reviews, and one for all the words.
```
# One counter per sentiment class plus an overall tally; created separately
# so each holds its own independent word counts.
positive_counts, negative_counts, total_counts = (Counter() for _ in range(3))
```
**TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.
**Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.
```
# TODO: Loop over all the words in all the reviews and increment the counts in the appropriate counter objects
```
Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.
```
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()
# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
```
As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.
**TODO:** Check all the words you've seen and calculate the ratio of positive to negative uses and store that ratio in `pos_neg_ratios`.
>Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.
```
# Create Counter object to store positive/negative ratios
# (keyed by word; each value will be positive_counts[word] / (negative_counts[word] + 1))
pos_neg_ratios = Counter()

# TODO: Calculate the ratios of positive and negative uses of the most common words
#       Consider words to be "common" if they've been used at least 100 times
```
Examine the ratios you've calculated for a few words:
```
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
```
Looking closely at the values you just calculated, we see the following:
* Words that you would expect to see more often in positive reviews – like "amazing" – have a ratio greater than 1. The more skewed a word is toward positive, the farther from 1 its positive-to-negative ratio will be.
* Words that you would expect to see more often in negative reviews – like "terrible" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.
* Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like "the" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1. The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway.
Ok, the ratios tell us which words are used more often in positive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like "amazing" has a value above 4, whereas a very negative word like "terrible" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:
* Right now, 1 is considered neutral, but the absolute value of the positive-to-negative ratios of very positive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around neutral so the absolute value from neutral of the positive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.
* When comparing absolute values it's easier to do that around zero than one.
To fix these issues, we'll convert all of our ratios to new values using logarithms.
**TODO:** Go through all the ratios you calculated and convert their values using the following formulas:
> * For any positive words, convert the ratio using `np.log(ratio)`
> * For any negative words, convert the ratio using `-np.log(1/(ratio + 0.01))`
That second equation may look strange, but what it's doing is dividing one by a very small number, which will produce a larger positive number. Then, it takes the `log` of that, which produces numbers similar to the ones for the positive words. Finally, we negate the values by adding that minus sign up front. In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.
```
# TODO: Convert ratios to logs
```
Examine the new ratios you've calculated for the same words from before:
```
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
```
If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments.
Now run the following cells to see more ratios.
The first cell displays all the words, ordered by how associated they are with positive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.)
The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.)
You should continue to see values similar to the earlier ones we checked – neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios.
```
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this: pos_neg_ratios.most_common()[:-31:-1]
```
# End of Project 1.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Transforming Text into Numbers<a id='lesson_3'></a>
The cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.
```
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
```
# Project 2: Creating the Input/Output Data<a id='project_2'></a>
**TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary.
```
# TODO: Create set named "vocab" containing all of the words from all of the reviews
#       (placeholder for the exercise; the next cell expects len(vocab) == 74074)
vocab = None
```
Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**
```
vocab_size = len(vocab)
print(vocab_size)
```
Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.
```
from IPython.display import Image
Image(filename='sentiment_network_2.png')
```
**TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns.
```
# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
#       (e.g. with np.zeros; the next cell expects layer_0.shape == (1, 74074))
layer_0 = None
```
Run the following cell. It should display `(1, 74074)`
```
layer_0.shape
from IPython.display import Image
Image(filename='sentiment_network.png')
```
`layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.
```
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0); each word gets the column index of its slot.
word2index = {word: i for i, word in enumerate(vocab)}

# display the map of words to indices
word2index
```
**TODO:** Complete the implementation of `update_input_layer`. It should count
how many times each word is used in the given review, and then store
those counts at the appropriate indices inside `layer_0`.
```
def update_input_layer(review):
    """ Modify the global layer_0 to represent the vector form of review.
    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.
    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0
    # clear out previous state by resetting the layer to be all 0s
    # (in-place multiply reuses the same array instead of reallocating)
    layer_0 *= 0

    # TODO: count how many times each word is used in the given review and store the results in layer_0
```
Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`.
```
# Vectorize the first review; non-zero entries of layer_0 are word counts.
update_input_layer(reviews[0])
layer_0
```
**TODO:** Complete the implementation of `get_target_for_labels`. It should return `0` or `1`,
depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.
```
def get_target_for_label(label):
    """Convert a label to `0` or `1`.
    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `0` or `1`.
    """
    # As written in the skeleton this fell through and returned None, which
    # contradicts the docstring; map "POSITIVE" -> 1 and anything else
    # (i.e. "NEGATIVE") -> 0, matching the checks in the next two cells.
    return 1 if label == "POSITIVE" else 0
```
Run the following two cells. They should print out`'POSITIVE'` and `1`, respectively.
```
labels[0]
get_target_for_label(labels[0])
```
Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively.
```
labels[1]
get_target_for_label(labels[1])
```
# End of Project 2.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Project 3: Building a Neural Network<a id='project_3'></a>
**TODO:** We've included the framework of a class called `SentimentNetwork`. Implement all of the items marked `TODO` in the code. These include doing the following:
- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer.
- Do **not** add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.
- Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code)
- Implement the `pre_process_data` function to create the vocabulary for our training data generating functions
- Ensure `train` trains over the entire corpus
### Where to Get Help if You Need it
- Re-watch earlier Udacity lectures
- Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code)
```
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
    """Skeleton of a 3-layer (input -> hidden -> output) sentiment classifier.

    Most method bodies are intentionally left as TODOs for the exercise; this
    class wires together data pre-processing, network initialization,
    training, testing, and prediction.
    """

    def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimentNetwork with the given settings
        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Assign a seed to our random number generator to ensure we get
        # reproducible results during development
        np.random.seed(1)

        # process the reviews and their associated labels so that everything
        # is ready for training
        self.pre_process_data(reviews, labels)

        # Build the network to have the number of hidden nodes and the learning rate that
        # were passed into this initializer. Make the same number of input nodes as
        # there are vocabulary words and create a single output node.
        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        # Build the word vocabulary from the training reviews.
        review_vocab = set()
        # TODO: populate review_vocab with all of the words in the given reviews
        #       Remember to split reviews into individual words
        #       using "split(' ')" instead of "split()".

        # Convert the vocabulary set to a list so we can access words via indices
        self.review_vocab = list(review_vocab)

        # Build the label vocabulary (just POSITIVE/NEGATIVE here).
        label_vocab = set()
        # TODO: populate label_vocab with all of the words in the given labels.
        #       There is no need to split the labels because each one is a single word.

        # Convert the label vocabulary set to a list so we can access labels via indices
        self.label_vocab = list(label_vocab)

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Create a dictionary of words in the vocabulary mapped to index positions
        self.word2index = {}
        # TODO: populate self.word2index with indices for all the words in self.review_vocab
        #       like you saw earlier in the notebook

        # Create a dictionary of labels mapped to index positions
        self.label2index = {}
        # TODO: do the same thing you did for self.word2index and self.review_vocab,
        #       but for self.label2index and self.label_vocab instead

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Store the learning rate
        self.learning_rate = learning_rate

        # Initialize weights

        # TODO: initialize self.weights_0_1 as a matrix of zeros. These are the weights between
        #       the input layer and the hidden layer.
        self.weights_0_1 = None

        # TODO: initialize self.weights_1_2 as a matrix of random values.
        #       These are the weights between the hidden layer and the output layer.
        self.weights_1_2 = None

        # TODO: Create the input layer, a two-dimensional matrix with shape
        #       1 x input_nodes, with all values initialized to zero
        self.layer_0 = np.zeros((1,input_nodes))

    def update_input_layer(self,review):
        # TODO: You can copy most of the code you wrote for update_input_layer
        #       earlier in this notebook.
        #
        #       However, MAKE SURE YOU CHANGE ALL VARIABLES TO REFERENCE
        #       THE VERSIONS STORED IN THIS OBJECT, NOT THE GLOBAL OBJECTS.
        #       For example, replace "layer_0 *= 0" with "self.layer_0 *= 0"
        pass

    def get_target_for_label(self,label):
        # TODO: Copy the code you wrote for get_target_for_label
        #       earlier in this notebook.
        pass

    def sigmoid(self,x):
        # TODO: Return the result of calculating the sigmoid activation function
        #       shown in the lectures
        pass

    def sigmoid_output_2_derivative(self,output):
        # TODO: Return the derivative of the sigmoid activation function,
        #       where "output" is the original output from the sigmoid function
        pass

    def train(self, training_reviews, training_labels):
        # make sure we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0

        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward pass,
        # updating weights for every item
        for i in range(len(training_reviews)):

            # TODO: Get the next review and its correct label

            # TODO: Implement the forward pass through the network.
            #       That means use the given review to update the input layer,
            #       then calculate values for the hidden layer,
            #       and finally calculate the output layer.
            #
            #       Do not use an activation function for the hidden layer,
            #       but use the sigmoid activation function for the output layer.

            # TODO: Implement the back propagation pass here.
            #       That means calculate the error for the forward pass's prediction
            #       and update the weights in the network according to their
            #       contributions toward the error, as calculated via the
            #       gradient descent and back propagation algorithms you
            #       learned in class.

            # TODO: Keep track of correct predictions. To determine if the prediction was
            #       correct, check that the absolute value of the output error
            #       is less than 0.5. If so, add one to the correct_so_far count.

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """
        Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0

        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict
        # its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """
        Returns a POSITIVE or NEGATIVE prediction for the given review.
        """
        # TODO: Run a forward pass through the network, like you did in the
        #       "train" function. That means use the given review to
        #       update the input layer, then calculate values for the hidden layer,
        #       and finally calculate the output layer.
        #
        #       Note: The review passed into this function for prediction
        #             might come from anywhere, so you should convert it
        #             to lower case prior to using it.

        # TODO: The output layer should now contain a prediction.
        #       Return `POSITIVE` for predictions greater-than-or-equal-to `0.5`,
        #       and `NEGATIVE` otherwise.
        pass
```
Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`.
```
# Hold out the last 1,000 reviews for testing; train on the rest (lr = 0.1).
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
```
Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set).
**We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.**
```
mlp.test(reviews[-1000:],labels[-1000:])
```
Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.
```
mlp.train(reviews[:-1000],labels[:-1000])
```
That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network.
```
# Retry with a 10x smaller learning rate (0.01) to see if training improves.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```
That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network.
```
# One more 10x reduction of the learning rate (0.001).
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
```
With a learning rate of `0.001`, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.
# End of Project 3.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Understanding Neural Noise<a id='lesson_4'></a>
The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
```
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
    """Vectorize `review` into the global layer_0 as raw word counts."""
    global layer_0
    # wipe the previous review's counts before counting this one
    layer_0 *= 0
    for token in review.split(" "):
        layer_0[0][word2index[token]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
```
# Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>
**TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:
* Copy the `SentimentNetwork` class you created earlier into the following cell.
* Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used.
```
# TODO: -Copy the SentimentNetwork class from Project 3 lesson
#       -Modify it to reduce noise, like in the video
```
Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`.
```
# With noise-reduced (binary) inputs we can go back to the higher 0.1 learning rate.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
```
That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
# End of Project 4.
## Andrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.
# Analyzing Inefficiencies in our Network<a id='lesson_5'></a>
The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
```
Image(filename='sentiment_network_sparse.png')

# A toy 10-unit input layer: all zeros except two "active word" positions.
layer_0 = np.zeros(10)
layer_0
layer_0[4] = 1
layer_0[9] = 1
layer_0

# Random 10x5 weight matrix between the input and hidden layers.
weights_0_1 = np.random.randn(10,5)

# Full matrix product: multiplies and sums mostly zeros — wasteful.
layer_0.dot(weights_0_1)

# Equivalent, cheaper computation: only touch the weight rows whose
# input is non-zero.
indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
    layer_1 += (1 * weights_0_1[index])
layer_1

Image(filename='sentiment_network_sparse_2.png')

# Since the active inputs are all 1s, even the multiply-by-1 can be dropped.
layer_1 = np.zeros(5)
for index in indices:
    layer_1 += (weights_0_1[index])
layer_1
```
# Project 5: Making our Network More Efficient<a id='project_5'></a>
**TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:
* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Remove the `update_input_layer` function - you will not need it in this version.
* Modify `init_network`:
>* You no longer need a separate input layer, so remove any mention of `self.layer_0`
>* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero
* Modify `train`:
>* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step.
>* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review.
>* Remove call to `update_input_layer`
>* Use `self`'s `layer_1` instead of a local `layer_1` object.
>* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review.
>* When updating `weights_0_1`, only update the individual weights that were used in the forward pass.
* Modify `run`:
>* Remove call to `update_input_layer`
>* Use `self`'s `layer_1` instead of a local `layer_1` object.
>* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review.
```
# TODO: -Copy the SentimentNetwork class from Project 4 lesson
# -Modify it according to the above instructions
```
Run the following cell to recreate the network and train it once again.
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
```
That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
# End of Project 5.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Further Noise Reduction<a id='lesson_6'></a>
```
Image(filename='sentiment_network_sparse_2.png')

# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()

# FIX: np.histogram rejects passing both `density` and the long-deprecated
# `normed` argument (and `normed` has been removed entirely in modern NumPy);
# `density=True` alone produces the normalized histogram intended here.
hist, edges = np.histogram(list(map(lambda x: x[1], pos_neg_ratios.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)

# Count how many distinct words occur at each corpus frequency.
frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1

# Same `normed` fix as above: `density=True` alone.
hist, edges = np.histogram(list(map(lambda x: x[1], frequency_frequency.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
```
# Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>
**TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:
* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Modify `pre_process_data`:
>* Add two additional parameters: `min_count` and `polarity_cutoff`
>* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
>* Andrew's solution only calculates a positive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
>* Change so words are only added to the vocabulary if they occur in the vocabulary more than `min_count` times.
>* Change so words are only added to the vocabulary if the absolute value of their positive-to-negative ratio is at least `polarity_cutoff`
* Modify `__init__`:
>* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`
```
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
# -Modify it according to the above instructions
```
Run the following cell to train your network with a small polarity cutoff.
```
# Small polarity cutoff (0.05): keeps most words, filtering only the most neutral.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```
And run the following cell to test its performance.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
Run the following cell to train your network with a much larger polarity cutoff.
```
# Much larger polarity cutoff (0.8): far smaller vocabulary, so faster training
# (possibly at some cost in accuracy).
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```
And run the following cell to test its performance.
```
mlp.test(reviews[-1000:],labels[-1000:])
```
# End of Project 6.
## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# Analysis: What's Going on in the Weights?<a id='lesson_7'></a>
```
# Train a network on the full vocabulary (no noise filtering) so we can
# inspect what its weights learned.
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])
Image(filename='sentiment_network_sparse.png')
def get_most_similar_words(focus = "horrible"):
    """Rank every vocabulary word by the dot product of its input->hidden
    weight vector with that of `focus` (a crude cosine-free similarity)."""
    focus_vector = mlp_full.weights_0_1[mlp_full.word2index[focus]]
    most_similar = Counter()
    for candidate, idx in mlp_full.word2index.items():
        most_similar[candidate] = np.dot(mlp_full.weights_0_1[idx], focus_vector)
    return most_similar.most_common()
get_most_similar_words("excellent")
get_most_similar_words("terrible")
import matplotlib.colors as colors
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
pos = 0
neg = 0
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
if(pos_neg_ratios[word] > 0):
pos+=1
colors_list.append("#00ff00")
else:
neg+=1
colors_list.append("#000000")
from sklearn.manifold import TSNE
# Project the word weight vectors to 2-D with t-SNE and plot interactively.
# NOTE(review): figure/ColumnDataSource/LabelSet/show are bokeh names that
# must have been imported earlier in the notebook — confirm against full file.
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
                                    x2=words_top_ted_tsne[:,1],
                                    names=words_to_visualize,
                                    color=colors_list))
p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
                       text_font_size="8pt", text_color="#555555",
                       source=source, text_align='center')
p.add_layout(word_labels)
show(p)
# green indicates positive words, black indicates negative words
```
| github_jupyter |
# CPE 646 Final Project
## Live Memetic Detection
```
import os
from PIL import Image
import numpy as np
from numpy import *
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# Input image dimensions (pixels).
img_rows = 150
img_cols = 150
# number of channels (1 = grayscale)
img_channels = 1
path1 = './images/train' # path of folder of raw training images
path2 = './data/train'   # path of folder to save preprocessed images
listing = os.listdir(path1)
# NOTE: size() comes from `from numpy import *`; equivalent to len(listing).
num_samples=size(listing)
print(num_samples)
# Resize every training image to img_rows x img_cols and convert to
# grayscale ('L' mode), saving the result under ./data/train as JPEG.
for file in listing:
    im = Image.open(path1 + '/' + file)
    img = im.resize((img_rows,img_cols))
    gray = img.convert('L')
    gray.save(path2 +'/' + file, "JPEG")
imlist = os.listdir(path2)
im1 = array(Image.open(path2 + '/'+ imlist[0])) # open one image to get its size
m,n = im1.shape[0:2] # height/width of the preprocessed images
imnbr = len(imlist) # number of images
# Matrix of flattened images: one row per image, 'f' (float32) dtype.
immatrix = array([array(Image.open(path2 + '/' + im2)).flatten()
                  for im2 in imlist],'f')
# Create the label array from filenames: a 'doge' image is class 1,
# everything else class 0.  enumerate() replaces the manual `index` counter
# that the original incremented identically in both if/else branches.
label=np.ones((num_samples,),dtype = int)
for index, im in enumerate(imlist):
    label[index] = 1 if 'doge' in im else 0
# Shuffle samples and labels together (fixed seed) to reduce ordering skew.
data,Label = shuffle(immatrix,label, random_state=2)
train_data = [data,Label]
# Sanity-check: display one sample image with its label.
img=immatrix[32].reshape(img_rows,img_cols)
plt.imshow(img)
plt.imshow(img,cmap='gray')
plt.title('Class '+ str(label[32]))
print(train_data[0].shape)
print(train_data[1].shape)
# number of output classes (binary: doge vs. not-doge)
nb_classes = 2
# Organize data into samples and labels.
(X, y) = (train_data[0],train_data[1])
# Hold out 20% of the data for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
# Reshape flat rows to (N, H, W, 1) and scale pixel values to [0, 1].
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# One-hot encode the class labels for categorical_crossentropy.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# Visual sanity-check of one training sample.
img = X_train[10].reshape(img_rows,img_cols)
plt.imshow(img)
plt.imshow(img,cmap='gray')
model = Sequential()
# CNN loosely based on AlexNet: two stacked conv blocks, each ending in
# max-pooling, followed by a small fully connected classifier.
model.add(Conv2D(32, (3, 3), input_shape=(img_rows, img_cols,1)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4,4)))
model.add(Conv2D(64,(3, 3)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Flatten feature maps before the dense layers.
model.add(Flatten())
# Fully connected layers with light dropout for regularization.
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Two-way softmax output matching nb_classes.
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()
# Augment training images with small rotations/shifts/shears/zooms;
# the test generator applies no augmentation.
gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
                         height_shift_range=0.08, zoom_range=0.08)
test_gen = ImageDataGenerator()
batch = 128
train_generator = gen.flow(X_train, Y_train, batch_size=batch)
test_generator = test_gen.flow(X_test, Y_test, batch_size=batch)
# Train for 100 epochs, validating against the (un-augmented) test split.
model.fit_generator(train_generator, steps_per_epoch=X_train.shape[0]//batch, epochs=100,
                    validation_data=test_generator, validation_steps=X_test.shape[0]//batch)
# Preprocess the held-out test images exactly like the training set.
path3 = './images/test'
path4 = './data/test'
listingT = os.listdir(path3)
num_samplesT = size(listingT)
print(num_samplesT)
for file in listingT:
    imT = Image.open(path3 + '/' + file)
    imgT = imT.resize((img_rows,img_cols))
    grayT = imgT.convert('L')
    # need to do some more processing here
    grayT.save(path4 +'/' + file, "JPEG")
imlistT = os.listdir(path4)
imT = array(Image.open(path4 + '/'+ imlistT[0])) # open one image to get its size
mT,nT = imT.shape[0:2] # height/width of the test images
imnbrT = len(imlistT) # number of test images
# Matrix of flattened test images (one row per image).
immatrixT = array([array(Image.open(path4 + '/' + imT)).flatten()
                   for imT in imlistT],'f')
# 185 Doge samples, 600 dog samples
# Label test images from their filenames: 'doge' -> class 1, else class 0.
# enumerate() replaces the manual counter that was incremented identically
# in both branches of the original if/else.
labelT = np.ones((num_samplesT,),dtype = int)
for index, imT in enumerate(imlistT):
    labelT[index] = 1 if 'doge' in imT else 0
#labelT[0:12]=1
#labelT[13:24]=0
train_dataT = [immatrixT,labelT]
# Sanity-check one test sample and its label.
imgT=immatrixT[20].reshape(img_rows,img_cols)
plt.imshow(imgT)
plt.imshow(imgT,cmap='gray')
plt.title('Class '+ str(labelT[20]))
print(train_dataT[0].shape)
print(train_dataT[1].shape)
# Reshape / one-hot / scale the test set the same way as the training data.
(XT, yT) = (train_dataT[0],train_dataT[1])
X_val = XT.reshape(XT.shape[0], img_rows, img_cols, 1)
Y_val = np_utils.to_categorical(yT, nb_classes)
X_val = X_val.astype('float32')
X_val /= 255
print('X_train shape:', X_val.shape)
print(X_val.shape[0], 'train samples')
# Evaluate, then tabulate per-image predictions against the true labels.
score = model.evaluate(X_val, Y_val)
print()
print('Test accuracy: ', score[1])
# NOTE(review): predict_classes was removed in later Keras versions;
# np.argmax(model.predict(X_val), axis=-1) is the modern equivalent.
predictions = model.predict_classes(X_val)
predictions = list(predictions)
actuals = list(Y_val)
sub = pd.DataFrame({'Actual': labelT, 'Predictions': predictions})
print(sub)
```
| github_jupyter |
```
#import the necessary modules
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy
import sklearn
import itertools
from itertools import cycle
import os.path as op
import timeit
import json
import math
import multiprocessing as m_proc
m_proc.cpu_count()
# Import MDAnalysis
import MDAnalysis as mda
import statsmodels as stats
from MDAnalysis.analysis import polymer, distances, rdf
import matplotlib.font_manager as font_manager
from polymer_MD_analysis import pers_length, get_rg_pers_poly, bavg_pers_cnt
```
## PEG/dmso system analysis
### N = 6 PEG/DMSO
```
# For a correct Rg calculation with MDAnalysis, use a trajectory with
# periodic-boundary effects removed (solvent stripped, chain made whole).
n6_peg_dmso = mda.Universe("n6peg_dmso/n6pegonly_dmso.pdb", "n6peg_dmso/nodmso_n6peg.xtc")
n6_peg_dmso.trajectory
len(n6_peg_dmso.trajectory)
# Select the polymer heavy atoms (exclude hydrogens).
peg_n6dmso = n6_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H")
crv_n6peg_dmso = pers_length(peg_n6dmso,6)
crv_n6peg_dmso
# Monomer 1-2 centre-of-mass separation per frame -> mean "bond" length l_b.
# The resid selections are static, so build the AtomGroups once outside the
# frame loop instead of re-selecting every frame; their centres of mass
# still track the current trajectory frame.
com_bond = np.zeros(shape=(1,18000))
n6_mon1_dmso = n6_peg_dmso.select_atoms("resid 1")
n6_mon2_dmso = n6_peg_dmso.select_atoms("resid 2")
for count, ts in enumerate(n6_peg_dmso.trajectory[0:18000]):
    # COM-COM distance with the periodic box passed in.
    oo_len = mda.analysis.distances.distance_array(
        n6_mon1_dmso.center_of_mass(), n6_mon2_dmso.center_of_mass(),
        box=n6_peg_dmso.trajectory.ts.dimensions)
    com_bond[0, count] = oo_len
com_bond
lb_avg_pn6 = np.mean(com_bond)  # ensemble-average bond length
lb_avg_pn6
np.std(com_bond)
```
### Radius of Gyration vs. time N = 6 PEG/dmso
```
# Per-frame Rg, bond-vector correlations and ensemble Rg for the N = 6 chain
# over the first 18000 frames (frame/100 -> ns below, i.e. 180 ns total).
n6peg_rgens_dmso, cor_n6peg_dmso, N6peg_cos_dmso, rgdmso_n6peg = get_rg_pers_poly(peg_n6dmso, n6_peg_dmso, 0, 18000)
n6peg_rgens_dmso[0].shape
cor_n6peg_dmso[3]
N6peg_cos_dmso
rgdmso_n6peg
np.std(n6peg_rgens_dmso)
trj_len = np.arange(18000)
#trj_len += 1
trj_len
# Rg(t): frame index / 100 -> ns, Angstrom / 10 -> nm.
plt.figure(figsize=(7,7))
plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)
plt.xlabel(r'Time [ns]', fontsize=15)
plt.ylabel(r'$R_{g}$ [nm]', fontsize=15)
plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')
plt.tick_params(labelsize=14)
plt.legend(['N = 6 in DMSO'], frameon=False, fontsize=14)
#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')
plt.xlim(0,180)
plt.ylim(0.2,2)
```
#### Correlation values at each arc length for the whole 180 ns trajectory, N = 6 PEG/dmso
```
# x values: arc length (bond-count separation x mean bond length, Angstrom).
blen_dmso = cor_n6peg_dmso[3]*lb_avg_pn6
#nt_tt[0] = 0
blen_dmso
# Error propagated into the natural log: sigma(ln y) = sigma(y)/y.
mk_n6p_dmso = cor_n6peg_dmso[1]/cor_n6peg_dmso[0]
mk_n6p_dmso
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='b', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
# Block-averaged persistence length / Rg: 5 blocks over frames 3000-18000.
# (All points give the best fits for this chain.)
n6_blkspeg_dmso , n6peg_lpdmso = bavg_pers_cnt(5, peg_n6dmso, n6_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)
n6_blkspeg_dmso
n6peg_lpdmso
n6peg_lpdmso[2]
np.mean(n6peg_lpdmso[3])
def line_fit(slope, x):
    """Straight line through the origin: y = slope * x.

    Works elementwise when `x` is a numpy array, so it can generate a
    fitted decay line over a whole arc-length grid at once.
    """
    return x * slope
blen_dmso
# Best-fit line through the origin using the mean fitted slope from
# bavg_pers_cnt, evaluated on the N = 6 arc-length grid.
gg_n6peg_dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_dmso)
gg_n6peg_dmso
```
### Block averaged Radius of gyration and persistence length, N = 6 PEG/DMSO
```
# Block-averaged persistence length and Rg statistics (mean / std over blocks).
np.mean(n6_blkspeg_dmso["Avg persistence length"])
np.std(n6_blkspeg_dmso["Avg persistence length"])
np.mean(n6_blkspeg_dmso["Avg Radius of gyration"])
np.std(n6_blkspeg_dmso["Avg Radius of gyration"])
# ln<cos(theta)> vs arc length with the fitted decay line overlaid.
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.plot(blen_dmso, gg_n6peg_dmso, color='#1F2E69')
plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01)
plt.xlabel(r'Bond Length', fontsize=15)
plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15)
#plt.ylim(-1.9,0)
font = font_manager.FontProperties(family='Arial', style='normal', size='14')
plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font)
plt.tick_params(labelsize=14)
#plt.text(0.5, -6.94,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69')
# Start the summary DataFrames that accumulate results for all chain lengths.
rgpeg_olig_dmso = pd.DataFrame(data=n6_blkspeg_dmso["Avg Radius of gyration"], columns=['$R_{g}$ [Angstrom] N = 6 PEG DMSO'])
rgpeg_olig_dmso
pers_pegt_dmso = pd.DataFrame(data=n6_blkspeg_dmso["Avg persistence length"], columns=[r"$L_{p}$ [Angstrom] N = 6 PEG DMSO "])
pers_pegt_dmso
```
### N = 8 PEG/DMSO
```
# For a correct Rg calculation with MDAnalysis, use a trajectory with
# periodic-boundary effects removed (solvent stripped, chain made whole).
n8_peg_dmso = mda.Universe("n8peg_dmso/n8pegonly_dmso.pdb", "n8peg_dmso/nodmso_n8peg.xtc")
n8_peg_dmso.trajectory
len(n8_peg_dmso.trajectory)
# Select the polymer heavy atoms (exclude hydrogens).
peg_n8dmso = n8_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H")
crv_n8peg_dmso = pers_length(peg_n8dmso,8)
crv_n8peg_dmso
# Monomer 1-2 COM separation per frame for the N = 8 chain.  The static
# resid selections are built once outside the frame loop rather than being
# re-selected every iteration; centres of mass still track each frame.
com_bond_n8dmso = np.zeros(shape=(1,18000))
n8_mon1_dmso = n8_peg_dmso.select_atoms("resid 1")
n8_mon2_dmso = n8_peg_dmso.select_atoms("resid 2")
for count, ts in enumerate(n8_peg_dmso.trajectory[0:18000]):
    oo_len = mda.analysis.distances.distance_array(
        n8_mon1_dmso.center_of_mass(), n8_mon2_dmso.center_of_mass(),
        box=n8_peg_dmso.trajectory.ts.dimensions)
    com_bond_n8dmso[0, count] = oo_len
# N = 6 values echoed again for comparison with the new N = 8 statistics.
com_bond
lb_avg_pn6
np.std(com_bond)
np.mean(com_bond_n8dmso)
np.std(com_bond_n8dmso)
```
### Radius of Gyration vs. time N = 8 PEG/dmso
```
# Per-frame Rg and bond correlations for the N = 8 chain (180 ns).
n8peg_rgens_dmso, cor_n8peg_dmso, N8peg_cos_dmso, rgdmso_n8peg = get_rg_pers_poly(peg_n8dmso, n8_peg_dmso, 0, 18000)
n8peg_rgens_dmso[0].shape
cor_n8peg_dmso[3]
N8peg_cos_dmso
rgdmso_n8peg
np.std(n8peg_rgens_dmso)
trj_len = np.arange(18000)
#trj_len += 1
trj_len
# Overlay Rg(t) for N = 6 and N = 8 (frame/100 -> ns, Angstrom/10 -> nm).
plt.figure(figsize=(7,7))
plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)
plt.xlabel(r'Time [ns]', fontsize=15)
plt.ylabel(r'$R_{g}$ [nm]', fontsize=15)
plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')
plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')
plt.tick_params(labelsize=14)
plt.legend(['N = 6 in DMSO','N = 8 in DMSO'], frameon=False, fontsize=14)
#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')
plt.xlim(0,180)
plt.ylim(0.2,2)
```
#### Correlation values at each arc length for the whole 180 ns trajectory, N = 8 PEG/dmso
```
# x values: arc length using the N = 6 mean bond length.
# NOTE(review): lb_avg_pn6 (from N = 6) is reused for every chain length;
# presumably l_b is treated as chain-length independent — confirm.
blen_n8dmso = cor_n8peg_dmso[3]*lb_avg_pn6
#nt_tt[0] = 0
blen_n8dmso
# Error propagated into the natural log: sigma(ln y) = sigma(y)/y.
mk_n8p_dmso = cor_n8peg_dmso[1]/cor_n8peg_dmso[0]
mk_n8p_dmso
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.legend(['N = 6 in DMSO','N = 8 in DMSO'], frameon=False, fontsize=14)
# Block-averaged fit: 5 blocks over frames 3000-18000.
n8_blkspeg_dmso , n8peg_lpdmso = bavg_pers_cnt(5, peg_n8dmso, n8_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)
n8_blkspeg_dmso
n8peg_lpdmso
n8peg_lpdmso[2]
np.mean(n8peg_lpdmso[3])
blen_dmso
blen_n8dmso
# Fit lines (each chain's mean fitted slope) evaluated on the N = 8 x grid.
gg_n8peg_dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n8dmso)
gg_n6peg_n8dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n8dmso)
gg_n8peg_dmso
```
### Block averaged Radius of gyration and persistence length, N = 8 PEG/DMSO
```
# Block-averaged persistence length and Rg statistics for N = 8.
np.mean(n8_blkspeg_dmso["Avg persistence length"])
np.std(n8_blkspeg_dmso["Avg persistence length"])
np.mean(n8_blkspeg_dmso["Avg Radius of gyration"])
np.std(n8_blkspeg_dmso["Avg Radius of gyration"])
# Combined ln<cos(theta)> plot for N = 6 and N = 8 with fitted decay lines.
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.plot(blen_n8dmso, gg_n6peg_n8dmso, color='#1F2E69')
plt.plot(blen_n8dmso, gg_n8peg_dmso, color='#4C80ED')
plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01)
plt.xlabel(r'Bond Length', fontsize=15)
plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15)
plt.ylim(-6,1)
plt.xlim(0,30)
#plt.ylim(-1.9,0)
font = font_manager.FontProperties(family='Arial', style='normal', size='14')
#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font)
plt.tick_params(labelsize=14)
plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69')
plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED')
# Append the N = 8 block results to the running summary DataFrames.
rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 8 PEG DMSO "] = n8_blkspeg_dmso["Avg Radius of gyration"]
rgpeg_olig_dmso
pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 8 PEG DMSO "] = n8_blkspeg_dmso["Avg persistence length"]
pers_pegt_dmso
```
### N = 10 PEG/DMSO
```
# For a correct Rg calculation with MDAnalysis, use a trajectory with
# periodic-boundary effects removed (solvent stripped, chain made whole).
n10_peg_dmso = mda.Universe("n10peg_dmso/n10pegonly_dmso.pdb", "n10peg_dmso/nodmso_n10peg.xtc")
n10_peg_dmso.trajectory
len(n10_peg_dmso.trajectory)
# Select the polymer heavy atoms (exclude hydrogens).
peg_n10dmso = n10_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H")
crv_n10peg_dmso = pers_length(peg_n10dmso,10)
crv_n10peg_dmso
# Monomer 1-2 COM separation per frame for the N = 10 chain.  Static resid
# selections are hoisted out of the frame loop; COMs still track each frame.
com_bond_n10dmso = np.zeros(shape=(1,18000))
n10_mon1_dmso = n10_peg_dmso.select_atoms("resid 1")
n10_mon2_dmso = n10_peg_dmso.select_atoms("resid 2")
for count, ts in enumerate(n10_peg_dmso.trajectory[0:18000]):
    oo_len = mda.analysis.distances.distance_array(
        n10_mon1_dmso.center_of_mass(), n10_mon2_dmso.center_of_mass(),
        box=n10_peg_dmso.trajectory.ts.dimensions)
    com_bond_n10dmso[0, count] = oo_len
# N = 6 values echoed again for comparison with the new N = 10 statistics.
com_bond
lb_avg_pn6
np.std(com_bond)
np.mean(com_bond_n10dmso)
np.std(com_bond_n10dmso)
```
### Radius of Gyration vs. time N = 10 PEG/dmso
```
# Per-frame Rg and bond correlations for the N = 10 chain (180 ns).
n10peg_rgens_dmso, cor_n10peg_dmso, N10peg_cos_dmso, rgdmso_n10peg = get_rg_pers_poly(peg_n10dmso, n10_peg_dmso, 0, 18000)
n10peg_rgens_dmso[0].shape
cor_n10peg_dmso[3]
N10peg_cos_dmso
rgdmso_n10peg
np.std(n10peg_rgens_dmso)
trj_len = np.arange(18000)
#trj_len += 1
trj_len
# Overlay Rg(t) for N = 6, 8 and 10.
plt.figure(figsize=(7,7))
plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)
plt.xlabel(r'Time [ns]', fontsize=15)
plt.ylabel(r'$R_{g}$ [nm]', fontsize=15)
plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')
plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')
plt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC')
plt.tick_params(labelsize=14)
plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO' ], frameon=False, fontsize=14)
#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')
plt.xlim(0,180)
plt.ylim(0.2,2)
```
#### Correlation values at each arc length for the whole 180 ns trajectory, N = 10 PEG/dmso
```
# x values: arc length using the N = 6 mean bond length (reused for all N).
blen_n10dmso = cor_n10peg_dmso[3]*lb_avg_pn6
#nt_tt[0] = 0
blen_n10dmso
# Error propagated into the natural log: sigma(ln y) = sigma(y)/y.
mk_n10p_dmso = cor_n10peg_dmso[1]/cor_n10peg_dmso[0]
mk_n10p_dmso
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO'], frameon=False, fontsize=14)
# Block-averaged fit: 5 blocks over frames 3000-18000.
n10_blkspeg_dmso , n10peg_lpdmso = bavg_pers_cnt(5, peg_n10dmso, n10_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)
n10_blkspeg_dmso
n10peg_lpdmso
n10peg_lpdmso[2]
np.mean(n10peg_lpdmso[3])
blen_dmso
blen_n10dmso
# Fit lines (each chain's mean fitted slope) evaluated on the N = 10 x grid.
gg_n10peg_dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n10dmso)
gg_n6peg_n10dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n10dmso)
gg_n8peg_n10dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n10dmso)
gg_n10peg_dmso
```
### Block averaged Radius of gyration and persistence length, N = 10 PEG/DMSO
```
# Block-averaged persistence length and Rg statistics for N = 10.
np.mean(n10_blkspeg_dmso["Avg persistence length"])
np.std(n10_blkspeg_dmso["Avg persistence length"])
np.mean(n10_blkspeg_dmso["Avg Radius of gyration"])
np.std(n10_blkspeg_dmso["Avg Radius of gyration"])
# Combined ln<cos(theta)> plot for N = 6, 8, 10 with fitted decay lines.
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.plot(blen_n10dmso, gg_n6peg_n10dmso, color='#1F2E69')
plt.plot(blen_n10dmso, gg_n8peg_n10dmso, color='#4C80ED')
plt.plot(blen_n10dmso, gg_n10peg_dmso, color='#8C52FC')
plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01)
plt.xlabel(r'Bond Length', fontsize=15)
plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15)
plt.ylim(-6,1)
plt.xlim(0,30)
#plt.ylim(-1.9,0)
font = font_manager.FontProperties(family='Arial', style='normal', size='14')
#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font)
plt.tick_params(labelsize=14)
plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69')
plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED')
plt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\AA$ ± 2.31 $\AA$', fontsize=15, color='#8C52FC')
# Append the N = 10 block results to the running summary DataFrames.
rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 10 PEG DMSO "] = n10_blkspeg_dmso["Avg Radius of gyration"]
rgpeg_olig_dmso
pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 10 PEG DMSO "] = n10_blkspeg_dmso["Avg persistence length"]
pers_pegt_dmso
```
### N = 20 PEG/DMSO
```
# For a correct Rg calculation with MDAnalysis, use a trajectory with
# periodic-boundary effects removed (solvent stripped, chain made whole).
n20_peg_dmso = mda.Universe("n20peg_dmso/n20pegonly_dmso.pdb", "n20peg_dmso/nodmso_n20peg.xtc")
n20_peg_dmso.trajectory
len(n20_peg_dmso.trajectory)
# Select the polymer heavy atoms (exclude hydrogens).
peg_n20dmso = n20_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H")
crv_n20peg_dmso = pers_length(peg_n20dmso,20)
crv_n20peg_dmso
# Monomer 1-2 COM separation per frame for the N = 20 chain.  Static resid
# selections are hoisted out of the frame loop; COMs still track each frame.
com_bond_n20dmso = np.zeros(shape=(1,18000))
n20_mon1_dmso = n20_peg_dmso.select_atoms("resid 1")
n20_mon2_dmso = n20_peg_dmso.select_atoms("resid 2")
for count, ts in enumerate(n20_peg_dmso.trajectory[0:18000]):
    oo_len = mda.analysis.distances.distance_array(
        n20_mon1_dmso.center_of_mass(), n20_mon2_dmso.center_of_mass(),
        box=n20_peg_dmso.trajectory.ts.dimensions)
    com_bond_n20dmso[0, count] = oo_len
# N = 6 values echoed again for comparison with the new N = 20 statistics.
com_bond
lb_avg_pn6
np.std(com_bond)
np.mean(com_bond_n20dmso)
np.std(com_bond_n20dmso)
```
### Radius of Gyration vs. time N = 20 PEG/dmso
```
# Per-frame Rg and bond correlations for the N = 20 chain (180 ns).
n20peg_rgens_dmso, cor_n20peg_dmso, N20peg_cos_dmso, rgdmso_n20peg = get_rg_pers_poly(peg_n20dmso, n20_peg_dmso, 0, 18000)
n20peg_rgens_dmso[0].shape
cor_n20peg_dmso[3]
N20peg_cos_dmso
rgdmso_n20peg
np.std(n20peg_rgens_dmso)
# Overlay Rg(t) for N = 6, 8, 10 and 20 (trj_len reused from earlier cells).
plt.figure(figsize=(7,7))
plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)
plt.xlabel(r'Time [ns]', fontsize=15)
plt.ylabel(r'$R_{g}$ [nm]', fontsize=15)
plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')
plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')
plt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC')
plt.plot(trj_len/100, n20peg_rgens_dmso[0]/10,linewidth=2, color='#8B7F47')
plt.tick_params(labelsize=14)
plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO'], frameon=False, fontsize=14)
#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')
plt.xlim(0,180)
plt.ylim(0.2,2)
```
#### Correlation values at each arc length for the whole 180 ns trajectory, N = 20 PEG/dmso
```
# x values: arc length using the N = 6 mean bond length (reused for all N).
blen_n20dmso = cor_n20peg_dmso[3]*lb_avg_pn6
#nt_tt[0] = 0
blen_n20dmso
# Error propagated into the natural log: sigma(ln y) = sigma(y)/y.
mk_n20p_dmso = cor_n20peg_dmso[1]/cor_n20peg_dmso[0]
mk_n20p_dmso
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO'], frameon=False, fontsize=14)
# Block-averaged fit: 5 blocks over frames 3000-18000.
n20_blkspeg_dmso , n20peg_lpdmso = bavg_pers_cnt(5, peg_n20dmso, n20_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)
n20_blkspeg_dmso
n20peg_lpdmso
n20peg_lpdmso[2]
np.mean(n20peg_lpdmso[3])
blen_dmso
blen_n20dmso
# Fit lines (each chain's mean fitted slope) evaluated on the N = 20 x grid.
gg_n20peg_dmso = line_fit(np.mean(n20peg_lpdmso[2]),blen_n20dmso)
gg_n6peg_n20dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n20dmso)
gg_n8peg_n20dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n20dmso)
gg_n10peg_n20dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n20dmso)
gg_n20peg_dmso
```
### Block averaged Radius of gyration and persistence length, N = 20 PEG/DMSO
```
# Block-averaged persistence length and Rg statistics for N = 20.
np.mean(n20_blkspeg_dmso["Avg persistence length"])
np.std(n20_blkspeg_dmso["Avg persistence length"])
np.mean(n20_blkspeg_dmso["Avg Radius of gyration"])
np.std(n20_blkspeg_dmso["Avg Radius of gyration"])
# Combined ln<cos(theta)> plot for N = 6-20; fit lines drawn only over the
# first 15 arc-length points to stay in the initial-decay region.
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.plot(blen_n20dmso[:15], gg_n6peg_n20dmso[:15], color='#1F2E69')
plt.plot(blen_n20dmso[:15], gg_n8peg_n20dmso[:15], color='#4C80ED')
plt.plot(blen_n20dmso[:15], gg_n10peg_n20dmso[:15], color='#8C52FC')
plt.plot(blen_n20dmso[:15], gg_n20peg_dmso[:15], color='#8B7F47')
plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01)
plt.xlabel(r'Bond Length', fontsize=15)
plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15)
plt.ylim(-6,1)
plt.xlim(0,70)
#plt.ylim(-1.9,0)
font = font_manager.FontProperties(family='Arial', style='normal', size='14')
#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font)
plt.tick_params(labelsize=14)
plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69')
plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED')
plt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\AA$ ± 2.31 $\AA$', fontsize=15, color='#8C52FC')
plt.text(0.5, -4.90,r'$N_{PEG}$ = 20: $L_{p}$ = 22.9 $\AA$ ± 1.21 $\AA$', fontsize=15, color='#8B7F47')
# Append the N = 20 block results to the running summary DataFrames.
rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 20 PEG DMSO "] = n20_blkspeg_dmso["Avg Radius of gyration"]
rgpeg_olig_dmso
pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 20 PEG DMSO "] = n20_blkspeg_dmso["Avg persistence length"]
pers_pegt_dmso
```
### N = 30 PEG/DMSO
```
# For a correct Rg calculation with MDAnalysis, use a trajectory with
# periodic-boundary effects removed (solvent stripped, chain made whole).
n30_peg_dmso = mda.Universe("n30peg_dmso/n30pegonly_dmso.pdb", "n30peg_dmso/nodmso_n30peg.xtc")
n30_peg_dmso.trajectory
len(n30_peg_dmso.trajectory)
# Select the polymer heavy atoms (exclude hydrogens).
peg_n30dmso = n30_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H")
crv_n30peg_dmso = pers_length(peg_n30dmso,30)
crv_n30peg_dmso
# Monomer 1-2 COM separation per frame for the N = 30 chain.  Static resid
# selections are hoisted out of the frame loop; COMs still track each frame.
com_bond_n30dmso = np.zeros(shape=(1,18000))
n30_mon1_dmso = n30_peg_dmso.select_atoms("resid 1")
n30_mon2_dmso = n30_peg_dmso.select_atoms("resid 2")
for count, ts in enumerate(n30_peg_dmso.trajectory[0:18000]):
    oo_len = mda.analysis.distances.distance_array(
        n30_mon1_dmso.center_of_mass(), n30_mon2_dmso.center_of_mass(),
        box=n30_peg_dmso.trajectory.ts.dimensions)
    com_bond_n30dmso[0, count] = oo_len
# N = 6 values echoed again for comparison with the new N = 30 statistics.
com_bond
lb_avg_pn6
np.std(com_bond)
np.mean(com_bond_n30dmso)
np.std(com_bond_n30dmso)
```
### Radius of Gyration vs. time N = 30 PEG/dmso
```
# Per-frame Rg and bond correlations for the N = 30 chain (180 ns).
n30peg_rgens_dmso, cor_n30peg_dmso, N30peg_cos_dmso, rgdmso_n30peg = get_rg_pers_poly(peg_n30dmso, n30_peg_dmso, 0, 18000)
n30peg_rgens_dmso[0].shape
cor_n30peg_dmso[3]
N30peg_cos_dmso
rgdmso_n30peg
np.std(n30peg_rgens_dmso)
# Overlay Rg(t) for all chain lengths (note the wider y-range for N = 30).
plt.figure(figsize=(7,7))
plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)
plt.xlabel(r'Time [ns]', fontsize=15)
plt.ylabel(r'$R_{g}$ [nm]', fontsize=15)
plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')
plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')
plt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC')
plt.plot(trj_len/100, n20peg_rgens_dmso[0]/10,linewidth=2, color='#8B7F47')
plt.plot(trj_len/100, n30peg_rgens_dmso[0]/10,linewidth=2, color='#63ACBE')
plt.tick_params(labelsize=14)
plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO','N = 30 in DMSO'], frameon=False, fontsize=14)
#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')
plt.xlim(0,180)
plt.ylim(0.2,3)
```
#### Correlation values at each arc length for the whole 180 ns trajectory, N = 30 PEG/dmso
```
# x values: arc length using the N = 6 mean bond length (reused for all N).
blen_n30dmso = cor_n30peg_dmso[3]*lb_avg_pn6
#nt_tt[0] = 0
blen_n30dmso
# Error propagated into the natural log: sigma(ln y) = sigma(y)/y.
mk_n30p_dmso = cor_n30peg_dmso[1]/cor_n30peg_dmso[0]
mk_n30p_dmso
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n30dmso, np.log(cor_n30peg_dmso[0]), yerr=mk_n30p_dmso, color='#63ACBE', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO','N = 30 in DMSO'], frameon=False, fontsize=14)
# Block-averaged fit: 5 blocks over frames 3000-18000.
n30_blkspeg_dmso , n30peg_lpdmso = bavg_pers_cnt(5, peg_n30dmso, n30_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)
n30_blkspeg_dmso
n30peg_lpdmso
n30peg_lpdmso[2]
np.mean(n30peg_lpdmso[3])
blen_dmso
blen_n30dmso
# Fit lines (each chain's mean fitted slope) evaluated on the N = 30 x grid.
gg_n30peg_dmso = line_fit(np.mean(n30peg_lpdmso[2]),blen_n30dmso)
gg_n6peg_n30dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n30dmso)
gg_n8peg_n30dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n30dmso)
gg_n10peg_n30dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n30dmso)
gg_n20peg_n30dmso = line_fit(np.mean(n20peg_lpdmso[2]),blen_n30dmso)
gg_n30peg_dmso
```
### Block averaged Radius of gyration and persistence length, N = 30 PEG/DMSO
```
# Block-averaged persistence length and Rg statistics for N = 30.
np.mean(n30_blkspeg_dmso["Avg persistence length"])
np.std(n30_blkspeg_dmso["Avg persistence length"])
np.mean(n30_blkspeg_dmso["Avg Radius of gyration"])
np.std(n30_blkspeg_dmso["Avg Radius of gyration"])
# Combined ln<cos(theta)> plot for all chain lengths; fit lines over the
# first 15 points only (initial-decay region).
plt.figure(figsize=(7,7))
plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
plt.errorbar(blen_n30dmso, np.log(cor_n30peg_dmso[0]), yerr=mk_n30p_dmso, color='#63ACBE', linestyle="None",marker='o',
             capsize=5, capthick=1, ecolor='black')
# NOTE(review): the first four fit lines are drawn on blen_n20dmso while the
# N = 30 line uses blen_n30dmso — likely intentional reuse, but confirm.
plt.plot(blen_n20dmso[:15], gg_n6peg_n30dmso[:15], color='#1F2E69')
plt.plot(blen_n20dmso[:15], gg_n8peg_n30dmso[:15], color='#4C80ED')
plt.plot(blen_n20dmso[:15], gg_n10peg_n30dmso[:15], color='#8C52FC')
plt.plot(blen_n20dmso[:15], gg_n20peg_n30dmso[:15], color='#8B7F47')
plt.plot(blen_n30dmso[:15], gg_n30peg_dmso[:15], color='#63ACBE')
plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01)
plt.xlabel(r'Bond Length', fontsize=15)
plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15)
plt.ylim(-6,1)
plt.xlim(0,90)
#plt.ylim(-1.9,0)
font = font_manager.FontProperties(family='Arial', style='normal', size='14')
#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font)
plt.tick_params(labelsize=14)
plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69')
plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED')
plt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\AA$ ± 2.31 $\AA$', fontsize=15, color='#8C52FC')
plt.text(0.5, -4.90,r'$N_{PEG}$ = 20: $L_{p}$ = 22.9 $\AA$ ± 1.21 $\AA$', fontsize=15, color='#8B7F47')
plt.text(0.5, -4.50,r'$N_{PEG}$ = 30: $L_{p}$ = 24.2 $\AA$ ± 1.25 $\AA$', fontsize=15, color='#63ACBE')
# Append the N = 30 block results and persist both summary tables to disk.
rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 30 PEG DMSO "] = n30_blkspeg_dmso["Avg Radius of gyration"]
rgpeg_olig_dmso
pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 30 PEG DMSO "] = n30_blkspeg_dmso["Avg persistence length"]
pers_pegt_dmso
rgpeg_olig_dmso.to_pickle("PEG_dmso_Rg.pkl")
pers_pegt_dmso.to_pickle("PEG_dmso_Lp.pkl")
```
### Flory Exponent, PEG/DMSO systems
```
# Scaling-exponent estimate: fit log10(Rg) vs. log10(N_PEG); the fitted
# slope is the exponent reported in the plot below.
n_peg = np.array([6,8,10,20,30])
# Block-averaged Rg for each oligomer length in DMSO.
rg_npeg_dmso = np.array([np.mean(n6_blkspeg_dmso["Avg Radius of gyration"])
,np.mean(n8_blkspeg_dmso["Avg Radius of gyration"]),np.mean(n10_blkspeg_dmso["Avg Radius of gyration"])
,np.mean(n20_blkspeg_dmso["Avg Radius of gyration"]),np.mean(n30_blkspeg_dmso["Avg Radius of gyration"])])
rg_npeg_dmso
# Standard deviations of log10(Rg) across blocks, used as y error bars.
rgdmso_npeg_std = np.array([np.std(np.log10(n6_blkspeg_dmso["Avg Radius of gyration"]))
,np.std(np.log10(n8_blkspeg_dmso["Avg Radius of gyration"]))
,np.std(np.log10(n10_blkspeg_dmso["Avg Radius of gyration"]))
,np.std(np.log10(n20_blkspeg_dmso["Avg Radius of gyration"]))
,np.std(np.log10(n30_blkspeg_dmso["Avg Radius of gyration"]))])
rgdmso_npeg_std
n_peg
np.log10(rg_npeg_dmso)
np.log10(n_peg)
# From fitting all points, I get best fit
from sklearn.linear_model import LinearRegression
model_vdmso = LinearRegression(fit_intercept=True)
model_vdmso.fit(np.log10(n_peg).reshape(-1,1), np.log10(rg_npeg_dmso))
# Slope here is in nanometers
print("Model slope: ", model_vdmso.coef_[0])
print("Model intercept:", model_vdmso.intercept_)
gg_dmso = model_vdmso.predict(np.log10(n_peg.reshape(-1,1)))
gg_dmso
# NOTE(review): `sklearn` and `scipy` are used as bare module names below,
# but only LinearRegression is imported here — they must be imported in an
# earlier cell; confirm, or add `import sklearn.metrics` / `import scipy.stats`.
print("Mean Std Error:", sklearn.metrics.mean_squared_error(np.log10(rg_npeg_dmso), gg_dmso))
print("R2 score:", sklearn.metrics.r2_score(np.log10(rg_npeg_dmso), gg_dmso))
# Residuals between the true y data and model y data
resid_vdmso = np.log10(rg_npeg_dmso) - gg_dmso
resid_vdmso
# How to calculate Sum((Xi - avg(X))^2): X values are the bond length values
nt_ttace = np.log10(n_peg)
nt_ttace -= np.mean(nt_ttace)
nhui_ace = nt_ttace**2
np.sum(nhui_ace)
# t-value with 95 % confidence intervals
scipy.stats.t.ppf(0.975, 4)
# How to calculate 95% confidence interval for the slope
flc_vdmso = scipy.stats.t.ppf(0.975, 4)*np.sqrt((np.sum(resid_vdmso**2)/len(resid_vdmso))/(np.sum(nhui_ace)))
flc_vdmso
# Log-log plot of Rg vs. N_PEG with the regression line and the fitted
# exponent annotated.
plt.figure(figsize=(7,7))
plt.errorbar(np.log10(n_peg), np.log10(rg_npeg_dmso), yerr=rgdmso_npeg_std, color='#A58262', linestyle="None",marker='o',
capsize=5, capthick=1, ecolor='black')
plt.plot(np.log10(n_peg), gg_dmso, color='#A58262')
# FIX: the exponent is named after Paul Flory — the title previously
# misspelled it as "Fluory".
plt.title(r'Flory Exponent', fontsize=15)
plt.xlabel(r'Log($N_{PEG}$)', fontsize=15)
plt.ylabel(r'Log($R_{g}$)', fontsize=15)
plt.tick_params(labelsize=14)
# Fitted exponent and its 95% confidence interval (values from the cell above).
plt.text(1.1, 0.75, r'$v_{DMSO}$ = 0.63 ± 0.02', fontsize=15, color='#A58262')
```
| github_jupyter |
```
# Python Libraries
%matplotlib inline
import pickle
import numpy as np
import pandas as pd
import matplotlib
from keras.datasets import cifar10
from keras import backend as K
import os,sys
#import Pillow
# Custom Networks
#from networks.lenet import LeNet
#sys.path.append('./')
from networks.pure_cnn import PureCnn
from networks.network_in_network import NetworkInNetwork
from networks.resnet import ResNet
from networks.densenet import DenseNet
from networks.wide_resnet import WideResNet
from networks.capsnet import CapsNet
# Helper functions
from differential_evolution import differential_evolution
import helper
import scipy.misc
#from scipy.misc import imsave
matplotlib.style.use('ggplot')
np.random.seed(100)  # fix the global NumPy seed so runs are reproducible
def load_results():
    """Load and return the pickled targeted-attack results from disk."""
    with open('networks/results/targeted_results.pkl', 'rb') as fh:
        return pickle.load(fh)
# Load CIFAR-10 and evaluate the candidate model(s) on the test split.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# NOTE(review): K.tensorflow_backend._get_available_gpus() is a private
# Keras/TF-1.x API used here only to list GPUs — replace under TF 2.x.
K.tensorflow_backend._get_available_gpus()
#nin = NetworkInNetwork()
resnet = ResNet()
#densenet = DenseNet()
models = [resnet]
network_stats, correct_imgs = helper.evaluate_models(models, x_test, y_test)
correct_imgs = pd.DataFrame(correct_imgs, columns=['name', 'img', 'label', 'confidence', 'pred'])
network_stats = pd.DataFrame(network_stats, columns=['name', 'accuracy', 'param_count'])
network_stats
# Load the precomputed targeted-attack results and tabulate them.
targeted = load_results()
columns = ['model', 'pixels', 'image', 'true', 'predicted', 'success', 'cdiff', 'prior_probs', 'predicted_probs', 'perturbation']
targeted_results = pd.DataFrame(targeted, columns=columns)
# Per-model, per-pixel-count attack success rates.
stats = []
for model in models:
    print(models)
    val_accuracy = np.array(network_stats[network_stats.name == model.name].accuracy)[0]
    m_result = targeted_results[targeted_results.model == model.name]
    pixels = list(set(m_result.pixels))
    print(model,pixels)
    for pixel in pixels:
        p_result = m_result[m_result.pixels == pixel]
        # success rate = successful attacks / attempted attacks at this pixel count
        success_rate = len(p_result[p_result.success]) / len(p_result)
        print(len(p_result[p_result.success]))
        print(len(p_result))
        stats.append([model.name, val_accuracy, pixel, success_rate])
#helper.attack_stats(targeted_results, models, network_stats)
model.name
def visualize_attack(df, class_names):
    """Collect successful-attack images plus their true/predicted labels.

    NOTE(review): despite the name, this function currently builds the
    arrays and returns nothing — the plotting/saving logic lives in the
    module-level cell below; confirm this stub is still needed.
    """
    _, (x_test, _) = cifar10.load_data()
    results = df[df.success]#.sample(9)
    print(results.shape)
    z = zip(results.perturbation, x_test[results.image])
    # perturb_image is defined later in this file; [0] takes the single
    # perturbed copy out of the returned batch.
    images = np.array([perturb_image(p, img)[0]
                       for p,img in z])
    labels_true = np.array(results.true)
    labels_pred = np.array(results.predicted)
    #titles = np.array(results.model)
def perturb_image(xs, img):
    """Apply one or more pixel perturbations to copies of an image.

    Parameters
    ----------
    xs : np.ndarray
        A single flat perturbation vector, or a 2-D batch of them. Each
        vector is a concatenation of 5-tuples (x, y, r, g, b): a pixel
        position to overwrite and the RGB value to write there.
    img : np.ndarray
        Source image of shape (H, W, C). It is copied, never modified.

    Returns
    -------
    np.ndarray
        Array of len(xs) perturbed copies of img.
    """
    # If this function is passed just one perturbation vector,
    # pack it in a list to keep the computation the same.
    if xs.ndim < 2:
        xs = np.array([xs])
    # Copy the image n == len(xs) times so that we can create n new
    # perturbed images. FIX: use img.ndim instead of the hard-coded
    # xs.ndim + 1 so images of any rank tile correctly.
    tile = [len(xs)] + [1] * img.ndim
    imgs = np.tile(img, tile)
    # Perturbation coordinates and colours must be integers.
    xs = xs.astype(int)
    # 'target' avoids shadowing the img parameter (the original loop did).
    for x, target in zip(xs, imgs):
        # Split x into an array of 5-tuples (perturbation pixels)
        # i.e., [[x,y,r,g,b], ...]
        pixels = np.split(x, len(x) // 5)
        for pixel in pixels:
            # At each pixel's x,y position, assign its rgb value
            x_pos, y_pos, *rgb = pixel
            target[x_pos, y_pos] = rgb
    return imgs
# print('Targeted Attack')
# helper.visualize_attack(targeted_results, class_names)
# NOTE(review): `model` here is whatever the evaluation loop above left in
# scope — fine in a notebook, fragile if this ever runs as a script.
model_id = model.name
pixel_id = 5
_, (x_test, _) = cifar10.load_data()
# Keep only successful attacks for this model at this pixel budget.
results = targeted_results[targeted_results.success]
#print(results.model)
results = results[results.model==model.name]
results = results[results.pixels==pixel_id]
print(results.shape)
z = zip(results.perturbation, x_test[results.image])
images = np.array([perturb_image(p, img)[0] for p,img in z])
labels_true = np.array(results.true)
labels_pred = np.array(results.predicted)
image_id = [id for id in results.image]
count = 0
# Write each adversarial image to disk, encoding a counter, the image id
# and the true/predicted labels in the file name.
for i in range(len(labels_true)):
    name = str(count)+'_'+str(image_id[i])+'_'+str(labels_true[i])+'_'+str(labels_pred[i])+'.png'
    image = images[i]
    print(name)
    #import cv2 as cv
    out_path = 'non_'+model_id+'_p'+str(pixel_id)+'/'
    if not os.path.exists(out_path):
        os.system("mkdir -p %s"%(out_path))
    out_name = out_path + name
    #cv.imwrite(out_name, image)
    from PIL import Image
    im = Image.fromarray(image)
    print(out_name)
    im.save(out_name)
    count += 1
    #import imageio
    #imageio.imwrite(out_name,image)
    #imsave(out_name,image)
results
```
| github_jupyter |
# Image level consistency check
```
import numpy as np
import pandas as pd
import os
import os.path
import matplotlib.pyplot as plt
import plotly.express as px
from core import *
from config import image_stats_file, xls_file, figures_dir, latex_dir, image_level_results_file, image_level_threshold
# Pandas display options: show every row and wide columns in rendered tables.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', 100)
pd.set_option('display.width', 10000)
# reading image statistics
data= pd.read_csv(image_stats_file)
# reading the summary page
methods= pd.read_excel(xls_file, engine='openpyxl')
#methods= methods.iloc[:methods[methods['key'].isnull()].index[0]]
# Keep only the primary entry per paper and index the table by citation key.
methods= methods[methods['flag'] == 'primary']
methods.index= methods['key']
# reading the image level figures: each sheet after the summary sheet holds
# one paper's per-image figures, keyed by that paper's citation key.
xl= pd.ExcelFile(xls_file, engine='openpyxl')
image_level= {}
for s in xl.sheet_names[1:]:
    image_level[s]= xl.parse(s)
print('image level figures available for: %s' % str(list(image_level.keys())))
methods.columns
methods.index
# test images with annotations #1 as ground truth
data_test= data[(data['test'] == True) & (data['annotator'] == 1)].reset_index()
# test images with annotations #2 as ground truth
data_test_obs2= data[(data['test'] == True) & (data['annotator'] == 2)].reset_index()
# extracting figures with and without FoV
data_test_with_fov= data_test[data_test['fov'] == True].reset_index(drop=True)
data_test_without_fov= data_test[data_test['fov'] == False].reset_index(drop=True)
data_test_with_fov_obs2= data_test_obs2[data_test_obs2['fov'] == True].reset_index(drop=True)
data_test_without_fov_obs2= data_test_obs2[data_test_obs2['fov'] == False].reset_index(drop=True)
data_test_with_fov
```
## Calculating the scores for all image level figures
```
# checking the consistencies at the image level
for s in image_level:
    if not s in methods.index:
        continue
    print('processing', s)
    for i, row in image_level[s].iterrows():
        image_id= row['image']
        # Positive/negative pixel counts for this image under annotator #1,
        # with and without the field-of-view mask.
        p_with_fov= data_test_with_fov[data_test_with_fov['id'] == image_id]['p'].values[0]
        n_with_fov= data_test_with_fov[data_test_with_fov['id'] == image_id]['n'].values[0]
        p_without_fov= data_test_without_fov[data_test_without_fov['id'] == image_id]['p'].values[0]
        n_without_fov= data_test_without_fov[data_test_without_fov['id'] == image_id]['n'].values[0]
        # BUG FIX: the four obs2 lookups previously selected from the
        # annotator-#1 frames (data_test_with_fov / data_test_without_fov)
        # while building the boolean mask from the annotator-#2 frames, so
        # mask and values came from different tables. Read both the mask
        # and the values from the same obs2 frame.
        p_with_fov_obs2= data_test_with_fov_obs2[data_test_with_fov_obs2['id'] == image_id]['p'].values[0]
        n_with_fov_obs2= data_test_with_fov_obs2[data_test_with_fov_obs2['id'] == image_id]['n'].values[0]
        p_without_fov_obs2= data_test_without_fov_obs2[data_test_without_fov_obs2['id'] == image_id]['p'].values[0]
        n_without_fov_obs2= data_test_without_fov_obs2[data_test_without_fov_obs2['id'] == image_id]['n'].values[0]
        # Numerical tolerance implied by the number of reported decimals.
        digits= methods.loc[s]['digits']
        if digits > 2:
            eps= 10.0**(-digits)
        else:
            eps= 10.0**(-digits)/2
        # Record the counts and the four consistency verdicts for this image.
        image_level[s].loc[i, 'n_with_fov']= n_with_fov
        image_level[s].loc[i, 'n_without_fov']= n_without_fov
        image_level[s].loc[i, 'n_with_fov_obs2']= n_with_fov_obs2
        image_level[s].loc[i, 'n_without_fov_obs2']= n_without_fov_obs2
        image_level[s].loc[i, 'p_with_fov']= p_with_fov
        image_level[s].loc[i, 'p_without_fov']= p_without_fov
        image_level[s].loc[i, 'p_with_fov_obs2']= p_with_fov_obs2
        image_level[s].loc[i, 'p_without_fov_obs2']= p_without_fov_obs2
        # consistency_image_level comes from `core` (imported at the top).
        image_level[s].loc[i, 'consistency_with_fov']= consistency_image_level(p_with_fov, n_with_fov, row['acc'], row['sens'], row['spec'], eps)
        image_level[s].loc[i, 'consistency_without_fov']= consistency_image_level(p_without_fov, n_without_fov, row['acc'], row['sens'], row['spec'], eps)
        image_level[s].loc[i, 'consistency_with_fov_obs2']= consistency_image_level(p_with_fov_obs2, n_with_fov_obs2, row['acc'], row['sens'], row['spec'], eps)
        image_level[s].loc[i, 'consistency_without_fov_obs2']= consistency_image_level(p_without_fov_obs2, n_without_fov_obs2, row['acc'], row['sens'], row['spec'], eps)
# calculating the percentages of images with a given number of negatives falling in the calculated range
for key in image_level:
    if not key in methods.index:
        continue
    # Fraction of this paper's per-image figures that pass each consistency check
    # (the *1 turns booleans into 0/1 before summing).
    methods.loc[key, 'image_level_consistency_with_fov']= np.sum(image_level[key]['consistency_with_fov']*1)/len(image_level[key])
    methods.loc[key, 'image_level_consistency_without_fov']= np.sum(image_level[key]['consistency_without_fov']*1)/len(image_level[key])
    methods.loc[key, 'n_image_level']= len(image_level[key])
```
## Printing the results of all image level figures
```
# Display each paper's per-image table (now including the consistency
# columns added above); one bare expression per citation key so every
# table renders in the notebook.
image_level['mo2017']
image_level['meng2015']
image_level['hassan2018']
image_level['tang2017']
image_level['zhu2016']
image_level['geetharamani2016']
image_level['wang2015']
image_level['singh2016']
image_level['singh2017']
image_level['saroj2020']
image_level['dash2018']
image_level['fathi2013']
image_level['imani2015']
image_level['emary2014']
image_level['waheed2015']
image_level['rahebi2014']
image_level['thangaraj2017']
image_level['adapa2020']
image_level['escorcia-gutierrez2020']
image_level['khan2016']
image_level['fraz2012b']
image_level['fraz2012']
image_level['lupascu2010']
image_level['marin2011']
image_level['ricci2007']
image_level['li2016']
image_level['barkana2017']
image_level['tamim2020']
image_level['frucci2016']
image_level['moghimirad2012']
image_level['odstrcilik2013']
image_level['dash2020']
image_level['bharkad2017']
image_level['lupascu2016']
image_level['kumar2020']
image_level['narkthewan2019']
```
## Categorization
```
# Categorise each method by which evaluation protocol (FoV-masked vs.
# all-pixels) its reported figures are consistent with.
threshold= image_level_threshold
reduced= methods[methods['image_level_consistency_with_fov'].notnull()].reset_index(drop=True)
reduced.loc[reduced['image_level_consistency_with_fov'] > threshold, 'category']= 'FoV'
reduced.loc[reduced['image_level_consistency_without_fov'] > threshold, 'category']= 'no FoV'
# Both above threshold: cannot decide, so overwrite with 'ambiguous'.
reduced.loc[(reduced['image_level_consistency_with_fov'] > threshold) & (reduced['image_level_consistency_without_fov'] > threshold), 'category']= 'ambiguous'
# No label assigned so far: the figures match neither hypothesis.
reduced.loc[(~reduced['category'].isin(['FoV',
                                        'no FoV',
                                        'ambiguous'])), 'category']= 'outlier'
```
## Analysis
```
# Count of methods per category, then inspect the ambiguous ones.
reduced[['key', 'category']].groupby('category').count()
reduced[reduced['category'] == 'ambiguous']
# preparing latex table
def prepare_key(x):
    """Format a citation key like 'smith2020' as 'Smith (2020) \\cite{smith2020}'."""
    author, year = x[:-4], x[-4:]
    capitalised = author[:1].upper() + author[1:]
    return '{} ({}) \\cite{{{}}}'.format(capitalised, year, x)
# Build the LaTeX summary table from the categorised results.
latex= reduced[['key', 'acc', 'sens', 'spec', 'digits', 'n_image_level', 'image_level_consistency_with_fov', 'image_level_consistency_without_fov', 'category']]
# NOTE(review): `latex` is a slice of `reduced`; the assignments below may
# raise SettingWithCopyWarning — consider .copy() if that matters here.
latex.loc[latex['category'] == 'no FoV', 'category']= 'all pixels'
latex['key']= latex['key'].apply(lambda x: x[0:1].upper() + x[1:])
latex['key']= latex['key'].apply(lambda x: ' \cite{' + x.lower() + '}')
#latex['key']= latex['key'].apply(prepare_key)
latex['n_image_level']= latex['n_image_level'].astype(int)
latex['digits']= latex['digits'].astype(int)
# Format metrics to four decimals with the leading zero stripped (".9473").
latex['acc']= latex['acc'].apply(lambda x: ('%.4f' % x)[1:])
latex['sens']= latex['sens'].apply(lambda x: ('%.4f' % x)[1:])
latex['spec']= latex['spec'].apply(lambda x: ('%.4f' % x)[1:])
# Consistency fractions become integer percentages.
latex['image_level_consistency_with_fov']= (latex['image_level_consistency_with_fov']*100).astype(int)
latex['image_level_consistency_without_fov']= (latex['image_level_consistency_without_fov']*100).astype(int)
latex.columns=['Key', '$\overline{acc}$', '$\overline{sens}$', '$\overline{spec}$', '\rotatebox{90}{Decimal places}', '\rotatebox{90}{Num. image level fig.}', '\rotatebox{90}{$H_{\text{FoV}}$ not rejected (\%)}', '\rotatebox{90}{$H_{\text{all}}$ not rejected (\%)}', 'Decision']
latex
# set_column_spaces comes from `core` (imported at the top).
latex_str= set_column_spaces(latex.sort_values('$\overline{acc}$', ascending=False).to_latex(escape=False, index=False), n_cols=9)
with open(os.path.join(latex_dir, "tab2.tex"), "w") as text_file:
    text_file.write(latex_str)
# Interactive scatter of accuracy vs. specificity, coloured by category.
px.scatter(reduced[reduced['category'].notnull()], x='acc', y='spec', text='key', color='category', width=1000, height=1000)
# Static version of the same scatter for the paper.
markers= ['o', 's', '+', 'x']
label_mapping= {'FoV': 'FoV', 'outlier': 'Outlier', 'no FoV': 'All pixels'}
plt.figure(figsize=(5, 4))
for i, c in enumerate(['FoV', 'no FoV', 'outlier']):
    plt.scatter(reduced[reduced['category'] == c]['acc'], reduced[reduced['category'] == c]['spec'], label=label_mapping[c], marker=markers[i], s=100)
# Reference points: annotator #2 scored against annotator #1
# (hard-coded values — presumably computed elsewhere; confirm).
plt.scatter([0.9473], [0.9725], label = 'Ann. #2 with FoV', marker='D', s=200)
plt.scatter([0.9636], [0.9818], label = 'Ann. #2 with all pixels', marker='*', s=300)
plt.xlabel('Accuracy')
plt.ylabel('Specificity')
#plt.gca().set_aspect(1.0)
plt.tight_layout()
plt.legend()
plt.savefig(os.path.join(figures_dir, 'image_level.pdf'))
plt.show()
# Merge the categorisation back into the full methods table.
methods= pd.merge(methods.reset_index(drop=True), reduced[['key', 'category']], on='key', how='left')
```
| github_jupyter |
# Data Cleaning And Feature Engineering
* Data is very dirty so we have to clean our data for analysis.
* The data also has many missing values, represented by -1 (these are important to fix).
```
import pandas as pd
# Load the raw scraped job-postings data.
data=pd.read_csv('original_data.csv')
data.head()
data.shape
#droping duplicates
data=data.drop_duplicates(data.columns)
data.shape
```
# Salary column
```
#dropping rows whose Salary Estimate is '-1', i.e. no salary provided
data=data[data['Salary Estimate'] != '-1']
data.shape
data.head(20)
#replacing the rupee symbol, 'K' -> '000', and removing thousands separators
data['Salary Estimate']=data['Salary Estimate'].apply(lambda x: x.replace('₹','').replace('K','000').replace(',',''))
data.head()
data.dtypes
data['Salary Estimate'][0:50]
#flag column: 1 if the salary is quoted per hour ('/hr'), else 0
data['hourly'] = data['Salary Estimate'].apply(lambda x: 1 if '/hr' in x.lower() else 0)
#flag column: 1 if the salary is quoted per month ('/mo'), else 0
data['monthly'] = data['Salary Estimate'].apply(lambda x: 1 if '/mo' in x.lower() else 0)
#removing the /hr and /mo suffixes now that they are captured in the flags
data['Salary Estimate']=data['Salary Estimate'].apply(lambda x: x.lower().replace('/hr','').replace('/mo',''))
#lower bound of the range, kept in case it is needed in the future
data['min_salary'] = data['Salary Estimate'].apply(lambda x: (x.split('-')[0]))
#check point: persist the cleaned frame and reload it as df
data.to_csv('clean.csv',index=False)
df=pd.read_csv('clean.csv')
def avg_salary(x):
    """Return the midpoint of a 'low-high' salary range, or the single value as a float."""
    parts = x.split('-')
    if len(parts) > 1:
        return (float(parts[0]) + float(parts[1])) / 2
    return float(parts[0])
df['avg_salary'] = df['Salary Estimate'].apply(avg_salary)
df.head()
df.shape
#hourly salary to annual (assumes ~2000 working hours per year)
df['avg_salary'] = df.apply(lambda x: x.avg_salary*2000 if x.hourly ==1 else x.avg_salary, axis =1)
#monthly salary to annual
df['avg_salary'] = df.apply(lambda x: x.avg_salary*12 if x.monthly ==1 else x.avg_salary, axis =1)
```
# Company Name Column
```
#cleaning company name: keep only the text before the first newline
df['Company Name']=df['Company Name'].apply(lambda x: x.split('\n')[0])
df.head()
```
# Founded column
```
# Rows where Founded is the -1 missing-value placeholder.
data[data['Founded']==-1]
#adding new column company_age (years since founding);
#placeholder values (x < 1, e.g. -1) are passed through unchanged
df['company_age'] = df.Founded.apply(lambda x: x if x <1 else 2020 - x)
```
# job description Column
```
import numpy as np
def clean_des(x):
    """Flatten a job description: newlines become spaces; non-strings map to NaN."""
    try:
        flattened = x.replace('\n', ' ')
    except AttributeError:
        # Missing descriptions arrive as floats (NaN) with no .replace method.
        return np.NaN
    return flattened
#cleaning job description
#missing descriptions are NaN floats; clean_des maps those back to NaN
df['Job Description']=df['Job Description'].apply(clean_des)
df.tail()
```
# Job Title Column
```
df['Job Title'].value_counts()  # inspect raw title frequencies before simplifying
def title_simplifier(title):
    """Collapse a free-text job title into one of a few canonical role buckets."""
    t = title.lower()  # lower-case once; every check below is case-insensitive
    if 'data scientist' in t or 'data science' in t:
        return 'data scientist'
    if 'data engineer' in t:
        return 'data engineer'
    if 'analyst' in t:
        return 'analyst'
    if 'machine learning' in t:
        return 'machine learning engineer'
    if 'manager' in t:
        return 'manager'
    if 'director' in t:
        return 'director'
    return 'other'
#simplifying titles: the ~282 unique raw values mostly describe the same few roles
df['job_title_simplified'] = df['Job Title'].apply(title_simplifier)
df['job_title_simplified'].value_counts()
#competitor count, kept in case it is required for analysis;
#'-1' means the field was not provided
df['number_competitors'] = df['Competitors'].apply(lambda x: len(x.split(',')) if x != '-1' else 'not provided')
df.head()
```
# Revenue Column
* Exploring the revenue column, as it can be an important feature in the analysis.
```
# replace -1 values with NaN (missing value)
df = df.replace(to_replace = -1, value = np.nan)
#null value in revenue
#df[df['Revenue']=='Unknown / Non-Applicable']
#work on a copy of Revenue so changes do not affect the original column
df['revenue']=df['Revenue']
df.head()
# Treat the 'Unknown / Non-Applicable' sentinel like the '-1' placeholder.
df['revenue']=df['revenue'].apply(lambda x: x.replace('Unknown / Non-Applicable','-1'))
```
### cleaning revenue column.
```
#stripping all the characters that are not part of the numbers
df['revenue']=df['revenue'].apply(lambda x: x.replace('₹','').replace('+','').replace('INR','').replace('()','').replace('billion',''))
#flag column: 1 if the revenue is quoted in millions, else 0
#(billion-denominated rows already had 'billion' stripped above)
df['Revenue_million'] = df['revenue'].apply(lambda x: 1 if 'million' in x.lower() else 0)
#removing the 'million' suffix now that it is captured in the flag
df['revenue']=df['revenue'].apply(lambda x: x.replace('million',''))
#normalise the range separator so 'a to b' becomes 'a-b'
df['revenue']=df['revenue'].apply(lambda x: x.replace('to','-'))
```
### Making another column for avg of revenue as original revenue have values in form of ranges but we want a specific value for analysis.
```
#there are -1 placeholders, so float() can fail on a split part — hence the try block
def avg_revenue(x):
    """Return the midpoint of a 'low-high' revenue range string.

    Returns NaN when a bound is not numeric (the '-1' placeholder splits
    into a leading empty string), and the single value as a float when
    there is no range.
    """
    lst = x.split('-')
    if len(lst) > 1:
        try:
            return (float(lst[1]) + float(lst[0])) / 2
        except ValueError:
            # FIX: was a bare `except:`; only float() parsing can fail here,
            # so catch exactly that instead of swallowing everything.
            return np.nan
    else:
        return float(lst[0])
df['avg_revenue'] = df['revenue'].apply(avg_revenue)
#### making unit of average revenue uniform:
# million-denominated rows are divided by 1000 — presumably converting to
# billions, matching the rows whose 'billion' suffix was stripped; confirm.
df['avg_revenue'] = df.apply(lambda x: x.avg_revenue/1000 if x.Revenue_million ==1 else x.avg_revenue, axis =1)
#check percentage of NaN data in every column
round((100*df.isnull().sum())/len(df.index),2)
```
#### Avg_Revenue has about 47% missing values. Conventionally, a column with more than 30% missing values would be dropped, but Revenue can be an important column for the analysis, so we will fill the missing values using advanced techniques such as KNN imputation.
#### Since we are imputing these values, there is a possibility that any analysis around revenue may be inaccurate; we will keep this in mind when examining the effect of revenue on salary.
```
#import required libraries for advanced imputation techniques
from sklearn.impute import KNNImputer
pd.set_option('display.max_rows',None)
# Feature matrix for imputation: drop everything the imputer should not use.
X=df.drop(['Company Name', 'Competitors', 'Headquarters', 'Industry',
           'Job Description', 'Job Title', 'Location','Founded','revenue',
           'Salary Estimate', 'Sector', 'Size', 'Type of ownership', 'hourly',
           'monthly', 'min_salary','Revenue','company_age','Rating','avg_salary',
           'job_title_simplified', 'number_competitors', 'Revenue_million'],axis=1)
X
imputer = KNNImputer(n_neighbors=3)
# NOTE(review): fit_transform returns one column per column of X; assigning
# it to the single 'avg_revenue' column only works if X holds exactly that
# one column after the drop above — verify X.shape.
df['avg_revenue']=imputer.fit_transform(X)
df['avg_revenue']=round(df['avg_revenue'])
df.head()
df.columns
# Drop helper columns that are no longer needed and save the final dataset.
df2=df.drop(columns=[ 'hourly', 'monthly', 'min_salary','number_competitors', 'revenue','Revenue_million'])
df2.head()
df2.to_csv('final_cleaned_data.csv',index=False)
```
| github_jupyter |
# Adult Census Income
Debanjan Chowdhury Data 602
# Frame the problem and look at the big picture
## Abstract and Summary
According to an article in the US News - A World report, they were evaluating how indidividuals did not fill out paper works for a long time and the numbers may have been misled. In the year of 2018, an inspecotr general in Rhode Island released a report where the census goers would go to the homes of individuals who had not filled out their forms in a long while. (US News) Another intersting information comes from Federal News Network. According to an article in Federal News Network, the legislatures in the house were planning to apss a bill wher eyou would have about $11,000 in fines in you lie or spread any form of misinformation in the census. This is scary and I also wanted to do this project, because this question came to my mind about why one would try lying on a census and was wondering how they would figure out. Therefore, this dataset came to my mind as it was about census and it would help me get an idea of how to cross check when someone may be lying. (Federal News Network). These two are the main sources of motivation behind why I wanted to work on this project. After seeing this information, I noticed this dataset and it contained information about individuals on a census, there data and the salary ranges of whether it is greater than 50K or less than or equal to 50K salary. The government has recently mentioned that they will provide benefits to individuals who may ahve a salary less than 50K. My colleagues have provided me with a dataset with 32561 rows or that many individuals and 15 columns initailly. The data contains information about an individuals age, education, occupation, race, gender, capital gain, capital loss, hours per week that they work and income (target). The targer data tells us if the salry is in the specific ranges as mentioned above. My role in the company as a data scientist is to verify whether the the salary information is correct and if it is accurate or if someone is bluffing on a specific income for benefits. 
Initially, I conducted data cleaning to check for missing values and marked them as unknowwn as some individuals may have been unemployed in terms of their job or profession and they may not have a salary. Therefore, I added that step. The next step was where I removed a column as it was redundant and the categorical counting of education categories started from one and not 0 which would have affected us during modelling. Further, I conducted Exploratory Data Analysis to evaluate the dataset further. Next, I conducted feature engineering to convert all letters and other non-numeric categorical values into numeric values as models would use only numeric values. I used label encoding for that. Following that step, I would scale the datasets to ensure that they all are within the same range. After those steps, I developed a logistic regression model, decision tree and random forest models where I split the data into testing and training sets and tested the x values with the targets. In order to verify the accuracy of the models, I used cross validation to check for the mean and standard devation to verify the model accuracy. The cross validation would show the mean and standard devation and that gives an idea of whether the accuracy amount may be correct or not. Following that, I used a confusion matrix and found the accruacy, precision and recall scores for each of the models. I compared the model and the Random forest came to be the best. In order to finetune the model, I removed some of the main outliers from specific columns and was able to improve scores once again. The removal of outliers also showed the random forest as highest in scores. One common thing all models showed was that the true negatives were highest, but in all cases the true positive was larger than the true negative when we went to removing outliers in the confusion matrix.
## Business Problem
Business Problem: According to an article in the US News - A World report, they are evaluating how indidividuals have not filled out paper works for a long time and the numbers may have been misled. Somewhere aorund 2018, an inspecotr general in Rhode Island released a report where the census goers would go to the homes of individuals who had not filled out their forms in a long while. (US News) This was one of my main motivations on why I wanted to look into the project. One misinformation could cause a lot of trouble in the modellings and leagal trouble also. According to an article in Federal News Network, the legislatures in the house were planning to apss a bill wher eyou would have about $11,000 in fines in you lie or spread any form of misinformation in the census. This is scary and I also wanted to do this project, because this question came to my mind about why one would try lying on a census and was wondering how they would figure out. Therefore, this dataset came to my mind as it was about census and it would help me get an idea of how to cross check when someone may be lying. (Federal Newws Network).
Source:
- https://www.usnews.com/news/us/articles/2020-02-03/report-census-hasnt-tested-tasks-to-catch-people-who-lie
- https://federalnewsnetwork.com/federal-newscast/2020/03/spreading-false-information-about-2020-census-could-land-you-in-jail-if-new-bill-becomes-law/
For this project, I am a Data Scientist in the US Census bureau. Recently, the government has announced a special package or benefit for all individuals who are earning less than 50K in temrs of salary. My colleagues collected the data of different individuals who are over the age of 16 and my job is to double check whether their claims on salary are accurate based on all other types of data that they are providing or if they are bluffing in order to get the benefits. There are many details, but the main ones collected from individuals are their age, education, occupation, race, gender, capital gain, capital loss, hours per week that they work and income (Which was used) as a target. Our dataset has about 32561 rows or that many individuals and 15 columns initailly. I removed one of them as it showed categorical labels for individuals and there education, but it did not start from 0 and started from one which would cause trouble when modelling. The target value has details about whether an individuals salary was greater than or less than/equal to 50K. My goal was to verify the accuracy about whether the salary range of aspecific individual was correct or seemed fishy. The dataset will give us an understading of capital gains or loss an individual had in terms of salary and I also noticed specific races where in higher propertion than the others, so many outliers came into existence. I used different types of methods and technologies like data cleaning, feature engineering and exploratory data analysis to undersand the data in more details and evaluate where to be alert or pay attention. Following that, I use logistic regression, decision trees and random forest methods to verify if the income category mentione was correct or not. I compared all three models and realized that one is relatively better than the other one. 
In order to help enhance and improve the model in the fine tuning step, I decided to remove major outliers in the data columns or features that may have been playing a role with pulling the overall score down. After removing the outliers, I realized that the the accuracy, precision and the recall scores rose by a specfic amount. I also noticed that the random forest model showed the best scores in terms of the accuracy.
## ML Problem
The corresponding machine learning problem is to use Logistic Regression, Decision Tree, and Random Forest models, evaluate which one achieves the best accuracy, and use it to determine whether all the features match the target. Our main goal is to develop a model with an accuracy score higher than 75%, since 75% was the score we obtained by dividing the number of individuals who earned more than 50K in salary by the total number of individuals.
## Getting the data
### Dataset
The dataset used for this project was found at
https://www.kaggle.com/uciml/adult-census-income.
The dataset was inspired by the intial data in the UCI website:
http://archive.ics.uci.edu/ml/datasets/Adult
This dataset contains information that was extracted from a census in a past year. The individuals are above age 16 and the dataset contains information about individuals, their work type (private company employee, government employee, etc), their education levels, marital status, occupation (like technical field, farmer, etc), their race and many more information. The dataset then shows us the income column as the target column and that shows us if an individuals salary is above or below $50K.
### Data Dictionary
Our dataset has about 32561 rows or that many individuals and 15 columns initailly. I removed one of them as it showed categorical labels for individuals and there education, but it did not start from 0 and started from one which would cause trouble when modelling. So 14 columns
- age: (int data type), numerical data. It contains each of the individuals - -- workclass: (string/object data type), cateogorical data. It contains the details of what type of work type each individual has liek private job, government job, etc.
- fnlwgt: (int data type), numerical data. This is a final weight ranking section that is determined by the amount of folks who are over 16, Hispanic and they evaluate the weight by the race, age and sex.
- education: (string/object data type), cateogorical data. It contains the education completed levels of each of the individuals int he census. Some have completed up to high school, some have gone to college, some are still in school.
- education-num: (int data type), categorical data. This column contains a value reprsenting the education level of each individual. However, we dropped this column as it is redundant and the categorical count does not start from 0 and starts from 1. It could affect the model unless every categorical value starts from 0 when it is converted to numeric value.
- marital-status: (int data type), categorical data. It contains information abput thee marital status of individuals like unmarried, married, divorce, etc.
- occupation: (string/object data type), categorical data. It contains information about what type of job or roles each individual has like manager, executive, etc.
- relationship: (string/object data type), categorical data. It contains detials about the individuals relationship status like is he a wife, a husband, has a child, unmarried, etc.
- race: (string/object data type), categorical data. It contains details about the individuals ethnicity - White, Black, Hispanic, etc.
- sex: (renamed it to gender), categorical data. It mentions whether an individual is a mail or female.
- capital-gain: (int data type), numeric data. This column contains the capital gain amount that an individual had. If they had none then it says 0.
- capital-loss: (int data type), numeric data. This column contains the capital loss amount that an individual had. If they had none then it says 0.
- hours-per-week: (int data type), numeric data. This column contains the amount of week each individual worked.
- native-country: (string/object data type), categorical data. It tells us each indiviuals native country.
- income: (string/ object data type), categorical data. It tells us whether an individuals salary is greater than or less than and equal to 50K dollar salary. This is our target value.
## Data Preparation and Exploring data
### Import potential or necessary libraries
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
```
### Loading the dataset
```
df = pd.read_csv('adult.csv')
```
**Taking a look at the number of rows and columns**
```
print('Number of columns and rows: ', df.shape)
```
**Getting more detailed information about the dataset like the columns, data types and all.**
```
# Column dtypes / non-null counts, then a preview of the first rows.
df.info()
df.head()
```
It seems that the education-number column was an integer identifier for the education field. However, its categorical codes start from 1 rather than 0, unlike standard label encoding. Later on we will need label encoding to convert categorical values to numeric for the classification algorithms. I will also be renaming the sex column to gender.
```
df['education.num'].unique()
df = df.drop(columns={'education.num'})
df = df.rename(columns= {"sex": "gender"})
df.head()
```
## Data Cleaning and analyzing
We drop any duplicate calues if there may be any type of duplicate values.
```
df = df.drop_duplicates()
```
We check for null values and it seems there are none, but there are many question marks in the dataset that are not treated as null values. In order to know how many missing values there are in each column, we need to convert them to nulls. It is essential to know which columns have null values so we can evaluate whether they carry meaning or can be removed or replaced.
```
# Per-column null counts (all zero here — missing values are encoded as '?').
df.isna().sum()
df.head()
```
We will be setting a null value in place of all question marks to represent missing values
```
df = df.replace('?', np.NaN)
df.head()
```
Below, we are double checking null values and where they are. As we can notice the null values are in the workclass, occupation and native country field. It is possible that someone who may have recently been laid off or looking for a job may not have an employment at the time the census collected data and their salary may be 0. It coul also be that an individual did want to report their employemnt details by preference and same goes with the information regarding their native country. Therefore, we can rename it to unknown.
```
df.isna().sum()
```
Some individuals may have been unemployed at the time of the census survey, could have been laid off, etc. Therefore, their workclass and occupation may be empty, so we set them to "unknown". Likewise, some may have preferred not to give information about their native countries, so we set those to "unknown" as well. As these are string categorical values, we did not set them to 0, to maintain consistency. Later on, label encoding will be used to convert these values to numeric for modelling.
```
# Replace all remaining NaNs (workclass, occupation, native.country) with a
# sentinel category so label encoding can treat "missing" as its own level.
df = df.fillna("unknown")
df.head()
```
## Exploratory Data Analysis and Data Visualization
We conducted additional EDA and developed further data visualizations to get a better idea on each of the features used in the dataset, along with their distributions and the relation of key features to target value of income and count of specific values in a column. We also triued to rvaluate the correlation with our target valur or the value in the y-axis which is income in this case.
```
# Histograms of every numeric column, plus pairwise scatter/density plots.
# NOTE: pairplot on the full dataset is slow for ~32K rows.
df.hist(figsize=(16,20), bins=30)
sns.pairplot(df)
```
Below is the data of each of the marital status values and their respective calculations. Those who are married with a civil spouse exist in the highest count. Those whose spouse is absent is towards the lowest.
```
# Frequency of each marital-status category.
plt.figure(figsize=(15, 5))
sns.countplot(x="marital.status", data=df)
```
Below is the data of all of the educations obtained by each of the individuals and the count or the number of people for each of the educational qualification sections. It seems the individuals who have been **High School graduates** or have attended some levels of **college** are in the highest numbers and those who have attended up to **pre-school and elementary school** are in the lowest counts.
```
# Frequency of each education level.
plt.figure(figsize=(17, 5))
sns.countplot(x="education", data=df)
```
Those individuals in the census, whose race is White is in highest numbers and infidicuals whose race is classified as others or American Indian is in the lowest numbers. It seems this column may also be playing a significant role in the modelling section.
```
# Frequency of each race category.
plt.figure(figsize=(15, 5))
sns.countplot(x="race", data=df)
```
Below is a visualization taht is helping us understand how many individuals income status is divided based on each of the races. It seems mostly those in who have income less than or equal to 50k are in higher number and the diference between the folkss who have less than or equal to 50k and the folks who have more than 50k is significantly higher for folks who are white.
```
# Income class (<=50K vs >50K) split out per race.
plt.figure(figsize=(10,7))
plt.title('Income relation to race')
sns.countplot(x='race', hue='income',data=df)
```
Below is a distribution plot of the age groups of all of the individuals who are in our dataset. It seems that the largest amount of individuals are below within their late teens to their 40's in age and it also seems that as it goes over the proportion of individuals who are above the age of 40 sowly goes down.
```
sns.distplot(df['age'])
```
Below, is a distribution of all of the individuals and how many hours they are working. It seems that most of the individuals work **40** hours a week which should be considered **full time** jobs.
```
sns.distplot(df['hours.per.week'])
```
Below is a counting of how many male and female are in the dataset. It seems that there are more males than females in the dataset.
```
# Gender counts in the dataset.
plt.title('The count of male and female')
sns.countplot(x='gender',data=df)
```
Below is a comparison of male and female and their salaary ranges. It seems that male have a higher overall salary and most of the individuals get **less than or equal to 50K** in comparison to **more than 50K**.
```
# Income class split out per gender.
plt.title('Income relation to gender')
sns.countplot(x='gender',hue='income',data=df)
```
In the visualization below, we can see that the individuals who get **more than 50K salary** seem to be working more than **40 hours a week**. And those who get **less than or equal to 50K salary** seem to be working **40 or less hours** in most of the cases.
```
sns.boxplot('income', y= 'hours.per.week', data= df)
```
## Prepare the data to better expose the underlying data patterns to ML algorithms
## Feature Engineering
This is the step where we will be converting categorical variables to numerical variables. These values will be used for modelling purposes. We will use the label-encoding technique to convert the categorical (non-numeric) values into numbers, with a unique numeric value assigned to each distinct category. If a column has two categories they are replaced with 0 and 1, and if it has more than two categories, more numbers are used to identify each one. Two categories corresponds to binary classification, while more than two corresponds to multiclass classification.
**Below we can see the count of each individual and their salary ranges.**
```
df['income'].value_counts()
```
Below is the ratio of each of our categories, and the ratio of individuals whose salary is less than or equal to 50K is notably higher than that of those earning more than 50K.
- Problem: Given a set of informatioon about each individual in the census, we are trying to figure out whther their salary exceeds 50K or it does not and according ot the business rule those who would be euqal to or less than 50K would be eligible or specific government benefits.
- Model Goal: Our main goal here is to develop a model that is higher than an accuracy score of 75%.
**Below is the baseline accuracy: the proportion of individuals who earn at most 50K (the majority class).**
```
24698/(24698+7839)
```
**Below are the proportions of individuals in each income class: those who earn at most 50K and those who earn more than 50K.**
```
# Class-proportion printout. 24698 is the count of the '<=50K' class and
# 7839 of the '>50K' class (see value_counts above); the original labels
# were swapped.
print("Ratio of salary less than or equal to 50K:", 24698/(24698+7839))
print("Ratio of salary greater than 50K:", 7839/(24698+7839))
```
We make a copy of the dataframe just incase. We will be moving on to the step where we use label encoding to replace categorical values in wordings to numerical values.
```
# Work on a copy so the original cleaned frame is preserved before encoding.
df_new = df.copy()
df_new.head()
```
Importing the Label Encoder Library from Scikit Learn. In the step, we will be converting all categorical columns to numeric.
```
from sklearn.preprocessing import LabelEncoder
```
Below, we are using the technique to fit the label ecoder to a specific column value and then we apppply the transformations where they will take each of the values in the categorical column and it would assign a numeric value to it. We do those steps for 8 of the categorical columns.
```
# Label-encode the target and every categorical feature column in place.
# The original repeated fit/transform nine times and, for the feature
# columns, fitted the encoder on `df` while transforming `df_new` — that
# only worked because the two frames happened to hold identical values.
# Fitting and transforming the same frame removes that fragility.
categorical_cols = [
    'income', 'workclass', 'marital.status', 'occupation',
    'relationship', 'race', 'gender', 'native.country', 'education',
]
for col in categorical_cols:
    le = LabelEncoder()
    df_new[col] = le.fit_transform(df_new[col])
```
Below is out updated dataset with each of the categorical columns converted into numerical columns and we will be using this data for modelling.
```
df_new.head()
```
### Data Preparation and Transformation for Modelling
We would intially split the data into training and testing data. The training dataset is what will be used for now and the testing dataset will be used for later after the models are developed, we use it to test the values in the model.
```
from sklearn.model_selection import train_test_split
```
We will be taking every column in x except the the income column which is the target column.
```
# Separate the target from the features, then hold out 20% for testing.
y = df_new['income']
x = df_new.drop(columns=['income'])
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=1
)
```
### Scaling Data
We are scaling the our data size to ensure that they are all in the same range of numbers or scaled into that range. Below, we import a scaler and declare a standard scaler or instantiate it.
```
# Standardize features to zero mean / unit variance before fitting models.
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
```
We are fitting our scaler to the training dataset and using the transformation features to create a scaling of the training and testing datasets.
```
# Fit the scaler on the training split only, then apply the same
# transformation to both splits (prevents test-set leakage).
x_train_scaled = std_scaler.fit_transform(x_train)
x_test_scaled = std_scaler.transform(x_test)
```
## Explore many different models and short-list the best ones
## Modeling & Model Evaluation
This is where we are devloping our models. We will be fitting our training sets into the model for the x scaled training set and the y training data which contains the target. Initially, we will be importing respective libraries and then we would need to create or instantiate a logistic regression model. We will also be evaluating the accuracy score of the model and a prediction which will set what values the x scaled ddatas will predict. Following that step, we would use a confusion matrix to see the accuracy and check for details like true positive (when predcition and actual results are in the positive side or saying no risk), true negative, (when the prediction and actual data both say that things are in the negative side or you are in the risk zone for our project, false positive (when we predict things are on positive side, but in reality they may not be on that side), false negative (when we predict things are on the negative side, but in reality they are on the positive side).
After al of that is over, we find the cross validation to accurately depict if out models accuracy is correct or not. We see the mean +/- stnd. deviation.
Following, the logistic regression tests, we would also test the datasets using the decision tree model and random forest and compare our results.
#### Logistics Regression
```
from sklearn.linear_model import LogisticRegression
# Unregularized logistic regression. penalty=None (the Python None, not the
# string 'none' — the string form was deprecated in scikit-learn 1.2 and
# removed in 1.4) disables the L2 penalty.
logreg = LogisticRegression(penalty=None, random_state=1)
logreg.fit(x_train_scaled, y_train)
predictions = logreg.predict(x_train_scaled)  # training-set predictions
score = logreg.score(x_train_scaled, y_train)  # training accuracy
```
In the cell below, we are developing confusion matrix for logistics regression to show the details all predicted datas vs actual data. We look at which one are predicted to be true and actually true, which one is predicted but now we evaluate where the predictions match with the accurate results.
```
from sklearn import metrics
from sklearn.metrics import accuracy_score, precision_score, recall_score
# Training-set confusion matrix: rows = actual class, columns = predicted.
cm = metrics.confusion_matrix(y_train, predictions)
cm
```
Below, we are displaying a visualization of the Confusion matrix for the training datasets
In the confusion matrix blow, we notice that the score is about 80% accurate. This evaluates that it would be correct about 80 percent of the time, but approximately 20% of the time it would be incorrect in the values it gives us. It seems that the number of true negatives arre significantly higher than the others and it is good to predict somethin as bad when it actually may be negative. The false positive and false negative cases could also indicate some concern. False negative cases are significnatly higher than the true positive cases.
```
# Heatmap of the logistic-regression confusion matrix, titled with the
# training accuracy.
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(score)
plt.title(all_sample_title, size = 15);
plt.show()
```
**Cross Validation** for Logistics Regression
In this step, we are cross validating out model. We are doing 5 splittings. The x values are set as the scaled training dataset and the y values are set as the also we for it to return training score and return the estimator. The verbose is 2 for the machine to show us what is going on.
```
from sklearn.model_selection import cross_validate
# 5-fold cross-validation of the logistic regression on the training data;
# keeps both train and validation scores plus the fitted estimators.
cv_fivefold = cross_validate(estimator= logreg,
X = x_train_scaled,
y = y_train,
cv = 5,
return_train_score= True,
return_estimator= True,
verbose = 2)
```
This is the cross validation data of our training and testing scores.
```
# Per-fold training and validation accuracies.
print(cv_fivefold['train_score'])
print(cv_fivefold['test_score'])
```
Below, we are looking for the mean of the validation and the standard deviation of it to verify the overall model accuracy. This is the overall summary of logistic regressions without any type of regularization. Our mean is around 80% and there is a standard devaition of about 0.004. Thi is a fairly decent score
```
# Mean ± standard deviation of the 5 validation-fold accuracies.
validation_mean = cv_fivefold['test_score'].mean()
validation_std = cv_fivefold['test_score'].std()
print('Logistic Regression 5-fold cv results (Accuracy) %.3f =/- %.3f'%(validation_mean, validation_std))
```
**Below are the accuracy, precision and recall scores of the model for Logistic Regression.**
- The accuracy score is calculated by dividing the correct prediction amount by the number of total cases.
- The precision score is calculated by dividing the number of true positives by the number of predicted positives (the sum of true positives and false positives).
- The recall score is calculated by dividing the number of true positives by the number of condition positives (the sum of true positives and false negatives).
```
# Training-set accuracy, precision and recall for the logistic regression.
# (Fixes the "accruacy" typo in the printed label.)
print('Log reg accuracy score:', accuracy_score(y_train, predictions))
print('Log reg precision score:', precision_score(y_train, predictions))
print('Log reg recall score:', recall_score(y_train, predictions))
```
According to the results above, our recall score is fairly lower as most of our data was in the negative sections and there were more true negatives than true positives.
### Decision tree
Decision tree will take the predictor space and break it into a number of different regions. We would break the individual datasets in the columns to smaller parts like if we have a dataset column calculating the income. We would evalaute if the income is larger than a specific number and then we would continue to the next node of the tree from the root node if it is. Then we would continue to the terminal node from root node. If not and we find a condition that is not further divideable we will stop. In our example, I set a maximum leaf nodes the decision tree can make is set to 15 for experimental purposes as I feel we have many binary classification, multiclass classification and the dataset is fairly large, so I felt 15 max nodes would allow it to find sufficient conditions.
We are importing the decision tree model and instantiating it. After that, we are trying to fit our scaled x training and y training datasets. The decision tree has a root node where it would have a conditions based on your dataset and it would move to the next node based on the condition. Like if someones is of a specific race then go to node a or b. It would continue till all conditions are satisfied and no nodes remaining. Below, we also find a prediction value and an accruacy score for the model. The score will be calculated by taking the x scaled values training and the y training values. Then the prediction will only store values predicted after seeing the x scaled training dataset.
```
from sklearn.tree import DecisionTreeClassifier
# Decision tree capped at 15 leaves to limit overfitting; seeded for
# reproducibility.
clf = DecisionTreeClassifier(max_leaf_nodes=15, random_state = 1)
clf.fit(x_train_scaled, y_train)
score_dt = clf.score(x_train_scaled, y_train)  # training accuracy
prediction_dt = clf.predict(x_train_scaled)  # training-set predictions
```
Below, we have developed our confusion matrix that takes the y training data set and the predictions made from the x scaled training data and it will show us the value comparison of what was correct and what not.
```
# Training-set confusion matrix for the decision tree.
cm_dt = metrics.confusion_matrix(y_train, prediction_dt)
cm_dt
```
Below, we have made the confusion matrix into a visualization format. IWe can see that the score is about 84%. This score shows that the decision tree seems to do better than the linear regression model. This time like the previous one, we can see there are more actual negatives than the positive, but the difference between false negative and tru positive is not as high.
```
# Heatmap of the decision-tree confusion matrix with its training accuracy.
plt.figure(figsize=(9,9))
sns.heatmap(cm_dt, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(score_dt)
plt.title(all_sample_title, size = 15);
plt.show()
```
**Cross Validation** for Decision tree
We are doing 5 splittings when we are doing cross validations. We set the x values as scaled training values, set the y as y training value also we for it to return training score and return the estimator. We set the verbose is 2 for the machine to show us what is going on.
```
# 5-fold cross-validation of the decision tree on the training data.
cv_fivefold_dt = cross_validate(estimator= clf,
X = x_train_scaled,
y = y_train,
cv = 5,
return_train_score= True,
return_estimator= True,
verbose = 2)
```
Below are the cross validation scores for our training and testing scores.
```
# Per-fold training and validation accuracies for the decision tree.
print(cv_fivefold_dt['train_score'])
print(cv_fivefold_dt['test_score'])
```
Over here, we find the mean of the validation and the standard deviation to verify the overall model accuracy. This is our baseline summary of decision tree model. Our mean is around 84.5 and there is a standard devaition of about 0.003 and this shows a better one than the log regression.
```
# Mean ± standard deviation of the decision tree's validation accuracies.
dt_validation_mean = cv_fivefold_dt['test_score'].mean()
dt_validation_std = cv_fivefold_dt['test_score'].std()
print('Decision tree 5-fold cv results (Accuracy) %.3f =/- %.3f'%(dt_validation_mean, dt_validation_std))
```
In the example below, we can see the accuracy, recall and the precision scores. As the negative actuals were higher than postives again, we see a low recall score, but the accuracy and precision scre are fairly high. Our scores are overall higher than the logistic regression ones.
```
# Training-set accuracy, precision and recall for the decision tree.
# (Fixes the "accruacy" typo in the printed label.)
print('Dec tree accuracy score:', accuracy_score(y_train, prediction_dt))
print('Dec tree precision score:', precision_score(y_train, prediction_dt))
print('Dec tree recall score:', recall_score(y_train, prediction_dt))
```
### Random Forest
There sample using a concept of bootstrapping where they will resampling of the data. They will look into the dataset and will resample the data based on where they will randomly choose data ffrom your original data set and replace it. Like they may place it in a different order or a specific data may be used twoce or more times. The different data sets can be used like a validations. By cahnging datasets we get different entries and we can get the average. I used this as th context seemed to be helpful in understanding how our dtaset can be resampled while modelling. The random forest will randomly sample observations with bootstrapping, but it will only show some parts of the predictor and not all of it. You will not see all dataset and this helps resolve the variance issue in bootstrapping.
We import the Random Forest Classifier from the Scikit Learn.Ensemble method as it ensembles many trees. The instantiation n_estimator parameter shows us how many trees we eill fit and each tree shows a bootstrapped sample of data and we set it to 300. I set every tree to at most 10 depths as it is a large dataset and it would be a good testing purpose. Following the import and instantiate, we are trying to fit our x scales training data and y training data into the random forest model and then we derive the predictions based on the x training data and the scores based on the scaled x training and the y training data.
```
from sklearn.ensemble import RandomForestClassifier
# 300 bootstrapped trees, depth-capped at 10. max_features='sqrt' is what
# the removed 'auto' option meant for classifiers (scikit-learn dropped
# 'auto' in 1.3); random_state=1 makes results reproducible, consistent
# with the other models in this notebook.
rf = RandomForestClassifier(n_estimators=300, max_features='sqrt',
                            max_depth=10, random_state=1)
rf.fit(x_train_scaled, y_train)
predictions_rf = rf.predict(x_train_scaled)  # training-set predictions
score_rf = rf.score(x_train_scaled, y_train)  # training accuracy
```
In the cell below, we are developing confusion matrix for the random forest to show the details of how many of the datas are predicted to be true and actually true, how many are not like that and it would show how many of the actual numbers matched with your prediction and how many did not.
```
# Training-set confusion matrix for the random forest.
# NOTE(review): this overwrites cm_dt (previously the decision-tree matrix);
# a distinct name such as cm_rf would be clearer, but later cells read cm_dt.
cm_dt = metrics.confusion_matrix(y_train, predictions_rf)
cm_dt
```
Below: We are displaying a visualization of the Confusion matrix for the training datasets. In the confusion matrix below, we notice that the score is about 87% accurate. This indicates it is mostly correct however about 13% of the time it is inaccurate. However, the accuracy score of this model is higher than the scores of the Logistic Regression and the Decision Tree model. This time the true negatives are highest, but the true postives are higher than the false postives unlike the other models.
```
# Heatmap of the random-forest confusion matrix with its training accuracy.
plt.figure(figsize=(9,9))
sns.heatmap(cm_dt, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(score_rf)
plt.title(all_sample_title, size = 15);
plt.show()
```
**Cross Validation** for Random Forest
We are doing 5 splittings when we are doing cross validations. We set the x values as scaled training values, set the y as y training value also we for it to return training score and return the estimator. We set the verbose is 2 for the machine to show us what is going on.
```
# 5-fold cross-validation of the random forest on the training data.
cv_fivefold_rf = cross_validate(estimator= rf,
X = x_train_scaled,
y = y_train,
cv = 5,
return_train_score= True,
return_estimator= True,
verbose = 2)
```
Below are the cross validation scores for our training and testing scores. It is higher than the scores for the Logisti Regression and Decision Tree.
```
# Per-fold training and validation accuracies for the random forest.
print(cv_fivefold_rf['train_score'])
print(cv_fivefold_rf['test_score'])
```
Over here, we find the mean of the validation scores and their standard deviation to verify the overall model accuracy. This is our baseline summary of the random forest model. Our mean is around 86.1% with a standard deviation of about 0.006, which is better than both the logistic regression and the decision tree.
```
# Mean ± standard deviation of the random forest's validation accuracies.
# NOTE(review): reuses (overwrites) the validation_mean/std names from the
# logistic-regression cell.
validation_mean = cv_fivefold_rf['test_score'].mean()
validation_std = cv_fivefold_rf['test_score'].std()
print('Random Forest 5-fold cv results (Accuracy) %.3f =/- %.3f'%(validation_mean, validation_std))
```
In the example below, we can see the accuracy, recall and the precision scores. As the we definitely had a lot of actual negatives the recall score is higher, but also we had a good amount of positives, so the recall score is fairly higher than the recall score for the log reg and decision tree which were in their 30 and 40s
```
# Training-set accuracy, precision and recall for the random forest.
# (Fixes the "accruacy" typo in the printed label.)
print('Random Forest accuracy score:', accuracy_score(y_train, predictions_rf))
print('Random Forest precision score:', precision_score(y_train, predictions_rf))
print('Random Forest recall score:', recall_score(y_train, predictions_rf))
```
As of now, it seems to me that the **Random Forest model has been performing the best** compared to **Logistic Regression** and **Decision Tree** model. However, we will be working on model tuning wehre we try to enhnce the model and aim to improve the overall scores. After tuning, we could make a call.
## Fine-tune your models
In this step, we will be fine tuning the model or trying to improve it or make it better. I plan on removing major outliers from specific columnss and evaluating how each of the models will be performing. I am comparing the Logistic Regression, Decision Tree and the Random Forest models after removing outliers with the original models with all the datas.
**Further fine-tuning: Checking the values after removing the outliers.**
It seems the categorical vlue of 4 is the outlier amongst all of the other races. If we recall from earlier, individuals whose race is white are significantly higher in count than all other races and it seems they are an outlier in this case.
```
sns.countplot(x="race", data=df_new)
```
It seems the categorical value of 1 and 3 seem to be fairly much shorter than the others and we will be removing those as they seem to be the outliers here.
```
sns.countplot(x="marital.status", data=df_new)
```
As we can see in the example below, the individuals whose education qualification codes are 3, 10 and 13 have much lower counts than the rest. In that case, I will be removing the rows with those values and then we will be re-running all of the models.
```
sns.countplot(x="education", data=df_new)
```
To correct the earlier selection: for race we need to remove category 4, as that is the outlier, and for marital status we need to remove categories 1 and 3.
```
# Drop the rows flagged above: race code 4, marital-status codes 1/3,
# and education codes 3/10/13.
outlier_mask = (
    (df_new['race'] == 4)
    | df_new['marital.status'].isin([1, 3])
    | df_new['education'].isin([3, 10, 13])
)
df_new.drop(df_new[outlier_mask].index, inplace=True)
```
We will be copying the updated dataframe with outlier removal just incase.
```
# Snapshot the outlier-filtered frame for the tuned-model experiments.
df2 = df_new.copy()
df2.head()
```
**In this step,** we willbe preparing our data and getting those ready for modelling and data transformation once again. In the updated dataset without the outliers and set the x values to all other features except the target feature of income and the y is set to the target feature of income.
```
# Features (x2) and target (y2) from the outlier-filtered data.
x2 = df2.drop(['income'], axis = 1)
y2 = df2['income']
```
Below, we are splitting the data into x and y training datasets.
```
x_tr, x_ts, y_tr, y_ts = train_test_split(x2, y2, test_size = 0.2, random_state = 1)
```
Below, we are **scaling** our x dataset to ensure that they are all in the same range of numbers or scaled into that range. Below, we import a scaler and declare a standard scaler or instantiate it.
```
# Re-fit the scaler on the new training split, then scale both splits.
x_tr_scaled = std_scaler.fit_transform(x_tr)
x_ts_scaled = std_scaler.transform(x_ts)
```
We will be instantiating our logistic regression model with the updated data sets training and testing set. We fit it with the scaled x training dataset andd the y training dataset.
```
# Unregularized logistic regression on the outlier-filtered data.
# penalty=None replaces the string 'none', which scikit-learn removed in 1.4.
lr = LogisticRegression(penalty=None, random_state=1)
lr.fit(x_tr_scaled, y_tr)
```
We are setting our accuracy score of the model by taking the scaled x training set and the y training set. Following that step, we are making a prediction by only using the scaled x training set.
```
# Training accuracy and training-set predictions for the tuned logistic model.
score2 = lr.score(x_tr_scaled, y_tr)
pred2 = lr.predict(x_tr_scaled)
```
In the confusion matrix below, we notice that the score is about 87% accurate. It was 80% in with the outliers. This evaluates that it would be correct about 87 percent of the time, but approximately 13% of the time it would be incorrect in the values it gives us. In this case, we still see the true negative numbers are the highest, but the true postiive is larger than the true negative and that is the exact opposite of what we had noticed when we were modelling it with the outliers.
```
# Confusion matrix for THIS model's predictions. The original re-plotted
# the stale cm_dt (the random-forest matrix computed earlier) under the
# logistic-regression title — compute the matrix from pred2 instead.
cm_lr2 = metrics.confusion_matrix(y_tr, pred2)
plt.figure(figsize=(9,9))
sns.heatmap(cm_lr2, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(score2)
plt.title(all_sample_title, size = 15);
plt.show()
```
**Cross Validation** for Logistics Regression without outliers
In this step, we are cross validating out model. We are doing 5 splittings. The x values are set as the scaled training dataset in the updated data set without outliers and the y values are set as the also we for it to return training score and return the estimator. The verbose is 2 for the machine to show us what is going on.
```
# 5-fold cross-validation of the tuned logistic regression.
cv_fivefold_lr2 = cross_validate(estimator= lr,
X = x_tr_scaled,
y = y_tr,
cv = 5,
return_train_score= True,
return_estimator= True,
verbose = 2)
```
This is the cross validation data of our training and testing scores. The sscores are better than theee cross validations for the model without outliers.
```
# Per-fold training and validation accuracies (outlier-filtered data).
print(cv_fivefold_lr2['train_score'])
print(cv_fivefold_lr2['test_score'])
```
Below, we are looking for the mean of the validation and the standard deviation of it to verify the overall model accuracy. This is the overall summary of logistic regressions without outliers. Our mean is around 84.5% and there is a standard devaition of about 0.003. This score is better than the 80% score range.
```
# Mean ± standard deviation for the tuned logistic regression's CV scores.
# The original read cv_fivefold_dt (the decision tree's results) by mistake;
# the correct source is cv_fivefold_lr2 computed just above.
lr2_validation_mean = cv_fivefold_lr2['test_score'].mean()
lr2_validation_std = cv_fivefold_lr2['test_score'].std()
print('Logistic regression without outliers 5-fold cv results (Accuracy) %.3f =/- %.3f'%(lr2_validation_mean, lr2_validation_std))
```
Below, are our scores and we are comparing the scores after outlier removal with the ones when the outlier wass there. As we can see, the accuracy and the precision score is better without outliers than the ones with outliers. On the other hand, the recall score is around the same range.
```
# Compare the tuned (outlier-free) logistic regression metrics against the
# original model's metrics. (Fixes "accruacy"/"ouliers" typos in output.)
print('Log reg accuracy score without outliers:', accuracy_score(y_tr, pred2))
print('Log reg precision score without outliers:', precision_score(y_tr, pred2))
print('Log reg recall score without outliers:', recall_score(y_tr, pred2))
print('Log reg accuracy score:', accuracy_score(y_train, predictions))
print('Log reg precision score:', precision_score(y_train, predictions))
print('Log reg recall score:', recall_score(y_train, predictions))
```
We will be instantiating our decision tree model with the updated data sets training and testing set. We fit it with the scaled x training dataset andd the y training dataset. We are also setting our accuracy score of the model by taking the scaled x training set and the y training set. Following that step, we are making a prediction by only using the scaled x training set.
```
# Decision tree (same 15-leaf cap) retrained on the outlier-filtered data,
# plus its training accuracy and training-set predictions.
dt = DecisionTreeClassifier(max_leaf_nodes=15, random_state = 1)
dt.fit(x_tr_scaled, y_tr)
dt_sc = dt.score(x_tr_scaled, y_tr)
dt_pred = dt.predict(x_tr_scaled)
```
In the confusion matrix below, we notice that the score is about 89% accurate. It was about 84% with the outliers. This means it would be correct about 89 percent of the time, but approximately 11% of the time it would be incorrect in the values it gives us. In this case the true positive count is higher than the false positive count, which was not the case when we were modelling with all values including outliers. However, the true negatives are higher than the true positives. This score is higher than the logistic regression score, even without removing the outliers.
```
# Confusion-matrix heatmap for the decision tree on the training data.
# NOTE(review): cm_dt is assumed to be computed in an earlier cell, e.g.
# confusion_matrix(y_tr, dt_pred) — confirm it matches dt_pred.
plt.figure(figsize=(9,9))
sns.heatmap(cm_dt, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(dt_sc)
plt.title(all_sample_title, size = 15);
plt.show()
```
**Cross Validation** for Decision tree
We use 5 splits for cross-validation. We set X to the scaled training values from the updated data set and y to the training labels; we also ask it to return the training scores and the fitted estimators. Verbose is set to 2 so that progress is logged as it runs.
```
# 5-fold cross-validation of the decision tree on the outlier-free data.
# return_train_score/return_estimator keep per-fold details; verbose=2 logs progress.
cv_fivefold_dt2 = cross_validate(estimator= dt,
                                 X = x_tr_scaled,
                                 y = y_tr,
                                 cv = 5,
                                 return_train_score= True,
                                 return_estimator= True,
                                 verbose = 2)
```
Below are the cross validation scores for our training and testing scores. These scores are better than our scores with all the data including outliers.
```
# Per-fold train/test accuracies for the outlier-free decision-tree CV.
print(cv_fivefold_dt2['train_score'])
print(cv_fivefold_dt2['test_score'])
```
Over here, we find the mean of the validation scores and their standard deviation to verify the overall model accuracy. This is our summary of the decision tree model without outliers. Our mean is around 87.6% with a standard deviation of about 0.012. The overall score is better than the one with the outliers for the decision tree.
```
# Summarize the 5-fold CV accuracy of the outlier-free decision tree.
# FIX: the printed separator was "=/-"; "+/-" is the intended notation.
dt2_validation_mean = cv_fivefold_dt2['test_score'].mean()
dt2_validation_std = cv_fivefold_dt2['test_score'].std()
print('Decision tree without outliers 5-fold cv results (Accuracy) %.3f +/- %.3f' % (dt2_validation_mean, dt2_validation_std))
```
In the example below, we can see the accuracy, recall and the precision scores after removing outliers and we are comparing it with the scores that had no outliers and all of the values were present. The overall scores are much higher in all three of the cases. The accuracy recall and precision scores are higher once we have removed the outliers.
```
# Compare decision-tree training metrics with vs. without outliers.
# FIX: corrected the spelling of "accuracy"/"outliers" in the printed labels.
print('Dec tree accuracy score without outliers:', accuracy_score(y_tr, dt_pred))
print('Dec tree precision score without outliers:', precision_score(y_tr, dt_pred))
print('Dec tree recall score without outliers:', recall_score(y_tr, dt_pred))
print('Dec tree accuracy score:', accuracy_score(y_train, prediction_dt))
print('Dec tree precision score:', precision_score(y_train, prediction_dt))
print('Dec tree recall score:', recall_score(y_train, prediction_dt))
```
We will be instantiating our random forest model with the updated data sets training and testing set. We fit it with the scaled x training dataset and the y training dataset. We are also setting our accuracy score of the model by taking the scaled x training set and the y training set. Following that step, we are making a prediction by only using the scaled x training set.
```
# Random forest on the outlier-free data.
# FIX: max_features='auto' was deprecated (and later removed) in scikit-learn;
# for classifiers it always meant sqrt(n_features), so 'sqrt' is the exact
# drop-in equivalent.
# NOTE(review): no random_state is set, so results vary run to run — consider
# pinning one for reproducibility.
rf2 = RandomForestClassifier(n_estimators = 300, max_features='sqrt', max_depth= 10)
rf2.fit(x_tr_scaled, y_tr)
rf2_sc = rf2.score(x_tr_scaled, y_tr)  # training accuracy (optimistic estimate)
rf2_pred = rf2.predict(x_tr_scaled)
```
In the confusion matrix below, we notice that the score is about 93% accurate. It was about 87% with the outliers. This means it would be correct about 93 percent of the time, but approximately 7% of the time it would be incorrect in the values it gives us. In this case the true positive count is higher than the false positive count once again. However, the true negative cases are the highest this time as well. This model performed better than the logistic regression and the decision tree models.
```
# Confusion-matrix heatmap for the random forest section.
# NOTE(review): this plots cm_dt (the decision-tree matrix) while the title
# uses rf2_sc — it likely should plot confusion_matrix(y_tr, rf2_pred); confirm.
plt.figure(figsize=(9,9))
sns.heatmap(cm_dt, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(rf2_sc)
plt.title(all_sample_title, size = 15);
plt.show()
```
**Cross Validation** for Random Forest
We use 5 splits for cross-validation. We set X to the scaled training values from the updated data set and y to the training labels; we also ask it to return the training scores and the fitted estimators. Verbose is set to 2 so that progress is logged as it runs.
```
# 5-fold cross-validation of the random forest on the outlier-free data.
# return_train_score/return_estimator keep per-fold details; verbose=2 logs progress.
cv_fivefold_rf2 = cross_validate(estimator= rf2,
                                 X = x_tr_scaled,
                                 y = y_tr,
                                 cv = 5,
                                 return_train_score= True,
                                 return_estimator= True,
                                 verbose = 2)
```
Below are the cross validation scores for our training and testing scores. These scores are better than our scores with all the data including outliers for random forest.
```
# Per-fold train/test accuracies for the outlier-free random-forest CV.
print(cv_fivefold_rf2['train_score'])
print(cv_fivefold_rf2['test_score'])
```
Over here, we find the mean of the validation scores and their standard deviation to verify the overall model accuracy. This is our overall summary of the random forest model. Our mean is around 89.2% with a standard deviation of about 0.0108. The overall score is better than the one with the outliers for the random forest model, where our mean was 86%.
```
# Summarize the 5-fold CV accuracy of the outlier-free random forest.
# BUG FIX: the printed label said "Decision tree" although these are the
# random-forest results; also "+/-" was "=/-".
rf2_validation_mean = cv_fivefold_rf2['test_score'].mean()
rf2_validation_std = cv_fivefold_rf2['test_score'].std()
print('Random Forest without outliers 5-fold cv results (Accuracy) %.3f +/- %.3f' % (rf2_validation_mean, rf2_validation_std))
```
In our example below, we can see that the recall score was slightly lower after the removal of outliers, but the accuracy and the precision scores were higher when outliers were removed in the case of the Random Forest.
```
# Compare random-forest training metrics with vs. without outliers.
# FIX: corrected the spelling of "accuracy" in the printed labels.
print('Random Forest accuracy score without outliers:', accuracy_score(y_tr, rf2_pred))
print('Random Forest precision score without outliers:', precision_score(y_tr, rf2_pred))
print('Random Forest recall score without outliers:', recall_score(y_tr, rf2_pred))
print('Random Forest accuracy score:', accuracy_score(y_train, predictions_rf))
print('Random Forest precision score:', precision_score(y_train, predictions_rf))
print('Random Forest recall score:', recall_score(y_train, predictions_rf))
```
## Present your solution
After looking at all of the details, I decided to go with the random forest model, as it had a higher accuracy score than the other models and its predictions of true positives and true negatives were in the top 2, so a majority of the data was predicted correctly. The accuracy score exceeded 90% — it was about 93% with the outliers removed. It also had the highest cross-validation scores, which were 0.892 +/- 0.008 when outliers were removed. I have also decided to go with the solutions after the outliers were removed, as the overall scores were higher in those cases.
### Model Results and Final Choice
We instantiate the model again and fit it again, but this time we are testing and showing our final output after the decision is made, so we can use our test datasets in this case. We can see that the true positives and the true negatives are higher than the others, so it made the correct predictions most of the time. I also notice that the precision score was perfect; precision is calculated as true positives / (true positives + false positives).
```
# Final model evaluation on the held-out test split.
# BUG FIX: the original fit the model on (x_ts_scaled, y_ts) and then scored
# on the very same data — training on the test set leaks it and inflates the
# reported scores (explaining the suspiciously high "98%" below). Train on
# the training split, evaluate on the test split.
# Also fixed max_features='auto' (deprecated; 'sqrt' is the classifier
# equivalent) and the "accruacy"/"ouliers" label typos.
ran_f = RandomForestClassifier(n_estimators = 300, max_features='sqrt', max_depth= 10)
ran_f.fit(x_tr_scaled, y_tr)
ran_f_sc = ran_f.score(x_ts_scaled, y_ts)  # accuracy on unseen test data
ran_f_pred = ran_f.predict(x_ts_scaled)
print('Random Forest accuracy score without outliers:', accuracy_score(y_ts, ran_f_pred))
print('Random Forest precision score without outliers:', precision_score(y_ts, ran_f_pred))
print('Random Forest recall score without outliers:', recall_score(y_ts, ran_f_pred))
```
In the confusion matrix below, we can see that there is a 98% accuracy rate with the test data set.
```
# Confusion-matrix heatmap for the final model.
# NOTE(review): cm_dt is the decision-tree matrix and the title says
# "training data", yet the scores above are for ran_f on the test split —
# this cell likely should plot confusion_matrix(y_ts, ran_f_pred); confirm.
plt.figure(figsize=(9,9))
sns.heatmap(cm_dt, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score for training data: {0}'.format(ran_f_sc)
plt.title(all_sample_title, size = 15);
plt.show()
```
Below, we developed an ROC curve; it shows the comparison of our true positive rate to the false positive rate, and the larger the area under a curve, the better the model. The area under the curve is the AUC score. As we can see here, the Random Forest classifier is the highest in comparison to the other two. After taking a look at the model results, the ROC curve and the model scores show us that Random Forest is the best modelling option, and I will be recommending it.
```
# Overlay ROC curves of all three classifiers on one set of axes (test data).
# NOTE(review): metrics.plot_roc_curve was deprecated in scikit-learn 1.0 and
# removed in 1.2; RocCurveDisplay.from_estimator is the replacement — confirm
# the installed version.
log_dis = metrics.plot_roc_curve(lr, x_ts_scaled, y_ts)
dt_dis = metrics.plot_roc_curve(dt, x_ts_scaled, y_ts, ax = log_dis.ax_)
rf_dis = metrics.plot_roc_curve(rf2, x_ts_scaled, y_ts, ax = log_dis.ax_)
rf_dis.figure_.suptitle('ROC curve comparison')
plt.show()
```
## Conclusions, Limitations and later work
As we have already seen, this dataset contains information about individuals in a census containing improtant information like their age, gender, job occupation, job profession, education completed, captial gain/loss, etc.
Along with that, we have seen that their income was placed into classifications where it was either greater than 50K or less than or equal to 50K. As a data scientist at the US Census Bureau, my job was to verify whether each individual's salary range was accurate or if something seemed fishy, and accordingly I used a logistic regression model, a decision tree model and a random forest model. Then my role was to give an idea of which model was the best option for this task and recommend one. In order to do that, I needed to provide accuracy details to the company, give them an idea of the dataset and express my thoughts. I initially converted all of the categorical data to numeric variables and used those in the logistic regression models along with the decision tree models. Though we had aimed for accuracy scores above 75% as our baseline, I realized that the logistic regression model has an accuracy score higher than 80%, and after repeating the steps with the decision tree and random forest models I realized that all of them had a score higher than 80%. However, one common thing was that all of the models had the highest true negative value, and the true positive value was less than the false negative. A false negative is scary, as things are good but the prediction says they are not. However, that was not the case in the random forest model. In order to further fine-tune and enhance my model, I decided to remove outliers from specific columns and see how the model was affected. I realized that it helped the model significantly, and the random forest model came into the 93 percent range. In all of the cases the cross-validation accuracy scores were also higher for the random forest model. 
I realized that, because the random forest model shows higher accuracy on our training sets, it would be an ideal candidate for classification modelling next time. Along with that, I realized that in this dataset the negative cases overall were higher than the positives, but it was good that the true negatives were recognized; if they fell in the false positive category that would also be bad, as an individual would feel assured because the prediction says things are good while in reality they may be bad. That, I felt, was one **limitation**: the negative cases may have been high. Another **limitation** I felt was that more data could be used, like the housing status, and possibly if it was scaled a bit further I wonder how the model may have been impacted. Some of my **next steps** are to see what role housing status may play, such as whether they rent or own a house, etc. I also plan as a **next step** to possibly remove more outliers, though it seems that whether you should remove outliers or not depends on what your aim is. Another **next step** is that I plan to enhance the model so it would possibly score higher than 98. 98 was a good score; however, a score like 99, or as close to 100 as possible, would be reassuring, and that is what I plan to look into in the long run.
## References and contributions
- Dataset: https://www.kaggle.com/uciml/adult-census-income
- Dataset inspired by: http://archive.ics.uci.edu/ml/datasets/Adult
- https://www.usnews.com/news/us/articles/2020-02-03/report-census-hasnt-tested-tasks-to-catch-people-who-lie
- https://federalnewsnetwork.com/federal-newscast/2020/03/spreading-false-information-about-2020-census-could-land-you-in-jail-if-new-bill-becomes-law/
| github_jupyter |
# Probabilistic Programming
A Probabilistic Programming Language (PPL) is a computer language providing statistical modelling and inference functionalities, in order to reason about random variables, probability distributions and conditioning problems. The most popular PPLs are `Stan`, `PyMC`, `Pyro` and `Edward`.
A probabilistic program is a mix of **deterministic computation** and **sampling**, which allows to *draw random values* from distributions, to *condition* variables on observations and to perform *inference*.
### Pyro
Pyro is a universal probabilistic programming language based on Python.
It can represent any probabilistic model, while providing automatic optimization-based inference that is flexible and scalable to large data sets.
Pyro builds on PyTorch library, which supports GPU-accelerated tensor math and includes automatic differentiation, a technique for efficiently computing gradients.
### Models
The basic unit of probabilistic programs is the stochastic function (or model). A statistical model is a mathematical description of how the values of some knowns and unknowns could generate the observed data.

<div align="center" style="color:darkblue">S. Wood, "Core Statistics"</div>
A stochastic function in Python is an arbitrary function that combines two ingredients:
- deterministic Python code
- primitive stochastic functions that call a random number generator
**Using `sample()` primitive**
Drawing a sample from the unit normal distribution $\mathcal{N}(0,1)$.
```
import torch
import pyro

pyro.set_rng_seed(1) # for reproducibility

loc = 0. # mean
scale = 1. # standard deviation

# using pytorch: build the distribution object explicitly, then draw from it
normal = torch.distributions.Normal(loc, scale) # create a normal distribution object
x = normal.rsample() # draw a (reparameterized) sample from N(0,1)
print("pytorch sample:\t", x)

# using pyro: the string is the sample-site name pyro uses to track this draw
x = pyro.sample("sample_name", pyro.distributions.Normal(loc, scale))
print("pyro sample:\t", x)
```
Pyro samples are named: Pytorch backend uses these names to uniquely identify sample statements and change their behavior at runtime depending on how the enclosing stochastic function is being used.
**Drawing multiple samples**
Now we draw multiple samples from $\mathcal{N}(2,2)$ and $\text{Exp}(0.3)$ distributions and plot the corresponding histograms.
```
import seaborn as sns
import matplotlib.pyplot as plt
import pyro.distributions as dist

# distributions: N(2, 2) and Exp(rate=0.3)
normal = dist.Normal(2, 2)
exp = dist.Exponential(0.3)

# draw 200 samples from each (all under the same site name "n")
normal_samples = [pyro.sample("n",normal) for i in range(200)]
exp_samples = [pyro.sample("n",exp) for i in range(200)]

# plot histograms with KDE overlays, side by side
# NOTE(review): seaborn.distplot is deprecated; histplot/displot is the modern
# replacement — confirm the installed seaborn version.
fig, axes = plt.subplots(1, 2, figsize=(12,4))
sns.distplot(normal_samples, ax=axes[0])
sns.distplot(exp_samples, ax=axes[1])
axes[0].set_title('Normal')
axes[1].set_title('Exponential')
plt.show()
```
Seaborn `distplot()` automatically estimates the PDFs over histogram bins.
**Simple stochastic model**
Suppose we want to reason about how temperature interacts with sunny and cloudy weather. We can define a simple stochastic function `weather()` describing the interaction
$$ \mathcal{N}(12.0,5.0^2) \; \text{for cloudy weather}$$
$$\mathcal{N}(23.0,6.0^2) \; \text{for sunny weather} \; $$
```
def weather():
    """Sample a sky condition and a matching temperature.

    Draws cloudy/sunny from Bernoulli(0.3), then a temperature from the
    normal distribution associated with that sky condition:
    N(12, 5^2) when cloudy, N(23, 6^2) when sunny.
    """
    cloudy_draw = pyro.sample('cloudy', dist.Bernoulli(0.3))
    sky = 'cloudy' if cloudy_draw.item() == 1.0 else 'sunny'
    # Pick the temperature distribution parameters for the sampled sky.
    if sky == 'cloudy':
        mean_temp, std_temp = 12.0, 5.0
    else:
        mean_temp, std_temp = 23.0, 6.0
    temp = pyro.sample('temp', dist.Normal(mean_temp, std_temp))
    return {"weather": sky, "temp": temp.item()}

[weather() for _ in range(5)]
```
We could use this stochastic function to model the sales of ice cream based on the weather.
```
def ice_cream_sales():
    """Sample expected ice-cream sales conditioned on the sampled weather.

    BUG FIX: weather() returns a dict, so the original tuple unpacking
    `is_cloudy, temp = weather()` bound the two dictionary *keys*
    ('weather', 'temp') instead of their values, making the condition
    always false and the expected sales always 20. We now read the dict
    values explicitly. The high-sales branch is interpreted as hot sunny
    weather (cf. the original Pyro tutorial) — confirm intent.
    """
    w = weather()
    hot_and_sunny = w["weather"] == 'sunny' and w["temp"] > 35.0
    expected_sales = 200. if hot_and_sunny else 20.
    sales = pyro.sample('ice_cream', pyro.distributions.Normal(expected_sales, 10.0))
    return sales

[ice_cream_sales() for _ in range(5)]
```
## Inference
The purpose of statistical inference is that of using a statistical model to infer the values of the unknowns that are consistent with the observed data.
|Frequentist interpretation|Bayesian interpretation|
|:-:|:-:|
|Probability measures a proportion of outcomes. | Probability measures the believability in an event. |
|There is randomness in our estimation of the parameters, but not in the parameters themselves, which are considered as fixed.| Parameters are treated as random variables and our belief about these parameters is updated in the light of data.|
### Bayesian inference
**Bayes theorem**
Let $A$ and $B$ be two events, such that $P(B)\neq0$, then $
P(A|B) = \frac{P(A,B)}{P(B)}=\frac{P(B|A)P(A)}{P(B)}$.
<div> <img src="attachment:image.png" width="400"/></div>
<div align="center" style="color:darkblue">https://medium.com/informatics-lab/probabilistic-programming-1535d7882dbe</div>
**Bayes theorem example**
There are two boxes $b_1$ and $b_2$. Box 1 contains three red and five white balls and box 2 contains two red and five white balls. A box $B\in\{b1,b2\}$ is chosen at random with $P(B=b_1)=P(B=b_2)=0.5$ and a ball chosen at random from this box turns out to be red.
What is the posterior probability that the red ball came from box 1?
$R\in\{0,1\}$ indicates whether the chosen ball is red or not.
From Bayes theorem we get
$$
P(B=b_1|R=1)=\frac{P(B=b_1,R=1)}{P(R=1)}.
$$
and $P(B=b_1,R=1) = P(R=1|B=b_1)P(B=b_1)=\frac{3}{8}\cdot \frac{1}{2}$.
From the law of total probability $P(R=1)=\sum_{i\in\{1,2\}}P(R=1|B=b_i)P(B=b_i)=\frac{3}{8}\cdot \frac{1}{2}+\frac{2}{7}\cdot\frac{1}{2}=\frac{37}{112}$.
Consequently, $$P(B=b_1|R=1)=\frac{3}{16}\cdot\frac{112}{37}=\frac{21}{37}\approx 0.56$$
**Posterior probability**
Under the Bayesian paradigm we do not estimate parameters, we
compute their distribution based on the given data.
The posterior probability is derived according to Bayes' theorem
$$
p(\theta|x) = \frac{p(x|\theta)p(\theta)}{p(x)}
$$
and the idea of uncertainty is preserved by the specific interpretation attributed to the involved terms:
- **prior probability** $p(\theta)$ = degree of belief of event occurring before observing any evidence
- **evidence** $p(x)$ = observed data
- **likelihood** $p(x|\theta)$ = compatibility of the evidence with the given hypothesis
- **posterior probability** $p(\theta|x)$ = updated belief given the evidence
<div> <img src="attachment:image.png" width="500"/></div>
<div align="center" style="color:darkblue">https://www.researchgate.net/figure/Bayesian-updating-of-the-prior-distribution-to-posterior-distribution-The-Posterior_fig1_320507985</div>
### Conjugate priors
If the posterior distribution $p(\theta|x)$ belongs to the same family as the prior distribution $p(\theta)$, then the prior is said to be a **conjugate prior** for the likelihood function $p(x|\theta)$.
This is a particularly convenient case in which the posterior distribution has a closed-form expression.
These are just a few examples of conjugate priors:
|Conjugate prior distribution| Likelihood | Prior hyperparameters | Posterior hyperparameters|
|:------:|:-----:|:-----:|:------:|
|Normal|Normal (known var.)|$$\mu_0,\sigma_0^2$$|$${\frac{1}{\frac{1}{\sigma_0^2}+\frac{n}{\sigma^2}}\Bigg(\frac{\mu_0}{\sigma_0^2}+\frac{\sum_{i=1}^n x_i}{\sigma^2}\Bigg),\Bigg(\frac{1}{\sigma_0^2}+\frac{n}{\sigma^2}\Bigg)^{-1}}$$|
|Inverse Gamma|Normal (known mean)|$$\alpha,\beta$$|$$\alpha+\frac{n}{2},\beta+\frac{\sum_{i=1}^n (x_i-\mu)^2}{2}$$|
|Beta|Binomial|$$\alpha,\beta$$|$$\alpha+\sum_{i=1}^n x_i, n-\sum_{i=1}^n x_i+\beta$$|
|Gamma|Poisson|$$k,\theta$$|$$k+\sum_{i=1}^n x_i,\frac{\theta}{n\theta+1}$$|
|Gamma|Exponential|$$\alpha,\beta$$|$$\alpha+n,\beta+\sum_{i=1}^n x_i$$|
**Beta-Binomial case**
$Beta(\alpha,\beta)$ prior and $x\sim Bin(n,\pi)$ likelihood result in the posterior
\begin{align}
p(\pi|x,\alpha,\beta)&\propto \pi^x (1-\pi)^{n-x} \pi^{\alpha-1}(1-\pi)^{\beta-1}\\
&\propto \pi^{x+\alpha-1}(1-\pi)^{n-x+\beta-1}
\end{align}
which is a $Beta(x+\alpha, n-x+\beta)$.
### Approximate inference
Prior $p(\theta)$ and likelihood $p(x|\theta)$ functions are usually known as part of the model, while the computation of the normalization factor
$$
p(x) = \int_\theta p(x|\theta) p (\theta) d \theta
$$
can easily become intractable in the high-dimensional cases.
**Example of intractable posterior**
Suppose we are trying to figure out how much something weighs, but the scale we’re using is unreliable and gives slightly different answers every time we weigh the same object. We could try to compensate for this variability by integrating the noisy measurement information with a guess based on some *prior knowledge* about the object.
$$weight \, | \, guess \sim \mathcal{N}(guess, 1)$$
$$ measurement \, | \, guess, weight \sim \mathcal{N}(weight, 0.75^2) $$
```
def scale(guess):
    """One noisy weighing of an object.

    Latent true weight ~ N(guess, 1); the returned measurement adds
    scale noise: measurement | weight ~ N(weight, 0.75^2).
    """
    prior = dist.Normal(guess, 1.0)
    weight = pyro.sample("weight", prior)
    noise_model = dist.Normal(weight, 0.75)
    return pyro.sample("measurement", noise_model)
```
The model is quite simple, so we are able to determine our posterior distribution of interest analytically. But in general the exact computation of the posterior of an arbitrary stochastic function is intractable.
Even the `scale` model with a non-linear function may become intractable.
```
def intractable_scale(guess):
    # Same generative story as scale(), but the measurement mean is pushed
    # through a non-linear map, which breaks the analytic (conjugate) posterior.
    # NOTE(review): some_nonlinear_function is not defined in this notebook —
    # the cell is illustrative and would raise NameError if executed.
    weight = pyro.sample("weight", dist.Normal(guess, 1.0))
    measurement = pyro.sample("measurement",
                              dist.Normal(some_nonlinear_function(weight), 0.75))
    return measurement
```
Approximate inference addresses the need of applying Bayesian learning to more complex problems and to the high-dimensional datasets that we are dealing with in machine learning.
Examples of approximate inference include Variational Bayesian methods, Markov chain Monte Carlo, Markov Random Fields and Bayesian Networks.
We can identify two main categories for approximate inference:
- **Stochastic methods** turn the problem of inference into a problem of sampling from the posterior distribution of interest;
- **Deterministic methods** substitute inference with optimization problems.
## References
- S. Wood, "Core Statistics"
- [Pyro library](https://pyro.ai/)
- [Pyro documentation](https://docs.pyro.ai/en/1.1.0/index.html)
- [Probabilistic Programming & Bayesian Methods for Hackers](https://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/)
| github_jupyter |
## Twitter Sentiment Analysis
Determining whether a piece of writing is positive, negative or neutral. It’s also known as opinion mining, deriving the opinion or attitude of a speaker.
conda install -n py36 -c conda-forge tweepy
conda install -n py36 -c conda-forge textblob
```
import os
import re

import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
class TwitterClient(object):
    '''
    Generic Twitter client: fetches tweets for a query and classifies each
    one's sentiment with TextBlob polarity.
    '''

    def __init__(self):
        '''
        Authenticate with the Twitter API and build the tweepy client.

        SECURITY FIX: the original hard-coded live API credentials in the
        notebook. Secrets must never be committed to source control; they
        are now read from environment variables.
        '''
        consumer_key = os.environ.get('TWITTER_CONSUMER_KEY', '')
        consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET', '')
        access_token = os.environ.get('TWITTER_ACCESS_TOKEN', '')
        access_token_secret = os.environ.get('TWITTER_ACCESS_TOKEN_SECRET', '')
        try:
            # create OAuthHandler object and attach the access token
            self.auth = OAuthHandler(consumer_key, consumer_secret)
            self.auth.set_access_token(access_token, access_token_secret)
            # create tweepy API object to fetch tweets
            self.api = tweepy.API(self.auth)
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            print("Error: Authentication Failed")

    def clean_tweet(self, tweet):
        '''
        Strip @mentions, URLs and special characters from *tweet* and
        collapse runs of whitespace to single spaces.
        '''
        # SYNTAX FIX: the original regex string literal was broken across two
        # physical lines (invalid Python); it is now a single raw string.
        return ' '.join(
            re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
                   " ", tweet).split())

    def get_tweet_sentiment(self, tweet):
        '''
        Classify *tweet* as 'positive', 'neutral' or 'negative' based on
        the sign of TextBlob's polarity score.
        '''
        analysis = TextBlob(self.clean_tweet(tweet))
        if analysis.sentiment.polarity > 0:
            return 'positive'
        elif analysis.sentiment.polarity == 0:
            return 'neutral'
        else:
            return 'negative'

    def get_tweets(self, query, count = 10):
        '''
        Fetch up to *count* tweets matching *query*.

        Returns a list of dicts with 'text' and 'sentiment' keys.
        FIX: on API errors the original fell through and implicitly
        returned None, which crashed the caller; we now always return
        the (possibly empty) list.
        '''
        tweets = []
        try:
            fetched_tweets = self.api.search(q = query, count = count)
            for tweet in fetched_tweets:
                parsed_tweet = {
                    'text': tweet.text,
                    'sentiment': self.get_tweet_sentiment(tweet.text),
                }
                if tweet.retweet_count > 0:
                    # if tweet has retweets, ensure it is appended only once
                    if parsed_tweet not in tweets:
                        tweets.append(parsed_tweet)
                else:
                    tweets.append(parsed_tweet)
        except tweepy.TweepError as e:
            print("Error : " + str(e))
        return tweets
def main():
    """Fetch tweets for a query and report the sentiment breakdown."""
    # creating object of TwitterClient Class
    api = TwitterClient()
    # calling function to get tweets
    tweets = api.get_tweets(query = 'Donald Trump', count = 200)
    if not tweets:
        # BUG FIX: guard against an empty/None result, which previously led
        # to a ZeroDivisionError (or TypeError) in the percentage math below.
        print("No tweets fetched.")
        return
    # positive tweets and their share
    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    print("Positive tweets percentage: {} %".format(100 * len(ptweets) / len(tweets)))
    # negative tweets and their share
    ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
    print("Negative tweets percentage: {} %".format(100 * len(ntweets) / len(tweets)))
    # BUG FIX: the original computed len(tweets - ntweets - ptweets); Python
    # lists do not support subtraction, so that raised TypeError. Use counts.
    neutral_count = len(tweets) - len(ntweets) - len(ptweets)
    print("Neutral tweets percentage: {} %".format(100 * neutral_count / len(tweets)))
    # printing first 10 positive tweets (comment said 5 but the slice is 10)
    print("\n\nPositive tweets:")
    for tweet in ptweets[:10]:
        print(tweet['text'])
    # printing first 10 negative tweets
    print("\n\nNegative tweets:")
    for tweet in ntweets[:10]:
        print(tweet['text'])


if __name__ == "__main__":
    # calling main function
    main()
```
| github_jupyter |
# **Model Training**
Importing Basic Libraries and setting input stream for training and testing data
```
import keras
from keras.layers import Input, Dense, Lambda, Flatten
from keras.layers import Dropout
from keras.models import Model
#From Keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# Re-size all the images to the VGG input resolution (224x224 RGB).
IMAGE_SIZE = [224,224]

# NOTE(review): paths assume the dataset lives in this Google Drive folder.
train_path = '/content/drive/My Drive/Final Segmented Dataset/Train'
test_path = '/content/drive/My Drive/Final Segmented Dataset/Test'

import tensorflow as tf

# Abort early when Colab was not given a GPU runtime.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))

# Mount Google Drive so the dataset paths above resolve.
from google.colab import drive
drive.mount('/content/drive')

# Training-time augmentation (shear/zoom/horizontal flip); pixels scaled to [0,1].
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
# Test images are only rescaled, never augmented.
test_datagen = ImageDataGenerator(rescale = 1./255)

# class_mode='binary' yields 0/1 labels for the two-class problem.
training_set = train_datagen.flow_from_directory(train_path,
                                                 target_size = (224, 224),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory(test_path,
                                            target_size = (224, 224),
                                            batch_size = 32,
                                            class_mode = 'binary')
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
import os
import glob
import shutil
import sys
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
from IPython.display import Image
import vis
get_ipython().run_line_magic('matplotlib', 'inline')
%matplotlib inline
```
Adding Preprocessing layer to the front of VGG
```
IMAGE_SIZE = [224, 224]

# VGG19 backbone pretrained on ImageNet; include_top=False drops its classifier head.
vgg = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Freeze everything except the last 7 layers, so only those are fine-tuned.
for layer in vgg.layers[:-7]:
    layer.trainable = False

# Custom classification head: three 100-unit ReLU blocks, each with dropout.
x = Flatten()(vgg.output)
x = Dense(100, activation = 'relu')(x)
x = Dropout(0.35)(x)
x = Dense(100, activation = 'relu')(x)
x = Dropout(0.35)(x)
x = Dense(100, activation = 'relu')(x)
x = Dropout(0.35)(x)
# Single sigmoid unit -> binary (COVID positive/negative) probability.
prediction = Dense(1, activation='sigmoid')(x)
#prediction = Dense(2, activation='softmax')(x)
def scheduler(epoch, lr):
    """Learning-rate schedule: hold the rate for the first 20 epochs,
    then decay it exponentially (factor e^-0.1 per epoch)."""
    return lr if epoch < 20 else float(lr * tf.math.exp(-0.1))
from keras import callbacks

# Stop when training loss plateaus for 5 epochs; apply the LR decay schedule.
callback = [callbacks.EarlyStopping(monitor='loss', patience=5), callbacks.LearningRateScheduler(scheduler, verbose=0)]

# create a model object wiring the VGG input to the custom head
model = Model(inputs=vgg.input, outputs=prediction)

# view the structure of the model
#model.summary()

# Binary cross-entropy matches the single sigmoid output unit.
model.compile(
  loss='binary_crossentropy',
  optimizer='adam',
  metrics=['accuracy']
)
#loss='categorical_crossentropy',

# NOTE(review): fit_generator is deprecated in TF2 — model.fit accepts
# generators directly; confirm the installed Keras/TF version.
history = model.fit_generator(training_set,
                              steps_per_epoch = len(training_set),
                              epochs = 50, callbacks = callback,
                              validation_data = test_set,
                              validation_steps = len(test_set))
# Number of epochs actually run (early stopping may cut training short).
len(history.history['loss'])

# Loss curves.
# BUG FIX: plt.savefig() was called after plt.show(); show() finalizes and
# clears the current figure, so the saved files were blank. Save first.
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()

# Accuracy curves (same save-before-show fix).
plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()
import tensorflow as tf
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input, decode_predictions
import numpy as np

# Persist the trained model to Drive.
model.save('/content/drive/My Drive/VGG19AS7.h5')


def predict_image(img_path, target_size=(224, 224)):
    """Load one image, apply VGG19 preprocessing, and print/return the
    model's raw prediction for it.

    Refactor: the original repeated this load/preprocess/predict sequence
    verbatim for four images (with the imports re-executed each time);
    the duplication is now a single helper.
    """
    img = image.load_img(img_path, target_size=target_size)
    img_data = image.img_to_array(img)
    img_data = np.expand_dims(img_data, axis=0)  # add the batch dimension
    img_data = preprocess_input(img_data)
    preds = model.predict(img_data)
    # decode_predictions is for the stock 1000-class ImageNet head; this
    # custom sigmoid head emits a raw probability, so we print it directly.
    print('Predicted:', preds)
    return preds


# Sanity-check the model on known positive/negative validation images.
predict_image('/content/drive/My Drive/Final Segmented Dataset/Validation/COVID Positive/X(977).jpg')
predict_image('/content/drive/My Drive/Final Segmented Dataset/Validation/COVID Negative/X(377).png')
predict_image('/content/drive/My Drive/Sample blacked out/CN.png')
predict_image('/content/drive/My Drive/Sample blacked out/CP1.jpg')
# BUG FIX: the cell originally ended with a stray bare `END` statement,
# which raised NameError when the cell ran; it has been removed.
```
| github_jupyter |
# Week 3
I hope you're getting the hang of things. Today we're carrying on with the principles of data visualization!
## Overview
Once again, the lecture has three parts:
* First you will watch a video on visualization and solve a couple of exercises.
* After that, we'll be reading about *scientific data visualization*, and the huge number of things you can do with just one variable. Naturally, we'll be answering questions about that book.
* And finally reproducing some of the plots from that book.
## Part 1: Fundamentals of data visualization
Last week we had a small introduction of data visualization. Today, we are going to be a bit more specific on data analysis and visualization. Digging a bit more into the theory with the next video.
<mark>*It's important to highlight that these lectures are quite important. We don't have a formal book on data visualization. So the only source of knowledge about the **principles**, **theories**, and **ideas**, that are the foundation for good data viz, comes from the videos*. So watch them 🤓 </mark>
[](https://www.youtube.com/watch?v=yiU56codNlI)
> *Exercise 1.1:* Questions for the lecture
> * As mentioned earlier, visualization is not the only way to test for correlation. We can (for example) calculate the Pearson correlation. Explain in your own words how the Pearson correlation works and write down its mathematical formulation. Can you think of an example where it fails (and visualization works)?
> **Answer:** The Pearson correlation is defined as $\rho(x,y) = \frac{\mathrm{cov}(x,y)}{\sigma_x\sigma_y}$, that is, the covariance between $x$ and $y$ divided by the product of their standard deviations. If $\rho > 0$ there is a positive correlation, and if $\rho < 0$ there is a negative correlation. For $\rho = 0$ there is no correlation. The Pearson correlation can only capture *linear* correlation between variables, so if we had $y = x^2$ then $\rho(x,y) = 0$, but the visualization would show a quadratic polynomial.
> * What is the difference between a bar-chart and a histogram?
> **Answer:** A bar-chart shows the count over some criteria or group, usually requires two variables. The histogram shows the frequency or density of one variable, thus showing its distribution.
> * I mention in the video that it's important to choose the right bin-size in histograms. But how do you do that? Do a Google search to find a criterion you like and explain it.
> **Answer:** A common approach is $\text{number of bins} = \left\lceil \frac{\text{maximum value} - \text{minimum value}}{\text{bin width}} \right\rceil$, which takes the range of the data (the width in the plot) divided by the bin width, giving how many bins fit within that range.
Ok, now that we've talked a bit about correlation and distributions, we are going to compute/visualize them while also testing some hypotheses along the way. Until now, we have analysed data at an explorative level, but we can use statistics to verify whether relationships between variables are significant. We'll do this in the following exercise.
### *Exercise 1.2:*
> Hypothesis testing. We will look into correlations between number of steps and BMI, and differences between two data samples (Females vs Males). Follow the steps below for success:
> * First, we need to get some data. Download and read the data from the Female group [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_f.csv) and the one from the Male group [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_m.csv).
>
```
import pandas as pd
# Step-count / BMI data for the female and male groups (one row per person).
female = pd.read_csv("https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_f.csv")
male = pd.read_csv("https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_m.csv")
```
> * Next, we are going to verify the following hypotheses:
> 1. <mark>*H1: there is a statistically significant difference in the average number of steps taken by men and women*</mark>. Is there a statistically significant difference between the two groups? What is the difference between their mean number of steps? Plot two histograms to visualize the step-count distributions, and use the criterion you chose in Ex.1.1 to define the right bin-size.
**Hint** you can use the function `ttest_ind()` from the `stats` package to test the hypothesis and consider a significance level $\alpha=0.05$.
> 2. <mark>*H2: there is a negative correlation between the number of steps and the BMI for women*.</mark> We will use Pearson's correlation here. Is there a negative correlation? How big is it?
> 3. <mark>*H3: there is a positive correlation between the number of steps and the BMI for men*.</mark> Is there a positive correlation? Compare it with the one you found for women.
>
```
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme()
def get_bins(data, binwidth=1000):
    """Return bin edges covering *data* in fixed steps of *binwidth*.

    The edges start at the minimum value and extend one bin past the
    maximum, so the largest observation always falls inside a bin.
    """
    lo = min(data)
    hi = max(data)
    return range(lo, hi + binwidth, binwidth)
```
#### Hypothesis 1
> There is a statistically significant difference in the average number of steps taken by men and women .
```
# Side-by-side histograms of the step-count distributions, binned with the
# fixed-width rule from Ex. 1.1 (bin width = 1000 steps).
f, (ax1, ax2) = plt.subplots(1, 2)
female['steps'].hist(bins=get_bins(female['steps'], 1000), ax=ax1)
ax1.set_title('Step-count histogram for females')
male['steps'].hist(bins=get_bins(male['steps'], 1000), ax=ax2)
ax2.set_title('Step-count histogram for males')
plt.show()
# Independent two-sample t-test of H1: mean step counts differ by group.
from scipy.stats import ttest_ind
res = ttest_ind(female['steps'], male['steps'])
# Significance level alpha = 0.05.
print(f'Is there a significant difference between male and female step counts? {res.pvalue < 0.05}')
```
#### Hypothesis 2
There is a negative correlation between the number of steps and the BMI for women
```
# Pearson correlation between steps and BMI for women; .iloc[0, 1] picks the
# off-diagonal entry of the 2x2 correlation matrix.
print(f"The correlation between female step counts and BMI is: {female.corr().iloc[0, 1]:.3}")
```
#### Hypothesis 3
There is a positive correlation between the number of steps and the BMI for men
```
# Same as above for men: off-diagonal entry of the 2x2 correlation matrix.
print(f"The correlation between male step counts and BMI is: {male.corr().iloc[0, 1]:.3}")
```
We see that both males and females have a negative correlation between their step count and BMI. However, women seem to have a larger negative correlation between walking and losing weight. Perhaps, they spend more energy when they walk, or they just include more exercise when they walk... this can be investigated using different datasets.
> * We have now gathered the results. Can you find a possible explanation for what you observed? You don't need to come up with a grand theory about mobility and gender, just try to find something (e.g. theory, news, papers, further analysis etc.) to support your conclusions and write down a couple of sentences.
As talked about previously, the activities around when women walk could affect the relationship more, and the fact that they walk more as an exercise than men. It could also be the fact that men just have a larger calorie intake than women in general and the diminishes the effect of walking. As a conclusion, it seems that women tend to burn more fat than men.
> *Exercise 1.3:* scatter plots. We're now going to fully visualize the data from the previous exercise.
>
> * Create a scatter plot with both data samples. Use `color='#f6756d'` for one <font color=#f6756d>sample</font> and `color='#10bdc3'` for the other <font color=#10bdc3>sample</font>. The data is in front of you, what do you observe? Take a minute to think about these exercises: what do you think the point is?
* After answering the questions above, have a look at this [paper](https://genomebiology.biomedcentral.com/track/pdf/10.1186/s13059-020-02133-w.pdf) (in particular, read the *Not all who wander are lost* section).
```
# Scatter plot of BMI vs. step count with both samples on one axes, using the
# same colours as the reference paper.
f, ax1 = plt.subplots(1, 1, figsize=(5, 5))
female.plot(kind='scatter', x='steps', y='bmi', color='#f6756d', ax=ax1, label='female')
ax1.set_title('Bmi vs steps for females and males')
male.plot(kind='scatter', x='steps', y='bmi', color='#10bdc3', ax=ax1, label='male')
plt.legend()
plt.show()
```
Obviously, the point is to show that one can perform a plethora of statistical analysis but without visualization we will rarely be able to understand the nature of the data. Which in this case is that it's clearly user-made and does not actually represent the step count vs bmi relationship for men and women.
> * The scatter plot made me think of another point we often overlook: *color-vision impairments*. When visualizing and explaining data, we need to think about our audience:
> * We used the same colors as in the paper, try to save the figure and use any color-blindness simulator you find on the web ([this](https://www.color-blindness.com/coblis-color-blindness-simulator/) was the first that came out in my browser). Are the colors used problematic? Explain why, and try different types of colors. If you are interested in knowing more you can read this [paper](https://www.tandfonline.com/doi/pdf/10.1179/000870403235002042?casa_token=MAYp78HctgQAAAAA:AZKSHJWuNmoMXD5Dtqln1Sc-xjNwCe6UVDMVEpP95UjTH3O1H-NKRkfYljw2VLSm_zKlN74Da6g).
> * But, are colors the only option we have? Find an alternative to colors, explain it, and change your scatter plot accordingly.
* The colors change especially for 'red-blind/protanopia' color blind people. Then it can be difficult to distinguish between the two different points.
* An alternative to colors are different symbols such as crosses, squares, triangles, striped lines and so on. And it is often preferred in addition to colors!
```
# Same scatter plot, but with distinct markers (circle vs. cross) so the two
# groups stay distinguishable for colour-blind readers.
f, ax1 = plt.subplots(1, 1, figsize=(8, 8))
female.plot(kind='scatter', x='steps', y='bmi', color='#f6756d', marker='o', ax=ax1, label='female')
ax1.set_title('Bmi vs steps for females and males')
male.plot(kind='scatter', x='steps', y='bmi', color='#10bdc3', ax=ax1, marker='x', label='male')
plt.legend()
plt.show()
```
## Part 2: Reading about the theory of visualization
Since we can go deeper with the visualization this year, we are going to read the first couple of chapters from [*Data Analysis with Open Source Tools*](http://shop.oreilly.com/product/9780596802363.do) (DAOST). It's pretty old, but I think it's a fantastic resource and one that is pretty much as relevant now as it was back then. The author is a physicist (like Sune) so he likes the way he thinks. And the books takes the reader all the way from visualization, through modeling to computational mining. Anywho - it's a great book and well worth reading in its entirety.
As part of this class we'll be reading the first chapters. Today, we'll read chapter 2 (the first 28 pages) which supports and deepens many of the points we made during the video above.
To find the text, you will need to go to **DTU Learn**. It's under "Course content" $\rightarrow$ "Content" $\rightarrow$ "Lecture 3 reading".
> *Exercise 2*: Questions for DAOST
* Explain in your own words the point of the jitter plot
> **Answer:** The point of a jitter plot is to avoid the case of a dot plot where points lie directly on top of each other. The jitter plot solves this by introducing a small random noise
* Explain in your own words the point of figure 2-3. (I'm going to skip saying in your own words going forward, but I hope you get the point; I expect all answers to be in your own words)
> **Answer:** It aims to show that histograms are not always perfect out of the box. They require some thinking with respect to anchoring of the bins, the bin width and number of bins in order to accurately represent the distribution
* The author of DAOST (Philipp Janert) likes KDEs (and think they're better than histograms). And we don't. Sune didn't give a detailed explanation in the video, but now that works to our advantage. We'll ask you to think about this and thereby create an excellent exercise: When can KDEs be misleading
> **Answer:** KDEs can produce smooth representations of dataset whilst also accounting for outliers. However, they are highly influenced by the choice of hyperparameters such as the bandwidth. The data scientist may choose to make a dataset seem larger and more significant than it actually is if the bandwidth is chosen 'correctly'. Moreover, KDEs can cause performance issues for larger datasets. The choice of bandwidth is often a bias-variance trade-off as described in the book which leads to complicated techniques such as cross-validation for producing just a simple plot of a distribution
* Sune discussed some strengths of the CDF - there are also weaknesses. Janert writes CDFs have less intuitive appeal than histograms or KDEs. What does he mean by that?
> **Answer:** The value on a CDF plot are always influenced by the previous values which means that the reader has to keep in mind all the previous value up until a certain point and subtract that from the next point to get the change in density from one point to another. This is quite cumbersome in comparison to just looking at a bin for each range that in itself describes how many values there are in the given range. Moreover, instantaneous changes in CDFs are hard to notice for both small and large changes as it is the gradient of the slope that describes the change, and not the value of the line
* What is a *Quantile plot*? What is it good for
> **Answer:** They make the CDF more interpretable, as the reader can read from the y-axis across to the x-axis, asking questions such as: what response time corresponds to the 10th percentile of response times?
* How is a *Probablity plot* defined? What is it useful for? Have you ever seen one before
> **Answer:** The probability plot is defined via the inverse of the Gaussian distribution. And with a bit of algebra, you can get a linear relationship for a data set as a function of $\phi^{-1}(y_i)$ with intercept $\mu$ and slope $\sigma$. This means that any normally distributed variable should fall on a straight line. If it does not, the data is not distributed according to a normal distribution.
* One of the reasons we like DAOST is that Janert is so suspicious of mean, median, and related summary statistics. Explain why one has to be careful when using those - and why visualization of the full data is always better.
> **Answer:** First off, they apply only under certain assumptions and are misleading if those assumptions are not fulfilled. Those assumptions are for example that the data is *unimodal*. By visualizing the data it can quickly be inspected if the distribution is uni or bi or more modal
* Sune loves box plots (but not enough to own one of [these](https://twitter.com/statisticiann/status/1387454947143426049) 😂). When are box plots most useful
> **Answer:** Box plots are great to represent outliers and a dataset's percentile and median values. It also allows us to see if the data set is symmetric and how the data distributes around the mean. They are often best when comparing multiple distributions against each other
* The book doesn't mention [violin plots](https://en.wikipedia.org/wiki/Violin_plot). Are those better or worse than box plots? Why
> **Answer:** A violin plot is a hybrid of the KDE and box plot. This means that violin plots can additionally show density of the distribution and not only its summary statistics! Since it provides more information, it can definitely be better than a box plot. However, one should care that the KDE does not become over representative and show things that may be misleading.
## Part 3: *Finally*! Let's create some visualizations
### *Exercise 3.1*: Connecting the dots and recreating plots from DAOST but using our own favorite dataset.
>
> * Let's make a jitter-plot (that is, code up something like **Figure 2-1** from DAOST from scratch), but based on *SF Police data*. My hunch from inspecting the file is that the police-folks might be a little bit lazy in noting down the **exact** time down to the second. So choose a crime-type and a suitable time interval (somewhere between a month and 6 months depending on the crime-type) and create a jitter plot of the arrest times during a single hour (like 13-14, for example). So let time run on the $x$-axis and create vertical jitter.
```
# Load the SF police incident data and combine the separate Date and Time
# columns into a single datetime; keep only complete years (before 2018).
df_raw = pd.read_csv('Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv')
df_raw.Date = pd.to_datetime(df_raw['Date']) + pd.to_timedelta(df_raw['Time'] + ':00')
df = df_raw[df_raw.Date.dt.year < 2018]
from datetime import datetime
import numpy as np
# Assaults reported between 1 Aug and 1 Oct 2016 ...
df_plot_1 = df[
    (df.Date >= datetime(2016, 8, 1, 13)) & (df.Date <= datetime(2016, 10, 1, 14)) & (df.Category == 'ASSAULT')]
# ... restricted to the 13:00-14:59 window.
df_plot_1 = df_plot_1[(df_plot_1.Date.dt.hour >= 13) & (df_plot_1.Date.dt.hour <= 14)]
fig = plt.figure(figsize=(15,7))
# Jitter plot: time runs on the x-axis; uniform random vertical jitter
# separates points that share the same (coarsely logged) timestamp.
g = sns.scatterplot(x=df_plot_1['Time'].sort_values(), y=np.random.random(len(df_plot_1)))
plt.xticks(rotation=85)
plt.ylim([-1, 2])
# BUG FIX: the title previously said "Robberies", but the data filtered above
# is the ASSAULT category.
plt.title('Jitterplot of Assaults from August 2016 to October 2016 in the time interval 13:00 to 14:00')
plt.show()
```
> * Last time, we did lots of bar-plots. Today, we'll play around with histograms (creating two crime-data based versions of the plot-type shown in DAOST **Figure 2-2**). I think the GPS data could be fun to see this way.
> * This time, pick two crime-types with different geographical patterns **and** a suitable time-interval for each (you want between 1000 and 10000 points in your histogram)
> * Then take the latitude part of the GPS coordinates for each crime and bin the latitudes so that you have around 50 bins across the city of SF. You can use your favorite method for binning. I like `numpy.histogram`. This function gives you the counts and then you do your own plotting.
```
# One calendar year of robberies (gives the required 1k-10k points).
df_plot_2 = df[(df.Date >= datetime(2016, 1, 1)) & (df.Date <= datetime(2016, 12, 31)) & (df.Category == 'ROBBERY')]
# NOTE(review): 'X' here holds values around -122.4, i.e. the *longitude* of
# San Francisco, while the titles below say "latitude" -- confirm which
# coordinate was intended.
df_plot_2['X'].hist(bins=50)
plt.xlim([-122.5, -122.375])
plt.title('Histogram of latitude for robbery in 2016')
plt.show()
# Second crime type with a different geographical pattern: prostitution.
df_plot_3 = df[
    (df.Date >= datetime(2016, 1, 1)) & (df.Date <= datetime(2016, 12, 31)) & (df.Category == 'PROSTITUTION')]
df_plot_3['X'].hist(bins=50)
plt.xlim([-122.5, -122.375])
plt.title('Histogram of latitude for prostitution in 2016')
plt.show()
```
> * Next up is using the plot-type shown in **Figure 2-4** from DAOST, but with the data you used to create Figure 2.1. To create the kernel density plot, you can either use `gaussian_kde` from `scipy.stats` ([for an example, check out this stackoverflow post](https://stackoverflow.com/questions/4150171/how-to-create-a-density-plot-in-matplotlib)) or you can use [`seaborn.kdeplot`](https://seaborn.pydata.org/generated/seaborn.kdeplot.html).
> * Now grab 25 random timepoints from the dataset (of 1000-10000 original data) you've just plotted and create a version of Figure 2-4 based on the 25 data points. Does this shed light on why I think KDEs can be misleading?
>
> Let's take a break. Get some coffee or water. Stretch your legs. Talk to your friends for a bit. Breathe. Get relaxed so you're ready for the second part of the exercise.
```
# KDE of the incident timestamps, overlaid with the raw data points (jitter
# scaled by 1/50 so they sit just above the x-axis).
fig, ax = plt.subplots(1, 1)
sns.kdeplot(df_plot_1.Date, label='KDE', ax=ax)
sns.scatterplot(x=df_plot_1.Date, y=np.random.random(len(df_plot_1)) / 50, color='red', label='Data points', ax=ax)
plt.xticks(rotation=85)
# NOTE(review): df_plot_1 is the ASSAULT subset defined earlier, although the
# title says "Robberies" -- confirm the intended category.
plt.title('KDEplot of Robberies in August 2016 with data only in time interval 13:00 to 14:00')
plt.legend()
plt.show()
```
We see that the KDE does say that some data exists in the tails but in fact there is no data at all..
### *Exercise 3.2*. Ok. Now for more plots 😊
> * Now we'll work on creating two versions of the plot in **Figure 2-11**, but using the GPS data you used for your version of Figure 2-2. Comment on the result. It is not easy to create this plot from scratch.
**Hint:** Take a look at the `scipy.stats.probplot` function.
**Hint:** Take a look at the `scipy.stats.probplot` function.
```
from scipy.stats import probplot

def _probability_plot(series, title):
    """Draw a normal probability plot for *series* and annotate it with the
    fitted straight line (slope = sigma, intercept = mu).

    probplot returns ((osm, osr), (slope, intercept, r)); only the fit
    parameters are used for the annotation text.
    """
    res = probplot(series, plot=plt)
    slope = res[1][0]
    intercept = res[1][1]
    linear_func = f"data*{slope:.3} + {intercept:.3}"
    # Annotation position chosen for the SF longitude value range.
    plt.text(x=-3, y=-122.375, s=linear_func)
    plt.title(title)
    plt.show()

# Same plot for both crime types; previously this code was duplicated inline.
_probability_plot(df_plot_2.X, 'Probability Plot for Robberies in 2016')
_probability_plot(df_plot_3.X, 'Probability Plot for Prostitution in 2016')
```
> * OK, we're almost done, but we need some box plots. Here, I'd like you to use the box plots to visualize fluctuations of how many crimes happen per day. We'll use data from the 15 focus crimes defined last week.
> * For the full time-span of the data, calulate the **number of crimes per day** within each category for the entire duration of the data.
> * Create a box-and whiskers plot showing the mean, median, quantiles, etc for all 15 crime-types side-by-side. There are many ways to do this. I like to use [matplotlibs's built in functionality](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.boxplot.html), but you can also achieve good results with [seaborn](https://seaborn.pydata.org/generated/seaborn.boxplot.html) or [pandas](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.boxplot.html).
> * What does this plot reveal that you can't see in the plots from last time?
```
# Focus crime categories from last week.
# NOTE(review): the exercise mentions 15 focus crimes but only 14 are listed
# here -- confirm whether one category is missing.
focuscrimes = {'WEAPON LAWS', 'PROSTITUTION', 'DRIVING UNDER THE INFLUENCE', 'ROBBERY', 'BURGLARY', 'ASSAULT',
               'DRUNKENNESS', 'DRUG/NARCOTIC', 'TRESPASS', 'LARCENY/THEFT', 'VANDALISM', 'VEHICLE THEFT',
               'STOLEN PROPERTY', 'DISORDERLY CONDUCT'}
df_raw = pd.read_csv('Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv')
# Combine the separate Date and Time columns into one datetime column.
df_raw.Date = pd.to_datetime(df_raw['Date']) + pd.to_timedelta(df_raw['Time'] + ':00')
df = df_raw[df_raw.Category.isin(focuscrimes)]
# Count incidents per day per category (PdId is a per-incident id), pivot to
# one column per category, and draw side-by-side box plots.
df.groupby([df.Date.dt.floor('d'), df.Category]).count()['PdId'].reset_index().pivot_table('PdId', ['Date'],
                                                                                           'Category').boxplot(
    figsize=(15, 10))
plt.xticks(rotation=90)
plt.title('Box plots of focus crimes per day')
plt.show()
```
This time we are shown the outliers as well as the summary statistics for each category
> * Also I want to show you guys another interesting use of box plots. To get started, let's calculate another average for each focus-crime, namely what time of day the crime happens. So this time, the distribution we want to plot is the average time-of-day that a crime takes place. There are many ways to do this, but let me describe one way to do it.
* For datapoint, the only thing you care about is the time-of-day, so discard everything else.
* You also have to deal with the fact that time is annoyingly not divided into nice units that go to 100 like many other numbers. I can think of two ways to deal with this.
* For each time-of-day, simply encode it as seconds since midnight.
* Or keep each whole hour, and convert the minute/second count to a percentage of an hour. So 10:15 $\rightarrow$ 10.25, 8:40 $\rightarrow$ 8.67, etc.
* Now you can create box-plots to create an overview of *when various crimes occur*. Note that these plot have quite a different interpretation than ones we created in the previous exercise. Cool, right?
```
# Keep only the columns needed for the time-of-day analysis. Use .copy() so
# the column assignment below works on an independent frame instead of a view
# of df (avoids pandas' SettingWithCopyWarning / silently lost writes).
df_new = df[['Category', 'Date']].copy()
df_new.head()
# Encode each incident's time of day as minutes since midnight (0..1439).
df_new['Minutes_since_midnight'] = df_new.Date.dt.hour * 60 + df_new.Date.dt.minute
df_new.head()
# BUG FIX: the histogram previously plotted the *hour* of day while the title
# says "minutes since midnight" -- plot the minutes column the title refers to.
df_new['Minutes_since_midnight'].hist(bins=75)
plt.title('Histogram of minutes since midnight')
plt.show()
```
It looks like they are quite inconsistent with logging the time. It usually happens in bulk.
```
# Count occurrences for each minute-since-midnight per category.
df_counts = df_new.value_counts(['Minutes_since_midnight', 'Category']).reset_index().rename({0: 'count'}, axis=1)
# Number of distinct minute-of-day values observed (at most 1440).
N = len(df_new.Minutes_since_midnight.unique())
# Average count per minute-of-day bucket.
df_counts['Avg_pr_minute_since_midnight'] = df_counts.apply(lambda x: x['count'] / N, axis=1)
# Pivot to one column per category and box-plot when each crime occurs.
# NOTE(review): pivot_table uses Avg_pr_minute_since_midnight as the index
# and Minutes_since_midnight as the values -- verify this orientation is the
# intended one.
df_counts.pivot_table('Minutes_since_midnight', ['Avg_pr_minute_since_midnight'], 'Category').boxplot(figsize=(15,10))
plt.xticks(rotation=80)
plt.title('Box plot of which time of day each focus crime happens')
plt.ylabel('Minutes since midnight')
plt.show()
```
Pretty cool. Drunkenness happens mostly in the midday! Whilst disorderly conduct happens mostly in the mornings.
| github_jupyter |
# Numpy
```
import numpy as np
from numpy import *
```
A estrutura de dados base do *numpy* sao os **arrays**
```
import numpy as np
# Create a one-dimensional array from a list.
lst = [1,3,5,7,9,10]
a1d = np.array(lst)
print(a1d)
print(lst)
# Array of 8 zeros.
b1d = np.zeros((8))
print('ald=', b1d)
# Array of 8 ones.
b1d = np.ones((8))
print('bld=', b1d)
# Sequential values 0..7.
b1d = np.arange((8))
print('cld=', b1d)
# 5 evenly spaced values between 1 and 2 (inclusive).
b1d = np.linspace(1,2,5)
print('dld=', b1d)
```
## Criando arrays bidimensionais a partir de uma lista de listas
```
# 2-D array built from a list of lists (3 rows x 5 columns).
a2d = np.array([[1,3,5,7,9],
                [2,4,6,10,12],
                [0,1,2,3,4]])
a2d
import numpy as np
# 5x10 matrix of zeros.
b2d = np.zeros((5,10))
print(b2d)
# 4x4 identity matrix.
c2d = np.identity(4)
print(c2d)
```
Numpy arrays sao objetos chamados _ndarrays_ e possuem diversos **atributos**:
- _ndarray.ndim_ - numero de eixos (dimensoes do array)
- _ndarray.shape_ - uma tupla de inteiros indicando o tamanho do array em cada dimensao
- _ndarray.size_ - o numero total de elementos no array
- _ndarray.dtype_ - tipo dos elementos no array
- _ndarray.itemsize_ 0 o tamanho em bytes de cada elemento do array
- _ndarray.data_ - o buffer de memoria contendo os elementos do array
```
# shape/ndim distinguish a 1-D array of length 5 from a 5x1 2-D array.
a = np.zeros((5))
b = np.zeros((5,1))
print('a.shape', a.shape)
print('b.shape', b.shape)
print('a.ndim', a.ndim)
print('b.ndim', b.ndim)
```
## Percorrendo elementos com um *loop*
A iteracao eh feita por linhas , se o que se busca sao os elementos, deve-se utilizar um laco duplo ou entao a versao 'flat' do array
```
# Iterate over the rows (iterating a 2-D array yields one row at a time).
for i,row in enumerate(a2d):
    print('linha ',i, ' = ', row)
print(5*'--', 'iterando o array')
# Iterate over individual elements with a double loop.
for i,r in enumerate(a2d):
    for j,e in enumerate(r):
        print('elemento ', i , ' ', j, ' = ', e)
print(5*'--', 'utilizando indices')
# Indices can also be used directly via the shape attribute.
for i in range(a2d.shape[0]):
    for j in range(a2d.shape[1]):
        print('elemento ',i, ' ', j, ' = ', a2d[i,j])
print(5*'--', 'utilizando flat')
```
## Slicing arrays
A melhor forma de se percorrer um array eh por meio de _slicing_ , evitando uso de loops for, que sao computacionalmente muito pesados.
- Array slicing funciona como em listas, mas em multiplas dimensoes
- Omitir um indice corresponde a recuperar toda a dimensao omitida
- Um slice eh uma visao (*VIEW*) do array original (similar a uma referencia), isto eh, o dado nao eh copiado
```
a2d = np.array([[1,3,5,7,9],
                [2,4,6,10,12],
                [0,1,2,3,4]])
# Slicing rows: omitting an index recovers the whole omitted dimension.
print('a2d[1, :] - Recupera a linha de indice 1 (equivalent to a2d[1])\n', a2d[1, :])
# Slicing columns.
print('a2d[:, 2] - Recupera a coluna de indice 2\n', a2d[:, 2])
# Slicing a sub-block; note slices are views, not copies.
print('a2d[1:, 2:5] - Recupera o bloco a partir da linha de indice 1 e colunas 2,3 e 4\n', a2d[1:, 2:5])
```
## Metodos flatten, ravel e reshape
- O metodo reshape permite reformatar o array mofidicando o numero de linhas e colunas, porem, a nova 'shape' deve possuir o mesmo numero de elementos do array original
- O metodo ravel concatena as linhas da matriz em um array unidimensional
- O Metodo flatten tambem concatena as linhas da matriz em um array unidimensional, porem, faz uma copia dos elementos. O Metodo ravel gera uma **view**, portanto se algum elemento for modificado, o array original tambem o eh
```
# Create a 1-D array of values 0..8 and reshape it into a 3x3 matrix.
a = np.arange(9).reshape((3,3))
print('a:\n', a)
# Concatenate the matrix rows into a 1-D vector; ravel returns a view.
b = a.ravel()
print('b: \n', b)
x = np.arange(18).reshape(3,6)
# Compute a boolean mask where True marks elements greater than 7.
mask = (x > 7)
# Boolean mask of the matrix for the given condition.
print(mask)
# Recover only the values selected by the mask.
print(x[mask])
# Zero out only the elements that matched the mask (in-place).
x[mask] = 0
print(x)
```
## Regarding 'Views'
- Uma **view** eh criada ao fatiar (sliccing) o array
- Uma **view** eh uma referencia a uma parte de um array
- Alterar elementos da **view** afeta o array original
- Se necessario, voce pode explicitamente fazer uma copia utilizando o metodo **copy**
## I/O with Numpy
Numpy fornece metodos para ler e escrever arrays em arquivos. A sintaxe basica para leitura de arrays a partir de um arquivo eh
```
# Basic syntax for reading an array from a text file.
nome_array = np.loadtxt('nome_do_arquivo')
```
---
# Broadcasting
| github_jupyter |
```
# Add user specific python libraries to path
import sys
sys.path.insert(0, "/home/smehra/local-packages")
print(sys.path)
import geopandas as gpd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import pyspark.sql.functions as F
import os
# Point Spark at the shared configuration directory; this must be set before
# the SparkSession/JVM is started below.
os.environ["SPARK_CONF_DIR"] = "/data/tmp/spark/conf"
import pyspark
import random
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext
# Spark configuration tuned for *local* mode on a single large node.
config = pyspark.SparkConf().setAll([('spark.ui.port', 4050),
('spark.ui.enabled', True),
# if running in local mode, driver will be only executor
# hence, give driver as much memory as possible if running in local mode
('spark.driver.memory','50g'),
# set up executor config if running in cluster or client mode
#('spark.executor.instances', '5'),
#('spark.executor.cores', '5'),
#('spark.executor.memory', '5g'),
#('spark.executor.memoryOverhead', '500m'),
# more partitions means smaller partition size per task
# hence, would reduce memory load
('spark.sql.shuffle.partitions', '1000'),
# increase max result size if you are "collecting" big dataset
# driver will need more memory to collect
#('spark.driver.maxResultSize', '2g'),
# set location spark should use for temporary data
('spark.local.dir', '/data/tmp/smehra/tmp'),
# Set location of hive database
('spark.sql.warehouse.dir', '/data/tmp/hive_warehouse'),
# Add mysql connector jar to use mysql as metastore service
('spark.jars', '/data/tmp/spark/jars/mysql-connector-java-5.1.30-bin.jar'),
# KryoSerializer is faster and more compact than the Java default serializer.
('spark.serializer', 'org.apache.spark.serializer.KryoSerializer'),
# G1GC overcomes the latency and throughput limitations with the old garbage collectors.
('spark.executor.extraJavaOptions','-XX:+UseG1GC')])
# Build (or reuse) the session; "local[30]" runs everything in-process with 30 worker threads.
spark = SparkSession.builder \
.enableHiveSupport() \
.config(conf=config) \
.master("local[30]") \
.appName("smehra_afgh_project") \
.getOrCreate()
# Get the Hive Context
hive = HiveContext(spark.sparkContext)
# Echo the effective configuration so the settings above can be verified.
spark.sparkContext._conf.getAll()
# Read the raw call-detail records from Hive.
raw_data_phone_calls = hive.sql('SELECT * FROM afghanistan.raw_data_phone_calls')
raw_data_phone_calls.show(5)
# Our methodology assumes a day starts at 5am and ends on 5am next day.
# Hence, we calculate "effective" date and time by shifting timestamps back 5 hours.
# Example:
#   Actual datetime:  2013-04-21 3.40am
#   Effective date:   2013-04-20
#   Effective hour:   23rd hour of the day (counting from 5am)
raw_data_phone_calls_with_effective_time = raw_data_phone_calls.withColumn('effective_datetime', F.col('datetime') - F.expr("INTERVAL 5 HOURS"))
# Keep and reformat columns needed for the migration detection algorithm.
# BUG FIX: the pattern was "YYYYMMdd". In Java date patterns an uppercase 'Y'
# is the *week-based* year, which differs from the calendar year around New
# Year (e.g. 2016-12-31 would format with year 2017). Lowercase 'yyyy' is the
# calendar year, which is what a daily date key requires.
raw_data_for_migration_detection = raw_data_phone_calls_with_effective_time.withColumn('date', F.date_format(F.col("effective_datetime"), "yyyyMMdd"))
raw_data_for_migration_detection = raw_data_for_migration_detection.select(F.col('phoneHash1').alias("user_id"),
                                                                           F.col('date'),
                                                                           F.col("antenna_id"))
raw_data_for_migration_detection.show(5)
# Remove rows with null or sentinel (-99 = unknown) user ids, dates, or antennas.
raw_data_for_migration_detection_filtered = raw_data_for_migration_detection.filter(raw_data_for_migration_detection.user_id.isNotNull()
& (raw_data_for_migration_detection.user_id != "-99")
& raw_data_for_migration_detection.date.isNotNull()
& (raw_data_for_migration_detection.date != "-99")
& raw_data_for_migration_detection.antenna_id.isNotNull()
& (raw_data_for_migration_detection.antenna_id != -99))
# Load tower to antenna mapping data
tower_to_antenna_map = spark.read.csv('/data/projects/displacement_afghanistan/data/Aggregated_Groups/TowerDetails_WithGroupIDs_UTM42N.csv', header = True, inferSchema=True)
tower_to_antenna_map = tower_to_antenna_map.select(F.col('Final_Agg_GroupID').alias("tower_group_id"), F.col('callingcellid').alias("antenna_id"))
tower_to_antenna_map.show(5)
# Join call records with the tower-to-antenna map to attach the aggregated
# tower group id (the original comment said "district"; that join happens later).
# Left join keeps calls whose antenna has no mapping (tower_group_id = null).
raw_data_with_tower_groups = raw_data_for_migration_detection_filtered.join(tower_to_antenna_map,
raw_data_for_migration_detection_filtered.antenna_id == tower_to_antenna_map.antenna_id,
how = 'left').select(raw_data_for_migration_detection_filtered['*'], tower_to_antenna_map['tower_group_id'])
raw_data_with_tower_groups.show(5)
```
## User Daily Unique Towers
```
# Keep only (user, day, tower-group) triples; tower_group_id becomes "location".
raw_data_with_tower_locations = raw_data_with_tower_groups.select(F.col('user_id'),
F.col("date"),
F.col("tower_group_id").alias('location'))
raw_data_with_tower_locations.show(5)
## drop duplicates and sort data
raw_data_with_tower_groups_deduped = raw_data_with_tower_locations.dropDuplicates()
raw_data_with_tower_groups_deduped_sorted = raw_data_with_tower_groups_deduped.sort(["user_id", "date", "location"])
# save in hive
raw_data_with_tower_groups_deduped_sorted.write.saveAsTable('afghanistan.user_daily_unique_towers_long')
user_daily_unique_towers_long = hive.sql('SELECT * FROM afghanistan.user_daily_unique_towers_long')
# convert long form to wide form dataset
# one row per user
# one column per day
# value of each cell represent all towers user used on that day
# note: collect_set dedups location values.
# use collect_list if you need *all* location values for a day for a user
user_daily_unique_towers_wide = user_daily_unique_towers_long.groupby('user_id').pivot('date').agg(F.collect_set('location'))
user_daily_unique_towers_wide.show(100)
# Build the full day series from 20130101 to 20171231 as yyyymmdd strings.
# Iterating real calendar dates handles month lengths and leap years
# correctly (the previous hand-rolled month/day loops relied on a bare
# year % 4 leap test, which is wrong for century years such as 2100, and
# duplicated the 31/30/28-day month lists).
from datetime import date, timedelta

daySeriesList = set()
current_day = date(2013, 1, 1)
last_day = date(2017, 12, 31)
while current_day <= last_day:
    daySeriesList.add(current_day.strftime("%Y%m%d"))
    current_day += timedelta(days=1)
# add empty columns for days for which we did not have any users making any calls
# existing list of columns in user_daily_unique_towers_wide table
existingDaySeriesColumns = user_daily_unique_towers_wide.columns
existingDaySeriesColumns.remove('user_id')
existingDaySeriesColumns = set(existingDaySeriesColumns)
# Days in the full 2013-2017 series that never appeared in the data.
missingColumns = daySeriesList.difference(existingDaySeriesColumns)
print('missing columns: ', missingColumns)
for newColumn in missingColumns:
    # F.array() with no arguments yields an empty-array column for every row.
    user_daily_unique_towers_wide = user_daily_unique_towers_wide.withColumn(str(newColumn), F.array())
    print('added column: ', newColumn)
# save in hive
user_daily_unique_towers_wide.write.saveAsTable('afghanistan.user_daily_unique_towers_wide')
```
## User Daily Unique Districts
```
# Load tower to district mapping data
tower_to_district_map = spark.read.csv('/data/projects/displacement_afghanistan/data/Aggregated_Groups/Final_Aggregated_GroupIDs_UTM42N.csv', header = True, inferSchema=True)
tower_to_district_map = tower_to_district_map.select(F.col('Final_Agg_GroupID').alias("tower_group_id"), F.col('distid').alias("district_id"))
tower_to_district_map.show(5)
# Attach the district id to every call record via its tower group
# (left join keeps calls whose tower group has no district mapping).
raw_data_with_districts = raw_data_with_tower_groups.join(tower_to_district_map,
raw_data_with_tower_groups.tower_group_id == tower_to_district_map.tower_group_id,
how = 'left').select(raw_data_with_tower_groups['*'], tower_to_district_map['district_id'])
# Keep only (user, day, district) triples; district_id becomes "location".
raw_data_with_districts = raw_data_with_districts.select(F.col('user_id'),
F.col("date"),
F.col("district_id").alias('location'))
raw_data_with_districts.show(5)
## drop duplicates and sort data
raw_data_with_districts_deduped = raw_data_with_districts.dropDuplicates()
raw_data_with_districts_deduped_sorted = raw_data_with_districts_deduped.sort(["user_id", "date", "location"])
# save in hive
raw_data_with_districts_deduped_sorted.write.saveAsTable('afghanistan.user_daily_unique_districts_long')
user_daily_unique_districts_long = hive.sql('SELECT * FROM afghanistan.user_daily_unique_districts_long')
# convert long form to wide form dataset
# one row per user
# one column per day
# value of each cell represent all districts the user used on that day
# note: collect_set dedups location values.
# use collect_list if you need *all* location values for a day for a user
user_daily_unique_districts_wide = user_daily_unique_districts_long.groupby('user_id').pivot('date').agg(F.collect_set('location'))
user_daily_unique_districts_wide.show(100)
# Build the full day series from 20130101 to 20171231 as yyyymmdd strings.
# Iterating real calendar dates handles month lengths and leap years
# correctly (the previous hand-rolled month/day loops relied on a bare
# year % 4 leap test, which is wrong for century years such as 2100, and
# duplicated the 31/30/28-day month lists).
from datetime import date, timedelta

daySeriesList = set()
current_day = date(2013, 1, 1)
last_day = date(2017, 12, 31)
while current_day <= last_day:
    daySeriesList.add(current_day.strftime("%Y%m%d"))
    current_day += timedelta(days=1)
# add empty columns for days for which we did not have any users making any calls
# existing list of columns in the user_daily_unique_districts_wide table
# (the original comment said "towers"; this cell operates on the districts table)
existingDaySeriesColumns = user_daily_unique_districts_wide.columns
existingDaySeriesColumns.remove('user_id')
existingDaySeriesColumns = set(existingDaySeriesColumns)
missingColumns = daySeriesList.difference(existingDaySeriesColumns)
print('missing columns: ', missingColumns)
for newColumn in missingColumns:
    # F.array() with no arguments yields an empty-array column for every row.
    user_daily_unique_districts_wide = user_daily_unique_districts_wide.withColumn(str(newColumn), F.array())
    print('added column: ', newColumn)
# save in hive
user_daily_unique_districts_wide.write.saveAsTable('afghanistan.user_daily_unique_districts_wide')
# All processing done; release the Spark session and its resources.
spark.stop()
```
| github_jupyter |
```
# Update sklearn to prevent version mismatches
#!pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
#!pip install joblib
# Import library
import pandas as pd
```
# Read the CSV and Perform Basic Data Cleaning
```
# Read csv file in
df = pd.read_csv("exoplanet_data.csv")
# Drop columns that are entirely null, then drop any remaining rows
# containing nulls (same two-step cleaning as before, expressed as a chain).
df = df.dropna(axis='columns', how='all').dropna()
df.head()
```
# Select your features (columns)
```
# Set features. This will also be used as your x values.
selected_features = df[['koi_fpflag_nt','koi_fpflag_ss','koi_fpflag_co','koi_fpflag_ec','koi_period','koi_time0bk',
'koi_impact','koi_duration','koi_depth','koi_prad','koi_teq','koi_insol','koi_model_snr',
'koi_tce_plnt_num','koi_steff','koi_slogg','koi_srad','ra','dec','koi_kepmag']]
feature_names = selected_features.columns
feature_names
```
# Create a Train Test Split
Use `koi_disposition` for the y values
```
# Set disposition for y
target = df["koi_disposition"]
target_names = ["CANDIDATE", "CONFIRMED", "FALSE POSITIVE"]
# One-hot encode the three disposition classes (one 0/1 column per class).
data = target.copy()
data_binary_encoded = pd.get_dummies(data)
data_binary_encoded.head()
# Split data: test data = 40% (test_size=0.40 below), with stratification.
# (The original comment said 20%, which contradicted the code.)
from sklearn.model_selection import train_test_split
test_size=0.40
random_state=42
stratify=data_binary_encoded
# NOTE(review): stratifying on the full one-hot DataFrame stratifies by row
# pattern; stratify=target would state the intent more directly — confirm.
X_train, X_test, y_train, y_test = train_test_split(selected_features, data_binary_encoded,
test_size=test_size, random_state=random_state, stratify=stratify)
#X_train, X_test, y_train, y_test = train_test_split(selected_features, target, test_size=0.20, random_state=42)
# Check 1st 5 entries of X_train
X_train.head()
# Check 1st 5 entries of y_train
y_train.head()
```
# Pre-processing
Scale the data using the MinMaxScaler and perform some feature selection
```
# Scale features to [0, 1]; the scaler is fit on the training data only so
# no information from the test set leaks into the transformation.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_scaler = scaler.fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# Check 1st 5 entries of X_train_scaled
X_train_scaled
# Check min max entries of X_train_scaled (train min/max should be 0 and 1)
print(f"{X_train_scaled.max()} {X_train_scaled.min()}")
print(f"{y_train.max()} {y_train.min()}")
# Check 1st 5 entries of X_test_scaled
X_test_scaled
```
# Train the Model
```
#from sklearn import tree RandomForest
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=200)
rf = rf.fit(X_train_scaled, y_train)
# Train-set and test-set accuracy; a large gap between them indicates overfitting.
a = rf.score(X_train_scaled, y_train)
b = rf.score(X_test_scaled, y_test)
# print scores for RandomForest (train, test)
print(f"{a}, {b}")
# Display RandomForestClassifier
rf
# Display features that influences the model most (importance, name), descending
sorted(zip(rf.feature_importances_, feature_names), reverse=True)
```
# Hyperparameter Tuning
Use `GridSearchCV` to tune the model's parameters
```
# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV
param_grid = {
    'n_estimators': [50, 100, 200, 400, 600],
    # NOTE(review): 'auto' was deprecated and later removed for
    # RandomForestClassifier in newer scikit-learn (it was an alias of 'sqrt');
    # confirm the installed version still accepts it.
    'max_features': ['auto', 'sqrt', 'log2']
}
grid = GridSearchCV(rf, param_grid, verbose=3)
# Train the model with GridSearch.
# FIX: tune on the *scaled* training data. The baseline model above and the
# tuned refit below both train on X_train_scaled, so searching on the
# unscaled X_train would select hyperparameters for data the final model
# never sees.
grid.fit(X_train_scaled, y_train)
# Display best tuned params and the (cross-validated) score
print(grid.best_params_)
print(grid.best_score_)
# Display and save best tuned param
print('Best max_features:', grid.best_params_['max_features'])
print('Best n_estimators:', grid.best_params_['n_estimators'])
max_features_tune = grid.best_params_['max_features']
n_estimators_tune = grid.best_params_['n_estimators']
# Refit model with new tuned values on the scaled training data
rf_tuned = RandomForestClassifier(n_estimators=n_estimators_tune, max_features=max_features_tune)
rf_tuned = rf_tuned.fit(X_train_scaled, y_train)
a_tuned = rf_tuned.score(X_train_scaled, y_train)
b_tuned = rf_tuned.score(X_test_scaled, y_test)
print(f"{a_tuned}, {b_tuned}")
```
# Save the Model
```
# save model to a file
import joblib
filename = 'prakash_randomforest.sav'
joblib.dump(rf_tuned, filename)
# Summarize baseline vs tuned scores for the report.
print(f"Random Forest Classifier Model Score Comparison")
print(f"-"*95)
print(f"Test Train Sample Size % = {test_size*100}, Random State = {random_state}, Stratify = Yes")
print(f"Model Train Score = {a}, Model Test Score = {b}")
print(f"Model Tuned Train Score = {a_tuned}, Model Tuned Test Score = {b_tuned}")
```
| github_jupyter |
# Inference and Validation
Now that you have a trained network, you can use it for making predictions. This is typically called **inference**, a term borrowed from statistics. However, neural networks have a tendency to perform *too well* on the training data and aren't able to generalize to data that hasn't been seen before. This is called **overfitting** and it impairs inference performance. To test for overfitting while training, we measure the performance on data not in the training set called the **validation** set.
We **avoid overfitting through regularization such as dropout** while monitoring the validation performance during training. In this notebook, I'll show you how to do this in PyTorch.
As usual, let's start by loading the dataset through torchvision. You'll learn more about torchvision and loading data in a later part. This time we'll be taking advantage of the test set which you can get by setting `train=False` here:
```python
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
```
The test set contains images just like the training set. Typically you'll see 10-20% of the original dataset held out for testing and validation with the rest being used for training.
```
import torch
from torchvision import datasets, transforms
# Define a transform to normalize the data
# (mean 0.5 / std 0.5 maps pixel values from [0, 1] to [-1, 1])
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data (batches of 64, reshuffled each epoch)
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here I'll create a model like normal, using the same one from my solution for part 4.
```
from torch import nn, optim
import torch.nn.functional as F
class Classifier(nn.Module):
    """Fully-connected Fashion-MNIST classifier: 784 -> 256 -> 128 -> 64 -> 10.

    forward() returns log-probabilities (log-softmax over the 10 classes),
    suitable for training with nn.NLLLoss.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten whatever spatial shape comes in to (batch, 784).
        x = x.view(x.shape[0], -1)
        # The three hidden layers share the same Linear+ReLU pattern.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        # Output layer: per-class log-probabilities.
        return F.log_softmax(self.fc4(x), dim=1)
```
The goal of validation is to measure the model's performance on data that isn't part of the training set. Performance here is up to the developer to define though. Typically this is just accuracy, the percentage of classes the network predicted correctly. Other options are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)) and top-5 error rate. We'll focus on accuracy here. First I'll do a forward pass with one batch from the test set.
```
# Untrained model: predictions should be roughly uniform over the 10 classes.
model = Classifier()
images, labels = next(iter(testloader))
# Get the class probabilities (the model returns log-probs, so exponentiate)
logps = model(images)
ps = torch.exp(logps)
# Make sure the shape is appropriate, we should get 10 class probabilities for 64 examples
print(ps.shape)
```
With the probabilities, we can get the most likely class using the `ps.topk` method. This returns the $k$ highest values. Since we just want the most likely class, we can use `ps.topk(1)`. This returns a tuple of the top-$k$ values and the top-$k$ indices. If the highest value is the fifth element, we'll get back 4 as the index.
```
# topk(1, dim=1) -> (values, indices) of the single most likely class per row.
top_p, top_class = ps.topk(1, dim=1)
# Look at the most likely classes for the first 10 examples
print(top_class[:10,:])
print(top_p[:10,:])
```
Now we can check if the predicted classes match the labels. This is simple to do by equating `top_class` and `labels`, but we have to be careful of the shapes. Here `top_class` is a 2D tensor with shape `(64, 1)` while `labels` is 1D with shape `(64)`. To get the equality to work out the way we want, `top_class` and `labels` must have the same shape.
If we do
```python
equals = top_class == labels
```
`equals` will have shape `(64, 64)`, try it yourself. What it's doing is comparing the one element in each row of `top_class` with each element in `labels` which returns 64 True/False boolean values for each row.
```
equals = top_class == labels.view(*top_class.shape) # the * unpacks the shape tuple into view's positional size arguments, giving labels shape (64, 1)
```
Now we need to calculate the percentage of correct predictions. `equals` has binary values, either 0 or 1. This means that if we just sum up all the values and divide by the number of values, we get the percentage of correct predictions. This is the same operation as taking the mean, so we can get the accuracy with a call to `torch.mean`. If only it was that simple. If you try `torch.mean(equals)`, you'll get an error
```
RuntimeError: mean is not implemented for type torch.ByteTensor
```
This happens because `equals` has type `torch.ByteTensor` but `torch.mean` isn't implemented for tensors with that type. So we'll need to convert `equals` to a float tensor. Note that when we take `torch.mean` it returns a scalar tensor, to get the actual value as a float we'll need to do `accuracy.item()`.
```
# Cast the byte tensor to float so mean() works; the mean of 0/1 values is the accuracy.
accuracy = torch.mean(equals.type(torch.FloatTensor))
print(f'Accuracy: {accuracy.item()*100}%')
```
The network is untrained so it's making random guesses and we should see an accuracy around 10%. Now let's train our network and include our validation pass so we can measure how well the network is performing on the test set. Since we're not updating our parameters in the validation pass, we can speed up our code by turning off gradients using `torch.no_grad()`:
```python
# turn off gradients
with torch.no_grad():
# validation pass here
for images, labels in testloader:
...
```
>**Exercise:** Implement the validation loop below and print out the total accuracy after the loop. You can largely copy and paste the code from above, but I suggest typing it in because writing it out yourself is essential for building the skill. In general you'll always learn more by typing it rather than copy-pasting. You should be able to get an accuracy above 80%.
```
model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
epochs = 30
steps = 0
train_losses, test_losses = [], []
for e in range(epochs):
    running_loss = 0
    print('Epoch', e)
    for images, labels in trainloader:
        optimizer.zero_grad()
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: this branch runs after every full pass over trainloader
        # (there is no break), i.e. once per epoch.
        ## TODO: Implement the validation pass and print out the validation accuracy
        # turn off gradients
        test_loss = 0
        accuracy = 0
        with torch.no_grad():
            # validation pass here
            for images, labels in testloader:
                log_ps = model.forward(images)
                ps = torch.exp(log_ps)
                loss = criterion(log_ps, labels)
                # NOTE(review): accumulates a 0-d tensor; loss.item() would
                # keep test_losses as plain floats — plotting works either way.
                test_loss += loss
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        train_losses.append(running_loss/len(trainloader))
        test_losses.append(test_loss/len(testloader))
        print('Training loss: {:.4f}'.format(running_loss / len(trainloader)))
        print('Test loss: {:.4f}'.format(test_loss / len(testloader)))
        print('Test accuracy: {:.4f}'.format(accuracy / len(testloader)))
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.plot(range(epochs), train_losses, label='Training loss')
plt.plot(range(epochs), test_losses, label='Validation loss')
plt.title('Overfitting in action (*without* dropout)')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
```
## Overfitting
If we look at the training and validation losses as we train the network, we can see a phenomenon known as overfitting.
<img src='assets/overfitting.png' width=450px>
The network learns the training set better and better, resulting in lower training losses. However, it starts having problems generalizing to data outside the training set leading to the validation loss increasing. The ultimate goal of any deep learning model is to make predictions on new data, so we should strive to get the lowest validation loss possible.
One option is to use the version of the model with the lowest validation loss, here the one around 8-10 training epochs. This strategy is called ***early-stopping***. In practice, you'd save the model frequently as you're training then later choose the model with the lowest validation loss.
The most common method to reduce overfitting (outside of early-stopping) is *dropout*, where we randomly drop input units. This forces the network to share information between weights, increasing its ability to generalize to new data. Adding dropout in PyTorch is straightforward using the [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout) module.
```python
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
# Dropout module with 0.2 drop probability
self.dropout = nn.Dropout(p=0.2)
def forward(self, x):
# make sure input tensor is flattened
x = x.view(x.shape[0], -1)
# Now with dropout
x = self.dropout(F.relu(self.fc1(x)))
x = self.dropout(F.relu(self.fc2(x)))
x = self.dropout(F.relu(self.fc3(x)))
# output so no dropout here
x = F.log_softmax(self.fc4(x), dim=1)
return x
```
**During training we want to use dropout to prevent overfitting, but during inference we want to use the entire network.**
So, we need to **turn off dropout during validation, testing**, and whenever we're using the network to make **predictions**. To do this, you use `model.eval()`. This sets the model to evaluation mode where the dropout probability is 0. You can turn dropout back on by setting the model to train mode with `model.train()`. In general, the pattern for the validation loop will look like this, where you turn off gradients, set the model to evaluation mode, calculate the validation loss and metric, then set the model back to train mode.
```python
# turn off gradients
with torch.no_grad():
# set model to evaluation mode
model.eval()
# validation pass here
for images, labels in testloader:
...
# set model back to train mode
model.train()
```
> **Exercise:** Add dropout to your model and train it on Fashion-MNIST again. See if you can get a lower validation loss or higher accuracy.
```
## TODO: Define your model with dropout added
import torch
from torch import nn
import torch.nn.functional as F
# Architecture hyperparameters: flattened 28x28 input, three hidden widths, 10 classes.
input_dims = 28*28
hidden_dims = [256, 128, 64]
output_dims = 10
class FMNISTNet(nn.Module):
    """Fashion-MNIST classifier (784 -> 256 -> 128 -> 64 -> 10) with dropout.

    Each hidden layer is Linear -> ReLU -> Dropout(p=0.2); the output layer
    is LogSoftmax, so train with nn.NLLLoss.
    """

    def __init__(self):
        super().__init__()
        # FIX: keep the architecture hyperparameters on the instance and use
        # them below. The original stored them on self but then read the
        # module-level globals of the same names, silently coupling the class
        # to the surrounding notebook cell.
        self.input_dims = 28*28
        self.hidden_dims = [256, 128, 64]
        self.output_dims = 10
        self.relu = nn.ReLU()
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.dropout = nn.Dropout(p=0.2)
        self.fc1 = nn.Linear(self.input_dims, self.hidden_dims[0])
        self.fc2 = nn.Linear(self.hidden_dims[0], self.hidden_dims[1])
        self.fc3 = nn.Linear(self.hidden_dims[1], self.hidden_dims[2])
        self.fc4 = nn.Linear(self.hidden_dims[2], self.output_dims)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        # Flatten to (batch, 784).
        x = x.view(x.shape[0], self.input_dims)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc3(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc4(x)
        out = self.logsoftmax(x)
        return out
## TODO: Train your model **with dropout**, and monitor the training progress with the validation loss and accuracy
from torch import optim
model = FMNISTNet()
optimizer = optim.Adam(model.parameters(), lr=0.003)
#optimizer = optim.SGD(model.parameters(), lr=0.01)
criterion = nn.NLLLoss()
epochs = 30
train_losses, test_losses = [], []
for e in range(epochs):
    print('epoch', e)
    running_loss = 0
    for images, labels in trainloader:
        # Flatten MNIST images into a 784 long vector
        images = images.view(images.shape[0], input_dims)
        optimizer.zero_grad()
        logps = model.forward(images)
        loss = criterion(logps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: this branch runs after every full pass over trainloader
        # (there is no break), i.e. once per epoch.
        test_loss = 0
        accuracy = 0
        with torch.no_grad():
            # validation pass here
            model.eval() # turn on eval mode for model (disables dropout)
            for images, labels in testloader:
                log_ps = model.forward(images)
                ps = torch.exp(log_ps)
                loss = criterion(log_ps, labels)
                test_loss += loss
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        train_losses.append(running_loss/len(trainloader))
        test_losses.append(test_loss/len(testloader))
        print('Training loss: {:.4f}'.format(running_loss / len(trainloader)))
        print('Test loss: {:.4f}'.format(test_loss / len(testloader)))
        print('Test accuracy: {:.4f}'.format(accuracy / len(testloader)))
        model.train() # turn on train mode for model (re-enables dropout)
plt.plot(range(epochs), train_losses, label='Training loss')
plt.plot(range(epochs), test_losses, label='Validation loss')
plt.title('(Less!) overfitting in action (*with* dropout)')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
```
## Inference
Now that the model is trained, we can use it for inference. We've done this before, but now we need to remember to set the model in inference mode with `model.eval()`. You'll also want to turn off autograd with the `torch.no_grad()` context.
```
# Import helper module (should be in the repo)
import helper

# Turn eval mode on for the model (dropout off) before inference.
model.eval()

dataiter = iter(testloader)
# FIX: `.next()` on DataLoader iterators was removed in recent PyTorch
# releases; the builtin next() works on every version.
images, labels = next(dataiter)
img = images[0]
# Convert 2D image to 1D vector
img = img.view(1, 784)

# Calculate the class probabilities (softmax) for img. no_grad avoids
# building an autograd graph during inference (it was commented out before).
with torch.no_grad():
    log_ps = model.forward(img)
ps = torch.exp(log_ps)

# Plot the image and probabilities
helper.view_classify(img.view(1, 28, 28), ps, version='Fashion')

# Turn training mode back on for model.
model.train()
```
## Next Up!
In the next part, I'll show you how to save your trained models. In general, you won't want to train a model everytime you need it. Instead, you'll train once, save it, then load the model when you want to train more or use if for inference.
| github_jupyter |
# Analysis and Prediction of Crimes in Chicago
## Overview
The goal of this project is to analyze the Chicago Crimes Dataset, classify the crimes and build a model that predicts the crime for 2017-2020. This project consists of three phases - Analyzing the dataset, Classifying the crimes, Building a prediction model.
For analysing the data, I have used the pandas package. The reason I chose pandas package is that it has built-in functionality for a lot of common data-processing applications: for example, easy groupby syntax, easy joins (which are really efficient in pandas), rolling windows, etc.
For Classifying the crimes and building a Prediction model, I have used LDA, linear regression and non-linear regression on the data and then analyzed the results to identify which algorithm works better.
### Table of Contents
* [Data](#Data)
* [Data Analysis](#Data-Analysis)
* [Classifying the Data](#Classifying-the-Data)
* [Prediction Model](#Prediction-Model)
* [Linear Regression Model](#Linear-Regression-Model)
* [Linear Ridge Regression Model](#Linear-Ridge-Regression-Model)
* [Neural Networks Model](#Neural-Networks-Model)
* [Experimental Results](#Experimental-Results)
* [Conclusion](#Conclusion)
* [References](#References)
## Data
This dataset is taken from [Kaggle](https://www.kaggle.com/currie32/crimes-in-chicago) and reflects reported incidents of crime (with the exception of murders where data exists for each victim) that occurred in the City of Chicago from 2001 to January,2017. The data is in the following form:
<table>
<tr>
<th> </th>
<th>ID</th>
<th>Case Number</th>
<th>Date</th>
<th>Block</th>
<th>IUCR</th>
<th>Primary Type</th>
<th>Description</th>
</tr>
<tr>
<td>388</td>
<td>4785</td>
<td>HP610824</td>
<td>10/07/2008
12:39:00 PM</td>
<td>000XX E 75TH ST</td>
<td>0110</td>
<td>HOMICIDE</td>
<td>FIRST DEGREE MURDER</td>
</tr>
<tr>
<td>835</td>
<td>4786</td>
<td>HP616595</td>
<td>10/09/2008
03:30:00 AM</td>
<td>048XX W POLK ST</td>
<td>0110</td>
<td>HOMICIDE</td>
<td>FIRST DEGREE MURDER</td>
</tr>
<tr>
<td>3</td>
<td>10508693</td>
<td>HZ250496</td>
<td>05/03/2016
11:40:00 PM</td>
<td>013XX S SAWYER AVE</td>
<td>0486</td>
<td>BATTERY</td>
<td>DOMESTIC BATTERY SIMPLE</td>
</tr>
</table>
The dataset consists of 4 different files for crimes in 2001-2004, 2005-2007, 2008-2011,2011-2017 and has a size of 500MB. Since the data is really huge and has many attributes, it seems to be very interesting.
```
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
import pandas as pd
```
Let's first load the 4 files.
```
# Load the four period files. error_bad_lines=False skips malformed rows.
# NOTE(review): error_bad_lines is deprecated in pandas >= 1.3 in favor of
# on_bad_lines='skip' — confirm the pandas version before upgrading.
crimes1_4 = pd.read_csv('Chicago_Crimes_2001_to_2004.csv',sep=',', error_bad_lines=False, index_col=False, dtype='unicode')
crimes1_4.head(5)
crimes5_7 = pd.read_csv('Chicago_Crimes_2005_to_2007.csv',sep=',', error_bad_lines=False, index_col=False, dtype='unicode')
crimes5_7.head(5)
crimes8_11 = pd.read_csv('Chicago_Crimes_2008_to_2011.csv',sep=',', error_bad_lines=False, index_col=False, dtype='unicode')
crimes8_11.head(5)
# NOTE(review): unlike the other three, this file is indexed by Date and read
# without error_bad_lines/dtype options — confirm this asymmetry is intended.
crimes12_17 = pd.read_csv('Chicago_Crimes_2012_to_2017.csv',index_col='Date')
crimes12_17.head(5)
```
Now as we have loaded all the files, let's try to analyze the data.
# Data Analysis
Let's first analyze the crimes from 2001-2004.
```
# Count incidents per crime type for 2001-2004 and keep the 10 most frequent.
pt1_4 = crimes1_4[['Primary Type']]
crime_counts1_4 = pd.DataFrame(pt1_4.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
crime_counts1_4
```
From the above table we can see the top 10 crimes that occured in 2001-2004.
```
# Count incidents per location description for 2001-2004; top 10.
loc1_4 = crimes1_4[['Location Description']]
locations1_4 = pd.DataFrame(loc1_4.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
locations1_4
```
From this table we can say that maximum crimes have occured on the streets and residences.
Now let's analyze the crimes from 2005 to 2007, 2008 to 2011 and 2012 to 2017 in the similar way.
```
# Top-10 crime types and locations for each remaining period.
pt5_7 = crimes5_7[['Primary Type']]
crime_counts5_7 = pd.DataFrame(pt5_7.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
crime_counts5_7
loc5_7 = crimes5_7[['Location Description']]
locations5_7 = pd.DataFrame(loc5_7.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
locations5_7
# BUG FIX: this previously grouped pt5_7 (the 2005-2007 slice), so the
# 2008-2011 "top crimes" table — and the plot built from it below — silently
# showed 2005-2007 data.
pt8_11 = crimes8_11[['Primary Type']]
crime_counts8_11 = pd.DataFrame(pt8_11.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
crime_counts8_11
loc8_11 = crimes8_11[['Location Description']]
locations8_11 = pd.DataFrame(loc8_11.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
locations8_11
pt12_17 = crimes12_17[['Primary Type']]
crime_counts12_17 = pd.DataFrame(pt12_17.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
crime_counts12_17
loc12_17 = crimes12_17[['Location Description']]
locations12_17 = pd.DataFrame(loc12_17.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)
locations12_17
```
Let us now compare the top crimes in these years.
```
import seaborn as sns
# Initialize the matplotlib figure
plt.figure(figsize=(20,20))
# 2x2 grid: one bar chart of the top-10 crime categories per period
f, ax = plt.subplots(2,2)
sns.barplot(x="counts", y="Primary Type", data=crime_counts1_4,
            label="Total", color="b",ax=ax[0][0])
ax[0][0].set_title("2001-2004")
sns.barplot(x="counts", y="Primary Type", data=crime_counts5_7,
            label="Total", color="b",ax=ax[0][1])
ax[0][1].set_title("2005-2007")
sns.barplot(x="counts", y="Primary Type", data=crime_counts8_11,
            label="Total", color="b",ax=ax[1][0])
ax[1][0].set_title("2008-2011")
sns.barplot(x="counts", y="Primary Type", data=crime_counts12_17,
            label="Total", color="b",ax=ax[1][1])
ax[1][1].set_title("2012-2017")
plt.tight_layout()
```
From the above plots we can see that Theft and Battery have been the top crimes in all these years and the number of Narcotics and Criminal Damage crimes are almost the same.
Let's now compare the locations at which the crimes take place.
```
plt.figure(figsize=(30,30))
# 2x2 grid: one bar chart of the top-10 crime locations per period
f, ax = plt.subplots(2,2)
sns.barplot(x="counts", y="Location Description", data=locations1_4,
            label="Total", color="b",ax=ax[0][0])
ax[0][0].set_title("2001-2004")
sns.barplot(x="counts", y="Location Description", data=locations5_7,
            label="Total", color="b",ax=ax[0][1])
ax[0][1].set_title("2005-2007")
sns.barplot(x="counts", y="Location Description", data=locations8_11,
            label="Total", color="b",ax=ax[1][0])
ax[1][0].set_title("2008-2011")
sns.barplot(x="counts", y="Location Description", data=locations12_17,
            label="Total", color="b",ax=ax[1][1])
ax[1][1].set_title("2012-2017")
plt.tight_layout()
```
The plots obtained look similar to the plot of types of crimes. Street and Residence remain in the top position for crimes and Residence and Sidewalk are almost similar.
Let's now compare the number of crimes with the number of arrests in these years.
```
# Per-year crime counts within each period (the 2001-2004 frame is truncated
# to its four years before everything is concatenated into one table).
crimesPerYear1_4 = pd.DataFrame(crimes1_4.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())
crimesPerYear1_4 = crimesPerYear1_4.head(4)
crimesPerYear1_4
crimesPerYear5_7 = pd.DataFrame(crimes5_7.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())
crimesPerYear5_7
crimesPerYear8_11 = pd.DataFrame(crimes8_11.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())
crimesPerYear8_11
crimesPerYear12_17 = pd.DataFrame(crimes12_17.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())
crimesPerYear12_17
# Stack the per-period tables into one year/count table
frames = [crimesPerYear1_4, crimesPerYear5_7, crimesPerYear8_11,crimesPerYear12_17]
result = pd.concat(frames)
result
```
Lets now plot these values. We cannot directly plot the above obtained values since it is a DataFrame. So we first convert it to a numpy array and then plot the crime counts per year.
```
# DataFrame.as_matrix() was removed in pandas 1.0; .values returns the same
# all-column ndarray (the original's columns=[result.columns[:]] selected
# every column anyway).
results = result.values
results1 = results[:,:].astype(int)
# Sort rows by year (column 0) so the bar chart is in chronological order
results1 = results1[results1[:,0].argsort()]
results1
y_pos = np.arange(len(results1[:,0]))
plt.barh(y_pos, results1[:,1], align='center', alpha=0.5)
plt.yticks(y_pos, results1[:,0])
plt.xlabel('Crime counts')
plt.title('Crime Counts over the years')
plt.show()
```
From the above plot we can see that the crime rate has gradually decreased until 2004 and from 2005-2008, there were more crimes registered. From 2009, there is a decrease in the crime rate.
2017 shows a much lower crime count because the dataset only includes crimes registered up to January 2017.
Now let us try to analyze the number of arrests over these years.
```
#arrest_yearly = crimes1_4[crimes1_4['Arrest'] == True]['Arrest']
# Keep only the fields needed to count arrests per year.
df = pd.DataFrame(crimes1_4, columns = ['Arrest', 'Year'])
# BUG FIX: 'Arrest' is loaded as a boolean column, so comparing it against the
# string "True" matched nothing; compare against the boolean True as the
# 2012-2017 cell already did.  (NOTE(review): confirm the dtype of 'Arrest'
# in the loaded CSVs.)  The unused df2 (non-arrests) was also removed.
df1 = df[df.Arrest == True]
arrestsPerYear1_4 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index()).head(4)
arrestsPerYear1_4
df = pd.DataFrame(crimes5_7, columns = ['Arrest', 'Year'])
df1 = df[df.Arrest == True]
arrestsPerYear5_7 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index())
arrestsPerYear5_7
df = pd.DataFrame(crimes8_11, columns = ['Arrest', 'Year'])
df1 = df[df.Arrest == True]
arrestsPerYear8_11 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index())
arrestsPerYear8_11
df = pd.DataFrame(crimes12_17, columns = ['Arrest', 'Year'])
df1 = df[df.Arrest == True]
arrestsPerYear12_17 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index())
arrestsPerYear12_17
```
Let's now plot these arrest counts against the crime counts.
```
frames = [arrestsPerYear1_4, arrestsPerYear5_7, arrestsPerYear8_11,arrestsPerYear12_17]
result = pd.concat(frames)
# DataFrame.as_matrix() was removed in pandas 1.0; .values yields the same
# all-column ndarray.
results = result.values
results2 = results[:,:].astype(int)
# Sort rows chronologically by year (column 0)
results2 = results2[results2[:,0].argsort()]
results2
ax = plt.subplot(111)
w = 0.3
# Blue bars: total crimes per year (results1 from the earlier cell);
# green bars: arrests per year
ax.bar(results2[:,0]-w, results1[:,1],width=w,color='b',align='center')
ax.bar(results2[:,0], results2[:,1],width=w,color='g',align='center')
ax.autoscale(tight=True)
plt.show()
```
From the above plots we can see that the number of arrests recorded in each year is very less compared to the number of crimes.
## Classifying the Data
Let us consider the Crimes data from 2012-2017 for classifying the data. Before we perform any operations on the data, let's first modify the data so that we have the Date as index. This would help us later when we try to predict the crimes.
```
# Drop the first three columns and keep the rest.
# NOTE(review): assumes columns 0-2 are bookkeeping fields and the frame is
# indexed by Date — confirm against the loaded CSV.
crimes = crimes12_17.iloc[:, 3: ]
crimes.head()
# Parse the index into Timestamps so time-based selection works later
crimes.index = pd.to_datetime(crimes.index)
crimes.head(5)
```
Now since we have the Date as index, lets start the classification of data. Before we classify the types of crimes, let's see how many types of crimes are there.
```
# Full frequency table of raw 'Primary Type' values (no head() truncation this time)
crime_counts12_17 = pd.DataFrame(pt12_17.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index())
crime_counts12_17
```
From the above table we can see that there are about 32 Primary Types of crimes. Also we can see that some of the crimes can be classified as similar types. For example - Theft, Robbery, Motor Vehicle Theft and Burglary can be given the same label. Similarly, Battery, Sex Offense and Crim Sexual Assault can be considered similar crimes.
Now lets try to classify the data by grouping similar items as below:
<table>
<tr>
<th>Categories</th>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>THEFT, BURGLARY, MOTOR VEHICLE THEFT, ROBBERY</td>
<td>THEFT</td>
<td>1</td>
</tr>
<tr>
<td>BATTERY, CRIM SEXUAL ASSAULT, SEX OFFENSE</td>
<td>SEXUAL ASSAULT</td>
<td>2</td>
</tr>
<tr>
<td>NARCOTICS, OTHER NARCOTIC VIOLATION</td>
<td>NARCOTICS</td>
<td>3</td>
</tr>
<tr>
<td>ASSAULT, INTIMIDATION</td>
<td>ASSAULT</td>
<td>4</td>
</tr>
<tr>
<td>OTHER OFFENSE</td>
<td>OTHER OFFENSE</td>
<td>5</td>
</tr>
<tr>
<td>DECEPTIVE PRACTICE</td>
<td>DECEPTIVE PRACTICE</td>
<td>6</td>
</tr>
<tr>
<td>CRIMINAL TRESPASS</td>
<td>CRIMINAL TRESPASS</td>
<td>7</td>
</tr>
<tr>
<td>WEAPONS VIOLATION, CONCEALED CARRY LICENSE VIOLATION</td>
<td>WEAPONS VIOLATION</td>
<td>8</td>
</tr>
<tr>
<td>PUBLIC INDECENCY, PUBLIC PEACE VIOLATION</td>
<td>PUBLIC INDECENCY</td>
<td>9</td>
</tr>
<tr>
<td>OFFENSE INVOLVING CHILDREN</td>
<td>OFFENSE INVOLVING CHILDREN</td>
<td>10</td>
</tr>
<tr>
<td>PROSTITUTION</td>
<td>PROSTITUTION</td>
<td>11</td>
</tr>
<tr>
<td>INTERFERENCE WITH PUBLIC OFFICER</td>
<td>INTERFERENCE WITH PUBLIC OFFICER</td>
<td>12</td>
</tr>
<tr>
<td>HOMICIDE</td>
<td>HOMICIDE</td>
<td>13</td>
</tr>
<tr>
<td>ARSON, CRIMINAL DAMAGE</td>
<td>ARSON</td>
<td>14</td>
</tr>
<tr>
<td>GAMBLING</td>
<td>GAMBLING</td>
<td>15</td>
</tr>
<tr>
<td>LIQUOR LAW VIOLATION</td>
<td>LIQUOR LAW VIOLATION</td>
<td>16</td>
</tr>
<tr>
<td>KIDNAPPING</td>
<td>KIDNAPPING</td>
<td>17</td>
</tr>
<tr>
<td>STALKING, OBSCENITY</td>
<td>STALKING</td>
<td>18</td>
</tr>
<tr>
<td>NON - CRIMINAL, NON-CRIMINAL (SUBJECT SPECIFIED)</td>
<td>NON - CRIMINAL</td>
<td>19</td>
</tr>
<tr>
<td>HUMAN TRAFFICKING</td>
<td>HUMAN TRAFFICKING</td>
<td>20</td>
</tr>
</table>
In this way we reduce 32 categories to 20 categories.
```
# Map each of the 33 raw 'Primary Type' strings onto 20 numeric class labels;
# related offenses share a label, following the table above (both lists are
# position-aligned: string i maps to number i).
classifiedCrimes = crimes12_17.replace(['THEFT', 'BURGLARY', 'MOTOR VEHICLE THEFT', 'ROBBERY' ,'BATTERY', 'CRIM SEXUAL ASSAULT',
                                        'SEX OFFENSE' , 'NARCOTICS','OTHER NARCOTIC VIOLATION' , 'ASSAULT', 'INTIMIDATION' ,
                                        'OTHER OFFENSE' , 'DECEPTIVE PRACTICE' , 'CRIMINAL TRESPASS' , 'WEAPONS VIOLATION' ,
                                        'CONCEALED CARRY LICENSE VIOLATION','PUBLIC INDECENCY', 'PUBLIC PEACE VIOLATION',
                                        'OFFENSE INVOLVING CHILDREN','PROSTITUTION','INTERFERENCE WITH PUBLIC OFFICER','HOMICIDE',
                                        'ARSON', 'CRIMINAL DAMAGE','GAMBLING','LIQUOR LAW VIOLATION','KIDNAPPING','STALKING',
                                        'OBSCENITY','NON - CRIMINAL','NON-CRIMINAL', 'NON-CRIMINAL (SUBJECT SPECIFIED)','HUMAN TRAFFICKING']
                                       ,[1,1,1,1,2,2,2,3,3,4,4,5,6,7,8,8,9,9,10,11,12,13,14,14,15,16,17,18,18,19,19,19,20])
# Frequency table of the new numeric class labels
primaryTypes = classifiedCrimes[['Primary Type']]
classifiedCrimeCounts = pd.DataFrame(primaryTypes.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index())
classifiedCrimeCounts
```
Now since we have all the class labels, let us crop the data by taking only the necessary fields.
```
# Keep only the fields the prediction model will use
classifiedCrimes = classifiedCrimes[['Primary Type','Latitude','Longitude','Year']]
classifiedCrimes.head(10)
```
Before we start building the prediction system, let's first remove all the missing values. Also in our prediction system, we would be predicting the crime based on the day of the week and the location. For this purpose we add another column to our data which corresponds to the day of the week.
```
# Drop rows with any missing value
classifiedCrimes1 = classifiedCrimes.dropna(axis=0,how='any')
classifiedCrimes1.index = pd.to_datetime(classifiedCrimes1.index)
classifiedCrimes1.head(10)
# Promote the Date index to a column so we can derive the day of the week
classifiedCrimes1 = classifiedCrimes1.reset_index()
# Monday=0 ... Sunday=6
classifiedCrimes1['weekday'] = classifiedCrimes1['Date'].dt.dayofweek
classifiedCrimes1.head(10)
```
Now we have enough data to build the prediction system.
## Prediction Model
To predict the crimes in the future years, we first need to train a model. There are many methods for building a model but we require an accurate method. So let's experiment with these methods to find the best model.
### Linear Regression Model
Linear Regression is an approach for modeling the relationship between a scalar dependent variable Y and one or more explanatory variables (or independent variables) denoted by X.
#### Method
The dataset is first loaded into a variable known as data and then we separate the Target values into T and the remaining data into X. Then we try to fit a linear model to all of the data to see how accurately we predict the residuary resistance for each sample. To do this we define three functions:
1. model = train(X,T)
2. predict = use(model,X)
3. error = rmse(predict,T)
In our case, the Target values are the crimes(Primary Type) and X consists of the latitude, longitude and weekday.
We first use the train method to find the weights of each of the attribute. This method returns a model which consists of the set of keys < mean, standard deviation, weight>.
To find weight matrix, we use the formula :
$$
\begin{align*}
\wv &= (\Xv^T \Xv)^{-1} \Xv^T \Tv
\end{align*}
$$
But the weight matrix we obtain is not standardized. So we first standardize the values by using the following formula and then applying the above formula
$$
\begin{align*}
\Xv &= \frac{\Xv - m }{s}
\end{align*}
$$
where, m is the mean ,
s is the standard deviation.
```
import math
def train(X, T):
    """Fit an ordinary least-squares linear model on standardized inputs.

    Parameters: X is an (n, d) input matrix, T an (n, k) target matrix.
    Returns a dict with the column means ('means'), standard deviations
    ('stds'), and the (d+1, k) weight matrix 'w' (bias weight first).
    """
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    # Standardize, then prepend a column of ones for the bias term.
    standardized = (X - mu) / sigma
    design = np.hstack((np.ones((standardized.shape[0], 1)), standardized))
    # Solve the normal equations (X^T X) w = X^T T via least squares.
    gram = np.dot(design.T, design)
    moment = np.dot(design.T, T)
    weights = np.linalg.lstsq(gram, moment)[0]
    return {'means': mu, 'stds': sigma, 'w': weights}
```
Now we use this weight obtained from the above train() method in the use() method to get the predicted values of the model. In this method, we use the same mean and standard deviation as above.
```
def use(model, X):
    """Predict targets for X with a model dict produced by train().

    Standardizes X with the training means/stds, prepends the bias column,
    and applies the learned weights.
    """
    standardized = (X - model['means']) / model['stds']
    design = np.hstack((np.ones((standardized.shape[0], 1)), standardized))
    return np.dot(design, model['w'])
```
Now we take these predicted values and compare them with the Target values to find the error. We find the RMSE(Root Mean Square Error) for the Predicted values and the Target values using the following formula:
$$
\begin{align*}
rmse &= \sqrt{\frac{\sum_{n=1}^N (Predict - T)^2}{N}}
\end{align*}
$$
```
def rmse(predict, T):
    """Root-mean-square error between predictions and targets."""
    return np.sqrt(np.mean((predict - T) ** 2))
```
The RMSE value indicates the absolute fit of the model to the data – how close the observed data points are to the model’s predicted values.
#### Results
Let's first define the X and T matrices using classifiedCrimes1 dataFrame. Since this is a dataFrame, we first convert it to a numpy array so that we can slice it.
The matrix X should contain the Latitude, Longitude, Year and Weekday. T consists of the Primary Type(Crime)
```
# DataFrame.as_matrix() was removed in pandas 1.0; .values returns the same
# all-column ndarray.
CrimesData = classifiedCrimes1.values
# Columns 2-5 are Latitude, Longitude, Year and weekday (column 0 is the Date
# restored by reset_index, column 1 the numeric crime class).
X = np.float64(CrimesData[:,2:6])
X
# Target: the numeric crime class ('Primary Type')
T = np.float64(CrimesData[:,1:2])
T
```
Let's now start training the model.
```
# Fit the linear model on all the data, predict, and measure the RMSE
model = train(X, T)
predict = use(model, X)
error = rmse(predict, T)
error
```
We can see that when we use linear regression to build our model, we got an error of 4.189
Let us try some more approaches to see if we can build a better model for our prediction system.
### Linear Ridge Regression Model
Ridge regression generally yields better predictions than ordinary least squares solution, through a better compromise between bias and variance. It reduces the sum of squared errors.
If we add a term to our sum of squared error objective function that is the sum of all weight magnitudes except the bias weight. Then, we not only minimize the sum of squared errors, we also minimize the sum of the weight magnitudes:
$$ \sum_{i=1}^N (\tv_i - \xv_i^T \wv)^2 + \lambda \sum_{i=2}^N w_i^2$$
With $\lambda=0$ we have our usual linear regression objective function. With $\lambda>0$, we are adding in a penalty for the weight magnitudes. So we get the equation for weight as
$$ \wv = (X^T X + \lambda I)^{-1} X^T T $$
If we find the best value of $\lambda$ by comparing error on the test data, it will give us an optimistic prediction of error on novel data, because the test data was used to pick the best $\lambda$.
Instead of comparing the error only on test data, we partition the data into multiple ($k$) subsets called "folds". We select one fold to be the test partition, another fold to be the validate partition, and collect the remaining folds to be the train partition. We can do this in $k\,(k-1)$ ways. In order to divide the data into partitions, we use the **partitionKFolds** algorithm.
#### Method
```
def partitionKFolds(X,T,nFolds,shuffle=False,nPartitions=3):
    """Generate k-fold partitions of the rows of (X, T).

    With nPartitions == 3 (the default) this yields
    (Xtrain, Ttrain, Xvalidate, Tvalidate, Xtest, Ttest, testFold) for every
    ordered (test, validate) fold pair — nFolds*(nFolds-1) partitions in all.
    Otherwise it yields (Xtrain, Ttrain, Xtest, Ttest, testFold) once per
    test fold.  Rows are optionally shuffled once up front.
    """
    # Randomly arrange row indices
    rowIndices = np.arange(X.shape[0])
    if shuffle:
        np.random.shuffle(rowIndices)
    # Calculate number of samples in each of the nFolds folds
    nSamples = X.shape[0]
    nEach = int(nSamples / nFolds)
    if nEach == 0:
        raise ValueError("partitionKFolds: Number of samples in each fold is 0.")
    # Calculate the starting and stopping row index for each fold.
    # Store in startsStops as list of (start,stop) pairs
    starts = np.arange(0,nEach*nFolds,nEach)
    stops = starts + nEach
    # The last fold absorbs any leftover samples
    stops[-1] = nSamples
    startsStops = list(zip(starts,stops))
    # Repeat with testFold taking each single fold, one at a time
    for testFold in range(nFolds):
        if nPartitions == 3:
            # Repeat with validateFold taking each single fold, except for the testFold
            for validateFold in range(nFolds):
                if testFold == validateFold:
                    continue
                # trainFolds are all remaining folds, after selecting test and validate folds
                trainFolds = np.setdiff1d(range(nFolds), [testFold,validateFold])
                # Construct Xtrain and Ttrain by collecting rows for all trainFolds
                rows = []
                for tf in trainFolds:
                    a,b = startsStops[tf]
                    rows += rowIndices[a:b].tolist()
                Xtrain = X[rows,:]
                Ttrain = T[rows,:]
                # Construct Xvalidate and Tvalidate
                a,b = startsStops[validateFold]
                rows = rowIndices[a:b]
                Xvalidate = X[rows,:]
                Tvalidate = T[rows,:]
                # Construct Xtest and Ttest
                a,b = startsStops[testFold]
                rows = rowIndices[a:b]
                Xtest = X[rows,:]
                Ttest = T[rows,:]
                # Return partition matrices, then suspend until called again.
                yield Xtrain,Ttrain,Xvalidate,Tvalidate,Xtest,Ttest,testFold
        else:
            # trainFolds are all remaining folds, after selecting the test fold
            trainFolds = np.setdiff1d(range(nFolds), [testFold])
            # Construct Xtrain and Ttrain by collecting rows for all trainFolds
            rows = []
            for tf in trainFolds:
                a,b = startsStops[tf]
                rows += rowIndices[a:b].tolist()
            Xtrain = X[rows,:]
            Ttrain = T[rows,:]
            # Construct Xtest and Ttest
            a,b = startsStops[testFold]
            rows = rowIndices[a:b]
            Xtest = X[rows,:]
            Ttest = T[rows,:]
            # Return partition matrices, then suspend until called again.
            yield Xtrain,Ttrain,Xtest,Ttest,testFold
```
We define a new train method for ridge regression which consists of another parameter lambda. The use and rmse methods are same as we have used in linear regression.
```
def train(X,T,lamb):
    """Fit a ridge-regression model on standardized inputs.

    Parameters: X is an (n, d) input matrix, T an (n, k) target matrix and
    lamb the ridge penalty (lamb == 0 reduces to ordinary least squares).
    Returns a dict with 'w' ((d+1, k) weights, bias first), 'means' and
    'stds' of the training columns.
    """
    # NOTE: the original computed a plain least-squares solution first and
    # then immediately overwrote every variable with the ridge computation
    # below — that dead code has been removed; results are unchanged.
    means = X.mean(0)
    stds = X.std(0)
    d = X.shape[1]
    # Standardize and prepend the bias column of ones.
    Xs1 = np.insert( (X - means)/stds, 0, 1, axis=1)
    lambDiag = np.eye(d+1) * lamb
    # Do not penalize the bias weight.
    lambDiag[0,0] = 0
    w = np.linalg.lstsq( np.dot(Xs1.T,Xs1) + lambDiag, np.dot(Xs1.T,T))[0]
    return {'w': w, 'means':means, 'stds':stds}
def use(X,model):
    """Predict targets for X using a model dict returned by train().

    Note the (X, model) argument order — the reverse of the earlier linear
    version of use().
    """
    design = np.insert((X - model['means']) / model['stds'], 0, 1, axis=1)
    return np.dot(design, model['w'])
def rmse(A,B):
    """Root-mean-square difference between arrays A and B."""
    squared_error = (A - B) ** 2
    return np.sqrt(squared_error.mean())
```
Now since we have all the required methods, lets start testing which lambda value gives the best results.
#### Results
We need to determine which lambda value gives the best results for a given number of folds. To do this, let's define a method which takes a set of lambda values and the number of folds as input and reports which combination gives the best results.
```
def multipleLambdas(X, T, nFolds, lambdas):
    """Cross-validate ridge regression over a set of penalty values.

    For every train/validate/test partition produced by partitionKFolds,
    trains one model per lambda and records
    [partition index, lambda, train RMSE, validation RMSE, test RMSE].
    Returns an array with, for each partition, the row that achieved the
    lowest validation RMSE.
    """
    foldCount = 0
    results = []
    for Xtrain,Ttrain,Xval,Tval,Xtest,Ttest,_ in partitionKFolds(X,T,nFolds,True):
        for lamb in lambdas:
            model = train(Xtrain,Ttrain,lamb)
            # (removed an unused intermediate `predict`; the RMSEs below
            # recompute the predictions they need)
            results.append([foldCount,lamb,rmse(use(Xtrain,model),Ttrain),rmse(use(Xval,model),Tval),rmse(use(Xtest,model),Ttest)])
        foldCount +=1
    results = np.array(results)
    bestresults = []
    for i in range(foldCount):
        # Rows belonging to partition i only.
        FCRow = np.take(results,(np.where(results[:,0:1] == i))[0], axis =0)
        # BUG FIX: search for the minimum validation error within this
        # partition's own rows.  The original searched the full results
        # array, so a matching RMSE value in a *different* partition could
        # select the wrong row(s) and break the reshape below.
        minRow = np.where(FCRow[:,3:4] == (np.amin(FCRow[:,3:4],axis=0)))[0]
        bestresults.append(np.take(FCRow,minRow,axis=0))
    bestresults = np.array(bestresults)
    bestresults = bestresults.reshape(bestresults.shape[0], bestresults.shape[2])
    return bestresults
```
Let's now start experimenting with these methods.
```
# Search a small grid of ridge penalties using 4 folds
lambdas = [0,1,2,3,4,5]
bestresults = multipleLambdas(X,T,4,lambdas)
bestresults
```
We can see that the least validation error obtained is 4.184 when we have 4 folds and when $\lambda=0$ . This value is almost same as the error obtained when we used linear regression model. Let's try by increasing the number of folds and for a different set of lambda values.
```
# Wider-spaced penalties, now with 5 folds
lambdas = [0,5,10,15,20]
bestresults = multipleLambdas(X,T,5,lambdas)
bestresults
```
We can see that the least validation error in this case is 4.185 and is obtained for 5 folds with lambda value 20. Lets try to increase the lambda values and see if we get better results.
```
# Even larger penalties, 5 folds
lambdas = [5,20,40,50,100]
bestresults = multipleLambdas(X,T,5,lambdas)
bestresults
```
The least validation error obtained in this case is 4.185 for $\lambda=5$ which is almost the same as linear regression method.
Let's try using neural networks for building the model and see if that yields better results.
### Neural Networks Model
A neural network can be thought of as a network of “neurons” organised in layers. The predictors (or inputs) form the bottom layer, and the forecasts (or outputs) form the top layer.
#### Method
I have used two methods - **trainNN** and **evaluateNN** for training and evaluating the neural network model. In the trainNN method, we first create an object nnet of the NeuralNetwork class and then we train it for different iterations. This method returns the nnet object. The evaluate method takes this nnet object, uses it to generate predictions, and then computes the error.
```
from neuralnetworks import NeuralNetwork as nn
def trainNN(X,T, parameters):
    """Build and train a NeuralNetwork.

    `parameters` is a (hidden-layer spec, nIterations) pair.  Returns a dict
    holding the trained network under the 'net' key, matching the model-dict
    convention used elsewhere in this notebook.
    """
    network = nn(X.shape[1], parameters[0], T.shape[1])
    network.train(X, T, nIterations=parameters[1], verbose=False)
    return {'net': network}
def evaluateNN(model,X,T):
    """RMSE of the network in model['net'] on inputs X against targets T."""
    residual = model['net'].use(X) - T
    return np.sqrt(np.mean(residual ** 2))
```
Similar to the above method, we define a **trainValidateTestKFolds** method which returns the following:
1. the best parameter with number of hidden layers and iterations(hidden layers in case of neural networks)
2. the best parameter values with the training error
3. the mean of the validation error
4. the testing error.
```
def trainValidateTestKFolds(X,T,parameterSets,nFolds,
                            shuffle=False,verbose=False):
    """k-fold cross-validation over neural-network parameter sets.

    For each test fold, every parameter set is trained on the remaining
    folds and scored on each validation fold; the set with the lowest mean
    validation RMSE is retrained on a train+validation concatenation and
    evaluated on the test fold.  Returns one list per test fold:
    [best parameter set, mean validation error, train error, test error].
    """
    # Randomly arrange row indices
    rowIndices = np.arange(X.shape[0])
    if shuffle:
        np.random.shuffle(rowIndices)
    # Calculate number of samples in each of the nFolds folds
    nSamples = X.shape[0]
    nEach = int(nSamples / nFolds)
    if nEach == 0:
        raise ValueError("partitionKFolds: Number of samples in each fold is 0.")
    # Calculate the starting and stopping row index for each fold.
    # Store in startsStops as list of (start,stop) pairs
    starts = np.arange(0,nEach*nFolds,nEach)
    stops = starts + nEach
    stops[-1] = nSamples
    startsStops = list(zip(starts,stops))
    # Repeat with testFold taking each single fold, one at a time
    results = []
    # For each test fold
    for testFold in range(nFolds):
        #initializing bestMean to infinity
        bestMean = float("inf")
        # For each set of parameter values, called parmSet
        for paramset in parameterSets:
            # Find best set of parameter values
            # For each validate fold (except when same as test fold)
            sum = 0
            for validateFold in range(nFolds):
                #Checking if it is same as test fold so
                if testFold == validateFold:
                    continue
                #After selecting test and validate, the remaining are trainFolds
                #so we subtract the test and validate folds from other folds
                trainFolds = np.setdiff1d(range(nFolds), [testFold,validateFold])
                # Constructing Xtrain and Ttrain by collecting rows of all trainFolds
                rows = []
                for tf in trainFolds:
                    i,j = startsStops[tf]
                    rows += rowIndices[i:j].tolist()
                Xtrain = X[rows,:]
                Ttrain = T[rows,:]
                #Use trainf to fit model to training data using parmSet
                model = trainNN(Xtrain,Ttrain,paramset)
                # Constructing Xvalidate and Tvalidate
                i,j = startsStops[validateFold]
                rows = rowIndices[i:j]
                Xvalidate = X[rows,:]
                Tvalidate = T[rows,:]
                # Calculate the error of this model by calling evaluatef with
                # the model and validation data
                error = evaluateNN(model, Xvalidate, Tvalidate)
                sum = sum+error
            # Calculate the mean of these errors.
            currentMean = sum/(nFolds-1)
            # If this error is less than the previously best error for parmSet,
            # update best parameter values and best error
            if(bestMean > currentMean):
                #print(bestMean, currentMean)
                bestMean = currentMean
                bestLambda = paramset
        # Make a new set of training data by concatenating the training and
        # validation data from previous step.
        # NOTE(review): Xtrain/Ttrain/Xvalidate/Tvalidate here are whatever
        # the *last* inner-loop iteration left behind, not necessarily the
        # partitions seen when bestLambda won — confirm this is intended.
        bestT = np.concatenate((Ttrain,Tvalidate),axis=0)
        bestX = np.concatenate((Xtrain,Xvalidate),axis=0)
        # Retrain, using trainf again, to fit a new model using the best set of parameter values
        # found above, to this new training data.
        newModel = trainNN(bestX,bestT,bestLambda)
        # Calculate error of this new model on the test data, and also on the new
        # training data.
        trainError = evaluateNN(newModel,bestX, bestT)
        # Construct Xtest and Ttest
        i,j = startsStops[testFold]
        rows = rowIndices[i:j]
        Xtest = X[rows,:]
        Ttest = T[rows,:]
        testError = evaluateNN(newModel,Xtest, Ttest)
        bestList = [bestLambda,bestMean,trainError,testError]
        if verbose:
            print(bestList)
        results.append(bestList)
    return results
import itertools
# Grid of (hidden-layer spec, nIterations) pairs to cross-validate
parms = list(itertools.product([[5],[1,20],[10,10,100]], [10,50,75,200]))
result = trainValidateTestKFolds(X, T, parms, nFolds=5, shuffle=False)
# One row per test fold: parameters, mean validation / train / test RMSE
for x in result:
    print('{:>30s} {:10.3f} {:10.3f} {:10.3f}'.format(str(x[0]), *x[1:]))
```
Let's try using different number of iterations and hidden layers
```
# A second grid with more layer configurations and iteration counts
parms = list(itertools.product([[5],[1,3,20],[10,10,100,1]], [10,50,75,200]))
result = trainValidateTestKFolds(X, T, parms, nFolds=5, shuffle=False)
for x in result:
    print('{:>30s} {:10.3f} {:10.3f} {:10.3f}'.format(str(x[0]), *x[1:]))
```
We can see that neural network with hidden layers [1,3,20] and 200 iterations gives the best results. It gives an error of 2.31 which is lesser compared to the above two methods.
## Experimental Results
From the above results we can see that the best model is obtained when we use Neural Networks with [1, 3, 20] hidden layers and for 200 iterations. Now as we have the model, we can use this model to predict the crimes in Chicago for the next years.
Whenever we want to predict the crime(when the latitude, location, day of the week are given), we consider the given inputs as test data and use the model that was built previously to predict the output. Thus we get the predicted crime and also the probability that the data predicted is correct.
## Conclusion
On analyzing the Chicago crimes data, we can see that most crimes occur on streets and in residential areas, and the types of crimes that occur most frequently are Theft and Battery. We can also see that the most crimes occurred in 2008. The number of arrests is very low compared to the number of crimes.
By using Neural Network with 200 iterations and [1, 3, 20] hidden units, we have built a model with the 2012-2017 crimes data which helps in predicting the type of crime given the latitude, longitude, day of the week and Year.
This approach can be further extended by considering the time at which the crime has occured. By doing so, we can yield better results.
## References
* https://www.kaggle.com/currie32/crimes-in-chicago
* https://www.kaggle.com/femiogunbode/eda-of-crime-in-chicago-from-2012-2016/discussion
* https://www.kaggle.com/djonafegnem/chicago-crime-data-analysis
* http://pandas.pydata.org/
* http://pandas.pydata.org/pandas-docs/stable/tutorials.html
| github_jupyter |
# Chapter 3: Deep Learning Libraries
This chapter discusses the important libraries and frameworks that one needs to get started in artificial intelligence. We'll cover the basic functions of the three most popular deep learning frameworks: Tensorflow, Pytorch, and Keras, and show you how to get up and running in each of these frameworks as we will be utilizing them in the following chapters. We'll touch upon computing for Artificial Intelligence, and discuss how GPUs and other advanced memory units can improve AI. Lastly, we'll discuss the fundamentals of two popular cloud computing frameworks for deep learning, AWS and Google Cloud.
```
import numpy as np
```
## TensorFlow Basics
```
import tensorflow as tf

## Define two constants
x = tf.constant(2)
y = tf.constant(2)
## Multiply the constants
product = tf.multiply(x, y)
## FIX: tf.initialize_all_variables() was deprecated in TF 0.12 and later
## removed; tf.global_variables_initializer() is the drop-in replacement.
## (This graph has no variables, so the init op is a no-op kept for
## illustration.)
init = tf.global_variables_initializer()
## In Tensorflow, we must first initialize a session object
sess = tf.Session()
sess.run(init)
## Run the session
print(sess.run(product))
## Close the session
sess.close()
```
Creating a new graph
```
my_graph = tf.Graph()
## BUG FIX: the graph is bound to `my_graph`, but the original entered
## `new_graph.as_default()`, which raises NameError.
with my_graph.as_default():
    x = tf.constant(2)
    y = tf.constant(2)
```
Scopes:
```
# Group ops under a common name prefix ("my_scope/...") in the graph
with tf.name_scope("my_scope"):
    ## Define two constants
    const1 = tf.constant([4])
    const2 = tf.constant([5])
    ## Multiply the constants
    product = tf.multiply(const1, const2)
```
## Keras Basics
As Keras is designed as a model-level library, it does not contain methods for doing basic operations as PyTorch or base TensorFlow does. Instead, it utilizes TensorFlow as a backend. As such, its basic operations are the same as basic TensorFlow operations:
```
import keras.backend as K

# Backend tensors behave like TensorFlow constants
x = K.constant(5)
y = K.constant(6)
# Elementwise product of the two backend tensors
product = x * y
```
## PyTorch
```
import torch

# Two 1-element integer tensors
x = torch.IntTensor([4])
y = torch.IntTensor([5])
# Elementwise product: IntTensor([20])
product = x * y
```
It's easy to switch between numpy and pytorch
```
## Create a numpy array
numpy_array = np.random.randn(10,10)
## Convert the numpy array to a pytorch tensor (shares the same memory)
pytorch_tensor = torch.from_numpy(numpy_array)
## Convert it back to Numpy
numpy_again = pytorch_tensor.numpy()
```
Pytorch tensors can be manipulated in a way that is similar to numpy
```
tensor = torch.FloatTensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
## print the third element of the 2nd row of the tensor (prints 6.0)
print(tensor[1][2])
## replace the second value of the first row of the tensor
tensor[0][1] = 1
print(tensor)
```
Like TensorFlow, PyTorch runs on the concept of variables, which are values that are intended to change and be updated during training processes
```
from torch.autograd import Variable
## Create a tensor
tensor_two = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
## Convert it to a variable
## NOTE(review): Variable is deprecated since PyTorch 0.4 — tensors track
## gradients directly now, and Variable(t) simply returns a tensor here.
variable = Variable(tensor_two)
## Access the underlying data tensor
variable.data
```
## TensorFlow Logging
```
my_list = []
## Iterate through the available GPUs
for device in ['/gpu:0', '/gpu:1']:
    ## Utilize the TensorFlow device manager
    with tf.device(device):
        x = tf.constant([1,2,3], shape=[1,3])
        ## BUG FIX: the original read `shape [3,1]` (missing '='), which is a
        ## SyntaxError.
        y = tf.constant([1,2,3], shape=[3,1])
        my_list.append(tf.matmul(x, y))
## Pin the addition (which broadcasts the last x and y) to the CPU
with tf.device('/cpu:0'):
    sum_operation = tf.add(x,y)
## Run everything through a session, logging where each op was placed
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess.run(sum_operation)
```
| github_jupyter |
<table>
<tr>
<td><img src='SystemLink_icon.png' /></td>
<td ><h1><strong>NI SystemLink Python API</strong></h1></td>
</tr>
</table>
## Test Monitor Service Example
***
The Test Monitor Service API provides functions to create, update, delete and query Test results and Test steps.
***
# Prerequisites
- The **NI SystemLink Server Test Module** needs to be installed in order to run this example
- The **NI SystemLink Client** needs to be installed on a system which has TestStand installed and is registered to the SystemLink server. Configure the SystemLink TestStand plugin reporting to enable publishing test results.
- Before you run this example, TestStand mock test results are needed:
- From **TestStand** open the **'Computer Motherboard Test Sequence.seq'**:
- Go to Help -> Find Examples and follow the instructions to open the Examples workspace (Examples.tsw)
- From the Workspace tab, expand **Demos** and select **Computer Motherboard Test**. Open one of the sequence files, based on your language of choice
- Run the sequence at least 10 times
- Make sure you fail several tests, on different components
# Summary
This notebook uses the Test Monitor Service API to import test and step results into Python. The data is used to do custom analytics.
- Get all the test results that were created from the 'Computer Motherboard Test Sequence.seq'
- Create a Pandas Dataframe with the information we want to process for each test
- Plot pass vs. fail tests
- Visualize test run vs. test duration
- Pareto graph (step type)
***
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from systemlink.testmonclient import TestMonitorClient, testmon_messages
testmonclient = TestMonitorClient(service_name='TestMonitorClient')
# Create pandas dataframe with the relevant test results information, to be used later
def get_dataframe_from_results(results):
    """Return a DataFrame with one row per test result.

    Each result object must expose ``status.status_name``, ``started_at``,
    ``updated_at``, ``program_name``, ``id``, ``system_id``, ``operator``,
    ``serial_number`` and ``total_time_in_seconds``.

    Returns an empty DataFrame (same columns) when ``results`` is empty;
    the original per-row ``pd.concat`` raised ``ValueError`` in that case.
    Building from a list of dicts is also O(n) instead of concatenating
    one single-row frame per result, and yields the same 0..n-1 index.
    """
    columns = ['status', 'startedAt', 'updatedAt', 'programName', 'id',
               'systemId', 'operator', 'serialNumber', 'totalTimeInSeconds']
    rows = [{'status': result.status.status_name,
             'startedAt': result.started_at,
             'updatedAt': result.updated_at,
             'programName': result.program_name,
             'id': result.id,
             'systemId': result.system_id,
             'operator': result.operator,
             'serialNumber': result.serial_number,
             'totalTimeInSeconds': result.total_time_in_seconds,
             } for result in results]
    return pd.DataFrame(rows, columns=columns)
# Only query test results that belong to the 'Computer Motherboard Test Sequence.seq' test program
# (the positional arguments are the ResultQuery filter fields; every other
# filter is deliberately left as None).
query = testmon_messages.ResultQuery(None, None, None, ['Computer Motherboard Test Sequence.seq'], None, None, None, None, None, None, None, None, None)
# query_results returns (results, continuation_token); the token is unused here.
results, _ = testmonclient.query_results(query)
df_results = get_dataframe_from_results(results)
# Show the first elements of the dataframe, which holds the data we will use for further analysis
df_results[:2]
```
# Bar Plot of Test Results
Group the tests results by pass/fail. Create a bar plot to visualize the test runs by result.
```
# Visualize tests results (pass/fail)
# NOTE: bar_width and opacity are module-level and reused by the Pareto
# chart cell further below.
bar_width = 0.4
opacity = 0.4
# Count runs per status; res['id'] holds the run count for each status label.
# NOTE(review): assumes at least one 'Passed' and one 'Failed' run exist --
# the indexing below raises KeyError otherwise.
res = df_results.groupby('status').count()
failed = res['id']['Failed']
passed = res['id']['Passed']
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(7, 7))
plt.bar(1, passed, bar_width, alpha=opacity, color='b', label='Pass')
plt.bar(1.5, failed, bar_width, alpha=opacity, color='r', label='Fail')
plt.xticks([1, 1.5], ['Pass', 'Fail'], size='15')
plt.ylabel('Runs', size='15')
plt.title('Total Runs: ' + str(passed + failed), weight='bold', size='15')
plt.show()
```
# Plot Test Run vs. Duration
Visualize the test runs vs. duration, with red/green color indicating pass/fail.
```
# Visualize test failures vs duration: one point per run, red = failed,
# green = passed.
result_idx = np.arange(df_results.shape[0])
df_time = df_results[['totalTimeInSeconds', 'status']]
color = ['r' if status == 'Failed' else 'g' for status in df_time['status']]
fig = plt.figure(figsize=(10, 7))
# alpha must be numeric; the original passed the string '0.5', which
# matplotlib rejects (alpha is validated as a float).
plt.scatter(result_idx, df_time['totalTimeInSeconds'], s=150, c=color, alpha=0.5)
plt.title('Test Results - Duration', weight='bold', size='15')
plt.xlabel('Test Runs', size='15')
plt.ylabel('Time (seconds)', size='15')
plt.show()
```
# Pareto distribution
Get a Pandas Dataframe with all the step failures. Visualize the failures in a Pareto graph, which helps visualize the failure distribution, by step type.
```
# Pareto distribution of step failures visualization
# Create pandas dataframe with the step results information that we want for further processing
def get_failed_steps_dataframe(steps):
    """Return a DataFrame with one row per failed (non-sequence-call) step.

    Sequence-call steps are excluded because they only aggregate the
    results of their child steps.  Returns an empty DataFrame with the
    same columns when no step failed; the original ``pd.concat`` call
    raised ``ValueError`` in that case.
    """
    rows = [{'name': step.name,
             'id': step.step_id,
             'totalTimeInSeconds': step.total_time_in_seconds,
             } for step in steps
            if step.status.status_name == 'Failed' and step.step_type != 'SequenceCall']
    return pd.DataFrame(rows, columns=['name', 'id', 'totalTimeInSeconds'])
results_ids = [result.id for result in results]
step_query = testmon_messages.StepQuery(None, None, None, results_ids, None, None, None, None, None, None)
steps, _ = testmonclient.query_steps(step_query)
steps_df = get_failed_steps_dataframe(steps)
# Failure count per step name, most frequent first (Pareto ordering).
res = steps_df.groupby('name').count()
res = res.sort_values('id', ascending=False)
fig, ax1 = plt.subplots()
fig.set_size_inches(15, 7)
plt.title('Failures by Test', weight='bold', size='15')
plt.ylabel('Number of Runs', size='15')
plt.xlabel('Test Type', size='15')
ax1.get_xaxis().set_ticks([])
# Create the Pareto chart bars and accumulate the running failure total.
previous_val = 0
cumulative = []
for idx, row in res.iterrows():
    val = row['id']
    cumulative.append(val + previous_val)
    previous_val = val + previous_val
    ax1.bar(idx, val, bar_width, alpha=opacity, label=idx)
# Add a legend
# NOTE(review): these labels come from steps_df in insertion order, not the
# sorted Pareto order used for the bars -- verify they line up as intended.
labels = list(steps_df['name'])
plt.legend(labels, loc='upper right')
# Cumulative line, in percentage.  The original divided a plain Python list
# by an int (`cumulative/cumulative[-1]`), which raises TypeError; convert
# to an ndarray first so the division broadcasts elementwise.
cumulative_percentage = np.array(cumulative) / cumulative[-1] * 100
ax2 = ax1.twinx()
ax2.set_ylim([0, 100])
ax2.plot(cumulative_percentage)
plt.ylabel('Failure Percentage', size='15')
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/satyajitghana/PadhAI-Course/blob/master/13_OverfittingAndRegularization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss
from tqdm import tqdm_notebook
import seaborn as sns
sns.set()
from sklearn.preprocessing import OneHotEncoder
from sklearn.datasets import load_iris
from numpy.linalg import norm
my_cmap = 'inferno'
np.random.seed(0)
```
## Generate data
```
# Load iris and keep only the first two features (sepal length/width) so
# the problem is 2-D and can be scattered directly.
iris=load_iris()
data = iris.data[:, :2] # take only the first two features
labels = iris.target
plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap)
plt.show()
print("Data shape",data.shape)
print("Labels shape",labels.shape)
```
## Multi class classification
```
# Stratified 80/20 split so each iris class keeps its proportion in both sets.
X_train, X_val, Y_train, Y_val = train_test_split(data, labels, stratify=labels, random_state=0,test_size=0.2)
# NOTE(review): the third value printed is the FULL label array's shape,
# not Y_train/Y_val -- easy to misread as the training label shape.
print(X_train.shape, X_val.shape, labels.shape)
# One-hot encode the integer labels for softmax + cross-entropy training.
enc = OneHotEncoder()
y_OH_train = enc.fit_transform(np.expand_dims(Y_train,1)).toarray()
y_OH_val = enc.fit_transform(np.expand_dims(Y_val,1)).toarray()
print(y_OH_train.shape, y_OH_val.shape)
```
## FF Class
```
class FFNetwork:
    """Two-layer feed-forward classifier: 2 inputs -> num_hidden -> 3 classes.

    Weights can be initialized with plain random, He, or Xavier scaling,
    and the hidden layer supports sigmoid/tanh/relu/leaky_relu activations.
    NOTE(review): get_accuracy (and therefore fit) reads the module-level
    globals X_train, X_val, Y_train, Y_val -- they must exist before fit()
    is called.
    """

    def __init__(self, num_hidden=2, init_method = 'xavier', activation_function = 'sigmoid', leaky_slope = 0.1):
        self.params={}
        self.num_layers=2
        self.layer_sizes = [2, num_hidden, 3]
        self.activation_function = activation_function
        self.leaky_slope = leaky_slope
        # Fixed seed so every configuration starts from comparable weights.
        np.random.seed(0)
        if init_method == "random":
            for i in range(1,self.num_layers+1):
                self.params["W"+str(i)] = np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])
                self.params["B"+str(i)] = np.random.randn(1,self.layer_sizes[i])
        elif init_method == "he":
            # He init: scale by sqrt(2 / fan_in); suited to relu-family activations.
            for i in range(1,self.num_layers+1):
                self.params["W"+str(i)] = np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])*np.sqrt(2/self.layer_sizes[i-1])
                self.params["B"+str(i)] = np.random.randn(1,self.layer_sizes[i])
        elif init_method == "xavier":
            # Xavier init: scale by sqrt(1 / fan_in); suited to sigmoid/tanh.
            for i in range(1,self.num_layers+1):
                self.params["W"+str(i)]=np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])*np.sqrt(1/self.layer_sizes[i-1])
                self.params["B"+str(i)]=np.random.randn(1,self.layer_sizes[i])
        # Optimizer-state slots (momentum/velocity). Only plain GD is used
        # in fit(), so these stay at zero; kept for API compatibility.
        self.gradients={}
        self.update_params={}
        self.prev_update_params={}
        for i in range(1,self.num_layers+1):
            self.update_params["v_w"+str(i)]=0
            self.update_params["v_b"+str(i)]=0
            self.update_params["m_b"+str(i)]=0
            self.update_params["m_w"+str(i)]=0
            self.prev_update_params["v_w"+str(i)]=0
            self.prev_update_params["v_b"+str(i)]=0

    def forward_activation(self, X):
        """Apply the configured hidden-layer activation elementwise."""
        if self.activation_function == "sigmoid":
            return 1.0/(1.0 + np.exp(-X))
        elif self.activation_function == "tanh":
            return np.tanh(X)
        elif self.activation_function == "relu":
            return np.maximum(0,X)
        elif self.activation_function == "leaky_relu":
            return np.maximum(self.leaky_slope*X,X)

    def grad_activation(self, X):
        """Derivative of the activation, expressed in terms of its OUTPUT X
        (e.g. sigmoid' = s*(1-s)), as used by the backward pass."""
        if self.activation_function == "sigmoid":
            return X*(1-X)
        elif self.activation_function == "tanh":
            return (1-np.square(X))
        elif self.activation_function == "relu":
            return 1.0*(X>0)
        elif self.activation_function == "leaky_relu":
            d=np.zeros_like(X)
            d[X<=0]=self.leaky_slope
            d[X>0]=1
            return d

    def get_accuracy(self):
        """Return (train, validation) accuracy on the module-level splits.

        Fixed: the original called the global ``model`` instead of
        ``self``, silently scoring whatever network the global happened
        to reference rather than this instance.
        """
        Y_pred_train = self.predict(X_train)
        Y_pred_train = np.argmax(Y_pred_train,1)
        Y_pred_val = self.predict(X_val)
        Y_pred_val = np.argmax(Y_pred_val,1)
        accuracy_train = accuracy_score(Y_pred_train, Y_train)
        accuracy_val = accuracy_score(Y_pred_val, Y_val)
        return accuracy_train,accuracy_val

    def softmax(self, X):
        """Row-wise softmax. Shifting by the row max is mathematically
        identical but prevents overflow in np.exp for large logits."""
        exps = np.exp(X - np.max(X, axis=1, keepdims=True))
        return exps / np.sum(exps, axis=1).reshape(-1,1)

    def forward_pass(self, X, params = None):
        """Forward propagate X; caches A1/H1/A2/H2 for the backward pass."""
        if params is None:
            params = self.params
        self.A1 = np.matmul(X, params["W1"]) + params["B1"] # (N, 2) x (2, H) -> (N, H)
        self.H1 = self.forward_activation(self.A1)          # hidden activations
        self.A2 = np.matmul(self.H1, params["W2"]) + params["B2"] # (N, H) x (H, 3) -> (N, 3)
        self.H2 = self.softmax(self.A2)                     # class probabilities
        return self.H2

    def grad(self, X, Y, params = None):
        """Compute cross-entropy gradients into self.gradients.

        Uses the softmax + cross-entropy shortcut dA2 = H2 - Y, then
        standard backprop through the hidden layer.
        """
        if params is None:
            params = self.params
        self.forward_pass(X, params)
        m = X.shape[0]
        self.gradients["dA2"] = self.H2 - Y                                            # (N, 3)
        self.gradients["dW2"] = np.matmul(self.H1.T, self.gradients["dA2"])            # (H, 3)
        self.gradients["dB2"] = np.sum(self.gradients["dA2"], axis=0).reshape(1, -1)   # (1, 3)
        self.gradients["dH1"] = np.matmul(self.gradients["dA2"], params["W2"].T)       # (N, H)
        self.gradients["dA1"] = np.multiply(self.gradients["dH1"], self.grad_activation(self.H1))
        self.gradients["dW1"] = np.matmul(X.T, self.gradients["dA1"])                  # (2, H)
        self.gradients["dB1"] = np.sum(self.gradients["dA1"], axis=0).reshape(1, -1)   # (1, H)

    def fit(self, X, Y, epochs=1, algo= "GD",l2_norm=False, lambda_val=0.8, display_loss=False, eta=1):
        """Full-batch gradient descent with optional L2 weight decay.

        Plots train/validation accuracy per epoch and, when display_loss
        is True, log loss plus mean weight magnitude.  `algo` is accepted
        for API compatibility but only plain GD is implemented.
        """
        train_accuracies={}
        val_accuracies={}
        if display_loss:
            loss = []
            weight_mag = []
        for num_epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            m = X.shape[0]
            self.grad(X, Y)
            for i in range(1,self.num_layers+1):
                if l2_norm:
                    # Weight decay: shrink W by eta*lambda/m on top of the GD step.
                    self.params["W"+str(i)] -= (eta * lambda_val)/m * self.params["W"+str(i)] + eta * (self.gradients["dW"+str(i)]/m)
                else:
                    self.params["W"+str(i)] -= eta * (self.gradients["dW"+str(i)]/m)
                # Biases are never regularized.
                self.params["B"+str(i)] -= eta * (self.gradients["dB"+str(i)]/m)
            train_accuracy,val_accuracy=self.get_accuracy()
            train_accuracies[num_epoch]=train_accuracy
            val_accuracies[num_epoch]=val_accuracy
            if display_loss:
                Y_pred = self.predict(X)
                loss.append(log_loss(np.argmax(Y, axis=1), Y_pred))
                # NOTE(review): the /18 normalizer matches the smallest
                # (2-2-3) network's parameter count; for wider layers it is
                # only a constant scale factor on the plotted curve.
                weight_mag.append((norm(self.params["W1"]) + norm(self.params["W2"]) + norm(self.params["B1"]) + norm(self.params["B2"]))/18)
        plt.plot(list(train_accuracies.values()),label="Train accuracy")
        plt.plot(list(val_accuracies.values()),label="Validation accuracy")
        # Reference lines: 0.9 ("good") and 0.33 (chance level for 3 classes).
        plt.plot(np.ones((epochs, 1))*0.9)
        plt.plot(np.ones((epochs, 1))*0.33)
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.show()
        if display_loss:
            fig, ax1 = plt.subplots()
            color = 'tab:red'
            ax1.set_xlabel('epochs')
            ax1.set_ylabel('Log Loss', color=color)
            ax1.plot(loss, '-o', color=color)
            ax1.tick_params(axis='y', labelcolor=color)
            ax2 = ax1.twinx()  # second y-axis sharing the same x-axis
            color = 'tab:blue'
            ax2.set_ylabel('Weight Magnitude', color=color)  # we already handled the x-label with ax1
            ax2.plot(weight_mag, '-*', color=color)
            ax2.tick_params(axis='y', labelcolor=color)
            fig.tight_layout()
            plt.show()

    def predict(self, X):
        """Return class probabilities for X as a squeezed ndarray."""
        Y_pred = self.forward_pass(X)
        return np.array(Y_pred).squeeze()
def print_accuracy():
    """Print train/validation accuracy of the global ``model`` on the
    module-level splits (X_train/Y_train, X_val/Y_val)."""
    train_pred = np.argmax(model.predict(X_train), 1)
    val_pred = np.argmax(model.predict(X_val), 1)
    train_acc = accuracy_score(train_pred, Y_train)
    val_acc = accuracy_score(val_pred, Y_val)
    print("Training accuracy", round(train_acc, 4))
    print("Validation accuracy", round(val_acc, 4))
    if False:  # debug scatter of training predictions; flip to True to enable
        plt.scatter(X_train[:,0], X_train[:,1], c=train_pred, cmap=my_cmap, s=15*(np.abs(np.sign(train_pred-Y_train))+.1))
        plt.show()
# Capacity sweep: widen the hidden layer and compare train/validation
# accuracy.  `model` must stay a module-level global because
# print_accuracy() reads it.
model = FFNetwork(num_hidden=1)
model.fit(X_train, y_OH_train, epochs=100, eta=0.1)
print_accuracy()
model = FFNetwork(num_hidden=2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, display_loss=False)
print_accuracy()
model = FFNetwork(num_hidden=4)
model.fit(X_train, y_OH_train, epochs=400, eta=0.25, display_loss=False)
print_accuracy()
model = FFNetwork(num_hidden=8)
model.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=False)
print_accuracy()
model = FFNetwork(num_hidden=32)
model.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=False)
print_accuracy()
# Widest network, trained long: the most overfitting-prone baseline used
# by the regularization experiments.
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)
print_accuracy()
```
## Add L2 Regularization
```
# L2 regularization sweep on the 64-unit network: increasing lambda_val
# shrinks the weights (visible in the weight-magnitude curve plotted by
# fit when display_loss=True) at the cost of training accuracy.
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=0.1, display_loss=True)
print_accuracy()
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=1, display_loss=True)
print_accuracy()
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=5, display_loss=True)
print_accuracy()
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=10, display_loss=True)
print_accuracy()
```
## Add noise to training data set
```
# Baseline without noise, then retrain with increasing multiplicative
# Gaussian input noise -- a simple data-augmentation regularizer.
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)
print_accuracy()
for noise_fraction in [0.01, 0.05, 0.1, 0.15, 0.18, 0.2]:
    print(noise_fraction)
    # Scale each feature by (1 - eps) with eps ~ N(0, noise_fraction^2).
    X_train_noisy = X_train * (1 - noise_fraction*np.random.randn(X_train.shape[0], X_train.shape[1]))
    model = FFNetwork(num_hidden=64)
    model.fit(X_train_noisy, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)
    print_accuracy()
```
## Early stopping
```
# Manual early stopping: the same 32-unit model trained for 500 vs 100
# epochs -- compare validation accuracy to see where training should stop.
model = FFNetwork(num_hidden=32)
model.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=True)
print_accuracy()
model = FFNetwork(num_hidden=32)
model.fit(X_train, y_OH_train, epochs=100, eta=0.2, display_loss=True)
print_accuracy()
```
| github_jupyter |
## Define the Convolutional Neural Network
After you've looked at the data you're working with and, in this case, know the shapes of the images and of the keypoints, you are ready to define a convolutional neural network that can *learn* from this data.
In this notebook and in `models.py`, you will:
1. Define a CNN with images as input and keypoints as output
2. Construct the transformed FaceKeypointsDataset, just as before
3. Train the CNN on the training data, tracking loss
4. See how the trained model performs on test data
5. If necessary, modify the CNN structure and model hyperparameters, so that it performs *well* **\***
**\*** What does *well* mean?
"Well" means that the model's loss decreases during training **and**, when applied to test image data, the model produces keypoints that closely match the true keypoints of each face. And you'll see examples of this later in the notebook.
---
## CNN Architecture
Recall that CNN's are defined by a few types of layers:
* Convolutional layers
* Maxpooling layers
* Fully-connected layers
You are required to use the above layers and encouraged to add multiple convolutional layers and things like dropout layers that may prevent overfitting. You are also encouraged to look at literature on keypoint detection, such as [this paper](https://arxiv.org/pdf/1710.00977.pdf), to help you determine the structure of your network.
### TODO: Define your model in the provided file `models.py` file
This file is mostly empty but contains the expected name and some TODO's for creating your model.
---
## PyTorch Neural Nets
To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in.
Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network.
#### Define the Layers in ` __init__`
As a reminder, a conv/pool layer may be defined like this (in `__init__`):
```
# 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel
self.conv1 = nn.Conv2d(1, 32, 3)
# maxpool that uses a square window of kernel_size=2, stride=2
self.pool = nn.MaxPool2d(2, 2)
```
#### Refer to Layers in `forward`
Then referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied:
```
x = self.pool(F.relu(self.conv1(x)))
```
Best practice is to place any layers whose weights will change during the training process in `__init__` and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, should appear *only* in the `forward` function.
#### Why models.py
You are tasked with defining the network in the `models.py` file so that any models you define can be saved and loaded by name in different notebooks in this project directory. For example, by defining a CNN class called `Net` in `models.py`, you can then create that same architecture in this and other notebooks by simply importing the class and instantiating a model:
```
from models import Net
net = Net()
```
```
# load the data if you need to; if you have already loaded the data, you may comment this cell out
# -- DO NOT CHANGE THIS CELL -- #
!mkdir /data
!wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip
!unzip -n /data/train-test-data.zip -d /data
```
<div class="alert alert-info">**Note:** Workspaces automatically close connections after 30 minutes of inactivity (including inactivity while training!). Use the code snippet below to keep your workspace alive during training. (The active_session context manager is imported below.)
</div>
```
from workspace_utils import active_session
with active_session():
train_model(num_epochs)
```
```
# import the usual resources
import matplotlib.pyplot as plt
import numpy as np
# import utilities to keep workspaces alive during model training
from workspace_utils import active_session
# watch for any changes in model.py, if it changes, re-load it automatically
%load_ext autoreload
%autoreload 2
## TODO: Define the Net in models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
## TODO: Once you've define the network, you can instantiate it
# one example conv layer has been provided for you
from models import Net
net = Net()
print(net)
```
## Transform the dataset
To prepare for training, create a transformed dataset of images and keypoints.
### TODO: Define a data transform
In PyTorch, a convolutional neural network expects a torch image of a consistent size as input. For efficient training, and so your model's loss does not blow up during training, it is also suggested that you normalize the input images and keypoints. The necessary transforms have been defined in `data_load.py` and you **do not** need to modify these; take a look at this file (you'll see the same transforms that were defined and applied in Notebook 1).
To define the data transform below, use a [composition](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#compose-transforms) of:
1. Rescaling and/or cropping the data, such that you are left with a square image (the suggested size is 224x224px)
2. Normalizing the images and keypoints; turning each RGB image into a grayscale image with a color range of [0, 1] and transforming the given keypoints into a range of [-1, 1]
3. Turning these images and keypoints into Tensors
These transformations have been defined in `data_load.py`, but it's up to you to call them and create a `data_transform` below. **This transform will be applied to the training data and, later, the test data**. It will change how you go about displaying these images and keypoints, but these steps are essential for efficient training.
As a note, should you want to perform data augmentation (which is optional in this project), and randomly rotate or shift these images, a square image size will be useful; rotating a 224x224 image by 90 degrees will result in the same shape of output.
```
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# the dataset we created in Notebook 1 is copied in the helper file `data_load.py`
from data_load import FacialKeypointsDataset
# the transforms we defined in Notebook 1 are in the helper file `data_load.py`
from data_load import Rescale, RandomCrop, Normalize, ToTensor
## TODO: define the data_transform using transforms.Compose([all tx's, . , .])
# order matters! i.e. rescaling should come before a smaller crop
# Rescale to 250px, random-crop to the 224x224 input size (cheap
# augmentation), normalize image/keypoints, then convert to tensors.
data_transform = transforms.Compose([Rescale(250), RandomCrop(224), Normalize(), ToTensor()])
# testing that you've defined a transform
assert(data_transform is not None), 'Define a data_transform'
# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv',
                                             root_dir='/data/training/',
                                             transform=data_transform)
print('Number of images: ', len(transformed_dataset))
# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].size(), sample['keypoints'].size())
```
## Batching and loading data
Next, having defined the transformed dataset, we can use PyTorch's DataLoader class to load the training data in batches of whatever size as well as to shuffle the data for training the model. You can read more about the parameters of the DataLoader, in [this documentation](http://pytorch.org/docs/master/data.html).
#### Batch size
Decide on a good batch size for training your model. Try both small and large batch sizes and note how the loss decreases as the model trains. Too large a batch size may cause your model to crash and/or run out of memory while training.
**Note for Windows users**: Please change the `num_workers` to 0 or you may face some issues with your DataLoader failing.
```
# load training data in batches
batch_size = 10
# shuffle=True re-randomizes batch order each epoch; on Windows set
# num_workers=0 to avoid DataLoader worker issues.
train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)
```
## Before training
Take a look at how this model performs before it trains. You should see that the keypoints it predicts start off in one spot and don't match the keypoints on a face at all! It's interesting to visualize this behavior so that you can compare it to the model after training and see how the model has improved.
#### Load in the test dataset
The test dataset is one that this model has *not* seen before, meaning it has not trained with these images. We'll load in this test data and before and after training, see how your model performs on this set!
To visualize this test data, we have to go through some un-transformation steps to turn our images into python images from tensors and to turn our keypoints back into a recognizable range.
```
# load in the test data, using the dataset class
# AND apply the data_transform you defined above
# create the test dataset
test_dataset = FacialKeypointsDataset(csv_file='/data/test_frames_keypoints.csv',
                                      root_dir='/data/test/',
                                      transform=data_transform)
# load test data in batches
batch_size = 10
# NOTE: shuffle=True means the visualized "first batch" differs between
# runs; set False for a reproducible sample.
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=4)
```
## Apply the model on a test sample
To test the model on a test sample of data, you have to follow these steps:
1. Extract the image and ground truth keypoints from a sample
2. Wrap the image in a Variable, so that the net can process it as input and track how it changes as the image moves through the network.
3. Make sure the image is a FloatTensor, which the model expects.
4. Forward pass the image through the net to get the predicted, output keypoints.
This function test how the network performs on the first batch of test data. It returns the images, the transformed images, the predicted keypoints (produced by the model), and the ground truth keypoints.
```
# test the model on a batch of test images
def net_sample_output():
    """Run the global `net` on the first batch from `test_loader`.

    Returns (images, predicted keypoints reshaped to batch x 68 x 2,
    ground-truth keypoints).  Implicitly returns None if the loader
    yields no batches.
    """
    for batch_idx, batch in enumerate(test_loader):
        imgs = batch['image']
        true_pts = batch['keypoints']
        # the model expects FloatTensor input
        imgs = imgs.type(torch.FloatTensor)
        preds = net(imgs)
        # flat output -> (batch, 68, 2) keypoint pairs
        preds = preds.view(preds.size()[0], 68, -1)
        # only the first batch is needed
        if batch_idx == 0:
            return imgs, preds, true_pts
```
#### Debugging tips
If you get a size or dimension error here, make sure that your network outputs the expected number of keypoints! Or if you get a Tensor type error, look into changing the above code that casts the data into float types: `images = images.type(torch.FloatTensor)`.
```
# call the above function
# returns: test images, test predicted keypoints, test ground truth keypoints
test_images, test_outputs, gt_pts = net_sample_output()
# print out the dimensions of the data to see if they make sense
print(test_images.data.size())
print(test_outputs.data.size())
print(gt_pts.size())
```
## Visualize the predicted keypoints
Once we've had the model produce some predicted output keypoints, we can visualize these points in a way that's similar to how we've displayed this data before, only this time, we have to "un-transform" the image/keypoint data to display it.
Note that I've defined a *new* function, `show_all_keypoints` that displays a grayscale image, its predicted keypoints and its ground truth keypoints (if provided).
```
def show_all_keypoints(image, predicted_key_pts, gt_pts=None):
    """Show image with predicted keypoints.

    image             -- 2-D grayscale array (already un-transformed)
    predicted_key_pts -- (x, y) model keypoints, plotted magenta;
                         presumably 68 rows (see net_sample_output) -- TODO confirm
    gt_pts            -- optional ground-truth keypoints, plotted green
    """
    # image is grayscale
    plt.imshow(image, cmap='gray')
    plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m')
    # plot ground truth points as green pts
    if gt_pts is not None:
        plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g')
```
#### Un-transformation
Next, you'll see a helper function. `visualize_output` that takes in a batch of images, predicted keypoints, and ground truth keypoints and displays a set of those images and their true/predicted keypoints.
This function's main role is to take batches of image and keypoint data (the input and output of your CNN), and transform them into numpy images and un-normalized keypoints (x, y) for normal display. The un-transformation process turns keypoints and images into numpy arrays from Tensors *and* it undoes the keypoint normalization done in the Normalize() transform; it's assumed that you applied these transformations when you loaded your test data.
```
# visualize the output
# by default this shows a batch of 10 images
def visualize_output(test_images, test_outputs, gt_pts=None, batch_size=10):
    """Plot each image in the batch with its predicted (and, if given,
    ground-truth) keypoints, undoing the tensor/normalization transforms
    for display."""
    for idx in range(batch_size):
        plt.figure(figsize=(20,10))
        ax = plt.subplot(1, batch_size, idx+1)
        # tensor -> numpy image: drop the Variable wrapper, then go from
        # torch's channel-first layout to numpy's channel-last
        img = test_images[idx].data.numpy()
        img = np.transpose(img, (1, 2, 0))
        # undo the keypoint normalization applied by Normalize()
        pred_pts = test_outputs[idx].data.numpy()
        pred_pts = pred_pts*50.0+100
        truth = None
        if gt_pts is not None:
            truth = gt_pts[idx]*50.0+100
        show_all_keypoints(np.squeeze(img), pred_pts, truth)
        plt.axis('off')
    plt.show()

# call it
visualize_output(test_images, test_outputs, gt_pts)
```
## Training
#### Loss function
Training a network to predict keypoints is different than training a network to predict a class; instead of outputting a distribution of classes and using cross entropy loss, you may want to choose a loss function that is suited for regression, which directly compares a predicted value and target value. Read about the various kinds of loss functions (like MSE or L1/SmoothL1 loss) in [this documentation](http://pytorch.org/docs/master/_modules/torch/nn/modules/loss.html).
### TODO: Define the loss and optimization
Next, you'll define how the model will train by deciding on the loss function and optimizer.
---
```
## TODO: Define the loss and optimization
import torch.optim as optim
# MSE loss: keypoint prediction is a regression problem, so we compare
# predicted and target coordinates directly.
criterion = nn.MSELoss()
# Adam with AMSGrad; the small learning rate keeps the regression stable.
optimizer = optim.Adam(net.parameters(), lr=0.0001, amsgrad=True, weight_decay=0)
```
## Training and Initial Observation
Now, you'll train on your batched training data from `train_loader` for a number of epochs.
To quickly observe how your model is training and decide whether you should modify its structure or hyperparameters, you're encouraged to start off with just one or two epochs at first. As you train, note how the model's loss behaves over time: does it decrease quickly at first and then slow down? Does it take a while to decrease in the first place? What happens if you change the batch size of your training data or modify your loss function? etc.
Use these initial observations to make changes to your model and decide on the best architecture before you train for many epochs and create a final model.
```
def train_net(n_epochs):
    """Train the global `net` on batches from `train_loader` for n_epochs.

    Relies on module-level globals: net, train_loader, criterion,
    optimizer.  Prints the average loss every 10 batches.
    """
    # prepare the net for training
    net.train()
    for epoch in range(n_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        # train on batches of data, assumes you already have train_loader
        for batch_i, data in enumerate(train_loader):
            # get the input images and their corresponding labels
            images = data['image']
            key_pts = data['keypoints']
            # flatten pts to one row per sample, matching the net's flat output
            key_pts = key_pts.view(key_pts.size(0), -1)
            # convert variables to floats for regression loss
            key_pts = key_pts.type(torch.FloatTensor)
            images = images.type(torch.FloatTensor)
            # forward pass to get outputs
            output_pts = net(images)
            # calculate the loss between predicted and target keypoints
            loss = criterion(output_pts, key_pts)
            # zero the parameter (weight) gradients
            optimizer.zero_grad()
            # backward pass to calculate the weight gradients
            loss.backward()
            # update the weights
            optimizer.step()
            # print loss statistics
            running_loss += loss.item()
            if batch_i % 10 == 9:    # print every 10 batches
                print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, running_loss/10))
                running_loss = 0.0
    print('Finished Training')

# train your network
n_epochs = 5 # start small, and increase when you've decided on your model structure and hyperparams
# this is a Workspaces-specific context manager to keep the connection
# alive while training your model, not part of pytorch
with active_session():
    train_net(n_epochs)
```
## Test data
See how your model performs on previously unseen, test data. We've already loaded and transformed this data, similar to the training data. Next, run your trained model on these images to see what kind of keypoints are produced. You should be able to see if your model is fitting each new face it sees, if the points are distributed randomly, or if the points have actually overfitted the training data and do not generalize.
```
# get a sample of test data again
test_images, test_outputs, gt_pts = net_sample_output()
# sanity-check the tensor sizes; outputs should be (batch, 68, 2) after
# the reshape in net_sample_output
print(test_images.data.size())
print(test_outputs.data.size())
print(gt_pts.size())
## TODO: visualize your test output
# you can use the same function as before, by un-commenting the line below:
visualize_output(test_images, test_outputs, gt_pts)
```
Once you've found a good model (or two), save your model so you can load it and use it later!
Save your models but please **delete any checkpoints and saved models before you submit your project** otherwise your workspace may be too large to submit.
```
## TODO: change the name to something unique for each new model
model_dir = 'saved_models/'
model_name = 'keypoints_model_1.pt'
# after training, save your model parameters (state_dict only, not the
# full module) in the dir 'saved_models'
torch.save(net.state_dict(), model_dir+model_name)
```
After you've trained a well-performing model, answer the following questions so that we have some insight into your training and architecture selection process. Answering all questions is required to pass this project.
### Question 1: What optimization and loss functions did you choose and why?
**Answer**: I used the Adam optimizer and MSE (Mean Squared Error) loss function. Adam is an adaptive method and computes individual learning rates for different parameters. MSE is the sum of squared distances between target variable and predicted values. These were suitable for our case.
### Question 2: What kind of network architecture did you start with and how did it change as you tried different architectures? Did you decide to add more convolutional layers or any layers to avoid overfitting the data?
**Answer**: I started with a network of 6 convolutional layers followed by linear and dropout layers. The loss was high. I then added maxpool layers in between, followed by linear layers and then dropout.
### Question 3: How did you decide on the number of epochs and batch_size to train your model?
**Answer**: I started with 1 epoch, which had Avg. Loss: 0.31769979521632197, then changed to 5 epochs so the model trains better. For the batch size I used 10.
## Feature Visualization
Sometimes, neural networks are thought of as a black box, given some input, they learn to produce some output. CNN's are actually learning to recognize a variety of spatial patterns and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying those one at a time to a sample image. This technique is called feature visualization and it's useful for understanding the inner workings of a CNN.
In the cell below, you can see how to extract a single filter (by index) from your first convolutional layer. The filter should appear as a grayscale grid.
```
# Get the weights in the first conv layer, "conv1"
# if necessary, change this to reflect the name of your first conv layer
weights1 = net.conv1.weight.data
w = weights1.numpy()
# pick one filter (output-channel index); [0] selects its first input channel
filter_index = 0
print(w[filter_index][0])
print(w[filter_index][0].shape)
# display the filter weights
plt.imshow(w[filter_index][0], cmap='gray')
```
## Feature maps
Each CNN has at least one convolutional layer that is composed of stacked filters (also known as convolutional kernels). As a CNN trains, it learns what weights to include in its convolutional kernels and when these kernels are applied to some input image, they produce a set of **feature maps**. So, feature maps are just sets of filtered images; they are the images produced by applying a convolutional kernel to an input image. These maps show us the features that the different layers of the neural network learn to extract. For example, you might imagine a convolutional kernel that detects the vertical edges of a face or another one that detects the corners of eyes. You can see what kind of features each of these kernels detects by applying them to an image. One such example is shown below; from the way it brings out the lines in the image, you might characterize this as an edge detection filter.
<img src='images/feature_map_ex.png' width=50% height=50%/>
Next, choose a test image and filter it with one of the convolutional kernels in your trained CNN; look at the filtered output to get an idea what that particular kernel detects.
### TODO: Filter an image to see the effect of a convolutional kernel
---
```
##TODO: load in and display any image from the transformed test dataset
## TODO: Using cv's filter2D function,
## apply a specific set of filter weights (like the one displayed above) to the test image
import cv2
image = cv2.imread('./images/the_beatles.jpg')
# OpenCV loads BGR; convert so matplotlib shows the true colors
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([]), plt.yticks([])  # hide both axes' tick marks
plt.title("Original Image")  # fixed user-visible typo: was "Orginal Image"
# convolve the image with the learned conv1 kernel displayed above
filtered = cv2.filter2D(image, -1, w[filter_index][0])
fig = plt.figure()
ax = fig.add_subplot(121, xticks = [], yticks = [])
ax.imshow(filtered)
ax.set_title("Feature Map")
ax = fig.add_subplot(122, xticks = [], yticks = [])
ax.imshow(w[filter_index][0], cmap = 'gray')
plt.show()
```
### Question 4: Choose one filter from your trained CNN and apply it to a test image; what purpose do you think it plays? What kind of feature do you think it detects?
**Answer**: It detects vertical lines and other edge-like features in the image.
---
## Moving on!
Now that you've defined and trained your model (and saved the best model), you are ready to move on to the last notebook, which combines a face detector with your saved model to create a facial keypoint detection system that can predict the keypoints on *any* face in an image!
| github_jupyter |
# Fairness Indicators on TF-Hub Text Embeddings
In this colab, you will learn how to use [Fairness Indicators](https://github.com/tensorflow/fairness-indicators) to evaluate embeddings from [TF Hub](https://www.tensorflow.org/hub). Fairness Indicators is a suite of tools that facilitates evaluation and visualization of fairness metrics on machine learning models. Fairness Indicators is built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/guide/tfma), TensorFlow's official model evaluation library.
# Imports
```
!pip install fairness-indicators
%tensorflow_version 2.x
import os
import tempfile
import apache_beam as beam
from datetime import datetime
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.addons.fairness.view import widget_view
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators
from fairness_indicators import example_model
from fairness_indicators.examples import util
```
# Defining Constants
TensorFlow parses features from data using [`FixedLenFeature`](https://www.tensorflow.org/api_docs/python/tf/io/FixedLenFeature) and [`VarLenFeature`](https://www.tensorflow.org/api_docs/python/tf/io/VarLenFeature). So to allow TensorFlow to parse our data, we will need to map out our input feature, output feature, and any slicing features that we will want to analyze via Fairness Indicators.
```
BASE_DIR = tempfile.gettempdir()  # scratch directory for model checkpoints and eval results

# The input and output features of the classifier
TEXT_FEATURE = 'comment_text'  # model input: raw comment text
LABEL = 'toxicity'  # model output label (float; 1.0 = toxic, 0.0 = non-toxic)

# Parsing spec handed to TensorFlow for reading tf.Example records.
FEATURE_MAP = {
    # input and output features
    LABEL: tf.io.FixedLenFeature([], tf.float32),
    TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
    # slicing features: variable-length identity-attribute lists used by
    # Fairness Indicators to slice the evaluation
    'sexual_orientation': tf.io.VarLenFeature(tf.string),
    'gender': tf.io.VarLenFeature(tf.string),
    'religion': tf.io.VarLenFeature(tf.string),
    'race': tf.io.VarLenFeature(tf.string),
    'disability': tf.io.VarLenFeature(tf.string)
}

# Identity categories available for slicing in the analysis below.
IDENTITY_TERMS = ['gender', 'sexual_orientation', 'race', 'religion', 'disability']
```
# Data
In this exercise, we'll work with the [Civil Comments dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification), approximately 2 million public comments made public by the [Civil Comments platform](https://github.com/reaktivstudios/civil-comments) in 2017 for ongoing research. This effort was sponsored by Jigsaw, who have hosted competitions on Kaggle to help classify toxic comments as well as minimize unintended model bias.
Each individual text comment in the dataset has a toxicity label, with the label being 1 if the comment is toxic and 0 if the comment is non-toxic. Within the data, a subset of comments are labeled with a variety of identity attributes, including categories for gender, sexual orientation, religion, and race or ethnicity.
You can choose to download the original dataset and process it in the colab, which may take minutes, or you can download the preprocessed data.
```
download_original_data = True  # set False to fetch the already-preprocessed TFRecords instead

if download_original_data:
    train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord',
                                            'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord')
    validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord',
                                               'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord')

    # The identity terms list will be grouped together by their categories
    # on threshold 0.5. Only the identity term column, text column,
    # and label column will be kept after processing.
    train_tf_file = util.convert_comments_data(train_tf_file)
    validate_tf_file = util.convert_comments_data(validate_tf_file)
else:
    train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord',
                                            'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord')
    validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord',
                                               'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord')
```
# Creating a TensorFlow Model Analysis Pipeline
The Fairness Indicators library operates on [TensorFlow Model Analysis (TFMA) models](https://www.tensorflow.org/tfx/model_analysis/get_started). TFMA models wrap [TensorFlow models](https://www.tensorflow.org/guide/estimator) with additional functionality to evaluate and visualize their results. The actual evaluation occurs inside of an [Apache Beam pipeline](https://beam.apache.org/documentation/programming-guide/).
So we need to...
1. Build a TensorFlow model.
2. Build a TFMA model on top of the TensorFlow model.
3. Run the model analysis in a Beam pipeline.
# Putting it all Together
```
def embedding_fairness_result(embedding, identity_term='gender'):
    """Train a toxicity classifier using the given TF-Hub text `embedding`,
    evaluate it sliced by `identity_term`, and return the TFMA eval result."""
    timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    model_dir = os.path.join(BASE_DIR, 'train', timestamp)

    print("Training classifier for " + embedding)
    classifier = example_model.train_model(
        model_dir, train_tf_file, LABEL, TEXT_FEATURE, FEATURE_MAP, embedding)

    # A unique results path per embedding, derived from the second-to-last
    # URL component (e.g. 'nnlm-en-dim128').
    embedding_name = embedding.split('/')[-2]
    eval_result_path = os.path.join(BASE_DIR, 'eval_result', embedding_name)

    example_model.evaluate_model(
        classifier, validate_tf_file, eval_result_path, identity_term, LABEL, FEATURE_MAP)
    return tfma.load_eval_result(output_path=eval_result_path)
```
# Run TFMA & Fairness Indicators
## Fairness Indicators Metrics
Refer [here](https://github.com/tensorflow/fairness-indicators) for more information on Fairness Indicators. Below are some of the available metrics.
* [Negative Rate, False Negative Rate (FNR), and True Negative Rate (TNR)](https://en.wikipedia.org/wiki/False_positives_and_false_negatives#False_positive_and_false_negative_rates)
* [Positive Rate, False Positive Rate (FPR), and True Positive Rate (TPR)](https://en.wikipedia.org/wiki/False_positives_and_false_negatives#False_positive_and_false_negative_rates)
* [Accuracy](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/Accuracy)
* [Precision and Recall](https://en.wikipedia.org/wiki/Precision_and_recall)
* [Precision-Recall AUC](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/AUC)
* [ROC AUC](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve)
## Text Embeddings
**[TF-Hub](https://www.tensorflow.org/hub)** provides several **text embeddings**. These embeddings will serve as the feature column for our different models. For this Colab, we use the following embeddings:
* [**random-nnlm-en-dim128**](https://tfhub.dev/google/random-nnlm-en-dim128/1): random text embeddings, this serves as a convenient baseline.
* [**nnlm-en-dim128**](https://tfhub.dev/google/nnlm-en-dim128/1): a text embedding based on [A Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf).
* [**universal-sentence-encoder**](https://tfhub.dev/google/universal-sentence-encoder/2): a text embedding based on [Universal Sentence Encoder](https://arxiv.org/pdf/1803.11175.pdf).
## Fairness Indicator Results
For each of the above embeddings, we will compute fairness indicators with our `embedding_fairness_result` pipeline, and then render the results in the Fairness Indicator UI widget with `widget_view.render_fairness_indicator`.
Note that the `widget_view.render_fairness_indicator` cells may need to be run twice for the visualization to be displayed.
#### Random NNLM
```
eval_result_random_nnlm = embedding_fairness_result('https://tfhub.dev/google/random-nnlm-en-dim128/1')
widget_view.render_fairness_indicator(eval_result_random_nnlm)
```
##### NNLM
```
eval_result_nnlm = embedding_fairness_result('https://tfhub.dev/google/nnlm-en-dim128/1')
widget_view.render_fairness_indicator(eval_result_nnlm)
```
##### Universal Sentence Encoder
```
eval_result_use = embedding_fairness_result('https://tfhub.dev/google/universal-sentence-encoder/2')
widget_view.render_fairness_indicator(eval_result_use)
```
## Exercises
1. Pick an identity category, such as religion or sexual orientation, and look at False Positive Rate for the Universal Sentence Encoder. How do different slices compare to each other? How do they compare to the Overall baseline?
2. Now pick a different identity category. Compare the results of this category with the previous one. Does the model weigh one category as more "toxic" than the other? Does this change with the embedding used?
3. Does the model generally tend to overestimate or underestimate the number of toxic comments?
4. Look at the graphs for different fairness metrics. Which metrics seem most informative? Which embeddings perform best and worst for that metric?
| github_jupyter |
<div style="text-align: right" align="right"><i>Peter Norvig, 3 Jan 2020</i></div>
# Spelling Bee Puzzle
The [3 Jan. 2020 edition of the 538 Riddler](https://fivethirtyeight.com/features/can-you-solve-the-vexing-vexillology/) concerns the popular NYTimes [Spelling Bee](https://www.nytimes.com/puzzles/spelling-bee) puzzle:
> In this game, seven letters are arranged in a **honeycomb** lattice, with one letter in the center. Here’s the lattice from Dec. 24, 2019:
>
> <img src="https://fivethirtyeight.com/wp-content/uploads/2020/01/Screen-Shot-2019-12-24-at-5.46.55-PM.png?w=1136" width="150">
>
> The goal is to identify as many words as possible that meet the following criteria:
> 1. The word must be at least four letters long.
> 2. The word must include the central letter.
> 3. The word cannot include any letter beyond the seven given letters.
>
>Note that letters can be repeated. For example, the words GAME and AMALGAM are both acceptable words. Four-letter words are worth 1 point each, while five-letter words are worth 5 points, six-letter words are worth 6 points, seven-letter words are worth 7 points, etc. Words that use all of the seven letters in the honeycomb are known as **pangrams** and earn 7 bonus points (in addition to the points for the length of the word). So in the above example, MEGAPLEX is worth 15 points.
>
> ***Which seven-letter honeycomb results in the highest possible game score?*** To be a valid choice of seven letters, no letter can be repeated, it must not contain the letter S (that would be too easy) and there must be at least one pangram.
>
> For consistency, please use [this word list](https://norvig.com/ngrams/enable1.txt) to check your game score.
Since the referenced [word list](https://norvig.com/ngrams/enable1.txt) came from *my* web site, I felt somewhat compelled to solve this one. (Note I didn't make up the word list; it is a standard Scrabble word list that I happen to host a copy of.) I'll show you how I address the problem, step by step:
# Step 1: Words, Word Scores, and Pangrams
Let's start by defining some basics:
- A **valid word** is a string of at least 4 letters, with no 'S', and not more than 7 distinct letters.
- A **word list** is, well, a list of words.
- A **pangram** is a word with exactly 7 distinct letters; it scores a **pangram bonus** of 7 points.
- The **word score** is 1 for a four letter word, or the length of the word for longer words, plus any pangram bonus.
```
from typing import List, Set, Tuple, Dict
from collections import Counter, defaultdict, namedtuple
from itertools import combinations
import matplotlib.pyplot as plt
Word = str  # Type alias for a single word.

def valid(word) -> bool:
    """True iff word has at least 4 letters, no 'S', and <= 7 distinct letters."""
    if len(word) < 4 or 'S' in word:
        return False
    return len(set(word)) <= 7

def valid_words(text, valid=valid) -> List[Word]:
    """All the valid words in text (uppercased, whitespace-separated)."""
    candidates = text.upper().split()
    return [w for w in candidates if valid(w)]

def pangram_bonus(word) -> int:
    """7 bonus points when the word uses exactly 7 distinct letters, else 0."""
    distinct = len(set(word))
    return 7 if distinct == 7 else 0

def word_score(word) -> int:
    """1 point for a four-letter word; otherwise the length plus any pangram bonus."""
    if len(word) == 4:
        return 1
    return len(word) + pangram_bonus(word)
```
I'll make a mini word list to experiment with:
```
mini = valid_words('game amalgam amalgamation glam gem gems em megaplex cacciatore erotica')
mini
```
Note that `gem` and `em` are too short, `gems` has an `s` which is not allowed, and `amalgamation` has too many distinct letters (8). We're left with six valid words out of the ten candidate words. Here are examples of the other two functions in action:
```
{w for w in mini if pangram_bonus(w)}
{w: word_score(w) for w in mini}
```
# Step 2: Honeycombs and Game Scores
In a honeycomb the order of the letters doesn't matter; all that matters is:
1. The seven distinct letters in the honeycomb.
2. The one distinguished center letter.
Thus, we can represent a honeycomb as follows (I wanted to put in my own less verbose `__repr__` method):
```
class Honeycomb(namedtuple('_', 'letters, center')):
    """A honeycomb: a string of seven distinct letters plus the center letter."""
    def __repr__(self):
        return 'Honeycomb(%r, %r)' % (self.letters, self.center)

hc = Honeycomb('AEGLMPX', 'G')
hc
```
The **game score** for a honeycomb is the sum of the word scores for all the words that the honeycomb can make. How do we know if a honeycomb can make a word? It can if (1) the word contains the honeycomb's center and (2) every letter in the word is in the honeycomb.
```
def game_score(honeycomb, wordlist) -> int:
    """The total score for this honeycomb over every makable word in wordlist."""
    total = 0
    for word in wordlist:
        if can_make(honeycomb, word):
            total += word_score(word)
    return total

def can_make(honeycomb, word) -> bool:
    """True iff the word uses the center letter and only the honeycomb's letters."""
    letters, center = honeycomb
    if center not in word:
        return False
    return all(ch in letters for ch in word)
game_score(hc, mini)
{w: word_score(w) for w in mini if can_make(hc, w)}
```
# Step 3: Best Honeycomb
How many possible honeycombs are there? We can put any letter in the center, then any 6 letters around the outside (order doesn't matter); since the letter 'S' is not allowed, this gives a total of 25 × (24 choose 6) = 3,364,900 possible honeycombs. We could conceivably ask for the game score of every one of them and pick the best; that would probably take hours of computation (not seconds, and not days).
However, a key constraint of the game is that **there must be at least one pangram** in the set of words that a valid honeycomb can make. That means that a valid honeycomb must ***be*** the set of seven letters in one of the pangram words in the word list, with any of the seven letters as the center. My approach to find the best (highest scoring) honeycomb is:
* Go through all the words and find all the valid honeycombs: the 7-letter pangram letter sets, with any of the 7 letters as center.
* Compute the game score for each valid honeycomb and return a honeycomb with maximal game score.
```
def best_honeycomb(words) -> Honeycomb:
    """Return a honeycomb with highest game score on these words."""
    candidates = valid_honeycombs(words)
    return max(candidates, key=lambda hc: game_score(hc, words))

def valid_honeycombs(words) -> List[Honeycomb]:
    """Every pangram letterset paired with each of its letters as the center."""
    pangram_lettersets = {letterset(w) for w in words if pangram_bonus(w)}
    honeycombs = []
    for letters in pangram_lettersets:
        for center in letters:
            honeycombs.append(Honeycomb(letters, center))
    return honeycombs
```
I will represent a **set of letters** as a sorted string of distinct letters. Why not a Python `set` (or `frozenset` if we want it to be the key of a dict)? Because a string takes up less space in memory, and its printed representation is easier to read when debugging. Compare:
- `frozenset({'A', 'E', 'G', 'L', 'M', 'P', 'X'})`
- `'AEGLMPX'`
I'll use the name `letterset` for the function that converts a word to a set of letters, and `Letterset` for the resulting type:
```
Letterset = str  # A set of letters, represented as a sorted string like "AGLM"

def letterset(word) -> Letterset:
    """The distinct letters of `word`, sorted and joined into one string."""
    distinct = sorted(set(word))
    return ''.join(distinct)
{w: letterset(w) for w in mini}
```
Note that 'AMALGAM' and 'GLAM' have the same letterset, as do 'CACCIATORE' and 'EROTICA'.
```
valid_honeycombs(mini)
best_honeycomb(mini)
```
**We're done!** We know how to find the best honeycomb. But so far, we've only done it for the mini word list.
# Step 4: The enable1 Word List
Here's the real word list, `enable1.txt`, and some counts derived from it:
```
# Download the word list once, then count raw words with wc.
! [ -e enable1.txt ] || curl -O http://norvig.com/ngrams/enable1.txt
! wc -w enable1.txt

# Valid Spelling Bee words from the full list.
enable1 = valid_words(open('enable1.txt').read())
len(enable1)

# Pangram words, and how many distinct lettersets they form.
pangrams = [w for w in enable1 if pangram_bonus(w)]
len(pangrams)
len({letterset(w) for w in pangrams}) # pangram lettersets

# Seven candidate centers per pangram letterset.
len(valid_honeycombs(enable1))
```
To summarize, there are:
- 172,820 words in the `enable1` word list
- 44,585 valid Spelling Bee words
- 14,741 pangram words
- 7,986 distinct pangram lettersets
- 55,902 (7 × 7,986) valid pangram-containing honeycombs
How long will it take to run `best_honeycomb(enable1)`? Most of the computation time is in `game_score` (which has to look at all 44,585 valid words), so let's estimate the total time by first checking how long it takes to compute the game score of a single honeycomb:
```
%time game_score(hc, enable1)
```
Roughly 10 milliseconds on my computer (this may vary). How many minutes would it be to run `game_score` for all 55,902 valid honeycombs?
```
55902 * 10/1000 / 60
```
About 9 or 10 minutes. I could run `best_honeycomb(enable1)` right now and take a coffee break until it completes, but I think that a puzzle like this deserves a more elegant solution. I'd like to get the run time under a minute (as is suggested in [Project Euler](https://projecteuler.net/)), and I have an idea how to do it.
# Step 5: Faster Algorithm: Points Table
Here's my plan for a more efficient program:
1. Keep the same strategy of trying every pangram letterset, but do some precomputation that will make `game_score` much faster.
1. The precomputation is: compute the `letterset` and `word_score` for each word, and make a table of `{letterset: total_points}` giving the total number of word score points for all the words that correspond to each letterset. I call this a **points table**.
3. These calculations are independent of the honeycomb, so they need to be done only once, not 55,902 times.
4. `game_score2` (the name is changed because the interface has changed) takes a honeycomb and a points table as input. The idea is that every word that the honeycomb can make must have a letterset that is the same as a valid **letter subset** of the honeycomb. A valid letter subset must include the center letter, and it may or may not include each of the other 6 letters, so there are exactly $2^6 = 64$ valid letter subsets. (The function `letter_subsets(honeycomb)` computes these.)
The result of `game_score2` is the sum of the honeycomb's 64 letter subset entries in the points table.
That means that in `game_score2` we no longer need to iterate over 44,585 words and check if each word is a subset of the honeycomb. Instead we iterate over the 64 subsets of the honeycomb and for each one check—in one table lookup—whether it is a word (or more than word) and how many total points those word(s) score. Since 64 < 44,585, that's a nice optimization!
Here's the code:
```
PointsTable = Dict[Letterset, int]  # total word points keyed by letterset

def best_honeycomb(words) -> Honeycomb:
    """Return a honeycomb with highest game score on these words."""
    points_table = tabulate_points(words)
    candidates = (Honeycomb(letters, center)
                  for letters in points_table if len(letters) == 7
                  for center in letters)
    return max(candidates, key=lambda hc: game_score2(hc, points_table))

def tabulate_points(words) -> PointsTable:
    """A Counter mapping each letterset to the summed word scores of its words."""
    table = Counter()
    for word in words:
        table[letterset(word)] += word_score(word)
    return table

def letter_subsets(honeycomb) -> List[Letterset]:
    """Every subset of the honeycomb's letters that contains the center
    (2**6 = 64 subsets for a seven-letter honeycomb)."""
    subsets = []
    for size in range(1, 8):
        for combo in combinations(honeycomb.letters, size):
            candidate = ''.join(combo)
            if honeycomb.center in candidate:
                subsets.append(candidate)
    return subsets

def game_score2(honeycomb, points_table) -> int:
    """The total score for this honeycomb, summed from its subsets' table entries."""
    return sum(points_table[subset] for subset in letter_subsets(honeycomb))
```
Let's get a feel for how this works.
First `letter_subsets` (a 4-letter honeycomb makes $2^3 = 8$ subsets; 7-letter honeycombs make $2^6 = 64$):
```
letter_subsets(Honeycomb('GLAM', 'G'))
mini # Remind me again what the mini word list is?
```
Now `tabulate_points`:
```
tabulate_points(mini)
```
The letterset `'AGLM'` gets 8 points, 7 for AMALGAM and 1 for GLAM. `'ACEIORT'` gets 31 points, 17 for CACCIATORE and 14 for EROTICA. The other lettersets represent one word each.
Let's make sure we haven't broken the `best_honeycomb` function:
```
assert best_honeycomb(mini) == Honeycomb('ACEIORT', 'A')
```
# Step 6: The Solution
Finally, the solution to the puzzle:
```
%time best = best_honeycomb(enable1)
best, game_score(best, enable1)
```
**Wow! 3898 is a high score!**
And it took less than 2 seconds of computation to find the best honeycomb!
# Step 7: Even Faster Algorithm: Branch and Bound
A run time of 2 seconds is pretty good! But what if the word list were 100 times bigger? What if a honeycomb had 12 letters around the outside, not just 6? We might still be looking for ideas to speed up the computation. I happen to have one.
Consider the word 'EQUIVOKE'. It is a pangram, but what with the 'Q' and 'V' and 'K', it is not a high-scoring honeycomb, regardless of what center is used:
```
{C: game_score(Honeycomb('EIKOQUV', C), enable1)
for C in 'EQUIVOKE'}
```
It would be great if we could eliminate all seven of these honeycombs at once, rather than trying each one in turn. So my idea is to:
- Keep track of the best honeycomb and best score found so far.
- For each new pangram letterset, ask "if we weren't required to use the center letter, would this letterset score higher than the best honeycomb so far?"
- If yes, then try it with all seven centers; if not then discard it immediately.
- This is called a [**branch and bound**](https://en.wikipedia.org/wiki/Branch_and_bound) algorithm: if an **upper bound** of the new letterset's score can't beat the best honeycomb so far, then we prune a whole **branch** of the search tree consisting of the seven honeycombs that have that letterset.
What would the score of a letterset be if we weren't required to use the center letter? It turns out I can make a dummy Honeycomb and specify the empty string for the center, `Honeycomb(letters, '')`, and call `game_score2` on that. This works because of a quirk of Python: we ask if `honeycomb.center in letters`; normally in Python the expression `x in y` means "is `x` a member of the collection `y`", but when `y` is a string it means "is `x` a substring of `y`", and the empty string is a substring of every string. (If I had represented a letterset as a Python `set`, this wouldn't work.)
Thus, I can rewrite `best_honeycomb` as follows:
```
def best_honeycomb2(words) -> Honeycomb:
    """Return a honeycomb with highest game score on these words,
    using branch and bound to skip lettersets that cannot beat the best so far."""
    points_table = tabulate_points(words)
    best, best_score = None, 0
    for p in (s for s in points_table if len(s) == 7):
        # Upper bound: score with no required center ('' is a substring of
        # every word, so every subset of p counts).
        if game_score2(Honeycomb(p, ''), points_table) <= best_score:
            continue
        for center in p:
            candidate = Honeycomb(p, center)
            candidate_score = game_score2(candidate, points_table)
            if candidate_score > best_score:
                best, best_score = candidate, candidate_score
    return best
%time best_honeycomb2(enable1)
```
Same honeycomb for the answer, but four times faster—less than half a second.
# Step 8: Curiosity
I'm curious about a bunch of things.
### What's the highest-scoring individual word?
```
max(enable1, key=word_score)
```
### What are some of the pangrams?
```
pangrams[::500] # Every five-hundreth pangram
```
### What's the breakdown of reasons why words are invalid?
```
# Classify every raw word by the reason it is (in)valid; the lambda disables
# the validity filter so that all words come through.
Counter('has S' if 'S' in w else
        '< 4' if len(w) < 4 else
        '> 7' if len(set(w)) > 7 else
        'valid'
        for w in valid_words(open('enable1.txt').read(), lambda w: True))
```
There are more than twice as many words with an 'S' as there are valid words.
### About the points table: How many different letter subsets are there?
```
pts = tabulate_points(enable1)
len(pts)
```
That means there's about two valid words for each letterset.
### Which letter subsets score the most?
```
pts.most_common(10)
```
The best honeycomb, `'AEGINRT'`, is also the highest scoring letter subset on its own (although it only gets 832 of the 3,898 total points from using all seven letters).
### How many honeycombs does `best_honeycomb2` consider?
We know that `best_honeycomb` considers 7,986 × 7 = 55,902 honeycombs. How many does `best_honeycomb2` consider? We can answer that by wrapping `Honeycomb` with a decorator that counts calls:
```
def call_counter(fn):
    """Wrap fn so that each invocation bumps a .call_counter attribute
    (starting at 0) on the returned wrapper."""
    def counted(*args, **kwargs):
        counted.call_counter += 1
        return fn(*args, **kwargs)
    counted.call_counter = 0
    return counted
Honeycomb = call_counter(Honeycomb)
best = best_honeycomb2(enable1)
Honeycomb.call_counter
```
Only 8,084 honeycombs are considered. That means that most pangrams are only considered once; for only 14 pangrams do we consider all seven centers.
```
(8084 - 7986) / 7
```
# Step 9: Fancy Report
I'd like to see the actual words that each honeycomb can make, in addition to the total score, and I'm curious about how the words are divided up by letterset. Here's a function to provide such a report. I remembered that there is a `fill` function in Python (it is in the `textwrap` module) but this turned out to be a lot more complicated than I expected. I guess it is difficult to create a practical extraction and reporting tool. I feel you, [Larry Wall](http://www.wall.org/~larry/).
```
from textwrap import fill
def report(honeycomb=None, words=enable1):
    """Print stats, words, and word scores for the given honeycomb (or the best
    honeycomb if no honeycomb is given) over the given word list."""
    bins = group_by(words, letterset)  # {letterset: [words with that letterset]}
    adj = ("best " if honeycomb is None else "")
    honeycomb = honeycomb or best_honeycomb(words)
    points = game_score(honeycomb, words)
    subsets = letter_subsets(honeycomb)
    nwords = sum(len(bins[s]) for s in subsets)
    print(f'The {adj}{honeycomb} scores {Ns(points, "point")} on {Ns(nwords, "word")}',
          f'from a {len(words)} word list:\n')
    # One output line per letter subset: longest subsets first, then alphabetical.
    for s in sorted(subsets, key=lambda s: (-len(s), s)):
        if bins[s]:
            pts = sum(word_score(w) for w in bins[s])
            wcount = Ns(len(bins[s]), "pangram" if len(s) == 7 else "word")
            intro = f'{s:>7} {Ns(pts, "point"):>10} {wcount:>8} '
            words = [f'{w}:{word_score(w)}' for w in sorted(bins[s])]
            # textwrap.fill wraps the word list under an aligned column header.
            print(fill(' '.join(words), width=110,
                       initial_indent=intro, subsequent_indent=' '*8))
def Ns(n, noun):
    """A string with `n` followed by the plural or singular of noun:
    Ns(3, 'bear') => '3 bears'; Ns(1, 'world') => '1 world ' (note the
    trailing space: singular and plural render at the same width, which
    keeps the columns in `report` aligned)."""
    return f"{n:d} {noun}{' ' if n == 1 else 's'}"
def group_by(items, key):
    """Group items into a defaultdict of lists keyed by key(item);
    looking up a missing key yields an empty list."""
    grouped = defaultdict(list)
    for element in items:
        grouped[key(element)].append(element)
    return grouped
report(hc, mini)
report()
```
# Step 10: What honeycombs have a high score without a lot of words?
Michael Braverman said he dislikes puzzles with a lot of low-scoring four-letter words. Can we find succinct puzzles with lots of points but few words? With two objectives there won't be a single best answer to this question; rather we can ask: what honeycombs are there such that there are no other honeycombs with both more points and fewer words? We say such honeycombs are [**Pareto optimal**](https://en.wikipedia.org/wiki/Pareto_efficiency) and are on the **Pareto frontier**. We can find them as follows:
```
def pareto_honeycombs(words) -> list:
    """The Pareto-optimal honeycombs for `words`: a list of
    (word_count, points, honeycomb, points/word_count) entries such that
    no other honeycomb has both fewer words and more points."""
    points_table = tabulate_points(words)           # {letterset: total points}
    wcount_table = Counter(map(letterset, words))   # {letterset: number of words}
    honeycombs = (Honeycomb(letters, center)
                  for letters in points_table if len(letters) == 7
                  for center in letters)
    # Build a table of {word_count: (points, honeycomb)}
    table = defaultdict(lambda: (0, None))
    for h in honeycombs:
        points = game_score2(h, points_table)
        # Reuse the subset-summing machinery to count makable words.
        wcount = game_score2(h, wcount_table)
        table[wcount] = max(table[wcount], (points, h))
    return pareto_frontier(table)

def pareto_frontier(table) -> list:
    """The pareto frontier that minimizes word counts while maximizing points.
    Returns a list of (wcount, points, honeycomb, points/wcount) entries
    such that there is no other entry that has fewer words and more points."""
    return [(w, p, h, round(p/w, 2))
            for w, (p, h) in sorted(table.items())
            if not any(h2 != h and w2 <= w and p2 >= p
                       for w2, (p2, h2) in table.items())]
ph = pareto_honeycombs(enable1)
len(ph)
```
So there are 108 (out of 55,902) honeycombs on the Pareto frontier. We can see the first ten (sorted by word count), and every tenth one after that:
```
ph[:10] # (word count, points, honeycomb, points/wcount)
ph[10::10]
```
Let's see what the frontier looks like by plotting word counts versus points scored:
```
W, P, H, PPW = zip(*ph)
def plot(xlabel, X, ylabel, Y):
    """Dot plot of Y against X with axis labels and a grid."""
    plt.plot(X, Y, '.')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid(True)
plot('Word count', W, 'Points', P, )
```
That's somewhat surprising; usually a Pareto frontier looks like a quarter-circle; here it looks like an almost straight line. Maybe we can get a better view by plotting word counts versus the number of points per word:
```
plot('Word count', W, 'Points per word', PPW)
```
We can see all the Pareto optimal honeycombs that score more than, say, 7.6 points per word:
```
[entry for entry in ph if entry[-1] > 7.6]
```
The last few honeycombs on the right-hand side all rise above the average points/word. We can see that they are all variants of the highest-scoring honeycomb, but with different centers:
```
ph[-5:]
```
Here are reports on what I think are the most interesting low-word-count, higher-score honeycombs. I would have scored zero on the first one, and probably not much better on the second.
```
report(Honeycomb('CEGIPTX', 'G'))
report(Honeycomb('DEIORXZ', 'X'))
```
The following I think are decent puzzles:
```
report(Honeycomb('ACINOTV', 'V'))
report(Honeycomb('ACINOTU', 'U'))
```
# Step 11: S Words
What if we allowed honeycombs and words to have an 'S' in them?
```
enable1s = valid_words(open('enable1.txt').read(),
lambda w: len(w) >= 4 and len(set(w)) <= 7)
len(enable1s), len(enable1)
```
Allowing 'S' more than doubles the number of words. Will it double the score of the best honeycomb?
```
report(words=enable1s)
```
Yes it does (roughly) double the score!
# Summary
This notebook showed how to find the highest-scoring honeycomb. Thanks to a series of ideas, we were able to achieve a substantial reduction in the number of honeycombs that need to be examined (a factor of 400), the run time needed for `game_score` (a factor of about 200), and the overall run time (a factor of about 70,000).
- **Brute Force Enumeration** (3,364,900 honeycombs; 10 hours (estimate) run time)<br>Try every possible honeycomb.
- **Pangram Lettersets** (55,902 honeycombs; 10 minutes (estimate) run time)<br>Try just the honeycombs that are pangram lettersets (with every center).
- **Points Table** (55,902 honeycombs; under 2 seconds run time)<br>Precompute the score for each letterset, and sum the 64 letter subsets of each honeycomb.
- **Branch and Bound** (8,084 honeycombs; under 1/2 second run time)<br>Try every center only for lettersets that score better than the best score so far.
Here are pictures for the highest-scoring honeycombs, with and without an S:
<img src="http://norvig.com/honeycombs.png" width="350">
<center>
537 words; 3,898 points 1,179 words; 8,681 points
<br>
</center>
| github_jupyter |
# Marginal Gaussianization
* Author: J. Emmanuel Johnson
* Email: jemanjohnson34@gmail.com
In this demonstration, we will show how we can do the marginal Gaussianization on a 2D dataset using the Histogram transformation and Inverse CDF Gaussian distribution.
```
import os, sys
cwd = os.getcwd()
# sys.path.insert(0, f"{cwd}/../")
sys.path.insert(0, "/home/emmanuel/code/rbig")
from rbig.data import ToyData
from rbig.transform.gaussianization import MarginalGaussianization
# from rbig.transform.gaussianization import HistogramGaussianization, KDEGaussianization
from rbig.transform import InverseGaussCDF
import numpy as np
from scipy import stats
# Plot Functions
import matplotlib.pyplot as plt
import seaborn as sns
sns.reset_defaults()
#sns.set_style('whitegrid')
#sns.set_context('talk')
sns.set_context(context='talk',font_scale=0.7)
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
## Data
For this example, we are looking at a 2D dataset.
```
def plot_2d_joint(data, color='blue', title='Original Data'):
    """Hexbin joint plot of a 2D dataset with marginal histograms.

    Args:
        data: array of shape (n_samples, 2); column 0 is X, column 1 is Y.
        color: matplotlib/seaborn color for the hexbin and marginals.
        title: figure suptitle.

    Bug fix vs. original: `plt.figure(figsize=(5, 5))` created an extra
    blank figure that `sns.jointplot` ignores (jointplot always builds its
    own figure), and `plt.xlabel`/`plt.ylabel` went through the pyplot
    state machine, labeling whichever axes seaborn created last (a
    marginal axis) instead of the joint axis. Labels are now set on the
    JointGrid directly.
    """
    g = sns.jointplot(x=data[:, 0], y=data[:, 1], kind='hex', color=color)
    g.set_axis_labels('X', 'Y')
    plt.suptitle(title)
    plt.tight_layout()
    plt.show()
def plot_prob(data, probs, title='Probabilities'):
    """Scatter the 2D points in `data`, colored by `probs` (Reds colormap)."""
    fig, ax = plt.subplots()
    scat = ax.scatter(data[:, 0], data[:, 1], s=1, c=probs, cmap='Reds')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    # Colorbar keyed to the scatter's color mapping.
    plt.colorbar(scat)
    ax.set_title(title)
    plt.show()
seed = 123
rng = np.random.RandomState(seed=seed)
dataset = 'rbig'
n_samples = 10_000
n_features = 2
noise = 0.25
random_state=1
clusters = 2
data = ToyData(
dataset=dataset,
n_samples=n_samples,
n_features=n_features,
noise=noise,
random_state=random_state,
clusters=clusters,
).generate_samples()
X = data[:, 0]
Y = data[:, 1]
plot_2d_joint(data, title='Original Data')
```
## Uniformization Transformation
```
from rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization
# from rbig.density.histogram import ScipyHistogram, QuantileHistogram
# from rbig.den
```
#### Initialize Uniformization Algorithm
```
# INITIALIZE UNIFORMIZATION ALGORITHM
#===
# uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None)
uniform_clf = KDEUniformization(n_quantiles=50, method='fft')
# density_clf = KDEScipy(n_quantiles=50, bw_method='scott', support_extension=10)
# density_clf = KDESklearn(n_quantiles=100, support_extension=10)
```
#### Add it to Marginal Transformation Algorithm
```
mg_uniformizer = MarginalUniformization(uniform_clf)
mg_uniformizer.fit(data)
X_trans = mg_uniformizer.transform(data)
plot_2d_joint(X_trans, title='Transformed Data')
data_approx = mg_uniformizer.inverse_transform(X_trans)
plot_2d_joint(data_approx, title='Transformed Data')
X_ldj = mg_uniformizer.log_abs_det_jacobian(data)
plot_2d_joint(X_ldj, title='Transformed Data')
plot_2d_joint(np.exp(X_ldj), title='Transformed Data')
plot_prob(data, X_ldj.sum(-1), title='Log Probabilities')
plot_prob(data, np.exp(X_ldj.sum(-1)), title='Probabilities')
```
## Marginal Gaussianization
```
from rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization
from rbig.transform.gaussianization import MarginalGaussianization
uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None)
uniform_clf = KDEUniformization(n_quantiles=50, method='fft', )
mg_gaussianizer = MarginalGaussianization(uniform_clf)
mg_gaussianizer.fit(data)
X_trans = mg_gaussianizer.transform(data)
plot_2d_joint(X_trans, title='Transformed Data')
data_approx = mg_gaussianizer.inverse_transform(X_trans)
plot_2d_joint(data_approx, title='Transformed Data')
X_ldj = mg_gaussianizer.log_abs_det_jacobian(data)
plot_2d_joint(X_ldj, title='Transformed Data')
plot_2d_joint(np.exp(X_ldj), title='Transformed Data')
X_lprob = mg_gaussianizer.score_samples(data)
plot_prob(data, X_lprob, title='Log Probabilities')
plot_prob(data, np.exp(X_lprob), title='Probabilities')
```
### Negative Log Likelihood
```
X_nll = mg_gaussianizer.score(data,)
print(f"Negative Log-Likelihood Score: {X_nll:.4f}")
```
## Marginal Histogram Transformation
So, for this transformation, we are going to transform our data from the current distribution to a marginally Gaussian distribution and then perform a rotation. In theory, if we do enough of these, we will eventually convert to a Gaussian distribution.
```
# parameters
nbins = 1_000 # number of bins to do the histogram transform
alpha = 1e-05 # adds some regularization (noise)
support_extension = 10
# initialize the transformer
mg_transformer = HistogramGaussianization(
nbins=nbins,
alpha=alpha
)
# fit the transformer to the data
mg_transformer.fit(data);
```
### 1. Forward Transformation
For this transformation, we will be applying the following:
$$\Psi(\mathbf{x}) = \Phi^{-1}(\mathbf{x})$$
where $\Phi^{-1}(\cdot)$ is the inverse CDF of the Gaussian distribution.
```
data_trans = mg_transformer.transform(data)
plot_2d_joint(data_trans, title='Transformed Data')
```
So clearly we can see that the transformation works. Both of the marginals are Gaussian distributed.
### 2. Inverse Transformation
For this step, we will apply the inverse transformation:
$$\Psi^{-1}(\mathbf{x}) = \Phi \left( \mathbf{x} \right)$$
where $\Phi(\cdot)$ is the CDF of the Gaussian distribution.
```
data_approx = mg_transformer.inverse_transform(data_trans)
# Check the round-trip is (almost) the identity.
# Bug fix: `decimal` must be an *integer* number of decimal places — the
# tolerance is 1.5 * 10**(-decimal). The original passed decimal=1e-5,
# which yields a tolerance of ~1.5 and makes the assertion a no-op.
np.testing.assert_array_almost_equal(data_approx, data, decimal=5)
```
We see that this transformation is very close to the original. In fact, it's close to approximately 1e-5 decimal places. The errors will definitely stem from the boundaries.
```
# Plot results
plot_2d_joint(data_approx, title='Inverse Transformed Data')
```
## Log Absolute Determinant Jacobian
Using the derivative of inverse-functions theorem, we can calculate the derivative like so:
$$\nabla_\mathbf{x} \Phi^{-1}(\mathbf{x}) = \frac{1}{\phi (\Phi^{-1} (x)) }$$
where $\phi(\cdot)$ is the PDF of the Gaussian distribution. Taking the log of these terms gives us:
$$ \log \nabla_\mathbf{x} \Phi^{-1}(\mathbf{x}) = - \log \phi (\Phi^{-1} (x))$$
```
X_slogdet = mg_transformer.log_abs_det_jacobian(data)
print(X_slogdet.min(), X_slogdet.max())
print(np.exp(X_slogdet).min(), np.exp(X_slogdet).max())
# plot the gradients
plot_2d_joint(np.exp(X_slogdet), title='Jacobian Data')
```
## Log Probability
$$\log p_\theta(\mathbf{x}) = \log p_\theta \left( \mathbf{z} \right) + \log \left| \nabla_\mathbf{x} \mathbf{z} \right|$$
where $\mathbf{z} = \Psi(\mathbf{x})$
```
# Score samples: log-density of each data point under the model.
# (The original computed score_samples twice back-to-back — a copy-paste
# duplicate; one call suffices.)
log_prob = mg_transformer.score_samples(data)
plot_prob(data, log_prob, title='Log Probabilities')
```
## Probability
This is the same as above but without the log scale:
$$p_\theta(\mathbf{x}) = p_\theta \left( \mathbf{z} \right) \left| \nabla_\mathbf{x} \mathbf{z} \right|$$
where $\mathbf{z} = \Psi(\mathbf{x})$
```
plot_prob(data, np.exp(log_prob), title='Probabilities')
```
## Negative Log-Likelihood
We need to take the expected value (mean) of all log probabilities.
$$\text{nll} = -\frac{1}{N} \sum_{n=1}^{N} \log p_\theta(\mathbf{x})$$
```
score = mg_transformer.score(data)
print(f"Negative Log-Likelihood Score: {score:.4f}")
```
| github_jupyter |
```
!pip install -q --upgrade jax jaxlib
from __future__ import print_function, division
import jax.numpy as np
from jax import grad, jit, vmap
from jax import random
key = random.PRNGKey(0)
```
# The Autodiff Cookbook
*alexbw@, mattjj@*
JAX has a pretty general automatic differentiation system. In this notebook, we'll go through a whole bunch of neat autodiff ideas that you can cherry pick for your own work, starting with the basics.
## Gradients
### Starting with `grad`
You can differentiate a function with `grad`:
```
grad_tanh = grad(np.tanh)
print(grad_tanh(2.0))
```
`grad` takes a function and returns a function. If you have a Python function `f` that evaluates the mathematical function $f$, then `grad(f)` is a Python function that evaluates the mathematical function $\nabla f$. That means `grad(f)(x)` represents the value $\nabla f(x)$.
Since `grad` operates on functions, you can apply it to its own output to differentiate as many times as you like:
```
print(grad(grad(np.tanh))(2.0))
print(grad(grad(grad(np.tanh)))(2.0))
```
Let's look at computing gradients with `grad` in a linear logistic regression model. First, the setup:
```
def sigmoid(x):
    """Logistic function, written via tanh: 0.5 * (tanh(x/2) + 1)."""
    half_range = np.tanh(x / 2)
    return (half_range + 1) * 0.5
# Outputs probability of a label being true.
def predict(W, b, inputs):
    """Probability that each row of `inputs` has a true label, via logistic regression."""
    logits = np.dot(inputs, W) + b
    return sigmoid(logits)
# Build a toy dataset.
inputs = np.array([[0.52, 1.12, 0.77],
[0.88, -1.08, 0.15],
[0.52, 0.06, -1.30],
[0.74, -2.49, 1.39]])
targets = np.array([True, True, False, True])
# Training loss is the negative log-likelihood of the training examples.
def loss(W, b):
    """Negative log-likelihood of the global (inputs, targets) batch."""
    probs = predict(W, b, inputs)
    # Probability the model assigns to the *correct* label of each example.
    correct_probs = probs * targets + (1 - probs) * (1 - targets)
    return -np.sum(np.log(correct_probs))
# Initialize random model coefficients
key, W_key, b_key = random.split(key, 3)
W = random.normal(W_key, (3,))
b = random.normal(b_key, ())
```
Use the `grad` function with its `argnums` argument to differentiate a function with respect to positional arguments.
```
# Differentiate `loss` with respect to the first positional argument:
W_grad = grad(loss, argnums=0)(W, b)
print('W_grad', W_grad)
# Since argnums=0 is the default, this does the same thing:
W_grad = grad(loss)(W, b)
print('W_grad', W_grad)
# But we can choose different values too, and drop the keyword:
b_grad = grad(loss, 1)(W, b)
print('b_grad', b_grad)
# Including tuple values
W_grad, b_grad = grad(loss, (0, 1))(W, b)
print('W_grad', W_grad)
print('b_grad', b_grad)
```
This `grad` API has a direct correspondence to the excellent notation in Spivak's classic *Calculus on Manifolds* (1965), also used in Sussman and Wisdom's [*Structure and Interpretation of Classical Mechanics*](http://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html) (2015) and their [*Functional Differential Geometry*](https://mitpress.mit.edu/books/functional-differential-geometry) (2013). Both books are open-access. See in particular the "Prologue" section of *Functional Differential Geometry* for a defense of this notation.
Essentially, when using the `argnums` argument, if `f` is a Python function for evaluating the mathematical function $f$, then the Python expression `grad(f, i)` evaluates to a Python function for evaluating $\partial_i f$.
### Differentiating with respect to nested lists, tuples, and dicts
Differentiating with respect to standard Python containers just works, so use tuples, lists, and dicts (and arbitrary nesting) however you like.
```
def loss2(params_dict):
    """Negative log-likelihood, taking parameters packed in a dict ('W', 'b')."""
    probs = predict(params_dict['W'], params_dict['b'], inputs)
    correct_probs = probs * targets + (1 - probs) * (1 - targets)
    return -np.sum(np.log(correct_probs))
print(grad(loss2)({'W': W, 'b': b}))
```
You can [register your own container types](https://github.com/google/jax/issues/446#issuecomment-467105048) to work with not just `grad` but all the JAX transformations (`jit`, `vmap`, etc.).
### Evaluate a function and its gradient using `value_and_grad`
Another convenient function is `value_and_grad` for efficiently computing both a function's value as well as its gradient's value:
```
from jax import value_and_grad
loss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b)
print('loss value', loss_value)
print('loss value', loss(W, b))
```
### Checking against numerical differences
A great thing about derivatives is that they're straightforward to check with finite differences:
```
# Set a step size for finite differences calculations
eps = 1e-4
# Check b_grad with scalar finite differences
b_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps
print('b_grad_numerical', b_grad_numerical)
print('b_grad_autodiff', grad(loss, 1)(W, b))
# Check W_grad with finite differences in a random direction
key, subkey = random.split(key)
vec = random.normal(subkey, W.shape)
unitvec = vec / np.sqrt(np.vdot(vec, vec))
W_grad_numerical = (loss(W + eps / 2. * unitvec, b) - loss(W - eps / 2. * unitvec, b)) / eps
print('W_dirderiv_numerical', W_grad_numerical)
print('W_dirderiv_autodiff', np.vdot(grad(loss)(W, b), unitvec))
```
JAX provides a simple convenience function that does essentially the same thing, but checks up to any order of differentiation that you like:
```
from jax.test_util import check_grads
check_grads(loss, (W, b), order=2) # check up to 2nd order derivatives
```
### Hessian-vector products with `grad`-of-`grad`
One thing we can do with higher-order `grad` is build a Hessian-vector product function. (Later on we'll write an even more efficient implementation that mixes both forward- and reverse-mode, but this one will use pure reverse-mode.)
A Hessian-vector product function can be useful in a [truncated Newton Conjugate-Gradient algorithm](https://en.wikipedia.org/wiki/Truncated_Newton_method) for minimizing smooth convex functions, or for studying the curvature of neural network training objectives (e.g. [1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)).
For a scalar-valued function $f : \mathbb{R}^n \to \mathbb{R}$, the Hessian at a point $x \in \mathbb{R}^n$ is written as $\partial^2 f(x)$. A Hessian-vector product function is then able to evaluate
$\qquad v \mapsto \partial^2 f(x) \cdot v$
for any $v \in \mathbb{R}^n$.
The trick is not to instantiate the full Hessian matrix: if $n$ is large, perhaps in the millions or billions in the context of neural networks, then that might be impossible to store.
Luckily, `grad` already gives us a way to write an efficient Hessian-vector product function. We just have to use the identity
$\qquad \partial^2 f (x) v = \partial [x \mapsto \partial f(x) \cdot v] = \partial g(x)$,
where $g(x) = \partial f(x) \cdot v$ is a new scalar-valued function that dots the gradient of $f$ at $x$ with the vector $v$. Notice that we're only ever differentiating scalar-valued functions of vector-valued arguments, which is exactly where we know `grad` is efficient.
In JAX code, we can just write this:
```
def hvp(f, x, v):
    """Hessian-vector product ∂²f(x)·v via reverse-over-reverse autodiff.

    Args:
        f: scalar-valued function of an array argument.
        x: point at which to evaluate the Hessian.
        v: vector to multiply by (same shape as x).

    Bug fix: the original returned `grad(...)` without applying it to `x`,
    so callers got back a *function* instead of the Hessian-vector product.
    """
    return grad(lambda x: np.vdot(grad(f)(x), v))(x)
```
This example shows that you can freely use lexical closure, and JAX will never get perturbed or confused.
We'll check this implementation a few cells down, once we see how to compute dense Hessian matrices. We'll also write an even better version that uses both forward-mode and reverse-mode.
## Jacobians and Hessians using `jacfwd` and `jacrev`
You can compute full Jacobian matrices using the `jacfwd` and `jacrev` functions:
```
from jax import jacfwd, jacrev
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
J = jacfwd(f)(W)
print("jacfwd result, with shape", J.shape)
print(J)
J = jacrev(f)(W)
print("jacrev result, with shape", J.shape)
print(J)
```
These two functions compute the same values (up to machine numerics), but differ in their implementation: `jacfwd` uses forward-mode automatic differentiation, which is more efficient for "tall" Jacobian matrices, while `jacrev` uses reverse-mode, which is more efficient for "wide" Jacobian matrices. For matrices that are near-square, `jacfwd` probably has an edge over `jacrev`.
You can also use `jacfwd` and `jacrev` with container types:
```
def predict_dict(params, inputs):
    """Run `predict` with weights and bias pulled from a params dict."""
    W, b = params['W'], params['b']
    return predict(W, b, inputs)
J_dict = jacrev(predict_dict)({'W': W, 'b': b}, inputs)
for k, v in J_dict.items():
print("Jacobian from {} to logits is".format(k))
print(v)
```
For more details on forward- and reverse-mode, as well as how to implement `jacfwd` and `jacrev` as efficiently as possible, read on!
Using a composition of two of these functions gives us a way to compute dense Hessian matrices:
```
def hessian(f):
    """Dense Hessian of `f`, computed forward-over-reverse (jacfwd of jacrev)."""
    grad_fn = jacrev(f)
    return jacfwd(grad_fn)
H = hessian(f)(W)
print("hessian, with shape", H.shape)
print(H)
```
This shape makes sense: if we start with a function $f : \mathbb{R}^n \to \mathbb{R}^m$, then at a point $x \in \mathbb{R}^n$ we expect to get the shapes
* $f(x) \in \mathbb{R}^m$, the value of $f$ at $x$,
* $\partial f(x) \in \mathbb{R}^{m \times n}$, the Jacobian matrix at $x$,
* $\partial^2 f(x) \in \mathbb{R}^{m \times n \times n}$, the Hessian at $x$,
and so on.
To implement `hessian`, we could have used `jacrev(jacrev(f))` or `jacrev(jacfwd(f))` or any other composition of the two. But forward-over-reverse is typically the most efficient. That's because in the inner Jacobian computation we're often differentiating a function with a wide Jacobian (maybe like a loss function $f : \mathbb{R}^n \to \mathbb{R}$), while in the outer Jacobian computation we're differentiating a function with a square Jacobian (since $\nabla f : \mathbb{R}^n \to \mathbb{R}^n$), which is where forward-mode wins out.
## How it's made: two foundational autodiff functions
### Jacobian-Vector products (JVPs, aka forward-mode autodiff)
JAX includes efficient and general implementations of both forward- and reverse-mode automatic differentiation. The familiar `grad` function is built on reverse-mode, but to explain the difference in the two modes, and when each can be useful, we need a bit of math background.
#### JVPs in math
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}^m$, the Jacobian matrix of $f$ evaluated at an input point $x \in \mathbb{R}^n$, denoted $\partial f(x)$, is often thought of as a matrix in $\mathbb{R}^m \times \mathbb{R}^n$:
$\qquad \partial f(x) \in \mathbb{R}^{m \times n}$.
But we can also think of $\partial f(x)$ as a linear map, which maps the tangent space of the domain of $f$ at the point $x$ (which is just another copy of $\mathbb{R}^n$) to the tangent space of the codomain of $f$ at the point $f(x)$ (a copy of $\mathbb{R}^m$):
$\qquad \partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$.
This map is called the [pushforward map](https://en.wikipedia.org/wiki/Pushforward_(differential)) of $f$ at $x$. The Jacobian matrix is just the matrix for this linear map in a standard basis.
If we don't commit to one specific input point $x$, then we can think of the function $\partial f$ as first taking an input point and returning the Jacobian linear map at that input point:
$\qquad \partial f : \mathbb{R}^n \to \mathbb{R}^n \to \mathbb{R}^m$.
In particular, we can uncurry things so that given input point $x \in \mathbb{R}^n$ and a tangent vector $v \in \mathbb{R}^n$, we get back an output tangent vector in $\mathbb{R}^m$. We call that mapping, from $(x, v)$ pairs to output tangent vectors, the *Jacobian-vector product*, and write it as
$\qquad (x, v) \mapsto \partial f(x) v$
#### JVPs in JAX code
Back in Python code, JAX's `jvp` function models this transformation. Given a Python function that evaluates $f$, JAX's `jvp` is a way to get a Python function for evaluating $(x, v) \mapsto (f(x), \partial f(x) v)$.
```
from jax import jvp
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
key, subkey = random.split(key)
v = random.normal(subkey, W.shape)
# Push forward the vector `v` along `f` evaluated at `W`
y, u = jvp(f, (W,), (v,))
```
In terms of Haskell-like type signatures, we could write
```haskell
jvp :: (a -> b) -> a -> T a -> (b, T b)
```
where we use `T a` to denote the type of the tangent space for `a`. In words, `jvp` takes as arguments a function of type `a -> b`, a value of type `a`, and a tangent vector value of type `T a`. It gives back a pair consisting of a value of type `b` and an output tangent vector of type `T b`.
The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a "JVP rule" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values.
That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 2x the cost of just evaluating the function. Put another way, for a fixed primal point $x$, we can evaluate $v \mapsto \partial f(x) \cdot v$ for about the same cost as evaluating $f$.
That memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning?
To answer that, first think about how you could use a JVP to build a full Jacobian matrix. If we apply a JVP to a one-hot tangent vector, it reveals one column of the Jacobian matrix, corresponding to the nonzero entry we fed in. So we can build a full Jacobian one column at a time, and to get each column costs about the same as one function evaluation. That will be efficient for functions with "tall" Jacobians, but inefficient for "wide" Jacobians.
If you're doing gradient-based optimization in machine learning, you probably want to minimize a loss function from parameters in $\mathbb{R}^n$ to a scalar loss value in $\mathbb{R}$. That means the Jacobian of this function is a very wide matrix: $\partial f(x) \in \mathbb{R}^{1 \times n}$, which we often identify with the Gradient vector $\nabla f(x) \in \mathbb{R}^n$. Building that matrix one column at a time, with each call taking a similar number of FLOPs to evaluating the original function, sure seems inefficient! In particular, for training neural networks, where $f$ is a training loss function and $n$ can be in the millions or billions, this approach just won't scale.
To do better for functions like this, we just need to use reverse-mode.
### Vector-Jacobian products (VJPs, aka reverse-mode autodiff)
Where forward-mode gives us back a function for evaluating Jacobian-vector products, which we can then use to build Jacobian matrices one column at a time, reverse-mode is a way to get back a function for evaluating vector-Jacobian products (equivalently Jacobian-transpose-vector products), which we can use to build Jacobian matrices one row at a time.
#### VJPs in math
Let's again consider a function $f : \mathbb{R}^n \to \mathbb{R}^m$.
Starting from our notation for JVPs, the notation for VJPs is pretty simple:
$\qquad (x, v) \mapsto v \partial f(x)$,
where $v$ is an element of the cotangent space of $f$ at $x$ (isomorphic to another copy of $\mathbb{R}^m$). When being rigorous, we should think of $v$ as a linear map $v : \mathbb{R}^m \to \mathbb{R}$, and when we write $v \partial f(x)$ we mean function composition $v \circ \partial f(x)$, where the types work out because $\partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. But in the common case we can identify $v$ with a vector in $\mathbb{R}^m$ and use the two almost interchangeably, just like we might sometimes flip between "column vectors" and "row vectors" without much comment.
With that identification, we can alternatively think of the linear part of a VJP as the transpose (or adjoint conjugate) of the linear part of a JVP:
$\qquad (x, v) \mapsto \partial f(x)^\mathsf{T} v$.
For a given point $x$, we can write the signature as
$\qquad \partial f(x)^\mathsf{T} : \mathbb{R}^m \to \mathbb{R}^n$.
The corresponding map on cotangent spaces is often called the [pullback](https://en.wikipedia.org/wiki/Pullback_(differential_geometry))
of $f$ at $x$. The key for our purposes is that it goes from something that looks like the output of $f$ to something that looks like the input of $f$, just like we might expect from a transposed linear function.
#### VJPs in JAX code
Switching from math back to Python, the JAX function `vjp` can take a Python function for evaluating $f$ and give us back a Python function for evaluating the VJP $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$.
```
from jax import vjp
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
y, vjp_fun = vjp(f, W)
key, subkey = random.split(key)
u = random.normal(subkey, y.shape)
# Pull back the covector `u` along `f` evaluated at `W`
v = vjp_fun(u)
```
In terms of Haskell-like type signatures, we could write
```haskell
vjp :: (a -> b) -> a -> (b, CT b -> CT a)
```
where we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`.
This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$ is only about twice the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \mathbb{R}^n \to \mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters.
There's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!).
For more on how reverse-mode works, see [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/).
## Hessian-vector products using both forward- and reverse-mode
In a previous section, we implemented a Hessian-vector product function just using reverse-mode:
```
def hvp(f, x, v):
    """Hessian-vector product ∂²f(x)·v using only reverse-mode.

    Bug fix: the original returned the `grad(...)` closure without calling
    it at `x`, so it never actually produced the product.
    """
    return grad(lambda x: np.vdot(grad(f)(x), v))(x)
```
That's efficient, but we can do even better and save some memory by using forward-mode together with reverse-mode.
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}$ to differentiate, a point $x \in \mathbb{R}^n$ at which to linearize the function, and a vector $v \in \mathbb{R}^n$, the Hessian-vector product function we want is
$(x, v) \mapsto \partial^2 f(x) v$
Consider the helper function $g : \mathbb{R}^n \to \mathbb{R}^n$ defined to be the derivative (or gradient) of $f$, namely $g(x) = \partial f(x)$. All we need is its JVP, since that will give us
$(x, v) \mapsto \partial g(x) v = \partial^2 f(x) v$.
We can translate that almost directly into code:
```
from jax import jvp, grad
# forward-over-reverse
def hvp(f, primals, tangents):
    """Hessian-vector product, forward-over-reverse: JVP of grad(f)."""
    _, tangent_out = jvp(grad(f), primals, tangents)
    return tangent_out
```
Even better, since we didn't have to call `np.dot` directly, this `hvp` function works with arrays of any shape and with arbitrary container types (like vectors stored as nested lists/dicts/tuples), and doesn't even have a dependence on `jax.numpy`.
Here's an example of how to use it:
```
def f(X):
    """Scalar test function: sum of squared tanh over all entries of X."""
    squashed = np.tanh(X)
    return np.sum(squashed ** 2)
key, subkey1, subkey2 = random.split(key, 3)
X = random.normal(subkey1, (30, 40))
V = random.normal(subkey2, (30, 40))
ans1 = hvp(f, (X,), (V,))
ans2 = np.tensordot(hessian(f)(X), V, 2)
print(np.allclose(ans1, ans2, 1e-4, 1e-4))
```
Another way you might consider writing this is using reverse-over-forward:
```
# reverse-over-forward
def hvp_revfwd(f, primals, tangents):
    """Hessian-vector product, reverse-over-forward: grad of a directional JVP."""
    def directional(p):
        # Forward-mode directional derivative of f at p along `tangents`.
        return jvp(f, p, tangents)[1]
    return grad(directional)(primals)
```
That's not quite as good, though, because forward-mode has less overhead than reverse-mode, and since the outer differentiation operator here has to differentiate a larger computation than the inner one, keeping forward-mode on the outside works best:
```
# reverse-over-reverse, only works for single arguments
def hvp_revrev(f, primals, tangents):
    """Hessian-vector product, reverse-over-reverse (single argument only)."""
    (x,) = primals
    (v,) = tangents
    dot_with_grad = lambda y: np.vdot(grad(f)(y), v)
    return grad(dot_with_grad)(x)
print("Forward over reverse")
%timeit -n10 -r3 hvp(f, (X,), (V,))
print("Reverse over forward")
%timeit -n10 -r3 hvp_revfwd(f, (X,), (V,))
print("Reverse over reverse")
%timeit -n10 -r3 hvp_revrev(f, (X,), (V,))
print("Naive full Hessian materialization")
%timeit -n10 -r3 np.tensordot(hessian(f)(X), V, 2)
```
## Composing VJPs, JVPs, and `vmap`
### Jacobian-Matrix and Matrix-Jacobian products
Now that we have `jvp` and `vjp` transformations that give us functions to push-forward or pull-back single vectors at a time, we can use JAX's [`vmap` transformation](https://github.com/google/jax#auto-vectorization-with-vmap) to push and pull entire bases at once. In particular, we can use that to write fast matrix-Jacobian and Jacobian-matrix products.
```
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
# Pull back the covectors `m_i` along `f`, evaluated at `W`, for all `i`.
# First, use a list comprehension to loop over rows in the matrix M.
def loop_mjp(f, x, M):
    """Matrix-Jacobian product: pull back each row of M with one VJP, then stack."""
    _, pullback = vjp(f, x)
    rows = [pullback(covec) for covec in M]
    return np.vstack(rows)
# Now, use vmap to build a computation that does a single fast matrix-matrix
# multiply, rather than an outer loop over vector-matrix multiplies.
def vmap_mjp(f, x, M):
    """Matrix-Jacobian product: batch all the VJPs at once with vmap."""
    _, pullback = vjp(f, x)
    return vmap(pullback)(M)
key = random.PRNGKey(0)
num_covecs = 128
U = random.normal(key, (num_covecs,) + y.shape)
loop_vs = loop_mjp(f, W, M=U)
print('Non-vmapped Matrix-Jacobian product')
%timeit -n10 -r3 loop_mjp(f, W, M=U)
print('\nVmapped Matrix-Jacobian product')
vmap_vs = vmap_mjp(f, W, M=U)
%timeit -n10 -r3 vmap_mjp(f, W, M=U)
assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Matrix-Jacobian Products should be identical'
def loop_jmp(f, x, M):
    """Jacobian-matrix product: one JVP per row of M, stacked (loop version).

    Bug fix: the original ignored its `x` and `M` parameters and read the
    notebook globals `W` and `S` instead, so calling it with any other
    arguments silently computed the wrong product.
    """
    # jvp returns (primal_out, tangent_out); keep only the tangents.
    return np.vstack([jvp(f, (x,), (si,))[1] for si in M])
def vmap_jmp(f, x, M):
    """Jacobian-matrix product via a single vmapped JVP.

    Bug fix: the original closed over the notebook global `W` instead of
    using its `x` parameter.
    """
    push = lambda s: jvp(f, (x,), (s,))[1]
    return vmap(push)(M)
num_vecs = 128
S = random.normal(key, (num_vecs,) + W.shape)
loop_vs = loop_jmp(f, W, M=S)
print('Non-vmapped Jacobian-Matrix product')
%timeit -n10 -r3 loop_jmp(f, W, M=S)
vmap_vs = vmap_jmp(f, W, M=S)
print('\nVmapped Jacobian-Matrix product')
%timeit -n10 -r3 vmap_jmp(f, W, M=S)
assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Jacobian-Matrix products should be identical'
```
### The implementation of `jacfwd` and `jacrev`
Now that we've seen fast Jacobian-matrix and matrix-Jacobian products, it's not hard to guess how to write `jacfwd` and `jacrev`. We just use the same technique to push-forward or pull-back an entire standard basis (isomorphic to an identity matrix) at once.
```
from jax import jacrev as builtin_jacrev
def our_jacrev(f):
    """Reverse-mode Jacobian: pull back the full standard basis with vmap."""
    def jacfun(x):
        y, pullback = vjp(f, x)
        # The standard basis of the output space — pulling it back yields
        # every row of the Jacobian in one batched VJP.
        basis = np.eye(len(y))
        J, = vmap(pullback, in_axes=0)(basis)
        return J
    return jacfun
assert np.allclose(builtin_jacrev(f)(W), our_jacrev(f)(W)), 'Incorrect reverse-mode Jacobian results!'
from jax import jacfwd as builtin_jacfwd
def our_jacfwd(f):
    """Forward-mode Jacobian: push forward the full standard basis with vmap."""
    def jacfun(x):
        pushforward = lambda s: jvp(f, (x,), (s,))[1]
        # Pushing each basis column gives a Jacobian column; transpose back.
        Jt = vmap(pushforward, in_axes=1)(np.eye(len(x)))
        return np.transpose(Jt)
    return jacfun
assert np.allclose(builtin_jacfwd(f)(W), our_jacfwd(f)(W)), 'Incorrect forward-mode Jacobian results!'
```
Interestingly, [Autograd](https://github.com/hips/autograd) couldn't do this. Our [implementation of reverse-mode `jacobian` in Autograd](https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/differential_operators.py#L60) had to pull back one vector at a time with an outer-loop `map`. Pushing one vector at a time through the computation is much less efficient than batching it all together with `vmap`.
Another thing that Autograd couldn't do is `jit`. Interestingly, no matter how much Python dynamism you use in your function to be differentiated, we could always use `jit` on the linear part of the computation. For example:
```
def f(x):
    """Demo of Python dynamism under autodiff: 2x^3 below 3, else pi*x via an exception."""
    try:
        if not (x < 3):
            raise ValueError
        return 2 * x ** 3
    except ValueError:
        return np.pi * x
y, f_vjp = vjp(f, 4.)
print(jit(f_vjp)(1.))
```
## Complex numbers and differentiation
JAX is great at complex numbers and differentiation. To support both [holomorphic and non-holomorphic differentiation](https://en.wikipedia.org/wiki/Holomorphic_function), JAX follows [Autograd's convention](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#complex-numbers) for encoding complex derivatives.
Consider a complex-to-complex function $f: \mathbb{C} \to \mathbb{C}$ that we break down into its component real-to-real functions:
```
def f(z):
    # Illustrative only: u and v stand for the real-valued component functions
    # of f, and real/imag extract the parts of z (none are defined here).
    x, y = real(z), imag(z)
    # The components must be *summed* to form a single complex number,
    # f(z) = u(x, y) + v(x, y) i, as defined in the surrounding text
    # (the original returned a tuple).
    return u(x, y) + v(x, y) * 1j
```
That is, we've decomposed $f(z) = u(x, y) + v(x, y) i$ where $z = x + y i$. We define `grad(f)` to correspond to
```
def grad_f(z):
    # Illustrative only: grad(u, k) denotes the partial derivative of the
    # real component function u with respect to its k-th argument.
    x, y = real(z), imag(z)
    # v is discarded entirely: grad(f) uses only the real part u of f.
    return grad(u, 0)(x, y) + grad(u, 1)(x, y) * 1j
```
In math symbols, that means we define $\partial f(z) \triangleq \partial_0 u(x, y) + \partial_1 u(x, y) i$. So we throw out $v$, ignoring the complex component function of $f$ entirely!
This convention covers three important cases:
1. If `f` evaluates a holomorphic function, then we get the usual complex derivative, since $\partial_0 u = \partial_1 v$ and $\partial_1 u = - \partial_0 v$.
2. If `f` evaluates the real-valued loss function of a complex parameter `x`, then we get a result that we can use in gradient-based optimization by taking steps in the direction of the conjugate of `grad(f)(x)`.
3. If `f` evaluates a real-to-real function, but its implementation uses complex primitives internally (some of which must be non-holomorphic, e.g. FFTs used in convolutions) then we get the same result that an implementation that only used real primitives would have given.
By throwing away `v` entirely, this convention does not handle the case where `f` evaluates a non-holomorphic function and you want to evaluate all of $\partial_0 u$, $\partial_1 u$, $\partial_0 v$, and $\partial_1 v$ at once. But in that case the answer would have to contain four real values, and so there's no way to express it as a single complex number.
You should expect complex numbers to work everywhere in JAX. Here's differentiating through a Cholesky decomposition of a complex matrix:
```
# Hermitian test matrix (equal to its own conjugate transpose).
A = np.array([[5., 2.+3j, 5j],
              [2.-3j, 7., 1.+7j],
              [-5j, 1.-7j, 12.]])
def f(X):
    # Sum of squared deviations between L = chol(X) and sin(L).
    L = np.linalg.cholesky(X)
    return np.sum((L - np.sin(L))**2)
# NOTE(review): f's output is complex here; recent JAX versions require
# grad(f, holomorphic=True) for complex-valued outputs -- confirm against
# the installed JAX version.
grad(f)(A)
```
For primitives' JVP rules, writing the primals as $z = a + bi$ and the tangents as $t = c + di$, we define the Jacobian-vector product $t \mapsto \partial f(z) \cdot t$ as
$t \mapsto
\begin{matrix} \begin{bmatrix} 1 & 1 \end{bmatrix} \\ ~ \end{matrix}
\begin{bmatrix} \partial_0 u(a, b) & -\partial_0 v(a, b) \\ - \partial_1 u(a, b) i & \partial_1 v(a, b) i \end{bmatrix}
\begin{bmatrix} c \\ d \end{bmatrix}$.
See Chapter 4 of [Dougal's PhD thesis](https://dougalmaclaurin.com/phd-thesis.pdf) for more details.
# More advanced autodiff
In this notebook, we worked through some easy, and then progressively more complicated, applications of automatic differentiation in JAX. We hope you now feel that taking derivatives in JAX is easy and powerful.
There's a whole world of other autodiff tricks and functionality out there. Topics we didn't cover, but hope to cover in an "Advanced Autodiff Cookbook", include:
- Gauss-Newton Vector Products, linearizing once
- Custom VJPs and JVPs
- Efficient derivatives at fixed-points
- Estimating the trace of a Hessian using random Hessian-vector products.
- Forward-mode autodiff using only reverse-mode autodiff.
- Taking derivatives with respect to custom data types.
- Checkpointing (binomial checkpointing for efficient reverse-mode, not model snapshotting).
- Optimizing VJPs with Jacobian pre-accumulation.
| github_jupyter |
# In-Class Coding Lab: Lists
The goals of this lab are to help you understand:
- List indexing and slicing
- List methods such as insert, append, find, delete
- How to iterate over lists with loops
## Python Lists work like Real-Life Lists
In real life, we make lists all the time. To-Do lists. Shopping lists. Reading lists. These lists are collections of items, for example here's my shopping list:
```
Milk, Eggs, Bread, Beer
```
There are 4 items in this list.
Likewise, we can make a similar list in Python, and count the number of items in the list using the `len()` function:
```
# Build a 4-item shopping list and report its size with len().
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
item_count = len(shopping_list)
print("List: %s has %d items" % (shopping_list, item_count))
```
## Enumerating Your List Items
In real-life, we *enumerate* lists all the time. We go through the items on our list one at a time and make a decision, for example: "Did I add that to my shopping cart yet?"
In Python we go through items in our lists with the `for` loop. We use `for` because the number of items is pre-determined and thus a **definite** loop is the appropriate choice.
Here's an example:
```
# Definite loop: visit each item of the list once, in order.
for item in shopping_list:
    print("I need to buy some %s " % (item))
```
## Now You Try It!
Write code in the space below to print each stock on its own line.
```
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
#TODO: Write code here
print("Here are the stocks I invested $ 1 000 000 000 ")
# One ticker symbol per line.
for ticker in stocks:
    print(ticker)
```
## Indexing Lists
Sometimes we refer to our items by their place in the list. For example "Milk is the first item on the list" or "Beer is the last item on the list."
We can also do this in Python, and it is called *indexing* the list.
**IMPORTANT** The first item in a Python list is at index **0**.
```
# Positive indexes count from the front (0-based); negative from the back.
print("The first item in the list is:", shopping_list[0])
print("The last item in the list is:", shopping_list[3])
print("This is also the last item in the list:", shopping_list[-1])
print("This is the second to last item in the list:", shopping_list[-2])
```
## For Loop with Index
You can also loop through your Python list using an index. In this case we use the `range()` function to determine how many times we should loop:
```
# Index-based loop (kept deliberately: the lesson demonstrates looping
# by index; range(len(...)) yields 0 .. len-1).
for i in range(len(shopping_list)):
    print("I need to buy some %s " % (shopping_list[i]))
```
## Now You Try It!
Write code to print the 2nd and 4th stocks in the list variable `stocks`. For example:
`AAPL MSFT`
```
#TODO: Write code here
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
# The 2nd and 4th entries live at indexes 1 and 3 (0-based).
second, fourth = stocks[1], stocks[3]
print(second, fourth)
```
## Lists are Mutable
Unlike strings, lists are mutable. This means we can change a value in the list.
For example, I want `'Craft Beer'` not just `'Beer'`:
```
# Lists are mutable: replace the last element in place (index -1).
print(shopping_list)
shopping_list[-1] = 'Craft Beer'
print(shopping_list)
```
## List Methods
In your readings and class lecture, you encountered some list methods. These allow us to manipulate the list by adding or removing items.
```
# Demonstrate append / insert / remove / del, printing the list after each.
print("Shopping List: %s" %(shopping_list))
print("Adding 'Cheese' to the end of the list...")
shopping_list.append('Cheese') #add to end of list
print("Shopping List: %s" %(shopping_list))
print("Adding 'Cereal' to position 0 in the list...")
shopping_list.insert(0,'Cereal') # add to the beginning of the list (position 0)
print("Shopping List: %s" %(shopping_list))
print("Removing 'Cheese' from the list...")
shopping_list.remove('Cheese') # remove 'Cheese' from the list
print("Shopping List: %s" %(shopping_list))
print("Removing item from position 0 in the list...")
del shopping_list[0] # remove item at position 0
print("Shopping List: %s" %(shopping_list))
```
## Now You Try It!
Write a program to remove the following stocks: `IBM` and `TWTR`
Then add this stock to the end `NFLX` and this stock to the beginning `TSLA`
Print your list when you are done. It should look like this:
`['TSLA', 'AAPL', 'GOOG', 'MSFT', 'FB', 'NFLX']`
```
# TODO: Write Code here
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
# Drop the unwanted tickers first, then add the new ones at each end.
for ticker in ('IBM', 'TWTR'):
    stocks.remove(ticker)
stocks.insert(0, 'TSLA')
stocks.append("NFLX")
print(stocks)
```
## Sorting
Since Lists are mutable. You can use the `sort()` method to re-arrange the items in the list alphabetically (or numerically if it's a list of numbers)
```
# sort() reorders the list in place (alphabetically for strings).
print("Before Sort:", shopping_list)
shopping_list.sort()
print("After Sort:", shopping_list)
```
# Putting it all together
Winning Lotto numbers. When the lotto numbers are drawn, they are in any order; when they are presented, they're always sorted. Let's write a program to input 5 numbers and then output them sorted.
```
1. for i in range(5)
2. input a number
3. append the number you input to the lotto_numbers list
4. sort the lotto_numbers list
5. print the lotto_numbers list like this:
'today's winning numbers are [1, 5, 17, 34, 56]'
```
```
## TODO: Write program here:
lotto_numbers = [] # start with an empty list
for i in range(5):
    inp = input("input a number: ")
    # Convert to int before storing: sorting strings orders them
    # lexicographically (e.g. '10' < '5'), not numerically.
    lotto_numbers.append(int(inp))
lotto_numbers.sort()
#print(lotto_numbers)
print("today's winning numbers are", lotto_numbers)
import random
```
##### 5
##### 5
#####
| github_jupyter |
### 1. Conditionals. Study the following code:
<code>
print ("statement A")
if x > 0:
print ("statement B")
elif x < 0:
print( "statement C")
else:
print ("statement D")
print ( "statement E")
</code>
```
# Interactive quiz: echo back the student's answers about the code above.
ans=input("Which of the statements above (A, B, C, D, E) will be printed if x < 0?\n")
print (ans)
ans=input("Which of the statements above will be printed if x == 0?\n")
print (ans)
ans=input("Which of the statements above will be printed if x > 0?\n")
print (ans)
```
### 2. What are the value(s) returned after executing the following range code(s)
```
# Interactive quiz: student types a list literal; compare it to the real value.
# NOTE(review): eval() on raw user input executes arbitrary code -- acceptable
# for a classroom quiz, never for untrusted input.
my_answer = eval (input (f'What is the value of list(range(5))?\n'))
print(my_answer)
if my_answer== list(range(5)):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
my_answer = eval (input (f'What is the value of list(range(1,10))?\n'))
print(my_answer)
if my_answer== list(range(1,10)):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
my_answer = eval (input (f'What is the value of list(range(1,30,5))?\n'))
print(my_answer)
if my_answer== list(range(1,30,5)):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
# range(1,10,-3) is empty: a negative step never moves from 1 toward 10.
my_answer = eval (input (f'What is the value of list(range(1,10,-3))?\n'))
print(my_answer)
if my_answer== list(range(1,10, -3)):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
```
### 3. What argument(s) could we give to the range() built-in function if we wanted the following lists to be generated?
0, 1, 2, 3, 4, 5, 6, 7, 8, 9
```
# Yields 0, 1, ..., 9 when materialized with list().
range(0,10)
```
3, 6, 9, 12, 15, 18
```
# Yields 3, 6, 9, 12, 15, 18 (20 is the exclusive upper bound).
range(3,20,3)
```
-20, 200, 420, 640, 860
```
# Yields -20, 200, 420, 640, 860 (861 is the exclusive upper bound).
range(-20,861,220)
```
### 4. Consider the following variables have been defined with these values:
<code>
a = 3
b = 0
c = -4
d = 10
</code>
Answer what would be the result of the following Boolean expressions:
```
# Interactive quiz on Boolean expressions; variables are re-bound before each
# question so earlier answers cannot perturb later ones.
a = 3; b = 0; c = -4; d = 10
my_answer = eval (input (f'What is the value of (a > c) and (d != b)?\n'))
print(my_answer)
if my_answer== ( (a > c) and (d != b) ):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
a = 3; b = 0; c = -4; d = 10
my_answer = eval (input (f'What is the value of (c <= b) or (b<= d)?\n'))
print(my_answer)
if my_answer== ( (c <= b) or (b<= d)):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
a = 3; b = 0; c = -4; d = 10
my_answer = eval (input (f'What is the value of not((c == d) and (c < a))?\n'))
print(my_answer)
if my_answer == (not((c == d) and (c < a))):
    print("You are Correct")
else:
    print("Wrong Answer, Try Again")
```
### 5.
Write a program to play the guess the number game. The computer will pick a
number between 1 and 100 and the user will try to guess the number in as few guesses
as possible. Input: The user will enter a guess until the correct guess is entered The
program will keep asking for a guess until the correct guess is entered. The program will
also give hints to the user (number is too high, or too low) Output: The program will
output the number of guesses. Below is a sample run of how the program should
proceed:
guess_game()<br><br>
I am thinking of a number between 1 and 100
<br>Can you guess what the number is?
<br>Enter your guess<br>
10
<br>Too low!
<br>Enter your guess<br>
60
<br>Too high!
<br>Enter your guess<br>
45
<br>You win!!
<br>You solved the problem in 3 guesses
```
import random
def guess_game():
    """Guess-the-number game: 1-100, high/low hints, reports the guess count.

    Loops until the correct number is entered, per the stated spec
    (the original silently gave up after 1000 guesses without any message).
    """
    cnt = 0
    randomNum = random.randint(1, 100)
    print('I am thinking of a number between 1 and 100'+' \n'+'Can you guess what the number is?')
    while True:
        print('Enter your guess')
        guessNum = int(input())
        cnt += 1
        if guessNum < randomNum:
            print('Too low!')
        elif guessNum > randomNum:
            print('Too high!')
        else:
            break
    # str(cnt) inline instead of rebinding cnt to a string (type-stable).
    print('You win!!'+"\n"+'You solved the problem in ' + str(cnt) + ' guesses!')

guess_game()
```
#### 6
Loops. Write a program to have the user input three (3) numbers: (f)rom, <br>(t)o, and (i)ncrement. Count from f to t in increments of i, inclusive of <br> f and t. For example, if the input is f == 2, t == 24, and i == 4,<br> the program would output: 2, 6, 10, 14, 18, 22.
```
def my_loop(f, t, i):
    """Count from f to t (inclusive) in increments of i, printing each value.

    Example: my_loop(2, 24, 4) prints 2, 6, 10, 14, 18, 22.
    The original mutated f inside a range(f, t) loop and printed f-i; range
    with t + 1 expresses the inclusive upper bound directly.
    """
    for value in range(f, t + 1, i):
        print(value)

my_loop(2,24,4)
```
| github_jupyter |
Importing Libraries
```
import random
```
Defining Variables
```
# Game-state flags: `playing` controls a single round, `game_session` the session.
playing = True
game_session = True
# Deck vocabulary and blackjack card values. An Ace counts as 11 initially;
# Hand.adjust_ace_value demotes it to 1 when the hand would bust.
suits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']
ranks = ['Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace']
values = {
    'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8,
    'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':11
}
```
Defining Class
```
# Defining Card Class
class Card:
    """A single playing card, identified by its rank and suit."""

    def __init__(self, rank, suit):
        self.suit = suit
        self.rank = rank

    def __str__(self):
        # Human-readable form, e.g. "Ace of Spades".
        return f'{self.rank} of {self.suit}'
# Defining Deck Class
class Deck:
    """A 52-card deck built from the module-level `suits` and `ranks`."""

    def __init__(self):
        self.value = 0
        # One card per (suit, rank) combination, suits outermost.
        self.deck = [Card(rank, suit) for suit in suits for rank in ranks]

    def __str__(self):
        complete_deck = ''.join(f'\n {card}' for card in self.deck)
        return f'The deck has {complete_deck}'

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deck)

    def deal(self):
        """Remove and return the top (last) card."""
        return self.deck.pop()
# Defining Hand Class
class Hand:
    """The cards a participant holds, with a running blackjack value."""

    def __init__(self):
        self.cards = []
        self.value = 0
        self.aces = 0  # aces currently counted as 11

    def add_card(self, card):
        """Add `card` and update the value via the module-level `values` table."""
        self.cards.append(card)
        self.value += values[card.rank]
        self.aces += int(card.rank == 'Ace')

    def adjust_ace_value(self):
        """While the hand busts, re-count aces as 1 instead of 11."""
        while self.aces and self.value > 21:
            self.value -= 10
            self.aces -= 1
# Defining Chips Class
class Chips:
    """Tracks a player's bankroll and the bet for the current round."""

    def __init__(self, total=100):
        self.total = total  # chips available
        self.bet = 0        # amount wagered this round

    def bet_won(self):
        """Credit the current bet to the bankroll."""
        self.total = self.total + self.bet

    def bet_lost(self):
        """Debit the current bet from the bankroll."""
        self.total = self.total - self.bet
```
Defining Functions
```
# Function for taking the bet (loops until a valid amount is entered)
def bet_this(chips):
    """Prompt for an integer bet no larger than chips.total; store it on chips.bet."""
    while True:
        try:
            chips.bet = int(input(f"\nHow much would you like to bet?\n(You have total of {chips.total} chips)\n"))
        except ValueError:
            # Non-integer input: explain and re-prompt.
            print('Sorry, the bet amount should be an integer value\n')
        else:
            if chips.bet > chips.total:
                print(f"Sorry you don't have that many chips. You only have {chips.total} chips.\n")
            else:
                print(f"You bet {chips.bet} chips on this round!\n")
                break
# Function to show some cards
def show_some_cards(dealer, player):
    """Print the table mid-round: the dealer's first card stays hidden."""
    print(f"Dealer's Card: <Card Hidden> {dealer.cards[1]}\n")
    print("Your Cards:")
    print(*player.cards, sep='\n')
    print(f"Your current value: {player.value}\n")
# Function to show all the cards
def show_all_cards(dealer, player):
    """Print both hands fully revealed (end of round)."""
    print("Dealer's Cards:")
    print(*dealer.cards, sep='\n')
    print(f"Dealer's final value: {dealer.value}\n")
    print("Your Cards:")
    print(*player.cards, sep='\n')
    print(f"Your final value: {player.value}")
# Function to ask whether the player wants to hit or stand
def hit_or_stand(dealer_hand, player_hand, deck, chips):
    """Prompt hit/stand repeatedly; mutates the global `playing` flag.

    Sets `playing = False` (ending the round loop in the main script) when
    the player busts (>21) or hits exactly 21.
    """
    global playing
    while True:
        choice = input("Would you like to Hit or Stand?\nEnter 'h' or 's'\n")
        if choice[0].lower() == 'h':
            print('Player wants to hit\n')
            hit_card(player_hand, deck)
            player_hand.adjust_ace_value()
            if player_hand.value > 21:
                # Bust: player loses immediately.
                player_lost(chips)
                show_some_cards(dealer_hand, player_hand)
                playing = False
                break
            elif player_hand.value == 21:
                # Blackjack value reached: player wins immediately.
                player_won(chips)
                show_some_cards(dealer_hand, player_hand)
                playing = False
                break
            else:
                show_some_cards(dealer_hand, player_hand)
        elif choice[0].lower() == 's':
            print('Player wants to stay\n')
            break
        else:
            # Unrecognized input: re-prompt.
            print('Please select from the given options only!\n')
            continue
# Function for dealing the card
def hit_card(hand, deck):
    """Deal one card from `deck` into `hand`, re-checking ace values."""
    hand.add_card(deck.deal())
    hand.adjust_ace_value()
# Functions for all the game ending scenarios
def player_won(chips):
    """Credit the bet and announce the player's win."""
    chips.bet_won()
    print('\nCongratulation! You WON!!!')
    print(f'You won {chips.bet} chips\nYou have {chips.total} chips in total.\n')
def player_lost(chips):
    """Debit the bet and announce the player's loss."""
    chips.bet_lost()
    print('\nOoops!!')
    print(f'Sorry you lost {chips.bet} chips\nYou have {chips.total} chips remaining.\n')
def dealer_won(chips):
    """Debit the bet and announce the dealer's win."""
    chips.bet_lost()
    # Fixed typos in the user-facing message ('Opps! Dealor' -> 'Oops! Dealer').
    print('\nOops! Dealer Won!')
    print(f'Sorry you lost {chips.bet} chips\nYou have {chips.total} chips remaining.\n')
def dealer_lost(chips):
    """Credit the bet and announce the dealer's loss (player wins)."""
    chips.bet_won()
    print('\nDealer lost! That means YOU WON!!!')
    print(f'You won {chips.bet} chips\nYou have {chips.total} chips in total.\n')
def tie():
    """Announce a push; no chips change hands."""
    print('\nDealer and Player Tie!')
```
Game Begins
```
# Welcoming the player
print('Welcome to the game of Blackjack\n')
# Initializing the player chips
player_chips = Chips(100)
# Outer loop: one iteration per round; `game_session` ends the whole session.
while game_session:
    playing = True
    # Getting a fresh deck and shuffling it
    deck = Deck()
    deck.shuffle()
    # Dealing Player Hand
    player_hand = Hand()
    player_hand.add_card(deck.deal())
    player_hand.add_card(deck.deal())
    # Dealing Dealer Hand
    dealer_hand = Hand()
    dealer_hand.add_card(deck.deal())
    dealer_hand.add_card(deck.deal())
    # Asking for the bet amount and showing some cards
    bet_this(player_chips)
    show_some_cards(dealer_hand, player_hand)
    # A natural 21 on the opening hand wins immediately.
    if player_hand.value == 21:
        player_won(player_chips)
        print(f"YOU won {player_chips.bet} chips!!!")
        print(f"You now have a total of {player_chips.total} chips")
    else:
        # hit_or_stand sets the global `playing` to False if the player
        # busts or reaches 21; otherwise the dealer plays below.
        hit_or_stand(dealer_hand, player_hand, deck, player_chips)
        while playing:
            # Dealer draws to 17, then the hands are compared once.
            while dealer_hand.value < 17:
                dealer_hand.add_card(deck.deal())
            if dealer_hand.value > 21:
                dealer_lost(player_chips)
                show_all_cards(dealer_hand, player_hand)
                break
            elif dealer_hand.value > player_hand.value:
                dealer_won(player_chips)
                show_all_cards(dealer_hand, player_hand)
                break
            elif dealer_hand.value < player_hand.value:
                player_won(player_chips)
                show_all_cards(dealer_hand, player_hand)
                break
            else:
                tie()
                show_all_cards(dealer_hand, player_hand)
                break
    play_again = input("\nDo you want to play again?\nEnter 'y' or 'n'\n")
    if play_again[0].lower() == 'y':
        # Only continue if the player still has chips to bet.
        if player_chips.total > 0:
            continue
        else:
            print('Sorry you do not have any chips left.\nThank you for playing with us!')
            game_session = False
    else:
        game_session = False
        print('Thanks for playing with us')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append('../src')
from ratio_space import ratiospace_division, nCk, origin_vector
from myutils import get_figratio, plot_hist, cumulative_bins
# Plot simplex grids for N = 2..7 components (one subplot each).
# `ratiospace_division` / `origin_vector` come from the project module
# `ratio_space`; assumed to return grid points projected onto the simplex
# -- TODO confirm against ../src/ratio_space.py.
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(9, 9))
N = 3
K = 10
for N, _ax in enumerate(ax.flat):
    # Reuse N as the loop variable, shifted so the first subplot is N=2.
    N += 2
    p = origin_vector(ratiospace_division(N,K))
    if N <= 2:
        # 1-D case: plot points along a line.
        _ax.scatter(p, np.zeros(len(p)), 10, 'k')
    else:
        _ax.scatter(p.T[0], p.T[1], 10, 'k', alpha=0.1)
    # Simplex outline from the K=1 (vertices-only) division.
    p = origin_vector(ratiospace_division(N,1))
    poly = plt.Polygon(p[:,:2], alpha=0.2, fc='g')
    _ax.add_patch(poly)
    _ax.set_aspect('equal')
    _ax.set_ylim(-1.1, 1.1)
    _ax.set_xlim(-1.1, 1.1)
plt.tight_layout()
# Same simplex plots, now with per-component (min, max) constraints overlaid.
fig, ax = plt.subplots(3, 3, figsize=(14, 14))
N = 6
K = 10
for N, _ax in enumerate(ax.flat, start=2):
    # Unconstrained simplex vertices (red markers).
    p = origin_vector(ratiospace_division(N, 1))
    if N==2:
        _ax.scatter(p, np.zeros(p.shape), 1, 'r')
        _ax.plot(p, np.zeros(p.shape), 'g', alpha=0.3, linewidth=10)
    else:
        _ax.scatter(p.T[0], p.T[1], 1, 'r')
    # Identical (0.1, 0.9) bounds on every one of the N components.
    cons_base = (0.1, 0.9)
    cons = [(0.1, 0.9)]*N
    P = ratiospace_division(N, K, constraints=cons)
    p = origin_vector(P)
    # NOTE(review): df is built but never used in this cell.
    df = pd.DataFrame(np.c_[P, p])
    if N==2:
        _ax.scatter(p, np.zeros(p.shape), 10, 'g', label='1d-Simplex')
    else:
        _ax.scatter(p.T[0], p.T[1], 10, 'g', alpha=0.3, label=f'Constraint:{cons_base}')
    p = origin_vector(ratiospace_division(N,1))
    poly = plt.Polygon(p[:,:2], alpha=0.2, fc='g', label=f'{N-1}d-Simplex')
    _ax.add_patch(poly)
    _ax.legend()
    _ax.set_ylim(-1.1, 1.1)
    _ax.set_xlim(-1.1, 1.1)
    _ax.set_aspect('equal')
plt.savefig('../image/simplex_grid.png')
```
# Asset Portfolio
```
# Asset classes with (min, max) allocation bounds for the portfolio grid.
nb_ex = 50
dic = {
    'Stock' : (0.05, 0.1),
    'FX' : (0.05, 0.1),
    'Deposit' : (0.1, 0.4),
    'Real Estate' : (0.3, 0.7),
}
# Order asset classes by their upper bound. `axis` must be passed by keyword:
# the positional form sort_values('max', 1) was deprecated and removed in
# pandas 2.0.
keys = pd.DataFrame(dic, index=['min', 'max']).sort_values('max', axis=1).columns
cons = [dic[x] for x in keys]
for n, c in zip(keys, cons):
    print(n, c)
# Grid of allocation combinations satisfying the constraints
# (ratiospace_division is a project function from ../src/ratio_space).
grid_data = ratiospace_division(4, nb_ex, constraints=cons)
print('Portion combination', grid_data.shape)
# Stacked histograms of the grid values for each asset class, colored by the
# first asset's allocation level.
fig, ax = plt.subplots(ncols=len(keys), figsize=(16, 3))
# Label each grid row by which unique level of the first column it uses.
label = np.digitize(grid_data[:,0], np.unique(grid_data[:,0]))-1
unique_label = np.unique(label)
color = unique_label/unique_label.max()
color = plt.get_cmap('rainbow')(color)
color[:, -1] = 0.6  # set alpha channel
for _ax, data, k in zip(ax.flat, grid_data.T, keys):
    unique_data = len(np.unique(data))
    h, e = np.histogram(data, bins=unique_data)
    title=f'{k} [{len(h)}cut]'
    _ax.set_title(title)
    _btm = np.zeros(len(e[:-1]))
    # Stack one bar layer per label level.
    for l, c in zip(np.unique(label), color):
        idx = np.where(label==l)[0]
        h, _ = np.histogram(data[idx], bins=e)
        _ax.bar(e[:-1], h, e[1:]-e[:-1], _btm, align='edge', fc=c, ec='w')
        _btm += h
    # At most 13 tick positions to keep labels readable.
    _e = e if len(e)<13 else e[np.linspace(0,len(e)-1,13).astype(int)]
    _ax.set_xticks(_e)
    _ax.set_xticklabels([f'{x:.3f}'for x in _e], rotation=90)
plt.tight_layout()
# Build the vertex coordinates of the constrained region for the last three
# assets (presumably used by plot_ratiospace below) -- TODO confirm intent.
p = np.ones((3, 3))
for i in nCk(3, 2):
    j = list(set(np.arange(3))-set(i))
    for _i in i:
        # Pin the selected components at their lower bounds.
        p[j, _i] = cons[_i+1][0]
    # NOTE(review): m is computed but never used.
    m = p[j]==1
    # Remaining component takes up the rest of the budget.
    p[j, j] -= p[j, i].sum()
def plot_ratiospace(grid_data, label, p, fname=None):
    """Plot one simplex panel per level of `label`, optionally saving to `fname`.

    Relies on the module-level `keys` for titles/annotations and on the project
    helpers get_figratio / origin_vector.
    """
    ul = np.unique(label)
    nc, nr = get_figratio(len(ul))
    fig, ax = plt.subplots(nr, nc, figsize=(5*nc, 5*nr), sharex=True, sharey=True)
    color = plt.get_cmap('rainbow')(label/label.max())
    for _ax, _ul in zip(ax.flat, ul):
        idx = np.where(_ul==label)[0]
        # Constrained-region outline and its vertices.
        poly = plt.Polygon(origin_vector(p), alpha=0.1, color='b')
        _p = origin_vector(p)
        _ax.scatter(_p.T[0], _p.T[1], 4, c='r')
        # Panel title: the first asset's (constant) allocation in this slice.
        _ax.set_title(f'{keys[0]}:{grid_data[idx][:,0][0]:.0%}')
        for i, _p in enumerate(origin_vector(p)):
            _i = np.argmax(p[i])
            _text = f'{keys[_i+1]}:{p[_i].max():.0%}'
            _ax.text(_p[0], _p[1], _text)
        _ax.add_patch(poly)
        # Remaining assets projected into the simplex plane.
        data = origin_vector(grid_data[idx][:,1:])
        _ax.scatter(data.T[0], data.T[1], 10, marker='H', c=color[idx], alpha=0.6)
        _ax.set_aspect('equal')
    if fname:
        fig.suptitle(fname.split('/')[-1])
        plt.savefig(fname)
        plt.close()

plot_ratiospace(grid_data, label, p)
def gen_matrix(edges, data, value=None, func=(np.mean, np.std), return_labels=False, debug=False):
    """Bin multi-dimensional `data` by `edges` and average each cell.

    Parameters
    ----------
    edges : sequence of 1-D arrays, one per dimension (histogram bin edges).
    data : array of shape (ndim, nsamples).
    value : optional 1-D array (nsamples,) to aggregate per cell with `func`.
    func : callables applied to `value` per cell (default: mean and std).
        Changed from a mutable list default to a tuple; callers passing
        lists are unaffected.
    return_labels : also return the per-dimension bin labels.
    debug : print intermediate shapes/cells.

    Returns
    -------
    matrix : array (bins_0, ..., bins_{ndim-1}, ndim) of per-cell means of
        `data` along each dimension (zeros for empty cells); plus `stats`
        and/or `labels` depending on the flags.
    """
    # 0-based bin index of every sample along each dimension.
    labels = np.array([np.digitize(d, e, right=True) for e, d in zip(edges, data)]) - 1
    _shape = [len(x) - 1 for x in edges]
    _shape.append(len(edges))
    if debug:
        print(_shape)
    matrix = np.zeros(tuple(_shape))
    mask = np.ones(matrix.shape[:-1])
    if value is not None:  # idiomatic form of the original `not value is None`
        stats = np.zeros(tuple(_shape[:-1] + [len(func)]))
    if debug:
        print(data.shape, matrix.shape, labels.shape, mask.shape)
    check = 0
    # Visit every cell; successively filter the sample indices that fall in it.
    for i in zip(*np.where(mask)):
        _idx = np.arange(data.shape[-1])
        for j, k in enumerate(i):
            _tmp = np.where(labels[j][_idx] == k)[0]
            _idx = _idx[_tmp]
            if len(_idx) == 0:
                break
        if len(_idx) == 0:
            continue  # empty cell: leave zeros
        for j in range(len(i)):
            _data = data[j][_idx]
            if len(_data) > 0:
                matrix[i][j] = _data.mean()
        if debug:
            print(i, len(_idx), matrix[i], end='\n')
        check += len(_idx)
        if value is None:
            continue
        for j, _f in enumerate(func):
            stats[i][j] = _f(value[_idx])
    if debug:
        print(check)
    if return_labels:
        if value is not None:
            return matrix, stats, labels
        return matrix, labels
    if value is not None:
        return matrix, stats
    return matrix
```
# Example
```
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 -- this cell requires an older sklearn; confirm the pinned version.
from sklearn import datasets as ds
dic = ds.load_boston()
dic.keys()
df = pd.DataFrame(dic.data, columns=dic.feature_names)
n = df.shape[1]
nr, nc = get_figratio(n)
fig, ax = plt.subplots(nc, nr, figsize=(16, 9 ))
print(len(ax), df.shape, nr, nc)
# Histogram every feature onto the prepared axes grid.
df.hist(ax=ax.flat[:n])
fig.tight_layout()
# Hand-picked bin edges for the skewed/categorical-looking features.
base_bins = {
    'B' : [0, 330, 400],
    'CHAS' : [0, 0.1, 0.9, 1],
    'CRIM' : [0, 10, 100],
    'INDUS' : [0, 15, 40],
    'RAD' : [0, 12, 30],
    'TAX' : [100, 500, 600, 800],
    'ZN' : [0, 10, 100]
}
for line in dic.DESCR.split('\n'):
    print(line)
_ = plot_hist(dic.target)
```
| github_jupyter |
## Udacity SDCND - Term 2: MPC Project ##
### I. The Model
I have used **classroom model**.
i. State
- x: position in x direction
- y: position in y direction
- psi: steering angle
- v: velocity of the car
- cte: cross-track error along the y axis
- epsi: error in the steering angle
ii. Actuators
- delta: applied steering angle
- a: applied throttle
iii. Update Equations
- x<sub>t</sub> = x<sub>t-1</sub> + v<sub>t-1</sub> \* cos(psi<sub>t-1</sub>) \* dt
- y<sub>t</sub> = y<sub>t-1</sub> + v<sub>t-1</sub> \* sin(psi<sub>t-1</sub>) \* dt
- psi<sub>t</sub> = psi<sub>t-1</sub> + (v<sub>t-1</sub>/Lf) \* delta<sub>t-1</sub> \* dt
- v<sub>t</sub> = v<sub>t-1</sub> + a<sub>t-1</sub> \* dt
- cte<sub>t</sub> = (f<sub>t-1</sub> - y<sub>t-1</sub>) + (v<sub>t-1</sub> \* sin(epsi<sub>t-1</sub>) \* dt)
- epsi<sub>t</sub> = ((psi<sub>t-1</sub> - psides<sub>t-1</sub>) - ((v<sub>t-1</sub>/Lf) \* delta<sub>t-1</sub> \* dt))
**f** is the value of the 3rd degree polynomial representing the reference line at the current value of x.
**psides** is the desired psi, which is the tangential angle of the derivative of the polynomial at that point.
### II. Timestep Length and Elapsed Duration
The final values chosen are **N=10** and **dt=0.1**.
If the value of N is too small, we cannot predict the future well. If the value is too large, we may plan too far into the future, which may not be what we expect. The values chosen for N and dt are 10 and 0.1 respectively. These values were found by trial and error: I also tested 7/0.5, 9/0.25, and 18/0.05 before settling on 10 and 0.1.
### III. Polynomial Fitting
The waypoint co-ordinates received from the simulator are first converted into the car's co-ordinate system, where the car is the origin; this is done in `Main.cpp::Lines 104 - 114`.
The converted co-ordinates are fit to a polynomial at `Main.cpp::Lines 124` using the polyfit method.
### IV. Model Predictive Control with Latency
In order to account for the 100 ms latency, the initial state of the car supplied by the simulator is updated using the same model described above.
Here, the **latency** period is used as the time gap **dt**.
Below is the code block from `Main.cpp`.
```cpp
const double current_px = 0.0 + v * act_latency;
const double current_py = 0.0;
const double current_psi = 0.0 + v * (-delta) / Lf * act_latency;
const double current_v = v + a * act_latency;
const double current_cte = cte + v * sin(epsi) * act_latency;
const double current_epsi = epsi + v * (-delta) / Lf * act_latency;
```
| github_jupyter |
# Multivariate Resemblance Analysis (MRA) Dataset A
In this notebook the multivariate resemblance analysis of Dataset A is performed for all STDG approaches.
```
#import libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os
print('Libraries imported!!')
#define directory of functions and actual directory
HOME_PATH = '' #home directory of the project
FUNCTIONS_DIR = 'EVALUATION FUNCTIONS/RESEMBLANCE'
ACTUAL_DIR = os.getcwd()
#change directory to functions directory so the local module can be imported
os.chdir(HOME_PATH + FUNCTIONS_DIR)
#import functions for multivariate resemblance analysis
from multivariate_resemblance import get_numerical_correlations
from multivariate_resemblance import plot_correlations
from multivariate_resemblance import get_categorical_correlations
from multivariate_resemblance import compute_mra_score
#change back to the original working directory
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
```
## 1. Read real and synthetic datasets
In this part real and synthetic datasets are read.
```
#Define global variables
DATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']
SYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']
# CSV path per dataset variant (one real, four synthetic generators).
FILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/A_Diabetes_Data_Real_Train.csv',
             'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/A_Diabetes_Data_Synthetic_GM.csv',
             'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/A_Diabetes_Data_Synthetic_SDV.csv',
             'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/A_Diabetes_Data_Synthetic_CTGAN.csv',
             'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/A_Diabetes_Data_Synthetic_WGANGP.csv'}
categorical_columns = ['gender','age','admission_type_id','discharge_disposition_id','admission_source_id','max_glu_serum',
                       'A1Cresult','change','diabetesMed','readmitted']
data = dict()
#iterate over all datasets filepaths and read each dataset,
#casting the categorical columns to pandas 'category' dtype
for name, path in FILEPATHS.items() :
    data[name] = pd.read_csv(path)
    for col in categorical_columns :
        data[name][col] = data[name][col].astype('category')
data
```
## 2. Plot PPC matrixes and calculate matrixes norms
```
#compute correlation matrixes for all datasets
cors_numerical = dict()
norms_numerical = dict()
for name in DATA_TYPES :
    cors_numerical[name], norms_numerical[name] = get_numerical_correlations(data[name])
norms_numerical
# One panel per dataset; only the last panel carries the shared color bar.
fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5))
# NOTE(review): range(6) exceeds the 5 axes; zip truncates, so it works,
# but range(len(DATA_TYPES)) would express the intent.
axs_idxs = range(6)
idx = dict(zip(DATA_TYPES,axs_idxs))
for name_idx, name in enumerate(DATA_TYPES) :
    ax = axs[idx[name]]
    matrix = cors_numerical[name]
    if name_idx != len(DATA_TYPES) - 1:
        plot_correlations(matrix, ax, color_bar=False)
    else:
        plot_correlations(matrix, ax, color_bar=True)
    if name_idx > 0:
        ax.set_yticks([])
    # Synthetic panels are titled with their MRA score against the real data.
    if name == 'Real' :
        ax.set_title(name)
    else :
        score = compute_mra_score(cors_numerical['Real'], matrix)
        ax.set_title(name + ' (' + str(score) + ')')
fig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/PPC_Matrices.svg', bbox_inches='tight')
```
## 3. Plot correlations for categorical variables and calculate matrixes norms
```
#compute correlation matrixes for all datasets (categorical variables)
cors_categorical = dict()
norms_categorical = dict()
for name in DATA_TYPES :
    cors_categorical[name], norms_categorical[name] = get_categorical_correlations(data[name])
norms_categorical
# One panel per dataset; only the last panel carries the shared color bar.
fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5))
# Sized to the data (the original used range(6) for 5 axes); also dropped
# the unused `first = True` flag.
axs_idxs = range(len(DATA_TYPES))
idx = dict(zip(DATA_TYPES,axs_idxs))
for name_idx, name in enumerate(DATA_TYPES) :
    ax = axs[idx[name]]
    matrix = cors_categorical[name]
    if name_idx != len(DATA_TYPES) - 1:
        plot_correlations(matrix, ax, color_bar=False)
    else:
        plot_correlations(matrix, ax, color_bar=True)
    if name_idx > 0:
        ax.set_yticks([])
    # Synthetic panels are titled with their MRA score against the real data.
    if name == 'Real' :
        ax.set_title(name)
    else :
        score = compute_mra_score(cors_categorical['Real'], matrix)
        ax.set_title(name + ' (' + str(score) + ')')
fig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/Categorical_Matrices.svg', bbox_inches='tight')
```
## 4. Explore the results
```
norms_numerical
norms_categorical
# Assemble both norm families into one table (columns = dataset variants)
# and persist it as CSV.
norms_data = [np.asarray(list(norms_numerical.values())), np.asarray(list(norms_categorical.values()))]
df_norms = pd.DataFrame(data=norms_data, columns=DATA_TYPES, index=['PPC_MATRIX_NORMS','CATEGORICAL_CORS_MATRIX_NORMS'])
df_norms.to_csv('MULTIVARIATE RESEMBLANCE RESULTS/Correlation_Matrix_Norms.csv')
df_norms
```
| github_jupyter |
```
from qiskit.tools.jupyter import *
from qiskit import IBMQ
# Authenticate and select the single-qubit Armonk device (OpenPulse-capable).
IBMQ.load_account()
#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
provider=IBMQ.get_provider(hub='ibm-q-research', group='uni-maryland-1', project='main')
backend = provider.get_backend('ibmq_armonk')
backend_config = backend.configuration()
assert backend_config.open_pulse, "Backend doesn't support Pulse"
dt = backend_config.dt
print(f"Sampling time: {dt*1e9} ns") # The configuration returns dt in seconds, so multiply by
# 1e9 to get nanoseconds
backend_defaults = backend.defaults()
import numpy as np
# unit conversion factors -> all backend properties returned in SI (Hz, sec, etc)
GHz = 1.0e9 # Gigahertz
MHz = 1.0e6 # Megahertz
us = 1.0e-6 # Microseconds
ns = 1.0e-9 # Nanoseconds
# We will find the qubit frequency for the following qubit.
qubit = 0
# The sweep will be centered around the estimated qubit frequency.
center_frequency_Hz = backend_defaults.qubit_freq_est[qubit] # The default frequency is given in Hz
# warning: this will change in a future release
print(f"Qubit {qubit} has an estimated frequency of {center_frequency_Hz / GHz} GHz.")
# scale factor to remove factors of 10 from the data
scale_factor = 1e-14
# We will sweep 40 MHz around the estimated frequency
# NOTE(review): frequency_span_Hz is 20 MHz, so the actual sweep below is
# +/- 10 MHz around the center -- the comments overstate the span.
frequency_span_Hz = 20 * MHz
# in steps of 1 MHz.
frequency_step_Hz = 1 * MHz
# We will sweep 20 MHz above and 20 MHz below the estimated frequency
frequency_min = center_frequency_Hz - frequency_span_Hz / 2
frequency_max = center_frequency_Hz + frequency_span_Hz / 2
# Construct an np array of the frequencies for our experiment
frequencies_GHz = np.arange(frequency_min / GHz,
                            frequency_max / GHz,
                            frequency_step_Hz / GHz)
print(f"The sweep will go from {frequency_min / GHz} GHz to {frequency_max / GHz} GHz \
in steps of {frequency_step_Hz / MHz} MHz.")
# number of shots for our experiments
NUM_SHOTS = 1024
# samples need to be multiples of 16
# samples need to be multiples of 16
def get_closest_multiple_of_16(num):
    """Round num to the NEAREST multiple of 16 (pulse durations must be
    multiples of 16 samples)."""
    shifted = int(num + 8)          # +8 so truncation rounds to nearest
    return shifted - (shifted % 16)
from qiskit import pulse            # This is where we access all of our Pulse features!
from qiskit.pulse import Play
# This Pulse module helps us build sampled pulses for common pulse shapes
from qiskit.pulse import library as pulse_lib

# Drive pulse parameters (us = microseconds)
drive_sigma_us = 0.075                      # This determines the actual width of the gaussian
drive_samples_us = drive_sigma_us*8         # This is a truncating parameter, because gaussians don't have
                                            # a natural finite length

# Convert to hardware units (dt); durations must be multiples of 16 samples.
drive_sigma = get_closest_multiple_of_16(drive_sigma_us * us /dt)       # The width of the gaussian in units of dt
drive_samples = get_closest_multiple_of_16(drive_samples_us * us /dt)   # The truncating parameter in units of dt
drive_amp = 0.05
# Drive pulse samples
drive_pulse = pulse_lib.gaussian(duration=drive_samples,
                                 sigma=drive_sigma,
                                 amp=drive_amp,
                                 name='freq_sweep_excitation_pulse')

# Find out which group of qubits need to be acquired with this qubit
meas_map_idx = None
for i, measure_group in enumerate(backend_config.meas_map):
    if qubit in measure_group:
        meas_map_idx = i
        break
assert meas_map_idx is not None, f"Couldn't find qubit {qubit} in the meas_map!"

# Reuse the backend's calibrated measurement schedule for this measurement group.
inst_sched_map = backend_defaults.instruction_schedule_map
measure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx])

### Collect the necessary channels
drive_chan = pulse.DriveChannel(qubit)
meas_chan = pulse.MeasureChannel(qubit)
acq_chan = pulse.AcquireChannel(qubit)

# Create the base schedule
# Start with drive pulse acting on the drive channel
schedule = pulse.Schedule(name='Frequency sweep')
schedule += Play(drive_pulse, drive_chan)
# The left shift `<<` is special syntax meaning to shift the start time of the schedule by some duration
schedule += measure << schedule.duration

# Create the frequency settings for the sweep (MUST BE IN HZ)
frequencies_Hz = frequencies_GHz*GHz
# One LO setting per sweep point; assemble runs the schedule once per setting.
schedule_frequencies = [{drive_chan: freq} for freq in frequencies_Hz]

schedule.draw(label=True)

from qiskit import assemble

num_shots_per_frequency = 1024
frequency_sweep_program = assemble(schedule,
                                   backend=backend,
                                   meas_level=1,
                                   meas_return='avg',
                                   shots=num_shots_per_frequency,
                                   schedule_los=schedule_frequencies)

job = backend.run(frequency_sweep_program)
print(job.job_id())

from qiskit.tools.monitor import job_monitor
job_monitor(job)

frequency_sweep_results = job.result(timeout=120) # timeout parameter set to 120 seconds

import matplotlib.pyplot as plt

# Collect the averaged (complex) measurement value for `qubit` at each frequency.
sweep_values = []
for i in range(len(frequency_sweep_results.results)):
    # Get the results from the ith experiment
    res = frequency_sweep_results.get_memory(i)*scale_factor
    # Get the results for `qubit` from this experiment
    sweep_values.append(res[qubit])

plt.scatter(frequencies_GHz, np.real(sweep_values), color='black') # plot real part of sweep values
plt.xlim([min(frequencies_GHz), max(frequencies_GHz)])
plt.xlabel("Frequency [GHz]")
plt.ylabel("Measured signal [a.u.]")
plt.show()
from scipy.optimize import curve_fit
def fit_function(x_values, y_values, function, init_params):
    """Least-squares fit of `function` to (x_values, y_values).

    Returns the fitted parameters and the fitted curve evaluated at x_values.
    """
    params, _ = curve_fit(function, x_values, y_values, init_params)
    return params, function(x_values, *params)
# Fit a Lorentzian to the sweep: A sets the peak scale, q_freq is the centre
# (GHz), B the half-width, C a constant offset.
fit_params, y_fit = fit_function(frequencies_GHz,
                                 np.real(sweep_values),
                                 lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,
                                 [-5, 4.975, 1, 5] # initial parameters for curve_fit
                                 )

plt.scatter(frequencies_GHz, np.real(sweep_values), color='black')
plt.plot(frequencies_GHz, y_fit, color='red')
plt.xlim([min(frequencies_GHz), max(frequencies_GHz)])
plt.xlabel("Frequency [GHz]")
plt.ylabel("Measured Signal [a.u.]")
plt.show()

# The Lorentzian centre is our refined qubit frequency estimate.
A, rough_qubit_frequency, B, C = fit_params
rough_qubit_frequency = rough_qubit_frequency*GHz # make sure qubit freq is in Hz
print(f"We've updated our qubit frequency estimate from "
      f"{round(backend_defaults.qubit_freq_est[qubit] / GHz, 8)} GHz to {round(rough_qubit_frequency/GHz, 8)} GHz.")
# This experiment uses these values from the previous experiment:
# `qubit`,
# `measure`, and
# `rough_qubit_frequency`.

# Rabi experiment parameters
num_rabi_points = 50

# Drive amplitude values to iterate over: 50 amplitudes evenly spaced from 0 to 0.75
drive_amp_min = 0
drive_amp_max = 0.75
drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)

# Build the Rabi experiments:
#   A drive pulse at the qubit frequency, followed by a measurement,
#   where we vary the drive amplitude each time.
rabi_schedules = []
for drive_amp in drive_amps:
    rabi_pulse = pulse_lib.gaussian(duration=drive_samples, amp=drive_amp,
                                    sigma=drive_sigma, name=f"Rabi drive amplitude = {drive_amp}")
    this_schedule = pulse.Schedule(name=f"Rabi drive amplitude = {drive_amp}")
    this_schedule += Play(rabi_pulse, drive_chan)
    # Reuse the measure instruction from the frequency sweep experiment
    this_schedule += measure << this_schedule.duration
    rabi_schedules.append(this_schedule)

rabi_schedules[-1].draw(label=True)

# Assemble the schedules into a Qobj
num_shots_per_point = 1024
rabi_experiment_program = assemble(rabi_schedules,
                                   backend=backend,
                                   meas_level=1,
                                   meas_return='avg',
                                   shots=num_shots_per_point,
                                   schedule_los=[{drive_chan: rough_qubit_frequency}]
                                                * num_rabi_points)

# Fix: submit the job first, THEN print its id. The original printed
# job.job_id() before `job` was rebound, showing the PREVIOUS job's id.
job = backend.run(rabi_experiment_program)
print(job.job_id())
job_monitor(job)

rabi_results = job.result(timeout=120)
# center data around 0
def baseline_remove(values):
    """Return values as an array shifted so its mean is zero."""
    arr = np.array(values)
    return arr - np.mean(arr)
# Extract the Rabi oscillation data and center it around zero.
rabi_values = []
for i in range(num_rabi_points):
    # Get the results for `qubit` from the ith experiment
    rabi_values.append(rabi_results.get_memory(i)[qubit]*scale_factor)

rabi_values = np.real(baseline_remove(rabi_values))

plt.xlabel("Drive amp [a.u.]")
plt.ylabel("Measured signal [a.u.]")
plt.scatter(drive_amps, rabi_values, color='black') # plot real part of Rabi values
plt.show()

# Fit a cosine; drive_period is the amplitude needed for a full 2*pi rotation.
fit_params, y_fit = fit_function(drive_amps,
                                 rabi_values,
                                 lambda x, A, B, drive_period, phi: (A*np.cos(2*np.pi*x/drive_period - phi) + B),
                                 [4, -4, 0.4, 2*np.pi])

plt.scatter(drive_amps, rabi_values, color='black')
plt.plot(drive_amps, y_fit, color='red')

drive_period = fit_params[2] # get period of rabi oscillation

plt.axvline(drive_period/2, color='red', linestyle='--')
plt.axvline(drive_period, color='red', linestyle='--')
plt.annotate("", xy=(drive_period, 0), xytext=(drive_period/2,0), arrowprops=dict(arrowstyle="<->", color='red'))
plt.annotate("$\pi$", xy=(drive_period/2-0.03, 0.1), color='red')
plt.xlabel("Drive amp [a.u.]", fontsize=15)
plt.ylabel("Measured signal [a.u.]", fontsize=15)
plt.show()

# A pi pulse (0->1 flip) is half a full Rabi period.
pi_amp_01 = abs(drive_period / 2)
print(f"Pi Amplitude = {pi_amp_01}")

pi_pulse_01 = pulse_lib.gaussian(duration=drive_samples,
                                 amp=pi_amp_01,
                                 sigma=drive_sigma,
                                 name='pi_pulse_01')

# Create the two schedules

# Ground state schedule: measure immediately.
zero_schedule = pulse.Schedule(name="zero schedule")
zero_schedule |= measure

# Excited state schedule: pi pulse, then measure.
one_schedule = pulse.Schedule(name="one schedule")
one_schedule |= pulse.Play(pi_pulse_01, drive_chan)
one_schedule |= measure << one_schedule.duration

zero_schedule.draw(label=True)
one_schedule.draw(label=True)

# Assemble the schedules into a program (single-shot IQ data this time)
IQ_01_program = assemble([zero_schedule, one_schedule],
                         backend=backend,
                         meas_level=1,
                         meas_return='single',
                         shots=NUM_SHOTS,
                         schedule_los=[{drive_chan: rough_qubit_frequency}] * 2)

IQ_01_job = backend.run(IQ_01_program)
print(IQ_01_job.job_id())
job_monitor(IQ_01_job)
def get_job_data(job, average):
    """Retrieve data from a job that has already run.

    Args:
        job (Job): The job whose data you want.
        average (bool): If True, gets the data assuming data is an average.
                        If False, gets the data assuming it is for single shots.

    Return:
        list: List containing job result data.
    """
    job_results = job.result(timeout=120)  # timeout parameter set to 120 s
    if average:
        # averaged memory: one complex value per qubit, per experiment
        return [job_results.get_memory(i)[qubit] * scale_factor
                for i in range(len(job_results.results))]
    # single-shot memory: one complex value per shot for `qubit`, per experiment
    return [job_results.get_memory(i)[:, qubit] * scale_factor
            for i in range(len(job_results.results))]
def get_closest_multiple_of_16(num):
    """Compute the nearest multiple of 16. Needed because pulse enabled devices require
    durations which are multiples of 16 samples.

    Note: unlike the earlier definition of the same name, this version floors
    (rounds down) rather than rounding to the nearest multiple.
    """
    whole = int(num)
    return whole - whole % 16
# Get job data (single); split for zero and one
IQ_01_data = get_job_data(IQ_01_job, average=False)
zero_data = IQ_01_data[0]
one_data = IQ_01_data[1]

def IQ_01_plot(x_min, x_max, y_min, y_max):
    """Helper function for plotting IQ plane for |0>, |1>. Limits of plot given
    as arguments. Reads the notebook globals zero_data / one_data."""
    # zero data plotted in blue
    plt.scatter(np.real(zero_data), np.imag(zero_data),
                s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\rangle$')
    # one data plotted in red
    plt.scatter(np.real(one_data), np.imag(one_data),
                s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\rangle$')

    # Plot a large dot for the average result of the zero and one states.
    mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts
    mean_one = np.mean(one_data)
    plt.scatter(np.real(mean_zero), np.imag(mean_zero),
                s=200, cmap='viridis', c='black',alpha=1.0)
    plt.scatter(np.real(mean_one), np.imag(mean_one),
                s=200, cmap='viridis', c='black',alpha=1.0)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min,y_max)
    plt.legend()
    plt.ylabel('I [a.u.]', fontsize=15)
    plt.xlabel('Q [a.u.]', fontsize=15)
    plt.title("0-1 discrimination", fontsize=15)

# Axis limits chosen by eye from a previous run of this cell.
x_min = -25
x_max = -5
y_min = -25
y_max = 0
IQ_01_plot(x_min, x_max, y_min, y_max)
def reshape_complex_vec(vec):
    """Take in complex vector vec and return 2d array w/ real, imag entries.
    This is needed for the learning.

    Args:
        vec (list): complex vector of data

    Returns:
        np.ndarray: (len(vec), 2) array whose rows are (real(vec[i]), imag(vec[i]))
    """
    vec = np.asarray(vec)
    # Vectorized replacement for the original element-by-element loop;
    # produces the same float64 (n, 2) array, including (0, 2) for empty input.
    return np.column_stack((np.real(vec), np.imag(vec)))
# Create IQ vector (split real, imag parts)
zero_data_reshaped = reshape_complex_vec(zero_data)
one_data_reshaped = reshape_complex_vec(one_data)

IQ_01_data = np.concatenate((zero_data_reshaped, one_data_reshaped))
print(IQ_01_data.shape) # verify IQ data shape

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

# Ground-truth labels: first NUM_SHOTS rows are |0>, next NUM_SHOTS are |1>.
state_01 = np.zeros(NUM_SHOTS) # shots gives number of experiments
state_01 = np.concatenate((state_01, np.ones(NUM_SHOTS)))
print(len(state_01))

# Shuffle and split data into training and test sets
IQ_01_train, IQ_01_test, state_01_train, state_01_test = train_test_split(IQ_01_data, state_01, test_size=0.5)

# Set up the LDA
LDA_01 = LinearDiscriminantAnalysis()
LDA_01.fit(IQ_01_train, state_01_train)

# test on some simple data
print(LDA_01.predict([[0,0], [10, 0]]))

# Compute accuracy
score_01 = LDA_01.score(IQ_01_test, state_01_test)
print(score_01)
# Plot separatrix on top of scatter
def separatrixPlot(lda, x_min, x_max, y_min, y_max, shots):
    """Overlay the classifier's p=0.5 decision boundary on the IQ plane."""
    grid_x, grid_y = np.meshgrid(np.linspace(x_min, x_max, shots),
                                 np.linspace(y_min, y_max, shots))
    # Probability of the second class on a shots x shots grid.
    probs = lda.predict_proba(np.c_[grid_x.ravel(), grid_y.ravel()])[:, 1]
    plt.contour(grid_x, grid_y, probs.reshape(grid_x.shape), [0.5],
                linewidths=2., colors='black')

IQ_01_plot(x_min, x_max, y_min, y_max)
separatrixPlot(LDA_01, x_min, x_max, y_min, y_max, NUM_SHOTS)
```
## Discriminating the |0⟩, |1⟩ and |2⟩ states
```
from qiskit.pulse.library import Waveform  ## SamplePulse was replaced by Waveform after 0.25.0
def apply_sideband(pulse, freq):
    """Apply a sinusoidal sideband to this pulse at frequency freq.

    Relies on the notebook globals `dt`, `drive_samples` and
    `rough_qubit_frequency`: the envelope is multiplied by a sine at
    (freq - rough_qubit_frequency), so the LO can stay fixed at the
    calibrated 0->1 frequency while effectively driving at `freq`.

    NOTE(review): the parameter name `pulse` shadows the qiskit `pulse`
    module inside this function body.

    Args:
        pulse (SamplePulse): The pulse of interest.
        freq (float): LO frequency for which we want to apply the sweep.

    Return:
        SamplePulse: Pulse with a sideband applied (oscillates at difference between freq and cal_qubit_freq).
    """
    # time goes from 0 to dt*drive_samples, sine arg of form 2*pi*f*t
    t_samples = np.linspace(0, dt*drive_samples, drive_samples)
    sine_pulse = np.sin(2*np.pi*(freq-rough_qubit_frequency)*t_samples) # no amp for the sine

    # create sample pulse w/ sideband applied
    # Note: need to make sq_pulse.samples real, multiply elementwise
    sideband_pulse = Waveform(np.multiply(np.real(pulse.samples), sine_pulse), name='sideband_pulse')

    return sideband_pulse
def create_excited_freq_sweep_program(freqs, drive_power):
    """Builds a program that does a freq sweep by exciting the |1> state.

    This allows us to obtain the 1->2 frequency. We get from the |0> to |1>
    state via a pi pulse using the calibrated qubit frequency. To do the
    frequency sweep from |1> to |2>, we use a sideband method by tacking
    a sine factor onto the sweep drive pulse.

    Args:
        freqs (np.ndarray(dtype=float)): Numpy array of frequencies to sweep.
        drive_power (float) : Value of drive amplitude.

    Raises:
        ValueError: Thrown if use more than 75 frequencies; currently, an error will be thrown on the backend
            if you try more than 75 frequencies.

    Returns:
        Qobj: Program for freq sweep experiment.
    """
    if len(freqs) > 75:
        raise ValueError("You can only run 75 schedules at a time.")

    print(f"The frequency sweep will go from {freqs[0] / GHz} GHz to {freqs[-1]/ GHz} GHz \
using {len(freqs)} frequencies. The drive power is {drive_power}.")

    # Shared gaussian envelope; each schedule gets its own sideband frequency.
    base_12_pulse = pulse_lib.gaussian(duration=drive_samples,
                                       sigma=drive_sigma,
                                       amp=drive_power,
                                       name='base_12_pulse')
    schedules = []
    for jj, freq in enumerate(freqs):
        # add sideband to gaussian pulse
        freq_sweep_12_pulse = apply_sideband(base_12_pulse, freq)

        # add commands to schedule
        schedule = pulse.Schedule(name="Frequency = {}".format(freq))

        # Add 0->1 pulse, freq sweep pulse and measure
        schedule |= pulse.Play(pi_pulse_01, drive_chan)
        schedule |= pulse.Play(freq_sweep_12_pulse, drive_chan) << schedule.duration
        schedule |= measure << schedule.duration # shift measurement to after drive pulses

        schedules.append(schedule)

    num_freqs = len(freqs)

    # draw a schedule
    display(schedules[-1].draw(channels=[drive_chan, meas_chan], label=True, scale=1.0))

    # assemble freq sweep program
    # Note: LO is at cal_qubit_freq for each schedule; accounted for by sideband
    excited_freq_sweep_program = assemble(schedules,
                                          backend=backend,
                                          meas_level=1,
                                          meas_return='avg',
                                          shots=NUM_SHOTS,
                                          schedule_los=[{drive_chan: rough_qubit_frequency}]
                                                       * num_freqs)

    return excited_freq_sweep_program
# sweep 400 MHz below 0->1 frequency to catch the 1->2 frequency
num_freqs = 75
excited_sweep_freqs = rough_qubit_frequency + np.linspace(-400*MHz, 30*MHz, num_freqs)
excited_freq_sweep_program = create_excited_freq_sweep_program(excited_sweep_freqs, drive_power=0.3)

# Submit and monitor the sweep (create_... already displayed an example schedule).
excited_freq_sweep_job = backend.run(excited_freq_sweep_program)
print(excited_freq_sweep_job.job_id())
job_monitor(excited_freq_sweep_job)

# Get job data (avg)
excited_freq_sweep_data = get_job_data(excited_freq_sweep_job, average=True)

# Note: we are only plotting the real part of the signal
plt.scatter(excited_sweep_freqs/GHz, excited_freq_sweep_data, color='black')
plt.xlim([min(excited_sweep_freqs/GHz)+0.01, max(excited_sweep_freqs/GHz)]) # ignore min point (is off)
plt.xlabel("Frequency [GHz]", fontsize=15)
plt.ylabel("Measured Signal [a.u.]", fontsize=15)
plt.title("1->2 Frequency Sweep (first pass)", fontsize=15)
plt.show()
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
# Prints out relative maxima frequencies in output_data; height gives lower bound (abs val)
def rel_maxima(freqs, output_data, height):
    """Return the frequencies of relative maxima (peaks) in output_data.

    Args:
        freqs (list): frequency list
        output_data (list): list of resulting signals
        height (float): LOWER bound (abs val) a peak must exceed to count.
            (scipy.signal.find_peaks' `height` is a required minimum height,
            not an upper bound as the old docstring claimed.) Be sure to set
            it properly or real peaks will be ignored!

    Returns:
        list: List containing relative maxima frequencies
    """
    peaks, _ = find_peaks(output_data, height)
    # Fix: the message said "dips", but find_peaks returns maxima.
    print("Freq. peaks: ", freqs[peaks])
    return freqs[peaks]
maxima = rel_maxima(excited_sweep_freqs, np.real(excited_freq_sweep_data), 18)
# NOTE(review): rel_maxima returns an array; the code below assumes exactly one
# peak was found (a multi-element array would broadcast into len(maxima)*75
# frequencies) — confirm the height threshold selects a single peak.
approx_12_freq = maxima

# smaller range refined sweep
num_freqs = 75
refined_excited_sweep_freqs = approx_12_freq + np.linspace(-20*MHz, 20*MHz, num_freqs)
refined_excited_freq_sweep_program = create_excited_freq_sweep_program(refined_excited_sweep_freqs, drive_power=0.3)

refined_excited_freq_sweep_job = backend.run(refined_excited_freq_sweep_program)
print(refined_excited_freq_sweep_job.job_id())
job_monitor(refined_excited_freq_sweep_job)

# Get the refined data (average)
refined_excited_freq_sweep_data = get_job_data(refined_excited_freq_sweep_job, average=True)

# do fit in Hz
(refined_excited_sweep_fit_params,
 refined_excited_sweep_y_fit) = fit_function(refined_excited_sweep_freqs,
                                             refined_excited_freq_sweep_data,
                                             lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,
                                             [-12, 4.624*GHz, 0.05*GHz, 3*GHz] # initial parameters for curve_fit
                                             )

# Note: we are only plotting the real part of the signal
plt.scatter(refined_excited_sweep_freqs/GHz, refined_excited_freq_sweep_data, color='black')
plt.plot(refined_excited_sweep_freqs/GHz, refined_excited_sweep_y_fit, color='red')
plt.xlim([min(refined_excited_sweep_freqs/GHz), max(refined_excited_sweep_freqs/GHz)])
plt.xlabel("Frequency [GHz]", fontsize=15)
plt.ylabel("Measured Signal [a.u.]", fontsize=15)
plt.title("1->2 Frequency Sweep (refined pass)", fontsize=15)
plt.show()

# The Lorentzian centre is the fitted 1->2 transition frequency.
_, qubit_12_freq, _, _ = refined_excited_sweep_fit_params
print(f"Our updated estimate for the 1->2 transition frequency is "
      f"{round(qubit_12_freq/GHz, 7)} GHz.")
# experimental configuration
num_rabi_points = 75 # number of experiments (ie amplitudes to sweep out)

# Drive amplitude values to iterate over: 75 amplitudes evenly spaced from 0 to 1.0
drive_amp_min = 0
drive_amp_max = 1.0
drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)

# Create schedule
rabi_12_schedules = []

# loop over all drive amplitudes
for ii, drive_amp in enumerate(drive_amps):
    base_12_pulse = pulse_lib.gaussian(duration=drive_samples,
                                       sigma=drive_sigma,
                                       amp=drive_amp,
                                       name='base_12_pulse')
    # apply sideband at the 1->2 frequency
    rabi_12_pulse = apply_sideband(base_12_pulse, qubit_12_freq)

    # add commands to schedule
    schedule = pulse.Schedule(name='Rabi Experiment at drive amp = %s' % drive_amp)
    schedule |= pulse.Play(pi_pulse_01, drive_chan) # 0->1
    schedule |= pulse.Play(rabi_12_pulse, drive_chan) << schedule.duration # 1->2 Rabi pulse
    schedule |= measure << schedule.duration # shift measurement to after drive pulse
    rabi_12_schedules.append(schedule)

# Assemble the schedules into a program
# Note: The LO frequency is at cal_qubit_freq to support the 0->1 pi pulse;
# it is modified for the 1->2 pulse using sidebanding
rabi_12_expt_program = assemble(rabi_12_schedules,
                                backend=backend,
                                meas_level=1,
                                meas_return='avg',
                                shots=NUM_SHOTS,
                                # Fix: the variable name here was corrupted
                                # ("rough_qubit_frequencyqubit_frequency..."),
                                # a NameError; the LO must sit at the calibrated
                                # 0->1 frequency.
                                schedule_los=[{drive_chan: rough_qubit_frequency}]
                                             * num_rabi_points)

rabi_12_job = backend.run(rabi_12_expt_program)
print(rabi_12_job.job_id())
job_monitor(rabi_12_job)
# Get the job data (average)
rabi_12_data = get_job_data(rabi_12_job, average=True)
print(len(rabi_12_data)*0.6)  # 45 = 60% of the 75 points; the fit below uses only the first 45

# Note: We only plot the real part of the signal.
rabi_12_data = np.real(baseline_remove(rabi_12_data))

# NOTE(review): only the first 45 points are fitted — presumably the oscillation
# degrades at higher amplitudes; confirm against the plotted data.
(rabi_12_fit_params,
 rabi_12_y_fit) = fit_function(drive_amps[:45],
                               rabi_12_data[:45],
                               lambda x, A, B, drive_12_period, phi: (A*np.cos(2*np.pi*x/drive_12_period - phi) + B),
                               [2, -2.5, 0.4,0.5])

plt.scatter(drive_amps, rabi_12_data, color='black')
plt.plot(drive_amps[:45], rabi_12_y_fit, color='red')

drive_12_period = rabi_12_fit_params[2]
# account for phi in computing pi amp
pi_amp_12 = (drive_12_period/2/np.pi) *(np.pi+rabi_12_fit_params[3])

plt.axvline(pi_amp_12, color='red', linestyle='--')
plt.axvline(pi_amp_12+drive_12_period/2, color='red', linestyle='--')
plt.annotate("", xy=(pi_amp_12+drive_12_period/2, 0), xytext=(pi_amp_12,0), arrowprops=dict(arrowstyle="<->", color='red'))
plt.annotate("$\pi$", xy=(pi_amp_12-0.03, 0.1), color='red')
plt.xlabel("Drive amp [a.u.]", fontsize=15)
plt.ylabel("Measured signal [a.u.]", fontsize=15)
plt.title('Rabi Experiment (1->2)', fontsize=20)
plt.show()

print(f"Our updated estimate for the 1->2 transition frequency is "
      f"{round(qubit_12_freq/GHz, 7)} GHz.")
print(f"Pi Amplitude (1->2) = {pi_amp_12}")

# Build the 1->2 pi pulse from the fitted amplitude.
pi_pulse_12 = pulse_lib.gaussian(duration=drive_samples,
                                 amp=pi_amp_12,
                                 sigma=drive_sigma,
                                 name='pi_pulse_12')
# make sure this pulse is sidebanded
pi_pulse_12 = apply_sideband(pi_pulse_12, qubit_12_freq)
# Create the three schedules

# Ground state schedule: measure immediately.
zero_schedule = pulse.Schedule(name="zero schedule")
zero_schedule |= measure

# Excited state schedule: pi(0->1), then measure.
one_schedule = pulse.Schedule(name="one schedule")
one_schedule |= pulse.Play(pi_pulse_01, drive_chan)
one_schedule |= measure << one_schedule.duration

# Second excited state schedule: pi(0->1), sidebanded pi(1->2), then measure.
two_schedule = pulse.Schedule(name="two schedule")
two_schedule |= pulse.Play(pi_pulse_01, drive_chan)
two_schedule |= pulse.Play(pi_pulse_12, drive_chan) << two_schedule.duration
two_schedule |= measure << two_schedule.duration

# Assemble the schedules into a program.
# Fix: the original assembled the identical program twice back-to-back;
# the redundant duplicate has been removed.
IQ_012_program = assemble([zero_schedule, one_schedule, two_schedule],
                          backend=backend,
                          meas_level=1,
                          meas_return='single',
                          shots=NUM_SHOTS,
                          schedule_los=[{drive_chan: rough_qubit_frequency}] * 3)

IQ_012_job = backend.run(IQ_012_program)
print(IQ_012_job.job_id())
job_monitor(IQ_012_job)
# Get job data (single); split for zero, one and two
IQ_012_data = get_job_data(IQ_012_job, average=False)
zero_data = IQ_012_data[0]
one_data = IQ_012_data[1]
two_data = IQ_012_data[2]

def IQ_012_plot(x_min, x_max, y_min, y_max):
    """Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given
    as arguments. Reads the notebook globals zero_data / one_data / two_data."""
    # zero data plotted in blue
    plt.scatter(np.real(zero_data), np.imag(zero_data),
                s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\rangle$')
    # one data plotted in red
    plt.scatter(np.real(one_data), np.imag(one_data),
                s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\rangle$')
    # two data plotted in green
    plt.scatter(np.real(two_data), np.imag(two_data),
                s=5, cmap='viridis', c='green', alpha=0.5, label=r'$|2\rangle$')

    # Plot a large dot for the average result of the 0, 1 and 2 states.
    mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts
    mean_one = np.mean(one_data)
    mean_two = np.mean(two_data)
    plt.scatter(np.real(mean_zero), np.imag(mean_zero),
                s=200, cmap='viridis', c='black',alpha=1.0)
    plt.scatter(np.real(mean_one), np.imag(mean_one),
                s=200, cmap='viridis', c='black',alpha=1.0)
    plt.scatter(np.real(mean_two), np.imag(mean_two),
                s=200, cmap='viridis', c='black',alpha=1.0)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min,y_max)
    plt.legend()
    plt.ylabel('I [a.u.]', fontsize=15)
    plt.xlabel('Q [a.u.]', fontsize=15)
    plt.title("0-1-2 discrimination", fontsize=15)

# Axis limits chosen by eye from a previous run of this cell.
x_min = -10
x_max = 20
y_min = 10
y_max = 35
IQ_012_plot(x_min, x_max, y_min, y_max)
# Create IQ vector (split real, imag parts)
zero_data_reshaped = reshape_complex_vec(zero_data)
one_data_reshaped = reshape_complex_vec(one_data)
two_data_reshaped = reshape_complex_vec(two_data)

IQ_012_data = np.concatenate((zero_data_reshaped, one_data_reshaped, two_data_reshaped))
print(IQ_012_data.shape) # verify IQ data shape

# construct vector w/ 0's, 1's and 2's (for testing)
state_012 = np.zeros(NUM_SHOTS) # shots gives number of experiments
state_012 = np.concatenate((state_012, np.ones(NUM_SHOTS)))
state_012 = np.concatenate((state_012, 2*np.ones(NUM_SHOTS)))
print(len(state_012))

# Shuffle and split data into training and test sets
IQ_012_train, IQ_012_test, state_012_train, state_012_test = train_test_split(IQ_012_data, state_012, test_size=0.5)

# Set up the LDA
LDA_012 = LinearDiscriminantAnalysis()
LDA_012.fit(IQ_012_train, state_012_train)

# test on some simple data
print(LDA_012.predict([[0, 0], [-10, 0], [-15, -5]]))

# Compute accuracy
score_012 = LDA_012.score(IQ_012_test, state_012_test)
print(score_012)

IQ_012_plot(x_min, x_max, y_min, y_max)
separatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS)

# Neural-network discriminator (default single hidden layer) for comparison.
from sklearn.neural_network import MLPClassifier
IQ_012_plot(x_min, x_max, y_min, y_max)
NN_012 = MLPClassifier(solver='lbfgs',max_iter=1000)
NN_012.fit(IQ_012_train, state_012_train)
separatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS)
print(NN_012.score(IQ_012_test, state_012_test))

# Quadratic discriminant for comparison.
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
IQ_012_plot(x_min, x_max, y_min, y_max)
QDA_012 = QuadraticDiscriminantAnalysis()
QDA_012.fit(IQ_012_train, state_012_train)
scoreq_012 = QDA_012.score(IQ_012_test, state_012_test)
print(scoreq_012)

# Deeper NN (two 100-unit hidden layers). NOTE: this rebinds NN_012, so the
# later cells that call NN_012.predict use this deeper model.
from sklearn.neural_network import MLPClassifier
IQ_012_plot(x_min, x_max, y_min, y_max)
NN_012 = MLPClassifier(hidden_layer_sizes = (100,100,),solver='lbfgs',max_iter=1000)
NN_012.fit(IQ_012_train, state_012_train)
separatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS)
print(NN_012.score(IQ_012_test, state_012_test))
```
### Qudit Gates Class
Currently we only have a few gate operations supported: xcyc (cyclic shift), xacyc (anti-cyclic shift), x01, x12, x02, h01, h12, and h02 gates.
```
from qiskit.pulse import *
from qiskit.pulse import library as pulse_lib
class QuditGates:
    """Pulse-schedule builders for single-qudit (3-level) gates.

    Supported gates: xcyc (cyclic shift), xacyc (anti-cyclic shift),
    x01, x12, x02 (pairwise X gates), and h01, h12, h02 (pairwise Hadamards).
    """

    def __init__(self, pi_amp_01, pi_amp_12, trans_freq_01, trans_freq_12, chan, dt, backend=None):
        """
        Args:
            pi_amp_01: calibrated pi-pulse amplitude for the 0->1 transition.
            pi_amp_12: calibrated pi-pulse amplitude for the 1->2 transition.
            trans_freq_01: 0->1 transition frequency in Hz (the LO frequency).
            trans_freq_12: 1->2 transition frequency in Hz.
            chan: drive channel the pulses are played on.
            dt: backend sampling time in seconds.
            backend: optional backend; needed only by h01/h12/h02, which lower
                a circuit-level H to pulses. (Fix: was referenced as
                self.backend but never stored.)
        """
        self.dt = dt
        self.pi_amp_01 = pi_amp_01
        self.pi_amp_12 = pi_amp_12
        self.chan = chan
        self.trans_freq_01 = trans_freq_01
        self.trans_freq_12 = trans_freq_12
        self.backend = backend
        drive_sigma = 0.075*1e-6  # gaussian width in seconds
        # Pulse durations must be multiples of 16 samples on pulse hardware.
        self.drive_sigma_dt = int(drive_sigma/dt) - (int(drive_sigma/dt%16))
        self.drive_samples_dt = 8*self.drive_sigma_dt
        self.pi_pul_01 = pulse_lib.gaussian(duration=self.drive_samples_dt,
                                            amp=self.pi_amp_01,
                                            sigma=self.drive_sigma_dt,
                                            )
        self.base_pulse = pulse_lib.gaussian(duration=self.drive_samples_dt,
                                             sigma=self.drive_sigma_dt,
                                             amp=self.pi_amp_12)
        # Fix: pre-build the sidebanded 1->2 pi pulse (previously commented
        # out, which left self.pi_pul_12 undefined for x12/x02).
        self.pi_pul_12 = self.apply_sideband_n(self.base_pulse, self.trans_freq_12)

    def apply_sideband_n(self, base_pulse, freq):
        """Multiply base_pulse by a sine at (freq - trans_freq_01) so the drive
        effectively oscillates at freq while the LO stays at the 0->1 frequency."""
        # Fix: use self.dt (this previously read the notebook-global `dt`).
        t_samples = np.linspace(0, self.dt*self.drive_samples_dt, num=self.drive_samples_dt)
        sine_pulse = np.sin(2*np.pi*(freq-self.trans_freq_01)*t_samples)
        sideband_pulse = Waveform(np.multiply(np.real(base_pulse.samples), sine_pulse), name='sideband_pulse')
        return sideband_pulse

    def xcyc(self):
        '''This the gate corresponding to the operator
        |n> --> |(n+1) mod 3>.'''
        # Fix: was bare `trans_freq_12` (NameError) instead of the attribute.
        pi_pul_12 = self.apply_sideband_n(self.base_pulse, self.trans_freq_12)
        with build() as xcyc_pulse:  # fix: build() was missing its call parentheses
            play(pi_pul_12, self.chan)
            play(self.pi_pul_01, self.chan)
        sched = Schedule()
        sched += xcyc_pulse
        return sched

    def xacyc(self):
        '''This the gate corresponding to the operator
        |n> --> |(n-1) mod 3>.'''
        pi_pul_12 = self.apply_sideband_n(self.base_pulse, self.trans_freq_12)
        with build() as xacyc_pulse:
            play(self.pi_pul_01, self.chan)
            play(pi_pul_12, self.chan)
        sched = Schedule()
        sched += xacyc_pulse
        return sched

    def x01(self, chan):
        '''This the gate corresponding to the operator
        |0> --> |1> and |1> --> |0>.'''
        with build() as x01_pulse:
            play(self.pi_pul_01, chan)
        sched = Schedule()
        sched += x01_pulse  # fix: `return sched += ...` was a SyntaxError
        return sched

    def x12(self, chan):
        '''This the gate corresponding to the operator
        |1> --> |2> and |2> --> |1>.'''
        with build() as x12_pulse:
            play(self.pi_pul_12, chan)
        sched = Schedule()
        sched += x12_pulse
        return sched

    def x02(self, chan):
        '''This the gate corresponding to the operator
        |0> --> |2> and |2> --> |0>.'''
        with build() as x02_pulse:
            play(self.pi_pul_01, chan)
            play(self.pi_pul_12, chan)
            play(self.pi_pul_01, chan)
        sched = Schedule()
        sched += x02_pulse
        return sched

    def h01(self):
        '''Hadamard on the 0-1 subspace, via a circuit-level H lowered to
        pulses on self.backend.'''
        from qiskit import QuantumCircuit  # local import; qiskit is already a notebook dependency
        qc = QuantumCircuit(1)
        qc.h(0)
        with build(self.backend) as h01_pulse:
            call(qc)
        sched = Schedule()
        sched += h01_pulse
        return sched

    def h12(self):
        '''Hadamard intended for the 1-2 subspace.
        NOTE(review): currently identical to h01 — presumably it should be
        sidebanded to the 1->2 transition; confirm before use.'''
        from qiskit import QuantumCircuit
        qc = QuantumCircuit(1)
        qc.h(0)
        with build(self.backend) as h12_pulse:
            call(qc)
        sched = Schedule()
        sched += h12_pulse
        return sched

    def h02(self):
        '''Hadamard on the 0-2 subspace: h01 conjugated by pi(0->1) pulses.'''
        from qiskit import QuantumCircuit
        qc = QuantumCircuit(1)
        qc.h(0)
        with build(self.backend) as h02_pulse:
            call(qc)
        sched = Schedule()
        sched += Play(self.pi_pul_01,self.chan)
        sched += h02_pulse
        sched += Play(self.pi_pul_01,self.chan)
        return sched
from qiskit.pulse import Schedule

# Calibration constants from the earlier runs (Hz and dimensionless amplitudes).
trans_freq_01=4971800930
trans_freq_12=4623781600
pi_amp_01=0.14096173612665405
pi_amp_12=0.2493075855271005

gate=QuditGates(pi_amp_01, pi_amp_12, trans_freq_01, trans_freq_12, drive_chan, dt)

# Build the cyclic-shift schedule and append the calibrated measurement.
cyclic_shift_sched = Schedule()
cyclic_shift_sched += gate.xcyc()
inst_sched_map = backend_defaults.instruction_schedule_map
measure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx])
cyclic_shift_sched |= measure << cyclic_shift_sched.duration

cyclic_shift_program = assemble(cyclic_shift_sched,
                                backend=backend,
                                meas_level=1,
                                meas_return='single',
                                shots=NUM_SHOTS,
                                schedule_los=[{drive_chan: rough_qubit_frequency}])

cyclic_shift_sched.draw(label=True)

cyclic_shift_job = backend.run(cyclic_shift_program)
job_monitor(cyclic_shift_job)

# Single-shot IQ data for the shifted state; the bare name displays it.
cyclic_shift_data = get_job_data(cyclic_shift_job, average=False)
cyclic_shift_data
cyclic_shift_reshaped=reshape_complex_vec(cyclic_shift_data[0])
def IQ_012_measure_plot(x_min, x_max, y_min, y_max):
    """Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given
    as arguments. Reads the notebook global cyclic_shift_data."""
    # measured data plotted in blue
    # NOTE(review): the |0> label is a leftover from the earlier plot helper —
    # this is the post-xcyc measured data, not the ground state.
    plt.scatter(np.real(cyclic_shift_data), np.imag(cyclic_shift_data),
                s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\rangle$')

    mean_data = np.mean(cyclic_shift_data) # takes mean of both real and imaginary parts
    plt.scatter(np.real(mean_data), np.imag(mean_data),
                s=200, cmap='viridis', c='black',alpha=1.0)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min,y_max)
    plt.legend()
    plt.ylabel('I [a.u.]', fontsize=15)
    plt.xlabel('Q [a.u.]', fontsize=15)
    plt.title("0-1-2 discrimination of measured data", fontsize=15)

x_min = -30
x_max = 30
y_min = -60
y_max = 30
IQ_012_measure_plot(x_min, x_max, y_min, y_max)

# Classify each shot with the NN discriminator and tally the outcomes.
output=NN_012.predict(cyclic_shift_reshaped)
separatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS)
print(output)
unique, counts = np.unique(output, return_counts=True)
dict(zip(unique, counts))
```
The discriminator boundary appears to lie away from the measured data of the cyclic shift. This problem needs to be solved in the future.
### Compare to LDA discriminator
```
# Same comparison with the LDA discriminator instead of the NN.
x_min = -30
x_max = 30
y_min = -60
y_max = 30
IQ_012_measure_plot(x_min, x_max, y_min, y_max)
output_LDA=LDA_012.predict(cyclic_shift_reshaped)
separatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS)
# Tally the predicted 0/1/2 labels across shots.
unique, counts = np.unique(output_LDA, return_counts=True)
dict(zip(unique, counts))
```
It has the same issue of classification of the measured data.
| github_jupyter |
```
from __future__ import absolute_import, division, print_function
%matplotlib inline
# %matplotlib nbagg
import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
from data_generator_tensorflow import get_batch, print_valid_characters
import os
import sys
sys.path.append(os.path.join('.', '..'))
import utils
import tf_utils
```
# Attention Decoder
> <span style="color:gray">
Original [Theano/Lasagne tutorial](https://github.com/DeepLearningDTU/nvidia_deep_learning_summercamp_2016/) by
Lars Maaløe ([larsmaaloee](https://github.com/larsmaaloee)),
Søren Kaae Sønderby ([skaae](https://github.com/skaae)), and
Casper Sønderby ([casperkaae](https://github.com/casperkaae)).
Converted to TensorFlow by
Alexander R. Johansen ([alrojo](https://github.com/alrojo)),
and updated by
Toke Faurby ([faur](https://github.com/Faur)).
> </span>
Soft attention for recurrent neural networks has recently attracted a lot of interest.
These methods let the decoder selectively focus on which part of the encoder sequence it will use for each decoded output symbol.
This relieves the encoder from having to compress the input sequence into a fixed-size vector representation passed on to the decoder.
Secondly, we can interrogate the decoder network about where it attends while producing the outputs.
Below we'll implement a decoder with selective attention and show that it significantly improves the performance of the toy translation task.
The seminal attention paper is https://arxiv.org/pdf/1409.0473v7.pdf
The principle of attention models is:
1. Use the encoder to get the hidden representation $\{h^1_e, ...h^n_e\}$ for each position in the input sequence.
2. For timestep $t$ in the decoder, compute for $m = 1...n$: $a_{mt} = f(h^e_m, h^d_t)$, where $f$ is a function returning a scalar value.
3. Normalize the scores $\{a_{1t}, ..., a_{nt}\}$ with a softmax to obtain the probabilities $\{p_{1t}, ..., p_{nt}\}$.
4. Weight each $h^e_m$ by its probability $p_{mt}$ and sum to get $h_{in}$.
5. Use $h_{in}$ as an additional input to the decoder. $h_{in}$ is recalculated each time the decoder is updated.
```
# At the bottom of the script there is some code which saves the model.
# If you wish to restore your model from a previous state use this function.
load_model = False
# resetting the graph
tf.reset_default_graph()
# Setting up hyperparameters and general configs
MAX_DIGITS = 10
MIN_DIGITS = 5
NUM_INPUTS = 27
NUM_OUTPUTS = 11 #(0-9 + '#')
BATCH_SIZE = 16
# try various learning rates 1e-2 to 1e-5
LEARNING_RATE = 0.005
X_EMBEDDINGS = 8
t_EMBEDDINGS = 8
NUM_UNITS_ENC = 16
NUM_UNITS_DEC = 16
NUM_UNITS_ATTN = 16
# Setting up placeholders, these are the tensors that we "feed" to our network
Xs = tf.placeholder(tf.int32, shape=[None, None], name='X_input')
ts_in = tf.placeholder(tf.int32, shape=[None, None], name='t_input_in')
ts_out = tf.placeholder(tf.int32, shape=[None, None], name='t_input_out')
X_len = tf.placeholder(tf.int32, shape=[None], name='X_len')
t_len = tf.placeholder(tf.int32, shape=[None], name='X_len')
t_mask = tf.placeholder(tf.float32, shape=[None, None], name='t_mask')
# Building the model
# first we build the embeddings to make our characters into dense, trainable vectors
X_embeddings = tf.get_variable('X_embeddings', [NUM_INPUTS, X_EMBEDDINGS],
initializer=tf.random_normal_initializer(stddev=0.1))
t_embeddings = tf.get_variable('t_embeddings', [NUM_OUTPUTS, t_EMBEDDINGS],
initializer=tf.random_normal_initializer(stddev=0.1))
# setting up weights for computing the final output
W_out = tf.get_variable('W_out', [NUM_UNITS_DEC, NUM_OUTPUTS])
b_out = tf.get_variable('b_out', [NUM_OUTPUTS])
X_embedded = tf.gather(X_embeddings, Xs, name='embed_X')
t_embedded = tf.gather(t_embeddings, ts_in, name='embed_t')
# forward encoding
enc_cell = tf.nn.rnn_cell.GRUCell(NUM_UNITS_ENC)#python.ops.rnn_cell.GRUCell
enc_out, enc_state = tf.nn.dynamic_rnn(cell=enc_cell, inputs=X_embedded,
sequence_length=X_len, dtype=tf.float32)
# use below in case TF's does not work as intended
#enc_state, _ = tf_utils.encoder(X_embedded, X_len, 'encoder', NUM_UNITS_ENC)
#
#enc_state = tf.concat(1, [enc_state, enc_state])
# decoding
# note that we are using a wrapper for decoding here, this wrapper is hardcoded to only use GRU
# check out tf_utils to see how you make your own decoder
dec_out, dec_out_valid, alpha_valid = \
tf_utils.attention_decoder(enc_out, X_len, enc_state, t_embedded, t_len,
NUM_UNITS_DEC, NUM_UNITS_ATTN, t_embeddings,
W_out, b_out)
# reshaping to have [batch_size*seqlen, num_units]
out_tensor = tf.reshape(dec_out, [-1, NUM_UNITS_DEC])
out_tensor_valid = tf.reshape(dec_out_valid, [-1, NUM_UNITS_DEC])
# computing output
out_tensor = tf.matmul(out_tensor, W_out) + b_out
out_tensor_valid = tf.matmul(out_tensor_valid, W_out) + b_out
# reshaping back to sequence
b_size = tf.shape(X_len)[0] # use a variable we know has batch_size in [0]
seq_len = tf.shape(t_embedded)[1] # variable we know has sequence length in [1]
num_out = tf.constant(NUM_OUTPUTS) # casting NUM_OUTPUTS to a tensor variable
out_shape = tf.concat([tf.expand_dims(b_size, 0),
tf.expand_dims(seq_len, 0),
tf.expand_dims(num_out, 0)],
axis=0)
out_tensor = tf.reshape(out_tensor, out_shape)
out_tensor_valid = tf.reshape(out_tensor_valid, out_shape)
## handling shape loss
y = out_tensor
y_valid = out_tensor_valid
```
### Defining the cost function, gradient clipping and accuracy
```
def loss_and_acc(preds):
    """Build the loss (masked sequence cross-entropy + L2) and accuracy ops.

    Arguments:
    preds -- [batch, seq_len, NUM_OUTPUTS] logits tensor

    Returns:
    (loss, accuracy, argmax) -- scalar loss op, scalar masked accuracy op,
    and the int32 per-position predictions. Targets and mask come from the
    module-level `ts_out` / `t_mask` placeholders.
    """
    # sequence_loss_tensor is a modification of TensorFlow's own
    # sequence_to_sequence loss that works directly on 3D tensors instead
    # of 2D lists; note it is scored against ts_out.
    loss = tf_utils.sequence_loss_tensor(preds, ts_out, t_mask, NUM_OUTPUTS)
    # Optional L2 weight regularization over every trainable variable.
    l2_scale = 0.00001
    l2_reg = tf.contrib.layers.l2_regularizer(l2_scale)
    trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    loss = loss + sum(l2_reg(v) for v in trainable)
    # Masked token-level accuracy: count positions where the argmax
    # prediction matches the target, weighted by the target mask.
    argmax = tf.to_int32(tf.argmax(preds, 2))
    hits = tf.to_float(tf.equal(argmax, ts_out)) * t_mask
    accuracy = tf.reduce_sum(hits) / tf.reduce_sum(t_mask)
    return loss, accuracy, argmax
loss, accuracy, predictions = loss_and_acc(y)
loss_valid, accuracy_valid, predictions_valid = loss_and_acc(y_valid)
# use lobal step to keep track of our iterations
global_step = tf.Variable(0, name='global_step', trainable=False)
# pick optimizer, try momentum or adadelta
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
# extract gradients for each variable
grads_and_vars = optimizer.compute_gradients(loss)
## add below for clipping by norm
#gradients, variables = zip(*grads_and_vars) # unzip list of tuples
#clipped_gradients, global_norm = (
# tf.clip_by_global_norm(gradients, self.clip_norm) )
#grads_and_vars = zip(clipped_gradients, variables)
## apply gradients and make trainable function
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
```
### Testing the forward pass
```
# print all the variable names and shapes
# notice that W_z is now packed, such that it contains both W_z_h and W_x_h, this is for optimization
# further, we now have W_s, b_s. This is so NUM_UNITS_ENC and NUM_UNITS_DEC does not have to share shape ..!
for var in tf.global_variables ():
s = var.name + " "*(40-len(var.name))
print (s, var.value().get_shape())
#Generate some validation data
X_val, X_len_val, t_in_val, t_out_val, t_len_val, t_mask_val, \
text_inputs_val, text_targets_in_val, text_targets_out_val = \
get_batch(batch_size=5000, max_digits=MAX_DIGITS,min_digits=MIN_DIGITS)
print("X_val", X_val.shape)
print("t_out_val", t_out_val.shape)
## Start the session
# restricting memory usage, TensorFlow is greedy and will use all memory otherwise
gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.35)
# initialize the Session
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts))
# test train part
sess.run(tf.global_variables_initializer())
%%time
## If you get an error, remove this line! It makes the error message hard to understand.
# NOTICE - THIS MIGHT TAKE UPTO 30 MINUTES ON CPU..!
# setting up running parameters
val_interval = 5000
samples_to_process = 2e5
samples_processed = 0
samples_val = []
costs, accs = [], []
plt.figure()
try:
while samples_processed < samples_to_process:
# load data
X_tr, X_len_tr, t_in_tr, t_out_tr, t_len_tr, t_mask_tr, \
text_inputs_tr, text_targets_in_tr, text_targets_out_tr = \
get_batch(batch_size=BATCH_SIZE,max_digits=MAX_DIGITS,min_digits=MIN_DIGITS)
# make fetches
fetches_tr = [train_op, loss, accuracy]
# set up feed dict
feed_dict_tr = {Xs: X_tr, X_len: X_len_tr, ts_in: t_in_tr,
ts_out: t_out_tr, t_len: t_len_tr, t_mask: t_mask_tr}
# run the model
res = tuple(sess.run(fetches=fetches_tr, feed_dict=feed_dict_tr))
_, batch_cost, batch_acc = res
costs += [batch_cost]
samples_processed += BATCH_SIZE
#if samples_processed % 1000 == 0: print batch_cost, batch_acc
#validation data
if samples_processed % val_interval == 0:
#print "validating"
fetches_val = [accuracy_valid, y_valid, alpha_valid]
feed_dict_val = {Xs: X_val, X_len: X_len_val, ts_in: t_in_val,
ts_out: t_out_val, t_len: t_len_val, t_mask: t_mask_val}
res = tuple(sess.run(fetches=fetches_val, feed_dict=feed_dict_val))
acc_val, output_val, alp_val = res
samples_val += [samples_processed]
accs += [acc_val]
plt.plot(samples_val, accs, 'b-')
plt.ylabel('Validation Accuracy', fontsize=15)
plt.xlabel('Processed samples', fontsize=15)
plt.title('', fontsize=20)
plt.grid('on')
plt.savefig("out_attention.png")
display.display(display.Image(filename="out_attention.png"))
display.clear_output(wait=True)
# NOTICE - THIS MIGHT TAKE UPTO 30 MINUTES ON CPU..!
except KeyboardInterrupt:
pass
#plot of validation accuracy for each target position
plt.figure(figsize=(7,7))
plt.plot(np.mean(np.argmax(output_val,axis=2)==t_out_val,axis=0))
plt.ylabel('Accuracy', fontsize=15)
plt.xlabel('Target position', fontsize=15)
#plt.title('', fontsize=20)
plt.grid('on')
plt.show()
#why do the plot look like this?
### attention plot, try with different i = 1, 2, ..., 1000
i = 42
column_labels = map(str, list(t_out_val[i]))
row_labels = map(str, (list(X_val[i])))
data = alp_val[i]
fig, ax = plt.subplots()
heatmap = ax.pcolor(data, cmap=plt.cm.Blues)
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(data.shape[1])+0.5, minor=False)
ax.set_yticks(np.arange(data.shape[0])+0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
plt.ylabel('output', fontsize=15)
plt.xlabel('Attention plot', fontsize=15)
plt.show()
# Plot of average attention weight as a function of the sequence position for each of
# the 21 targets in the output sequence i.e. each line is the mean postion of the
# attention for each target position.
np.mean(alp_val, axis=0).shape
plt.figure()
plt.plot(np.mean(alp_val, axis=0).T)
plt.ylabel('alpha', fontsize=15)
plt.xlabel('Input Sequence position', fontsize=15)
plt.title('Alpha weights', fontsize=20)
plt.legend(map(str,range(1,22)), bbox_to_anchor=(1.125,1.0), fontsize=10)
plt.show()
## Save model
# Read more about saving and loading models at https://www.tensorflow.org/programmers_guide/saved_model
# Save model
save_path = tf.train.Saver().save(sess, "/tmp/model.ckpt")
print("Model saved in file: %s" % save_path)
sess.close()
```
## Assignments for the attention decoder
1. Explain what the attention plot shows.
2. Explain what the alpha weights plot shows.
3. Why are the alpha curve for the first digit narrow and peaked while later digits have alpha curves that are wider and less peaked?
4. Why is attention a good idea for this problem? Can you think of other problems where attention is a good choice?
1. Compare the performance and training time (number of samples processed) for the models with and without attention.
5. Try setting MIN_DIGITS and MAX_DIGITS to 20
6. Enable gradient clipping (under the loss codeblock)
| github_jupyter |
# eICU Experiments
```
import tensorflow as tf
import numpy as np
import h5py
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
import sklearn
from sklearn import metrics
import seaborn as sns
import random
```
Follow Read-me instruction to download the medical data.
After having downloaded the data in '../data/eICU_data.h5', upload the data:
```
def get_data(test=True):
    """Load the eICU time-series (x) and endpoints (y) from HDF5 and split.

    The first 85% of the file is split 80/20 into train/validation with a
    fixed seed; when `test` is True the validation set is replaced by the
    held-out final 15% of the data.

    Returns:
    data_train, data_val, y_train, endpoints_total_val
    """
    # Context manager guarantees the HDF5 handle is closed after reading.
    with h5py.File('../data/eICU_data.h5', 'r') as hf:
        data_total = np.array(hf.get('x'))
        endpoints_total = np.array(hf.get('y'))
    cutoff = int(len(data_total) * 0.85)
    data_train, data_val, y_train, endpoints_total_val = train_test_split(
        data_total[:cutoff],
        endpoints_total[:cutoff],
        test_size=0.20,
        random_state=42)
    if test:
        # Evaluate on the held-out final 15% instead of the validation split.
        data_val = data_total[cutoff:]
        endpoints_total_val = endpoints_total[cutoff:]
    return data_train, data_val, y_train, endpoints_total_val
def batch_generator(data_train, data_val, endpoints_total_val, batch_size, mode="train"):
    """Yield mini-batches forever, cycling over the chosen split.

    mode="train" yields (time_series, batch_index);
    mode="val" additionally yields the endpoint labels:
    (time_series, time_series_endpoint, batch_index).
    Any trailing partial batch is dropped.
    """
    while True:
        if mode == "train":
            n_batches = len(data_train) // batch_size
            for idx in range(n_batches):
                lo = idx * batch_size
                yield data_train[lo:lo + batch_size], idx
        elif mode == "val":
            n_batches = len(data_val) // batch_size
            for idx in range(n_batches):
                lo = idx * batch_size
                hi = lo + batch_size
                yield data_val[lo:hi], endpoints_total_val[lo:hi], idx
        else:
            raise ValueError("The mode has to be in {train, val}")
```
Insert the name of the job in ex_name:
```
ex_name="hyperopt_LSTM_20_16-16_2020-02-17_35a17"
```
Get the data:
```
batch_size=128
modelpath = "../models/{}/{}".format(ex_name, ex_name)
data_train, data_val, endpoints_total_train, endpoints_total_val = get_data(test=True)
```
## Create heat-maps, trajectories and probability distributions
```
som_dim = [16,16]
latent_dim=10
val_gen = batch_generator(data_train, data_val, endpoints_total_val, 300, mode="val")
num_batches = len(data_val) // 300
tf.reset_default_graph()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph(modelpath+".meta")
saver.restore(sess, modelpath)
graph = tf.get_default_graph()
k = graph.get_tensor_by_name("k/k:0")
z_e = graph.get_tensor_by_name("z_e_sample/z_e:0")
x = graph.get_tensor_by_name("inputs/x:0")
is_training = graph.get_tensor_by_name("is_training/is_training:0")
graph = tf.get_default_graph()
z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0")
q = graph.get_tensor_by_name("q/distribution/q:0")
embeddings = graph.get_tensor_by_name("embeddings/embeddings:0")
reconstruction = graph.get_tensor_by_name("reconstruction_e/x_hat:0")
print("Evaluation...")
test_k_all = []
labels_val_all = []
z_e_all=[]
z_q_all = []
qq = []
for i in range(num_batches):
batch_data, batch_labels, ii = next(val_gen)
f_dic = {x: batch_data}
test_k_all.extend(sess.run(k, feed_dict=f_dic))
labels_val_all.extend(batch_labels)
z_q_all.extend(sess.run(q, feed_dict=f_dic))
qq.extend(sess.run(q, feed_dict=f_dic))
z_e_all.extend(sess.run(z_e, feed_dict=f_dic))
labels_val_all = np.array(labels_val_all)
k_all = np.array(test_k_all)
qq = np.array(qq)
labels_val_all = np.reshape(labels_val_all, (-1, labels_val_all.shape[-1]))
NMI_24 = metrics.normalized_mutual_info_score(labels_val_all[:, 3], k_all)
NMI_12 = metrics.normalized_mutual_info_score(labels_val_all[:, 2], k_all)
NMI_6 = metrics.normalized_mutual_info_score(labels_val_all[:, 1], k_all)
NMI_1 = metrics.normalized_mutual_info_score(labels_val_all[:, 0], k_all)
embb = sess.run(embeddings, feed_dict={x: data_val})
```
Get the labels:
```
labels_12 = labels_val_all[:,2]
labels_1 = labels_val_all[:,0]
labels_6 = labels_val_all[:,1]
labels_24 = labels_val_all[:,3]
hosp_disc_1 = labels_val_all[:,4]
hosp_disc_6 = labels_val_all[:,5]
hosp_disc_12 = labels_val_all[:,6]
hosp_disc_24 = labels_val_all[:,7]
u_disc_1 = labels_val_all[:,8]
u_disc_6 = labels_val_all[:,9]
u_disc_12 = labels_val_all[:,10]
u_disc_24 = labels_val_all[:, 11]
labels_1 = labels_1.astype(int)
labels_6 = labels_6.astype(int)
labels_12 = labels_12.astype(int)
labels_24 = labels_24.astype(int)
hosp_disc_12 = hosp_disc_12.astype(int)
hosp_disc_24 = hosp_disc_24.astype(int)
hosp_disc_1 = hosp_disc_1.astype(int)
hosp_disc_6 = hosp_disc_6.astype(int)
```
### Moran Index
```
sd = som_dim[0]*som_dim[1]
mean = np.sum(labels_val_all[:, 0]) / len(labels_val_all[:, 0])
ones = np.ones((len(np.reshape(k_all, (-1)))))
clust_matr1 = np.zeros(som_dim[0]*som_dim[1])
labels= labels_val_all[:, 0]
for i in range(som_dim[0]*som_dim[1]):
dd = np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])
if dd == 0:
s1 = 0
else:
s1 = np.sum(labels[np.where(np.reshape(k_all, (-1))==i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])
clust_matr1[i] = s1
k = np.arange(0,sd)
k1 = k // som_dim[0]
k2 = k % som_dim[0]
W = np.zeros((sd,sd))
for i in range(sd):
for j in range(sd):
d1 = np.abs((k1[i] - k1[j]))
d2 = np.abs((k2[i] - k2[j]))
d1 = min(som_dim[0]-d1, d1)
d2 = min(som_dim[0]-d2, d2)
W[i,j] = np.exp(-(d1+d2))
if i==j:
W[i,j]=0
M = 0
N_n = 0
for i in range(sd):
for j in range(sd):
M += (clust_matr1[i] -mean)*(clust_matr1[j] -mean)* W[i,j]
for i in range(sd):
N_n += (clust_matr1[i]-mean)**2
W_n = np.sum(W)
I = M * sd / (N_n*W_n)
print(I)
```
### APACHE score heatmap:
```
labels = labels_1
ones = np.ones((len(np.reshape(k_all, (-1)))))
clust_matr1 = np.zeros(som_dim[0]*som_dim[1])
clust_matr2 = np.zeros(som_dim[0]*som_dim[1])
for i in range(som_dim[0]*som_dim[1]):
s1 = np.sum(labels[np.where(np.reshape(k_all, (-1))==i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])
clust_matr1[i] = s1
clust_matr1 = np.reshape(clust_matr1, (som_dim[0],som_dim[1]))
ax = sns.heatmap(clust_matr1, cmap="YlGnBu")
plt.show()
```
### Trajectories
```
T = []
S = []
for i in range(1000):
h = np.reshape(u_disc_1, (-1,72))
if np.max(h[i]) == 1:
T.append(i)
else:
S.append(i)
ind_r = np.random.random_integers(0, 50, 10)
ind_s = np.random.random_integers(0, 50, 10)
T = np.array(T)
S = np.array(S)
a = np.concatenate([S[ind_s], T[ind_r]])
k_all.shape
labels = labels_1
it = 0
k_all = np.reshape(k_all, (-1,72))
fig, ax = plt.subplots(5, 4, figsize=(50,43))
ones = np.ones((len(np.reshape(k_all, (-1)))))
clust_matr1 = np.zeros(som_dim[0]*som_dim[1])
clust_matr2 = np.zeros(som_dim[0]*som_dim[1])
for i in range(som_dim[0]*som_dim[1]):
s1 = np.sum(labels[np.where(np.reshape(k_all, (-1)) == i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])
clust_matr1[i] = s1
clust_matr1 = np.reshape(clust_matr1, (som_dim[0],som_dim[1]))
for t in a:
#fig, ax = plt.subplots(figsize=(10,7.5))
if it > 9:
c = "r"
#print(t)
else:
c = "g"
cc = it % 4
rr = it // 4
g = sns.heatmap(clust_matr1, cmap="YlGnBu",ax=ax[rr][cc])
k_1 = k_all[t] // som_dim[1]
k_2 = k_all[t] % som_dim[1]
ax[rr][cc].plot(k_2[:] + 0.5, k_1[:] + 0.5, color=c, linewidth=4)
ax[rr][cc].scatter(k_2[0] + 0.5, k_1[0] + 0.5, color=c, s=200, label='Start')
ax[rr][cc].scatter(k_2[1:-1] + 0.5, k_1[1:-1] + 0.5, color=c, linewidth=5, marker='.')
ax[rr][cc].scatter(k_2[-1] + 0.5, k_1[-1] + 0.5, color=c, s=500, linewidth=4, marker='x', label='End')
ax[rr][cc].legend(loc=2, prop={'size': 20})
it +=1
plt.show()
```
Probability distribution over trajectory:
```
qq.shape
prob_q = np.reshape(qq, (-1, 72, som_dim[0]*som_dim[1]))
i = np.random.randint(0, 50) #Randomly sampled patient
it = 0
fig, ax = plt.subplots(2, 3, figsize=(50,25))
k_all = np.reshape(k_all, (-1,72))
for t in [0, 17, 40, 57, 64, 71]:
cc = it % 3
rr = it // 3
k_1 = k_all[i] // som_dim[1]
k_2 = k_all[i] % som_dim[1]
c = "black"
g1 = sns.heatmap(np.reshape(prob_q[i, t], (som_dim[0],som_dim[1])), cmap='Reds', alpha=1, ax=ax[rr][cc])
ax[rr][cc].plot(k_2[:] + 0.5, k_1[:] + 0.5, color=c, linewidth=6)
ax[rr][cc].scatter(k_2[0] + 0.5, k_1[0] + 0.5, color=c, s=800, label='Start')
ax[rr][cc].scatter(k_2[1:-1] + 0.5, k_1[1:-1] + 0.5, color=c, linewidth=10, marker='.')
ax[rr][cc].scatter(k_2[-1] + 0.5, k_1[-1] + 0.5, color=c, s=1200, linewidth=10, marker='x', label='End')
ax[rr][cc].legend(loc=2, prop={'size': 30})
ax[rr][cc].set_title("Time-step = {}".format(it*14), fontsize=40)
it +=1
plt.show()
```
## Unrolling future time-steps and prediction
```
def z_dist_flat(z_e, embeddings, som_shape=None, z_dim=None):
    """Computes the squared Euclidean distances between the encodings and
    every SOM embedding.

    Arguments:
    z_e -- encodings, array of shape [num_samples, z_dim]
    embeddings -- SOM embeddings, reshapeable to
                  [som_shape[0] * som_shape[1], z_dim]
    som_shape -- [rows, cols] of the SOM grid; defaults to the module-level
                 `som_dim` for backward compatibility
    z_dim -- latent dimensionality; defaults to the module-level `latent_dim`

    Returns:
    array of shape [num_samples, som_shape[0] * som_shape[1]] holding the
    per-node sums of squared coordinate differences.
    """
    # Backward-compatible generalization: the original hard-coded the
    # notebook globals som_dim / latent_dim; they remain the defaults.
    if som_shape is None:
        som_shape = som_dim
    if z_dim is None:
        z_dim = latent_dim
    num_nodes = som_shape[0] * som_shape[1]
    emb = np.reshape(embeddings, (num_nodes, -1))
    # Broadcast each encoding against every SOM node.
    z = np.reshape(z_e, (z_e.shape[0], 1, z_dim))
    z = np.tile(z, [1, num_nodes, 1])
    z_dist = np.square(z - emb)
    z_dist_red = np.sum(z_dist, axis=-1)
    return z_dist_red
val_gen = batch_generator(data_train, data_val, endpoints_total_val, 300, mode="val")
tf.reset_default_graph()
num_batches = len(data_val) // 300
latent_dim = 20
num_pred = 6
som = 16*16
max_n_step = 72
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph(modelpath+".meta")
saver.restore(sess, modelpath)
graph = tf.get_default_graph()
k = graph.get_tensor_by_name("k/k:0")
z_e = graph.get_tensor_by_name("z_e_sample/z_e:0")
next_z_e = graph.get_tensor_by_name("prediction/next_z_e:0")
x = graph.get_tensor_by_name("inputs/x:0")
is_training = graph.get_tensor_by_name("is_training/is_training:0")
graph = tf.get_default_graph()
init_1 = graph.get_tensor_by_name("prediction/next_state/init_state:0")
z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0")
state1 = graph.get_tensor_by_name("prediction/next_state/next_state:0")
q = graph.get_tensor_by_name("q/distribution/q:0")
embeddings = graph.get_tensor_by_name("embeddings/embeddings:0")
z_p = graph.get_tensor_by_name('reconstruction_e/decoder/z_e:0')
reconstruction = graph.get_tensor_by_name("reconstruction_e/x_hat:0")
print("Evaluation...")
training_dic = {is_training: True, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)),
init_1: np.zeros((2, batch_size, 100)), z_p: np.zeros((max_n_step * len(data_val), latent_dim))}
k_all = []
z_e_all=[]
z_q_all = []
qq = []
x_rec = []
for i in range(num_batches):
batch_data, batch_labels, ii = next(val_gen)
f_dic = {x: batch_data}
k_all.extend(sess.run(k, feed_dict=f_dic))
z_q_all.extend(sess.run(q, feed_dict=f_dic))
z_e_all.extend(sess.run(z_e, feed_dict=f_dic))
qq.extend(sess.run(q, feed_dict=f_dic))
f_dic.update(training_dic)
x_rec.extend(sess.run(reconstruction, feed_dict=f_dic))
z_e_all = np.array(z_e_all)
k_all = np.array(k_all)
qq = np.array(qq)
x_rec = np.array(x_rec)
z_e_all = z_e_all.reshape((-1, max_n_step, latent_dim))
k_all = k_all.reshape((-1, max_n_step))
t = 72-num_pred
embeddings = sess.run(embeddings, feed_dict={x: data_val[:, :t, :]})
embeddings = np.reshape(embeddings,(-1, latent_dim))
z_e_o = z_e_all[:, :t, :]
k_o = k_all[:, :t]
k_eval=[]
next_z_e_o = []
state1_o =[]
for i in range(num_batches):
batch_data, batch_labels, ii = next(val_gen)
batch_data=batch_data[:, :t, :]
f_dic = {x: batch_data}
f_dic.update(training_dic)
next_z_e_o.extend(sess.run(next_z_e, feed_dict=f_dic))
if i == 0:
state1_o = sess.run(state1, feed_dict=f_dic)
else:
state1_o = np.concatenate([state1_o, sess.run(state1, feed_dict=f_dic)], axis=1)
next_z_e_o = np.array(next_z_e_o)
state1_o = np.array(state1_o)
next_z_e_o_all = np.reshape(next_z_e_o[:, -1, :], (-1,1,latent_dim))
next_z_e_o = next_z_e_o[:, -1, :]
k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1)
k_o = np.concatenate([k_o, np.expand_dims(k_next,1)], axis=1)
z_e_o = np.concatenate([z_e_o, np.expand_dims(next_z_e_o, 1)], axis=1)
f_dic = {x: np.zeros((len(data_val),1, 98)), is_training: False, z_e_p: np.zeros((1 * len(data_val), latent_dim)),
z_p: next_z_e_o, init_1: np.zeros((2, batch_size, 100))}
x_pred_hat = np.reshape(sess.run(reconstruction, feed_dict=f_dic), (-1, 1, 98))
for i in range(num_pred-1):
print(i)
inp = data_val[:1500, (t + i), :]
f_dic = {x: np.reshape(inp, (inp.shape[0],1,inp.shape[1]))}
val_dic = {is_training: False, z_e_p: next_z_e_o, init_1: state1_o, z_p: np.zeros((max_n_step * len(inp), latent_dim))}
f_dic.update(val_dic)
next_z_e_o = sess.run(next_z_e, feed_dict=f_dic)
state1_o = sess.run(state1, feed_dict=f_dic)
next_z_e_o_all = np.concatenate([next_z_e_o_all, next_z_e_o], axis=1)
k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1)
k_o = np.concatenate([k_o, np.expand_dims(k_next,1)], axis=1)
z_e_o = np.concatenate([z_e_o, next_z_e_o], axis=1)
next_z_e_o = np.reshape(next_z_e_o, (-1, latent_dim))
f_dic = {x: np.zeros((len(data_val),1, 98)), is_training: False, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)),
z_p: next_z_e_o, init_1: np.zeros((2, batch_size, 100))}
final_x = sess.run(reconstruction, feed_dict=f_dic)
x_pred_hat = np.concatenate([x_pred_hat, np.reshape(final_x, (-1, 1, 98))], axis = 1)
f_dic = {x: np.zeros((1500,1, 98)), is_training: False, z_e_p: np.zeros((max_n_step * 1500, latent_dim)),
z_p: z_e_all[:, t-1, :], init_1: np.zeros((2, batch_size, 100))}
final_x = sess.run(reconstruction, feed_dict=f_dic)
sklearn.metrics.mean_squared_error(np.reshape(x_pred_hat, (-1, 98)), np.reshape(data_val[:1500, -num_pred:], (-1, 98)))
```
Accuracy of unrolled state:
```
# Fraction of predicted SOM nodes matching the true nodes over the unrolled
# (last num_pred) time-steps.
k_true = np.reshape(k_all[:, -num_pred:], (-1))
k_pred = np.reshape(k_o[:, -num_pred:], (-1))
# Vectorized equality replaces the element-wise Python counting loop;
# the result is the same mean match rate as before.
acc = float(np.mean(k_true == k_pred))
acc
```
| github_jupyter |
## Precision-Recall-Curves
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_precision_recall_curve
from yellowbrick.classifier import PrecisionRecallCurve
```
## Load data
```
# load data
data = pd.read_csv('../kdd2004.csv')
# remap target class to 0 and 1
data['target'] = data['target'].map({-1:0, 1:1})
data.head()
# data size
data.shape
# imbalanced target
data.target.value_counts() / len(data)
# separate dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels=['target'], axis=1), # drop the target
data['target'], # just the target
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
```
## Train ML models
### Random Forests
```
# Fit a random forest of 100 shallow (depth-2) trees and keep the predicted
# probability of the positive class (column 1) for train and test.
rf = RandomForestClassifier(n_estimators=100, random_state=39, max_depth=2, n_jobs=4)
rf.fit(X_train, y_train)
y_train_rf = rf.predict_proba(X_train)[:,1]
y_test_rf = rf.predict_proba(X_test)[:,1]
```
### Logistic Regression
```
# Logistic-regression baseline; max_iter raised to 1000 so the solver can
# converge. Keep the positive-class probabilities for train and test.
logit = LogisticRegression(random_state=0, max_iter=1000)
logit.fit(X_train, y_train)
y_train_logit = logit.predict_proba(X_train)[:,1]
y_test_logit = logit.predict_proba(X_test)[:,1]
```
## Precision-Recall Curve
### Sklearn
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html#sklearn.metrics.plot_precision_recall_curve
```
rf_disp = plot_precision_recall_curve(rf, X_test, y_test)
logit_disp = plot_precision_recall_curve(logit, X_test, y_test)
ax = plt.gca()
rf_disp.plot(ax=ax, alpha=0.8)
logit_disp.plot(ax=ax, alpha=0.8)
```
### Yellowbrick
https://www.scikit-yb.org/en/latest/api/classifier/prcurve.html
```
visualizer = PrecisionRecallCurve(rf, classes=[0, 1])
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
visualizer = PrecisionRecallCurve(logit, classes=[0, 1])
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
```
## Additional Reading
https://towardsdatascience.com/on-roc-and-precision-recall-curves-c23e9b63820c
| github_jupyter |
# TensorFlow Tutorial
Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow:
- Initialize variables
- Start your own session
- Train algorithms
- Implement a Neural Network
Programming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code.
## 1 - Exploring the Tensorflow Library
To start, you will import the library:
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
```
Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
$$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$
```
y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y') # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss
init = tf.global_variables_initializer() # When init is run later (session.run(init)),
# the loss variable will be initialized and ready to be computed
with tf.Session() as session: # Create a session and print the output
session.run(init) # Initializes the variables
print(session.run(loss)) # Prints the loss
```
Writing and running programs in TensorFlow has the following steps:
1. Create Tensors (variables) that are not yet executed/evaluated.
2. Write operations between those Tensors.
3. Initialize your Tensors.
4. Create a Session.
5. Run the Session. This will run the operations you'd written above.
Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.
Now let us look at an easy example. Run the cell below:
```
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
```
As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
```
sess = tf.Session()
print(sess.run(c))
```
Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**.
Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later.
To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session.
```
# Change the value of x in the feed_dict
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
```
When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session.
Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.
### 1.1 - Linear function
Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector.
**Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):
```python
X = tf.constant(np.random.randn(3,1), name = "X")
```
You might find the following functions helpful:
- tf.matmul(..., ...) to do a matrix multiplication
- tf.add(..., ...) to do an addition
- np.random.randn(...) to initialize randomly
```
# GRADED FUNCTION: linear_function
def linear_function():
    """
    Implements a linear function:
            Initializes W to be a random tensor of shape (4,3)
            Initializes X to be a random tensor of shape (3,1)
            Initializes b to be a random tensor of shape (4,1)
    Returns:
    result -- runs the session for Y = WX + b
    """
    np.random.seed(1)  # fixed seed so the "random" draws match the expected output

    # Build the computation graph for Y = WX + b.
    X = np.random.randn(3, 1)
    W = np.random.randn(4, 3)
    b = np.random.randn(4, 1)
    Y = tf.add(tf.matmul(W, X), b)

    # Evaluate the graph; the context manager closes the session for us.
    with tf.Session() as sess:
        result = sess.run(Y)

    return result
print( "result = " + str(linear_function()))
```
*** Expected Output ***:
<table>
<tr>
<td>
**result**
</td>
<td>
[[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
</td>
</tr>
</table>
### 1.2 - Computing the sigmoid
Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input.
You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session.
** Exercise **: Implement the sigmoid function below. You should use the following:
- `tf.placeholder(tf.float32, name = "...")`
- `tf.sigmoid(...)`
- `sess.run(..., feed_dict = {x: z})`
Note that there are two typical ways to create and use sessions in tensorflow:
**Method 1:**
```python
sess = tf.Session()
# Run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
sess.close() # Close the session
```
**Method 2:**
```python
with tf.Session() as sess:
# run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
# This takes care of closing the session for you :)
```
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Computes the sigmoid of z

    Arguments:
    z -- input value, scalar or vector

    Returns:
    results -- the sigmoid of z
    """
    # Placeholder 'x' receives the value of z at run time through feed_dict.
    x = tf.placeholder(tf.float32, name="x")
    sig = tf.sigmoid(x)

    # Method 2: the with-block closes the session automatically.
    with tf.Session() as sess:
        result = sess.run(sig, feed_dict={x: z})

    return result
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
```
*** Expected Output ***:
<table>
<tr>
<td>
**sigmoid(0)**
</td>
<td>
0.5
</td>
</tr>
<tr>
<td>
**sigmoid(12)**
</td>
<td>
0.999994
</td>
</tr>
</table>
<font color='blue'>
**To summarize, you now know how to**:
1. Create placeholders
2. Specify the computation graph corresponding to operations you want to compute
3. Create the session
4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values.
### 1.3 - Computing the Cost
You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m:
$$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$
you can do it in one line of code in tensorflow!
**Exercise**: Implement the cross entropy loss. The function you will use is:
- `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`
Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes
$$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$
```
# GRADED FUNCTION: cost
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy

    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)

    Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
    in the TensorFlow documentation. So logits will feed into z, and labels into y.

    Returns:
    cost -- runs the session of the cost (formula (2))
    """
    # Placeholders for the logits (z) and the true labels (y).
    z = tf.placeholder(tf.float32, name="z")
    y = tf.placeholder(tf.float32, name="y")

    # Element-wise sigmoid cross-entropy loss.
    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)

    # Evaluate the loss, feeding the actual logits/labels into the placeholders.
    with tf.Session() as sess:
        cost = sess.run(loss, feed_dict={z: logits, y: labels})

    return cost
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
```
** Expected Output** :
<table>
<tr>
<td>
**cost**
</td>
<td>
[ 1.00538719 1.03664088 0.41385433 0.39956614]
</td>
</tr>
</table>
### 1.4 - Using One Hot encodings
Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:
<img src="images/onehot.png" style="width:600px;height:150px;">
This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code:
- tf.one_hot(labels, depth, axis)
**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this.
```
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
    will be 1.

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension

    Returns:
    one_hot -- one hot matrix
    """
    # Constant holding the depth of the one-hot dimension, named 'C' in the graph.
    depth = tf.constant(C, name='C')

    # axis=0 puts classes along rows and examples along columns.
    encoded = tf.one_hot(labels, depth, axis=0)

    with tf.Session() as sess:
        one_hot = sess.run(encoded)

    return one_hot
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
```
**Expected Output**:
<table>
<tr>
<td>
**one_hot**
</td>
<td>
[[ 0. 0. 0. 1. 0. 0.]
[ 1. 0. 0. 0. 0. 1.]
[ 0. 1. 0. 0. 1. 0.]
[ 0. 0. 1. 0. 0. 0.]]
</td>
</tr>
</table>
### 1.5 - Initialize with zeros and ones
Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively.
**Exercise:** Implement the function below to take in a shape and return an array of ones with that shape.
- tf.ones(shape)
```
# GRADED FUNCTION: ones
def ones(shape):
    """
    Creates an array of ones of dimension shape

    Arguments:
    shape -- shape of the array you want to create

    Returns:
    ones -- array containing only ones
    """
    # Build the ones tensor and evaluate it in a session that auto-closes.
    with tf.Session() as sess:
        result = sess.run(tf.ones(shape))

    return result
print ("ones = " + str(ones([3])))
```
**Expected Output:**
<table>
<tr>
<td>
**ones**
</td>
<td>
[ 1. 1. 1.]
</td>
</tr>
</table>
# 2 - Building your first neural network in tensorflow
In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:
- Create the computation graph
- Run the graph
Let's delve into the problem you'd like to solve!
### 2.0 - Problem statement: SIGNS Dataset
One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.
- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).
- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).
Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.
Here are examples for each number, and an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolution to 64 by 64 pixels.
<img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>
Run the following code to load the dataset.
```
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
```
Change the index below and run the cell to visualize some examples in the dataset.
```
# Example of a picture
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
```
As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
```
# Flatten the training and test images: (m, 64, 64, 3) -> (12288, m),
# one column per example (12288 = 64 * 64 * 3).
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors to [0, 1] (raw pixel values are 0-255).
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices (6 classes, signs 0-5).
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
**Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one.
**The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes.
### 2.1 - Create placeholders
Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session.
**Exercise:** Implement the function below to create the placeholders in tensorflow.
```
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)

    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"

    Tips:
    - You will use None because it lets us be flexible on the number of examples you will use for the placeholders.
      In fact, the number of examples during test/train is different.
    """
    # None in the second dimension leaves the number of examples unspecified.
    X = tf.placeholder(dtype=tf.float32, shape=(n_x, None), name="X")
    Y = tf.placeholder(dtype=tf.float32, shape=(n_y, None), name="Y")

    return X, Y
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
```
**Expected Output**:
<table>
<tr>
<td>
**X**
</td>
<td>
Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)
</td>
</tr>
<tr>
<td>
**Y**
</td>
<td>
Tensor("Placeholder_2:0", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2)
</td>
</tr>
</table>
### 2.2 - Initializing the parameters
Your second task is to initialize the parameters in tensorflow.
**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use:
```python
W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())
```
Please use `seed = 1` to make sure your results match ours.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
                        W1 : [25, 12288]
                        b1 : [25, 1]
                        W2 : [12, 25]
                        b2 : [12, 1]
                        W3 : [6, 12]
                        b3 : [6, 1]

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)                    # so that your "random" numbers match ours

    # (layer index, fan-out, fan-in) for each of the three linear layers.
    layer_dims = [(1, 25, 12288), (2, 12, 25), (3, 6, 12)]

    parameters = {}
    for l, n_out, n_in in layer_dims:
        # Xavier initialization (seed fixed per variable) for the weights,
        # zeros for the biases.
        parameters["W" + str(l)] = tf.get_variable(
            "W" + str(l), [n_out, n_in],
            initializer=tf.contrib.layers.xavier_initializer(seed=1))
        parameters["b" + str(l)] = tf.get_variable(
            "b" + str(l), [n_out, 1],
            initializer=tf.zeros_initializer())

    return parameters
tf.reset_default_graph()
with tf.Session() as sess:
    parameters = initialize_parameters()
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table>
<tr>
<td>
**W1**
</td>
<td>
< tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
< tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
< tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
< tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >
</td>
</tr>
</table>
As expected, the parameters haven't been evaluated yet.
### 2.3 - Forward propagation in tensorflow
You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are:
- `tf.add(...,...)` to do an addition
- `tf.matmul(...,...)` to do a matrix multiplication
- `tf.nn.relu(...)` to apply the ReLU activation
**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!
```
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    # Two LINEAR -> RELU layers, then a final LINEAR layer. The softmax is
    # applied later, inside the cost function, so we stop at Z3.
    A = X
    for l in (1, 2):
        Z = tf.add(tf.matmul(parameters["W" + str(l)], A), parameters["b" + str(l)])
        A = tf.nn.relu(Z)

    Z3 = tf.add(tf.matmul(parameters["W3"], A), parameters["b3"])  # Z3 = np.dot(W3, A2) + b3

    return Z3
tf.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    print("Z3 = " + str(Z3))
```
**Expected Output**:
<table>
<tr>
<td>
**Z3**
</td>
<td>
Tensor("Add_2:0", shape=(6, ?), dtype=float32)
</td>
</tr>
</table>
You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to backpropagation.
### 2.4 Compute cost
As seen before, it is very easy to compute the cost using:
```python
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))
```
**Question**: Implement the cost function below.
- It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.
- Besides, `tf.reduce_mean` basically does the summation over the examples.
```
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function
    """
    # tf.nn.softmax_cross_entropy_with_logits expects shape
    # (number of examples, num_classes), hence the transposes.
    logits, labels = tf.transpose(Z3), tf.transpose(Y)

    # Mean over examples of the per-example softmax cross-entropy.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    return cost
tf.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    print("cost = " + str(cost))
```
**Expected Output**:
<table>
<tr>
<td>
**cost**
</td>
<td>
Tensor("Mean:0", shape=(), dtype=float32)
</td>
</tr>
</table>
### 2.5 - Backward propagation & parameter updates
This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.
After you compute the cost function, you will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.
For instance, for gradient descent the optimizer would be:
```python
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
```
To make the optimization you would do:
```python
_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
```
This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.
**Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable).
### 2.6 - Building the model
Now, you will bring it all together!
**Exercise:** Implement the model. You will be calling the functions you had previously implemented.
```
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep consistent results
    seed = 3                                          # to keep consistent results
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_x, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.                       # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1                       # new seed per epoch so minibatch shuffling differs each epoch
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).
                ### START CODE HERE ### (1 line)
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                ### END CODE HERE ###

                # Average the minibatch costs into the epoch cost
                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable (evaluates the tensors to numpy arrays)
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions (argmax over the class axis)
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
```
Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
```
parameters = model(X_train, Y_train, X_test, Y_test)
```
**Expected Output**:
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.999074
</td>
</tr>
<tr>
<td>
**Test Accuracy**
</td>
<td>
0.716667
</td>
</tr>
</table>
Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.
**Insights**:
- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting.
- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.
### 2.7 - Test with your own image (optional / ungraded exercise)
Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right!
```
import scipy
from PIL import Image
from scipy import ndimage

## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##

# We preprocess your image to fit your algorithm.
fname = "images/" + my_image
# NOTE: scipy.ndimage.imread and scipy.misc.imresize were removed in SciPy >= 1.2;
# load and resize with Pillow (already imported above) instead.
image = np.array(Image.open(fname))
my_image = np.array(Image.open(fname).resize((64, 64))).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)

plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
```
You indeed deserved a "thumbs-up" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up", so the model doesn't know how to deal with it! We call that a "mismatched data distribution" and it is one of the topics covered in the next course on "Structuring Machine Learning Projects".
<font color='blue'>
**What you should remember**:
- Tensorflow is a programming framework used in deep learning
- The two main object classes in tensorflow are Tensors and Operators.
- When you code in tensorflow you have to take the following steps:
- Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)
- Create a session
- Initialize the session
- Run the session to execute the graph
- You can execute the graph multiple times as you've seen in model()
- The backpropagation and optimization is automatically done when running the session on the "optimizer" object.
| github_jupyter |
# 1D Variability hypothesis testing for HBEC IFN experiment
```
import scanpy as sc
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from pybedtools import BedTool
import pickle as pkl
%matplotlib inline
import sys
sys.path.append('/home/ssm-user/Github/scrna-parameter-estimation/dist/memento-0.0.6-py3.8.egg')
sys.path.append('/home/ssm-user/Github/misc-seq/miscseq/')
import encode
import memento
data_path = '/data_volume/memento/hbec/'
```
### Read the processed RNA data
Focus on the club and bc/club cells and type I interferons for now.
Encode the timestamps to integers.
```
# Load the filtered HBEC type-I-interferon count matrix (AnnData format).
adata = sc.read(data_path + 'HBEC_type_I_filtered_counts_deep.h5ad')
# Drop mitochondrial genes (gene names prefixed 'MT-') before analysis.
adata = adata[:, ~adata.var.index.str.startswith('MT-')].copy()
# adata.obs['cell_type'] = adata.obs['cell_type'].apply(lambda x: x if x != 'basal/club' else 'bc')
# adata.obs['cell_type'] = adata.obs['cell_type'].apply(lambda x: x if x != 'ionocyte/tuft' else 'ion-tuft')
```
```
# Abbreviate the cell-type labels (e.g. 'basal/club' -> 'BC') into a new 'ct' column.
converter = {'basal/club':'BC', 'basal':'B', 'ciliated':'C', 'goblet':'G', 'ionocyte/tuft':'IT', 'neuroendo':'N'}
adata.obs['ct'] = adata.obs['cell_type'].apply(lambda x: converter[x])
```
### Setup memento
```
def assign_q(batch):
    """Return the per-batch q value used by memento.

    Each sequencing batch gets its own base rate (presumably an estimated
    capture/saturation rate -- TODO confirm), scaled by 0.25; batches other
    than 0-2 fall back to 0.417.
    """
    base_rates = {0: 0.387, 1: 0.392, 2: 0.436}
    return base_rates.get(batch, 0.417) * 0.25
# Attach the per-cell 'q' column (derived from sequencing batch) and
# initialize memento with it.
adata.obs['q'] = adata.obs['batch'].apply(assign_q)
memento.setup_memento(adata, q_column='q')
```
### Run memento for each subset, comparing to control
```
# Sweep over all (cell type, time point, stimulus) combinations, running the
# 1D memento hypothesis test of stimulated-vs-control for each subset.
cts = ['C', 'B', 'BC']
tps = ['3', '6', '9', '24', '48']
stims = ['alpha', 'beta', 'gamma', 'lambda']

import os
done_files = os.listdir(data_path + 'binary_test_latest/')

for ct in cts:
    for tp in tps:
        for stim in stims:
            # BUGFIX: the skip-check previously built the name with
            # '-'.join(ct), turning 'BC' into 'B-C', which never matched the
            # files written below ('{ct}_{stim}_{tp}.h5ad') -- so finished
            # 'BC' runs were silently redone. Build one name and use it for
            # both the check and the write.
            fname = '{}_{}_{}.h5ad'.format(ct, stim, tp)
            if fname in done_files:
                print('Skipping', fname)
                continue

            print('starting', ct, tp, stim)

            # Subset to this cell type, control + this stimulus, and time 0 vs tp.
            # (Slice first, then copy -- no need to copy the full AnnData up front.)
            adata_stim = adata[
                adata.obs.ct.isin([ct]) & \
                adata.obs.stim.isin(['control', stim]) & \
                adata.obs.time.isin(['0', tp]), :].copy()

            # Binary treatment encoding: time 0 -> 0, time tp -> 1.
            time_converter = {0: 0, int(tp): 1}
            adata_stim.obs['time_step'] = adata_stim.obs['time'].astype(int).apply(lambda x: time_converter[x])

            memento.create_groups(adata_stim, label_columns=['time_step', 'donor'])
            memento.compute_1d_moments(adata_stim, min_perc_group=.9)
            memento.ht_1d_moments(
                adata_stim,
                formula_like='1 + time_step + donor',
                treatment_col='time_step',
                num_boot=10000,
                verbose=1,
                num_cpus=93,
                resampling='permutation',
                approx=True)

            adata_stim.write(data_path + 'binary_test_latest/' + fname)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.