code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
import matplotlib.pyplot as plt
def model():
    """Solve u'' = -1, u(0)=0, u'(1)=0 symbolically and return u(x)."""
    import sympy as sym

    x, c_0, c_1 = sym.symbols('x c_0 c_1')
    # Integrate twice, introducing one constant per integration.
    du = sym.integrate(1, (x, 0, x)) + c_0
    u = sym.integrate(du, (x, 0, x)) + c_1
    # Apply the boundary conditions u(0)=0 and u'(1)=0.
    constants = sym.solve(
        [u.subs(x, 0), sym.diff(u, x).subs(x, 1)],
        [c_0, c_1])
    u = u.subs(constants)
    return sym.simplify(sym.expand(u))
def midpoint_rule(f, M=100000):
    """Approximate the integral of f over [0, 1] by the midpoint rule on M subintervals."""
    import numpy as np

    dx = 1.0 / M                                  # width of each subinterval
    midpoints = np.linspace(dx / 2, 1 - dx / 2, M)  # subinterval midpoints
    return dx * np.sum(f(midpoints))
def check_integral_b():
    """Print |exact - numerical| for the integral of sin((2i+1)*pi*x/2), i=0..11."""
    from numpy import pi, sin

    for i in range(12):
        k = 2 * i + 1
        exact = 2 / (pi * k)
        # Bind k as a default argument so each lambda keeps its own value.
        approx = midpoint_rule(f=lambda x, k=k: sin(k * pi * x / 2))
        print((i, abs(exact - approx)))
def sine_sum(x, N):
    """Return the list of partial sums u[k], k=0..N, of the sine series.

    u[k](x) = sum_{i=0}^{k} -16/((2i+1)^3*pi^3) * sin((2i+1)*pi*x/2)

    x : numpy array of evaluation points.
    N : highest term index; the returned list has N+1 arrays.
    """
    # Cleanup: removed unused loop counter `k` and unused `zeros` import.
    from numpy import pi, sin

    s = 0
    u = []  # u[k] holds the partial sum through term k
    for i in range(N + 1):
        # Rebinding (rather than in-place +=) makes each partial sum a fresh
        # array; the copy is kept so the returned arrays are independent.
        s = s - 16.0 / ((2 * i + 1) ** 3 * pi ** 3) * sin((2 * i + 1) * pi * x / 2)
        u.append(s.copy())
    return u
def plot_sine_sum():
    """Plot selected partial sums against the exact solution; save tmpc.png/pdf."""
    from numpy import linspace

    x = linspace(0, 1, 501)          # coordinates for plot
    partial_sums = sine_sum(x, N=10)
    exact = 0.5 * x * (x - 2)
    shown = (0, 1, 10)               # which partial sums to display
    for k in shown:
        plt.plot(x, partial_sums[k])
    plt.plot(x, exact)
    plt.legend(['N=%d' % k for k in shown] + ['exact'],
               loc='upper right')
    plt.xlabel('$x$')
    plt.ylabel('$u$')
    plt.savefig('tmpc.png')
    plt.savefig('tmpc.pdf')
def check_integral_d():
    """Print |exact - numerical| for the integral of sin((i+1)*pi*x/2), i=0..23.

    The exact value depends on i mod 4: even i give 2/(pi*(i+1)),
    i = 1, 5, 9, ... give twice that, and the rest vanish.
    """
    from numpy import pi, sin

    for i in range(24):
        n = i + 1
        if i % 2 == 0:
            exact = 2 / (pi * n)
        elif (i - 1) % 4 == 0:
            exact = 2 * 2 / (pi * n)
        else:
            exact = 0
        approx = midpoint_rule(f=lambda x, n=n: sin(n * pi * x / 2))
        print((i, abs(exact - approx)))
def check_integral_d_sympy_answer():
    """Check the sympy-derived value 2/(pi*(i+1)) against midpoint quadrature."""
    from numpy import pi, sin

    for i in range(12):
        n = i + 1
        exact = 2 / (pi * n)
        approx = midpoint_rule(f=lambda x, n=n: sin(n * pi * x / 2))
        print((i, abs(exact - approx)))
def sine_sum_d(x, N):
    """Return partial sums u[k], k=0..N, of the series with basis sin((i+1)*pi*x/2).

    Even i terms get coefficient -16/((i+1)^3*pi^3); i = 1, 5, 9, ... get
    twice that; the remaining odd i contribute nothing.
    """
    # Cleanup: removed unused loop counter `k`, unused `zeros` import,
    # and the no-op `s += 0` branch.
    from numpy import pi, sin

    s = 0
    u = []  # u[k] holds the partial sum through term k
    for i in range(N + 1):
        if i % 2 == 0:  # even i
            s = s - 16.0 / ((i + 1) ** 3 * pi ** 3) * sin((i + 1) * pi * x / 2)
        elif (i - 1) % 4 == 0:  # i = 1, 5, 9, 13, 17, ...
            s = s - 2 * 16.0 / ((i + 1) ** 3 * pi ** 3) * sin((i + 1) * pi * x / 2)
        # other odd i add nothing, but their partial sum is still recorded
        u.append(s.copy())
    return u
def plot_sine_sum_d():
    """Plot selected partial sums of the extended series vs the exact solution; save tmpd.png/pdf."""
    from numpy import linspace

    x = linspace(0, 1, 501)          # coordinates for plot
    partial_sums = sine_sum_d(x, N=20)
    exact = 0.5 * x * (x - 2)
    shown = (0, 1, 2, 3, 20)         # which partial sums to display
    for k in shown:
        plt.plot(x, partial_sums[k])
    plt.plot(x, exact)
    plt.legend(['N=%d' % k for k in shown] + ['exact'],
               loc='upper right')
    plt.xlabel('$x$')
    plt.ylabel('$u$')
    #plt.axis([0.9, 1, -0.52, -0.49])
    plt.savefig('tmpd.png')
    plt.savefig('tmpd.pdf')
if __name__ == '__main__':
    import sys
    # Exact symbolic solution of the boundary-value problem.
    print((model()))
    print('sine 2*i+1 integral:')
    check_integral_b()
    print('sine i+1 integral, sympy answer:')
    check_integral_d_sympy_answer()
    print('sine i+1 integral:')
    check_integral_d()
    #sys.exit(0)
    plot_sine_sum()
    plt.figure()  # open a new figure so the two plots do not overlap
    plot_sine_sum_d()
    plt.show()
```
| github_jupyter |
# XGBoost model for Bike sharing dataset
```
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# preprocessing methods
from sklearn.preprocessing import StandardScaler
# accuracy measures and data splitting
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
# gradient-boosting library (and graphviz for tree plots)
import xgboost as xgb
import graphviz
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = 15, 7  # default figure size for all plots
```
## 1. Data import
```
DATADIR = '../data/bike/'
MODELDIR = '../checkpoints/bike-sharing/xgb/'
data_path = os.path.join(DATADIR, 'bike-sharing-processed.csv')
data = pd.read_csv(data_path)
data.set_index('date', inplace=True)  # index by date so rows can be split chronologically
data.sort_index(inplace=True)
data.head()
# Quick visual check of the target variable.
plt.plot(data.cnt, '.')
plt.title('Bike sharing count')
plt.xlabel('sample id')
plt.ylabel('count')
plt.show()
```
## 2. Train test split
```
# Target is the ride count; all remaining columns are features.
y = data[['cnt']].copy()
X = data.drop(columns=['cnt'], axis=1)
print(f'X and y shape:')
print(X.shape, y.shape)
# date selection
datelist = data.index.unique()
# two month data for testset
print(f'Test start date: {datelist[-61]}')
# Train test split : last 60 days for test set (chronological, no shuffling)
X_train = X[X.index < datelist[-61]]
X_test = X[X.index >= datelist[-61]]
y_train = y[y.index < datelist[-61]]
y_test = y[y.index >= datelist[-61]]
print(f'Size of train and test set respectively:')
print(X_train.shape,X_test.shape, y_train.shape, y_test.shape)
```
## 3. Parameter selection using grid search
```
def xgb_parameter_selection(X, y, grid_param, xgb_param):
    """Grid-search `grid_param` over an XGBRegressor built from `xgb_param`.

    Returns the fitted GridSearchCV object (3-fold cross-validation).
    NOTE(review): reads the module-level `seed`, which is defined in a
    later cell -- this function must only be called after that cell runs.
    """
    xgb_grid = GridSearchCV(estimator=xgb.XGBRegressor(**xgb_param, seed=seed),
                            param_grid=grid_param, cv=3)
    xgb_grid.fit(X, y)
    return xgb_grid
```
### 3.1 Depth and child weight selection
```
seed = 42  # random seed shared by every grid search below
# max depth and child weight selection
grid_param_1 = {'max_depth': [3, 5],
                'min_child_weight': [3, 5, 7]
                }
xgb_param_1 = {'objective' :'reg:linear',
               'silent' : 1,
               'n_estimators': 100,
               'learning_rate' : 0.1}
model_1 = xgb_parameter_selection(X_train, y_train, grid_param_1, xgb_param_1)
# print(f'Best estimator : {model_1.best_estimator_}')
print(f'Best parameter : {model_1.best_params_}')
print(f'Best score : {model_1.best_score_}')
```
### 3.2 colsample_bytree and subsample selection
```
# column and sample selection parameter
grid_param_2 = {'colsample_bytree' : [0.7, 1.0],
'subsample' : [0.8, 1]
}
xgb_param_2 = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight':7,
'n_estimators': 100,
'learning_rate' : 0.1,
'eval_metric' : 'mae' }
model_2 = xgb_parameter_selection(X_train, y_train, grid_param_2, xgb_param_2)
print(f'Best parameter : {model_2.best_params_}')
print(f'Best score : {model_2.best_score_}')
```
### 3.3 gamma selection
```
# gamma selection
grid_param_3 = {'gamma' : [0, 0.1, 0.2, 5]
}
xgb_param_3 = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight': 7,
'n_estimators': 100,
'learning_rate' : 0.1,
'colsample_bytree' : 0.7,
'subsample' : 1}
model_3 = xgb_parameter_selection(X_train, y_train, grid_param_3, xgb_param_3)
print(f'Best parameter : {model_3.best_params_}')
print(f'Best score : {model_3.best_score_}')
```
### 3.4 learning rate
```
# learning_rate selection
grid_param_4 = {'learning_rate' : [0.1, 0.01, 0.001]
}
xgb_param_4 = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight': 7,
'n_estimators': 100,
'learning_rate' : 0.1,
'colsample_bytree' : 0.7,
'subsample' : 1,
'gamma' : 0}
model_4 = xgb_parameter_selection(X_train, y_train, grid_param_4, xgb_param_4)
print(f'Best parameter : {model_4.best_params_}')
print(f'Best score : {model_4.best_score_}')
```
## 4. Final model training
```
# Final hyper-parameters, assembled from the grid-search winners above.
final_param = {'objective' :'reg:linear',
               'silent' : 1,
               'max_depth': 5,
               'min_child_weight': 7,
               'n_estimators': 100,
               'learning_rate' : 0.1,
               'colsample_bytree' : 0.7,
               'subsample' : 1,
               'gamma' : 0,
               'eval_metric' : 'mae'}
def xgb_final(X_train, y_train, param, MODELDIR):
    """Train the final XGBoost regressor and persist it under MODELDIR.

    Saves the booster as 'xgb-v1.model' and returns the fitted model.
    """
    model = xgb.XGBRegressor(**param)
    model.fit(X_train, y_train, verbose=True)
    # Create the checkpoint directory if needed (replaces the exists/pass/else
    # dance; exist_ok avoids a race between the check and the creation).
    os.makedirs(MODELDIR, exist_ok=True)
    model.save_model(os.path.join(MODELDIR, 'xgb-v1.model'))
    return model
model = xgb_final(X_train, y_train, final_param, MODELDIR)
```
## 5. Model evaluation
```
def model_evaluation(X_train, X_test, y_train, y_test):
    """Print MAE and NRMSE for train and test sets; return both prediction arrays.

    NOTE(review): uses the module-level `model` fitted in the previous cell.
    """
    # predict on the train and test features
    y_train_pred = model.predict(X_train)
    y_test_pred = model.predict(X_test)
    # MAE and NRMSE calculation (NRMSE = RMSE normalised by the target's std)
    train_rmse = np.sqrt(mean_squared_error(y_train, y_train_pred))
    train_mae = mean_absolute_error(y_train, y_train_pred)
    train_nrmse = train_rmse/np.std(y_train.values)
    test_rmse = np.sqrt(mean_squared_error(y_test, y_test_pred))
    test_mae = mean_absolute_error(y_test, y_test_pred)
    test_nrmse = test_rmse/np.std(y_test.values)
    print(f'Training MAE: {np.round(train_mae, 3)}')
    print(f'Training NRMSE: {np.round(train_nrmse, 3)}')
    print()
    # Bug fix: test metrics were rounded to 0 decimals (np.round default),
    # unlike the train metrics; report 3 decimals consistently.
    print(f'Test MAE: {np.round(test_mae, 3)}')
    print(f'Test NRMSE: {np.round(test_nrmse, 3)}')
    return y_train_pred, y_test_pred
y_train_pred, y_test_pred = model_evaluation(X_train, X_test, y_train, y_test)
```
## 6. Result plotting
```
# Training set: actual vs predicted counts.
plt.plot(y_train.values, label='actual')
plt.plot(y_train_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on training data using XGBoost')
plt.legend()
plt.tight_layout()
plt.show()
# Test set: actual vs predicted counts.
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using XGBoost', fontsize=14)
plt.legend()
plt.tight_layout()
plt.show()
```
## 7. Variable importance
```
# Feature importance ranking from the trained booster.
xgb.plot_importance(model)
plt.show()
```
| github_jupyter |
# Bubble Sort
```
def sort(lst):
    """Bubble-sort lst in place (ascending) and return it."""
    swapped = True
    while swapped:
        # One pass; stop when a full pass makes no swaps.
        swapped = False
        for i in range(len(lst) - 1):
            if lst[i] > lst[i + 1]:
                lst[i], lst[i + 1] = lst[i + 1], lst[i]
                swapped = True
    return lst
sort([2,13,3,7,1,5])
sort([15,14,19,13,20,4,9])
```
# Selection sort
```
def selection_sort(lst):
    """Selection-sort lst in place (ascending), print it, and return it."""
    for i in range(len(lst) - 1):
        # Find the position of the smallest remaining element.
        min_position = i
        for j in range(i, len(lst)):
            if lst[j] < lst[min_position]:
                min_position = j
        # Tuple swap replaces the temp-variable three-step.
        lst[i], lst[min_position] = lst[min_position], lst[i]
    print(lst)
    return lst  # also returned so the result can be used programmatically
selection_sort([5,3,8,6,7,2])
```
# Write a Python program to find the index of an item in a specified list.
```
lst = [10,30,4,-6]
# Prompt the user for a value; raises ValueError on non-integer input.
num = int(input("Enter a element to find index: "))
if num in lst:
    # list.index returns the position of the first match.
    print("index of number is: ",lst.index(num))
else:
    print("number Not in list")
```
# Write a Python program to remove a key from a dictionary
```
myDict = {'a':1,'b':2,'c':3,'d':4}
print(myDict)
# Remove key 'a' only if present; the default avoids a KeyError.
myDict.pop('a', None)
print(myDict)
```
# Write a Python program to get the key, value and item in a dictionary.
```
dict_num = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
print("key value count")
# Walk the items with a running 1-based position.
for position, pair in enumerate(dict_num.items(), start=1):
    key, value = pair
    print(key,' ',value,' ', position)
```
# Write a Python program to concatenate all elements in a list into a string and return it.
```
def concatenate_list_data(items):
    """Concatenate the str() of every element of items into one string."""
    # Fixes: parameter renamed (it shadowed the builtin `list`) and the
    # quadratic `result += ...` loop replaced with a linear str.join.
    return ''.join(str(element) for element in items)
print(concatenate_list_data([1, 5, 12, 2]))
```
# Write a Python program to create all possible strings by using 'a', 'e', 'i', 'o', 'u'. Use the characters exactly once.
```
import random
char_list = ['a','e','i','o','u']
# In-place random permutation: every vowel is used exactly once.
random.shuffle(char_list)
print(''.join(char_list))
```
# Write a Python program to reverse the digits of a given number and add it to the original, If the sum is not a palindrome repeat this procedure.
```
def rev_number(n):
    """Repeatedly add n to its digit-reversal until the sum is a palindrome.

    Returns the first palindromic value reached (n itself if already one).
    """
    # Cleanup: removed the iteration counter `s`, which was never used.
    while True:
        digits = str(n)
        if digits == digits[::-1]:
            return n
        n += int(digits[::-1])
print(rev_number(1234))
print(rev_number(1473))
```
# Write a Python program to iterate over sets, and also add multiple elements
```
num_set = {0, 1, 2, 3, 4, 5}
for member in num_set:
    print(member)
color_set = num_set  # alias: both names refer to the same set object
color_set.add(3)  # no effect; 3 is already a member
print(color_set)
# update() adds several elements at once
color_set.update([3,57,77])
print(color_set)
```
| github_jupyter |
# Weather Data Collection
```
import pandas as pd
import numpy as np
from selenium import webdriver
import time
# Race list with one Wikipedia URL per race.
races = pd.read_csv('./data/races.csv')
races.head()
races.shape
# First three columns of races; the scraped weather string is appended below.
weather = races.iloc[:,[0,1,2]]
info = []
# For each race page, look for a 'Weather' row in one of the first four HTML
# tables; otherwise fall back to the Italian Wikipedia page via Selenium.
# Refactor: the original repeated the same if/else block four times and
# re-downloaded the page for every table index; fetch once, loop over indices.
for link in races.url:
    try:
        tables = pd.read_html(link)
        found = False
        for t in range(4):
            # IndexError (fewer than 4 tables) falls through to 'not found',
            # matching the original behaviour.
            df = tables[t]
            first_col = list(df.iloc[:, 0])
            if 'Weather' in first_col:
                n = first_col.index('Weather')
                info.append(df.iloc[n, 1])
                found = True
                break
        if not found:
            driver = webdriver.Chrome()
            driver.get(link)
            # click language button to switch to the Italian page
            # NOTE(review): find_element_by_* was removed in Selenium 4;
            # confirm the installed Selenium version supports it.
            button = driver.find_element_by_link_text('Italiano')
            button.click()
            clima = driver.find_element_by_xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr[9]/td').text
            info.append(clima)
    except:
        # Best-effort scraping: any failure records a placeholder.
        info.append('not found')
len(info)
# NOTE(review): weather is a slice of races; pandas may emit a
# SettingWithCopyWarning here -- confirm a .copy() is not needed.
weather['weather'] = info
weather.head()
weather.tail()
# Keyword lists mapping free-text weather strings (English/Italian) to flags.
weather_dict = {'weather_warm': ['soleggiato', 'clear', 'warm', 'hot', 'sunny', 'fine', 'mild', 'sereno'],
                'weather_cold': ['cold', 'fresh', 'chilly', 'cool'],
                'weather_dry': ['dry', 'asciutto'],
                'weather_wet': ['showers', 'wet', 'rain', 'pioggia', 'damp', 'thunderstorms', 'rainy'],
                'weather_cloudy': ['overcast', 'nuvoloso', 'clouds', 'cloudy', 'grey', 'coperto']}
# One indicator column per category: 1 if any keyword occurs in the description.
weather_df = pd.DataFrame(columns = weather_dict.keys())
for col in weather_df:
    weather_df[col] = weather['weather'].map(lambda x: 1 if any(i in weather_dict[col] for i in x.lower().split()) else 0)
weather_df.head()
weather_info = pd.concat([weather, weather_df], axis = 1)
weather_info.shape
weather_info.head()
weather_info.tail()
weather_info.to_csv('./data/weather.csv', index= False)
```
| github_jupyter |
# Temporal Congruency Experiments
```
from scripts.imports import *
from scripts.df_styles import df_highlighter
# Exporter collects tables/plots/numbers for the 'clause' output directory.
out = Exporter(paths['outdir'], 'clause')
# redefine df_sg to include adverbs
df_sg = df[df.n_times == 1]  # clauses with exactly one time expression
df_sg.columns
```
# Tense Collocations with tokens
```
# Cross-tabulate lexical tokens against verb tense.
token_ct = df_sg.pivot_table(
    index=['lex_token'],
    columns='verbtense',
    aggfunc='size',
    fill_value=0,
)
# pare down to tokens and tenses attested at least twice
token_ct = token_ct.loc[token_ct.index[token_ct.sum(1) >= 2]]
token_ct = token_ct[token_ct.columns[token_ct.sum(0) >= 2]]
# sort rows by total frequency, descending
token_ct = token_ct.loc[token_ct.sum(1).sort_values(ascending=False).index]
token_ct
# Association measures over the count table: deltaP and Fisher's exact test.
token_dp = sig.apply_deltaP(token_ct, 0, 1)
token_dp = token_dp.dropna()
token_dp.head()
token_fs, token_odds = sig.apply_fishers(token_ct, 0, 1)
token_fs
```
## PCA Analysis
```
# PCA over the deltaP association scores; plot PC1 vs PC2.
vtense_pca, vtense_loadings = apply_pca(token_dp, 0, 1, components=4)
fig, ax = plt.subplots(figsize=(8, 8))
x, y = (vtense_pca['PC1'], vtense_pca['PC2'])
ax.scatter(x, y, s=15)
# Second figure: same scatter but labelled with the (Hebrew) tokens.
fig, ax = plt.subplots(figsize=(8, 8))
s = 70
x, y = (vtense_pca.iloc[:,0], vtense_pca.iloc[:,1])
ax.scatter(x, y, facecolor=[], s=2)
texts = []
for lex_tok in vtense_pca.index:
    tx, ty = vtense_pca.loc[idx[lex_tok]][:2]
    show_lex = get_display(lex_tok)  # reorder bidirectional (RTL) text for display
    texts.append(plt.text(tx, ty, show_lex, size=12,
                 fontfamily='SBL Biblit'))
# Overlay arrows for the strongest feature loadings.
offsets = {}
top_loadings = vtense_loadings.abs().sum().sort_values(ascending=False).index[:8]
texts = []
for feature in top_loadings:
    x_off, y_off, size = offsets.get(feature, (0,0,15))  # config offsets / size
    fx, fy = vtense_loadings[feature][:2] * 2  # scale arrows for visibility
    plt.arrow(0, 0, fx, fy, color='#808080', linewidth=1, head_width=0)
    show_text = get_display(feature)  # handle bidirectional
    texts.append(plt.text(fx+x_off, fy+y_off, show_text, color='#808080', size=size, fontfamily='SBL Biblit'))
out.plot('tense_PCA')
```
## PCA Analysis (with Fisher's)
```
# Same PCA analysis as above, but on Fisher's scores instead of deltaP.
vtense_pca2, vtense_loadings2 = apply_pca(token_fs, 0, 1, components=4)
fig, ax = plt.subplots(figsize=(8, 8))
x, y = (vtense_pca2['PC1'], vtense_pca2['PC2'])
ax.scatter(x, y, s=15)
# Labelled scatter of the same components.
fig, ax = plt.subplots(figsize=(8, 8))
s = 70
x, y = (vtense_pca2.iloc[:,0], vtense_pca2.iloc[:,1])
ax.scatter(x, y, facecolor=[], s=2)
texts = []
for lex_tok in vtense_pca2.index:
    tx, ty = vtense_pca2.loc[idx[lex_tok]][:2]
    show_lex = get_display(lex_tok)  # reorder bidirectional (RTL) text for display
    texts.append(plt.text(tx, ty, show_lex, size=12,
                 fontfamily='SBL Biblit'))
#adjust_text(texts)
#out.plot('pca2_durVSloc_TENSE_text')
# Overlay arrows for the strongest feature loadings.
offsets = {}
top_loadings2 = vtense_loadings2.abs().sum().sort_values(ascending=False).index[:5]
texts = []
for feature in top_loadings2:
    x_off, y_off, size = offsets.get(feature, (0,0,15))  # config offsets / size
    fx, fy = vtense_loadings2[feature][:2] * 2  # scale arrows for visibility
    plt.arrow(0, 0, fx, fy, color='#808080', linewidth=1, head_width=0)
    show_text = get_display(feature)  # handle bidirectional
    texts.append(plt.text(fx+x_off, fy+y_off, show_text, color='#808080', size=size, fontfamily='SBL Biblit'))
```
## With Demonstratives (generally)
```
df_sg.demon_type.value_counts()
df_sg.columns
# Cross-tabulate demonstrative-bearing clauses by fronting/type vs tense.
demon_ct = df_sg[df_sg.DEMON == 1].pivot_table(
    index=['front', 'demon_type'],
    columns=['verbtense'],
    aggfunc='size',
    fill_value=0,
)
demon_ct
# Inspect the clauses behind the PRES + 'THAT' cell.
df_sg[
    (df_sg.verbtense == 'PRES')
    & (df_sg.demon_type == 'THAT')
][['verse', 'clause']]
```
## Tense + Verbform + Modifiers
```
# Sum modifier indicator columns per (verbform, verbtense) pair.
modi_ct = df_sg.pivot_table(
    index=['verbform', 'verbtense'],
    values=['DEF', 'ORDN', 'QUANT', 'PL', 'NUM', 'DEMON', 'SFX', 'unmodified'],
    aggfunc='sum',
    fill_value=0,
)
modi_ct
modi_fs, modi_odds = sig.apply_fishers(modi_ct, 0, 1)
df_highlighter(modi_fs, rule='fishers')  # highlight statistically notable cells
```
# With Tagged Tenses
```
# Clauses whose time expression is an adverb.
tense_advbs = df_sg[
    (df_sg.is_advb == 1)
]
tense_advbs.shape
# get counts about which adverbs are being tagged as what
lex_tense_ct = tense_advbs.pivot_table(
    index=['TA Heads'],
    columns=['tense'],
    aggfunc='size',
    fill_value=0,
)
lex_tense_ct = lex_tense_ct.loc[lex_tense_ct.sum(1).sort_values(ascending=False).index]
lex_tense_ct
# first look at it with adverbs only
at_counts = tense_advbs.pivot_table(
    index=['tense'],
    columns=['verbform'],
    aggfunc='size',
    fill_value=0,
)
at_counts.drop('infc', axis=1, inplace=True)  # exclude the infc verb form
# sort rows and columns by total frequency, descending
at_counts = at_counts.loc[at_counts.sum(1).sort_values(ascending=False).index]
at_counts = at_counts[at_counts.sum().sort_values(ascending=False).index]
out.table(
    at_counts,
    'advb_tense_ct',
    caption='Tense and Hebrew Verb Collocation Frequencies (adverbs)'
)
at_counts
# Row-normalised proportions of the same table.
at_pr = at_counts.div(at_counts.sum(1), 0).round(2)
out.table(
    at_pr,
    'advb_tense_pr',
    caption='Tense and Hebrew Verb Collocation Proportions (adverbs)'
)
at_pr
```
#### Now look across non-adverbial versions
```
# NOTE(review): tense_np is assigned but not used later in this cell.
tense_np = df_sg[
    (df_sg.is_advb == 0)
    & (df_sg.function == 'simultaneous')
]
# Tense x verbform counts for the non-adverb (NP-based) adverbials.
np_ct = df_sg[df_sg.is_advb == 0].pivot_table(
    index=['tense'],
    columns=['verbform'],
    aggfunc='size',
    fill_value=0,
)
np_ct.drop(['infc', 'infa'], axis=1, inplace=True)
# sort in accord with the adverb DF
np_ct = np_ct.loc[at_pr.index]
np_ct = np_ct[at_pr.columns]
out.table(
    np_ct,
    'np_tense_ct',
    caption='Tense and Hebrew Verb Collocation Frequencies (NP-based adverbials)'
)
np_ct
# Row-normalised proportions of the same table.
np_pr = np_ct.div(np_ct.sum(1), 0).round(2)
out.table(
    np_pr,
    'np_tense_pr',
    caption='Tense and Hebrew Verb Collocation Proportions (NP-based adverbials)'
)
np_pr
# how much more frequent is weqatal future than in adverb set?
wqtl_diff = np_pr.loc['FUT']['wqtl'] - at_pr.loc['FUT']['wqtl']
out.number(wqtl_diff*100, 'wqtl_diff')
wqtl_diff
out.number(
    np_pr['wayq']['PAST']*100,
    'NP_past_wayq_perc'
)
# Inspect the concrete clauses behind the rarer tense/verb-form pairings.
df_sg[
    (df_sg.tense == 'FUT')
    & (df_sg.verbform == 'qtl')
][['verse', 'clause']]
df_sg[
    (df_sg.tense == 'PAST')
    & (df_sg.verbform == 'wqtl')
][['verse', 'clause']]
df_sg[
    (df_sg.tense == 'PAST')
    & (df_sg.verbform == 'yqtl')
][['verse', 'clause']]
df_sg[
    (df_sg.tense == 'PRES')
    & (df_sg.verbform == 'wqtl')
][['verse', 'clause']]
df_sg[
    (df_sg.tense == 'PAST')
    & (df_sg.verbform == 'ptcp')
][['verse', 'clause']]
df_sg[
    (df_sg.tense == 'FUT')
    & (df_sg.verbform == 'ptcp')
][['verse', 'clause']]
# 2x3 grid of bar charts: one panel per (adverb vs adverbial, tense) pair.
fig, axs = plt.subplots(2, 3, figsize=(12, 8))
axs = axs.ravel()
tensenames = {'PRES':'Present', 'FUT': 'Future', 'PAST': 'Past'}
i = 0
for table, kind in ([at_pr, 'Adverb'], [np_pr, 'Adverbial']):
    for tense in table.index:
        ax = axs[i]
        i += 1
        data = table.loc[tense]
        # Raw counts give the panel N; pick the table matching the kind.
        if kind == 'Adverb':
            ct_data = at_counts.loc[tense]
        else:
            ct_data = np_ct.loc[tense]
        tensename = tensenames[tense]
        kwargs = {}
        if tensename == 'Present' and kind == 'Adverbial':
            tensename = '"Today"'
        if kind == 'Adverbial':
            kwargs['color'] = 'orange'
        data.plot(ax=ax, kind='bar', edgecolor='black', **kwargs)
        ax.set_xticklabels(ax.get_xticklabels(), rotation=0)
        ax.set_title(f'Collocations with {tensename} Time {kind} (N={ct_data.sum()})', size=10)
        ax.set_ylabel('proportion')
        ax.set_ylim((0, 0.7))
        ax.grid(True, axis='y')
        ax.set_axisbelow(True)
fig.tight_layout()
out.plot(
    'advb_np_prs'
)
```
| github_jupyter |
## APIs
Let's start by looking at [OMDb API](https://www.omdbapi.com/).
The OMDb API is a free web service to obtain movie information, all content and images on the site are contributed and maintained by users.
The Python package [urllib](https://docs.python.org/3/howto/urllib2.html) can be used to fetch resources from the internet.
OMDb tells us what kinds of requests we can make. We are going to do a title search. As you can see below, we have an additional parameter "&Season=1" which does not appear in the parameter tables. If you read through the change log, you will see it documented there.
Using the urllib and json packages allow us to call an API and store the results locally.
```
import json
import urllib.request
# Fetch season 1 of 'Game of Thrones' from OMDb and parse the JSON payload.
data = json.loads(urllib.request.urlopen('http://www.omdbapi.com/?t=Game%20of%20Thrones&Season=1').read().\
    decode('utf8'))
```
What should we expect the type to be for the variable data?
```
print(type(data))  # the parsed JSON payload is a Python dict
```
What do you think the data will look like?
```
data.keys()  # top-level fields of the API response
data
```
We now have a dictionary object of our data. We can use python to manipulate it in a variety of ways. For example, we can print all the titles of the episodes.
```
# Print each episode's title and IMDb rating.
for episode in data['Episodes']:
    print(episode['Title'], episode['imdbRating'])
```
We can use pandas to convert the episode information to a dataframe.
```
import pandas as pd
# One DataFrame row per episode.
df = pd.DataFrame.from_dict(data['Episodes'])
df
```
And, we can save our data locally to use later.
```
# Save the raw API response locally for later reuse.
with open('tutorial_output/omdb_api_data.json', 'w') as f:
    json.dump(data, f)
```
Let's try an API that requires an API key!
"The [Digital Public Library of America](https://dp.la/) brings together the riches of America’s libraries, archives, and museums, and makes them freely available to the world. It strives to contain the full breadth of human expression, from the written word, to works of art and culture, to records of America’s heritage, to the efforts and data of science."
And, they have an [API](https://dp.la/info/developers/codex/api-basics/).
In order to use the API, you need to [request a key](https://dp.la/info/developers/codex/policies/#get-a-key). You can do this with an HTTP POST request.
If you are using **OS X or Linux**, replace "YOUR_EMAIL@example.com" in the cell below with your email address and execute the cell. This will send the request to DPLA and they will email your API key to the email address you provided. To successfully query the API, you must include the ?api_key= parameter with the 32-character hash following.
```
# execute this on OS X or Linux by removing '#' on the next line and excuting the cell
#! curl -v -XPOST http://api.dp.la/v2/api_key/YOUR_EMAIL@example.com
```
If you are on **Windows 7 or 10**, [open PowerShell](http://www.tenforums.com/tutorials/25581-windows-powershell-open-windows-10-a.html). Replace "YOUR_EMAIL@example.com" in the cell below with your email address. Copy the code and paste it at the command prompt in PowerShell. This will send the request to DPLA and they will email your API key to the email address you provided. To successfully query the API, you must include the ?api_key= parameter with the 32-character hash following.
```
#execute this on Windows by running the line below, without the leading '#', in PowerShell
#Invoke-WebRequest -Uri ("http://api.dp.la/v2/api_key/YOUR_EMAIL@example.com") -Method POST -Verbose -usebasicparsing
```
You will get a response similar to what is shown below and will receive an email fairly quickly from DPLA with your key.
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
* Trying 52.2.169.251...
* Connected to api.dp.la (52.2.169.251) port 80 (#0)
> POST /v2/api_key/YOUR_EMAIL@example.com HTTP/1.1
> Host: api.dp.la
> User-Agent: curl/7.43.0
> Accept: */*
>
< HTTP/1.1 201 Created
< Access-Control-Allow-Origin: *
< Cache-Control: max-age=0, private, must-revalidate
< Content-Type: application/json; charset=utf-8
< Date: Thu, 20 Oct 2016 20:53:24 GMT
< ETag: "8b66d9fe7ded79e3151d5a22f0580d99"
< Server: nginx/1.1.19
< Status: 201 Created
< X-Request-Id: d61618751a376452ac3540b3157dcf48
< X-Runtime: 0.179920
< X-UA-Compatible: IE=Edge,chrome=1
< Content-Length: 89
< Connection: keep-alive
<
* Connection #0 to host api.dp.la left intact
{"message":"API key created and sent via email. Be sure to check your Spam folder, too."}
It is good practice not to put your keys in your code. You can store them in a file and read them in from there. If you are pushing your code to GitHub, make sure you put your key files in .gitignore.
I created a file on my drive called "config_secret.json". The contents of the file look like this:
{
"api_key" : "my api key here"
}
I can then write code to read the information in.
A template called config_secret_template.json has been provided for you to add your keys to.
```
# Load the DPLA API key from a local, git-ignored config file.
with open("./dpla_config_secret.json") as key_file:
    key = json.load(key_file)
key
```
Then, when I create my API query, I can use a variable in place of my actual key.
The Requests library allows us to build urls with different parameters. You build the parameters as a dictionary that contains key/value pairs for everything after the '?' in your url.
```
import requests
# we are specifying our url and parameters here as variables
url = 'http://api.dp.la/v2/items/'
params = {'api_key' : key['api_key'], 'q' : 'goats+AND+cats'}
# we are creating a response object, r
r = requests.get(url, params=params)
type(r)
# we can look at the url that was created by requests with our specified variables
r.url
# we can check the status code of our request (200 means OK)
r.status_code
```
[HTTP Status Codes](http://www.restapitutorial.com/httpstatuscodes.html)
```
# we can look at the content of our request (raw response body, bytes)
print(r.content)
```
By default, DPLA returns 10 items at a time. We can see from the count value that our query has 29 results. DPLA does give us a parameter we can set to change this and get up to 500 items at a time.
```
# Request up to 500 results per page instead of the default 10.
params = {'api_key' : key['api_key'], 'q' : 'goats+AND+cats', 'page_size': 500}
r = requests.get(url, params=params)
print(r.content)
```
If we were working with an API that limited us to only 10 items at a time, we could write a loop to pull our data.
The file [seeclickfix_api.py](./seeclickfix_api.py) in the api folder of this repo is an example of how you can pull multiple pages of data from an API. It uses the [SeeClickFix API](http://dev.seeclickfix.com/). "[SeeClickFix](https://seeclickfix.com/) allows you to play an integral role in public services — routing neighborhood concerns like potholes and light outages to the right official with the right information."
| github_jupyter |
# TensorFlow Data Validation Example
This notebook describes how to explore and validate Chicago Taxi dataset using TensorFlow Data Validation.
# Setup
Import necessary packages and set up data paths.
```
import tensorflow_data_validation as tfdv
import os
# All dataset CSVs live under ./data relative to the working directory.
BASE_DIR = os.getcwd()
DATA_DIR = os.path.join(BASE_DIR, 'data')
TRAIN_DATA = os.path.join(DATA_DIR, 'train.csv')
EVAL_DATA = os.path.join(DATA_DIR, 'eval.csv')
SERVING_DATA = os.path.join(DATA_DIR, 'serving.csv')
```
# Compute descriptive data statistics
TFDV can compute descriptive
[statistics](https://github.com/tensorflow/metadata/tree/v0.6.0/tensorflow_metadata/proto/v0/statistics.proto)
that provide a quick overview of the data in terms of the features that are
present and the shapes of their value distributions.
Internally, TFDV uses [Apache Beam](https://beam.apache.org)'s data-parallel
processing framework to scale the computation of statistics over large datasets.
For applications that wish to integrate deeper with TFDV (e.g., attach
statistics generation at the end of a data-generation pipeline), the API also
exposes a Beam PTransform for statistics generation.
```
# Descriptive statistics over the training CSV.
train_stats = tfdv.generate_statistics_from_csv(TRAIN_DATA)
```
The statistics can be visualized using the [Facets Overview](https://pair-code.github.io/facets/) tool, which provides a succinct visualization of these statistics for easy browsing. TFDV provides a utility method that visualizes statistics using Facets.
```
tfdv.visualize_statistics(train_stats)  # render the stats with the Facets Overview widget
```
# Infer a schema
The
[schema](https://github.com/tensorflow/metadata/tree/v0.6.0/tensorflow_metadata/proto/v0/schema.proto)
describes the expected properties of the data. Some of these properties are:
* which features are expected to be present
* their type
* the number of values for a feature in each example
* the presence of each feature across all examples
* the expected domains of features.
In short, the schema describes the expectations for "correct" data and can thus
be used to detect errors in the data (described below).
Since writing a schema can be a tedious task, especially for datasets with lots
of features, TFDV provides a method to generate an initial version of the schema
based on the descriptive statistics.
```
# Initial schema inferred from the training statistics; review before relying on it.
schema = tfdv.infer_schema(train_stats)
```
In general, TFDV uses conservative heuristics to infer stable data properties
from the statistics in order to avoid overfitting the schema to the specific
dataset. It is strongly advised to **review the inferred schema and refine
it as needed**, to capture any domain knowledge about the data that TFDV's
heuristics might have missed.
```
tfdv.display_schema(schema)  # inspect the inferred features, types, and domains
```
# Check evaluation data for errors
Given a schema, it is possible to check whether a dataset conforms to the
expectations set in the schema or whether there exist any data anomalies. TFDV
performs this check by matching the statistics of the dataset against the schema
and marking any discrepancies.
```
# Compute stats over eval data.
eval_stats = tfdv.generate_statistics_from_csv(EVAL_DATA)
# Compare stats of eval data with training data side by side.
tfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats,
                          lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')
# Check eval data for errors by validating the eval data stats using the previously inferred schema.
anomalies = tfdv.validate_statistics(eval_stats, schema)
tfdv.display_anomalies(anomalies)
```
The anomalies indicate that out of domain values were found for features `company` and `payment_type` in the stats in < 1% of the feature values. If this was expected, then the schema can be updated as follows.
```
# Relax the minimum fraction of values that must come from the domain for feature company.
company = tfdv.get_feature(schema, 'company')
company.distribution_constraints.min_domain_mass = 0.9
# Add new value to the domain of feature payment_type.
payment_type_domain = tfdv.get_domain(schema, 'payment_type')
payment_type_domain.value.append('Prcard')
# Re-validate against the relaxed schema; the earlier anomalies should clear.
updated_anomalies = tfdv.validate_statistics(eval_stats, schema)
tfdv.display_anomalies(updated_anomalies)
```
If an anomaly truly indicates a data error, then the underlying data should be fixed.
# Schema Environments
By default, validations assume that all datasets in a pipeline adhere to a single schema. In some cases introducing
slight schema variations is necessary, for instance features used as labels are required during training (and should
be validated), but are missing during serving.
**Environments** can be used to express such requirements. In particular, features in schema can be associated with a set of environments using `default_environment`, `in_environment` and `not_in_environment`.
For example, if the `tips` feature is being used as the label in training, but missing in the serving data. Without environment specified, it will show up as an anomaly.
```
# Validate serving data against the schema (no environment specified yet).
serving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA)
serving_anomalies = tfdv.validate_statistics(serving_stats, schema)
tfdv.display_anomalies(serving_anomalies)
```
Note that 'tips' feature is shown in the anomalies as 'Column dropped', as it is not present in the serving dataset. We can do the following to fix this.
```
# All features are by default in both TRAINING and SERVING environments.
schema.default_environment.append('TRAINING')
schema.default_environment.append('SERVING')
# Specify that 'tips' feature is not in SERVING environment.
tfdv.get_feature(schema, 'tips').not_in_environment.append('SERVING')
# Validate the serving stats under the SERVING environment only.
serving_anomalies_with_env = tfdv.validate_statistics(
    serving_stats, schema, environment='SERVING')
tfdv.display_anomalies(serving_anomalies_with_env)
```
# Check data drift and skew
In addition to checking whether a dataset conforms to the expectations set in the schema, TFDV also provides functionalities to detect
* drift between different days of training data
* skew between training and serving data
TFDV performs this check by comparing the statistics of different datasets based on the drift/skew comparators specified in the schema.
```
# Add skew comparator for 'payment_type' feature.
payment_type = tfdv.get_feature(schema, 'payment_type')
payment_type.skew_comparator.infinity_norm.threshold = 0.01
# Add drift comparator for 'company' feature.
company=tfdv.get_feature(schema, 'company')
company.drift_comparator.infinity_norm.threshold = 0.001
skew_anomalies = tfdv.validate_statistics(train_stats, schema, previous_statistics=eval_stats,
serving_statistics=serving_stats)
tfdv.display_anomalies(skew_anomalies)
```
| github_jupyter |
CWPK \#34: A Python Module, Part II: Packaging and The Structure Extractor
=======================================
Moving from Notebook to Package Proved Perplexing
--------------------------
<div style="float: left; width: 305px; margin-right: 10px;">
<img src="http://kbpedia.org/cwpk-files/cooking-with-kbpedia-305.png" title="Cooking with KBpedia" width="305" />
</div>
This installment of the [*Cooking with Python and KBpedia*](https://www.mkbergman.com/cooking-with-python-and-kbpedia/) series is the second of a three-part mini-series on writing and packaging a formal Python project. The previous installment described a [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) (don't repeat yourself) approach to how to generalize our annotation extraction routine. This installment describes how to transition that code from [Jupyter Notebook](https://en.wikipedia.org/wiki/Project_Jupyter#Jupyter_Notebook) interactive code to a formally organized Python package. We also extend our generalized approach to the structure extractor.
In this installment I am working with the notebook and the [Spyder](https://en.wikipedia.org/wiki/Spyder_(software)) IDE in tandem. The notebook is the source of the initial prototype code. It is also the testbed for seeing if the package may be imported and is working properly. We use Spyder for all of the final code development, including moving into functions and classes and organizing by files. We also start to learn some of its [IDE](https://en.wikipedia.org/wiki/Integrated_development_environment) features, such as auto-complete, which is a nice way to test questions about namespaces and local and global variables.
As noted in earlier installments, a Python 'module' is a single script file (in the form of <code>my_file.py</code>) that itself may contain multiple functions, variable declarations, class (object) definitions, and the like, kept in this single file because of their related functionality. A 'package' in Python is a directory with at least one module and (generally) a standard <code>\_\_init\_\_.py</code> file that informs Python a package is available and its name. Python packages and modules are named with lower case. A package name is best when short and without underscores. A module may use underscores to better convey its purpose, such as <code>do_something.py</code>.
For our project based on _Cooking with Python and KBpedia_ (**CWPK**), we will pick up on this acronym and name our project '*cowpoke*'. The functional module we are starting the project with is <code>extract.py</code>, the module for the extraction routines we have been developing over the past few installments.
### Perplexing Questions
While it is true the Python organization has some thorough tutorials, referenced in the concluding **Additional Documentation**, I found it surprisingly difficult to figure out how to move my [Jupyter Notebook](https://en.wikipedia.org/wiki/Project_Jupyter#Jupyter_Notebook) prototypes to a packaged [Python](https://en.wikipedia.org/wiki/Python_(programming_language)) program. I could see that logical modules (single Python scripts, <code>\*.py</code>) made sense, and that there were going to be shared functions across those modules. I could also see that I wanted to use a standard set of variable descriptions in order to specify 'record-like' inputs to the routines. My hope was to segregate all of the input information required for a new major exercise of *cowpoke* into the editing of a single file. That would make configuring a new run a simple process.
I read and tried many tutorials trying to figure out an architecture and design for this packaging. I found the tutorials helpful at a broad, structural level of what goes into a package and how to refer and import other parts, but the nuances of where and how to use classes and functions and how to best share some variables and specifications across modules remained opaque to me. Here are some of the questions and answers I needed to discover before I could make progress:
***1. Where do I put the files to be seen by the notebook and the project?***
After installing Python and setting up the environment noted in installments [**CWPK #9**](https://www.mkbergman.com/2336/cwpk-9-installing-python/) - [**#11**](https://www.mkbergman.com/2338/cwpk-11-installing-a-python-ide/) you should have many packages already on your system, including for Spyder and Jupyter Notebook. There are at least two listings of full packages in different locations. To re-discover what your Python paths are, Run this cell:
```
import sys
print(sys.path)
```
You want to find the site packages directory under your Python library (mine is <code>C:\\1-PythonProjects\\Python\\lib\\site-packages</code>). We will define the '*cowpoke*' directory under this parent and also point our Spyder project to it. (**NB:** Of course, you can locate your package directory anywhere you want, but you would need to add that location to your path as well, and later configuration steps may also require customization.)
***2. What is the role of class and defined variables?***
I know the major functions I have been prototyping, such as the annotation extractor from the last [**CWPK #33**](https://www.mkbergman.com/2370/cwpk-33-a-python-package-part-i-the-annotation-extractor/) installment, need to be formalized as a defined function (the <code>def *function_name*</code> statement). Going into this packaging, however, it is not clear to me whether I should package multiple function definitions under one class (some tutorials seem to so suggest) or where and how I need to declare variables such as *loop* that are part of a run configuration.
One advantage of putting both variables and functions under a single class is that they can be handled as a unit. On the other hand, having a separate class of only input variables seems to be the best arrangement for a record orientation (see question #4 below). In practice, I chose to embrace both types.
***3. What is the role of <code>self</code> and where to introduce or use?***
The question of the role of <code>self</code> perplexed me for some time. On the one hand, <code>self</code> is not a reserved keyword in Python, but it is used frequently by convention. Class variables come in two flavors. One flavor is when the variable value is universal to all instances of class. Every instance of this class will share the same value for this variable. It is declared simply after first defining the class and outside of any methods:
<pre>variable = my_variable</pre>
In contrast, instance variables, which is where <code>self</code> is used, are variables with values specific to each instance of class. The values of one instance typically vary from the values of another instance. Class instance variables should be declared within a method, often with this kind of form, as this example from the **Additional Documentation** shows:
<pre>
class SomeClass:
variable_1 = “ This is a class variable”
variable_2 = 100 #this is also a class variable.
def __init__(self, param1, param2):
self.instance_var1 = param1
#instance_var1 is a instance variable
self.instance_var2 = param2
#instance_var2 is a instance variable
</pre>
In this recipe, we are assigning <code>self</code> by convention to the first parameter of the function (method). We can then access the values of the instance variable as declared in the definition via the <code>self</code> convention, also without the need to pass additional arguments or parameters, making for simpler use and declarations. (**NB:** You may name this first parameter something other than <code>self</code>, but that is likely confusing since it goes against the convention.)
Importantly, note that we may use this same approach to assign <code>self</code> as the first parameter for instance methods, in addition to instance variables. For either instance variables or methods, Python implicitly passes the current instance (<code>self</code>) as the first argument to the instance call.
At any rate, for our interest of being able to pass variable assignments from a separate <code>config.py</code> file to a local extraction routine, the approach using the universal class variable is the right form. But, is it the best form?
***4. What is the best practice for initializing a record?***
If one stands back and thinks about what we are trying to do with our annotation extraction routine (as with other build or extraction steps), we see that we are trying to set a number of key parameters for what data we use and what branches we take during the routine. These parameters are, in effect, keywords used in the routines, the specific values of which (sources of data, what to loop over, etc.) vary by the specific instance of the extraction or build run we are currently invoking. This set-up sounds very much like a kind of 'record' format where we have certain method fields (such as output file or source of the looping data) that vary by run. This is equivalent to a *key:value* pair. In other words, we can treat our configuration specification as the input to a given run of the annotation extractor as a dictionary (<code>dict</code>) as we discussed in the last installment. The <code>dict</code> form looks to be the best form for our objective. We'll see this use below.
***5. What are the special privileges about <code>\_\_main\_\_.py</code>?***
Another thing I saw while reading the background tutorials was reference to a more-or-less standard <code>\_\_main\_\_.py</code> file. However, in looking at many of the packages installed in my current Python installation I saw that this construct is by no means universally used, though some packages do. Should I be using this format or not?
For two reasons my general desire is to remove this file. The first reason is because this file can be confused with the <code>\_\_main\_\_</code> module. The second reason is because I could find no real clear guidance about best practices for the file except to keep it simple. That seemed to me thin gruel for keeping something I did not fully understand and found confusing. So, I initially decided not to use this form.
However, I found things broke when I tried to remove it. I assume with greater knowledge or more experience I might find the compelling recipe for simplifying this file away. But, it is easier to keep it and move on rather than get stuck on a question not central to our project.
***6. What is the best practice for arranging internal imports across a project?***
I think one of the reasons I did not see a simple answer to the above question is the fact I have not yet fully understood the relationships between global and local variables and module functions and inheritance, all of which require a sort of grokking, I suppose, of namespaces.
I plan to continue to return to these questions as I learn more with subsequent installments and code development. If I encounter new insights or better ways to do things, my current intent is to return to any prior installments, leave the existing text as is, and then add annotations as to what I learned. If you have not seen any of these notices by now, I guess I have not later discovered better approaches. (**Note**: I think I began to get a better understanding about namespaces on the return leg of our build 'roundtrip', roughly about **CWPK #40** from now, but I still have questions, even from that later vantage point.)
### New File Definitions
As one may imagine, the transition from notebook to module package has resulted in some changes to the code. The first change, of course, was to split the code into the starting pieces, including adding the <code>\_\_init\_\_.py</code> that signals the available *cowpoke* package. Here is the new file structure:
<pre>
|-- PythonProject
|-- Python
|-- [Anaconda3 distribution]
|-- Lib
|-- site-packages # location to store files
|-- alot
|-- cowpoke # new project directory
|-- __init__.py # four new files here
|-- __main__.py
|-- config.py
|-- extract.py
|-- TBA
|-- TBA
</pre>
At the top of each file we place our import statements, including references to other modules within the *cowpoke* project. Here is the statement at the top of <code>\_\_init\_\_.py</code> (which also includes some package identification boilerplate):
<pre>
from cowpoke.__main__ import *
from cowpoke.config import *
from cowpoke.extract import *
</pre>
I should note that the asterisk (\*) character above tells the system to import all objects within the file, a practice that is generally not encouraged, though is common. It is discouraged because of the number of objects brought into the current working namespace, which may pose name conflicts or a burdened system for larger projects. However, since our system is quite small and I do not foresee unmanageable namespace complexity, I use this simpler shorthand.
Our <code>\_\_main\_\_.py</code> contains the standard start-up script that we have recently been using for many installments. You can see this code and the entire file by Running the next cell (assuming you have been following this entire **CWPK** series and have stored earlier distribution files):
<div style="background-color:#eee; border:1px dotted #aaa; vertical-align:middle; margin:15px 60px; padding:8px;"><strong>Which environment?</strong> The specific load routine you should choose below depends on whether you are using the online MyBinder service (the 'raw' version) or local files. The example below is based on using local files (though replace with your own local directory specification). If loading from MyBinder, replace with the lines that are commented (<code>#</code>) out.</div>
```
with open(r'C:\1-PythonProjects\Python\Lib\site-packages\cowpoke\__main__.py', 'r') as f:
print(f.read())
```
(**NB:** Remember the '<code>r</code>' switch on the file name is to treat the string as 'raw'.)
We move our dictionary definitions to the <code>config.py</code>. Go ahead and inspect it in the next cell, but realized much has been added to this file due to subsequent coding steps in our project installments:
```
with open(r'C:\1-PythonProjects\Python\Lib\site-packages\cowpoke\config.py', 'r') as f:
print(f.read())
```
We already had the class and property dictionaries as presented in the [**CWPK #33**](https://www.mkbergman.com/2370/cwpk-33-a-python-package-part-i-the-annotation-extractor/) installment. The key change notable for the <code>config.py</code>, which remember is intended for where we enter run specifications for a new run (build or extract) of the code, was to pull out our specifications for the annotation extractor. This new dictionary, the <code>extract_deck</code>, is expanded later to embrace other run parameters for additional functions. At the time of this initial set-up, however, the dictionary contained these relatively few entries:
<pre>
extract_deck = {
"""This is the dictionary for the specifications of each
extraction run; what is its run deck.
"""
'property_loop' : '',
'class_loop' : '',
'loop' : 'property_loop',
'loop_list' : prop_dict.values(),
'out_file' : 'C:/1-PythonProjects/kbpedia/sandbox/prop_annot_out.csv',
}
</pre>
These are the values passed to the new annotation extraction function, <code>def annot_extractor</code>, now migrated to the <code>extract.py</code> module. Here is the commented code block (which will not run on its own as a cell):
```
def annot_extractor(**extract_deck): # define the method here, see note
print('Beginning annotation extraction . . .')
loop_list = extract_deck.get('loop_list') # notice we are passing run_deck to current vars
loop = extract_deck.get('loop')
out_file = extract_deck.get('out_file')
class_loop = extract_deck.get('class_loop')
property_loop = extract_deck.get('property_loop')
a_dom = ''
a_rng = ''
a_func = ''
""" These are internal counters used in this module's methods """
p_set = ''
x = 1
cur_list = []
with open(out_file, mode='w', encoding='utf8', newline='') as output:
csv_out = csv.writer(output)
... # remainder of code as prior installment . . .
```
**Note:** Normally, a function definition is followed by its arguments in parentheses. The special notation of the double asterisks (\*\*) signals to expect a variable list of keywords (more often in tutorials shown as '<code>\*\*kwargs</code>'), which is how we make the connection to the values of the keys in the <code>extract_deck</code> dictionary. We retrieve these values based on the <code>.get()</code> method shown in the next assignments. Note, as well, that positional arguments can also be treated in a similar way using the single asterisk (<code>\*</code>) notation ('<code>\*args</code>').
At the command line or in an interactive notebook, we can run this function with the following call:
<pre>
import cowpoke
cowpoke.annot_extractor(**cowpoke.extract_deck)
</pre>
We are not calling it here given that your local <code>config.py</code> is not set up with the proper configuration parameters for this specific example.
These efforts complete our initial set-up on the Python *cowpoke* package.
### Generalizing and Moving the Structure Extractor
You may want to relate the modified code in this section to the last state of our structure extraction routine, shown as the last code cell in [**CWPK #32**](https://www.mkbergman.com/2368/cwpk-32-iterating-over-a-full-extraction/).
We took that code, applied the generalization approaches earlier discussed, and added a <code>set.union</code> method to getting the unique list from a very large list of large sets. This approach using sets (that can be hashed) sped up what had been a linear lookup by about 10x. We also moved the general parameters to share the same <code>extract_deck</code> dictionary.
We made the same accommodations for processing properties v classes (and typologies). We wrapped the resulting code block into a defined function wrapper, similar for what we did for annotations, only now for (is-a) structure:
```
from owlready2 import *
from cowpoke.config import *
from cowpoke.__main__ import *
import csv
import types
# NOTE(review): leading indentation was stripped by this notebook dump;
# it is restored below. Code tokens are unchanged.
# Create a fresh owlready2 World so this load is isolated from any
# previously loaded default world.
world = World()
# Placeholders; each is reassigned to an ontology or namespace below.
kko = []
kb = []
rc = []
core = []
skos = []
kb_src = master_deck.get('kb_src')    # we get the build setting from config.py
# NOTE(review): this reads master_deck although the surrounding text
# discusses extract_deck -- confirm against config.py.
if kb_src is None:
    kb_src = 'standard'    # default source when config.py does not specify one
# Map the run's source setting to the matching ontology file locations.
if kb_src == 'sandbox':
    kbpedia = 'C:/1-PythonProjects/kbpedia/sandbox/kbpedia_reference_concepts.owl'
    kko_file = 'C:/1-PythonProjects/kbpedia/sandbox/kko.owl'
elif kb_src == 'standard':
    kbpedia = 'C:/1-PythonProjects/kbpedia/v300/targets/ontologies/kbpedia_reference_concepts.owl'
    kko_file = 'C:/1-PythonProjects/kbpedia/v300/build_ins/stubs/kko.owl'
elif kb_src == 'extract':
    kbpedia = 'C:/1-PythonProjects/kbpedia/v300/build_ins/ontologies/kbpedia_reference_concepts.owl'
    kko_file = 'C:/1-PythonProjects/kbpedia/v300/build_ins/ontologies/kko.owl'
elif kb_src == 'full':
    kbpedia = 'C:/1-PythonProjects/kbpedia/v300/build_ins/stubs/kbpedia_rc_stub.owl'
    kko_file = 'C:/1-PythonProjects/kbpedia/v300/build_ins/stubs/kko.owl'
else:
    print('You have entered an inaccurate source parameter for the build.')
skos_file = 'http://www.w3.org/2004/02/skos/core'
# Load KBpedia and its supporting vocabularies, then register the
# namespaces the extraction routines refer to.
kb = world.get_ontology(kbpedia).load()
rc = kb.get_namespace('http://kbpedia.org/kko/rc/')
skos = world.get_ontology(skos_file).load()
kb.imported_ontologies.append(skos)
core = world.get_namespace('http://www.w3.org/2004/02/skos/core#')
kko = world.get_ontology(kko_file).load()
kb.imported_ontologies.append(kko)
kko = kb.get_namespace('http://kbpedia.org/ontologies/kko#')
# NOTE(review): leading indentation was lost in this notebook dump; the
# nesting below is reconstructed -- confirm against the CWPK #32 listing.
def struct_extractor(**extract_deck):
    """Extract the is-a (subclass/subproperty) structure of KBpedia to CSV.

    Run parameters arrive as keywords from the ``extract_deck``
    dictionary in config.py; the keys read here are:
        loop_list     -- names (strings) of the root concepts to traverse
        loop          -- which loop this run is; compared against the
                         ``class_loop``/``property_loop`` markers below
        out_file      -- path of the CSV file that receives the output
    Writes one ``id, subClassOf|subPropertyOf, parent`` row per is-a
    assertion; for class runs, also writes an ``owl:Thing`` parent row
    the first time each class is encountered.
    """
    print('Beginning structure extraction . . .')
    loop_list = extract_deck.get('loop_list')
    loop = extract_deck.get('loop')
    out_file = extract_deck.get('out_file')
    class_loop = extract_deck.get('class_loop')
    property_loop = extract_deck.get('property_loop')
    x = 1              # running count of rows written
    cur_list = []      # classes already given an owl:Thing parent row
    a_set = []         # descendants of the current root
    s_set = []         # consolidated, de-duplicated union across all roots
    # r_default = '' # Series of variables needed later
    # r_label = '' #
    # r_iri = '' #
    # render = '' #
    new_class = 'owl:Thing'
    with open(out_file, mode='w', encoding='utf8', newline='') as output:
        csv_out = csv.writer(output)
        if loop == class_loop:
            header = ['id', 'subClassOf', 'parent']
            p_item = 'rdfs:subClassOf'
        else:
            header = ['id', 'subPropertyOf', 'parent']
            p_item = 'rdfs:subPropertyOf'
        csv_out.writerow(header)
        for value in loop_list:
            print(' . . . processing', value)
            root = eval(value)        # value is a dotted name string; eval resolves it to the object
            a_set = root.descendants()
            a_set = set(a_set)        # hashable set enables the fast union below
            s_set = a_set.union(s_set)    # set.union keeps only unique members across roots
        print(' . . . processing consolidated set.')
        for s_item in s_set:
            o_set = s_item.is_a       # the direct parents asserted for this item
            for o_item in o_set:
                row_out = (s_item,p_item,o_item)
                csv_out.writerow(row_out)
                if loop == class_loop:
                    if s_item not in cur_list:
                        row_out = (s_item,p_item,new_class)
                        csv_out.writerow(row_out)
                        cur_list.append(s_item)
                x = x + 1
    print('Total rows written to file:', x)
struct_extractor(**extract_deck)
```
Again, since we can not guarantee the operating circumstance, you can try this on your own instance with the command:
<pre>
cowpoke.struct_extractor(**cowpoke.extract_deck)
</pre>
Note we're using a prefixed *cowpoke* function to make the generic dictionary request. All we need to do before the run is to go to the <code>config.py</code> file, and make the value (right-hand side) changes to the <code>extract_deck</code> dictionary. Save the file, make sure your current notebook instance has been cleared, and enter the command above.
There aren't any commercial-grade checks here to make sure you are not inadvertently overwriting a desired file. Loose code and routines such as what we are developing in this **CWPK** series warrant making frequent backups, and scrutinizing your <code>config.py</code> assignments before kicking off a run.
### Additional Documentation
Here are additional guides resulting from the research in today's installment:
- Python's [Class and Instance Variable](https://docs.python.org/3/tutorial/classes.html#class-and-instance-variables) documentation
- [Understanding self in Python](https://medium.com/quick-code/understanding-self-in-python-a3704319e5f0)
- PythonTips' [The self variable in python explained](https://pythontips.com/2013/08/07/the-self-variable-in-python-explained/)
- DEV's [class v instance variables](https://dev.to/ogwurujohnson/distinguishing-instance-variables-from-class-variables-in-python-81)
- Programiz' [self in Python, Demystified](https://www.programiz.com/article/python-self-why)
- StackOverflow's [What is \_\_main\_\_.py?](https://stackoverflow.com/questions/4042905/what-is-main-py)
- See StackOverflow for a nice example of the advantage of [using sets to find unique items](https://stackoverflow.com/questions/12897374/get-unique-values-from-a-list-in-python) in a listing.
<div style="background-color:#efefff; border:1px dotted #ceceff; vertical-align:middle; margin:15px 60px; padding:8px;">
<span style="font-weight: bold;">NOTE:</span> This article is part of the <a href="https://www.mkbergman.com/cooking-with-python-and-kbpedia/" style="font-style: italic;">Cooking with Python and KBpedia</a> series. See the <a href="https://www.mkbergman.com/cooking-with-python-and-kbpedia/"><strong>CWPK</strong> listing</a> for other articles in the series. <a href="http://kbpedia.org/">KBpedia</a> has its own Web site.
</div>
<div style="background-color:#ebf8e2; border:1px dotted #71c837; vertical-align:middle; margin:15px 60px; padding:8px;">
<span style="font-weight: bold;">NOTE:</span> This <strong>CWPK
installment</strong> is available both as an online interactive
file <a href="https://mybinder.org/v2/gh/Cognonto/CWPK/master" ><img src="https://mybinder.org/badge_logo.svg" style="display:inline-block; vertical-align: middle;" /></a> or as a <a href="https://github.com/Cognonto/CWPK" title="CWPK notebook" alt="CWPK notebook">direct download</a> to use locally. Make sure and pick the correct installment number. For the online interactive option, pick the <code>*.ipynb</code> file. It may take a bit of time for the interactive option to load.</div>
<div style="background-color:#feeedc; border:1px dotted #f7941d; vertical-align:middle; margin:15px 60px; padding:8px;">
<div style="float: left; margin-right: 5px;"><img src="http://kbpedia.org/cwpk-files/warning.png" title="Caution!" width="32" /></div>I am at best an amateur with Python. There are likely more efficient methods for coding these steps than what I provide. I encourage you to experiment -- which is part of the fun of Python -- and to <a href="mailto:mike@mkbergman.com">notify me</a> should you make improvements.
</div>
| github_jupyter |
```
############## PLEASE RUN THIS CELL FIRST! ###################
# import everything and define a test runner function
from importlib import reload
from helper import run
import ecc, helper, tx, script
# Signing Example
from ecc import G, N
from helper import hash256
secret = 1800555555518005555555    # the private key e
z = int.from_bytes(hash256(b'ECDSA is awesome!'), 'big')    # message hash as an integer
k = 12345    # nonce; fixed here for demonstration only -- never reuse a nonce in practice
r = (k*G).x.num    # r is the x-coordinate of kG
s = (z+r*secret) * pow(k, -1, N) % N    # s = (z + r*e) / k mod N
print(hex(z), hex(r), hex(s))
print(secret*G)    # the public key point P = eG
# Verification Example
from ecc import S256Point, G, N
z = 0xbc62d4b80d9e36da29c16c5d4d9f11731f36052c72401a76c23c0fb5a9b74423
r = 0x37206a0610995c58074999cb9767b87af4c4978db68c06e8e6e81d282047a7c6
s = 0x8ca63759c1157ebeaec0d03cecca119fc9a75bf8e6d0fa65c841c8e2738cdaec
point = S256Point(0x04519fac3d910ca7e7138f7013706f619fa8f033e6ec6e09370ea38cee6a7574,
0x82b51eab8c27c66e26c858a079bcdf4f1ada34cec420cafc7eac1a42216fb6c4)
u = z * pow(s, -1, N) % N    # u = z / s mod N
v = r * pow(s, -1, N) % N    # v = r / s mod N
print((u*G + v*point).x.num == r)    # signature is valid iff x(uG + vP) == r
```
### Exercise 1
Which sigs are valid?
```
P = (887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c,
61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34)
z, r, s = ec208baa0fc1c19f708a9ca96fdeff3ac3f230bb4a7ba4aede4942ad003c0f60,
ac8d1c87e51d0d441be8b3dd5b05c8795b48875dffe00b7ffcfac23010d3a395,
68342ceff8935ededd102dd876ffd6ba72d6a427a3edb13d26eb0781cb423c4
z, r, s = 7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d,
eff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c,
c7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab6
```
```
# Exercise 1
from ecc import S256Point, G, N
px = 0x887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c
py = 0x61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34
signatures = (
# (z, r, s)
(0xec208baa0fc1c19f708a9ca96fdeff3ac3f230bb4a7ba4aede4942ad003c0f60,
0xac8d1c87e51d0d441be8b3dd5b05c8795b48875dffe00b7ffcfac23010d3a395,
0x68342ceff8935ededd102dd876ffd6ba72d6a427a3edb13d26eb0781cb423c4),
(0x7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d,
0xeff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c,
0xc7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab6),
)
# initialize the public point
# use: S256Point(x-coordinate, y-coordinate)
point = S256Point(px, py)
# iterate over signatures
for z, r, s in signatures:
# u = z / s, v = r / s
u = z * pow(s, -1, N) % N
v = r * pow(s, -1, N) % N
# finally, uG+vP should have the x-coordinate equal to r
print((u*G+v*point).x.num == r)
```
### Exercise 2
#### Make [this test](/edit/session3/ecc.py) pass: `ecc.py:S256Test:test_verify`
```
# Exercise 2
reload(ecc)
run(ecc.S256Test('test_verify'))
```
### Exercise 3
#### Make [this test](/edit/session3/ecc.py) pass: `ecc.py:PrivateKeyTest:test_sign`
```
# Exercise 3
reload(ecc)
run(ecc.PrivateKeyTest('test_sign'))
```
### Exercise 4
Verify the DER signature for the hash of "ECDSA is awesome!" for the given SEC pubkey
`z = int.from_bytes(hash256('ECDSA is awesome!'), 'big')`
Public Key in SEC Format:
0204519fac3d910ca7e7138f7013706f619fa8f033e6ec6e09370ea38cee6a7574
Signature in DER Format: 304402201f62993ee03fca342fcb45929993fa6ee885e00ddad8de154f268d98f083991402201e1ca12ad140c04e0e022c38f7ce31da426b8009d02832f0b44f39a6b178b7a1
```
# Exercise 4
from ecc import S256Point, Signature
from helper import hash256
der = bytes.fromhex('304402201f62993ee03fca342fcb45929993fa6ee885e00ddad8de154f268d98f083991402201e1ca12ad140c04e0e022c38f7ce31da426b8009d02832f0b44f39a6b178b7a1')
sec = bytes.fromhex('0204519fac3d910ca7e7138f7013706f619fa8f033e6ec6e09370ea38cee6a7574')
# message is the hash256 of the message "ECDSA is awesome!"
z = int.from_bytes(hash256(b'ECDSA is awesome!'), 'big')
# parse the der format to get the signature
sig = Signature.parse(der)
# parse the sec format to get the public key
point = S256Point.parse(sec)
# use the verify method on S256Point to validate the signature
print(point.verify(z, sig))
```
### Exercise 5
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_version`
```
# Exercise 5
reload(tx)
run(tx.TxTest('test_parse_version'))
```
### Exercise 6
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_inputs`
```
# Exercise 6
reload(tx)
run(tx.TxTest('test_parse_inputs'))
```
### Exercise 7
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_outputs`
```
# Exercise 7
reload(tx)
run(tx.TxTest('test_parse_outputs'))
```
### Exercise 8
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_locktime`
```
# Exercise 8
reload(tx)
run(tx.TxTest('test_parse_locktime'))
```
### Exercise 9
What is the scriptSig from the second input in this tx? What is the scriptPubKey and amount of the first output in this tx? What is the amount for the second output?
```
010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600
```
```
# Exercise 9
from io import BytesIO
from tx import Tx
hex_transaction = '010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600'
# bytes.fromhex to get the binary representation
bin_transaction = bytes.fromhex(hex_transaction)
# create a stream using BytesIO()
stream = BytesIO(bin_transaction)
# Tx.parse() the stream
tx_obj = Tx.parse(stream)
# print tx's second input's scriptSig
print(tx_obj.tx_ins[1].script_sig)
# print tx's first output's scriptPubKey
print(tx_obj.tx_outs[0].script_pubkey)
# print tx's second output's amount
print(tx_obj.tx_outs[1].amount)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from itertools import combinations

data = np.load('/home/jan/lbc_full.npy')
meta = pd.read_csv('/mnt/tchandra-lab/Jan/methyl-pattern/data/lbc/meta.csv')
lbc_fitness = pd.read_csv('Datasets/fitness_table.csv', index_col='id')
# Pre-allocate the memory for meth_gradients
counter = 0
for part_id in meta.ID.unique():
    # Extract number of waves in participant (sorting is irrelevant for len())
    waves_number = len(meta[meta.ID == part_id])
    # Use arithmetic progression formula to update #possible wave combinations.
    # NOTE: do NOT call this variable `combinations` -- that would shadow
    # itertools.combinations and crash the pairing loop below with a TypeError.
    n_pairs = waves_number * (waves_number - 1) // 2
    counter = counter + n_pairs
# create empty numpy array, each row is a combination of methylation gradients
gradients = np.empty((counter, data.shape[1]))
# Create gradients metadata dataframe
gradients_meta = pd.DataFrame()
# Track combination number to point towards a row in gradients
index_counter = 0
for part_id in meta.ID.unique():
    part_slice = meta[meta.ID == part_id].sort_values(by='WAVE')
    for comb in combinations(part_slice.WAVE, 2):
        init_index = part_slice[part_slice['WAVE'] == comb[0]]['index']
        last_index = part_slice[part_slice['WAVE'] == comb[1]]['index']
        # Waves are 3 years apart, hence the 3*(wave difference) time span
        gradients[index_counter, :] = (data[last_index] - data[init_index]) / (3 * (comb[1] - comb[0]))
        # NOTE: DataFrame.append is deprecated (removed in pandas 2.0); kept
        # for compatibility with the pandas version used in this notebook.
        gradients_meta = gradients_meta.append({'part_id': part_id,
                                                'gradients_index': index_counter,
                                                'combination': comb},
                                               ignore_index=True)
        index_counter += 1
gradients_meta
y = np.zeros((len(gradients_meta), len(lbc_fitness.columns)))
for i, row in gradients_meta.iterrows():
    if row.part_id in lbc_fitness.index.unique():
        y[i, :] = lbc_fitness[lbc_fitness.index == row.part_id].values
# Fix: the target matrix built above is `y`; the original referenced an
# undefined name `target` here, which raised a NameError.
zero_columns = np.where(~y.any(axis=1))[0]
y_filtered = np.delete(y, zero_columns, axis=0)
X_filtered = np.delete(gradients, zero_columns, axis=0)
from sklearn.decomposition import NMF
model = NMF(n_components=100, init='random', random_state=0)
W = model.fit_transform(X_filtered)
H = model.components_
std_CpG = np.nanstd(X_filtered, axis=0)
box = sns.boxplot(x=std_CpG)
box
a = 1
a
```
# Check sites
```
def part_gradient(part_id):
    """Return the per-year methylation gradient between a participant's
    first and last measured wave.

    Relies on module-level globals `meta` (dataframe with ID, WAVE and
    `index` columns) and `data` (methylation matrix). Waves are assumed
    to be 3 years apart, hence the 3*(wave difference) time span.
    """
    part_slice = meta[meta.ID == part_id].sort_values(by='WAVE')
    time_span = 3 * (part_slice.WAVE.iloc[-1] - part_slice.WAVE.iloc[0])
    # Reuse the already-filtered, already-sorted slice instead of
    # re-filtering and re-sorting `meta` twice more (as the original did).
    first_index = part_slice['index'].iloc[0]
    last_index = part_slice['index'].iloc[-1]
    gradient = (data[last_index] - data[first_index]) / time_span
    return gradient
def check_sites(part_id, top_sites=True, bottom=True, n_std=4):
    """Plot CpG sites whose methylation gradient deviates more than
    `n_std` standard deviations from the participant's mean gradient.

    Parameters
    ----------
    part_id : participant identifier used to slice the global `meta`.
    top_sites : if True, plot sites with large positive gradients (top axis).
    bottom : if True, plot sites with large negative gradients (bottom axis).
    n_std : number of standard deviations defining an outlier site.

    Returns
    -------
    (fig, (high_sites, low_sites)) -- the figure and the outlier site indices.

    Relies on module-level globals `meta`, `data` and `part_gradient`.
    """
    # Extract participant data
    part_slice = meta[meta.ID == part_id].sort_values(by='WAVE')
    part_data = data[part_slice['index'],]
    # Compute evolution of methylation between first and last timepoint
    meth_evolution = part_gradient(part_id)
    mean = np.nanmean(meth_evolution)
    std = np.nanstd(meth_evolution)
    # Find locations with large gradients. Use names distinct from the
    # boolean flags: the original assigned to `top_sites` here, clobbering
    # the parameter and leaving both flags dead.
    high_sites = np.where(meth_evolution > mean + n_std * std)[0]
    low_sites = np.where(meth_evolution < mean - n_std * std)[0]
    fig, (ax1, ax2) = plt.subplots(2, 1)
    # Plot top sites (only when requested; default behaviour unchanged)
    if top_sites:
        for site in high_sites:
            ax1.plot(part_slice['WAVE'], part_data[:, site])
    # Plot bottom sites (only when requested; default behaviour unchanged)
    if bottom:
        for site in low_sites:
            ax2.plot(part_slice['WAVE'], part_data[:, site])
    return fig, (high_sites, low_sites)
# Compute evolution of methylation between first and last timepoint
meth_evolution = part_gradient('LBC0001A')
mean = np.nanmean(meth_evolution)
std = np.nanstd(meth_evolution)
print(f'Mean: {mean} -- 2 Std: {2*std}')
box = sns.boxplot(x=meth_evolution)
# Extract and plot top and bottom sites
fig, sites = check_sites('LBC0001A')
# Extract and plot top and bottom sites
fig_2, sites_2 = check_sites('LBC0251K')
```
# Predicting the presence of mutations based on the longitudinal evolution of mutations
## Preparing a dataset
```
import numpy as np
# matplotlib was never imported in this notebook although `plt` is used
# below -- the original cell raised a NameError.
import matplotlib.pyplot as plt
from tensorflow import keras
from keras.datasets import mnist
#loading dataset
(train_X, train_y), (val_X, val_y) = mnist.load_data()
#normalizing the dataset
train_X, val_X = train_X / 255, val_X / 255
# visualizing 9 random digits from the dataset
for i in range(331, 340):
    plt.subplot(i)
    a = np.random.randint(0, train_X.shape[0], 1)
    plt.imshow(train_X[a[0]], cmap=plt.get_cmap('binary'))
plt.tight_layout()
plt.show()
train_X.shape
```
| github_jupyter |
# ML Model For Crop Prediction
1. We are using the dataset which we have already cleaned and can be found <a href="https://github.com/Harshit564/Crop-Prediction-ML-Model/blob/main/final_crops_data.csv" target="_top">here</a>
2. Our goal is to train a ML model which can predict the crop from the given features.
```
#importing the necessary libraries
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
# loading the csv file into the dataframe object
crops = pd.read_csv("final_crops_data.csv")
crops.head()
# features and target variable in the dataset
columns = list(crops.columns)
print("features in the dataset :-")
print(columns[:-1])
print("\n")
print("Target variable :-")
print(columns[-1])
# Let's explore the dataset before training the ML model
fig, axes = plt.subplots(2, 2, sharey=False, sharex=False, figsize=(10, 10))
# Use dedicated names for the subplot coordinates: the original reused `x`
# and `y` as both the coordinate lists and the zip loop variables, shadowing
# the lists on the first iteration.
rows = [0, 0, 1, 1]
cols = [0, 1, 0, 1]
for r, c, feature in zip(rows, cols, columns[:-1]):
    axes[r, c].hist(crops[feature], bins=30)
    axes[r, c].set_title(feature, fontsize=20)
    axes[r, c].set_ylabel("frequency", fontsize=10)
plt.show()
```
### From the histograms of all features you can see only pH has the normal distribution
```
# Boxplots
fig, ax = plt.subplots(figsize=(13, 13))
sns.boxplot(x="pH", y="crop", data=crops)
labels = ax.get_yticklabels()
# Text properties are lowercase and case-sensitive: `Fontsize` is rejected
# by matplotlib, and the size should be a number, not the string "18".
ax.set_ylabel("Crops", fontsize=18)
ax.set_yticklabels(labels, fontsize=14)
plt.show()
```
### From the boxplot you can see "phaphar" crop has the largest pH range.
```
# Max humidity that can be withstood by crops.
# Fix: the dataframe loaded above is named `crops`; the original called
# `crop.groupby(...)`, which raised a NameError.
crops_max_temp = crops.groupby("crop")["humidity"].max()
crops_max_temp = crops_max_temp * 100
fig, ax = plt.subplots(figsize=(12, 12), dpi=80)
ax.plot(crops_max_temp, marker="s", color='r')
ax.set_title("Maximum Humidity withstand by crops", fontsize=20)
ax.set_xlabel("crops", fontsize=15, labelpad=20)
ax.set_ylabel("Humidity(in %)", fontsize=15)
plt.xticks(rotation=45, fontsize=14)
plt.yticks(fontsize=14)
plt.show()
```
### From this graph you can see that three crops — cabbage, strawberry and tomato — can withstand 90% humidity
```
#Splitting the data into training and testing set
features = crops.iloc[:,:-1]
label = crops.iloc[:,-1]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, label, random_state=7,test_size=0.2, stratify=label)
# Preprocessing the data before feeding it to the ML algorithm
from sklearn.preprocessing import StandardScaler
# Standard Scaling
std_scalar = StandardScaler()
X_train_std_scaled = std_scalar.fit_transform(X_train)
X_test_std_scaled = std_scalar.transform(X_test)
# Training the dataset using K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
#lr_clf = LogisticRegression(max_iter=500,random_state=7)
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train_std_scaled,y_train)
y_predict_knn = knn_clf.predict(X_test_std_scaled)
print("Accuracy score using K(=3) Neighbors is " + str(accuracy_score(y_test,y_predict_knn)))
lr_clf = LogisticRegression(max_iter=100000,random_state=7)
lr_clf.fit(X_train_std_scaled,y_train)
y_predict_lr = lr_clf.predict(X_test_std_scaled)
print("Accuracy score Logistic Regression using is " + str(accuracy_score(y_test,y_predict_lr)))
from sklearn.svm import SVC
svm_clf = SVC(kernel='rbf')
svm_clf.fit(X_train_std_scaled, y_train)
y_predict_svm = svm_clf.predict(X_test_std_scaled)
print("Accuracy score using SVM is " + str(accuracy_score(y_test,y_predict_svm)))
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(max_depth=10)
tree_clf.fit(X_train_std_scaled, y_train)
y_predict_tree = tree_clf.predict(X_test_std_scaled)
print("Accuracy score using Decision tree classifier is " + str(accuracy_score(y_test,y_predict_tree)))
# Random Forest Model
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=100,max_depth=9,random_state=50)
rf_clf.fit(X_train_std_scaled, y_train)
y_predict_rf = rf_clf.predict(X_test_std_scaled)
print("Accuracy score using Random Forest classifier is " + str(accuracy_score(y_test,y_predict_rf)))
# Finding the best hyperparameters using RandomisedSearchCV
# Forming the random grid
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
print(random_grid)
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf = RandomForestClassifier()
# RandomizedSearchCV was never imported in this notebook -- import it here
# so the cell runs standalone.
from sklearn.model_selection import RandomizedSearchCV
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
# Fit the random search model
rf_random.fit(X_train_std_scaled, y_train)
rf_random.best_params_
# Again training the Random Forest Classifier with best parameters
rf_clf_best = RandomForestClassifier(n_estimators=400,
min_samples_split=2,
min_samples_leaf=4,
max_features="sqrt",
max_depth=10,
bootstrap=True)
rf_clf_best.fit(X_train_std_scaled, y_train)
y_predict_rf_be = rf_clf_best.predict(X_test_std_scaled)
print("Accuracy score using tuned Random Forest classifier is " + str(accuracy_score(y_test,y_predict_rf_be)))
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
'max_depth': [80, 90, 100, 110],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [100, 200, 300, 1000]
}
# Create a based model
rf_clf_grid = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf_clf_grid, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
grid_search.fit(X_train_std_scaled, y_train)
grid_search.best_params_
# Again training the Random Forest Classifier with best parameters found using GridSearchCV
# NOTE(review): these values do not all come from the param_grid above --
# n_estimators=50 and max_depth=10 are not in the searched grid, and
# max_features=4 is outside [2, 3]. Presumably hand-adjusted afterwards;
# verify against the actual grid_search.best_params_ output.
rf_clf_best_gr = RandomForestClassifier(n_estimators=50,
min_samples_split=8,
min_samples_leaf=5,
max_features=4,
max_depth=10,
bootstrap=True,
random_state=50)
rf_clf_best_gr.fit(X_train_std_scaled, y_train)
y_predict_rf_gr = rf_clf_best_gr.predict(X_test_std_scaled)
print("Accuracy score using tuned(GridSearchCV) Random Forest classifier is " + str(accuracy_score(y_test,y_predict_rf_gr)))
```
## After tuning the Hyperparameter we managed to improve our model by 0.2%
| github_jupyter |
<a href="https://colab.research.google.com/github/navroz-lamba/DS-Unit-2-Linear-Models/blob/master/Assignment_214_Logistic_Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import RandomizedSearchCV
# importing the dataset
df = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/burritos/burritos.csv',
parse_dates=['Date'],
index_col='Date')
df.head(25)
# df.Date.dt.date.min()
# df.Date.dt.date.max()
# Derive binary classification target:
# We define a 'Great' burrito as having an
# overall rating of 4 or higher, on a 5 point scale.
# Drop unrated burritos.
df = df.dropna(subset=['overall'])
df['Great'] = df['overall'] >= 4
# Clean/combine the Burrito categories
df['Burrito'] = df['Burrito'].str.lower()
california = df['Burrito'].str.contains('california')
asada = df['Burrito'].str.contains('asada')
surf = df['Burrito'].str.contains('surf')
carnitas = df['Burrito'].str.contains('carnitas')
df.loc[california, 'Burrito'] = 'California'
df.loc[asada, 'Burrito'] = 'Asada'
df.loc[surf, 'Burrito'] = 'Surf & Turf'
df.loc[carnitas, 'Burrito'] = 'Carnitas'
df.loc[~california & ~asada & ~surf & ~carnitas, 'Burrito'] = 'Other'
df.head()
# Drop some high cardinality categoricals
df = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL', 'Neighborhood'])
# Drop some columns to prevent "leakage"
df = df.drop(columns=['Rec', 'overall'])
# split into train and test
filt = df.index < '2018-01-01'
df_train = df.loc[filt]
df_test = df.loc[~filt]
df_train.isna().sum().sort_values(ascending = False)
# since all the NaN values under categorical dtypes means they were not added to the burrito,
#i am replacing them with not added so we could still use those to predict our model
cat_attribs = df_train.select_dtypes(include='object')
for value in cat_attribs:
df_train[value].fillna('''Wasn't Added''', inplace= True)
df_train.dtypes
# looking at the NaN values again
df_train.isna().sum().sort_values(ascending = False)
# we will drop Queso as all the values are NaN
# We will also delete the Density, Mass, yelp and Google as 90% or more of the values are NaN
df_train = df_train.drop(['Queso', 'Density (g/mL)', 'Mass (g)', 'Yelp', 'Google','Chips'], axis=1)
df_train.head()
```
We will take care of the remaining numerical NaN values with `SimpleImputer`.
# split into train, test and val
```
X = df_train.drop('Great', axis=1)
y = df_train['Great']
df_train.shape
# Train set
cutoff_train = '2017-01-01'
filt= (X.index < cutoff_train)
X_train, y_train = X.loc[filt], y.loc[filt]
# val set
X_val, y_val = X.loc[~filt], y.loc[~filt]
X_train.shape[0] + X_val.shape[0] == df_train.shape[0]
```
# lets build a pipeline
### separating categorical and numerical attributes
```
df_cat = X_train.select_dtypes(include='object')
df_cat.columns
df_cat_val = X_val.select_dtypes(include='object')
df_cat_val.columns
df_num = X_train.select_dtypes(exclude='object')
df_num.columns
pipeline_num = Pipeline([('imputer', SimpleImputer()),
('scaler', StandardScaler())])
pipeline_cat = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')),
("ohe", OneHotEncoder(sparse=False))])
# now putting the pipelines together using ColumnTransformer
#generate a list of col and num attributes that we could call
num_attribs = list(df_num)
cat_attribs = list(df_cat)
full_pipeline = ColumnTransformer([
('categorical_pipeline', pipeline_cat, cat_attribs),
('numerical_pipeline', pipeline_num, num_attribs)])
# transform the X_train
df_prepared_train = full_pipeline.fit_transform(X_train)
df_prepared_val = full_pipeline.transform(X_val)
```
## Making one final pipeline with the logistic regression model
```
# we could add regularization to the model by adding the hyperparameter, C
log_reg = Pipeline([('full_pipeline', full_pipeline),
('logistic Regression', LogisticRegression())])
# fitting the train test
log_reg.fit(X_train, y_train)
# log_reg.fit(X_val, y_val)
print('Training Accuracy:', log_reg.score(X_train, y_train))
print('Validation Accuracy:', log_reg.score(X_val, y_val))
```
## We see that our model is clearly overfitting and we would need regularization
```
# lets find the best value for C using cross validation
log_reg = LogisticRegression(random_state=42)
parameters = {'C': [.001, .005, 1e-5, .01, .05, 1e-2, 1, 5, 10]}
log_reg_cv = RandomizedSearchCV(log_reg, parameters, n_iter=9, cv=10, random_state=42)
# Fix: the preprocessed training matrix built earlier is `df_prepared_train`;
# the original fit on `df_prepared`, which was never defined (NameError).
log_reg_cv.fit(df_prepared_train, y_train)
print(log_reg_cv.best_params_)
print(log_reg_cv.best_estimator_)
# using the best_estimator_ lets update the pipeline
log_reg = Pipeline([('full_pipeline', full_pipeline),
                    ('logistic Regression', LogisticRegression(**log_reg_cv.best_params_))])
# fitting the train test
log_reg.fit(X_train, y_train);
```
# Score with the regularized Logistic Regression model
```
print('Training Accuracy:', log_reg.score(X_train, y_train))
print('Validation Accuracy:', log_reg.score(X_val, y_val))
!pip install feature-engine
```
| github_jupyter |
# Objective:
As input to the system, take the live feed from the webcam and use pose estimation to map out a small dance tutorial.
# Approach:
- We will take a pretrained **openpose estimation model** to predict the **18 keypoints** on a human body.
- We take openpose model for tensorflow by Ildoo Kim
- GitHub Repo Link: https://github.com/ildoonet/tf-pose-estimation
<br>**[!] Note**: Some how I found issues with this repo to work with tensorflow 2.0 and followed a modified repo of his by Gunjan Seth.<br>
GitHub Repo Link: https://github.com/gsethi2409/tf-pose-estimation
<br>Medium Blog by Gunjan Seth: https://medium.com/@gsethi2409/pose-estimation-with-tensorflow-2-0-a51162c095ba
- The keypoints of the dancer are obtained and stored in a array list.
- These keypoints are **normalized**.
- The user feed is taken and the keypoints are detected.
- The keypoints are normalized and the **cosine similarity** is found between the user keypoints and the array of dancer keypoints.
- The minimum similarity score is **compared with the threshold**, and the system then displays whether the user's steps are correct for the given dancer moves.
# Constraints To Look For:
1. The model should be fast for prediction. Latency should be avoided.
2. Predictions should be accurate and the steps should be close enough with the dancer.
## Import the Necessary Libraries
```
import sys
import time
import logging
import numpy as np
import cv2
import numpy as np
from tf_pose import common
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer
import warnings
warnings.filterwarnings('ignore')
```
## Model and TfPose Estimator
We initialize the pretrained model with the required parameters as seen below.
```
camera = 0
resize = '432x368' # resize images before they are processed
resize_out_ratio = 4.0 # resize heatmaps before they are post-processed
model='mobilenet_v2_large'
show_process = False
tensorrt = False # for tensorrt process
w, h = model_wh(resize)
if w > 0 and h > 0:
e = TfPoseEstimator(get_graph_path(model), target_size=(w, h), trt_bool=False)
else:
e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368), trt_bool=False)
print('********* Model Ready *************')
```
# Take position from the trainer (dancer):
- We made two functions to get all the keypoints from the trainer and store them in a dataframe and in a list.
- The function **"dance_video_processing"** is used to predict all the keypoints from the video and return all the keypoints for the video.
- The function **"get_position"** is used to take all the keypoints that are returned from the above function, preprocess them and return the dataframe and the list of keypoints.
```
def dance_video_processing(video_path=r'dance_video/dancer.mp4', showBG=True):
    """Run pose estimation over the trainer video and collect keypoints.

    Returns a list with one dict per detected human per frame, mapping a
    CocoPart index to its [x, y] pixel coordinates. Press 'q' to stop early.
    Relies on module-level globals `e` (TfPoseEstimator), `w` and `h`.
    """
    cap = cv2.VideoCapture(video_path)
    if cap.isOpened() is False:
        print("Error opening video stream or file")
    fps_time = 0
    # Accumulate keypoints across ALL frames. In the original this list was
    # re-created inside the loop, so only the final frame's data survived.
    keypoints_list = []
    while True:
        ret_val, image = cap.read()
        dim = (368, 428)
        if ret_val:
            # resize image
            image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
            humans = e.inference(image,
                                 resize_to_default=(w > 0 and h > 0),
                                 upsample_size=4.0)
            if not showBG:
                image = np.zeros(image.shape)
            # Plotting the keypoints and lines to the image
            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            npimg = np.copy(image)
            image_h, image_w = npimg.shape[:2]
            for human in humans:
                # Fresh dict per human so appended entries are not aliased
                # to one shared object.
                centers = {}
                for i in range(common.CocoPart.Background.value):
                    if i not in human.body_parts.keys():
                        continue
                    body_part = human.body_parts[i]
                    x_axis = int(body_part.x * image_w + 0.5)
                    y_axis = int(body_part.y * image_h + 0.5)
                    centers[i] = [x_axis, y_axis]
                keypoints_list.append(centers)
            # To display fps
            cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # To display image
            cv2.imshow('Dancer', image)
            fps_time = time.time()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
    return keypoints_list
def get_position(video_path=r'dance_video/dancer.mp4', showBG=True):
    """Collect dancer keypoints and flatten them into 36-feature rows.

    Each row holds the 18 (x, y) keypoint pairs of one detected pose;
    keypoints that were not detected are encoded as (0, 0).

    Returns
    -------
    (data, keyp_list) : a DataFrame with columns "0".."35" and the raw
    list of feature rows.
    """
    import pandas as pd
    # Pass the arguments through -- the original ignored its own parameters
    # and always processed the default video.
    keypoints_list = dance_video_processing(video_path, showBG)
    keyp_list = []
    # Preprocessing of the keypoints data
    for frame_keypoints in keypoints_list:
        # Allocate a FRESH feature vector per pose: the original reused one
        # list object, so every appended row aliased the same memory and all
        # rows ended up identical to the last frame.
        features = [0] * 36
        for j in range(18):
            k = 2 * j
            try:
                features[k] = frame_keypoints[j][0]
                features[k + 1] = frame_keypoints[j][1]
            except KeyError:
                # keypoint j was not detected for this pose; keep (0, 0)
                pass
        keyp_list.append(features)
    # Getting all the feature column names for initialization of our dataframe.
    column_names = [str(i) for i in range(36)]
    data = pd.DataFrame(keyp_list, columns=column_names)
    return data, keyp_list
data,keyp_list=get_position()
data.head()
```
**Observation:**
- We can see how the keypoints data looks from the above example.
- Since they are 18 keypoints and each keypoint has **x-coordinate** and **y-coordinate** we have **36 columns** (18 x 2).
# Cosine Similarity:
Cosine Similarity function for our model to find the keypoints.
```
def findCosineSimilarity_1(source_representation, test_representation):
    """Return the cosine distance (1 - cosine similarity) of two vectors."""
    import numpy as np
    dot = np.matmul(np.transpose(source_representation), test_representation)
    source_sq = np.sum(np.multiply(source_representation, source_representation))
    test_sq = np.sum(np.multiply(test_representation, test_representation))
    denominator = np.sqrt(source_sq) * np.sqrt(test_sq)
    return 1 - dot / denominator
```
# Comparing:
Comparing the user images with keypoints of the dancer.
```
def compare_positions(trainer_video, user_video, keyp_list, dim=(420, 720)):
    """Compare the user's pose against the dancer keypoints frame-by-frame.

    Opens both feeds, estimates the user's pose for each frame, normalizes
    the flattened 36-feature keypoint vector and scores it against every
    dancer pose with cosine distance; the minimum distance is compared to a
    0.15 threshold to label the step CORRECT / NOT CORRECT. Press 'q' to quit.

    Relies on module-level globals `e` (TfPoseEstimator), `w`, `h` and
    `findCosineSimilarity_1`.
    """
    cap = cv2.VideoCapture(trainer_video)
    cam = cv2.VideoCapture(user_video)
    cam.set(3, w)
    cam.set(4, h)
    fps_time = 0  # Initializing fps to 0
    # Fit the normalizer on the dancer keypoints ONCE, before the loop.
    # The original re-fit and re-transformed `keyp_list` on EVERY frame,
    # repeatedly normalizing already-normalized rows (idempotent, so the
    # output is unchanged, but it wasted work each iteration).
    transformer = Normalizer().fit(keyp_list)
    keyp_list = transformer.transform(keyp_list)
    while True:
        ret_val, image_1 = cam.read()
        ret_val_1, image_2 = cap.read()
        if ret_val_1 and ret_val:
            # resizing the images
            image_2 = cv2.resize(image_2, dim, interpolation=cv2.INTER_AREA)
            image_1 = cv2.resize(image_1, dim, interpolation=cv2.INTER_AREA)
            # Only the USER frame needs inference; the original also ran
            # inference on the dancer frame (`dancers_1`) and never used it.
            humans_2 = e.inference(image_1, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
            # Showing FPS
            cv2.putText(image_2, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # Displaying the dancer feed.
            cv2.imshow('Dancer Window', image_2)
            # Getting User keypoints, normalization and comparing also plotting the keypoints and lines to the image
            image_1 = TfPoseEstimator.draw_humans(image_1, humans_2, imgcopy=False)
            npimg = np.copy(image_1)
            image_h, image_w = npimg.shape[:2]
            centers = {}
            for human in humans_2:
                # draw point
                for i in range(common.CocoPart.Background.value):
                    if i not in human.body_parts.keys():
                        continue
                    body_part = human.body_parts[i]
                    x_axis = int(body_part.x * image_w + 0.5)
                    y_axis = int(body_part.y * image_h + 0.5)
                    centers[i] = [x_axis, y_axis]
            # Flatten the detected keypoints into a 36-feature vector;
            # undetected keypoints stay (0, 0).
            features = [0] * 36
            for j in range(18):
                k = 2 * j
                try:
                    features[k] = centers[j][0]
                    features[k + 1] = centers[j][1]
                except KeyError:
                    pass
            features = transformer.transform([features])
            min_ = 100  # Initializing a value to get minimum cosine similarity score from the dancer array list with the user
            for dancer_pose in keyp_list:
                sim_score = findCosineSimilarity_1(dancer_pose, features[0])
                # Getting the minimum Cosine Similarity Score
                if min_ > sim_score:
                    min_ = sim_score
            # Displaying the minimum cosine score
            cv2.putText(image_1, str(min_), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            # If the distance is below the threshold
            if min_ < 0.15:
                cv2.putText(image_1, "CORRECT STEPS", (120, 700),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            else:
                cv2.putText(image_1, "NOT CORRECT STEPS", (80, 700),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image_1, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # Display the user feed
            cv2.imshow('User Window', image_1)
            fps_time = time.time()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cam.release()
    cap.release()
    cv2.destroyAllWindows()
```
##### Note:
Since I can't dance, I'll be using a video for this :P.<br> We can set the **user_video** argument to **0 or 1** to turn on the live camera, depending on the type of camera we have.
### For a wrong positions:
```
compare_positions(r'dance_video/dancer.mp4',r'dance_video/wrong_dance.mp4',keyp_list)
```
### For a correct positions:
```
compare_positions(r'dance_video/dancer.mp4',r'dance_video/right_dance.mp4',keyp_list)
```
# Conclusion:
- We have developed a pose estimation similarity pipeline to compare similarity between two poses from the given feed of videos or live cam.<br>
**Flaws:**
- This approach fails when the trainer is far or the user is near to the camera or vise-versa. This happens because there is a **scale variation** between the keypoints of the image.<br>
**Solution:**
- We can eliminate this problem by **cropping out the image of a person** using a CNN architecture like YOLO, or anything that can detect the bounding boxes of a person.
- This image then can be fed to the openpose model to estimate keypoints for both the sources.<br>
**Scope of improvement:**
- The accuracy of the model for keypoint prediction can be increased by taking a much powerful pretrained model architecture than mobilenet.
| github_jupyter |
# Advanced Matplotlib Concepts Lecture
In this lecture we cover some more advanced topics which you won't usually use as often. You can always reference the documentation for more resources!
### Logarithmic Scale
* It is also possible to set a logarithmic scale for one or both axes. This functionality is in fact only one application of a more general transformation system in Matplotlib. Each of the axes' scales are set seperately using `set_xscale` and `set_yscale` methods which accept one parameter (with the value "log" in this case):
```
import matplotlib.pyplot as plt
import matplotlib as mp
%matplotlib inline
import numpy as np
x = np.linspace(0,5,11) # We go from 0 to 5 and grab 11 points which are linearly spaced.
y = x ** 2
fig, axes = plt.subplots(1, 2, figsize=(10,4))
axes[0].plot(x, x**2, x, np.exp(x))
axes[0].set_title("Normal scale")
axes[1].plot(x, x**2, x, np.exp(x))
axes[1].set_yscale("log")
axes[1].set_title("Logarithmic scale (y)");
```
### Placement of ticks and custom tick labels
* We can explicitly determine where we want the axis ticks with `set_xticks` and `set_yticks`, which both take a list of values for where on the axis the ticks are to be placed. We can also use the `set_xticklabels` and `set_yticklabels` methods to provide a list of custom text labels for each tick location:
```
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, x**2, x, x**3, lw=2)
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels([r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'], fontsize=18)
yticks = [0, 50, 100, 150]
ax.set_yticks(yticks)
ax.set_yticklabels(["$%.1f$" % y for y in yticks], fontsize=18); # use LaTeX formatted labels
```
There are a number of more advanced methods for controlling major and minor tick placement in matplotlib figures, such as automatic placement according to different policies. See http://matplotlib.org/api/ticker_api.html for details.
#### Scientific notation
With large numbers on axes, it is often better use scientific notation:
```
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_title("scientific notation")
ax.set_yticks([0, 50, 100, 150])
from matplotlib import ticker
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax.yaxis.set_major_formatter(formatter)
```
## Axis number and axis label spacing
```
# distance between x and y axis and the numbers on the axes
mp.rcParams['xtick.major.pad'] = 5
mp.rcParams['ytick.major.pad'] = 5
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("label and axis spacing")
# padding between axis label and axis numbers
ax.xaxis.labelpad = 5
ax.yaxis.labelpad = 5
ax.set_xlabel("x")
ax.set_ylabel("y")
# restore defaults
mp.rcParams['xtick.major.pad'] = 3
mp.rcParams['ytick.major.pad'] = 3
```
#### Axis position adjustments
Unfortunately, when saving figures the labels are sometimes clipped, and it can be necessary to adjust the positions of axes a little bit. This can be done using `subplots_adjust`:
```
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("title")
ax.set_xlabel("x")
ax.set_ylabel("y")
fig.subplots_adjust(left=0.15, right=.9, bottom=0.1, top=0.9);
```
### Axis grid
With the `grid` method in the axis object, we can turn on and off grid lines. We can also customize the appearance of the grid lines using the same keyword arguments as the `plot` function:
```
fig, axes = plt.subplots(1, 2, figsize=(10,3))
# default grid appearance
axes[0].plot(x, x**2, x, x**3, lw=2)
axes[0].grid(True)
# custom grid appearance
axes[1].plot(x, x**2, x, x**3, lw=2)
axes[1].grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
```
### Axis spines
* We can also change the properties of axis spines:
```
fig, ax = plt.subplots(figsize=(6,2))
ax.spines['bottom'].set_color('blue')
ax.spines['top'].set_color('blue')
ax.spines['left'].set_color('red')
ax.spines['left'].set_linewidth(2)
# turn off axis spine to the right
ax.spines['right'].set_color("none")
ax.yaxis.tick_left() # only ticks on the left side
```
### Twin axes
Sometimes it is useful to have dual x or y axes in a figure; for example, when plotting curves with different units together. Matplotlib supports this with the `twinx` and `twiny` functions:
```
fig, ax1 = plt.subplots()
ax1.plot(x, x**2, lw=2, color="blue")
ax1.set_ylabel(r"area $(m^2)$", fontsize=18, color="blue")
for label in ax1.get_yticklabels():
label.set_color("blue")
ax2 = ax1.twinx()
ax2.plot(x, x**3, lw=2, color="red")
ax2.set_ylabel(r"volume $(m^3)$", fontsize=18, color="red")
for label in ax2.get_yticklabels():
label.set_color("red")
```
### Axes where x and y is zero
```
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0)) # set position of x spine to x=0
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0)) # set position of y spine to y=0
xx = np.linspace(-0.75, 1., 100)
ax.plot(xx, xx**3);
```
## Other 2D plot styles
In addition to the regular `plot` method, there are a number of other functions for generating different kinds of plots. See the matplotlib plot gallery for a complete list of available plot types: http://matplotlib.org/gallery.html. Some of the more useful ones are shown below:
```
n = np.array([0,1,2,3,4,5])
fig, axes = plt.subplots(1, 4, figsize=(12,3))
axes[0].scatter(xx, xx + 0.25*np.random.randn(len(xx)))
axes[0].set_title("scatter")
axes[1].step(n, n**2, lw=2)
axes[1].set_title("step")
axes[2].bar(n, n**2, align="center", width=0.5, alpha=0.5)
axes[2].set_title("bar")
axes[3].fill_between(x, x**2, x**3, color="green", alpha=0.5);
axes[3].set_title("fill_between");
```
### Text annotation
* Annotating text in matplotlib figures can be done using the `text` function. It supports LaTeX formatting just like axis label texts and titles:
```
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
ax.text(0.15, 0.2, r"$y=x^2$", fontsize=20, color="blue")
ax.text(0.65, 0.1, r"$y=x^3$", fontsize=20, color="green");
```
### Figures with multiple subplots and insets
* Axes can be added to a matplotlib Figure canvas manually using `fig.add_axes` or using a sub-figure layout manager such as `subplots`, `subplot2grid`, or `gridspec`:
#### subplots
```
fig,ax = plt.subplots(2,3)
fig.tight_layout()
```
### subplot2grid
```
fig = plt.figure()
ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
ax2 = plt.subplot2grid((3,3), (1,0), colspan=2)
ax3 = plt.subplot2grid((3,3), (1,2), rowspan=2)
ax4 = plt.subplot2grid((3,3), (2,0))
ax5 = plt.subplot2grid((3,3), (2,1))
fig.tight_layout()
```
## gridspec
```
import matplotlib.gridspec as gridspec
fig = plt.figure()
gs = gridspec.GridSpec(2, 3, height_ratios=[2,1], width_ratios=[1,2,1])
for g in gs:
ax = fig.add_subplot(g)
fig.tight_layout()
```
### add axes
* Manually adding axes with `add_axes` is useful for adding insets to figures:
```
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
fig.tight_layout()
# inset
inset_ax = fig.add_axes([0.2, 0.55, 0.35, 0.35]) # X, Y, width, height
inset_ax.plot(xx, xx**2, xx, xx**3)
inset_ax.set_title('zoom near origin')
# set axis range
inset_ax.set_xlim(-.2, .2)
inset_ax.set_ylim(-.005, .01)
# set axis tick locations
inset_ax.set_yticks([0, 0.005, 0.01])
inset_ax.set_xticks([-0.1,0,.1]);
```
### Colormap and contour figures
* Colormaps and contour figures are useful for plotting functions of two variables. In most of these functions we will use a colormap to encode one dimension of the data. There are a number of predefined colormaps. It is relatively straightforward to define custom colormaps. For a list of pre-defined colormaps, see: http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps
```
alpha = 0.7
phi_ext = 2 * np.pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
    """Potential energy of a flux qubit at the phase pair (phi_m, phi_p).

    Uses the module-level constants `alpha` and `phi_ext` defined in the
    same cell — presumably the junction asymmetry and the external flux
    bias; confirm against the defining cell.
    """
    coupling = 2 * np.cos(phi_p) * np.cos(phi_m)
    return 2 + alpha - coupling - alpha * np.cos(phi_ext - 2 * phi_p)
phi_m = np.linspace(0, 2*np.pi, 100)
phi_p = np.linspace(0, 2*np.pi, 100)
X,Y = np.meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
```
#### pcolor
```
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*np.pi), Y/(2*np.pi), Z, cmap=mp.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
```
#### imshow
```
fig, ax = plt.subplots()
im = ax.imshow(Z, cmap=mp.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
im.set_interpolation('bilinear')
cb = fig.colorbar(im, ax=ax)
```
## Contour
```
fig, ax = plt.subplots()
cnt = ax.contour(Z, cmap=mp.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
```
## 3D figures
* To use 3D graphics in matplotlib, we first need to create an instance of the `Axes3D` class. 3D axes can be added to a matplotlib figure canvas in exactly the same way as 2D axes; or, more conveniently, by passing a `projection='3d'` keyword argument to the `add_axes` or `add_subplot` methods.
```
from mpl_toolkits.mplot3d.axes3d import Axes3D
```
#### Surface plots
```
fig = plt.figure(figsize=(14,6))
# `ax` is a 3D-aware axis instance because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(1, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=4, cstride=4, linewidth=0)
# surface_plot with color grading and color bar
ax = fig.add_subplot(1, 2, 2, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=mp.cm.coolwarm, linewidth=0, antialiased=False)
cb = fig.colorbar(p, shrink=0.5)
```
## Wire-frame plot
```
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1, 1, 1, projection='3d')
p = ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4,color='teal')
```
#### Contour plots with projections
```
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
cset = ax.contour(X, Y, Z, zdir='z', offset=-np.pi, cmap=mp.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=-np.pi, cmap=mp.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=3*np.pi, cmap=mp.cm.coolwarm)
ax.set_xlim3d(-np.pi, 2*np.pi);
ax.set_ylim3d(0, 3*np.pi);
ax.set_zlim3d(-np.pi, 2*np.pi);
```
## FURTHER READING :
* http://www.matplotlib.org - The project web page for matplotlib.
* https://github.com/matplotlib/matplotlib - The source code for matplotlib.
* http://matplotlib.org/gallery.html - A large gallery showcasing various types of plots matplotlib can create. Highly recommended!
* http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
* http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
| github_jupyter |
# TensorFlow Datasetのテスト
[tf.data.Dataset のAPIドキュメント (tensorflow.org/api_docs)](https://www.tensorflow.org/api_docs/python/tf/data/Dataset)
```
import numpy as np
import tensorflow as tf
```
## 共通的に利用する関数定義
0..9までの連番を格納したDatasetを作成する`make_ds`と、Datasetの中身を表示する`print_ds`を定義。
1回の`print_ds`呼び出しが、機械学習の1エポックのデータ取り出しに相当する。
```
def make_ds():
    """Return a fresh Dataset holding the integers 0..9 (stop value excluded)."""
    numbers = tf.data.Dataset.range(10)
    return numbers
def print_ds(*args):
    """Print the elements of each given Dataset, concatenated on one line.

    Each Dataset is materialised via as_numpy_iterator() and rendered as a
    Python list, so a single call shows one full "epoch" of every Dataset
    passed in.
    """
    rendered = [str(list(ds.as_numpy_iterator())) for ds in args]
    print("".join(rendered))
ds = make_ds()
print_ds(ds)
```
## Datasetに対する操作と結果
```
LOOPS = 12 # Datasetから値を取り出す回数(エポック数のイメージ)
```
### shuffle操作
```
# 単純なDatasetの場合、何度取り出しても同じ値になる。
ds = make_ds()
for i in range(LOOPS):
print_ds(ds)
# shuffleすると、取り出す度にデータがシャッフルされる。
# shuffleの引数にはデータ数を指定すると、データ全体がほぼ均一にシャッフルされる。
ds = make_ds().shuffle(10)
for i in range(LOOPS):
print_ds(ds)
# shuffleに、reshuffle_each_iteration=Falseを設定すると、2回目以降は同じデータの並び順になる。
# つまり、最初の1回だけシャッフルされるような状態。
# デフォルトはreshuffle_each_iteration=Trueとなっているため、取り出す度にシャッフルされる。
ds = make_ds().shuffle(10, reshuffle_each_iteration=False)
for i in range(LOOPS):
print_ds(ds)
```
### shuffleとcache
```
# cacheを指定するとメモリ上にデータをキャッシュできるため、2回目以降のデータ取得でパフォーマンス向上が期待できる。
# しかし、shuffle -> cache の順序で適用すると、2回目以降のシャッフルが反映されない。
# なお、2回目以降のシャッフルを抑止する目的では使用しない方が良い。
# 2回目以降のシャッフルを抑止するのであれば、shuffleでreshuffle_each_iteration=Falseを指定する。
ds = make_ds().shuffle(10).cache() # この順番は非推奨
for i in range(LOOPS):
print_ds(ds)
# cache -> shuffle という順序にすれば、正しくシャッフルが機能する。
# 2回目以降の取り出しでも、shuffleが正しく機能している。
ds = make_ds().cache().shuffle(10) # 正しい順序
for i in range(LOOPS):
print_ds(ds)
```
### shuffleとbatch
```
# batchを使って、指定サイズのミニバッチに分割できる。
# バッチサイズで割りきれずに余った部分は、余った数でバッチ化される。(オプションで切り捨てることも可能)
ds = make_ds().batch(4)
for i in range(LOOPS):
print_ds(ds)
# batch -> shuffle の順序で適用すると、各バッチ単位の固まりでシャッフルされる。
# バッチ内部の値はシャッフルされない。
ds = make_ds().batch(4).shuffle(3)
for i in range(LOOPS):
print_ds(ds)
# shuffle -> batch だと、シャッフルしたデータに対してバッチ化するため、各バッチに含まれる値もシャッフルされる。
ds = make_ds().shuffle(10).batch(4)
for i in range(LOOPS):
print_ds(ds)
# 参考までに、cacheの挙動は、shuffleと組み合わせたときと同じ動き。
# きちんとシャッフルしたければ、cacheはshuffleより先に適用する。
ds = make_ds().shuffle(10).batch(4).cache()
for i in range(LOOPS):
print_ds(ds)
```
### shuffleとtake/skip
```
# take/skipでDatasetを任意の場所で区切って取り出せる。
# takeは、Datasetの先頭から指定した要素数のデータを取り出す。
# skipは、Datasetの先頭から指定した要素数をスキップし、その後のデータを取り出す。
# つまり、skip(7)とすれば、8個目の要素以降全てを取り出せる。
# このtake/skipを使って、Datasetをtrain/test用に簡単に分割できる。
ds = make_ds()
ds_x = ds.take(7)
ds_y = ds.skip(7)
for i in range(LOOPS):
print_ds(ds_x, ds_y)
# shuffleしたデータに対して、take/skipでデータ分割した場合、値を取り出す度に元データ自体がシャッフルされる。
# take/skipで区分けした中でのシャッフル「ではない」ので注意。
# 特に、take/skipでtrain/test用にデータを分割した場合、train/test用データが各エポックごとに混ざってしまうので注意が必要。
ds = make_ds().shuffle(10)
ds_x = ds.take(7)
ds_y = ds.skip(7)
for i in range(LOOPS):
print_ds(ds_x, ds_y)
# shuffleしたデータを、take/skipで分割後に混ぜたくない場合、shuffleでreshuffle_each_iteration=Falseを指定すれば良い。
# こうしておけば、2回目以降のデータ取り出しでも同じ並び順となるため、take/skipのデータが混ざることは無い。
# ただし、take/skipで分割したそれぞれのデータブロック内でも、毎回同じ並び順になってしまう。
ds = make_ds().shuffle(10, reshuffle_each_iteration=False)
ds_x = ds.take(7)
ds_y = ds.skip(7)
for i in range(LOOPS):
print_ds(ds_x, ds_y)
# shuffleしたデータを、take/skipで分割後、takeで取得したデータだけを毎回シャッフルしたい場合、
# takeで取得したデータに対して、再度shuffleを適用すればOK。
ds = make_ds().shuffle(10, reshuffle_each_iteration=False)
ds_x = ds.take(7).shuffle(7) # takeで取得したデータ内でシャッフル
ds_y = ds.skip(7)
for i in range(LOOPS):
print_ds(ds_x, ds_y)
```
| github_jupyter |
# _*Pricing European Put Options*_
### Introduction
<br>
Suppose a <a href="http://www.theoptionsguide.com/put-option.aspx">European put option</a> with strike price $K$ and an underlying asset whose spot price at maturity $S_T$ follows a given random distribution.
The corresponding payoff function is defined as:
$$\max\{K - S_T, 0\}$$
In the following, a quantum algorithm based on amplitude estimation is used to estimate the expected payoff, i.e., the fair price before discounting, for the option:
$$\mathbb{E}\left[ \max\{K - S_T, 0\} \right]$$
as well as the corresponding $\Delta$, i.e., the derivative of the option price with respect to the spot price, defined as:
$$
\Delta = -\mathbb{P}\left[S_T \leq K\right]
$$
The approximation of the objective function and a general introduction to option pricing and risk analysis on quantum computers are given in the following papers:
- <a href="https://arxiv.org/abs/1806.06893">Quantum Risk Analysis. Woerner, Egger. 2018.</a>
- <a href="https://arxiv.org/abs/1905.02666">Option Pricing using Quantum Computers. Stamatopoulos et al. 2019.</a>
```
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit import Aer
from qiskit.aqua.algorithms import IterativeAmplitudeEstimation
from qiskit.aqua.components.uncertainty_models import LogNormalDistribution
from qiskit.aqua.components.uncertainty_problems import UnivariateProblem
from qiskit.aqua.components.uncertainty_problems import UnivariatePiecewiseLinearObjective as PwlObjective
```
### Uncertainty Model
We construct a circuit factory to load a log-normal random distribution into a quantum state.
The distribution is truncated to a given interval $[low, high]$ and discretized using $2^n$ grid points, where $n$ denotes the number of qubits used.
The unitary operator corresponding to the circuit factory implements the following:
$$\big|0\rangle_{n} \mapsto \big|\psi\rangle_{n} = \sum_{i=0}^{2^n-1} \sqrt{p_i}\big|i\rangle_{n},$$
where $p_i$ denote the probabilities corresponding to the truncated and discretized distribution and where $i$ is mapped to the right interval using the affine map:
$$ \{0, \ldots, 2^n-1\} \ni i \mapsto \frac{high - low}{2^n - 1} * i + low \in [low, high].$$
```
# number of qubits to represent the uncertainty
num_uncertainty_qubits = 3
# parameters for considered random distribution
S = 2.0 # initial spot price
vol = 0.4 # volatility of 40%
r = 0.05                    # annual interest rate of 5%
T = 40 / 365 # 40 days to maturity
# resulting parameters for log-normal distribution
mu = ((r - 0.5 * vol**2) * T + np.log(S))
sigma = vol * np.sqrt(T)
mean = np.exp(mu + sigma**2/2)
variance = (np.exp(sigma**2) - 1) * np.exp(2*mu + sigma**2)
stddev = np.sqrt(variance)
# lowest and highest value considered for the spot price; in between, an equidistant discretization is considered.
low = np.maximum(0, mean - 3*stddev)
high = mean + 3*stddev
# construct circuit factory for uncertainty model
uncertainty_model = LogNormalDistribution(num_uncertainty_qubits, mu=mu, sigma=sigma, low=low, high=high)
# plot probability distribution
x = uncertainty_model.values
y = uncertainty_model.probabilities
plt.bar(x, y, width=0.2)
plt.xticks(x, size=15, rotation=90)
plt.yticks(size=15)
plt.grid()
plt.xlabel('Spot Price at Maturity $S_T$ (\$)', size=15)
plt.ylabel('Probability ($\%$)', size=15)
plt.show()
```
### Payoff Function
The payoff function decreases linearly with an increasing spot price at maturity $S_T$ until it reaches zero for a spot price equal to the strike price $K$; it then stays constant at zero for larger spot prices.
The implementation uses a comparator, that flips an ancilla qubit from $\big|0\rangle$ to $\big|1\rangle$ if $S_T \leq K$, and this ancilla is used to control the linear part of the payoff function.
The linear part itself is then approximated as follows.
We exploit the fact that $\sin^2(y + \pi/4) \approx y + 1/2$ for small $|y|$.
Thus, for a given approximation scaling factor $c_{approx} \in [0, 1]$ and $x \in [0, 1]$ we consider
$$ \sin^2( \pi/2 * c_{approx} * ( x - 1/2 ) + \pi/4) \approx \pi/2 * c_{approx} * ( x - 1/2 ) + 1/2 $$ for small $c_{approx}$.
We can easily construct an operator that acts as
$$\big|x\rangle \big|0\rangle \mapsto \big|x\rangle \left( \cos(a*x+b) \big|0\rangle + \sin(a*x+b) \big|1\rangle \right),$$
using controlled Y-rotations.
Eventually, we are interested in the probability of measuring $\big|1\rangle$ in the last qubit, which corresponds to
$\sin^2(a*x+b)$.
Together with the approximation above, this allows to approximate the values of interest.
The smaller we choose $c_{approx}$, the better the approximation.
However, since we are then estimating a property scaled by $c_{approx}$, the number of evaluation qubits $m$ needs to be adjusted accordingly.
For more details on the approximation, we refer to:
<a href="https://arxiv.org/abs/1806.06893">Quantum Risk Analysis. Woerner, Egger. 2018.</a>
```
# set the strike price (should be within the low and the high value of the uncertainty)
strike_price = 2.126
# set the approximation scaling for the payoff function
c_approx = 0.25
# setup piecewise linear objective function
breakpoints = [uncertainty_model.low, strike_price]
slopes = [-1, 0]
offsets = [strike_price - uncertainty_model.low, 0]
f_min = 0
f_max = strike_price - uncertainty_model.low
european_put_objective = PwlObjective(
uncertainty_model.num_target_qubits,
uncertainty_model.low,
uncertainty_model.high,
breakpoints,
slopes,
offsets,
f_min,
f_max,
c_approx
)
# construct circuit factory for payoff function
european_put = UnivariateProblem(
uncertainty_model,
european_put_objective
)
# plot exact payoff function (evaluated on the grid of the uncertainty model)
x = uncertainty_model.values
y = np.maximum(0, strike_price - x)
plt.plot(x, y, 'ro-')
plt.grid()
plt.title('Payoff Function', size=15)
plt.xlabel('Spot Price', size=15)
plt.ylabel('Payoff', size=15)
plt.xticks(x, size=15, rotation=90)
plt.yticks(size=15)
plt.show()
# evaluate exact expected value (normalized to the [0, 1] interval)
exact_value = np.dot(uncertainty_model.probabilities, y)
exact_delta = -sum(uncertainty_model.probabilities[x <= strike_price])
print('exact expected value:\t%.4f' % exact_value)
print('exact delta value: \t%.4f' % exact_delta)
```
### Evaluate Expected Payoff
```
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
# construct amplitude estimation
ae = IterativeAmplitudeEstimation(epsilon=epsilon, alpha=alpha, a_factory=european_put)
result = ae.run(quantum_instance=Aer.get_backend('qasm_simulator'), shots=100)
conf_int = np.array(result['confidence_interval'])
print('Exact value: \t%.4f' % exact_value)
print('Estimated value: \t%.4f' % (result['estimation']))
print('Confidence interval:\t[%.4f, %.4f]' % tuple(conf_int))
```
### Evaluate Delta
The Delta is a bit simpler to evaluate than the expected payoff.
Similarly to the expected payoff, we use a comparator circuit and an ancilla qubit to identify the cases where $S_T \leq K$.
However, since we are only interested in the (negative) probability of this condition being true, we can directly use this ancilla qubit as the objective qubit in amplitude estimation without any further approximation.
```
# setup piecewise linear objective function
breakpoints = [uncertainty_model.low, strike_price]
slopes = [0, 0]
offsets = [1, 0]
f_min = 0
f_max = 1
c_approx = 1
european_delta_objective = PwlObjective(
uncertainty_model.num_target_qubits,
uncertainty_model.low,
uncertainty_model.high,
breakpoints,
slopes,
offsets,
f_min,
f_max,
c_approx
)
# construct circuit factory for payoff function
european_put_delta = UnivariateProblem(
uncertainty_model,
european_delta_objective
)
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
# construct amplitude estimation
ae_delta = IterativeAmplitudeEstimation(epsilon=epsilon, alpha=alpha, a_factory=european_put_delta)
result_delta = ae_delta.run(quantum_instance=Aer.get_backend('qasm_simulator'), shots=100)
conf_int = -np.array(result_delta['confidence_interval'])[::-1]
print('Exact delta: \t%.4f' % exact_delta)
# Typo fixed ("Esimated" -> "Estimated") to match the payoff report above.
# Sign flip: the estimator returns P[S_T <= K]; delta is its negative.
print('Estimated value: \t%.4f' % -result_delta['estimation'])
print('Confidence interval: \t[%.4f, %.4f]' % tuple(conf_int))
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
| github_jupyter |
# Le-Net 1 based architecture
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg as lin
import scipy.signal as sig
from PIL import Image
import glob
import matplotlib.cm as cm
import itertools
########### Functions ############################################################################################################################
# Define Activitation functions, pooling and convolution functions (the rules)
def Sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^-x), elementwise on arrays."""
    z = np.exp(-x)
    return 1 / (1 + z)
def Sigmoid_dx(x):
    """Derivative of the logistic sigmoid: e^-x / (1 + e^-x)^2."""
    z = np.exp(-x)
    return z / (1 + z) ** 2
def TanH(x):
    """Return (1 - e^-x) / (1 + e^-x).

    NOTE(review): despite the name, this equals tanh(x/2), not tanh(x) —
    kept as-is because downstream code was trained with this scaling.
    """
    z = np.exp(-x)
    return (1 - z) / (1 + z)
def Pool(I, W):
    """Downsample square image I by inner products with pooling kernel W.

    I is partitioned into non-overlapping len(W) x len(W) tiles (len(W)
    must divide len(I) evenly); each tile is flattened and dotted with W,
    so an averaging kernel yields mean-pooling.

    Returns a (len(I)//len(W), len(I)//len(W)) array.
    """
    k = len(W)
    # Floor division: identical to `/` on Python 2 ints, but required on
    # Python 3, where true division would hand np.zeros a float shape.
    n = len(I) // k
    PoolImg = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            SelAr = I[i*k:(i+1)*k, j*k:(j+1)*k]
            # Inner product of flattened tile and kernel (both are vectors).
            PoolImg[i, j] = np.inner(SelAr.flatten(), W.flatten())
    return PoolImg
# To automatically make Gaussian kernels
def makeGaussian(size, fwhm=3, center=None):
    """Build a square 2-D Gaussian kernel.

    size   -- side length of the output array
    fwhm   -- full-width-at-half-maximum of the Gaussian
    center -- (x0, y0) peak position; defaults to the array centre
    """
    coords = np.arange(size, dtype=float)
    col = coords[:, np.newaxis]
    if center is None:
        x0 = y0 = size // 2
    else:
        x0, y0 = center[0], center[1]
    dist2 = (coords - x0) ** 2 + (col - y0) ** 2
    return np.exp(-4 * np.log(2) * dist2 / fwhm ** 2)
# To automatically define pooling nodes
def Pool_node(N):
    """Return an N x N averaging (mean-pool) kernel whose entries sum to 1."""
    count = float(N) * float(N)
    return (1.0 / count) * np.ones((N, N))
#################### Define pooling layers ###########################################################################
P12=Pool_node(4)*(1.0/100.0) #factor 100 added to lower values more
P34=Pool_node(1)*(1.0/10.0)
#################### Define Convolution layers #######################################################################
######### First C layer #########
C1=[]
## First Kernel
# Inspiration: http://en.wikipedia.org/wiki/Sobel_operator
# http://stackoverflow.com/questions/9567882/sobel-filter-kernel-of-large-size
Kernel=np.array([[4,3,2,1,0,-1,-2,-3,-4],
[5,4,3,2,0,-2,-3,-4,-5],
[6,5,4,3,0,-3,-4,-5,-6],
[7,6,5,4,0,-4,-5,-6,-7],
[8,7,6,5,0,-5,-6,-7,-8],
[7,6,5,4,0,-4,-5,-6,-7],
[6,5,4,3,0,-3,-4,-5,-6],
[5,4,3,2,0,-2,-3,-4,-5],
[4,3,2,1,0,-1,-2,-3,-4]])
C1.append(Kernel)
## Second Kernel
Kernel=np.matrix.transpose(Kernel)
C1.append(Kernel)
##Third Kernel
#Kernel=makeGaussian(9,5)
#Kernel=(1/np.sum(Kernel))*Kernel
#C1.append(Kernel)
######### Initialize output weights and biases #########
# Define the number of branches in one row
patchSize=40
N_branches= 3
ClassAmount=3 # Forest, City, Water
Size_C2=5
S_H3=((patchSize-C1[0].shape[0]+1)/P12.shape[1])-Size_C2+1
S_H4=S_H3/P34.shape[1]
import pickle
file=open('W.txt','r')
W=pickle.load(file)
file=open('W2.txt','r')
W2=pickle.load(file)
file=open('Output_bias.txt','r')
Output_bias=pickle.load(file)
file=open('H3_bias.txt','r')
H3_bias=pickle.load(file)
file=open('C2.txt','r')
C2=pickle.load(file)
```
# For the extra information regarding the code in the following cell
A random patch is chosen in the following way: the program counts how many files and patches there are in total, then permutes the sequence so that a random patch (forest, city, or water) is chosen every iteration. After selecting the number, the corresponding file is located again.
# save training parameters
```
####### Test phase on new images #######
Error_Test=[]
N_correct=0
patchSize=40
Patches_TEST=np.empty([1,patchSize,patchSize])
Patches_TEST_RGB=np.empty([1,patchSize,patchSize,3])
Patches_t=np.empty([3])
name="Test/Test4.png"
img = Image.open(name)
data=img.convert('RGB')
data= np.asarray( data, dtype="int32" )
data=0.2126*data[:,:,0]+0.7152*data[:,:,1]+0.0722*data[:,:,2]
data2=img.convert('RGB')
data2= np.asarray( data2, dtype="int32" )
Yamount=data.shape[0]/patchSize # Counts how many times the windowsize fits in the picture
Xamount=data.shape[1]/patchSize # Counts how many times the windowsize fits in the picture
# Create patches for structure
data_t=np.array([[data[j*patchSize:(j+1)*patchSize,i*patchSize:(i+1)*patchSize] for i in range(0,Xamount)] for j in range(0,Yamount)])
data_t=np.reshape(data_t, [data_t.shape[0]*data_t.shape[1], patchSize, patchSize])
Patches_TEST=np.append(Patches_TEST,data_t,axis=0)
#Create patches for colour
data_t=np.array([[data2[j*patchSize:(j+1)*patchSize,i*patchSize:(i+1)*patchSize,:] for i in range(0,Xamount)] for j in range(0,Yamount)])
data_t=np.reshape(data_t, [data_t.shape[0]*data_t.shape[1], patchSize, patchSize, 3])
Patches_TEST_RGB=np.append(Patches_TEST_RGB, data_t,axis=0)
Patches_TEST=np.delete(Patches_TEST, 0,0)
Patches_TEST_RGB=np.delete(Patches_TEST_RGB, 0,0)
from itertools import product
###### Chooses patch and defines label #####
#for PP in range(0,len(Sequence)):
Forest=0
City=0
Water=0
for PP in range(0,Patches_TEST.shape[0]):
inputPatch=Patches_TEST[PP]
Int_RGB=np.mean(np.mean(Patches_TEST_RGB[PP,:,:,:], axis=0), axis=0)/255
### Layer 1 ###
H1=[]
H2=[]
H3=np.zeros((len(C1), N_branches, S_H3,S_H3))
H4=np.zeros((len(C1), N_branches, S_H4,S_H4))
x=np.zeros(ClassAmount)
f=np.zeros(ClassAmount)
for r in range (0, len(C1)):
H1.append(sig.convolve(inputPatch, C1[r], 'valid'))
H2.append(Pool(H1[r], P12))
for b in range(0,N_branches):
H3[r][b]=Sigmoid(sig.convolve(H2[r], C2[r][b],'valid')-H3_bias[r][b])
H4[r][b]=Pool(H3[r][b],P34)
y=np.append([H4.flatten()], [Int_RGB])
#Now we have 3x3x4x4 inputs, connected to the 3 output nodes
for k in range(0,ClassAmount):
W_t=np.append([W[k].flatten()], [W2[k]])
x[k]=np.inner(y, W_t)
f[k]=Sigmoid(x[k]-Output_bias[k])
f=f/np.sum((f))
# Tally the predicted class for this patch; f holds the normalised class
# scores in the order 0=forest, 1=city, 2=water.
# NOTE(review): the original never incremented Forest on a class-0
# prediction, so forest patches were silently dropped from the tally.
pred = np.argmax(f)
if pred == 0:
    Forest = Forest + 1
elif pred == 1:
    City = City + 1
elif pred == 2:
    Water = Water + 1
print Forest, City, Water
Int_RGB
```
| github_jupyter |
# Practice Exercise: Exploring data (Exploratory Data Analysis)
## Context:
- The data includes 120 years (1896 to 2016) of Olympic games with information about athletes and medal results.
- We'll focus on practicing the summary statistics and data visualization techniques that we've learned in the course.
- In general, this dataset is popular to explore how the Olympics have evolved over time, including the participation and performance of different genders, different countries, in various sports and events.
- Check out the original source if you are interested in using this data for other purposes (https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results)
## Dataset Description:
We'll work on the data within athlete_events.csv.
Each row corresponds to an individual athlete competing in an individual Olympic event.
The columns are:
- **ID**: Unique number for each athlete
- **Name**: Athlete's name
- **Sex**: M or F
- **Age**: Integer
- **Height**: In centimeters
- **Weight**: In kilograms
- **Team**: Team name
- **NOC**: National Olympic Committee 3-letter code
- **Games**: Year and season
- **Year**: Integer
- **Season**: Summer or Winter
- **City**: Host city
- **Sport**: Sport
- **Event**: Event
- **Medal**: Gold, Silver, Bronze, or NA
## Objective:
- Examine/clean the dataset
- Explore distributions of single numerical and categorical features via statistics and plots
- Explore relationships of multiple features via statistics and plots
We are only going to explore part of the dataset, please feel free to explore more if you are interested.
### 1. Import the libraries `Pandas` and `Seaborn`
```
import pandas as pd
import seaborn as sns
```
### 2. Import the data from the csv file as DataFrame `olympics`
```
olympics = pd.read_csv('athlete_events.csv')
```
### 3. Look at the info summary, head of the DataFrame
```
olympics.info()
olympics.head()
```
### 4. Impute the missing data
#### Use `IterativeImputer` in `sklearn` to impute based on columns `Year`, `Age`, `Height`, `Weight`
##### Import libraries
```
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
```
##### Build a list of columns that will be used for imputation, which are `Year`, `Age`, `Height`, `Weight`
The column `Year` doesn't have missing values, but we include it since it might be helpful for modeling the other three columns. The age, height, and weight could change across years.
```
cols_to_impute = ['Year', 'Age', 'Height', 'Weight']
```
##### Create an `IterativeImputer` object and set its `min_value` and `max_value` parameters to be the minimum and maximum of corresponding columns
```
iter_imp = IterativeImputer(min_value=olympics[cols_to_impute].min(), max_value=olympics[cols_to_impute].max())
```
##### Apply the imputer to fit and transform the columns to an imputed NumPy array
```
imputed_cols = iter_imp.fit_transform(olympics[cols_to_impute])
```
##### Assign the imputed array back to the original DataFrame's columns
```
olympics[cols_to_impute] = imputed_cols
```
#### Fill the missing values in the column `Medal` with string of 'NA'
```
olympics['Medal'] = olympics['Medal'].fillna('NA')
```
#### Double check that the columns are all imputed
```
olympics.isna().sum()
```
### 5. Use the `describe` method to check the numerical columns
```
olympics.describe()
```
### 6. Plot the histograms of the numerical columns using `Pandas`
```
olympics.hist(figsize=(15, 10))
```
Notice that there could be outliers for `Age`, `Weight`, `Height`. But we'll only focus on `Age`.
### 7. Plot the histogram with a rug plot of the column `Age` using `Seaborn`, with both 20 and 50 bins
```
sns.displot(data=olympics, x='Age', bins=20, rug=True)
sns.displot(data=olympics, x='Age', bins=50, rug=True)
```
Notice the slight changes of distributions of `Age` when the number of bins changes.
### 8. Plot the boxplot of the column `Age` using `Pandas`
```
olympics['Age'].plot(kind='box')
```
### 9. Plot the boxplot of the column `Age` using `Seaborn`
```
sns.catplot(data=olympics, y='Age', kind='box')
```
### 10. Calculate the first quartile, third quartile, and IQR of the column `Age`
```
Q1 = olympics['Age'].quantile(0.25)
Q3 = olympics['Age'].quantile(0.75)
IQR = Q3 - Q1
print(Q1)
print(Q3)
print(IQR)
```
### 11. Print out the lower and upper thresholds for outliers based on IQR for the column `Age`
```
print(f'Low age outlier threshold: {Q1 - 1.5*IQR}')
print(f'High age outlier threshold: {Q3 + 1.5*IQR}')
```
### 12. What are the `Sport` for the athletes of really young age
#### Filter for the column `Sport` when the column `Age` has outliers of lower values
```
msk_lower = (olympics['Age'] < (Q1 - 1.5*IQR))
olympics.loc[msk_lower,'Sport']
```
#### Look at the unique values of `Sport` and their counts when `Age` are low-valued outliers
Did you find any sports popular for really young athletes?
```
olympics.loc[msk_lower,'Sport'].value_counts()
```
There are specific sports with really young age athletes, e.g., Swimming, Figure Skating.
### 13. What are the `Sport` for the athletes of older age
#### Filter for the column `Sport` when the column `Age` has outliers of higher values
```
msk_upper = (olympics['Age'] > (Q3 + 1.5*IQR))
olympics.loc[msk_upper,'Sport']
```
#### Look at the unique values of `Sport` and their counts when `Age` are high-valued outliers
Did you find any sports popular for older age athletes?
```
olympics.loc[msk_upper,'Sport'].value_counts()
```
There are specific sports popular for higher-aged athletes. They tend to need more skills rather than movements.
### 14. Check for the number of unique values in each column
```
olympics.nunique()
```
Olympics is a large event! There are many `Name`, `Team`, `NOC`, `Games`, `Year`, `City`, `Sport`, and `Event`!
### 15. Use the `describe` method to check the non-numerical columns
```
olympics.describe(exclude='number')
```
### 16. Apply the `value_counts` method for each non-numerical column, check for their unique values and counts
```
cat_cols = olympics.select_dtypes(exclude='number').columns
cat_cols
for col in cat_cols:
print(olympics[col].value_counts())
print()
```
### 17. Check the first record within the dataset for each Olympic `Sport`
*Hint: sort the DataFrame by `Year`, then groupby by `Sport`*
```
olympics.sort_values('Year').groupby('Sport').first()
```
### 18. What are the average `Age`, `Height`, `Weight` of female versus male Olympic athletes
```
olympics.groupby('Sex')[['Age','Height','Weight']].mean()
```
### 19. What are the minimum, average, maximum `Age`, `Height`, `Weight` of athletes in different `Year`
```
olympics.groupby('Year')[['Age','Height','Weight']].agg(['min', 'mean', 'max'])
```
### 20. What are the minimum, average, median, maximum `Age` of athletes for different `Season` and `Sex` combinations
```
olympics.groupby(['Season', 'Sex'])['Age'].agg(['min', 'mean', 'median', 'max'])
```
### 21. What are the average `Age` of athletes, and numbers of unique `Team`, `Sport`, `Event`, for different `Season` and `Sex` combinations
```
olympics.groupby(['Season', 'Sex']).agg({'Age': 'mean', 'Team': 'nunique', 'Sport': 'nunique', 'Event': 'nunique'})
```
### 22. What are the average `Age`, `Height`, `Weight` of athletes, for different `Medal`, `Season`, `Sex` combinations
```
olympics.groupby(['Medal', 'Season', 'Sex'])[['Age', 'Height', 'Weight']].mean()
```
### 23. Plot the scatterplot of `Height` and `Weight`
```
sns.relplot(data=olympics, x='Height', y='Weight', kind='scatter')
```
### 24. Plot the scatterplot of `Height` and `Weight`, using different colors and styles of dots for different `Sex`
```
sns.relplot(data=olympics, x='Height', y='Weight', hue='Sex', style='Sex')
```
### 25. Plot the pairwise relationships of `Age`, `Height`, `Weight`
```
sns.pairplot(olympics[['Age', 'Height', 'Weight']])
```
### 26. Plot the pairwise relationships of `Age`, `Height`, `Weight`, with different colors for `Sex`
```
sns.pairplot(olympics[['Age', 'Height', 'Weight', 'Sex']], hue='Sex')
```
### 27. Print out the correlation matrix of `Age`, `Height`, `Weight`
```
olympics[['Age', 'Height', 'Weight']].corr()
```
Notice the strong positive relationship between `Height` and `Weight`, which is intuitive.
### 28. Use heatmap to demonstrate the correlation matrix of `Age`, `Height`, `Weight`, use a colormap (`cmap`) of 'crest'
```
sns.heatmap(olympics[['Age', 'Height', 'Weight']].corr(), cmap='crest')
```
### 29. Plot the histograms of `Age`, with different colors for different `Sex`
```
sns.displot(data=olympics, x='Age', hue='Sex', aspect=2)
```
### 30. Plot the histograms of `Age`, on separate plots for different `Sex`
```
sns.displot(data=olympics, x='Age', col='Sex', aspect=2)
```
### 31. Look at the changes of average `Age` across `Year` by line charts, with separate lines for different `Season` using different colors
```
sns.relplot(data=olympics, x='Year', y='Age', hue='Season', kind='line', aspect=2)
```
### 32. Look at the distributions of `Age` for different `Sex` using boxplots
```
sns.catplot(data=olympics, x='Sex', y='Age', kind='box')
```
### 33. Look at the distributions of `Age` for different `Sex` using violin plots
```
sns.catplot(data=olympics, x='Sex', y='Age', kind='violin')
```
### 34. Look at the distributions of `Age` for different `Sex` using boxplots, with different colors of plots for different `Season`
```
sns.catplot(data=olympics, x='Sex', y='Age', kind='box', hue='Season')
```
### 35. Use count plots to look at the changes of number of athlete-events across `Year`, for different `Sex` by colors, and different `Season` on separate plots
```
sns.catplot(data=olympics, x='Year', hue='Sex', kind='count', col='Season', col_wrap=1, aspect=4)
```
Notice the obvious increase of female athlete-events in the Olympics across years.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# CycleGAN
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/beta/tutorials/generative/cyclegan"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/generative/cyclegan.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/generative/cyclegan.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/generative/cyclegan.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This notebook demonstrates unpaired image to image translation using conditional GAN's, as described in [Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks](https://arxiv.org/abs/1703.10593), also known as CycleGAN. The paper proposes a method that can capture the characteristics of one image domain and figure out how these characteristics could be translated into another image domain, all in the absence of any paired training examples.
This notebook assumes you are familiar with Pix2Pix, which you can learn about in the [Pix2Pix tutorial](https://www.tensorflow.org/beta/tutorials/generative/pix2pix). The code for CycleGAN is similar, the main difference is an additional loss function, and the use of unpaired training data.
CycleGAN uses a cycle consistency loss to enable training without the need for paired data. In other words, it can translate from one domain to another without a one-to-one mapping between the source and target domain.
This opens up the possibility to do a lot of interesting tasks like photo-enhancement, image colorization, style transfer, etc. All you need is the source and the target dataset (which is simply a directory of images).


## Set up the input pipeline
Install the [tensorflow_examples](https://github.com/tensorflow/examples) package that enables importing of the generator and the discriminator.
```
!pip install git+https://github.com/tensorflow/examples.git
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow_datasets as tfds
from tensorflow_examples.models.pix2pix import pix2pix
import os
import time
import matplotlib.pyplot as plt
from IPython.display import clear_output
tfds.disable_progress_bar()
AUTOTUNE = tf.data.experimental.AUTOTUNE
```
## Input Pipeline
This tutorial trains a model to translate from images of horses, to images of zebras. You can find this dataset and similar ones [here](https://www.tensorflow.org/datasets/datasets#cycle_gan).
As mentioned in the [paper](https://arxiv.org/abs/1703.10593), apply random jittering and mirroring to the training dataset. These are some of the image augmentation techniques that avoids overfitting.
This is similar to what was done in [pix2pix](https://www.tensorflow.org/beta/tutorials/generative/pix2pix#load_the_dataset)
* In random jittering, the image is resized to `286 x 286` and then randomly cropped to `256 x 256`.
* In random mirroring, the image is randomly flipped horizontally, i.e., left to right.
```
dataset, metadata = tfds.load('cycle_gan/horse2zebra',
with_info=True, as_supervised=True)
train_horses, train_zebras = dataset['trainA'], dataset['trainB']
test_horses, test_zebras = dataset['testA'], dataset['testB']
BUFFER_SIZE = 1000
BATCH_SIZE = 1
IMG_WIDTH = 256
IMG_HEIGHT = 256
def random_crop(image):
    """Take a random IMG_HEIGHT x IMG_WIDTH x 3 crop out of `image`."""
    return tf.image.random_crop(image, size=[IMG_HEIGHT, IMG_WIDTH, 3])
# normalizing the images to [-1, 1]
def normalize(image):
    """Cast to float32 and rescale pixel values from [0, 255] to [-1, 1]."""
    as_float = tf.cast(image, tf.float32)
    return as_float / 127.5 - 1
def random_jitter(image):
    """Apply the paper's augmentation: enlarge, random crop, random flip."""
    # Upsample to 286 x 286 x 3 so the crop below adds translation jitter.
    enlarged = tf.image.resize(
        image, [286, 286], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
    )
    # Random 256 x 256 x 3 window out of the enlarged image.
    cropped = random_crop(enlarged)
    # Random horizontal mirroring.
    return tf.image.random_flip_left_right(cropped)
def preprocess_image_train(image, label):
    """Training-time preprocessing: jitter augmentation, then normalization.

    `label` is accepted because the dataset yields (image, label) pairs,
    but it is unused.
    """
    return normalize(random_jitter(image))
def preprocess_image_test(image, label):
    """Test-time preprocessing: normalization only, no augmentation."""
    return normalize(image)
train_horses = train_horses.map(
preprocess_image_train, num_parallel_calls=AUTOTUNE).cache().shuffle(
BUFFER_SIZE).batch(1)
train_zebras = train_zebras.map(
preprocess_image_train, num_parallel_calls=AUTOTUNE).cache().shuffle(
BUFFER_SIZE).batch(1)
test_horses = test_horses.map(
preprocess_image_test, num_parallel_calls=AUTOTUNE).cache().shuffle(
BUFFER_SIZE).batch(1)
test_zebras = test_zebras.map(
preprocess_image_test, num_parallel_calls=AUTOTUNE).cache().shuffle(
BUFFER_SIZE).batch(1)
sample_horse = next(iter(train_horses))
sample_zebra = next(iter(train_zebras))
plt.subplot(121)
plt.title('Horse')
plt.imshow(sample_horse[0] * 0.5 + 0.5)
plt.subplot(122)
plt.title('Horse with random jitter')
plt.imshow(random_jitter(sample_horse[0]) * 0.5 + 0.5)
plt.subplot(121)
plt.title('Zebra')
plt.imshow(sample_zebra[0] * 0.5 + 0.5)
plt.subplot(122)
plt.title('Zebra with random jitter')
plt.imshow(random_jitter(sample_zebra[0]) * 0.5 + 0.5)
```
## Import and reuse the Pix2Pix models
Import the generator and the discriminator used in [Pix2Pix](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/pix2pix/pix2pix.py) via the installed [tensorflow_examples](https://github.com/tensorflow/examples) package.
The model architecture used in this tutorial is very similar to what was used in [pix2pix](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/pix2pix/pix2pix.py). Some of the differences are:
* Cyclegan uses [instance normalization](https://arxiv.org/abs/1607.08022) instead of [batch normalization](https://arxiv.org/abs/1502.03167).
* The [CycleGAN paper](https://arxiv.org/abs/1703.10593) uses a modified `resnet` based generator. This tutorial is using a modified `unet` generator for simplicity.
There are 2 generators (G and F) and 2 discriminators (X and Y) being trained here.
* Generator `G` learns to transform image `X` to image `Y`. $(G: X -> Y)$
* Generator `F` learns to transform image `Y` to image `X`. $(F: Y -> X)$
* Discriminator `D_X` learns to differentiate between image `X` and generated image `X` (`F(Y)`).
* Discriminator `D_Y` learns to differentiate between image `Y` and generated image `Y` (`G(X)`).

```
OUTPUT_CHANNELS = 3
generator_g = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
generator_f = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
discriminator_x = pix2pix.discriminator(norm_type='instancenorm', target=False)
discriminator_y = pix2pix.discriminator(norm_type='instancenorm', target=False)
to_zebra = generator_g(sample_horse)
to_horse = generator_f(sample_zebra)
plt.figure(figsize=(8, 8))
contrast = 8
imgs = [sample_horse, to_zebra, sample_zebra, to_horse]
title = ['Horse', 'To Zebra', 'Zebra', 'To Horse']
for i in range(len(imgs)):
plt.subplot(2, 2, i+1)
plt.title(title[i])
if i % 2 == 0:
plt.imshow(imgs[i][0] * 0.5 + 0.5)
else:
plt.imshow(imgs[i][0] * 0.5 * contrast + 0.5)
plt.show()
plt.figure(figsize=(8, 8))
plt.subplot(121)
plt.title('Is a real zebra?')
plt.imshow(discriminator_y(sample_zebra)[0, ..., -1], cmap='RdBu_r')
plt.subplot(122)
plt.title('Is a real horse?')
plt.imshow(discriminator_x(sample_horse)[0, ..., -1], cmap='RdBu_r')
plt.show()
```
## Loss functions
In CycleGAN, there is no paired data to train on, hence there is no guarantee that the input `x` and the target `y` pair are meaningful during training. Thus in order to enforce that the network learns the correct mapping, the authors propose the cycle consistency loss.
The discriminator loss and the generator loss are similar to the ones used in [pix2pix](https://www.tensorflow.org/beta/tutorials/generative/pix2pix#define_the_loss_functions_and_the_optimizer).
```
LAMBDA = 10
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real, generated):
    """Binary cross-entropy loss for a discriminator.

    Real logits are scored against a target of all ones and generated
    logits against all zeros; the sum is halved so the discriminators
    learn at half the rate of the generators.
    """
    loss_on_real = loss_obj(tf.ones_like(real), real)
    loss_on_fake = loss_obj(tf.zeros_like(generated), generated)
    return 0.5 * (loss_on_real + loss_on_fake)
def generator_loss(generated):
    """Adversarial loss: generated logits scored against a target of ones."""
    all_real_target = tf.ones_like(generated)
    return loss_obj(all_real_target, generated)
```
Cycle consistency means the result should be close to the original input. For example, if one translates a sentence from English to French, and then translates it back from French to English, then the resulting sentence should be the same as the original sentence.
In cycle consistency loss,
* Image $X$ is passed via generator $G$ that yields generated image $\hat{Y}$.
* Generated image $\hat{Y}$ is passed via generator $F$ that yields cycled image $\hat{X}$.
* Mean absolute error is calculated between $X$ and $\hat{X}$.
$$forward\ cycle\ consistency\ loss: X -> G(X) -> F(G(X)) \sim \hat{X}$$
$$backward\ cycle\ consistency\ loss: Y -> F(Y) -> G(F(Y)) \sim \hat{Y}$$

```
def calc_cycle_loss(real_image, cycled_image):
    """Cycle-consistency loss: LAMBDA-weighted mean absolute error between
    an image and its round-trip reconstruction through both generators."""
    mae = tf.reduce_mean(tf.abs(real_image - cycled_image))
    return LAMBDA * mae
```
As shown above, generator $G$ is responsible for translating image $X$ to image $Y$. Identity loss says that, if you fed image $Y$ to generator $G$, it should yield the real image $Y$ or something close to image $Y$.
$$Identity\ loss = |G(Y) - Y| + |F(X) - X|$$
```
def identity_loss(real_image, same_image):
    """Identity loss: penalize a generator for changing an image that is
    already from its target domain. Weighted at half the cycle loss."""
    deviation = tf.reduce_mean(tf.abs(real_image - same_image))
    return 0.5 * LAMBDA * deviation
```
Initialize the optimizers for all the generators and the discriminators.
```
generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
```
## Checkpoints
```
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(generator_g=generator_g,
generator_f=generator_f,
discriminator_x=discriminator_x,
discriminator_y=discriminator_y,
generator_g_optimizer=generator_g_optimizer,
generator_f_optimizer=generator_f_optimizer,
discriminator_x_optimizer=discriminator_x_optimizer,
discriminator_y_optimizer=discriminator_y_optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print ('Latest checkpoint restored!!')
```
## Training
Note: This example model is trained for fewer epochs (40) than the paper (200) to keep training time reasonable for this tutorial. Predictions may be less accurate.
```
EPOCHS = 40
def generate_images(model, test_input):
    """Run `model` on `test_input` and show input and prediction side by side."""
    prediction = model(test_input)

    plt.figure(figsize=(12, 12))
    panels = [
        ('Input Image', test_input[0]),
        ('Predicted Image', prediction[0]),
    ]
    for position, (caption, img) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        plt.title(caption)
        # Map pixel values from [-1, 1] back to [0, 1] for display.
        plt.imshow(img * 0.5 + 0.5)
        plt.axis('off')
    plt.show()
```
Even though the training loop looks complicated, it consists of four basic steps:
* Get the predictions.
* Calculate the loss.
* Calculate the gradients using backpropagation.
* Apply the gradients to the optimizer.
```
@tf.function
def train_step(real_x, real_y):
    """Run one optimization step for all four CycleGAN networks.

    Args:
      real_x: batch of images from domain X (horses).
      real_y: batch of images from domain Y (zebras).
    """
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        # Generator G translates X -> Y
        # Generator F translates Y -> X.
        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)

        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)

        # same_x and same_y are used for identity loss.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)

        # Discriminator scores for real images and for the generated fakes.
        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)

        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)

        # calculate the loss
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)

        # Both cycle directions contribute to one shared cycle loss term.
        total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_loss(real_y, cycled_y)

        # Total generator loss = adversarial loss + cycle loss
        total_gen_g_loss = gen_g_loss + total_cycle_loss + identity_loss(real_y, same_y)
        total_gen_f_loss = gen_f_loss + total_cycle_loss + identity_loss(real_x, same_x)

        disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
        disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)

    # Calculate the gradients for generator and discriminator
    # (four tape.gradient calls — this is why the tape is persistent).
    generator_g_gradients = tape.gradient(total_gen_g_loss,
                                          generator_g.trainable_variables)
    generator_f_gradients = tape.gradient(total_gen_f_loss,
                                          generator_f.trainable_variables)

    discriminator_x_gradients = tape.gradient(disc_x_loss,
                                              discriminator_x.trainable_variables)
    discriminator_y_gradients = tape.gradient(disc_y_loss,
                                              discriminator_y.trainable_variables)

    # Apply the gradients to the optimizer
    generator_g_optimizer.apply_gradients(zip(generator_g_gradients,
                                              generator_g.trainable_variables))
    generator_f_optimizer.apply_gradients(zip(generator_f_gradients,
                                              generator_f.trainable_variables))

    discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,
                                                  discriminator_x.trainable_variables))
    discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,
                                                  discriminator_y.trainable_variables))
for epoch in range(EPOCHS):
start = time.time()
n = 0
for image_x, image_y in tf.data.Dataset.zip((train_horses, train_zebras)):
train_step(image_x, image_y)
if n % 10 == 0:
print ('.', end='')
n+=1
clear_output(wait=True)
# Using a consistent image (sample_horse) so that the progress of the model
# is clearly visible.
generate_images(generator_g, sample_horse)
if (epoch + 1) % 5 == 0:
ckpt_save_path = ckpt_manager.save()
print ('Saving checkpoint for epoch {} at {}'.format(epoch+1,
ckpt_save_path))
print ('Time taken for epoch {} is {} sec\n'.format(epoch + 1,
time.time()-start))
```
## Generate using test dataset
```
# Run the trained model on the test dataset
for inp in test_horses.take(5):
generate_images(generator_g, inp)
```
## Next steps
This tutorial has shown how to implement CycleGAN starting from the generator and discriminator implemented in the [Pix2Pix](https://www.tensorflow.org/beta/tutorials/generative/pix2pix) tutorial. As a next step, you could try using a different dataset from [TensorFlow Datasets](https://www.tensorflow.org/datasets/datasets#cycle_gan).
You could also train for a larger number of epochs to improve the results, or you could implement the modified ResNet generator used in the [paper](https://arxiv.org/abs/1703.10593) instead of the U-Net generator used here.
Try using a different dataset from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/cycle_gan). You can also implement the modified ResNet generator used in the [paper](https://arxiv.org/abs/1703.10593) instead of the U-Net generator that's used here.
| github_jupyter |
##### Copyright 2022 The Cirq Developers
```
# @title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Devices
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/devices"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/devices.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/devices.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/devices.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
```
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
import cirq
```
## Validation basics
When you are looking to run an algorithm on a real quantum computer (not a simulated one), there are often many additional constraints placed on the circuits you would like to run. Qubit connectivity, algorithm layout and the types of gates used in the circuit all become much more important. Cirq uses the abstract class `Device` to represent the constraints of an actual quantum processor. An example implementation of a device can be seen in the `cirq_google.Sycamore` class:
```
import cirq_google
import networkx as nx
my_device = cirq_google.Sycamore
print(my_device)
```
This string representation of the device indicates the structure of the device and the connectivity of the qubits. In Sycamore's case, two-qubit gates can only be executed on qubits that are adjacent in the grid. Other constraints, like supported gates, are not shown in this representation.
You can access all of the constraints indirectly by validating moments, operations and circuits with the `validate_***` method to verify if that structure would work on the device or not. In general, the `validate_***` method will tell you what part of your operation/moment/circuit does not fit the device's constraints, and why. All devices support this functionality. For the Sycamore device:
```
op1 = cirq.X(cirq.GridQubit(7, 7))
try:
my_device.validate_operation(op1)
except Exception as e:
print(e)
```
The previous example used a qubit that wasn't on the device, making the operation invalid. Most `validate_operation` implementations also take into account things like supported gates and connectivity as well:
```
q1, q2, q3 = cirq.GridQubit(7, 4), cirq.GridQubit(7, 5), cirq.GridQubit(7, 6)
op1 = cirq.H(q1)
op2 = cirq_google.SYC(q1, q3)
try:
my_device.validate_operation(op1)
except Exception as e:
print(e)
try:
my_device.validate_operation(op2)
except Exception as e:
print(e)
```
These validation operations can also be used with moments of operations and full circuits:
```
op1 = cirq.X(q2)
op2 = cirq_google.SYC(q1, q3)
try:
my_device.validate_moment(cirq.Moment([op1, op2]))
except Exception as e:
print(e)
my_circuit = cirq.Circuit(
cirq.PhasedXPowGate(phase_exponent=0.3)(q1),
cirq.PhasedXPowGate(phase_exponent=0.3)(q2),
cirq_google.SYC(q1, q2),
cirq_google.SYC(q2, q3),
)
my_device.validate_circuit(my_circuit)
```
`op1` is allowed on qubit `q2`, but `op2` has the same invalid qubit target error as before. `validate_moment` finds this error by iterating the moment and stopping once the invalid operation is found. On the other hand, `my_circuit` satisfies all the device constraints and could be run on a Sycamore device, so `validate_circuit` does not throw an exception for it.
## Metadata features
Some devices will also expose additional information via the `metadata` property. Metadata is usually exposed via an instance (or subclass instance) of the `cirq.DeviceMetadata` class. You can access the metadata information of the Sycamore device with the `metadata` attribute:
```
metadata = my_device.metadata
print(type(metadata))
issubclass(type(metadata), cirq.DeviceMetadata)
```
The Sycamore device is a 2d grid device that exposes a `cirq.GridDeviceMetadata` with a uniform set of gates across all the qubits as well as a planar nearest neighbor connectivity graph. You can explore the properties below, starting with `qubit_set` and `nx_graph`, which are common to all instances and subclasses of the `cirq.DeviceMetadata` class.
First, the set of available qubits can be found in the `qubit_set` attribute.
```
print(metadata.qubit_set)
```
The `nx_graph` attribute details which of the `54` different qubits are connected to one another. Connected qubit pairs can execute two-qubit gates between them.
```
print(metadata.nx_graph)
```
`cirq.GridDeviceMetadata` has some attributes that are not automatically included in `cirq.DeviceMetadata`, including `gateset`, which indicates the types and families of Cirq gates that are accepted by all qubits across the device.
```
print(metadata.gateset)
```
These metadata features can be useful when designing/building algorithms around certain device information in order to tailor them for that device.
## The `cirq.Device` interface
For advanced users (such as vendors) it is also possible to implement your own Device with its own unique constraints and metadata information. Below is an example of a fictitious custom device:
```
class MyDevice(cirq.Device):
    """Five qubits on a line, supporting X/Y/Z and CZ between neighbors."""

    def __init__(self):
        # Specify the qubits available to the device
        self._qubits = set(cirq.LineQubit.range(5))
        # Specify which gates are valid
        self._supported_gates = cirq.Gateset(
            cirq.XPowGate, cirq.YPowGate, cirq.ZPowGate, cirq.CZPowGate
        )

    def validate_operation(self, operation):
        """Check to make sure `operation` is valid.

        `operation` must be on qubits found on the device
        and if it is a two qubit gate the qubits must be adjacent

        Raises:
            ValueError: if operation acts on qubits not found on the device.
            ValueError: if two qubit gates have non-local interactions.
            ValueError: if the operation is not in the supported gates.
        """
        # Every qubit the operation touches must exist on this device.
        for target in operation.qubits:
            if target not in self._qubits:
                raise ValueError("Using qubits not found on device.")

        # Two-qubit operations are only allowed between neighboring qubits.
        if len(operation.qubits) == 2:
            first, second = operation.qubits
            if not first.is_adjacent(second):
                raise ValueError('Non-local interaction: {}'.format(repr(operation)))

        # The gate itself must belong to the device's supported gate set.
        if operation not in self._supported_gates:
            raise ValueError("Unsupported operation type.")

    def validate_circuit(self, circuit):
        """Check to make sure `circuit` is valid.

        Calls validate_operation on all operations as well as imposing
        a global limit on the total number of CZ gates.

        Raises:
            ValueError: if `validate_operation` raises for any operation in the
                circuit.
            ValueError: if there are more than 10 CZ gates in the entire circuit.
        """
        # cirq.Device.validate_circuit runs our validate_operation (above)
        # on every operation in every moment.
        super().validate_circuit(circuit)

        # Global constraint: no more than 10 two-qubit (CZ) gates overall.
        two_qubit_ops = [
            op for moment in circuit for op in moment if len(op.qubits) == 2
        ]
        if len(two_qubit_ops) > 10:
            raise ValueError("Too many total CZs")

    @property
    def metadata(self):
        """MyDevice GridDeviceMetadata."""
        # Since `MyDevice` is planar it is a good idea to use the
        # GridDeviceMetadata class to communicate additional device
        # information to the user.
        adjacent_pairs = [
            (a, b)
            for a in self._qubits
            for b in self._qubits
            if a.is_adjacent(b)
        ]
        return cirq.GridDeviceMetadata(
            qubit_pairs=adjacent_pairs,
            gateset=self._supported_gates,
        )
```
At absolute minimum, when creating a custom `Device`, you should inherit from `cirq.Device` and overwrite the `__init__` and `validate_operation` methods.
This custom device can now be used to validate circuits:
```
# Build a device instance and a circuit that satisfies all of its constraints.
my_custom_device = MyDevice()
my_circuit = cirq.Circuit(
    cirq.X(cirq.LineQubit(0)),
    cirq.X(cirq.LineQubit(2)),
    cirq.X(cirq.LineQubit(4)),
    cirq.CZ(*cirq.LineQubit.range(2)),
)
# Eleven CZs: each one is individually valid, but the total exceeds the
# device's global limit of ten.
too_many_czs = cirq.Circuit(cirq.CZ(*cirq.LineQubit.range(2)) for _ in range(11))
# my_circuit is valid for my_custom_device.
my_custom_device.validate_circuit(my_circuit)
# each operation of too_many_czs is valid individually...
for moment in too_many_czs:
    for op in moment:
        my_custom_device.validate_operation(op)
# But the device has global constraints which the circuit does not meet:
try:
    my_custom_device.validate_circuit(too_many_czs)
except Exception as e:
    print(e)
```
By default, the `validate_circuit` method of the `cirq.Device` class simply calls `validate_moment` on all the moments, which calls `validate_operation` on all the operations. It is advisable to maintain this behavior in your custom device, which can be implemented as above, by calling `super().validate_***` when writing each method.
Depending on the scoping of constraints the custom device has, certain less local constraints might be better placed in `validate_moment` and certain global constraints might belong in `validate_circuit`. In addition to this, you can also add metadata options to your device. You can define a metadata subclass of `cirq.DeviceMetadata` or you can use an inbuilt metadata class like `cirq.GridDeviceMetadata`:
```
my_metadata = my_custom_device.metadata
# Display device graph:
nx.draw(my_metadata.nx_graph)
```
# Summary
Devices in Cirq are used to specify constraints on circuits that are imposed by quantum hardware devices. You can check that an operation, moment, or circuit is valid on a particular `cirq.Device` by using `validate_operation`, `validate_moment`, or `validate_circuit` respectively. You can also create your own custom device objects to specify constraints for a new or changed device. Device objects, custom and otherwise, also can carry around metadata that may be useful for the validation process or other processes.
| github_jupyter |
```
# hide
%load_ext nb_black
# default_exp clients
from will_it_saturate.clients import BaseClient
from will_it_saturate.registry import register_model
# export
import os
import math
import time
import httpx
import asyncio
import aiohttp
import subprocess
from pathlib import Path
from datetime import datetime
from multiprocessing import Pool
from multiprocessing import set_start_method
# from will_it_saturate.old_core import Benchmark
from will_it_saturate.servers import BaseServer
# os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
# set_start_method("fork")
# print(os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"])
```
## Caveats
On macOS increase open file limit with:
```
ulimit -n 2048
```
Before starting the fastAPI Server with:
```
uvicorn will_it_saturate.fastapi.main:app --reload
```
It's not really possible to test the forked client from this notebook (the reason is unclear); it works in the 03_run_benchmark script. Inside the notebook one would have to call set_start_method("fork") and resort to other workarounds.
```
# dont_test
# Target bandwidth: one gigabit per second, expressed in bytes per second.
byte = 8
gigabit = 10 ** 9
bandwidth = gigabit / byte
# file_sizes = [10 ** 7, 10 ** 6]
file_sizes = [10 ** 7, 10 ** 6, 10 ** 5]  # file sizes (in bytes) to benchmark
# file_sizes = [10 ** 7]
# benchmark = Benchmark(
# bandwidth=bandwidth,
# duration=3,
# file_sizes=file_sizes,
# )
# benchmark.create_epochs()
# export
# just here because of broken nbdev confusing lua with python
counter = 0
request = None
@register_model
class HttpxClient(BaseClient):
    """Benchmark client that downloads all epoch URLs concurrently via httpx."""

    async def measure_server(self, epoch):
        """Fetch every URL in `epoch` concurrently.

        Returns (elapsed_seconds, responses) where elapsed covers the whole
        gather, not individual requests.
        """
        print("measure server")
        print(epoch.urls[0])
        # Cap the pool at 50 connections regardless of what the epoch asks for.
        max_connections = min(epoch.number_of_connections, 50)
        print("max_connections: ", max_connections)
        # max_connections = 10
        limits = httpx.Limits(
            max_keepalive_connections=10, max_connections=max_connections
        )
        # Generous timeouts: large files on slow links should not abort the run.
        timeout = httpx.Timeout(30.0, connect=60.0)
        start = time.perf_counter()
        async with httpx.AsyncClient(limits=limits, timeout=timeout) as client:
            responses = await asyncio.gather(*[client.get(url) for url in epoch.urls])
        elapsed = time.perf_counter() - start
        print("done: ", elapsed)
        print("responses status: ", responses[0].status_code)
        return elapsed, responses

    def measure_in_new_process(self, epoch):
        """Worker-process entry point: run the async benchmark and verify payloads."""
        print("new process")
        elapsed, responses = asyncio.run(self.measure_server(epoch))
        self.verify_checksums(epoch, responses)
        return elapsed

    def measure(self, epoch):
        """Run the measurement in a fresh process (clean event loop per epoch)."""
        print("measure")
        with Pool(1) as p:
            [result] = p.map(self.measure_in_new_process, [epoch])
        return result
# def run_httpx():
# byte = 8
# gigabit = 10 ** 9
# bandwidth = gigabit / byte
#
# # file_sizes = [10 ** 7, 10 ** 6]
# # file_sizes = [10 ** 7, 10 ** 6, 10 ** 5]
# file_sizes = [10 ** 7]
#
# benchmark = Benchmark(
# bandwidth=bandwidth,
# duration=3,
# file_sizes=file_sizes,
# servers=[BenchmarkServer(name="uvicorn")],
# clients=[HttpxClient(name="httpx")],
# )
# benchmark.create_rows()
# benchmark.run()
# print(benchmark.results_frame)
# export
import sys
import typer
from will_it_saturate.hosts import Host
from will_it_saturate.epochs import Epoch
from will_it_saturate.servers import BaseServer
from will_it_saturate.control.client import ControlClient
def run_httpx_with_args(exponent: int):
    """CLI entry point: benchmark one epoch of 10**exponent-byte files via httpx.

    Asks the control server (port 8100) to create the test files on the remote
    host, then measures download time against the file server (port 5100).
    """
    print("running httpx")
    typer.echo(f"exponent {exponent}")
    control_server_port, server_port = 8100, 5100
    server_host_name = "192.168.178.113"  # NOTE(review): hard-coded LAN address
    server = BaseServer(host=server_host_name, port=server_port)
    server_control_host = Host(name=server_host_name, port=control_server_port)
    server_control_client = ControlClient(host=server_control_host)
    epoch = Epoch(file_size=10 ** exponent, duration=10)
    epoch.files = server_control_client.get_or_create_files(epoch)
    epoch.create_urls_from_files(server)
    benchmark_client = HttpxClient(name="httpx", host=server_host_name, port=server_port)
    elapsed = benchmark_client.measure(epoch)
    print(f"elapsed: {elapsed}")
def run_httpx():
    """Console-script wrapper: let typer parse CLI args and dispatch."""
    typer.run(run_httpx_with_args)
# dont_test
# client = HttpxClient()
# elapsed, responses = await client.measure_server(benchmark.epochs[0])
# print(elapsed)
```
## aiohttp
```
# export
class AioHttpResponse:
    """Value object holding one fetched URL, its body, and read timestamps."""

    def __init__(self, url, content, started, stopped):
        """Record the URL, response body, and when the body read began/ended."""
        self.url, self.content = url, content
        self.started, self.stopped = started, stopped
@register_model
class AioHttpClient(BaseClient):
    """Benchmark client that downloads all epoch URLs concurrently via aiohttp."""

    # NOTE(review): class-level mutable list -- shared by all instances and
    # accumulating across measure() calls. Harmless while each measurement runs
    # in a fresh worker process, but confirm before reusing one client object.
    timestamps = []

    def set_timestamps(self, responses):
        """Record the (started, stopped) body-read interval of every response."""
        for response in responses:
            self.timestamps.append((response.started, response.stopped))

    async def fetch_page(self, session, url):
        """GET `url` and time how long reading the body takes."""
        async with session.get(url) as response:
            started = datetime.now()
            content = await response.read()
            stopped = datetime.now()
            return AioHttpResponse(url, content, started, stopped)

    async def measure_server(self, epoch):
        """Fetch every URL in `epoch` concurrently.

        Returns (elapsed_seconds, responses) where elapsed covers the whole
        gather, not individual requests.
        """
        print("measure server")
        print(epoch.urls[0])
        urls = epoch.urls
        # Cap concurrency at 200 connections regardless of what the epoch asks for.
        max_connections = min(epoch.number_of_connections, 200)
        conn = aiohttp.TCPConnector(limit=max_connections)
        responses = []
        start = time.perf_counter()
        async with aiohttp.ClientSession(connector=conn) as session:
            tasks = [asyncio.create_task(self.fetch_page(session, url)) for url in urls]
            responses = await asyncio.gather(*tasks)
        elapsed = time.perf_counter() - start
        return elapsed, responses

    def measure_in_new_process(self, epoch):
        """Worker-process entry point: run the async benchmark, verify payloads,
        and collect per-response timestamps."""
        elapsed, responses = asyncio.run(self.measure_server(epoch))
        self.verify_checksums(epoch, responses)
        self.set_timestamps(responses)
        print("timestamps: ", len(self.timestamps))
        return elapsed, self.timestamps

    def measure(self, epoch):
        """Run one epoch in a fresh process; return (elapsed_seconds, timestamps).

        Bug fix: Pool.map over a one-element iterable returns a one-element
        list containing the (elapsed, timestamps) tuple, so the previous
        `[result, timestamps] = p.map(...)` raised ValueError (cannot unpack
        1 item into 2 names). Unpack the single list element instead.
        """
        with Pool(1) as p:
            [(result, timestamps)] = p.map(self.measure_in_new_process, [epoch])
        return result, timestamps
# dont_test
# Ad-hoc check; top-level `await` only works inside a notebook, and
# `benchmark` comes from an earlier (currently commented-out) cell.
client = AioHttpClient()
elapsed, responses = await client.measure_server(benchmark.epochs[0])
print(elapsed)
```
## wrk
```
# export
@register_model
class WrkClient(BaseClient):
    """Benchmark client that shells out to the `wrk` HTTP load-testing tool.

    Generates a lua script (wrk.lua) that replays each of the epoch's file
    URLs exactly once, then times a single wrk run against the target server.
    """

    connections: int = 20
    # set duration to two minutes since it is 10 seconds by default and kills the benchmark
    duration: int = 120
    threads: int = 1
    host: str = "localhost"
    port: str = "8000"

    def create_urls_string(self, epoch):
        """Render the epoch's files as lua table entries, one per line."""
        urls = []
        for bf in epoch.files:
            urls.append(f' {{path = "/{bf.path}"}},')
        return "\n".join(urls)

    def create_lua_script(self, epoch):
        """Write wrk.lua: a `requests` table plus a driver replaying it once."""
        requests_head = "requests = {"
        requests_tail = "}"
        lua_body = """
print(requests[1])
if #requests <= 0 then
print("multiplerequests: No requests found.")
os.exit()
end
print("multiplerequests: Found " .. #requests .. " requests")
counter = 1
request = function()
-- Get the next requests array element
local request_object = requests[counter]
-- Increment the counter
counter = counter + 1
-- If the counter is longer than the requests array length -> stop and exit
if counter > #requests then
wrk.thread:stop()
os.exit()
end
-- Return the request object with the current URL path
return wrk.format(request_object.method, request_object.path, request_object.headers, request_object.body)
end
"""
        urls = self.create_urls_string(epoch)
        lua = "\n".join([requests_head, urls, requests_tail, lua_body])
        # Plain string literal: the previous f-string had no placeholders.
        with Path("wrk.lua").open("w") as f:
            f.write(lua)

    def run_wrk(self):
        """Invoke wrk with this client's settings; return wall-clock seconds."""
        kwargs = {"capture_output": True, "text": True}
        start = time.perf_counter()
        command = [
            "wrk",
            "-d",
            str(self.duration),
            "-c",
            str(self.connections),
            "-t",
            str(self.threads),
            "-s",
            "wrk.lua",
            f"http://{self.host}:{self.port}",
        ]
        print("command: ", " ".join(command))
        # Output is captured but currently unused; only wall time is reported.
        output = subprocess.run(
            command,
            **kwargs,
        )
        elapsed = time.perf_counter() - start
        return elapsed

    def measure(self, epoch):
        """Generate the lua script for `epoch` and time one wrk run against it."""
        print("measure? wtf?")
        self.create_lua_script(epoch)
        elapsed = self.run_wrk()
        return elapsed
```
## Wrk CLI command
```shell
time wrk -d 30 -c 20 -t 1 -s wrk.lua http://staging.wersdoerfer.de:5001
```
```
%%time
# dont_test
# Manual wrk invocation against a locally running server (2s, 20 conns, 1 thread).
kwargs = {"capture_output": True, "text": True}
output = subprocess.run(["wrk", "-c20", "-t1", "-d2", "-s", "wrk.lua", "http://localhost:8000"], **kwargs)
# output = subprocess.run(["wrk", "-d2", "http://localhost:8000"], **kwargs)
# dont_test
print(output.stdout)
# dont_test
# `benchmark` is defined in an earlier (commented-out) cell; this only runs
# after that cell has been executed.
client = WrkClient()
elapsed = client.measure(benchmark.epochs[0])
print(elapsed)
# hide
# dont_test
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
# 2.3 Linear Time
However, finding the maximum value in an unordered array is not a constant time operation: scanning over each element in the array is needed in order to determine the maximum value. Hence it is a linear time operation, taking O(n) time. If the number of elements is known in advance and does not change, however, such an algorithm can still be said to run in constant time.
```
def find_max(unordered_list):
    """Return the largest element of `unordered_list` via a single linear scan.

    The scan is deliberately explicit (rather than the builtin `max`) because
    this notebook section demonstrates O(n) time complexity.
    Raises IndexError for an empty list, matching the original behavior.
    """
    max_value = unordered_list[0]
    # Iterate over values directly instead of indexing via range(len(...)).
    for value in unordered_list:
        if value > max_value:
            max_value = value
    return max_value
def timer(func, x):
    """Time one call of `func(x)` using process time and print the duration.

    Returns whatever `func(x)` returns so the call can be used inline.
    """
    begin = time.process_time()
    result = func(x)
    finish = time.process_time()
    print(f'for a list of {len(x)} items: {finish - begin} seconds')
    return result
# Compare process time of find_max on a short vs. long list.
# NOTE(review): short_list, long_list, list_lengths, np and poly_times are
# defined in earlier notebook cells not shown here.
timer(find_max, short_list)
timer(find_max, long_list)
for length in list_lengths:
    start = time.process_time()
    random_list = [np.random.randint(1, 100) for random_integer in range(length)]
    end = time.process_time()
    print('runtime for generating a list of {:,} elements: {} seconds'.format(length, (end-start)))
    start = time.process_time()
    find_max(random_list)
    end = time.process_time()
    poly_times.append(end - start)
    # NOTE(review): the message says "double for loop multiplication" but this
    # cell times find_max -- looks copy-pasted from a quadratic-time cell.
    print('runtime for double for loop multiplication of {:,} items: {} seconds\n'.format(length, (end-start)))
# Load timing results collected on Colab (standard vs. TPU/High-RAM runtime).
linear = pd.read_csv("/content/here/MyDrive/Data and Algorithms/ALGO02/linear_no_tpu.csv")
linear_tpu = pd.read_csv("/content/here/MyDrive/Data and Algorithms/ALGO02/linear_tpu.csv")
linear
linear_tpu
generate_list = linear['time: generate list'].to_numpy()
generate_list_tpu = linear_tpu['time: generate list'].to_numpy()
# NOTE(review): this rebinds the find_max *function* name to a numpy array --
# the function defined above is unusable after this cell runs.
find_max = linear['time: find max value'].to_numpy()
find_max_tpu = linear_tpu['time: find max value'].to_numpy()
fig, ax = plt.subplots(1, 2, figsize=(15, 6))
# param packs colors and legend labels: [no-TPU color, no-TPU label, TPU color, TPU label]
param = ['salmon', 'standard', 'darkred', 'TPU/High RAM']
for i in range(2):
    ax[i].plot(list_lengths, generate_list, c=param[0], label=param[1], linewidth=3)
    ax[i].plot(list_lengths, generate_list_tpu, c=param[2], label=param[3], linewidth=3)
    ax[i].legend()
# NOTE(review): both subplots are drawn from the list-generation timings;
# find_max / find_max_tpu are never plotted, so the second title is misleading.
ax[0].set_title("Time taken for generating list", fontsize=15)
ax[1].set_title("Time taken for finding max value", fontsize=15)
plt.suptitle("Linear Time Comparison", fontsize=25, fontweight='bold', y=1)
```
# Important Functions
- `sns.lmplot(x='n', y='time', data=dataset, ci=None)`
- lmplot = linear model (regression) plot
  - ci: confidence interval
- `pd.DataFrame(list(zip(list1, list2, ..., list_n)))`
- `list(zip(item1, item2 ... item_n))`
- `time.process_time()`
References
My algorithm learning notebook following the live lesson series [**"Data Structures, Algorithms, and Machine Learning Optimization"**](https://learning.oreilly.com/videos/data-structures-algorithms/9780137644889/) by Dr. Jon Krohn. I adapted some and partially modified or added entirely new code. Notes largely based on and (some of them entirely) from Jon's notebooks and learning materials. The lesson and original notebook source code at:
https://learning.oreilly.com/videos/data-structures-algorithms/9780137644889/
https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/7-algos-and-data-structures.ipynb
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
import sys
if '..' not in sys.path:
sys.path.append('..')
import pandas as pd
import numpy as np
import networkx as nx
import copy
import scipy as sp
import math
import seaborn
import pickle
import warnings
import matplotlib
import matplotlib.pyplot as plt
import re
# import multiprocessing
# from lib.mobilitysim import MobilitySimulator
from lib.dynamics import DiseaseModel
from lib.distributions import CovidDistributions
from lib.plot import Plotter
from lib.data import collect_data_from_df
from lib.measures import (
MeasureList,
BetaMultiplierMeasure,
BetaMultiplierMeasureByType,
SocialDistancingForAllMeasure,
SocialDistancingForKGroups,
SocialDistancingByAgeMeasure,
SocialDistancingForPositiveMeasure,
ComplianceForAllMeasure,
Interval)
from lib.runutils import *
from IPython.display import display, HTML
# from lib.mobilitysim import MobilitySimulator
# from lib.town_data import generate_population, generate_sites, compute_distances
# from lib.town_maps import MapIllustrator
```
## Zihan
```
# Load the pickled summaries for the friends/colleagues tracing experiment.
summaries_SD_6 = load_summary('tracing_isolate_sftest_2trace_stochastic_20pctall_isohouse_40rpts_005betas_trace_friends_only_colleagues_tracehouse_housesites.pk')
# Legend for the `counts` indices used below (which measure contained a contact).
'''
(0) 0: SocialDistancingForAllMeasure`
(1) 1: SocialDistancingForPositiveMeasure
(2) 2: SocialDistancingByAgeMeasure`
(3) 3: SocialDistancingForSmartTracing
(4) 4: SocialDistancingForKGroups`
(5) 5: UpperBoundCasesSocialDistancing`
(6) 'resi/dead'
(7) 'hosp'
(8) 'site_measures'
(9) not contained
'''
import matplotlib.pyplot as plt
# Per-compliance-level breakdown of which measure contained each sampled
# contact (top row), plus household vs. contact exposure box plots (bottom row).
rpts = 20  # NOTE(review): the pickle name says 40rpts -- confirm reading only 20 repeats is intentional
p_compliance = [0.0, 1.0]  # tracking-compliance level of each summary -- presumably; verify against the experiment script
measures_deployed = [1,3,6,7]  # subset of `counts` indices actually plotted
for j, policy in enumerate(['advanced']):
    summaries_ = summaries_SD_6[policy]
    f,axs = plt.subplots(2,2,figsize = (13,13))
    num_expo_house = []
    num_expo_contact = []
    for s, summary in enumerate(summaries_):
        multi_3 = 0  # NOTE(review): unused in this cell
        counts = np.zeros((10,))
        num_contacts = 0
        num_nega = np.sum(summary.state['nega'])
        num_posi = np.sum(summary.state['posi'])
        num_expo_house.append(summary.num_expo_house)
        num_expo_contact.append(summary.num_expo_contact)
        num_i_contained_infectious_true = 0
        num_j_contained_infectious_true = 0
        num_i_contained_infectious_false = 0
        num_j_contained_infectious_false = 0
        #axs[1,s].boxplot(summary.state_started_at['posi'][np.multiply(summary.state_started_at['posi']!=np.inf, summary.state_started_at['posi']!=-1)])
        for r in range(rpts):
            num_contacts += len(summary.mob[r])
            for contact in summary.mob[r]:
                if contact.data['i_contained_infectious']==True:
                    num_i_contained_infectious_true += 1
                # if contact.data['j_contained_infectious']==True:
                # num_j_contained_infectious_true += 1
                if contact.data['i_contained_infectious']==False:
                    num_i_contained_infectious_false += 1
                # if contact.data['j_contained_infectious']==False:
                # num_j_contained_infectious_false += 1
                # Tally which measure (if any) contained this contact.
                if (not contact.data['i_contained']) and (not contact.data['j_contained'] ):
                    counts[9] += 1
                else:
                    # if (3 in contact.data['i_contained_by']) or (3 in contact.data['j_contained_by']):
                    # if len()
                    for i in range(6):
                        if (i in contact.data['i_contained_by']) or (i in contact.data['j_contained_by']):
                            counts[i] += 1
                    if ('resi/dead' in contact.data['i_contained_by']) or ('resi/dead' in contact.data['j_contained_by']):
                        counts[6] += 1
                    if ('hosp' in contact.data['i_contained_by']) or ('hosp' in contact.data['j_contained_by']):
                        counts[7] += 1
                    if ('site_measures' in contact.data['i_contained_by']) or ('site_measures' in contact.data['j_contained_by']):
                        counts[8] += 1
        counts /= num_contacts  # convert tallies to proportions of sampled contacts
        axs[0,s].bar(range(1,len(measures_deployed)+1),counts[measures_deployed])
        axs[0,s].set_title('Tracking compliance '+str(p_compliance[s])+', '+ str(round((1-counts[9])*100,2))+'\% contained')
        axs[0,s].set_xlabel('contact status',fontsize = 20)
        axs[0,s].set_ylabel('proportion in sampled contacts',fontsize = 20)
        axs[0,s].set_xticks(range(1,len(measures_deployed)+1))
        axs[0,s].set_xticklabels(measures_deployed)
        axs[0,s].set_ylim(0,0.5)
        print('number of contacts:', num_contacts/rpts)
        print('Tracking compliance '+str(p_compliance[s])+', positive ', num_posi)
        print('Tracking compliance '+str(p_compliance[s])+', negative rate: ', num_nega/(num_nega+num_posi))
        print('i_contained_infectious true rate: ',num_i_contained_infectious_true/(num_i_contained_infectious_true+num_i_contained_infectious_false))
        #print('j_contained_infectious true rate: ',num_j_contained_infectious_true/(num_j_contained_infectious_true+num_j_contained_infectious_false))
    # Bottom row: exposure counts per compliance level.
    axs[1,0].boxplot(num_expo_house)
    axs[1,0].set_title('Household Exposures')
    axs[1,0].set_xlabel('compliance',fontsize = 20)
    axs[1,0].set_xticklabels(p_compliance)
    axs[1,0].set_ylabel('number of exposures',fontsize = 20)
    axs[1,1].boxplot(num_expo_contact)
    axs[1,1].set_title('Contact Exposures')
    axs[1,1].set_xlabel('compliance',fontsize = 20)
    axs[1,1].set_xticklabels(p_compliance)
    axs[1,1].set_ylabel('number of exposures',fontsize = 20)
    #plt.tight_layout()
    plt.savefig('plots/tracing_isolate_sf_tc1x_sup20_laura_isohouse_20rpts2_detail.png',dpi=300)
    plt.show()
# Plot results of experiments_server_7-1.py
# Requires `summaries_SD_6` loaded by the previous cell.
c=0
runstr = f'run{c}_'
FIGSIZE=(6, 4)
p_compliance = [0.0,0.6,1.0]
plotter = Plotter()
# One legend entry per compliance level, e.g. "Tracking compliance 60 \%".
titles_SD_6_ = list(['Tracking compliance '+ str(int(p*100.0)) + ' \%' for p in p_compliance])
for j, policy in enumerate(['basic']):
    summaries_ = summaries_SD_6[policy]
    plotter.compare_total_infections(
        summaries_,
        titles=titles_SD_6_,
        figtitle=(f'Infections for compliance levels for ' + policy + ' individuals compliant with contact-tracing'),
        filename=runstr + f'SD_6{j}'+'tracing_isolate_sftest_2trace_stochastic_20pctall_isohouse_40rpts_005betas_trace_friends_only_colleagues_tracehouse_housesites_highhomebeta',
        figsize=FIGSIZE, acc=500,
        ymax=1500, errorevery=14, start_date = '2020-03-08', show_legend=False)
# Site-type key for site indices. The `>= 86` threshold below appears to split
# home sites from public sites -- TODO confirm against the mobility model.
'''
0: 'education', 1: 'office', 2: 'social', 3: 'supermarket', 4: 'home'
'''
# sites where infections happen
import matplotlib.pyplot as plt
rpts = 40
p_compliance = [0.0, 0.6, 1.0]
for j, policy in enumerate(['basic']):
    summaries_ = summaries_SD_6[policy]
    for s, summary in enumerate(summaries_):
        # contact_sites codes: 0 = uncontained/public, 1 = uncontained/site>=86,
        # 2 = contained/public, 3 = contained/site>=86.
        contact_sites = []
        num_contacts = 0
        num_nega = np.sum(summary.state['nega'])
        num_posi = np.sum(summary.state['posi'])
        num_i_contained_infectious_true = 0
        num_j_contained_infectious_true = 0
        num_i_contained_infectious_false = 0
        num_j_contained_infectious_false = 0
        #axs[1,s].boxplot(summary.state_started_at['posi'][np.multiply(summary.state_started_at['posi']!=np.inf, summary.state_started_at['posi']!=-1)])
        for r in range(rpts):
            num_contacts += len(summary.mob[r])
            for contact in summary.mob[r]:
                if (not contact.data['i_contained']) and (not contact.data['j_contained'] ):
                    if contact.site >= 86:
                        contact_sites.append(1)
                    else:
                        contact_sites.append(0)
                else:
                    if contact.site >= 86:
                        contact_sites.append(3)
                    else:
                        contact_sites.append(2)
        unique_sites, counts_sites = np.unique(contact_sites, return_counts=True)
        # Fraction of *uncontained* contacts that happened at site index >= 86.
        print(p_compliance[s], counts_sites[1]/(counts_sites[0]+counts_sites[1]))
unique_sites
```
## Laura
### Experiments_server_7-1 with essential workers
```
# Legend for the `counts` indices used below (which measure contained a contact).
'''
(0) 0: SocialDistancingForAllMeasure`
(1) 1: SocialDistancingForPositiveMeasure
(2) 2: SocialDistancingByAgeMeasure`
(3) 3: SocialDistancingForSmartTracing
(4) 4: SocialDistancingForKGroups`
(5) 5: UpperBoundCasesSocialDistancing`
(6) 'resi/dead'
(7) 'hosp'
(8) 'site_measures'
(9) not contained
'''
import matplotlib.pyplot as plt
p_compliance = [0.0, 1.0]
measures_deployed = [1,3,6,7]
summaries_SD_6 = load_summary('summaries_contacts_supermarket_bh0.pk')
for j, policy in enumerate(['advanced']):
    summaries_ = summaries_SD_6[policy]
    f,axs = plt.subplots(1,2,figsize = (13,5))
    for s, summary in enumerate(summaries_):
        # Separate containment tallies for essential vs. non-essential workers.
        essential_counts = np.zeros((10,))
        essential_num_contacts = 0
        nonessential_counts = np.zeros((10,))
        nonessential_num_contacts = 0
        for r in range(summary.random_repeats):
            for contact in summary.mob[r]:
                # `counts` aliases whichever tally individual j belongs to.
                if (summary.essential_workers[0][contact.indiv_j]==True):
                    essential_num_contacts += 1
                    counts = essential_counts
                else:
                    nonessential_num_contacts += 1
                    counts = nonessential_counts
                if (not contact.data['i_contained']) and (not contact.data['j_contained'] ):
                    counts[9] += 1
                else:
                    for i in range(6):
                        if (i in contact.data['i_contained_by']) or (i in contact.data['j_contained_by']):
                            counts[i] += 1
                    if ('resi/dead' in contact.data['i_contained_by']) or ('resi/dead' in contact.data['j_contained_by']):
                        counts[6] += 1
                    if ('hosp' in contact.data['i_contained_by']) or ('hosp' in contact.data['j_contained_by']):
                        counts[7] += 1
                    if ('site_measures' in contact.data['i_contained_by']) or ('site_measures' in contact.data['j_contained_by']):
                        counts[8] += 1
        essential_counts /= essential_num_contacts
        nonessential_counts /= nonessential_num_contacts
        width = 0.4
        xticks = np.arange(1,len(measures_deployed)+1)
        axs[s].bar(xticks-0.2,nonessential_counts[measures_deployed],width=width, label='Nonessential')
        axs[s].bar(xticks+0.2,essential_counts[measures_deployed],width=width, label='Essential')
        # NOTE(review): counts[9] here is from whichever tally was aliased last
        # in the loop above -- confirm the "% contained" figure is intended.
        axs[s].set_title('Tracking compliance '+str(p_compliance[s])+', '+ str(round((1-counts[9])*100,2))+'\% contained')
        axs[s].set_xlabel('contact status',fontsize = 20)
        axs[s].set_ylabel('proportion in sampled contacts',fontsize = 20)
        axs[s].set_xticks(range(1,len(measures_deployed)+1))
        axs[s].set_xticklabels(measures_deployed)
        axs[s].set_ylim(0,1.0)
        axs[s].legend()
#plt.tight_layout()
#plt.savefig('plots/contact_details.png',dpi=300)
plt.show()
# Graph num infected
c=0
runstr = f'run{c}_'
# summaries_SD_6 = load_summary('contact_record_test_1.pk')
FIGSIZE=(6, 4)
p_compliance = [0.0, 1.0]
plotter = Plotter()
titles_ = list(['Tracking compliance '+ str(int(p*100.0)) + ' \%' for p in p_compliance])
for j, policy in enumerate(['advanced']):
    summaries_list = summaries_SD_6[policy]
    plotter.compare_total_infections(
        summaries_list,
        titles=titles_,
        filename=runstr + f'experiments_7-1',
        figsize=FIGSIZE, acc=500,
        ymax=1000, errorevery=14)
# Tabular summary per compliance level.
for summ in summaries_SD_6['advanced']:
    df = make_summary_df(summ)
    display(df)
```
### Experiments_essential.py
```
# Plot results of experiments_essential.py
# Legend for the `counts` indices used below.
'''
(0) 0: SocialDistancingForAllMeasure`
(1) 1: SocialDistancingForPositiveMeasure
(2) 2: SocialDistancingByAgeMeasure`
(3) 3: SocialDistancingForSmartTracing
(4) 4: SocialDistancingForKGroups`
(5) 5: UpperBoundCasesSocialDistancing`
(6) 'resi/dead'
(7) 'hosp'
(8) 'site_measures'
(9) not contained
'''
# Human-readable tick labels matching the counts indices above.
measures = np.array(['SDForAll', 'SDForPositive', 'SDByAge', 'SDForSmartTracing','SDForKGroups','UpperBound','resi/dead','hosp','site_measures','not contained'])
import matplotlib.pyplot as plt
p_compliance = [0.0, 0.5]
measures_deployed = [1,3,6,7]
summaries_ = load_summary('summaries_r45.pk')
f,axs = plt.subplots(2,2,figsize = (13,10))
# Rows: compliance-assignment policy; columns: compliance level.
for j, policy in enumerate(['random','essential']):
    for s in range(len(p_compliance)):
        summary = summaries_[(policy,p_compliance[s])]
        essential_counts = np.zeros((10,))
        essential_num_contacts = 0
        nonessential_counts = np.zeros((10,))
        nonessential_num_contacts = 0
        for r in range(summary.random_repeats):
            for contact in summary.mob[r]:
                # `counts` aliases whichever tally individual j belongs to.
                if (summary.essential_workers[0][contact.indiv_j]==True):
                    essential_num_contacts += 1
                    counts = essential_counts
                else:
                    nonessential_num_contacts += 1
                    counts = nonessential_counts
                if (not contact.data['i_contained']) and (not contact.data['j_contained'] ):
                    counts[9] += 1
                else:
                    for i in range(6):
                        if (i in contact.data['i_contained_by']) or (i in contact.data['j_contained_by']):
                            counts[i] += 1
                    if ('resi/dead' in contact.data['i_contained_by']) or ('resi/dead' in contact.data['j_contained_by']):
                        counts[6] += 1
                    if ('hosp' in contact.data['i_contained_by']) or ('hosp' in contact.data['j_contained_by']):
                        counts[7] += 1
                    if ('site_measures' in contact.data['i_contained_by']) or ('site_measures' in contact.data['j_contained_by']):
                        counts[8] += 1
        essential_counts /= essential_num_contacts
        nonessential_counts /= nonessential_num_contacts
        width = 0.4
        xticks = np.arange(1,len(measures_deployed)+1)
        axs[j,s].bar(xticks-0.2,nonessential_counts[measures_deployed],width=width, label='Nonessential')
        axs[j,s].bar(xticks+0.2,essential_counts[measures_deployed],width=width, label='Essential')
        # NOTE(review): counts[9] here is from whichever tally was aliased last
        # in the loop above -- confirm the "% contained" figure is intended.
        axs[j,s].set_title('Tracking compliance '+str(p_compliance[s])+', '+ str(round((1-counts[9])*100,2))+'\% contained')
        axs[j,s].set_xlabel('contact status',fontsize = 20)
        axs[j,s].set_ylabel('proportion in sampled contacts',fontsize = 20)
        axs[j,s].set_xticks(range(1,len(measures_deployed)+1))
        axs[j,s].set_xticklabels(measures[measures_deployed],rotation=45,ha='right',fontsize=10)
        axs[j,s].set_ylim(0,1.0)
        axs[j,s].legend()
plt.tight_layout()
#plt.savefig('plots/contact_details.png',dpi=300)
plt.show()
# Infection curves for the three (policy, compliance) scenarios, plus summary tables.
c=0
runstr = f'run{c}_'
# summaries_ = load_summary('contact_record_test_1.pk')
FIGSIZE=(6, 4)
params = [('random',0.0),('random',0.5),('essential',0.5)]
plotter = Plotter()
titles_ = list(['Compliance '+ str(int(p*100.0)) + ' \%'+' '+policy for (policy, p) in params])
summaries_list = [summaries_[param] for param in params]
plotter.compare_total_infections(
    summaries_list,
    titles=titles_,
    filename=runstr + f'experiments_essential',
    figsize=FIGSIZE, acc=500,
    ymax=2000, errorevery=14)
for summ in summaries_list:
    df = make_summary_df(summ)
    display(df)
```
#### Experiments_essential_new with multiple worker types
```
# Plot results of experiments_essential.py
# Legend for the `counts` indices used below.
'''
(0) 0: SocialDistancingForAllMeasure`
(1) 1: SocialDistancingForPositiveMeasure
(2) 2: SocialDistancingByAgeMeasure`
(3) 3: SocialDistancingForSmartTracing
(4) 4: SocialDistancingForKGroups`
(5) 5: UpperBoundCasesSocialDistancing`
(6) 'resi/dead'
(7) 'hosp'
(8) 'site_measures'
(9) not contained
'''
# Human-readable tick labels matching the counts indices above.
measures = np.array(['SDForAll', 'SDForPositive', 'SDByAge', 'SDForSmartTracing','SDForKGroups','UpperBound','resi/dead','hosp','site_measures','not contained'])
import matplotlib.pyplot as plt
p_compliance = [0.0, 0.5]
measures_deployed = [1,3,6,7]
summaries_ = load_summary('summaries_r54.pk')
f,axs = plt.subplots(1,4,figsize = (20,5))
# One column per (policy, compliance) scenario.
for j, (policy,p) in enumerate([('None',0.0),('random',0.5),('essential',0.5),('None',1.0)]):
    summary = summaries_[(policy,p)]
    essential_counts = np.zeros((10,))
    essential_num_contacts = 0
    nonessential_counts = np.zeros((10,))
    nonessential_num_contacts = 0
    for r in range(summary.random_repeats):
        for contact in summary.mob[r]:
            # `counts` aliases whichever tally individual j belongs to.
            if (summary.essential_workers[0][contact.indiv_j]==True):
                essential_num_contacts += 1
                counts = essential_counts
            else:
                nonessential_num_contacts += 1
                counts = nonessential_counts
            if (not contact.data['i_contained']) and (not contact.data['j_contained'] ):
                counts[9] += 1
            else:
                for i in range(6):
                    if (i in contact.data['i_contained_by']) or (i in contact.data['j_contained_by']):
                        counts[i] += 1
                if ('resi/dead' in contact.data['i_contained_by']) or ('resi/dead' in contact.data['j_contained_by']):
                    counts[6] += 1
                if ('hosp' in contact.data['i_contained_by']) or ('hosp' in contact.data['j_contained_by']):
                    counts[7] += 1
                if ('site_measures' in contact.data['i_contained_by']) or ('site_measures' in contact.data['j_contained_by']):
                    counts[8] += 1
    essential_counts /= essential_num_contacts
    nonessential_counts /= nonessential_num_contacts
    width = 0.4
    xticks = np.arange(1,len(measures_deployed)+1)
    axs[j].bar(xticks-0.2,nonessential_counts[measures_deployed],width=width, label='Nonessential')
    axs[j].bar(xticks+0.2,essential_counts[measures_deployed],width=width, label='Essential')
    # NOTE(review): counts[9] here is from whichever tally was aliased last in
    # the loop above -- confirm the "% contained" figure is intended.
    axs[j].set_title('Compliance '+policy+' '+str(p)+', '+ str(round((1-counts[9])*100,2))+'\% contained')
    axs[j].set_xlabel('contact status',fontsize = 20)
    axs[j].set_ylabel('proportion in sampled contacts',fontsize = 20)
    axs[j].set_xticks(range(1,len(measures_deployed)+1))
    axs[j].set_xticklabels(measures[measures_deployed],rotation=45,ha='right',fontsize=10)
    axs[j].set_ylim(0,1.0)
    axs[j].legend()
plt.tight_layout()
#plt.savefig('plots/contact_details.png',dpi=300)
plt.show()
c=0
runstr = f'run{c}_'
# summaries_ = load_summary('contact_record_test_1.pk')
FIGSIZE=(6, 4)
params = [('None',0.0),('random',0.5),('essential',0.5),('None',1.0)]
plotter = Plotter()
titles_ = list(['Compliance '+ str(int(p*100.0)) + ' \%'+' '+policy for (policy, p) in params])
summaries_list = [summaries_[param] for param in params]
plotter.compare_total_infections(
summaries_list,
titles=titles_,
filename=runstr + f'experiments_essential',
figsize=FIGSIZE, acc=500,
ymax=2000, errorevery=14)
for summ in summaries_list:
df = make_summary_df(summ)
display(df)
```
## Emma
```
# Plot results of experiments_server_7-1.py
# Compare total infections across tracking-compliance levels for the 'basic'
# tracing policy.  `load_summary`, `Plotter` and `plt` come from earlier cells.
c=0
runstr = f'run{c}_'
summaries_SD_6 = load_summary('summaries_SD_5.pk')
FIGSIZE=(6, 4)
p_compliance = [0.0, 0.6, 1.0]
plotter = Plotter()
titles_SD_6_ = list(['Tracking compliance '+ str(int(p*100.0)) + ' \%' for p in p_compliance])
for j, policy in enumerate(['basic']):
    summaries_ = summaries_SD_6[policy]
    plotter.compare_total_infections(
        summaries_,
        titles=titles_SD_6_,
        figtitle=(f'Infections for compliance levels for ' + policy + ' individuals compliant with contact-tracing'),
        filename=runstr + f'SD_6{j}'+'tracing_isolate_5sftest_5trace_sup20_isohouse_40rpts_010betas',
        figsize=FIGSIZE, acc=500,
        ymax=5000, errorevery=14)
plt.show()
# Scatter, per individual, the averaged "traced" frequency against the state
# they were in while traced, split into all / essential / non-essential
# workers.  Relies on `load_summary`, `np`, `plt` and `Plotter` from earlier
# cells.
c=0
runstr = f'run{c}_'
summaries_SD_6 = load_summary('tracing_isolate_sftest_2trace_stochastic_20pctall_isohouse_40rpts_005betas_trace_friends_only_colleagues_tracehouse.pk')
# titles_SD_6_ = list(['Tracking compliance '+ str(int(p*100.0)) + ' \%' for p in p_compliance])
p_compliance = [0.4]
all_states = ['susc', 'expo', 'ipre', 'isym', 'iasy', 'posi', 'nega', 'resi', 'dead', 'hosp']
infectious_states = ['ipre', 'isym', 'iasy', 'posi']
noninfectious_states = ['susc', 'expo', 'nega', 'resi', 'dead', 'hosp']
# NOTE(review): `plot_states` is used as a *string* mode switch here; the dict
# comprehensions below that iterate it therefore key on single characters.
# Those dicts are unused in this mode, but confirm before switching modes.
plot_states ='seperate_infectious_noninfectious' #'combine_infectious_states' #['susc', 'expo', 'ipre', 'isym', 'iasy', 'posi', 'resi']
active_measures = ['CT','posi_measure']
alphas = np.linspace(0.6,0.3,num=len(plot_states))
plotter = Plotter()
for j, policy in enumerate(['basic']):
    summaries_ = summaries_SD_6[policy]
    # f,axs = plt.subplots(1,len(p_compliance),figsize = (13,5))
    for c, summary in enumerate(summaries_): # each compliance rate
        # Only the third compliance level is plotted.
        if c != 2: continue
        fig = plt.figure(figsize=(21,7))
        axs1 = fig.add_subplot(131)
        axs2 = fig.add_subplot(132)
        axs3 = fig.add_subplot(133)
        # Per-person counters, accumulated over repeats and averaged below.
        traced_times = {cur_measure: np.zeros(summary.n_people) for cur_measure in active_measures}
        traced_all_states = {cur_state: np.zeros(summary.n_people) for cur_state in all_states}
        traced_all_states_essential = {cur_state: np.zeros(summary.n_people) for cur_state in all_states}
        traced_all_states_normal = {cur_state: np.zeros(summary.n_people) for cur_state in all_states}
        traced_infectious_states = np.zeros(summary.n_people)
        traced_infectious_states_essential = np.zeros(summary.n_people)
        traced_infectious_states_normal = np.zeros(summary.n_people)
        traced_noninfectious_states = np.zeros(summary.n_people)
        traced_noninfectious_states_essential = np.zeros(summary.n_people)
        traced_noninfectious_states_normal = np.zeros(summary.n_people)
        sum_all_states = 0
        sum_all_states_essential = 0
        sum_all_states_normal = 0
        traced_all_states_ratio = {cur_state: np.zeros(1) for cur_state in all_states}
        traced_all_states_ratio_essential = {cur_state: np.zeros(1) for cur_state in all_states}
        traced_all_states_ratio_normal = {cur_state: np.zeros(1) for cur_state in all_states}
        traced_plot_states_ratio = {cur_state: np.zeros(1) for cur_state in plot_states}
        traced_plot_states_ratio_essential = {cur_state: np.zeros(1) for cur_state in plot_states}
        traced_plot_states_ratio_normal = {cur_state: np.zeros(1) for cur_state in plot_states}
        # Accumulate traced counts over all random repeats.
        for r in range(summary.random_repeats): # each repeat
            for cur_measure in active_measures:
                traced_times[cur_measure] += summary.is_traced[cur_measure][r]
            for cur_state in all_states:
                traced_all_states[cur_state] += summary.is_traced_state[cur_state][r]
        # Average over repeats.
        for cur_measure in active_measures:
            traced_times[cur_measure] /= summary.random_repeats
        for cur_state in all_states:
            # traced times for all indiv at all states
            traced_all_states[cur_state] /= summary.random_repeats
            # seperate essential and normal
            traced_all_states_essential[cur_state] = np.multiply(traced_all_states[cur_state],summary.essential_workers[0])
            traced_all_states_normal[cur_state] = np.multiply(traced_all_states[cur_state],1-summary.essential_workers[0])
            # # sum_all_states += np.count_nonzero(avg_traced_all_states[cur_state])
            # # traced_all_states_ratio[cur_state] = np.count_nonzero(avg_traced_all_states[cur_state])
            # # sum_all_states_essential += np.count_nonzero(~np.isnan(avg_traced_all_states_essential[cur_state]))
            # # sum_all_states_normal += np.count_nonzero(~np.isnan(avg_traced_all_states_normal[cur_state]))
            # # traced_all_states_ratio_essential[cur_state] = np.count_nonzero(~np.isnan(avg_traced_all_states_essential[cur_state]))
            # # traced_all_states_ratio_normal[cur_state] = np.count_nonzero(~np.isnan(avg_traced_all_states_normal[cur_state]))
            sum_all_states += sum((traced_all_states[cur_state]))
            traced_all_states_ratio[cur_state] = sum((traced_all_states[cur_state])) # to be discussed
            sum_all_states_essential += sum((traced_all_states_essential[cur_state]))
            sum_all_states_normal += sum((traced_all_states_normal[cur_state]))
            traced_all_states_ratio_essential[cur_state] = sum((traced_all_states_essential[cur_state]))
            traced_all_states_ratio_normal[cur_state] = sum((traced_all_states_normal[cur_state]))
        # Normalize per-state totals into ratios (guard against an all-zero run).
        for cur_state in all_states:
            if sum_all_states != 0:
                traced_all_states_ratio[cur_state] /= sum_all_states
                traced_all_states_ratio_essential[cur_state] /= sum_all_states_essential
                traced_all_states_ratio_normal[cur_state] /= sum_all_states_normal
            else:
                traced_all_states_ratio[cur_state] = 0
                traced_all_states_ratio_essential[cur_state] = 0
                traced_all_states_ratio_normal[cur_state] = 0
        # for cur_state in infectious_states:
        #     traced_infectious_states += traced_all_states[cur_state]
        # traced_infectious_states_essential = np.multiply(traced_infectious_states,summary.essential_workers[0])
        # traced_infectious_states_normal = np.multiply(traced_infectious_states,1-summary.essential_workers[0])
        # traced_infectious_states_essential[traced_infectious_states_essential==0] = 'nan'
        # traced_infectious_states_normal[traced_infectious_states_normal==0] = 'nan'
        for cur_state in noninfectious_states:
            traced_noninfectious_states += traced_all_states[cur_state]
        traced_noninfectious_states_essential = np.multiply(traced_noninfectious_states,summary.essential_workers[0])
        traced_noninfectious_states_normal = np.multiply(traced_noninfectious_states,1-summary.essential_workers[0])
        # Zero entries become NaN so scatter does not draw them.
        traced_noninfectious_states_essential[traced_noninfectious_states_essential==0] = 'nan'
        traced_noninfectious_states_normal[traced_noninfectious_states_normal==0] = 'nan'
        # Zihan:
        # Infectious = total CT-traced minus non-infectious-traced.
        traced_infectious_states = traced_times['CT'] - traced_noninfectious_states
        traced_infectious_states_essential = np.multiply(traced_infectious_states,summary.essential_workers[0])
        traced_infectious_states_normal = np.multiply(traced_infectious_states,1-summary.essential_workers[0])
        traced_infectious_states_essential[traced_infectious_states_essential==0] = 'nan'
        traced_infectious_states_normal[traced_infectious_states_normal==0] = 'nan'
        ## plot
        # Diagonal y = x reference line in each panel.
        axs1.plot(traced_times['CT'],traced_times['CT'],linestyle='--',color='black',alpha=0.1)
        axs2.plot(traced_times['CT'],traced_times['CT'],linestyle='--',color='black',alpha=0.1)
        axs3.plot(traced_times['CT'],traced_times['CT'],linestyle='--',color='black',alpha=0.1)
        if plot_states == 'combine_infectious_states':
            # NOTE(review): this branch looks stale -- it passes the *dict*
            # `traced_times` to scatter and references `i`/`cur_state`, which
            # are not defined here; it would raise if this mode were selected.
            axs1.scatter(traced_times, traced_infectious_states_essential,
                alpha=alphas[i], edgecolors=None, label=cur_state, color='tab:blue')
            axs1.scatter(traced_times, traced_infectious_states_essential,
                alpha=alphas[i], edgecolors=None, label=cur_state, color='tab:red')
            axs2.scatter(traced_times, traced_infectious_states_essential,
                alpha=alphas[i], edgecolors=None, label=cur_state, color='tab:red')
            axs3.scatter(traced_times, traced_infectious_states_normal,
                alpha=alphas[i], edgecolors=None, label=cur_state, color='tab:blue')
            axs1.set_title('infectious traced freq for all indiv')
            axs2.set_title('infectious traced freq for essential')
            axs3.set_title('infectious traced freq for non essential')
        elif plot_states == 'nonzero_states':
            # NOTE(review): `axs` is not defined in this cell (only the
            # commented-out subplots line above); this branch would raise too.
            for i, cur_state in enumerate(all_states):
                if sum(traced_all_states[cur_state]) != 0:
                    axs.scatter(traced_times, traced_all_states[cur_state],
                        alpha=alphas[i], edgecolors=None, label=cur_state)
            axs.legend()
        elif plot_states == 'seperate_infectious_noninfectious':
            # Active mode: infectious vs non-infectious stay-home frequency.
            alll = np.nansum(traced_noninfectious_states) + np.nansum(traced_infectious_states)
            axs1.scatter(traced_times['CT'], traced_noninfectious_states,
                alpha=0.6, edgecolors=None, label='non-infectious: '
                +str(round((np.nansum(traced_noninfectious_states)/alll)*100,2))+'\%', color='tab:blue')
            axs1.scatter(traced_times['CT'], traced_infectious_states,
                alpha=0.3, edgecolors=None, label='infectious: '
                +str(round((np.nansum(traced_infectious_states)/alll)*100,2))+'\%', color='tab:red')
            alll = np.nansum(traced_noninfectious_states_essential) + np.nansum(traced_infectious_states_essential)
            axs2.scatter(traced_times['CT'], traced_noninfectious_states_essential,
                alpha=0.6, edgecolors=None, label='non-infectious: '
                +str(round((np.nansum(traced_noninfectious_states_essential)/alll)*100,2))+'\%', color='tab:blue')
            axs2.scatter(traced_times['CT'], traced_infectious_states_essential,
                alpha=0.3, edgecolors=None, label='infectious: '
                +str(round((np.nansum(traced_infectious_states_essential)/alll)*100,2))+'\%', color='tab:red')
            alll = np.nansum(traced_noninfectious_states_normal) + np.nansum(traced_infectious_states_normal)
            axs3.scatter(traced_times['CT'], traced_noninfectious_states_normal,
                alpha=0.6, edgecolors=None, label='non-infectious: '
                +str(round((np.nansum(traced_noninfectious_states_normal)/alll)*100,2))+'\%', color='tab:blue')
            axs3.scatter(traced_times['CT'], traced_infectious_states_normal,
                alpha=0.3, edgecolors=None, label='infectious: '
                +str(round((np.nansum(traced_infectious_states_normal)/alll)*100,2))+'\%', color='tab:red')
        else:
            # compute relative state ratio
            sum_traced_plot_states_ratio = 0
            sum_traced_plot_states_ratio_essential = 0
            sum_traced_plot_states_ratio_normal = 0
            for cur_state in plot_states:
                sum_traced_plot_states_ratio += traced_all_states_ratio[cur_state]
                traced_plot_states_ratio[cur_state] = traced_all_states_ratio[cur_state]
                sum_traced_plot_states_ratio_essential += traced_all_states_ratio_essential[cur_state]
                traced_plot_states_ratio_essential[cur_state] = traced_all_states_ratio_essential[cur_state]
                sum_traced_plot_states_ratio_normal += traced_all_states_ratio_normal[cur_state]
                traced_plot_states_ratio_normal[cur_state] = traced_all_states_ratio_normal[cur_state]
            for cur_state in plot_states:
                traced_all_states_essential[cur_state][traced_all_states_essential[cur_state]==0] = 'nan'
                traced_all_states_normal[cur_state][traced_all_states_normal[cur_state]==0] = 'nan'
                if sum_traced_plot_states_ratio != 0:
                    traced_plot_states_ratio[cur_state] /= sum_traced_plot_states_ratio
                    traced_plot_states_ratio_essential[cur_state] /= sum_traced_plot_states_ratio_essential
                    traced_plot_states_ratio_normal[cur_state] /= sum_traced_plot_states_ratio_normal
            # plot
            # NOTE(review): these scatters also pass the dict `traced_times`;
            # presumably traced_times['CT'] was intended -- confirm before
            # enabling this mode.
            for i, cur_state in enumerate(plot_states):
                axs1.scatter(traced_times, traced_all_states[cur_state],
                    alpha=alphas[i], edgecolors=None, label=cur_state+': '
                    +str(round(traced_plot_states_ratio[cur_state]*100,2))+'\%')
                axs2.scatter(traced_times, traced_all_states_essential[cur_state],
                    alpha=alphas[i], edgecolors=None, label=cur_state+': '
                    +str(round(traced_plot_states_ratio_essential[cur_state]*100,2))+'\%')
                axs3.scatter(traced_times, traced_all_states_normal[cur_state],
                    alpha=alphas[i], edgecolors=None, label=cur_state+': '
                    +str(round(traced_plot_states_ratio_normal[cur_state]*100,2))+'\%')
        # axs1.set_title('Stay home by Posi measure vs by CT for ' + r"$\bf{" + 'all\ indiv' + "}$")
        # axs2.set_title('Stay home by Posi measure vs by CT for ' + r"$\bf{" + 'essential' + "}$")
        # axs3.set_title('Stay home by Posi measure vs by CT for ' + r"$\bf{" + 'non-essential' + "}$")
        axs1.set_title('Infectious vs non-infectious stay home for ' + r"$\bf{" + 'all\ indiv' + "}$")
        axs2.set_title('Infectious vs non-infectious stay home for ' + r"$\bf{" + 'essential' + "}$")
        axs3.set_title('Infectious vs non-infectious stay home for ' + r"$\bf{" + 'non-essential' + "}$")
        axs1.legend()
        axs2.legend()
        axs3.legend()
        # axs1.set_xticks(range(0, int(max(traced_times['CT'])+1)))
        # axs2.set_xticks(range(0, int(max(traced_times['CT'])+1)))
        # axs3.set_xticks(range(0, int(max(traced_times['CT'])+1)))
        axs1.set_xlabel('traced frequency, [num of times]',fontsize = 15)
        axs2.set_xlabel('traced frequency, [num of times]',fontsize = 15)
        axs3.set_xlabel('traced frequency, [num of times]',fontsize = 15)
        axs1.set_ylabel('traced state frequency, [num of times]',fontsize = 15)
        # axs2.set_ylabel('traced state frequency, [num of times]',fontsize = 15)
        # axs3.set_ylabel('traced state frequency, [num of times]',fontsize = 15)
        print(traced_times)
        # print(avg_traced_infectious_states)
        # print(traced_all_states_ratio)
        plt.savefig('plots/tracing_isolate_sftest_2trace_stochastic_20pctall_isohouse_40rpts_005betas_trace_friends_only_colleagues_tracehouse_04compliance.png',dpi=300)
        plt.show()
def computeAverageTraced(summary, cur_state, r, t):
    """Count individuals of repeat `r` who are staying home at time `t`
    because they were traced while in state `cur_state`.

    A traced interval counts if it is active at `t` (start < t < end).
    Consecutive traces of the same person within one week (24*7 hours) of the
    previous trace's start are treated as the same isolation episode and are
    not counted again.
    """
    total = 0
    started = summary.trace_started_at[cur_state][r]
    ended = summary.trace_ended_at[cur_state][r]
    for person in range(summary.n_people):
        events = started[person]
        if not events:  # person was never traced in this repeat
            continue
        for k, t_start in enumerate(events):
            # Interval active at time t?
            if t_start < t and ended[person][k] > t:
                # First trace always counts; later ones only if more than a
                # week after the previous trace started.
                if k == 0 or t_start > events[k - 1] + 24.0 * 7:
                    total += 1
    return total
# Time series of how many infectious individuals are staying home due to
# contact tracing (CT) vs. due to a positive-test measure, per policy and
# compliance level.  Uses `computeAverageTraced` defined above and
# `load_summary`, `np`, `plt`, `Plotter` from earlier cells.
c=0
runstr = f'run{c}_'
TO_HOURS = 24.0  # simulation time is in hours; convert to days for the x axis
acc=500          # number of time samples per run
summaries_SD_6 = load_summary('summaries_SD_5.pk')
# titles_SD_6_ = list(['Tracking compliance '+ str(int(p*100.0)) + ' \%' for p in p_compliance])
p_compliance = [0.0, 1.0]
all_states = ['susc', 'expo', 'ipre', 'isym', 'iasy', 'posi', 'nega', 'resi', 'dead', 'hosp']
infectious_states = ['ipre', 'isym', 'iasy', 'posi']
plot_states = infectious_states #'combine_infectious_states' #['susc', 'expo', 'ipre', 'isym', 'iasy', 'posi', 'resi']
alphas = np.linspace(0.6,0.3,num=len(plot_states))
plotter = Plotter()
for j, policy in enumerate(['basic','advanced']):
    summaries_ = summaries_SD_6[policy]
    # f,axs = plt.subplots(1,len(p_compliance),figsize = (13,5))
    for c, summary in enumerate(summaries_): # each compliance rate
        fig = plt.figure(figsize=(10,6))
        ax = fig.add_subplot(111)
        # (t, mean, std) series per state, plus the CT aggregate.
        ts_ipre, means_ipre, stds_ipre = [], [], []
        ts_isym, means_isym, stds_isym = [], [], []
        ts_iasy, means_iasy, stds_iasy = [], [], []
        ts_posi, means_posi, stds_posi = [], [], []
        ts_CT, means_CT, stds_CT = [], [], []
        # Per-repeat counts at the current time sample.
        cur_num_of_ipre_traced_indiv = np.zeros(summary.random_repeats)
        cur_num_of_isym_traced_indiv = np.zeros(summary.random_repeats)
        cur_num_of_iasy_traced_indiv = np.zeros(summary.random_repeats)
        cur_num_of_posi_traced_indiv = np.zeros(summary.random_repeats)
        for t in np.linspace(0.0, summary.max_time, num=acc, endpoint=True): # each time
            for r in range(summary.random_repeats): # each repeat
                cur_num_of_ipre_traced_indiv[r] = computeAverageTraced(summary,'ipre',r,t)
                cur_num_of_isym_traced_indiv[r] = computeAverageTraced(summary,'isym',r,t)
                cur_num_of_iasy_traced_indiv[r] = computeAverageTraced(summary,'iasy',r,t)
                cur_num_of_posi_traced_indiv[r] = computeAverageTraced(summary,'posi',r,t)
                # repeatr_ipre_trace_started_at = summary.trace_started_at['ipre'][r]
                # repeatr_ipre_trace_ended_at = summary.trace_ended_at['ipre'][r]
                # for i in range(summary.n_people): # each person
                #     if repeatr_ipre_trace_started_at[i]: # if person i is traced
                #         for cur_traced_time in range(len(repeatr_ipre_trace_started_at[i])):
                #             if (repeatr_ipre_trace_started_at[i][cur_traced_time]<t) and (repeatr_CT_trace_ended_at[i][cur_traced_time])>t: # if i is home at t
                #                 if (cur_traced_time ==0):
                #                     cur_num_of_ipre_traced_indiv[r] += 1
                #                 if (cur_traced_time != 0) and (repeatr_ipre_trace_started_at[i][cur_traced_time] >
                #                     (repeatr_ipre_trace_started_at[i][cur_traced_time-1]+24.0*7)):
                #                     cur_num_of_ipre_traced_indiv[r] += 1
                # cur_num_of_CT_traced_indiv[r] +=
                #     np.sum([(repeatr_CT_trace_started_at[i][j]<t) and (repeatr_CT_trace_ended_at[i][j])>t
                #             for j in range(len(repeatr_CT_trace_ended_at[i]))])
            # CT aggregate = sum over the three (non-posi) infectious states.
            cur_num_of_CT_traced_indiv = cur_num_of_ipre_traced_indiv+cur_num_of_isym_traced_indiv+cur_num_of_iasy_traced_indiv
            ts_CT.append(t/TO_HOURS)
            means_CT.append(np.mean(cur_num_of_CT_traced_indiv))
            stds_CT.append(np.std(cur_num_of_CT_traced_indiv))
            ts_ipre.append(t/TO_HOURS)
            means_ipre.append(np.mean(cur_num_of_ipre_traced_indiv))
            stds_ipre.append(np.std(cur_num_of_ipre_traced_indiv))
            ts_isym.append(t/TO_HOURS)
            means_isym.append(np.mean(cur_num_of_isym_traced_indiv))
            stds_isym.append(np.std(cur_num_of_isym_traced_indiv))
            ts_iasy.append(t/TO_HOURS)
            means_iasy.append(np.mean(cur_num_of_iasy_traced_indiv))
            stds_iasy.append(np.std(cur_num_of_iasy_traced_indiv))
            ts_posi.append(t/TO_HOURS)
            means_posi.append(np.mean(cur_num_of_posi_traced_indiv))
            stds_posi.append(np.std(cur_num_of_posi_traced_indiv))
        # ax.errorbar(ts_CT, means_CT, yerr=stds_CT)
        ax.plot(ts_CT,means_CT,label='CT')
        ax.plot(ts_posi,means_posi,label='posi measure')
        ax.set_xlabel('simulation time, [days]',fontsize = 15)
        ax.set_ylabel('infectious population stay home',fontsize = 15)
        ax.set_title('CT=1.0, '+ policy)
        ax.legend()
        plt.show()
# Re-plot the last computed series (final policy/compliance) in its own figure.
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
# ax.errorbar(ts_CT, means_CT, yerr=stds_CT)
ax.plot(ts_CT,means_CT,label='CT')
ax.plot(ts_posi,means_posi,label='posi measure')
ax.set_xlabel('simulation time, [days]',fontsize = 15)
ax.set_ylabel('infectious population stay home',fontsize = 15)
ax.set_title('CT=1.0, advanced')
ax.legend()
plt.show()
'''
(0) 1: SocialDistancingForAllMeasure`
(1) 2: SocialDistancingForPositiveMeasure
(2) 3: SocialDistancingByAgeMeasure`
(3) 4: SocialDistancingForSmartTracing
(4) 5: SocialDistancingForKGroups`
(5) 6: UpperBoundCasesSocialDistancing`
(6) 'resi/dead'
(7) 'hosp'
(8) 'site_measures'
(9) not contained
(10) contained_infectious
'''
# Bar/pie charts of which measure contained each sampled contact, for the
# 'advanced' tracing policy; counts index meanings are documented in the
# legend string above.  Relies on `load_summary`, `np` and
# `fixOverLappingText` from earlier cells.
import matplotlib.pyplot as plt
rpts = 2  # only the first two random repeats are sampled
p_compliance = [0.0, 1.0]
measures_deployed = [0,1,3,6,7]
measures_deployed_str = ['Shutdown','Posi Measure',
    'Age Measure','CT',
    'K groups','Upper Bound','resi/dead','hosp']
# summaries_SD_6 = load_summary('summaries_SD_5_advanced.pk')
summaries_SD_6 = load_summary('summaries_r62.pk')
for j, policy in enumerate(['advanced']):
    summaries_ = summaries_SD_6[policy]
    # One column per compliance level (assumes len(summaries_) == 2).
    f,axs = plt.subplots(1,2,figsize = (13,5))
    f1,axs1 = plt.subplots(1,2,figsize = (13,5))
    f2,axs2 = plt.subplots(1,2,figsize = (13,5))  # NOTE(review): f2/axs2 are never drawn to
    for s, summary in enumerate(summaries_):
        counts = np.zeros((11,))
        num_contacts = 0
        for r in range(rpts):
            num_contacts += len(summary.mob[r])
            for contact in summary.mob[r]:
                if (not contact.data['i_contained']) and (not contact.data['j_contained'] ):
                    counts[9] += 1  # not contained at all
                else:
                    # Measures 0..5 by integer id, 6..8 by string tag.
                    for i in range(6):
                        if (i in contact.data['i_contained_by']) or (i in contact.data['j_contained_by']):
                            counts[i] += 1
                    if ('resi/dead' in contact.data['i_contained_by']) or ('resi/dead' in contact.data['j_contained_by']):
                        counts[6] += 1
                    if ('hosp' in contact.data['i_contained_by']) or ('hosp' in contact.data['j_contained_by']):
                        counts[7] += 1
                    if ('site_measures' in contact.data['i_contained_by']) or ('site_measures' in contact.data['j_contained_by']):
                        counts[8] += 1
                if contact.data['i_contained_infectious'] or contact.data['j_contained_infectious']:
                    counts[10] += 1  # contained contact involved an infectious person
        counts /= num_contacts
        # NOTE(review): denominator covers measures 0..4 only (counts[0:5]) --
        # confirm whether 0..8 was intended here.
        counts[10] = counts[10]/sum(counts[0:5])
        measure_distribution = counts[0:8]/sum(counts[0:8])
        print('infectious contained rate: ', counts[10])
        axs[s].bar(range(1,len(measures_deployed)+1),counts[measures_deployed])
        axs[s].set_title('CT '+str(p_compliance[s])+', '+ str(round((1-counts[9])*100,2))+'\% contained, '+str(round(counts[10]*100,2))+'\% are infectious')
        axs[s].set_xlabel('contact status',fontsize = 20)
        axs[s].set_ylabel('proportion in sampled contacts',fontsize = 20)
        axs[s].set_xticks(range(1,len(measures_deployed)+1))
        axs[s].set_xticklabels(measures_deployed)
        axs[s].set_ylim(0,0.8)
        # axs1[s].pie(measure_distribution,labels=measures_deployed_str)
        # pie() returns (wedges, texts, autotexts); [1] grabs the label texts.
        text = axs1[s].pie(measure_distribution,labels=measures_deployed_str,autopct='%.2f')[1]
        fixOverLappingText(text)
        # for label, t in zip(measures_deployed_str, text[1]):
        #     x, y = t.get_position()
        #     angle = int(math.degrees(math.atan2(y, x)))
        #     ha = "left"
        #     va = "bottom"
        #     if angle > 90:
        #         angle -= 180
        #     if angle < 0:
        #         va = "top"
        #     if -45 <= angle <= 0:
        #         ha = "right"
        #         va = "bottom"
        #     plt.annotate(label, xy=(x,y), rotation=angle, ha=ha, va=va, size=8)
#plt.tight_layout()
#plt.savefig('plots/contact_details.png',dpi=300)
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/btian/deep-learning/blob/main/debiasing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import IPython
# Lecture video accompanying this lab.
IPython.display.YouTubeVideo('59bMh59JQDo')
%tensorflow_version 2.x
import tensorflow as tf
import functools
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
!pip install mitdeeplearning -qq
import mitdeeplearning as mdl
# Download the face/not-face training dataset (HDF5) and wrap it in a loader.
train_path = tf.keras.utils.get_file('train_face.h5', 'https://www.dropbox.com/s/hlz8atheyozp1yx/train_face.h5?dl=1')
loader = mdl.lab2.TrainingDatasetLoader(train_path)
n = loader.get_train_size()
print(n)
(images, labels) = loader.get_batch(100)
# Label 1 = face, label 0 = not a face.
face_images = images[np.where(labels==1)[0]]
not_face_images = images[np.where(labels==0)[0]]
idx_face = 23      # arbitrary example indices to display
idx_not_face = 6
# Show one example of each class side by side.
plt.figure(figsize=(5,5))
plt.subplot(1, 2, 1)
plt.imshow(face_images[idx_face])
plt.title('Face'); plt.grid(False)
plt.subplot(1, 2, 2)
plt.imshow(not_face_images[idx_not_face])
plt.title('Not Face'); plt.grid(False)
n_filters = 12  # base number of convolutional filters for the CNN below
def make_standard_classifier(n_outputs=1):
    """Build the standard CNN face classifier.

    Four strided Conv2D + BatchNorm stages with 1x/2x/4x/6x the module-level
    `n_filters`, followed by a 512-unit relu dense layer and a final linear
    layer emitting `n_outputs` raw logits.
    """
    conv = functools.partial(tf.keras.layers.Conv2D, padding='same', activation='relu')
    dense = functools.partial(tf.keras.layers.Dense, activation='relu')
    batch_norm = tf.keras.layers.BatchNormalization

    layers = []
    for depth_mult, kernel in ((1, 5), (2, 5), (4, 3), (6, 3)):
        layers.append(conv(filters=depth_mult*n_filters, kernel_size=kernel, strides=2))
        layers.append(batch_norm())
    layers.append(tf.keras.layers.Flatten())
    layers.append(dense(512))
    layers.append(dense(n_outputs, activation=None))  # raw logits, no activation
    return tf.keras.Sequential(layers)
standard_classifier = make_standard_classifier()
# Training hyperparameters for the standard classifier.
bs = 32       # batch size
epochs = 2
lr = 1e-3
optimizer = tf.keras.optimizers.Adam(lr)
loss_history = mdl.util.LossHistory(smoothing_factor=0.99)
plotter = mdl.util.PeriodicPlotter(sec=2, scale='semilogy')
if hasattr(tqdm, '_instances'): tqdm._instances.clear()  # clear stale progress bars
@tf.function
def standard_train_step(x, y):
    """Run one Adam step on `standard_classifier`; returns per-example losses."""
    with tf.GradientTape() as tape:
        logits = standard_classifier(x)
        batch_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
    gradients = tape.gradient(batch_loss, standard_classifier.trainable_variables)
    optimizer.apply_gradients(zip(gradients, standard_classifier.trainable_variables))
    return batch_loss
# Train the standard classifier.
for epoch in range(epochs):
    for idx in tqdm(range(loader.get_train_size() // bs)):
        x, y = loader.get_batch(bs)
        loss = standard_train_step(x, y)
        loss_history.append(loss.numpy().mean())
        plotter.plot(loss_history.get())
# Evaluate on a large batch drawn from the training set (training accuracy).
batch_x, batch_y = loader.get_batch(5000)
y_pred_standard = tf.round(tf.nn.sigmoid(standard_classifier.predict(batch_x)))
acc_standard = tf.reduce_mean(tf.cast(tf.equal(batch_y, y_pred_standard), tf.float32))
print(f'Standard CNN accuracy on training set: {acc_standard.numpy():.4f}')
# Probe the classifier on the four demographic test groups.
test_faces = mdl.lab2.get_test_faces()
keys = ['Light Female', 'Light Male', 'Dark Female', 'Dark Male']
for group, key in zip(test_faces, keys):
    plt.figure(figsize=(5,5))
    plt.imshow(np.hstack(group))
    plt.title(key, fontsize=15)
standard_classifier_logits = [standard_classifier(np.array(x, dtype=np.float32)) for x in test_faces]
standard_classifier_probs = tf.squeeze(tf.sigmoid(standard_classifier_logits))
# Mean predicted face probability per demographic group.
xx = range(len(keys))
yy = standard_classifier_probs.numpy().mean(1)
plt.bar(xx, yy)
plt.xticks(xx, keys)
plt.ylim(max(0, yy.min() - yy.ptp() / 2.), yy.max() + yy.ptp() / 2.)
plt.title('Standard Classifier Predictions')
def vae_loss_function(x, x_recon, mu, logsigma, kl_weight=5e-4):
    """Per-sample VAE loss: L1 reconstruction + weighted KL(q(z|x) || N(0, I))."""
    # Closed-form KL divergence of a diagonal Gaussian from the unit Gaussian.
    kl_div = 0.5 * tf.reduce_sum(tf.exp(logsigma) + tf.square(mu) - 1.0 - logsigma, axis=1)
    # Mean absolute pixel error over the image axes (H, W, C).
    recon_err = tf.reduce_mean(tf.abs(x - x_recon), axis=(1,2,3))
    return kl_weight * kl_div + recon_err
### VAE Reparameterization ###
def sampling(z_mean, z_logsigma):
    """Reparameterization trick: draw z = mu + sigma * eps with eps ~ N(0, I)."""
    n_rows, n_latent = z_mean.shape
    eps = tf.random.normal(shape=(n_rows, n_latent))
    return z_mean + tf.exp(0.5 * z_logsigma) * eps
### Loss function for DB-VAE ###
def debiasing_loss_function(x, x_pred, y, y_logit, mu, logsigma):
    """Loss function for DB-VAE.

    FIX: this docstring previously sat *above* the `def`, where it was a dead
    string literal instead of the function's docstring; it now documents the
    function properly.

    # Arguments
        x: true input x
        x_pred: reconstructed x
        y: true label (face or not face)
        y_logit: predicted labels
        mu: mean of latent distribution (Q(z|X))
        logsigma: log of standard deviation of latent distribution (Q(z|X))
    # Returns
        total_loss: DB-VAE total loss
        classification_loss: DB-VAE classification loss
    """
    vae_loss = vae_loss_function(x, x_pred, mu, logsigma)
    classification_loss = tf.nn.sigmoid_cross_entropy_with_logits(y, y_logit)
    # Use the training data labels to create variable face_indicator:
    # indicator that reflects which training data are images of faces.
    face_indicator = tf.cast(tf.equal(y, 1), tf.float32)
    # Only face images contribute the VAE (debiasing) term.
    total_loss = tf.reduce_mean(classification_loss + face_indicator * vae_loss)
    return total_loss, classification_loss
n_filters = 12   # base number of convolutional filters
latent_dim = 100 # dimensionality of the VAE latent space

def make_face_decoder_network():
    """Build the decoder: dense projection of the latent vector to a 4x4
    feature map, then four strided transposed convolutions up to a
    3-channel image (4 -> 8 -> 16 -> 32 -> 64 spatially)."""
    deconv = functools.partial(tf.keras.layers.Conv2DTranspose, padding='same', activation='relu')
    batch_norm = tf.keras.layers.BatchNormalization
    return tf.keras.Sequential([
        # Project and reshape the latent vector into a 4x4 spatial map.
        tf.keras.layers.Dense(units=4*4*6*n_filters, activation='relu'),
        tf.keras.layers.Reshape(target_shape=(4, 4, 6*n_filters)),
        batch_norm(),
        # Upscaling convolutions
        deconv(filters=4*n_filters, kernel_size=3, strides=2),
        batch_norm(),
        deconv(filters=2*n_filters, kernel_size=3, strides=2),
        batch_norm(),
        deconv(filters=1*n_filters, kernel_size=5, strides=2),
        deconv(filters=3, kernel_size=5, strides=2),  # RGB output
    ])
class DB_VAE(tf.keras.Model):
    """Debiasing Variational Autoencoder (DB-VAE).

    The encoder is the standard CNN classifier with 2*latent_dim + 1 outputs:
    one face/not-face logit, latent_dim latent means and latent_dim latent
    log-sigmas.  The decoder reconstructs the input image from a sampled
    latent vector.
    """
    def __init__(self, latent_dim):
        # BUG FIX: was `super().__init__(self)`, which passes the instance as
        # a positional argument to tf.keras.Model.__init__; the base class
        # must be initialized with no arguments here.
        super().__init__()
        self.latent_dim = latent_dim
        # 1 classification logit + latent_dim means + latent_dim log-sigmas.
        num_encoder_dims = 2*self.latent_dim + 1
        self.encoder = make_standard_classifier(num_encoder_dims)
        self.decoder = make_face_decoder_network()

    def encode(self, x):
        """Split the encoder output into (y_logit, z_mean, z_logsigma)."""
        encoder_output = self.encoder(x)
        y_logit = tf.expand_dims(encoder_output[:, 0], -1)
        z_mean = encoder_output[:, 1:self.latent_dim+1]
        z_logsigma = encoder_output[:, self.latent_dim+1:]
        return y_logit, z_mean, z_logsigma

    def reparameterize(self, z_mean, z_logsigma):
        """Sample z ~ q(z|x) via the reparameterization trick."""
        z = sampling(z_mean, z_logsigma)
        return z

    def decode(self, z):
        """Reconstruct an image from a latent sample z."""
        reconstruction = self.decoder(z)
        return reconstruction

    def call(self, x):
        """Full forward pass: returns (y_logit, z_mean, z_logsigma, recon)."""
        y_logit, z_mean, z_logsigma = self.encode(x)
        z = self.reparameterize(z_mean, z_logsigma)
        recon = self.decode(z)
        return y_logit, z_mean, z_logsigma, recon

    # NOTE(review): this overrides tf.keras.Model.predict with different
    # semantics (a single eager call returning logits) -- kept as-is because
    # the notebook relies on it.
    def predict(self, x):
        y_logit, z_mean, z_logsigma = self.encode(x)
        return y_logit
dbvae = DB_VAE(latent_dim)  # instantiate the debiasing VAE (latent_dim defined above)
def get_latent_mu(images, dbvae, batch_size=1024):
    """Encode all `images` in batches and return their latent means.

    Args:
        images: image array of shape (N, H, W, C); pixel values are divided
            by 255 here, so a 0-255 range is assumed.
        dbvae: model exposing `encode(batch)` -> (logit, mu, logsigma) and a
            `latent_dim` attribute.
        batch_size: number of images encoded per forward pass.

    Returns:
        float array of shape (N, dbvae.latent_dim) holding the latent means.
    """
    N = images.shape[0]
    # FIX: use dbvae.latent_dim instead of the module-level `latent_dim`
    # global, removing a hidden coupling to notebook state.
    mu = np.zeros((N, dbvae.latent_dim))
    for start_ind in range(0, N, batch_size):
        # FIX: upper bound was min(start_ind+batch_size, N+1); N is the
        # correct bound (slicing made the off-by-one harmless but misleading).
        end_ind = min(start_ind+batch_size, N)
        batch = (images[start_ind:end_ind]).astype(np.float32)/255.
        _, batch_mu, _ = dbvae.encode(batch)
        mu[start_ind:end_ind] = batch_mu
    return mu
### Resampling algorithm for DB-VAE ###
def get_training_sample_probabilities(images, dbvae, bins=10, smoothing_fac=1e-3):
    """Compute adaptive resampling probabilities for the training images.

    Images whose latent means fall into rare histogram bins of *any* latent
    dimension get a higher sampling probability, rebalancing training batches
    toward under-represented faces.

    Args:
        images: array of training images, shape (N, H, W, C).
        dbvae: DB_VAE model used to encode the images.
        bins: number of histogram bins per latent dimension.
        smoothing_fac: additive smoothing so empty bins do not blow up 1/p.

    Returns:
        Array of shape (N,) of sampling probabilities summing to 1.
    """
    print('Recomputing the sampling probabilities')
    mu = get_latent_mu(images, dbvae)  # (N, latent_dim) latent means
    training_sample_p = np.zeros(mu.shape[0])
    # FIX: iterate mu.shape[1] instead of the module-level `latent_dim`
    # global -- equivalent here, but removes the hidden dependency.
    for i in range(mu.shape[1]):
        latent_distribution = mu[:,i]
        # Histogram of this latent dimension across the dataset.
        hist_density, bin_edges = np.histogram(latent_distribution, density=True, bins=bins)
        # Extend the outer edges so every sample falls into some bin.
        bin_edges[0] = -float('inf')
        bin_edges[-1] = float('inf')
        bin_idx = np.digitize(latent_distribution, bin_edges)
        # Smooth, normalize, and invert the density: rare bins -> large p.
        hist_smoothed_density = hist_density + smoothing_fac
        hist_smoothed_density = hist_smoothed_density / np.sum(hist_smoothed_density)
        p = 1.0 / hist_smoothed_density[bin_idx-1]
        p = p / np.sum(p)
        # Keep, per sample, the largest probability over all latent dims.
        training_sample_p = np.maximum(p, training_sample_p)
    # Final normalization to a proper distribution.
    training_sample_p /= np.sum(training_sample_p)
    return training_sample_p
### Training DB-VAE ###
# Hyperparameters for DB-VAE training.
batch_size = 32
learning_rate = 5e-4
latent_dim = 100
num_epochs = 6
dbvae = DB_VAE(latent_dim)  # fresh model for debiased training
optimizer = tf.keras.optimizers.Adam(learning_rate)
@tf.function
def debiasing_train_step(x, y):
    """Run one Adam step on the DB-VAE; returns the total (scalar) loss."""
    with tf.GradientTape() as tape:
        y_logit, z_mean, z_logsigma, x_recon = dbvae(x)
        total_loss, _unused_class_loss = debiasing_loss_function(x, x_recon, y, y_logit, z_mean, z_logsigma)
    gradients = tape.gradient(total_loss, dbvae.trainable_variables)
    optimizer.apply_gradients(zip(gradients, dbvae.trainable_variables))
    return total_loss
all_faces = loader.get_all_train_faces()
if hasattr(tqdm, '_instances'): tqdm._instances.clear()  # clear stale progress bars
# Train the DB-VAE, recomputing the adaptive sampling probabilities once
# per epoch so rare faces are over-sampled.
for i in range(num_epochs):
    IPython.display.clear_output(wait=True)
    print('Starting epoch {}/{}'.format(i+1, num_epochs))
    p_faces = get_training_sample_probabilities(all_faces, dbvae)
    for j in tqdm(range(loader.get_train_size() // batch_size)):
        (x, y) = loader.get_batch(batch_size, p_pos=p_faces)
        loss = debiasing_train_step(x, y)
        # Plot progress every 500 steps
        if j % 500 == 0:
            mdl.util.plot_sample(x, y, dbvae)
# Compare the debiased model's predictions with the standard classifier on
# the four demographic test groups (`test_faces`, `keys`,
# `standard_classifier_probs` come from the earlier evaluation cell).
dbvae_logits = [dbvae.predict(np.array(x, dtype=np.float32)) for x in test_faces]
dbvae_probs = tf.squeeze(tf.sigmoid(dbvae_logits))
xx = np.arange(len(keys))
plt.bar(xx, standard_classifier_probs.numpy().mean(1), width=0.2, label='Standard CNN')
plt.bar(xx+0.2, dbvae_probs.numpy().mean(1), width=0.2, label='DB-VAE')
plt.xticks(xx, keys)
plt.title('Network predictions on test dataset')
plt.ylabel('Probability'); plt.legend(bbox_to_anchor=(1.04,1), loc='upper left');
dbvae.summary()
dbvae.encoder.summary()
dbvae.decoder.summary()
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/io/tutorials/genome"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/io/tutorials/genome.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/io/tutorials/genome.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/io/tutorials/genome.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
## 概要
このチュートリアルでは、一般的に使用されるゲノミクス IO 機能を提供する<code>tfio.genome</code>パッケージについて解説します。これは、いくつかのゲノミクスファイル形式を読み取り、データを準備するための一般的な演算を提供します (例: One-Hot エンコーディングまたは Phred クオリティスコアを確率に解析します)。
このパッケージは、[Google Nucleus](https://github.com/google/nucleus) ライブラリを使用して、主な機能の一部を提供します。
## セットアップ
```
try:
    # Select TF 2.x on Colab; a no-op (NameError-safe) elsewhere.
    %tensorflow_version 2.x
except Exception:
    pass
!pip install tensorflow-io
import tensorflow_io as tfio
import tensorflow as tf
```
## FASTQ データ
FASTQ は、基本的な品質情報に加えて両方の配列情報を保存する一般的なゲノミクスファイル形式です。
まず、サンプルの`fastq`ファイルをダウンロードします。
```
# Download some sample data:
!curl -OL https://raw.githubusercontent.com/tensorflow/io/master/tests/test_genome/test.fastq
```
### FASTQ データの読み込み
`tfio.genome.read_fastq`を使用してこのファイルを読みこみます (`tf.data` API は近日中にリリースされる予定です)。
```
# read_fastq returns one record per read: the base sequence strings and the
# Phred-encoded per-base quality strings.
fastq_data = tfio.genome.read_fastq(filename="test.fastq")
print(fastq_data.sequences)
print(fastq_data.raw_quality)
```
ご覧のとおり、返された`fastq_data`には fastq ファイル内のすべてのシーケンスの文字列テンソル (それぞれ異なるサイズにすることが可能) である`fastq_data.sequences`、および、シーケンスで読み取られた各塩基の品質に関する Phred エンコードされた品質情報を含む`fastq_data.raw_quality`が含まれています。
### 品質
関心がある場合は、ヘルパーオペレーションを使用して、この品質情報を確率に変換できます。
```
# Decode the Phred-encoded quality characters into per-base probabilities.
quality = tfio.genome.phred_sequences_to_probability(fastq_data.raw_quality)
# The result is ragged: one row per read, row length = read length.
print(quality.shape)
print(quality.row_lengths().numpy())
print(quality)
```
### One-Hot エンコーディング
また、One-Hot エンコーダ―を使用してゲノムシーケンスデータ (`A` `T` `C` `G`の塩基配列で構成される) をエンコードすることもできます。これに役立つ演算が組み込まれています。
```
# Show the built-in documentation for the one-hot encoding helper.
# (The original cell printed the identical docstring twice; once is enough.)
print(tfio.genome.sequences_to_onehot.__doc__)
```
| github_jupyter |
```
import netCDF4
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
import os
import glob
import pandas
import re
from scipy.interpolate import griddata
%matplotlib inline
plt.rcParams["figure.figsize"] = (10,6)
plt.rcParams.update({'font.size': 20})
data_path = "/path/netcdf/"
fname = "20200515.ssp585.TEST_SSP585_DEBUG.ne30_oECv3_ICG.grizzly.cam.h2.2015-01-01-00000.nc"
def print_data_info(data):
    """Dump a quick human-readable overview of an open NetCDF dataset.

    Prints the variable names, the dataset repr, every dimension item, the
    data model, all global attributes, and then a per-dimension and
    per-variable breakdown (name, size, dimensions).
    Based on http://schubert.atmos.colostate.edu/~cslocum/netcdf_example.html
    """
    print(data.variables.keys())
    print(data)
    for item in data.dimensions.items():
        print(item)
    print(data.data_model)
    # Global (file-level) attributes.
    for attr in data.ncattrs():
        print('\t%s:' % attr, repr(data.getncattr(attr)))
    print("NetCDF dimension information:")
    dim_names = list(data.dimensions)
    for name in dim_names:
        print("\tName:", name)
        print("\t\tsize:", len(data.dimensions[name]))
    print("NetCDF variable information:")
    # Skip coordinate variables that merely mirror a dimension.
    for name in data.variables:
        if name in dim_names:
            continue
        print('\tName:', name)
        print("\t\tdimensions:", data.variables[name].dimensions)
        print("\t\tsize:", data.variables[name].size)
def load_data(filename):
    """Open the NetCDF file at *filename* and return its Dataset handle."""
    return Dataset(filename)
## Load data
## Load data
data = load_data(data_path+fname)
#print_data_info(data)
# Number of time records stored in this file.
tsteps_per_month = len(data.variables['time'][:])
# Field to visualize — assumed to be a temperature level (TODO confirm).
var_name = 'T001'
# Single time index to plot.
tstep = 100
# Unstructured grid: lon/lat are 1-D arrays of cell locations.
lon_array = np.asarray(data.variables['lon'][:])
lat_array = np.asarray(data.variables['lat'][:])
uvel = np.asarray(data.variables[var_name][:])
# Keep just the chosen time step (shape: one value per grid cell).
uvel = np.asarray(uvel[tstep,:])
# Sanity-check the coordinate ranges.
print (np.min(lon_array),np.max(lon_array))
print (np.min(lat_array),np.max(lat_array))
import matplotlib.colors as matcolors
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.patches import Rectangle
# Build a custom diverging colormap from two slices of 'twilight_shifted':
# the lower half from [0.05, 0.45] and the upper half from [0.55, 1].
top = plt.get_cmap('twilight_shifted', 256)
top_cmp = matcolors.ListedColormap(top(np.linspace(0.55, 1, 256)))
bottom = cm.get_cmap('twilight_shifted', 256)
bottom_cmp = matcolors.ListedColormap(bottom(np.linspace(0.05,0.45,256)))
# NOTE(review): `white` is defined but never used below.
white = np.array(([256/256, 256/256, 256/256, 1]))
newcolors = np.vstack((bottom_cmp(np.linspace(0, 1, 256)),
                       top_cmp(np.linspace(0, 1, 256))))
newcmp = matcolors.ListedColormap(newcolors, name='OrangeBlue')
# Keep only the first 64% of the combined map.
newcmp2 = matcolors.ListedColormap(newcmp(np.linspace(0.0, 0.64, 512)))
## Render using python grid data
lon_dim = 360
lat_dim = 180
# Scattered sample locations for the interpolator.
points = np.column_stack((lon_array, lat_array))
## create 2D regular grid
grid_x, grid_y = np.mgrid[0:360:360j, -89:89:180j] ## grid for whole world
# Flatten the regular grid to a list of (lon, lat) query points,
# longitude-fastest so the later reshape to (lat, lon) is consistent.
cur_loc = np.zeros((lat_dim*lon_dim,2),dtype='float')
ind = 0
for j in range(lat_dim):
    for i in range(lon_dim):
        cur_loc[ind,:] = np.array([grid_x[i][j],grid_y[i][j]])
        ind = ind+1
print(len(points))
# Linearly interpolate the unstructured field onto the regular grid.
grid_z0 = griddata(points, uvel, cur_loc, method='linear')
grid_z0_2d = grid_z0.reshape((lat_dim,lon_dim))
plt.imshow(grid_z0_2d, origin='lower',cmap=plt.get_cmap(newcmp2))
plt.colorbar(orientation="vertical", shrink=0.74, label="Kelvin")
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plt.yticks(np.arange(0, 190, 90))
plt.savefig('out.png')
```
| github_jupyter |
# Formulas: Fitting models using R-style formulas
Since version 0.5.0, ``statsmodels`` allows users to fit statistical models using R-style formulas. Internally, ``statsmodels`` uses the [patsy](http://patsy.readthedocs.org/) package to convert formulas and data to the matrices that are used in model fitting. The formula framework is quite powerful; this tutorial only scratches the surface. A full description of the formula language can be found in the ``patsy`` docs:
* [Patsy formula language description](http://patsy.readthedocs.org/)
## Loading modules and functions
```
import numpy as np # noqa:F401 needed in namespace for patsy
import statsmodels.api as sm
```
#### Import convention
You can import explicitly from statsmodels.formula.api
```
from statsmodels.formula.api import ols
```
Alternatively, you can just use the `formula` namespace of the main `statsmodels.api`.
```
sm.formula.ols
```
Or you can use the following convention
```
import statsmodels.formula.api as smf
```
These names are just a convenient way to get access to each model's `from_formula` classmethod. See, for instance
```
sm.OLS.from_formula
```
All of the lower case models accept ``formula`` and ``data`` arguments, whereas upper case ones take ``endog`` and ``exog`` design matrices. ``formula`` accepts a string which describes the model in terms of a ``patsy`` formula. ``data`` takes a [pandas](https://pandas.pydata.org/) data frame or any other data structure that defines a ``__getitem__`` for variable names like a structured array or a dictionary of variables.
``dir(sm.formula)`` will print a list of available models.
Formula-compatible models have the following generic call signature: ``(formula, data, subset=None, *args, **kwargs)``
## OLS regression using formulas
To begin, we fit the linear model described on the [Getting Started](./regression_diagnostics.html) page. Download the data, subset columns, and list-wise delete to remove missing observations:
```
dta = sm.datasets.get_rdataset("Guerry", "HistData", cache=True)
df = dta.data[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna()
df.head()
```
Fit the model:
```
mod = ols(formula='Lottery ~ Literacy + Wealth + Region', data=df)
res = mod.fit()
print(res.summary())
```
## Categorical variables
Looking at the summary printed above, notice that ``patsy`` determined that elements of *Region* were text strings, so it treated *Region* as a categorical variable. `patsy`'s default is also to include an intercept, so we automatically dropped one of the *Region* categories.
If *Region* had been an integer variable that we wanted to treat explicitly as categorical, we could have done so by using the ``C()`` operator:
```
res = ols(formula='Lottery ~ Literacy + Wealth + C(Region)', data=df).fit()
print(res.params)
```
Patsy's more advanced features for categorical variables are discussed in: [Patsy: Contrast Coding Systems for categorical variables](./contrasts.html)
## Operators
We have already seen that "~" separates the left-hand side of the model from the right-hand side, and that "+" adds new columns to the design matrix.
## Removing variables
The "-" sign can be used to remove columns/variables. For instance, we can remove the intercept from a model by:
```
res = ols(formula='Lottery ~ Literacy + Wealth + C(Region) -1 ', data=df).fit()
print(res.params)
```
## Multiplicative interactions
":" adds a new column to the design matrix with the interaction of the other two columns. "*" will also include the individual columns that were multiplied together:
```
res1 = ols(formula='Lottery ~ Literacy : Wealth - 1', data=df).fit()
res2 = ols(formula='Lottery ~ Literacy * Wealth - 1', data=df).fit()
print(res1.params, '\n')
print(res2.params)
```
Many other things are possible with operators. Please consult the [patsy docs](https://patsy.readthedocs.org/en/latest/formulas.html) to learn more.
## Functions
You can apply vectorized functions to the variables in your model:
```
res = smf.ols(formula='Lottery ~ np.log(Literacy)', data=df).fit()
print(res.params)
```
Define a custom function:
```
def log_plus_1(x):
    """Natural log of ``x`` shifted up by one (vectorizes via numpy)."""
    return np.log(x) + 1.0
res = smf.ols(formula='Lottery ~ log_plus_1(Literacy)', data=df).fit()
print(res.params)
```
Any function that is in the calling namespace is available to the formula.
## Using formulas with models that do not (yet) support them
Even if a given `statsmodels` function does not support formulas, you can still use `patsy`'s formula language to produce design matrices. Those matrices
can then be fed to the fitting function as `endog` and `exog` arguments.
To generate ``numpy`` arrays:
```
import patsy
f = 'Lottery ~ Literacy * Wealth'
y,X = patsy.dmatrices(f, df, return_type='matrix')
print(y[:5])
print(X[:5])
```
To generate pandas data frames:
```
f = 'Lottery ~ Literacy * Wealth'
y,X = patsy.dmatrices(f, df, return_type='dataframe')
print(y[:5])
print(X[:5])
print(sm.OLS(y, X).fit().summary())
```
| github_jupyter |
```
! pip install fastcore --upgrade -qq
! pip install fastai --upgrade -qq
from fastai.vision.all import *
import fastai
from sys import exit
from operator import itemgetter
import re
import torch
from torch.nn import functional as F
import numpy as np
from time import process_time_ns, process_time
import gc
def scale(val, spec="#0.4G"):
    """Format *val* as a mantissa plus an SI prefix (e.g. 0.00123 -> '1.230m').

    Parameters
    ----------
    val : float
        Value to format. Zero and negative values are handled.
    spec : str
        Format spec applied to the scaled mantissa.

    Returns
    -------
    str
        Formatted mantissa followed by the SI prefix character; values in
        [1, 1000) get a plain space so columns line up.
    """
    PREFIXES = np.array([c for c in u"yzafpnµm kMGTPEZY"])
    if val == 0:
        # log10(0) is undefined; zero needs no scaling or prefix.
        return f"{0.0:{spec}} "
    # Pick the exponent (a multiple of 3) from the magnitude alone.  The
    # original multiplied by sign(val), which mis-scaled negative values.
    exp = np.int8(np.log10(np.abs(val)) // 3 * 3)
    val /= 10.**exp
    prefix = PREFIXES[exp//3 + len(PREFIXES)//2]
    return f"{val:{spec}}{prefix}"
def display_times(times):
    """Render a timing array as 'mean ± std, min, max' with SI-scaled units."""
    mean_s = scale(times.mean())
    std_s = scale(times.std())
    lo_s = scale(times.min())
    hi_s = scale(times.max())
    return f"{mean_s}s ± {std_s}s, {lo_s}s, {hi_s}s"
def profile_cpu(func, inp, n_repeat=100, warmup=10):
    """Time forward and backward passes of ``func`` on the CPU.

    Runs ``warmup`` untimed iterations followed by ``n_repeat`` timed ones
    and returns ``(forward_times, backward_times)`` as numpy arrays of
    elapsed seconds (measured with ``time.process_time``).
    """
    fwd, bwd = [], []
    for it in range(warmup + n_repeat):
        # Forward pass.
        t0 = process_time()
        func(inp)
        t1 = process_time()
        if it >= warmup:
            fwd.append(t1 - t0)
        # Backward pass: needs a fresh leaf tensor that requires grad.
        inp = inp.clone().requires_grad_()
        out = func(inp)
        loss = out.mean()
        t0 = process_time()
        torch.autograd.grad(loss, inp)
        t1 = process_time()
        if it >= warmup:
            bwd.append(t1 - t0)
    # Elapsed time is in seconds.
    return (np.array(fwd), np.array(bwd))
def profile_cuda(func, inp, n_repeat=100, warmup=10):
    """Time forward and backward passes of ``func`` on the GPU.

    Uses paired CUDA events around each pass; runs ``warmup`` untimed
    iterations, then ``n_repeat`` timed ones.  Returns
    ``(forward_times, backward_times)`` as numpy arrays of seconds.
    """
    fwd_times,bwd_times = [],[]
    for i in range(n_repeat + warmup):
        # Forward pass bracketed by CUDA events.
        start,end = (torch.cuda.Event(enable_timing=True) for _ in range(2))
        start.record()
        res = func(inp)
        end.record()
        # Event timing is asynchronous; synchronize before reading it.
        torch.cuda.synchronize()
        if i >= warmup: fwd_times.append(start.elapsed_time(end))
        # Backward pass on a fresh leaf tensor that requires grad.
        start,end = (torch.cuda.Event(enable_timing=True) for _ in range(2))
        inp = inp.clone().requires_grad_()
        y = func(inp)
        l = y.mean()
        start.record()
        _ = torch.autograd.grad(l, inp)
        end.record()
        torch.cuda.synchronize()
        if i >= warmup: bwd_times.append(start.elapsed_time(end))
    return (np.array(fwd_times)/1000, # Elapsed time is in ms
            np.array(bwd_times)/1000)
mish_pt = lambda x: x.mul(torch.tanh(F.softplus(x)))
def profile(device='cuda', n_repeat=100, warmup=10, size='(16,10,256,256)', baseline=True, types='all'):
    """Benchmark a set of activation functions and print timing summaries.

    Parameters
    ----------
    device : str
        'cuda' or 'cpu'.
    n_repeat, warmup : int
        Timed and untimed iterations per function.
    size : str
        Input tensor shape as a tuple/list literal, e.g. "(16,10,256,256)".
    baseline : bool
        Unused; kept for interface compatibility.
    types : str
        A torch dtype name, or 'all' for fp16/bf16/fp32/fp64.
    """
    if types == 'all':
        dtypes = [torch.float16, torch.bfloat16, torch.float32, torch.float64]
    else:
        # BUG FIX: the message was not an f-string, so it printed "{types}" literally.
        if not hasattr(torch, types): exit(f"Invalid data type, expected torch type or 'all', got {types}")
        dtypes = [getattr(torch, types)]
    dev = torch.device(type=device)
    sz_str = size.replace(' ','')
    if not re.match(r"[\(\[]\d+(,\d+)*[\)\]]", sz_str):
        exit("Badly formatted size, should be a list or tuple such as \"(1,2,3)\".")
    sz = list(map(int, sz_str[1:-1].split(',')))
    print(f"Profiling over {n_repeat} runs after {warmup} warmup runs.")
    for dtype in dtypes:
        if len(dtypes) > 1:
            print(f"Testing on {dtype}:")
            ind = ' '
        else: ind = ''
        inp = torch.randn(*sz, dtype=dtype, device=dev)
        funcs = {}
        funcs.update(relu = torch.nn.functional.relu,
                     leaky_relu = torch.nn.functional.leaky_relu,
                     softplus = torch.nn.functional.softplus,
                     silu_jit = fastai.layers.swish,
                     silu_native = torch.nn.functional.silu,
                     mish_naive = mish_pt,
                     mish_jit = fastai.layers.mish,
                     mish_native = torch.nn.functional.mish)
        # NOTE(review): MishCudaFunction is never imported in this notebook;
        # the 'cuda' path will NameError unless it is defined elsewhere.
        if device=='cuda': funcs['mish_cuda'] = MishCudaFunction.apply
        max_name = max(map(len, funcs.keys())) + 6
        for (name,func) in funcs.items():
            if device=='cuda':
                if (name=='mish_cuda') and (dtype==torch.bfloat16):
                    # BUG FIX: was ``pass``, which fell through and printed
                    # the previous function's stale timings for this combo.
                    continue
                else:
                    fwd_times,bwd_times = profile_cuda(func, inp, n_repeat, warmup)
                    torch.cuda.empty_cache()
            if device=='cpu':
                fwd_times,bwd_times = profile_cpu(func, inp, n_repeat, warmup)
                gc.collect()
            print(ind+(name+'_fwd:').ljust(max_name) + display_times(fwd_times))
            print(ind+(name+'_bwd:').ljust(max_name) + display_times(bwd_times))
```
# Haswell Benchmark
```
!cat /proc/cpuinfo
profile('cpu', types='float32')
profile('cpu', size='(64,10,256,256)', types='float32')
```
# Broadwell Benchmark
```
!cat /proc/cpuinfo
profile('cpu', types='float32')
profile('cpu', size='(64,10,256,256)', types='float32')
```
# Skylake Benchmark
```
!cat /proc/cpuinfo
profile('cpu', types='float32')
profile('cpu', size='(64,10,256,256)', types='float32')
```
| github_jupyter |
```
%%html
<style> table {float:left} </style>
!pip install torch tqdm lazyme nltk gensim
!python -m nltk.downloader punkt
import numpy as np
from tqdm import tqdm
import pandas as pd
from gensim.corpora import Dictionary
import torch
from torch import nn, optim, tensor, autograd
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
try:  # Use the default NLTK tokenizer.
    from nltk import word_tokenize, sent_tokenize
    # Testing whether it works.
    # Sometimes it doesn't work on some machines because of setup issues.
    word_tokenize(sent_tokenize("This is a foobar sentence. Yes it is.")[0])
# A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit.
except Exception:  # Fall back to a naive sentence tokenizer and toktok.
    import re
    from nltk.tokenize import ToktokTokenizer
    # See https://stackoverflow.com/a/25736515/610569
    sent_tokenize = lambda x: re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', x)
    # Use the toktok tokenizer that requires no dependencies.
    # (The original had a duplicated ``word_tokenize = word_tokenize = ...``.)
    toktok = ToktokTokenizer()
    word_tokenize = toktok.tokenize
```
# Classifying Toxic Comments
Lets apply what we learnt in a realistic task and **fight cyber-abuse with NLP**!
From https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/
> *The threat of abuse and harassment online means that many people stop <br>*
> *expressing themselves and give up on seeking different opinions. <br>*
> *Platforms struggle to effectively facilitate conversations, leading many <br>*
> *communities to limit or completely shut down user comments.*
The goal of the task is to build a model to detect different types of of toxicity:
- toxic
- severe toxic
- threats
- obscenity
- insults
- identity-based hate
In this part, you'll be munging the data as how I would be doing it at work.
Your task is to train a feed-forward network on the toxic comments given the skills we have accomplished thus far.
## Digging into the data...
If you're using linux/Mac you can use these bang commands in the notebook:
```
!pip3 install kaggle
!mkdir -p /content/.kaggle/
!echo '{"username":"natgillin","key":"54ae95ab760b52c3307ed4645c6c9b5d"}' > /content/.kaggle/kaggle.json
!chmod 600 /content/.kaggle/kaggle.json
!kaggle competitions download -c jigsaw-toxic-comment-classification-challenge
!unzip /content/.kaggle/competitions/jigsaw-toxic-comment-classification-challenge/*
```
Otherwise, download the data from https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/
```
df_train = pd.read_csv('D:/projects/tsundoku-master/data/toxic/train.csv')
df_train.head()
df_train[df_train['threat'] == 1]['comment_text']
df_train.iloc[3712]['comment_text']
df_train['comment_text_tokenzied'] = df_train['comment_text'].apply(word_tokenize)
# Just in case your Jupyter kernel dies, save the tokenized text =)
# To save your tokenized text you can do this:
import pickle
with open('train_tokenized_text.pkl', 'wb') as fout:
pickle.dump(df_train['comment_text_tokenzied'], fout)
# To load it back:
import pickle
with open('train_tokenized_text.pkl', 'rb') as fin:
df_train['comment_text_tokenzied'] = pickle.load(fin)
```
# How to get a one-hot?
There are many variants of how to get your one-hot embeddings from the individual columns.
This is one way:
```
label_column_names = "toxic severe_toxic obscene threat insult identity_hate".split()
df_train[label_column_names].values
torch.tensor(df_train[label_column_names].values).float()
# Convert one-hot to indices of the column.
print(np.argmax(df_train[label_column_names].values, axis=1))
class ToxicDataset(Dataset):
    """Torch dataset of tokenized comments with multi-label toxicity targets.

    Each item is a dict with the padded token-id vector ``x``, the label
    vector ``y`` and the (padded) length ``x_len``.
    """
    def __init__(self, texts, labels):
        self.texts = texts
        # Build the vocabulary once (the original built it twice) and
        # reserve the special indices 0 = <pad>, 1 = <unk>.
        self.vocab = Dictionary(texts)
        special_tokens = {'<pad>': 0, '<unk>':1}
        self.vocab.patch_with_special_tokens(special_tokens)
        self.vocab_size = len(self.vocab)
        # Vectorize labels
        self.labels = torch.tensor(labels)
        # Keep track of how many data points.
        self._len = len(texts)
        # Find the longest text in the data; every item is padded to it.
        self.max_len = max(len(txt) for txt in texts)
        self.num_labels = len(labels[0])

    def __getitem__(self, index):
        vectorized_sent = self.vectorize(self.texts[index])
        # To pad the sentence:
        # Pad left = 0; Pad right = max_len - len of sent.
        pad_dim = (0, self.max_len - len(vectorized_sent))
        vectorized_sent = F.pad(vectorized_sent, pad_dim, 'constant')
        return {'x':vectorized_sent,
                'y':self.labels[index],
                'x_len':len(vectorized_sent)}

    def __len__(self):
        return self._len

    def vectorize(self, tokens):
        """
        :param tokens: Tokens that should be vectorized.
        :type tokens: list(str)
        """
        # See https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2idx
        # Lets just cast list of indices into torch tensors directly =)
        return torch.tensor(self.vocab.doc2idx(tokens))

    def unvectorize(self, indices):
        """
        :param indices: Converts the indices back to tokens.
        :type tokens: list(int)
        """
        return [self.vocab[i] for i in indices]
# The six multi-label targets, in column order.
label_column_names = "toxic severe_toxic obscene threat insult identity_hate".split()
toxic_data = ToxicDataset(df_train['comment_text_tokenzied'],
                          df_train[label_column_names].values)
# Spot-check one item.
toxic_data[123]
batch_size = 20
# Shuffled mini-batches for training.
dataloader = DataLoader(dataset=toxic_data,
                        batch_size=batch_size, shuffle=True)
class FFNet(nn.Module):
    """Feed-forward multi-label classifier over padded token-id sequences.

    Each row of ``max_len`` token ids is embedded, flattened, pushed through
    one hidden ReLU layer, and squashed with a sigmoid so every label gets
    an independent probability in [0, 1].
    """
    def __init__(self, max_len, num_labels, vocab_size, embedding_size, hidden_dim):
        super(FFNet, self).__init__()
        # padding_idx=0 keeps the <pad> embedding at zero.
        self.embeddings = nn.Embedding(num_embeddings=vocab_size,
                                       embedding_dim=embedding_size,
                                       padding_idx=0)
        # The no. of inputs to the linear layer is the
        # no. of tokens in each input * embedding_size
        self.linear1 = nn.Linear(embedding_size*max_len, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, num_labels)

    def forward(self, inputs):
        # We want to flatten the inputs so that we get the matrix of shape
        # batch_size x (no. of tokens in each input * embedding_size).
        batch_size, max_len = inputs.shape
        embedded = self.embeddings(inputs).view(batch_size, -1)
        hid = F.relu(self.linear1(embedded))
        out = self.linear2(hid)
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        return torch.sigmoid(out)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
embedding_size = 100
learning_rate = 0.003
hidden_size = 100
# Binary cross-entropy: each toxicity label is an independent sigmoid
# output, so a multi-label (not softmax) loss is used.
criterion = nn.BCELoss()
# Hint: the CBOW model object you've created.
model = FFNet(toxic_data.max_len,
              len(label_column_names),
              toxic_data.vocab_size,
              embedding_size=embedding_size,
              hidden_dim=hidden_size).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
#model = nn.DataParallel(model)
losses = []
num_epochs = 50
for _e in range(num_epochs):
    epoch_loss = []
    for batch in tqdm(dataloader):
        x = batch['x'].to(device)
        y = batch['y'].to(device)
        # Zero gradient.
        optimizer.zero_grad()
        # Feed forward.
        predictions = model(x)
        loss = criterion(predictions, y.float())
        # Backprop and parameter update.
        loss.backward()
        optimizer.step()
        epoch_loss.append(float(loss))
    # Report and record the mean loss for this epoch.
    print(sum(epoch_loss)/len(epoch_loss))
    losses.append(sum(epoch_loss)/len(epoch_loss))
def predict(text):
    """Return a 0/1 decision for each of the six toxicity labels of *text*.

    Relies on the module-level ``toxic_data``, ``model`` and ``device``.
    """
    # Vectorize and Pad.
    vectorized_sent = toxic_data.vectorize(word_tokenize(text))
    pad_dim = (0, toxic_data.max_len - len(vectorized_sent))
    vectorized_sent = F.pad(vectorized_sent, pad_dim, 'constant')
    # Forward Propagation.
    # Unsqueeze because model is expecting `batch_size` x `sequence_len` shape.
    outputs = model(vectorized_sent.unsqueeze(0).to(device)).squeeze()
    # To get the boolean output, we check if outputs are > 0.5
    return [int(l > 0.5) for l in outputs]
    # What happens if you use torch.max instead? =)
    ##return label_column_names[int(torch.max(outputs, dim=1).indices)]
text = "This is a nice message."
print(label_column_names)
predict(text)
```
| github_jupyter |
# Lesson 4 Practice: Pandas Part 2
Use this notebook to follow along with the lesson in the corresponding lesson notebook: [L04-Pandas_Part2-Lesson.ipynb](./L04-Pandas_Part2-Lesson.ipynb).
## Instructions
Follow along with the teaching material in the lesson. Throughout the tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: . You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. For each task, use the cell below it to write and test your code. You may add additional cells for any task as needed or desired.
## Task 1a: Setup
- import pandas
- re-create the `df` data frame
- re-create the `iris_df` data frame
```
import pandas as pd
import numpy as np
df = pd.DataFrame(
{'alpha': [0, 1, 2, 3, 4],
'beta': ['a', 'b', 'c', 'd', 'e']})
df
iris_df = pd.read_csv('data/iris.csv')
iris_df
```
## Task 2a: Inserting Columns
+ Create a copy of the `df` dataframe.
+ Add a new column named "delta" to the copy that consists of random numbers.
```
df['delta'] = np.random.random([5])
df
```
## Task 3a: Missing Data
+ Create two new copies of the `df` dataframe:
+ Add a new column to both that has missing values.
+ In one copy, replace missing values with a value of your choice.
+ In the other copy, drop rows with `NaN` values.
+ Print both arrays to confirm.
```
# Add a column containing a NaN so the missing-data helpers have work to do.
df['gamma'] = pd.Series([2,5,7, np.nan, 8])
df
# Copy with NaNs replaced by a fixed value.
a = df.fillna(100)
a
df['theta'] = pd.Series([1,6,9, np.nan, 8])
df
# Copy with any row containing a NaN removed.
b = df.dropna()
b
```
## Task 4a: Operations
<span style="float:right; margin-left:10px; clear:both;"></span>
View the [Computational tools](https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html) and [statistical methods](https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#method-summary) documentation.
Using the list of operational functions choose five functions to use with the iris data frame.
```
# ``species`` is a string column: pandas >= 2.0 raises a TypeError when a
# numeric aggregation hits it, so restrict the statistics to numeric columns.
iris_df.mean(numeric_only=True)
iris_df.mean(axis=1, numeric_only=True)
iris_df.min(axis=1, numeric_only=True)
iris_df.std(axis=1, numeric_only=True)
iris_df.var(axis=1, numeric_only=True)
# count works per-row regardless of dtype.
iris_df.count(axis=1)
```
## Task 4b: Apply
Practice using `apply` on either the `df` or `iris_df` data frames using any two functions of your choice other than `print`, `type`, and `np.sum`.
```
help(df.apply)
df.apply(np.sum)
iris_df.apply(np.sum)
```
## Task 4c. Occurrences
Identify the number of occurrences for each species (virginica, versicolor, setosa) in the `iris_df` object. *Hint*: the `value_counts` function only works on a `pd.Series` object, not on the full data frame.
```
pd.value_counts(iris_df['species'])
```
## Task 5a: String Methods
+ Create a list of five strings that represent dates in the form YYYY-MM-DD (e.g. 2020-02-20 for Feb 20th, 2020).
+ Add this list of dates as a new column in the `df` dataframe.
+ Now split the date into 3 new columns with one column representing the year, another the month and another they day.
+ Combine the values from columns `alpha` and `beta` into a new column where the values are separated with a colon.
```
# ``list`` is a builtin; shadowing it would break later calls like list(...).
dates = pd.date_range('20210301', periods=5)
dates
df['date'] = dates
df
# Split the ISO date string into its three components.
df[['year', 'month', 'day']] = df['date'].astype(str).str.split('-',expand=True)
df
# Join alpha and beta with a colon separator.
df['combined'] = df['alpha'].astype(str) + ':' + df['beta']
df
```
## Task 6a: Concatenation by Rows
+ Create the following dataframe
```Python
df1 = pd.DataFrame(
{'alpha': [0, 1, 2, 3, 4],
'beta': ['a', 'b', 'c', 'd', 'e']}, index = ['I1', 'I2' ,'I3', 'I4', 'I5'])
```
+ Create a new dataframe named `df2` with column names "delta" and "gamma" that contains 5 rows with some index names that overlap with the `df1` dataframe and some that do not.
+ Concatenate the two dataframes by rows and print the result.
+ You should see the two have combined one after the other, but there should also be missing values added.
+ Explain why there are missing values.
```
df1 = pd.DataFrame(
{'alpha': [0, 1, 2, 3, 4],
'beta': ['a', 'b', 'c', 'd', 'e']}, index = ['I1', 'I2' ,'I3', 'I4', 'I5'])
df1
df2 = pd.DataFrame(
{'delta': [0, 3, 5, 7, 9],
'gamma': ['e', 'i', 'a', 's', 'p']}, index = ['I4', 'I5', 'I6', 'I7' ,'I8',])
df2
df3 = pd.concat([df1, df2], axis = 0)
df3 # missing values are there because all the labels are not defined in two data frames
```
## Task 6b: Concatenation by Columns
Using the same dataframes, df1 and df2, from Task 6a practice:
+ Concatenate the two by columns
+ Add a "delta" column to `df1` and concatenate by columns such that there are 5 columns in the merged dataframe.
+ Respond in writing to this question (add a new 'raw' cell to contain your answer). What will happen if using you had performed an inner join while concatenating?
+ Try the concatenation with the inner join to see if you are correct.
```
df4 = pd.concat([df1, df2], axis = 1)
df4
df1['delta'] = ['0', '3', '5', '7', '9']
df1
df5 = pd.concat([df1, df2], axis = 1)
df5
df5 = pd.concat([df1, df2], axis = 1, join = 'inner')
df5
```
#### Task 6c: Concat and append data frames
<span style="float:right; margin-left:10px; clear:both;"></span>
+ Create a new 5x5 dataframe full of random numbers.
+ Create a new 5x10 dataframe full of 1's.
+ Append one to the other and print it.
+ Append a single Series of zeros to the end of the appended dataframe.
```
# 5x5 dataframe of random numbers.
a = pd.DataFrame(np.random.random([5,5]))
a
# 5x10 dataframe of ones.
b = pd.DataFrame(np.ones([5,10]))
b
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
c = pd.concat([a, b], ignore_index=True)
c
d = pd.Series(np.random.random(10))
d
# Append the Series as one extra row (transpose its single-column frame).
pd.concat([c, d.to_frame().T], ignore_index=True)
```
## Task 6d: Grouping
Demonstrate a `groupby`.
+ Create a new column with the label "region" in the iris data frame. This column will indicates geographic regions of the US where measurments were taken. Values should include: 'Southeast', 'Northeast', 'Midwest', 'Southwest', 'Northwest'. Use these randomly.
+ Use `groupby` to get a new data frame of means for each species in each region.
+ Add a `dev_stage` column by randomly selecting from the values "early" and "late".
+ Use `groupby` to get a new data frame of means for each species,in each region and each development stage.
+ Use the `count` function (just like you used the `mean` function) to identify how many rows in the table belong to each combination of species + region + developmental stage.
```
# np.random.choice with no ``size`` returns ONE region that gets broadcast to
# every row; pass the row count so each measurement gets its own random
# region, as the task requires.  (The original repeated the same broken
# assignment five times.)
regions = ['Southeast', 'Northeast', 'Midwest', 'Southwest', 'Northwest']
iris_df['region'] = np.random.choice(regions, iris_df.shape[0])
iris_df
# Mean of each numeric column per region.
groups = iris_df.groupby('region')
groups.mean()
# One random developmental stage per row (note the explicit size argument).
iris_df['dev_stage'] = np.random.choice(['early', 'late'], iris_df.shape[0])
iris_df.head()
# Group means and row counts per species/region/stage combination.
groups = iris_df.groupby(['species', 'region', 'dev_stage'])
groups.mean()
groups.count()
```
| github_jupyter |
### 基于数据集多重抽样的分类器
**元算法(meta-algorithm)** 是对其他算法进行组合的一种方式。Adaboosting算法是最流行的元算法。
将不同的分类器组合起来,这种组合结果被称为**集成方法(ensemble method)** 或者**元算法(meta-algorithm)** 。使用集成方法时会有多种形式:可以是不同算法的集成,也可以是同一算法在不同设置下的集成,还可以是数据集不同部分分配给不同分类器之后的集成。
AdaBoost算法的优缺点
- 优点:泛化错误低,易编码,可以应用在大部分分类器上,无参数调整。
- 缺点:对离群点敏感。
- 适用数据类型:数值型和标称型。
#### bagging:基于数据随机重抽样的分类器构建算法
**自举汇聚法(bootstrap aggregating)** 也称为bagging方法,是在从原始数据集选择S次后得到S个新数据集的一种技术。新数据集和原数据集的大小相等。每个数据集都是**通过在原始数据集中随机选择一个样本来进行替换**而得到的。这里的替换意味着可以**多次选择同一个样本** 。
这个性质允许新的数据集中**可以有重复的值** 。更先进的bagging方法比如**随机森林(random forest)** 。
#### boosting
boosting是一种与bagging类似的技术。不管是在boosting还是在bagging中,所使用的多个分类器的类型都是一致的。但boosting不同的分类器是通过**串行训练** 获得的,每个新分类器都根据已训练出的分类器的性能来进行训练。boosting是通过**集中关注被已有分类器错分的那些数据**来获得新的分类器。
boosting分类的结果是基于所有分类器的加权求和结果的,因此boosting与bagging不一样。**bagging中分类器的权重是相等的,boosting中分类器的权重并不相等**,每个权重代表的是其对应分类器在上一轮迭代中的成功度。
AdaBoosting的一般流程
(1)收集数据:可以使用任意方法。
(2)准备数据:依赖于所使用的弱分类器类型,本章使用的是单层决策树,这种分类器可以处理任何数据类型。当然也可以使用任意分类器作为弱分类器,第2章到第6章中的任一分类器都可以充当弱分类器。作为弱分类器,简单分类器的效果更好。
(3)分析数据:可以使用任意方法。
(4)训练算法:AdaBoost的大部分时间都用在训练上,分类器将多次在同一个数据集上训练弱分类器。
(5)测试算法:计算分类器的错误率。
(6)使用算法:同SVM一样,AdaBoost预测两个类别中的一个。如果想把它应用到多个类别的场景,那么就要像多类SVM中的做法一样对AdaBoost进行修改。
### 训练算法:基于错误提升分类器的性能
AdaBoost是adaptive boosting(自适应boosting)的缩写,其运行过程如下:
训练数据中的每个样本,并赋予其一个权重,这些权重构成了向量D。一开始,这些权重都初始化成相等值。首先在训练数据上训练出一个弱分类器并计算该分类的错误率,然后在同一数据集上再次训练弱分类器,在分类器的第二次训练中,将会重新调整每个样本的权重,其中第一次分对的样本的权重将会降低,而第一次分错的样本权重将会提高。
为了从所有弱分类器中得到最终的分类结果,AdaBoost为每个分类器都分配了一个权重值$\alpha$,这些$\alpha$值是**基于每个弱分类器的错误率**进行计算的。
其中,错误率$\varepsilon$计算公式为
$$\varepsilon = \frac{未正确分类的样本数目}{所有样本数目}$$
$\alpha$的计算公式为:
$$\alpha = \frac{1}{2}ln \left( \frac{1 - \varepsilon}{\varepsilon} \right)$$
计算出$\alpha$值之后,可以对权重向量D进行更新,以使得那些**正确分类的样本的权重降低**而**错分样本的权重升高**。
如果某个样本被正确分类,那么该样本的权重更改为:
$$D_{i}^{(t+1)} = \frac{D_{i}^{(t)}e^{-\alpha}}{Sum(D)}$$
如果某个样本未被正确分类,那么该样本的权重更改为:
$$D_{i}^{(t+1)} = \frac{D_{i}^{(t)}e^{\alpha}}{Sum(D)}$$
在计算出D之后,AdaBoost又进入下一轮迭代。AdaBoost算法会不断重复训练和调整权重的过程,直到训练错误率为0或者弱分类器的数目达到用户的指定值为止。
### 基于单层决策树构建弱分类器
本章采用基于单个特征的单层决策树来做决策。由于这棵树只有一次分裂过程,因此只是一个树桩。
```
%run simpleDataPlot.py
import adaboost
reload(adaboost)
datMat, classLabels = adaboost.loadSimpleData()
```
构建单层决策树来作为弱分类器的伪代码如下:
将最小错误率minError设置为$+\infty$
对数据集中的每一个特征(第一层循环):
对每个步长(第二层循环):
对每个不等号(第三层循环):
建立一棵单层决策树并利用加权数据集对它进行测试
如果错误率低于minError,则将当前单层决策树设为最佳单层决策树
返回最佳单层决策树
```
import numpy as np
reload(adaboost)
D = np.mat(np.ones((5,1)) / 5)
adaboost.buildStump(datMat, classLabels, D)
reload(adaboost)
classifierArray, aggClassEst = adaboost.adaBoostTrainDS(datMat, classLabels, 9)
```
adaboost算法中的向量D非常重要,一开始这些权重都赋予了相同的值。在后续迭代中,adaboost算法会在增加错分数据的权重的同时,降低正确分类数据的权重。D是一个**概率分布向量** , 因此其所有元素之和为1.0。所以一开始所有元素都被初始化成1/m。
```
classifierArray
```
代码中对于D权重向量的更新如下:
expon = multiply(-1*alpha*mat(classLabels).T, classEst)
D = multiply(D, exp(expon))
D = D/D.sum()
这段代码表明了数据的classLabel与单层决策树分类的出来的classEst同号时,即为分类正确,得到的指数位置为$-\alpha$,否则分类错误则为$\alpha$。
numpy.multiply函数是**element-wise product**,表示对应元素相乘。代码中为两个向量的对应元素相乘,这里不是线性代数中的点积。
### 测试算法:基于AdaBoost的分类
```
reload(adaboost)
datArr, labelArr = adaboost.loadSimpleData()
classifierArr, aggClassEst = adaboost.adaBoostTrainDS(datArr, labelArr, 30)
classifierArr
adaboost.adaClassify([0, 0], classifierArr)
```
可以发现,随着迭代进行,数据点[0,0]的分类结果越来越强。
```
adaboost.adaClassify([5, 5], classifierArr)
reload(adaboost)
# Train a 10-round AdaBoost ensemble on the horse-colic training set.
datArr, labelArr = adaboost.loadDataSet('horseColicTraining2.txt')
classifierArray,aggClassEst = adaboost.adaBoostTrainDS(datArr, labelArr, 10)
# Evaluate on the 67-row test set.
testArr, testLabelArr = adaboost.loadDataSet('horseColicTest2.txt')
prediction10 = adaboost.adaClassify(testArr, classifierArray)
errArr = np.mat(np.ones((67,1)))
# NOTE(review): bare ``mat`` relies on a star-import elsewhere (e.g. the
# earlier %run); with only ``import numpy as np`` this should be np.mat.
errArr[prediction10!=mat(testLabelArr).T].sum()
errRate = errArr[prediction10!=mat(testLabelArr).T].sum() / 67
errRate
```
我们可以把弱分类器想成是SVM中的一个核函数,也可以按照最大化某个最小间隔的方式重写AdaBoost算法。
### 非均衡分类问题
在《机器学习实战》这本书中的算法都是基于**错误率**来衡量分类器任务的成功程度的。错误率是指**在所有测试样例中错分样例的比例** 。
我们经常使用**混淆矩阵(confusion matrix)** 来作为帮助人们更好地了解分类错误的工具。
|||||
|--|--|--|--|
|||预测结果||
|||+1|-1|
|真实结果|+1|真正例,真阳(TP)|伪反例,假阴(FN)|
|真实结果|-1|伪正例,假阳(FP)|真反例,真阴(TN)|
$$\mathbf{准确率(Precision)} = \frac{TP}{TP+FP}$$
$$\mathbf{召回率(Recall)} = \frac{TP}{TP+FN}$$
准确率表示的是预测为正例的样本的真正正例的比例。召回率表示的是预测为正例的样本占所有真实正例的比例。
准确率衡量的可以是预测的癌症病人中有些人不是真的得了癌症,即真的得了癌症的病人比例。召回率是一些真的得了癌症的病人没有预测出来。这个就很可怕了,所以对于癌症预测一般要提高召回率。
另一个用于度量分类中非均衡性的工具是**ROC曲线(ROC curve)** , ROC代表接收者操作特征(receiver operating characteristic)。ROC曲线不仅可以用于比较分类器,也可以基于成本效益(cost-versus-benefit)分析来做出决策。
ROC曲线的横坐标为假阳率(即FP/(FP+TN)),纵坐标为真阳率(TP/(TP+FN))。
理想情况下,**最佳的分类器应该尽可能位于左上角**。即在假阳率很低的情况下,真阳率很高。
对于不同的ROC曲线,比较曲线下的面积(Area Unser the Curve, AUC)。AUC给出的是分类器的平均性能值,当然它不能完全代替对整条曲线的观察。完美分类器的AUC为1.0,随机猜测的AUC为0.5。
```
reload(adaboost)
datArr, labelArr = adaboost.loadDataSet('horseColicTraining2.txt')
classifierArray, aggClassEst = adaboost.adaBoostTrainDS(datArr, labelArr, 10)
adaboost.plotROC(aggClassEst.T, labelArr)
```
还有一些处理非均衡数据的方法,比如**代价敏感学习(cost-sensitive learning)** 。此外,还可以对训练数据进行改造,比如**欠抽样(undersampling)** 或者**过抽样(oversampling)** 来实现。过抽样意味着复制样例,而欠抽样意味着删除样例。
| github_jupyter |
```
import keras
keras.__version__
```
# Understanding recurrent neural networks
이 노트북은 [케라스 창시자에게 배우는 딥러닝](https://tensorflow.blog/케라스-창시자에게-배우는-딥러닝/) 책의 6장 2절의 코드 예제입니다. 책에는 더 많은 내용과 그림이 있습니다. 이 노트북에는 소스 코드에 관련된 설명만 포함합니다. 이 노트북의 설명은 케라스 버전 2.2.2에 맞추어져 있습니다. 케라스 최신 버전이 릴리스되면 노트북을 다시 테스트하기 때문에 설명과 코드의 결과가 조금 다를 수 있습니다.
---
[...]
## 케라스의 순환 층
넘파이로 간단하게 구현한 과정이 실제 케라스의 `SimpleRNN` 층에 해당합니다:
```
from keras.layers import SimpleRNN
```
`SimpleRNN`이 한 가지 다른 점은 넘파이 예제처럼 하나의 시퀀스가 아니라 다른 케라스 층과 마찬가지로 시퀀스 배치를 처리한다는 것입니다. 즉, `(timesteps, input_features)` 크기가 아니라 `(batch_size, timesteps, input_features)` 크기의 입력을 받습니다.
케라스에 있는 모든 순환 층과 동일하게 `SimpleRNN`은 두 가지 모드로 실행할 수 있습니다. 각 타임스텝의 출력을 모은 전체 시퀀스를 반환하거나(크기가 `(batch_size, timesteps, output_features)`인 3D 텐서), 입력 시퀀스에 대한 마지막 출력만 반환할 수 있습니다(크기가 `(batch_size, output_features)`인 2D 텐서). 이 모드는 객체를 생성할 때 `return_sequences` 매개변수로 선택할 수 있습니다. 예제를 살펴보죠:
```
from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN
# return_sequences defaults to False: only the final timestep's output is returned.
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.summary()
# With return_sequences=True the full (batch, timesteps, 32) sequence is kept.
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.summary()
```
네트워크의 표현력을 증가시키기 위해 여러 개의 순환 층을 차례대로 쌓는 것이 유용할 때가 있습니다. 이런 설정에서는 중간 층들이 전체 출력 시퀀스를 반환하도록 설정해야 합니다:
```
# Stacked recurrent layers: intermediate layers must return full sequences.
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32)) # Only the top layer returns just its last output.
model.summary()
```
이제 IMDB 영화 리뷰 분류 문제에 적용해 보죠. 먼저 데이터를 전처리합니다:
```
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000 # number of words to use as features (vocabulary size)
maxlen = 500 # length of text to use (only the max_features most frequent words are kept)
batch_size = 32
print('데이터 로딩...')
(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), '훈련 시퀀스')
print(len(input_test), '테스트 시퀀스')
print('시퀀스 패딩 (samples x time)')
# Pad/truncate every review to exactly maxlen timesteps.
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train 크기:', input_train.shape)
print('input_test 크기:', input_test.shape)
```
`Embedding` 층과 `SimpleRNN` 층을 사용해 간단한 순환 네트워크를 훈련시켜 보겠습니다:
```
from keras.layers import Dense
# Embedding -> single SimpleRNN -> sigmoid binary classifier.
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
# Hold out 20% of the training data for validation.
history = model.fit(input_train, y_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)
```
이제 훈련과 검증의 손실과 정확도를 그래프로 그립니다:
```
import matplotlib.pyplot as plt
# Pull the per-epoch metrics recorded by model.fit.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# Accuracy: dots = training, solid line = validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
# Loss curves on a separate figure.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
3장에서 이 데이터셋을 사용한 첫 번째 모델에서 얻은 테스트 정확도는 87%였습니다. 안타깝지만 간단한 순환 네트워크는 이 기준 모델보다 성능이 높지 않습니다(85% 정도의 검증 정확도를 얻었습니다). 이런 원인은 전체 시퀀스가 아니라 처음 500개의 단어만 입력에 사용했기 때문입니다. 이 RNN은 기준 모델보다 얻은 정보가 적습니다. 다른 이유는 `SimpleRNN`이 텍스트와 같이 긴 시퀀스를 처리하는데 적합하지 않기 때문입니다. 더 잘 작동하는 다른 순환 층이 있습니다. 조금 더 고급 순환 층을 살펴보죠.
[...]
## 케라스를 사용한 LSTM 예제
이제 실제적인 관심사로 이동해 보죠. LSTM 층으로 모델을 구성하고 IMDB 데이터에서 훈련해 보겠습니다(그림 6-16과 6-17 참조). 이 네트워크는 조금 전 `SimpleRNN`을 사용했던 모델과 비슷합니다. LSTM 층은 출력 차원만 지정하고 다른 (많은) 매개변수는 케라스의 기본값으로 남겨 두었습니다. 케라스는 좋은 기본값을 가지고 있어서 직접 매개변수를 튜닝하는 데 시간을 쓰지 않고도 거의 항상 어느정도 작동하는 모델을 얻을 수 있습니다.
```
from keras.layers import LSTM
# Same architecture as before, with the recurrent layer swapped for an LSTM.
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(input_train, y_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)
# Plot training/validation accuracy and loss, as in the SimpleRNN run.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
| github_jupyter |
# Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
* [Pix2Pix](https://affinelayer.com/pixsrv/)
* [CycleGAN](https://github.com/junyanz/CycleGAN)
* [A whole list](https://github.com/wiseodd/generative-models)
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks _as close as possible_ to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.

The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
```
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST via the TF1 convenience wrapper.
mnist = input_data.read_data_sets('MNIST_data')
```
## Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input `inputs_real` and the generator input `inputs_z`. We'll assign them the appropriate sizes for each of the networks.
```
def model_inputs(real_dim, z_dim):
    """Build the two graph inputs: real images for D, latent vectors for G."""
    # Placeholder fed with flattened real images, shape (batch, real_dim).
    real_input = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
    # Placeholder fed with random latent vectors, shape (batch, z_dim).
    latent_input = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return real_input, latent_input
```
## Generator network

Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
#### Variable Scope
Here we need to use `tf.variable_scope` for two reasons. Firstly, we're going to make sure all the variable names start with `generator`. Similarly, we'll prepend `discriminator` to the discriminator variables. This will help out later when we're training the separate networks.
We could just use `tf.name_scope` to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also _sample from it_ as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the `reuse` keyword for `tf.variable_scope` to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use `tf.variable_scope`, you use a `with` statement:
```python
with tf.variable_scope('scope_name', reuse=False):
# code here
```
Here's more from [the TensorFlow documentation](https://www.tensorflow.org/programmers_guide/variable_scope#the_problem) to get another look at using `tf.variable_scope`.
#### Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can take the outputs from a linear fully connected layer and pass them to `tf.maximum`. Typically, a parameter `alpha` sets the magnitude of the output for negative values. So, the output for negative input (`x`) values is `alpha*x`, and the output for positive `x` is `x`:
$$
f(x) = max(\alpha * x, x)
$$
#### Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
```
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    """Map a latent batch ``z`` to fake images with tanh-scaled pixels.

    All variables live under the 'generator' scope so they can be selected
    (and reused across calls) independently of the discriminator.
    """
    with tf.variable_scope('generator', reuse=reuse):
        # One hidden layer followed by a hand-rolled leaky ReLU.
        hidden = tf.layers.dense(z, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)
        # Linear output layer; tanh squashes pixels into [-1, 1].
        logits = tf.layers.dense(hidden, out_dim, activation=None)
        return tf.tanh(logits)
```
## Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
```
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    """Score a batch ``x`` as real/fake; returns (sigmoid output, raw logits).

    ``reuse=True`` shares weights between the real- and fake-data passes.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden = tf.layers.dense(x, n_units, activation=None)
        # Leaky ReLU keeps a small gradient for negative activations.
        hidden = tf.maximum(alpha * hidden, hidden)
        logits = tf.layers.dense(hidden, 1, activation=None)
        probability = tf.sigmoid(logits)
        return probability, logits
```
## Hyperparameters
```
# Size of input image to discriminator (28*28 MNIST images, flattened)
input_size = 784
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing factor: real labels become 1 - smooth
smooth = 0.1
```
## Build network
Now we're building the network from the functions defined above.
First is to get our inputs, `input_real, input_z` from `model_inputs` using the sizes of the input and z.
Then, we'll create the generator, `generator(input_z, input_size)`. This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as `g_model`. So the real data discriminator is `discriminator(input_real)` while the fake discriminator is `discriminator(g_model, reuse=True)`.
```
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Build the model
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Two discriminator passes share weights: reuse=True on the fake-data pass.
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
```
## Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_loss_real + d_loss_fake`. The losses will be sigmoid cross-entropies, which we can get with `tf.nn.sigmoid_cross_entropy_with_logits`. We'll also wrap that in `tf.reduce_mean` to get the mean for all the images in the batch. So the losses will look something like
```python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
```
For the real image logits, we'll use `d_logits_real` which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter `smooth`. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like `labels = tf.ones_like(tensor) * (1 - smooth)`
The discriminator loss for the fake data is similar. The logits are `d_logits_fake`, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using `d_logits_fake`, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
```
# Calculate losses.
# D should score real images as (smoothed) ones ...
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                            labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# ... and fake images as zeros.  Bug fix: the zero labels must be shaped
# like the *fake* logits; the original used d_logits_real, which only
# worked because the two batches happen to share a shape.
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                            labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
# G is rewarded when D mistakes its fakes for real (labels of ones).
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                            labels=tf.ones_like(d_logits_fake)))
```
## Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use `tf.trainable_variables()`. This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with `generator`. So, we just need to iterate through the list from `tf.trainable_variables()` and keep the variables whose names start with `generator`. Each variable object has an attribute `name` which holds the name of the variable as a string (`var.name == 'weights_0'` for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with `discriminator`.
Then, in the optimizer we pass the variable lists to `var_list` in the `minimize` method. This tells the optimizer to only update the listed variables. Something like `tf.train.AdamOptimizer().minimize(loss, var_list=var_list)` will only train the variables in `var_list`.
```
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
# var_list restricts each optimizer to its own network's weights.
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
```
## Training
```
batch_size = 100
epochs = 100
samples = []   # generator samples collected per epoch, for later viewing
losses = []    # (d_loss, g_loss) per epoch
# Only save generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)
            # Get images, reshape and rescale to pass to D
            batch_images = batch[0].reshape((batch_size, 784))
            # Rescale [0, 1] pixels to [-1, 1] to match G's tanh output.
            batch_images = batch_images*2 - 1
            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            # Run optimizers: one D step, then one G step, per batch.
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
        # At the end of each epoch, get the losses and print them out
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})
        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))
        # Sample from generator as we're training for viewing afterwards
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(
            generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
            feed_dict={input_z: sample_z})
        samples.append(gen_samples)
        saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
```
## Training loss
Here we'll check out the training losses for the generator and discriminator.
```
fig, ax = plt.subplots()
# losses was collected as (d_loss, g_loss) pairs, one per epoch.
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
```
## Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
```
def view_samples(epoch, samples):
    """Draw the 16 generator samples from ``samples[epoch]`` in a 4x4 grid."""
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
    for axis, image in zip(axes.flatten(), samples[epoch]):
        # Hide tick marks; the digits speak for themselves.
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        axis.imshow(image.reshape((28,28)), cmap='Greys_r')
    return fig, axes
# Load samples from generator taken while training
# (the pickle written at the end of the training loop).
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)
```
These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.
```
_ = view_samples(-1, samples)
```
Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
```
# Show generated digits as training progressed: one row per sampled
# epoch (every len(samples)/rows-th entry), six images per row.
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
```
It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.
## Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
```
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    # Restore the generator weights saved during training.
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    # Fresh latent vectors -> brand-new generated digits.
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    gen_samples = sess.run(
        generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
        feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
%matplotlib inline
class NNModel:
    """Fully-connected feed-forward network trained with full-batch gradient descent.

    ``args`` is a list of ``(n_units, activation_name)`` tuples, one per layer;
    the backward pass assumes ReLU hidden layers and a sigmoid output with
    cross-entropy loss.  Data is column-major: inputs have shape (n_features, m).
    """

    def __init__(self, learning_rate, n_iter, args):
        self.learning_rate = learning_rate
        self.args = args
        self.n_iter = n_iter

    def z_score(self, w, x, b):
        """Affine pre-activation z = w.x + b."""
        return np.dot(w, x) + b

    def init_params(self, n_x):
        """Random init: uniform weights scaled by sqrt(1/n_x), normal biases."""
        parameters = {}
        n_prev = n_x
        for i in range(1, len(self.args) + 1):
            n_h = self.args[i - 1][0]
            parameters['w' + str(i)] = np.random.rand(n_h, n_prev) * np.sqrt(1 / n_x)
            parameters['b' + str(i)] = np.random.randn(n_h, 1)
            n_prev = n_h
        return parameters

    def activation(self, z, fn='linear'):
        """Apply the named activation function to ``z``.

        Dispatch is lazy so only the requested function is evaluated (the
        original dict evaluated all five eagerly, wasting work and risking
        spurious overflow warnings from the unused branches)."""
        act_fn = {'linear': lambda v: v,
                  'relu': lambda v: np.maximum(v, 0),
                  'tanh': np.tanh,
                  'sigmoid': lambda v: 1 / (1 + np.exp(-v)),
                  'softmax': lambda v: np.exp(v) / np.sum(np.exp(v))}
        return act_fn[fn](z)

    def forward_prop(self, x, parameters):
        """Run a forward pass; returns per-layer z-scores and activations."""
        L = len(self.args)  # bug fix: original read the *global* ``args``
        z_scores = {}
        activations = {'a0': x}
        for i in range(1, L + 1):
            z_scores['z' + str(i)] = self.z_score(parameters['w' + str(i)],
                                                  activations['a' + str(i - 1)],
                                                  parameters['b' + str(i)])
            activations['a' + str(i)] = self.activation(z_scores['z' + str(i)],
                                                        fn=self.args[i - 1][1])
        return z_scores, activations

    def compute_cost(self, y, y_hat):
        """Binary cross-entropy; the small epsilon guards log(0).

        NOTE(review): m = y.shape[0] is 1 for the (1, N)-shaped targets used
        below, so this is a sum rather than a per-example mean — the learning
        rate appears tuned to that; confirm before changing."""
        m = y.shape[0]
        cost = (-1 / m) * (np.dot(y, np.log(y_hat.T + 0.0000001))
                           + np.dot(1 - y, np.log(1 - y_hat.T + 0.0000001)))
        return np.squeeze(cost)

    def backprop(self, y, parameters, z_scores, activations):
        """Backward pass (ReLU hidden layers, sigmoid + cross-entropy output)."""
        gradients = {}
        L = len(self.args)
        m = y.shape[0]
        for i in range(L, 0, -1):
            if i == L:
                # dz_L = a_L - y for the sigmoid/cross-entropy output layer.
                gradients['dz' + str(i)] = activations['a' + str(i)] - y
            else:
                # ReLU derivative: 1 where z >= 0, else 0.
                gradients['dz' + str(i)] = np.multiply(
                    np.dot(parameters['w' + str(i + 1)].T, gradients['dz' + str(i + 1)]),
                    1 * (z_scores['z' + str(i)] >= 0))
            dz = gradients['dz' + str(i)]
            gradients['dw' + str(i)] = (1 / m) * np.matmul(dz, activations['a' + str(i - 1)].T)
            gradients['db' + str(i)] = (1 / m) * np.sum(dz, axis=1, keepdims=True)
        return gradients

    def update_params(self, parameters, gradients):
        """Vanilla gradient-descent step over all weight/bias pairs."""
        eta = self.learning_rate
        for i in range(1, len(parameters) // 2 + 1):
            parameters['w' + str(i)] -= eta * gradients['dw' + str(i)]
            parameters['b' + str(i)] -= eta * gradients['db' + str(i)]
        return parameters

    def fit(self, x, y):
        """Train for n_iter full-batch iterations; returns the learned params."""
        np.random.seed(5)  # reproducible initialization
        params = self.init_params(x.shape[0])
        for i in range(self.n_iter):
            z_scores, activations = self.forward_prop(x, params)
            y_hat = activations['a' + str(len(self.args))]
            cost = self.compute_cost(y, y_hat)
            gradients = self.backprop(y, params, z_scores, activations)
            params = self.update_params(params, gradients)
            if i % 1000 == 0:
                print('Iteration : {} Cost : {}'.format(i, cost))
        return params

    def predict(self, x_test, params):
        """Threshold the network output at 0.5; returns 0/1 labels."""
        z_scores, activations = self.forward_prop(x_test, params)
        y_pred = 1 * (activations['a' + str(len(params) // 2)] > 0.5)
        return np.squeeze(y_pred)
# Sonar dataset: 60 numeric feature columns plus a class-label column.
path = '/home/mrityunjay/Downloads/sonar.csv'
df = pd.read_csv(path)
df.columns=['x'+str(i) for i in range(len(df.columns))]
X=df.drop(['x60'],axis=1)
Y=df['x60']
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.25,random_state=1)
# NNModel expects column-major data: shape (n_features, m).
X_train = np.transpose(X_train.values)
X_test = np.transpose(X_test.values)
# Encode the label as 1 where the class column equals 'R', else 0; shape (1, m).
Y_train = 1*(Y_train.values=='R')
Y_test = 1*(Y_test.values=='R')
Y_train = Y_train.reshape(1,Y_train.shape[0])
Y_test = Y_test.reshape(1,Y_test.shape[0])
# Five ReLU hidden layers and one sigmoid output unit.
args=[(100,'relu'),(50,'relu'),(10,'relu'),(5,'relu'),(3,'relu'),(1,'sigmoid')]
nn = NNModel(learning_rate=0.001, n_iter = 10000, args=args)
params = nn.fit(X_train,Y_train)
Y_pred = nn.predict(X_test,params)
print(Y_pred)
print(Y_test)
acc = accuracy_score(Y_pred,np.squeeze(Y_test))
print(acc)
```
| github_jupyter |
# ETL Pipeline Preparation
Follow the instructions below to help you create your ETL pipeline.
### 1. Import libraries and load datasets.
- Import Python libraries
- Load `messages.csv` into a dataframe and inspect the first few lines.
- Load `categories.csv` into a dataframe and inspect the first few lines.
```
# import necessary libraries
import pandas as pd
import numpy as np
# load messages into dataframe
messages = pd.read_csv("messages.csv")
messages.head()
# load categories into a dataframe and preview it
categories = pd.read_csv("categories.csv")
categories.head()
```
### 2. Merge datasets.
- Merge the messages and categories datasets using the common id
- Assign this combined dataset to `df`, which will be cleaned in the following steps
```
# merge datasets on the shared 'id' column
df = messages.merge(categories, on='id')
df.head()
```
### 3. Split `categories` into separate category columns.
- Split the values in the `categories` column on the `;` character so that each value becomes a separate column. You'll find [this method](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.str.split.html) very helpful! Make sure to set `expand=True`.
- Use the first row of categories dataframe to create column names for the categories data.
- Rename columns of `categories` with new column names.
```
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(";", expand=True)
categories.head()
# select the first row of the categories dataframe
row = categories.loc[0,:]
# use this row to extract a list of new column names for categories:
# each cell looks like 'name-value', so split on '-' and keep the name part
category_colnames = row.apply(lambda x:x.split('-')[0]).values.tolist()
print(category_colnames)
# rename the columns of `categories`
categories.columns = category_colnames
categories.head()
```
### 4. Convert category values to just numbers 0 or 1.
- Iterate through the category columns in df to keep only the last character of each string (the 1 or 0). For example, `related-0` becomes `0`, `related-1` becomes `1`. Convert the string to a numeric value.
- You can perform [normal string actions on Pandas Series](https://pandas.pydata.org/pandas-docs/stable/text.html#indexing-with-str), like indexing, by including `.str` after the Series. You may need to first convert the Series to be of type string, which you can do with `astype(str)`.
```
for column in categories:
    # set each value to be the last part of the 'name-value' string
    categories[column] = categories[column].apply(lambda x:x.split('-')[1])
    # convert column from string to numeric
    categories[column] = categories[column].astype(int)
categories.head()
# Find columns that are not strictly 0/1 encoded
columns=(categories.max()>1)[categories.max()>1].index
# Clamp any value > 1 down to 1 so every column is binary
for col in columns:
    print(categories[col].value_counts())
    categories.loc[categories[col]>1,col] = 1
    print(categories[col].value_counts())
```
### 5. Replace `categories` column in `df` with new category columns.
- Drop the categories column from the df dataframe since it is no longer needed.
- Concatenate df and categories data frames.
```
# drop the original categories column from `df`
df.drop('categories',axis=1, inplace=True)
df.head()
# concatenate the original dataframe with the new `categories` dataframe
# (column-wise; rows align by index)
df = pd.concat([df,categories], axis=1)
df.head()
```
### 6. Remove duplicates.
- Check how many duplicates are in this dataset.
- Drop the duplicates.
- Confirm duplicates were removed.
```
# check number of duplicates
df.duplicated().sum()
# drop duplicates
df.drop_duplicates(inplace=True)
# check number of duplicates again to confirm they were removed
df.duplicated().sum()
```
### 7. Save the clean dataset into an sqlite database.
You can do this with pandas [`to_sql` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) combined with the SQLAlchemy library. Remember to import SQLAlchemy's `create_engine` in the first cell of this notebook to use it below.
```
from sqlalchemy import create_engine
# Persist the cleaned dataframe to a local SQLite database.
engine = create_engine('sqlite:///all_messages.db')
df.to_sql('all_messages', engine, index=False)
```
### 8. Use this notebook to complete `etl_pipeline.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database based on new datasets specified by the user. Alternatively, you can complete `etl_pipeline.py` in the classroom on the `Project Workspace IDE` coming later.
```
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def etl_pipeline(db_path,csv1="messages.csv", csv2="categories.csv",tablename='disastertab'):
    """Load, merge and clean the messages/categories CSVs and persist to SQLite.

    Parameters
    ----------
    db_path : str
        Path of the SQLite database file to create or update.
    csv1, csv2 : str
        Paths of the messages and categories CSV files (joined on ``id``).
    tablename : str
        Name of the table written into the database (replaced if present).
    """
    # load the two datasets and merge them on their shared id
    messages = pd.read_csv(csv1)
    categories = pd.read_csv(csv2)
    df = messages.merge(categories, on='id')
    # split the single semicolon-delimited column into 36 category columns
    categories = df['categories'].str.split(";", expand=True)
    # derive column names from the first row ("related-1" -> "related")
    row = categories.loc[0,:]
    categories.columns = row.apply(lambda x:x.split('-')[0]).values.tolist()
    for column in categories:
        # keep the trailing flag of each 'name-value' cell and make it numeric
        categories[column] = categories[column].apply(lambda x:x.split('-')[1]).astype(int)
        # clamp any value > 1 (e.g. related-2) to 1 so columns are binary
        categories.loc[categories[column]>1,column] = 1
    # replace the raw column with the expanded binary columns
    df.drop('categories',axis=1, inplace=True)
    df = pd.concat([df,categories], axis=1)
    df.drop_duplicates(inplace=True)
    engine = create_engine('sqlite:///'+db_path)
    # Bug fix: if_exists='replace' lets the pipeline be re-run against an
    # existing database (the original raised ValueError on the second run
    # performed below in this notebook).
    df.to_sql(tablename, engine, index=False, if_exists='replace')
# Build the database, then read it back to sanity-check the load.
etl_pipeline('workspace/data/DisasterResponse.db')
engine = create_engine('sqlite:///workspace/data/DisasterResponse.db')
tt=pd.read_sql('SELECT * from disastertab', engine)
tt.head(10)
# Re-run with the real project CSVs.
# NOTE(review): this writes to the same table a second time — confirm
# to_sql is configured to tolerate an existing table.
etl_pipeline(db_path='workspace/data/DisasterResponse.db',csv1="./workspace/data/disaster_messages.csv", csv2="./workspace/data/disaster_categories.csv",)
engine = create_engine('sqlite:///workspace/data/DisasterResponse.db')
tt=pd.read_sql('SELECT * from disastertab', engine)
tt.head(10)
tt.genre.value_counts()
tt.max()
```
| github_jupyter |
# ML Exercise 1 - Linear Regression
To help you start with Python and NumPy, there is a great tutorial online,
created at Stanford. It can be downloaded as a notebook at https://github.com/kuleshov/cs228-material/tree/master/tutorials/python. Note that this tutorial is written in Python 2.7.
<font color='red'> PLEASE DO NOT HESITATE to include your remarks/comments (in colors of your choice) in the notebook. That will be considered as a short report.</font>
```
# Change here using your first and last names
fn1 = "bonnie"
ln1 = "parker"
fn2 = "clyde"
ln2 = "barrow"
# Normalise every name fragment (trim + lowercase), join with underscores,
# and append the notebook extension.
name_parts = ["tp1", ln1, fn1, "and", ln2, fn2]
filename = "_".join(part.strip().lower() for part in name_parts) + ".ipynb"
print(filename)
```
## Part 1: Linear regression with one variable
You will implement linear regression with one variable to predict profits for a food truck. Suppose you are the CEO of a restaurant franchise and are considering different cities for opening a new outlet. The chain already has trucks in various cities and you have data for profits and populations from the cities.
<font color='red'> PLEASE READ CAREFULLY this. When you open a notebook (ipython) with google colab, you create a new working session. If you want that your "created session" can see your data in your google drive, you need to "mount" the drive and include the correct path to your data. Do not worry, the following code will do that. When call drive.mount, you need to authorize: click the link, copy the authorization code,...
You can run some basic commands like "pwd, ls" using "!pwd, or !ls"
</font>
```
###### METHOD 1:
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
import os
# Work from the course folder on the mounted drive.
os.chdir('/content/drive/My Drive/Option_AI_2nd/ML')
# TODO: change the correct path, you may need to create a "data" folder in your drive
!wget https://www.dropbox.com/s/us61lvxcjnn1n3j/ex1data1.txt?dl=0 \
-O /content/drive/My Drive/Option_AI_2nd/ML/data/ex1data1.txt
###### METHOD 2: EASIER
# Download straight into the session's working directory (no Drive needed).
!wget https://www.dropbox.com/s/us61lvxcjnn1n3j/ex1data1.txt?dl=0 \
-O ex1data1.txt
# Check your current folder if method 1
!pwd
```
# Importe some libraries and examine the data.
```
##### If you use METHOD 1
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# NOTE(review): this concatenation is missing a path separator
# (os.path.join would be safer) — see the TODO.
path = os.getcwd() + 'ex1data1.txt' # TO DO: CHANGE THE PATH
data = pd.read_csv(path, header=None, names=['Population', 'Profit'])
data.head()
##### If you use METHOD 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# The file has no header row, so supply the column names explicitly.
data = pd.read_csv('ex1data1.txt', header=None, names=['Population', 'Profit'])
data.head()
data.describe()
```
#Plot it to get a better idea of what the data looks like.
```
# Scatter plot of profit against city population.
data.plot(kind='scatter', x='Population', y='Profit', figsize=(15,10))
#data.T.plot.scatter(x = 'Population', y = 'Profit')
```
#Goal: fit the linear regression parameters $\mathbf{w}$ to your dataset using gradient descent.
You suppose that the relation between your data $x$ and target $t$ can be modeled by a linear model
$y(x,\mathbf{w})= w_0+w_1x$ \\
The objective of linear regression is to minimize the cost function:
$E(\mathbf{w})=\displaystyle \frac{1}{2N} \sum_{n=1}^{N} \{y(x_n,\mathbf{w})-t_n\}^2 $ \\
$N$ is the number of samples in the data set.
<font color='red'> Question: in this dataset, $N=?$ </font>
Your model parameters are the $w_j$ values which you will adjust to minimize cost $E(\mathbf{w})$.
One solution is the batch gradient descent algorithm. In batch gradient descent,each iteration performs the update:
$\mathbf{w}=\displaystyle \mathbf{w} - \eta \frac{1}{N} \sum_{n=1}^{N} \{y(x_n,\mathbf{w})-t_n\}x_n$
With each step of gradient descent, your parameters $w_j$ come closer to the optimal values that will achieve the lowest cost $E(\mathbf{w})$.
<font color='red'> Question: how many parameters $w$ do you have to find for this dataset. </font>
<font color='red'>TODO: Create a function to compute the cost of a given solution (characterized by the parameters w). </font>
Functions like power, sum of numpy may be useful.
#Important notes for implementation in Numpy
For array, * means element-wise multiplication, and the dot() function is used for matrix multiplication.
For matrix, * means matrix multiplication, and the multiply() function is used for element-wise multiplication.
<font color='red'> Maybe in this part (Linear Regression), you should learn to use np.array. In part 2 (Logistic Regression), you learn to use np.matrix </font>
```
def computeCost(X, t, w):
#X : input dataset of shape (N x D+1), N: number of examples, D: the number of features.
#t : target of shape (N, ).
#w : parameters of shape (D+1, ).
# Note that if you use np.array, * is elementwise multiplication, unlike Matlab,
# you may need to use it
# then using np.sum...
# you can use np.dot
# WRITE YOU CODE HERE
# In this dataset, X is Population (each data point has only one variable, one feature)
# t is profit
# but for coding this function, you should consider that X is a np.array...
# (array in numpy may have more than one dimension)
...
return ...
```
#Do some data preparation.
<font color='red'>TODO: get X (training data) (first column) and t (target variable) (last column) from "data". </font>
```
# set X (training data) and t (target variable)
cols = data.shape[1]
X = data.iloc[:,0:1]
t = data.iloc.... ##### COMPLETE YOUR CODE HERE#####
```
Take a look to make sure X (training set) and t (target variable) look correct.
```
X.head()
t.head()
```
Let's add a column of ones to the X so we can use a vectorized solution to computing the cost and gradients (use X.insert).
<font color='red'>Question: why do you need to do this? (hint: related to $w_0$)</font>
```
X.insert(0,'Ones',1)
X.head()
```
The cost function is expecting numpy array (or matrices) so we need to convert X and t before we can use w (use np.array or np.matrix ...). We also need to initialize w (np.zeros). <font color='red'>Question: explain the shape of w?</font>
```
X = np.array(X.values)
t = np.array.... ##### COMPLETE YOUR CODE HERE#####
w = np.zeros... ##### COMPLETE YOUR CODE HERE#####
```
Let's take a quick look at the shape of our X,t,w.
```
X.shape, w.shape, t.shape
```
Now let's compute the cost for our initial solution (0 values for w).
```
computeCost(X, t, w)
```
Expected result 32.072733877455676. So far so good.
#<font color='red'>Exercise: Define a function to perform gradient descent on the parameters w using the update rules defined in the text.</font>
Functions like multiply, sum of numpy may be useful.
```
def gradientDescent(X, t, w, eta, iters):
# Initialize some useful values
N=... # number of training examples
cost = np.zeros(iters);
# make a copy of theta, to avoid changing the original array, since numpy arrays
# are passed by reference to functions
w = w.copy()
for i in range(iters):
##### WRITE YOUR CODE HERE#####
w = w - ...##### COMPLETE YOUR CODE HERE#####
cost[i] = computeCost(X, t, w) # you should stock the cost function for each epoch
# this is useful for checking whether the cost reduces...
return w, cost
```
Initialize some additional variables - the learning rate eta, and the number of iterations to perform.
```
eta = 0.01
iters = 1000
```
Now let's run the gradient descent algorithm to fit our parameters $\mathbf{w}$ to the training set.
```
w, cost = gradientDescent(X, t, w, eta, iters)
w
```
Expected result: matrix([[-3.24140214, 1.1272942 ]]).
Finally you can compute the cost (error) of the trained model using our fitted parameters.
```
computeCost(X, t, w)
```
Expected result: 4.515955503078912.
Now let's plot the linear model along with the data to visually see how well it fits.
```
#TODO: to complete
x = np.linspace(data.Population.min(), data.Population.max(), 100)
f = ... ##### COMPLETE YOUR CODE HERE#####
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Population, data.Profit, label='Traning Data')
ax.legend(loc=2)
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size')
```
Looks pretty good! Since the gradient descent function also outputs a vector with the cost at each training iteration, we can plot that as well. Notice that the cost always decreases - this is an example of a convex optimization problem.
```
#TODO: plot the error vs. training epoch
fig, ax = plt.subplots(figsize=(10,7))
ax.plot... #use np.arange(iters) to create an array ...
##### COMPLETE YOUR CODE HERE#####
```
## Part 2: Linear regression with multiple variables
This exercise also included a housing price data set with 2 variables (size of the house in square feet and number of bedrooms) and a target (price of the house). Let's use the techniques we already applied to analyze that data set as well.
```
!wget https://www.dropbox.com/s/5b5hnnnn8d4y2o5/ex1data2.txt?dl=0 \
-O ex1data2.txt
```
The notebook will start by loading and displaying some values from this dataset.
You then complete the code to:
- Subtract the mean value of each feature from the dataset.
- After subtracting the mean, additionally scale (divide) the feature values by their respective “standard deviations.”
```
file = 'ex1data2.txt'
data2 = pd.read_csv(file, header=None, names=['Size', 'Bedrooms', 'Price'])
data2.head()
```
<font color='red'>Exercise: add another pre-processing step - normalizing the features. </font>
<font color='red'>Question: why is it necessary? </font>
```
##TODO, normalize your features
data2 = ...##### COMPLETE YOUR CODE HERE#####
#Now show the normalized data
data2.head()
```
Now let's repeat our pre-processing steps from part 1 and run the linear regression procedure on the new data set.
```
eta=0.01
# add ones column
data2.insert... ##### COMPLETE YOUR CODE HERE#####
# set X (training data) and t (target variable)
cols = data2.shape[1]
X2 = data2.iloc...##### COMPLETE YOUR CODE HERE#####
t2 = ...##### COMPLETE YOUR CODE HERE#####
# convert to matrices/array
X2 = ....##### COMPLETE YOUR CODE HERE#####
t2 = ....##### COMPLETE YOUR CODE HERE#####
# initialize w
# TODO: what is shape of w? why?
# ..........................
w2 = ....##### COMPLETE YOUR CODE HERE#####
# perform linear regression on the data set
w2, cost2 = gradientDescent(X2, t2, w2, eta, iters)
# get the cost (error) of the model
computeCost(X2, t2, w2)
```
We can take a quick look at the training progress for this one as well.
```
# Plot the error vs. training epoch for the multi-variable run.
fig, ax = plt.subplots(figsize=(10, 7))
# FIX: this cell visualizes the part-2 run, whose history is `cost2`;
# plotting `cost` here would silently show the part-1 curve again.
ax.plot(np.arange(iters), cost2, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
```
<font color='red'>Question: What can you see from this figure ?</font>
Answer: ....
<font color='red'>Exercise: Try out different learning rates for the dataset and find a learning rate that converges quickly.</font>
```
###WRITE YOUR CODE HERE
....
```
Instead of implementing these algorithms from scratch, we could also use scikit-learn's linear regression function. Let's apply scikit-learn's linear regression algorithm to the data from part 1 and see what it comes up with.
```
from sklearn import linear_model
model = linear_model.LinearRegression()
model.fit(X, t)
```
Here's what the scikit-learn model's predictions look like.
```
x = np.array(X[:, 1])
f = model.predict(X).flatten()
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Population, data.Profit, label='Traning Data')
ax.legend(loc=2)
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size')
```
You can submit now your code via Moodle.
Do not forget to answer the questions!!!
| github_jupyter |
# Movie Recommender System
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import ast
%matplotlib inline
movies = pd.read_csv('tmdb_5000_movies.csv')
credits = pd.read_csv('tmdb_5000_credits.csv')
movies.head()
credits.head()
movies = movies.merge(credits, on='title')
movies.shape
movies = movies[['movie_id','title','overview','genres','keywords','cast','crew']]
movies.head()
movies.isnull().sum()
movies.dropna(inplace=True)
movies.duplicated().sum()
def get_genres(x):
    """Parse a stringified list of dicts and return each entry's 'name'.

    Used for both the 'genres' and 'keywords' columns, whose cells are
    string representations of lists like "[{'id': 28, 'name': 'Action'}]".
    """
    return [entry['name'] for entry in ast.literal_eval(x)]
movies['genres'] = movies['genres'].apply(lambda x: get_genres(x))
movies['keywords'] = movies['keywords'].apply(lambda x: get_genres(x))
movies.head(1)
def get_actor(x):
    """Return the names of the first three cast members in *x*.

    *x* is a stringified list of cast dicts; only the top-billed three
    actors are kept for the tag soup.
    """
    cast = ast.literal_eval(x)
    return [member['name'] for member in cast[:3]]
movies['cast'] = movies['cast'].apply(lambda x: get_actor(x))
movies.head(1)
def get_director(x):
    """Collect the names of all crew entries whose job is 'Director'.

    *x* is a stringified list of crew dicts with at least 'job' and
    'name' keys.
    """
    return [member['name']
            for member in ast.literal_eval(x)
            if member['job'] == 'Director']
movies['crew'] = movies['crew'].apply(lambda x: get_director(x))
movies.head()
movies['overview'] = movies['overview'].apply(lambda x: x.split())
movies
movies['genres'] = movies['genres'].apply(lambda x: [i.replace(' ','') for i in x])
movies['keywords'] = movies['keywords'].apply(lambda x: [i.replace(' ','') for i in x])
movies['cast'] = movies['cast'].apply(lambda x: [i.replace(' ','') for i in x])
movies['crew'] = movies['crew'].apply(lambda x: [i.replace(' ','') for i in x])
movies.head()
movies['tags'] = movies['overview'] + movies['genres'] + movies['keywords'] + movies['cast'] + movies['crew']
df = movies[['movie_id','title','tags']]
df
df['tags'] = df['tags'].apply(lambda x: ' '.join(x))
df['tags'] = df['tags'].apply(lambda x: x.lower())
import nltk
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
def stem(x):
    """Porter-stem every whitespace-separated token of *x* and rejoin.

    Relies on the module-level PorterStemmer instance ``ps``.
    """
    return ' '.join(ps.stem(token) for token in x.split())
df['tags'] = df['tags'].apply(lambda x: stem(x))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=5000,stop_words='english')
cv.fit_transform(df['tags']).toarray().shape
vectors = cv.fit_transform(df['tags']).toarray()
cv.get_feature_names()
from sklearn.metrics.pairwise import cosine_similarity
similarity = cosine_similarity(vectors)
similarity
distances = sorted(enumerate(similarity[0]),reverse=True,key = lambda x: x[1])[1:6]
distances
def recommend(movie):
    """Print the titles of the five movies most similar to *movie*.

    Looks up the movie's row in the module-level ``df``, ranks all movies
    by cosine similarity (module-level ``similarity`` matrix) and prints
    the top five, skipping rank 0 which is the movie itself.
    """
    idx = df[df['title'] == movie].index[0]
    ranked = sorted(enumerate(similarity[idx]),
                    key=lambda pair: pair[1],
                    reverse=True)[1:6]
    for pos, _score in ranked:
        print(df['title'][pos])
recommend('Avatar')
import pickle
pickle.dump(df,open('movies.pkl','wb'))
pickle.dump(similarity,open('similarity.pkl','wb'))
```
| github_jupyter |
```
!pip install transformers
# generics
import pandas as pd
import numpy as np
from tqdm import tqdm
import re
from collections import defaultdict
import matplotlib.pyplot as plt
import random
!pip install pytypo
import pytypo
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from sklearn.metrics import confusion_matrix, classification_report, f1_score
import torch.nn.functional as F
from transformers import BertTokenizer, AutoModel, BertConfig, TFBertModel, AdamW, get_linear_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, BertConfig, get_constant_schedule_with_warmup
# import warnings
# warnings.filterwarnings('FutureWarning')
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
device
from google.colab import drive
drive.mount('/content/drive')
def set_seed(seed_value=42):
    """Seed the python, numpy and torch (CPU + all CUDA) RNGs.

    :param seed_value: integer seed applied to every generator, so runs
        are reproducible end to end.
    """
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed_value)
set_seed(29092020)
tokenizer = BertTokenizer.from_pretrained("indobenchmark/indobert-base-p1")
model = AutoModel.from_pretrained("indobenchmark/indobert-base-p1")
# df = pd.read_excel('./drive/My Drive/satdat/dataset.xlsx')
df_train = pd.read_csv('./drive/My Drive/satdat/train.csv')
df_val = pd.read_csv('./drive/My Drive/satdat/val.csv')
test = pd.read_csv('./drive/My Drive/satdat/datatest_labelled.csv')
# df_train, df_val = train_test_split(df, test_size=0.1, random_state=42)
# df_train.to_csv("./drive/My Drive/satdat/b_train.csv")
# df_val.to_csv('./drive/My Drive/satdat/b_val.csv')
def clean(text):
    """Strip hashtags, numbers and URLs/mentions/punctuation from *text*,
    then normalize elongated typos with pytypo.

    Substitution order matters: hashtags are removed first so the whole
    '#tag' token disappears instead of leaving the tag text behind.

    :param text: raw tweet/article string.
    :return: cleaned, typo-corrected string.
    """
    # Raw strings: "\S", "\W", "\d" in plain strings are invalid escape
    # sequences (DeprecationWarning, SyntaxWarning in newer Pythons).
    pattern_hashtag = r"#[A-Za-z0-9]+"
    pattern_number = r"(^|\W)\d+"
    pattern_rest = r"@\S+|https?:\S+|http?:\S|[#]+|[^A-Za-z0-9]+"
    text = re.sub(pattern_hashtag, " ", text).strip()
    text = re.sub(pattern_number, " ", text).strip()
    text = re.sub(pattern_rest, " ", text).strip()
    # Collapse remaining runs of whitespace, then fix elongated words
    # (e.g. "goooood" -> "good").
    return pytypo.correct_sentence(" ".join(text.split()).strip())
slang = pd.read_csv('./drive/My Drive/satdat/slang.csv')
slang = slang[['slang', 'formal']]
slang = slang.set_index('slang')['formal'].to_dict()
df_train.narasi = df_train.narasi.apply(lambda x: clean(x))
df_train.judul = df_train.judul.apply(lambda x: clean(x))
df_val.narasi = df_val.narasi.apply(lambda x: clean(x))
df_val.judul = df_val.judul.apply(lambda x: clean(x))
test.narasi = test.narasi.apply(lambda x: clean(x))
test.judul = test.judul.apply(lambda x: clean(x))
class HoaxDataset(Dataset):
    """Paired-text dataset for hoax classification.

    Each item BERT-tokenizes two text fields (narasi = body, judul =
    title) and returns flattened input_ids/attention_mask tensors ready
    for a dual-encoder model.  When ``no_label`` is True (test data) the
    'label' key is omitted from the sample dict.
    """

    def __init__(self, feature1, feature2, label, tokenizer, max_len, no_label=False):
        self.feature1 = feature1    # narasi (body text) column
        self.feature2 = feature2    # judul (title) column
        self.label = label          # may be None when no_label=True
        self.tokenizer = tokenizer
        self.max_len = max_len      # kept for API compatibility; lengths below are fixed
        self.no_label = no_label

    def __len__(self):
        return len(self.feature1)

    def _encode(self, text, max_length):
        """Tokenize *text* to fixed-length input_ids/attention_mask tensors."""
        # FIX: use the tokenizer passed to the constructor; the original
        # called the module-level `tokenizer` global, silently ignoring
        # the `tokenizer` argument.
        return self.tokenizer.encode_plus(
            text,
            max_length=max_length,
            add_special_tokens=True,
            return_token_type_ids=False,
            return_attention_mask=True,
            truncation=True,
            pad_to_max_length=True,
            return_tensors='pt',
        )

    def __getitem__(self, item):
        feature1 = str(self.feature1[item])
        feature2 = str(self.feature2[item])
        # Fixed sequence lengths: 64 tokens for the body, 32 for the title.
        encoding1 = self._encode(feature1, 64)
        encoding2 = self._encode(feature2, 32)
        sample = {
            'narasi_text': feature1,
            'narasi_input_ids': encoding1['input_ids'].flatten(),
            'narasi_attention_mask': encoding1['attention_mask'].flatten(),
            'judul_narasi_text': feature2,
            'judul_input_ids': encoding2['input_ids'].flatten(),
            'judul_attention_mask': encoding2['attention_mask'].flatten(),
        }
        if not self.no_label:
            sample['label'] = torch.tensor(self.label[item], dtype=torch.long)
        return sample
def to_data_loader(df, columns, label, tokenizer, max_len, batch_size):
    """Wrap two labelled dataframe columns in a HoaxDataset and batch it.

    :param columns: pair of column names, (narasi, judul).
    :param label: name of the label column.
    :return: a torch DataLoader over the dataset.
    """
    dataset = HoaxDataset(
        df[columns[0]],
        df[columns[1]],
        df[label],
        tokenizer=tokenizer,
        max_len=max_len,
    )
    return DataLoader(dataset, batch_size=batch_size)
def test_to_data_loader(df, columns, tokenizer, max_len, batch_size):
    """Wrap two unlabelled (test) dataframe columns in a HoaxDataset.

    Same as `to_data_loader` but without a label column; samples carry no
    'label' key.
    """
    dataset = HoaxDataset(
        df[columns[0]],
        df[columns[1]],
        None,
        tokenizer=tokenizer,
        max_len=max_len,
        no_label=True,
    )
    return DataLoader(dataset, batch_size=batch_size)
train_data_loader = to_data_loader(df_train, ['narasi', 'judul'], 'label', tokenizer, 64, 32)
val_data_loader = to_data_loader(df_val, ['narasi', 'judul'], 'label', tokenizer, 64, 32)
test_data_loader = test_to_data_loader(test, ['narasi', 'judul'], tokenizer, 64, 32)
data = next(iter(test_data_loader))
data.keys()
class HoaxClassifier(nn.Module) :
    """Dual-BERT hoax classifier.

    Two separate IndoBERT encoders embed the body (narasi) and the title
    (judul); their pooled outputs are concatenated and passed through a
    dropout -> linear -> tanh -> dropout -> linear head to 2 logits.
    """
    def __init__(self, n_classes) :
        # NOTE(review): n_classes is accepted but the output layer below is
        # hard-coded to 2 units — confirm intent before reusing with other
        # class counts.
        super(HoaxClassifier, self).__init__()
        config = BertConfig.from_pretrained('indobenchmark/indobert-base-p1')
        self.bert1 = AutoModel.from_pretrained("indobenchmark/indobert-base-p1", config=config)
        self.bert2 = AutoModel.from_pretrained("indobenchmark/indobert-base-p1", config=config)
        self.drop = nn.Dropout(p=0.5)
        self.relu = nn.ReLU()  # NOTE(review): created but never used in forward
        self.tanh = nn.Tanh()
        # Head input is the concatenation of both pooled outputs.
        self.dual_bert = nn.Linear(self.bert1.config.hidden_size * 2, 32)
        self.out = nn.Linear(32, 2)
    def forward(self, narasi_input_ids, narasi_attention_mask, judul_input_ids, judul_attention_mask) :
        # NOTE(review): tuple-unpacking `_, pooled_output = bert(...)` assumes
        # an older transformers version; newer versions return a ModelOutput
        # object — confirm version or pass return_dict=False.
        _, pooled_output1 = self.bert1(
            input_ids = narasi_input_ids,
            attention_mask = narasi_attention_mask
        )
        _, pooled_output2 = self.bert2(
            input_ids = judul_input_ids,
            attention_mask = judul_attention_mask
        )
        # Concatenate the two [batch, hidden] pooled vectors side by side.
        x = torch.cat((pooled_output1, pooled_output2), dim=1)
        x = self.drop(x)
        x = self.dual_bert(x)
        x = self.tanh(x)
        x = self.drop(x)
        x = self.out(x)
        return x
model = HoaxClassifier(2)
model.to(device)
# load freezed only if already exist
# model.load_state_dict(torch.load('/content/drive/My Drive/satdat/freezed_state.bin'))
# toggle to train non embeddings
model.bert1.embeddings.requires_grad_=True
model.bert2.embeddings.requires_grad_=True
EPOCHS = 8
opt = AdamW(model.parameters(), lr=3e-5, correct_bias=False, weight_decay=1e-4)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_constant_schedule_with_warmup(
opt,
num_warmup_steps=0,
# num_training_steps=total_steps,
)
loss_function = nn.CrossEntropyLoss().to(device)
def train_epoch (model, data_loader, loss_fn, optimizer, device, scheduler, n_examples) :
    """Train *model* for one pass over *data_loader*.

    :param model: dual-input classifier taking (narasi_ids, narasi_mask,
        judul_ids, judul_mask).
    :param loss_fn: criterion applied to (logits, label).
    :param scheduler: LR scheduler, stepped once per batch.
    :param n_examples: total sample count, used to normalize accuracy.
    :return: (epoch accuracy as a 0-d tensor, mean batch loss).
    """
    model = model.train()
    correct_predictions = 0
    losses = []
    for d in data_loader :
        input_ids1 = d['narasi_input_ids'].to(device)
        input_ids2 = d['judul_input_ids'].to(device)
        input_mask1 = d['narasi_attention_mask'].to(device)
        input_mask2 = d['judul_attention_mask'].to(device)
        label = d['label'].to(device)
        outputs = model(
            input_ids1,
            input_mask1,
            input_ids2,
            input_mask2
        )
        _, preds = torch.max(outputs, dim=1)
        loss = loss_fn(outputs, label)
        correct_predictions += torch.sum(preds == label)
        losses.append(loss.item())
        loss.backward()
        # FIX: clip_grad_norm (no trailing underscore) is deprecated and
        # removed in modern torch; clip_grad_norm_ is the in-place API.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return correct_predictions.double() / n_examples, np.mean(losses)
def eval_model(model, data_loader, loss_fn, device, n_examples) :
    """Evaluate *model* on *data_loader* without gradient tracking.

    :param n_examples: total sample count, used to normalize accuracy.
    :return: (accuracy as a 0-d tensor, mean batch loss).
    """
    model = model.eval()
    batch_losses = []
    n_correct = 0
    with torch.no_grad() :
        for batch in data_loader :
            narasi_ids = batch['narasi_input_ids'].to(device)
            judul_ids = batch['judul_input_ids'].to(device)
            narasi_mask = batch['narasi_attention_mask'].to(device)
            judul_mask = batch['judul_attention_mask'].to(device)
            labels = batch['label'].to(device)
            logits = model(
                narasi_ids,
                narasi_mask,
                judul_ids,
                judul_mask
            )
            _, predicted = torch.max(logits, dim=1)
            n_correct += torch.sum(predicted == labels)
            batch_losses.append(loss_fn(logits, labels).item())
    return n_correct.double() / n_examples, np.mean(batch_losses)
%%time
history = defaultdict(list)
best_accuracy = 0
for epoch in range(EPOCHS):
print(f'Epoch {epoch + 1}/{EPOCHS}')
print('-' * 10)
train_acc, train_loss = train_epoch(
model,
train_data_loader,
loss_function,
opt,
device,
scheduler,
len(df_train)
)
print(f'Train loss {train_loss} accuracy {train_acc}')
val_acc, val_loss = eval_model(
model,
val_data_loader,
loss_function,
device,
len(df_val)
)
print(f'Val loss {val_loss} accuracy {val_acc}')
print()
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
if val_acc > best_accuracy:
torch.save(model.state_dict(), 'best_model.bin')
best_accuracy = val_acc
def get_predictions(model, data_loader):
    """Run *model* over *data_loader* and collect outputs.

    Uses the module-level ``device``.

    :return: (predicted class indices, softmax probabilities) as stacked
        CPU tensors.
    """
    model = model.eval()
    all_preds = []
    all_probs = []
    with torch.no_grad():
        for batch in data_loader:
            logits = model(
                batch['narasi_input_ids'].to(device),
                batch['narasi_attention_mask'].to(device),
                batch['judul_input_ids'].to(device),
                batch['judul_attention_mask'].to(device)
            )
            _, batch_preds = torch.max(logits, dim=1)
            all_preds.extend(batch_preds)
            all_probs.extend(F.softmax(logits, dim=1))
    return torch.stack(all_preds).cpu(), torch.stack(all_probs).cpu()
# load best model
model.load_state_dict(torch.load('./best_model.bin'))
y_pred, y_pred_probs = get_predictions(
model,
test_data_loader
)
y_pred
print(classification_report(list(test['label']), y_pred))
print(f1_score(list(test['label']), y_pred, average='micro'))
import itertools
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
def plot_confusion_matrix(cm, classes,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render confusion matrix *cm* as an annotated matplotlib image.

    :param cm: square confusion-matrix array (counts).
    :param classes: tick labels for both axes, in matrix order.
    Each cell is annotated with its value; text turns white on dark cells.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, fontsize=20)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, fontsize=13)
    plt.yticks(ticks, classes, fontsize=13)
    fmt = '.2f'
    threshold = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > threshold else "black")
    plt.ylabel('True label', fontsize=17)
    plt.xlabel('Predicted label', fontsize=17)
cnf_matrix = confusion_matrix(test.label.to_list(), y_pred)
plt.figure(figsize=(6,6))
plot_confusion_matrix(cnf_matrix, classes=['0', '1'], title="Confusion matrix")
plt.show()
```
| github_jupyter |
# Autotrainer PPO2 on Gym Pendulum
Test the autotrainer on OpenAI's Pendulum environment, which is continuous and considered to be an easy environment to solve.
## Ensure that Tensorflow is using the GPU
```
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF")
```
## Define Experiment Tags
```
TAGS = ['gym-pendulum', 'gpu',]
```
## Parse CLI arguments and register w/ wandb
This experiment will be using the auto trainer to handle all of the hyperparameter tuning
```
from auto_trainer import params
import auto_trainer
auto_trainer.trainer.PROJECT_NAME = 'autotrainer-gym-baselines'
config = params.WandbParameters().parse()
config.episodes = 10000
config.episode_length = 750
config.num_workers = 8
config.eval_frequency = 25
config.eval_episodes = 5
config.fps = 20
# Create a 4 second gif
config.eval_render_freq = int(config.episode_length / (4 * config.fps))
config
config, run = auto_trainer.get_synced_config(config, TAGS)
config
```
## Create a virtual display for environment rendering
```
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=False, size=(1400, 900))
display.start()
```
## Create a normalized wrapper for the Pendulum Environment
The vanilla Pendulum environment has its action and observation spaces outside of $[-1, 1]$. Create a simple wrapper to apply min/max scaling to the respective values. Note that the default Pendulum environment doesn't have a termination state, so artificially create a termination condition.
```
from gym.envs.classic_control import pendulum
from gym import spaces
import gym
class NormalizedPendulum(pendulum.PendulumEnv):
    """PendulumEnv with actions and observations min/max-scaled to [-1, 1],
    plus an artificial episode boundary every *length* steps (the stock
    Pendulum environment never sets done=True)."""
    def __init__(self, length: int = 1000):
        super().__init__()
        # Keep the original (unscaled) space so _get_obs can normalize by
        # its upper bounds.
        self.unscaled_obs_space = self.observation_space
        self.action_space = spaces.Box(low=-1., high=1., shape=(1,))
        self.observation_space = spaces.Box(low=-1., high=1., shape=(3,))
        self._length = length  # steps per artificial episode
        self._cnt = 0          # step counter within the current episode
    def reset(self):
        # Restart the artificial episode clock along with the env state.
        self._cnt = 0
        return super().reset()
    def step(self, u):
        # Scale the [-1, 1] action back to torque units before stepping.
        self._cnt += 1
        obs, reward, done, info = super().step(u * self.max_torque)
        # Force done=True every self._length steps (artificial termination).
        if self._cnt % self._length == 0:
            return obs, reward, True, info
        else:
            return obs, reward, done, info
    def _get_obs(self):
        # Divide by the unscaled space's upper bounds to land in [-1, 1].
        return super()._get_obs() / self.unscaled_obs_space.high
```
Create the environment generator
```
def make_env(length):
    """Return a zero-argument factory building a NormalizedPendulum of the
    given episode *length* — the callable form SubprocVecEnv expects."""
    def _factory():
        return NormalizedPendulum(length)
    return _factory
```
### Create the Envs
Import the desired vectorized env
```
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common.vec_env import VecNormalize
```
Create training & testing environments
```
train_env = SubprocVecEnv([make_env(config.episode_length)
for _ in range(config.num_workers)])
test_env = make_env(config.episode_length)()
```
## Learning
And we're off!
```
model, config, run = auto_trainer.train(train_env, test_env, config, TAGS,
log_freq=250, full_logging=False, run=run)
```
| github_jupyter |
# Quantization of Image Classification Models
This tutorial demonstrates how to apply INT8 quantization to an Image Classification model using [Post-training Optimization Tool API](../../compression/api/README.md). The Mobilenet V2 model trained on the Cifar10 dataset is used as an example. The code of this tutorial is designed to be extendable to a custom model and dataset. It is assumed that OpenVINO is already installed. This tutorial consists of the following steps:
- Prepare the model for quantization
- Define data loading and accuracy validation functionality
- Run optimization pipeline
- Compare accuracy of the original and quantized models
- Compare performance of the original and quantized models
- Compare results on one picture
```
import os
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
from addict import Dict
from compression.api import DataLoader, Metric
from compression.engines.ie_engine import IEEngine
from compression.graph import load_model, save_model
from compression.graph.model_utils import compress_model_weights
from compression.pipeline.initializer import create_pipeline
from openvino.runtime import Core
from torchvision import transforms
from torchvision.datasets import CIFAR10
# Set the data and model directories
DATA_DIR = 'data'
MODEL_DIR = 'model'
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(MODEL_DIR, exist_ok=True)
```
## Prepare the Model
Model preparation stage has the following steps:
- Download PyTorch model from Torchvision repository
- Convert it to ONNX format
- Run OpenVINO Model Optimizer tool to convert ONNX to OpenVINO Intermediate Representation (IR)
```
model = torch.hub.load("chenyaofo/pytorch-cifar-models", "cifar10_mobilenetv2_x1_0", pretrained=True)
model.eval()
dummy_input = torch.randn(1, 3, 32, 32)
onnx_model_path = Path(MODEL_DIR) / 'mobilenet_v2.onnx'
ir_model_xml = onnx_model_path.with_suffix('.xml')
ir_model_bin = onnx_model_path.with_suffix('.bin')
torch.onnx.export(model, dummy_input, onnx_model_path, verbose=True)
# Run OpenVINO Model Optimization tool to convert ONNX to OpenVINO IR
!mo --framework=onnx --data_type=FP16 --input_shape=[1,3,32,32] -m $onnx_model_path --output_dir $MODEL_DIR
```
## Define Data Loader
At this step the `DataLoader` interface from POT API is implemented.
```
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])
dataset = CIFAR10(root=DATA_DIR, train=False, transform=transform, download=True)
# create DataLoader from CIFAR10 dataset
class CifarDataLoader(DataLoader):
    """POT `DataLoader` over the CIFAR10 test set.

    NOTE(review): `load_data` is called with the module-level `dataset`
    object rather than anything derived from `config`, so the DATA_DIR in
    the config is effectively unused here — confirm this is intended.
    """
    def __init__(self, config):
        """
        Initialize config and dataset.
        :param config: created config with DATA_DIR path.
        """
        if not isinstance(config, Dict):
            config = Dict(config)
        super().__init__(config)
        # Materialize the whole test set up front (10k CIFAR10 images).
        self.indexes, self.pictures, self.labels = self.load_data(dataset)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        """
        Return one sample of index, label and picture.
        :param index: index of the taken sample.
        """
        if index >= len(self):
            raise IndexError
        # POT expects ((index, annotation), input) tuples.
        return (self.indexes[index], self.labels[index]), self.pictures[index].numpy()
    def load_data(self, dataset):
        """
        Load dataset in needed format.
        :param dataset: downloaded dataset.
        """
        pictures, labels, indexes = [], [], []
        for idx, sample in enumerate(dataset):
            pictures.append(sample[0])
            labels.append(sample[1])
            indexes.append(idx)
        return indexes, pictures, labels
```
## Define Accuracy Metric Calculation
At this step the `Metric` interface for accuracy Top-1 metric is implemented. It is used for validating accuracy of quantized model.
```
# Custom implementation of classification accuracy metric.
class Accuracy(Metric):
    """Top-k classification accuracy for the POT `Metric` API.

    Accumulates one list of 0.0/1.0 matches per processed batch; `value`
    reports the last batch, `avg_value` the mean over everything seen
    since `reset`.
    """
    # Required methods
    def __init__(self, top_k=1):
        super().__init__()
        self._top_k = top_k
        self._name = 'accuracy@top{}'.format(self._top_k)
        self._matches = []  # list of per-batch match lists
    @property
    def value(self):
        """ Returns accuracy metric value for the last model output. """
        return {self._name: self._matches[-1]}
    @property
    def avg_value(self):
        """ Returns accuracy metric value for all model outputs. """
        # NOTE(review): np.ravel on a list of unequal-length batch lists
        # (e.g. a smaller final batch) creates a ragged array in modern
        # numpy — confirm batches are equal-sized or flatten explicitly.
        return {self._name: np.ravel(self._matches).mean()}
    def update(self, output, target):
        """ Updates prediction matches.
        :param output: model output
        :param target: annotations
        """
        if len(output) > 1:
            raise Exception('The accuracy metric cannot be calculated '
                            'for a model with multiple outputs')
        if isinstance(target, dict):
            target = list(target.values())
        # argsort is ascending, so the last top_k columns hold the
        # indices of the k largest logits per sample.
        predictions = np.argsort(output[0], axis=1)[:, -self._top_k:]
        match = [float(t in predictions[i]) for i, t in enumerate(target)]
        self._matches.append(match)
    def reset(self):
        """ Resets collected matches """
        self._matches = []
    def get_attributes(self):
        """
        Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}.
        Required attributes: 'direction': 'higher-better' or 'higher-worse'
                             'type': metric type
        """
        return {self._name: {'direction': 'higher-better',
                             'type': 'accuracy'}}
```
## Run Quantization Pipeline and compare the accuracy of the original and quantized models
Here we define a configuration for our quantization pipeline and run it.
NOTE: we use built-in `IEEngine` implementation of the `Engine` interface from the POT API for model inference. `IEEngine` is built on top of OpenVINO Python* API for inference and provides basic functionality for inference of simple models. If you have a more complicated inference flow for your model/models you should create your own implementation of `Engine` interface, for example by inheriting from `IEEngine` and extending it.
```
# Configuration for the POT quantization pipeline: model IR paths,
# inference-engine settings, and the calibration data source.
model_config = Dict({
'model_name': 'mobilenet_v2',
'model': ir_model_xml,
'weights': ir_model_bin
})
# Number of parallel infer requests used for statistics collection and evaluation.
engine_config = Dict({
'device': 'CPU',
'stat_requests_number': 2,
'eval_requests_number': 2
})
dataset_config = {
'data_source': DATA_DIR
}
# DefaultQuantization: uniform INT8 quantization, 'performance' preset,
# calibrated on 300 samples.
algorithms = [
{
'name': 'DefaultQuantization',
'params': {
'target_device': 'CPU',
'preset': 'performance',
'stat_subset_size': 300
}
}
]
# Steps 1-7: Model optimization
# Step 1: Load the model.
model = load_model(model_config)
# Step 2: Initialize the data loader.
data_loader = CifarDataLoader(dataset_config)
# Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.
metric = Accuracy(top_k=1)
# Step 4: Initialize the engine for metric calculation and statistics collection.
engine = IEEngine(engine_config, data_loader, metric)
# Step 5: Create a pipeline of compression algorithms.
pipeline = create_pipeline(algorithms, engine)
# Step 6: Execute the pipeline.
compressed_model = pipeline.run(model)
# Step 7 (Optional): Compress model weights quantized precision
# in order to reduce the size of final .bin file.
compress_model_weights(compressed_model)
# Step 8: Save the compressed model to the desired path.
compressed_model_paths = save_model(model=compressed_model, save_path=MODEL_DIR, model_name="quantized_mobilenet_v2"
)
compressed_model_xml = compressed_model_paths[0]["model"]
compressed_model_bin = Path(compressed_model_paths[0]["model"]).with_suffix(".bin")
# Step 9: Compare accuracy of the original and quantized models.
# pipeline.evaluate drives the Accuracy metric defined above over the dataset.
metric_results = pipeline.evaluate(model)
if metric_results:
for name, value in metric_results.items():
print(f"Accuracy of the original model: {name}: {value}")
metric_results = pipeline.evaluate(compressed_model)
if metric_results:
for name, value in metric_results.items():
print(f"Accuracy of the optimized model: {name}: {value}")
```
## Compare Performance of the Original and Quantized Models
Finally, we will measure the inference performance of the FP32 and INT8 models. To do this, we use [Benchmark Tool](https://docs.openvinotoolkit.org/latest/openvino_inference_engine_tools_benchmark_tool_README.html) - OpenVINO's inference performance measurement tool.
NOTE: For more accurate performance, we recommend running benchmark_app in a terminal/command prompt after closing other applications. Run benchmark_app -m model.xml -d CPU to benchmark async inference on CPU for one minute. Change CPU to GPU to benchmark on GPU. Run benchmark_app --help to see an overview of all command line options.
```
# Inference FP16 model (IR)
# Benchmark the original IR with asynchronous requests on CPU.
!benchmark_app -m $ir_model_xml -d CPU -api async
# Inference INT8 model (IR)
# Benchmark the quantized IR under identical settings for a fair comparison.
!benchmark_app -m $compressed_model_xml -d CPU -api async
```
## Compare results on four pictures.
```
ie = Core()
# read and load float model
float_model = ie.read_model(
model=ir_model_xml, weights=ir_model_bin
)
float_compiled_model = ie.compile_model(model=float_model, device_name="CPU")
# read and load quantized model
quantized_model = ie.read_model(
model=compressed_model_xml, weights=compressed_model_bin
)
quantized_compiled_model = ie.compile_model(model=quantized_model, device_name="CPU")
# define all possible labels from CIFAR10
labels_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
all_pictures = []
all_labels = []
# get all pictures and their labels
# each data_loader item is ((index, label), picture) -- see CifarDataLoader.__getitem__
for i, batch in enumerate(data_loader):
all_pictures.append(batch[1])
all_labels.append(batch[0][1])
def plot_pictures(indexes: list, all_pictures=all_pictures, all_labels=all_labels):
    """Plot 4 pictures side by side, titled with their class names.

    :param indexes: a list of indexes of pictures to be displayed.
    :param all_pictures: pictures to draw from (CHW arrays).
    :param all_labels: numeric labels parallel to all_pictures.
    """
    num_pics = len(indexes)
    assert num_pics == 4, f'No enough indexes for pictures to be displayed, got {num_pics}'
    images, titles = [], []
    for idx in indexes:
        assert idx < 10000, 'Cannot get such index, there are only 10000'
        # CHW -> HWC so imshow gets channels last
        images.append(np.rollaxis(all_pictures[idx].squeeze(), 0, 3))
        titles.append(labels_names[all_labels[idx]])
    f, axarr = plt.subplots(1, 4)
    for axis, image, title in zip(axarr, images, titles):
        axis.imshow(image)
        axis.set_title(title)
def infer_on_pictures(model, indexes: list, all_pictures=all_pictures):
    """ Run the model on a few pictures and return the predicted class names.

    :param model: compiled model to run inference with.
    :param indexes: list of indexes into all_pictures.
    """
    request = model.create_infer_request()
    predicted_labels = []
    for idx in indexes:
        assert idx < 10000, 'Cannot get such index, there are only 10000'
        # add a leading batch dimension before feeding the network
        request.infer(inputs={'input.1': all_pictures[idx][None,]})
        scores = request.get_output_tensor(0).data
        predicted_labels.append(labels_names[np.argmax(scores[0])])
    return predicted_labels
indexes_to_infer = [7, 12, 15, 20] # to plot specify 4 indexes
plot_pictures(indexes_to_infer)
# Run both compiled models on the same samples and compare predicted labels.
results_float = infer_on_pictures(float_compiled_model, indexes_to_infer)
results_quanized = infer_on_pictures(quantized_compiled_model, indexes_to_infer)
print(f"Labels for picture from float model : {results_float}.")
print(f"Labels for picture from quantized model : {results_quanized}.")
```
| github_jupyter |
# Using PLIO to analyze control networks
PLIO is a general purpose library for reading data from various sources. In this workshop, we will be using PLIO's ability to read ISIS control networks into a Pandas dataframe.
```
# PLIO uses pysis for some other things. We don't technically need this but it avoids a warning.
import os
# Point pysis/ISIS at a shared install and its data area.
os.environ['ISISROOT'] = '/usgs/cpkgs/anaconda3_linux/envs/isis4.3.0'
os.environ['ISISDATA'] = '/usgs/cpkgs/isis3/isis_data'
# 3D plotting toolkit for matplotlib
from mpl_toolkits.mplot3d import Axes3D
# Numerical Python library
import numpy as np
```
# Our networks
All of this data was generously provided by Lynn Weller and Mike Bland from their Europa control project.
The first network is a very rough starting point. The Galileo images of Europa were put through the [findfeatures](https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/findfeatures/findfeatures.html) application and then all of the resulting networks were merged together. This network has many known issues including islands, massive residuals, and poor coverage.
The second network is the final network containing Galileo and Voyager images of Europa. The issues from the initial network have been resolved and the final point cloud covers the majority of the body.
```
# Paths to the rough initial (findfeatures-merged) and final Europa networks.
galileo_net = '/scratch/jmapel/europa/networks/GLL_FFCombined_thin_SubReg2_Del_2.net'
final_net = '/scratch/jmapel/europa/networks/GalileoVoyager_Europa_Merged_2020_CilixFree.net'
```
# The control network dataframe
PLIO directly ingests the data from the control network file. Each row in the dataframe is a single control measure and each column is a field from the protobuf control network. The data for control points is stored implicitly in its measures.
```
# This function is what reads a control network file
from plio.io.io_controlnetwork import from_isis
# One row per control measure; point data is repeated across its measures.
galileo_df = from_isis(galileo_net)
galileo_df.describe()
```
### Exercise: How many measures are there in the network? How many points are there in the network? How many images are there in the network?
tip: use len(dataframe) to find the number of rows in a dataframe
tip: use dataframe["columnName"].nunique() to find the number of unique values in a column
## Data types
The different columns of our dataframe store different types of data. The cell below shows all of the data types in the dataframe. You can see all of the different possible datatypes for a dataframe in the [pandas docs](https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes).
```
galileo_df.dtypes
```
Most of the data types are straightforward. For example, the line and sample are 64-bit floats. Let's dig into the more unusual types.
**pointType, measureType, aprioriSurfPointSource, and aprioriRadiusSource** are 64 bit integers, but those integers correspond to enumerations. For example, a pointType of 2 means Free. See the tables below for all of the enumerations
```
galileo_df[['pointType', 'measureType', 'aprioriSurfPointSource']].head()
```
<center>**pointType**</center>
| Value | Name |
| ----: | :---------------- |
| 0 | Tie (obsolete) |
| 1 | Ground (obsolete) |
| 2 | Free |
| 3 | Constrained |
| 4 | Fixed |
<center>**measureType**</center>
| Value | Name |
| ----: | :----------------- |
| 0 | Candidate |
| 1 | Manual |
| 2 | RegisteredPixel |
| 3 | RegisteredSubPixel |
<center>**aprioriSurfPointSource & aprioriRadiusSource **</center>
| Value | Name |
| ----: | :---------------- |
| 0 | None |
| 1 | User |
| 2 | AverageOfMeasures |
| 3 | Reference |
| 4 | Ellipsoid |
| 5 | DEM |
| 6 | Basemap |
| 7 | BundleSolution |
### Exercise: Have any measures in this network been sub-pixel registered?
tip: look at the measure types
**id, pointChoosername, pointDatetime, aprioriSurfPointSourceFile, aprioriRadiusSourceFile, serialnumber, measureChoosername, and measureDatetime** are all listed as objects but are simply strings.
```
galileo_df[['id', 'serialnumber', 'pointChoosername', 'pointDatetime', 'measureChoosername', 'measureDatetime']].head()
```
**adjustedCovar, pointLog, and measureLog** are more complicated. We will go over adjustedCovar later with the final Europa network. pointLog is leftover from older network formats and can be ignored. measureLog contains information about the registration of the measure.
```
galileo_df.loc[1,'measureLog']
```
## Data availability
Depending on how your network was generated and what processing has been done, many fields will not be set. If a numerical field has a value of 0, then it has not been set. For example, our network has not been bundle adjusted, so there are only a priori ground points.
```
galileo_df[['aprioriX', 'aprioriY', 'aprioriZ', 'adjustedX', 'adjustedY', 'adjustedZ']].describe()
```
### Exercise: Can you find all of the fields that are completely unset in our control network?
tip: numerical fields default to 0, strings default to an empty string "", and boolean values default to False.
You can also check which columns are default programmatically. The following cell checks if all of the values in a column are a default value.
```
(galileo_df==0).all() | (galileo_df=="").all() | (galileo_df==False).all()
```
# Looking at a bundle adjusted control network
Our Galileo network is interesting but networks have significantly more useful information in them after bundle adjustment. So, let's take a look at the final Europa network.
```
# The final network has been bundle adjusted, so adjusted fields are populated.
final_net_df = from_isis(final_net)
final_net_df.describe()
```
### Exercise: What fields are set in the bundle adjusted network that weren't previously?
## Analyzing the measures
The data in a control network dataframe is not always in the format we want to work with. The measure residuals are broken down into the line and sample residuals. The following cell computes the full magnitude of the residuals and adds it to the dataframe under the "residualMag" column.
```
final_net_df['residualMag'] = np.sqrt(final_net_df['sampleResidual']**2 + final_net_df['lineResidual']**2)
```
Now let's plot the residuals and see if we can form any theories. The next cell imports matplotlib for plotting tools and then plots the residuals in terms of sample and line residual. Note that the color of points is based on the residual magnitude, which should give a nice bullseye effect.
```
# This allows us to interact with our plots. This must be set before importing pyplot
%matplotlib notebook
# General plotting library
import matplotlib
import matplotlib.pyplot as plt
resid_fig = plt.figure(figsize=(6, 6))
resid_ax = resid_fig.add_subplot(111)
# Color each '+' by its residual magnitude to get the bullseye effect.
resid_scatter = resid_ax.scatter(final_net_df['sampleResidual'], final_net_df['lineResidual'], c=final_net_df['residualMag'], marker='+')
resid_ax.set_aspect('equal')
# Crosshair through the origin for reference.
plt.axhline(0, color='black')
plt.axvline(0, color='black')
resid_cbar = plt.colorbar(resid_scatter)
resid_fig.suptitle('Bundle Adjusted Measure Residuals')
resid_ax.set_xlabel('Sample Residual')
resid_ax.set_ylabel('Line Residual')
resid_cbar.set_label('Residual Magnitude')
plt.show()
```
We can also color our points based on other properties. Let's try and separate the measures out by mission. The serial numbers should help us so let's look at the serial numbers for all of our images.
```
final_net_df['serialnumber'].unique()
```
Each serial number starts with the mission name, which makes separating them out easy. All we need to do is check if the beginning of the serial number matches our mission.
The pd.DataFrame.str package allows us to do this type of string comparisons quickly and easily. Here we will use the DataFrame.str.startswith method.
```
# Serial numbers begin with the mission name, so startswith splits by mission.
final_galileo_df = final_net_df[final_net_df['serialnumber'].str.startswith('Galileo')]
final_voyager1_df = final_net_df[final_net_df['serialnumber'].str.startswith('Voyager1')]
final_voyager2_df = final_net_df[final_net_df['serialnumber'].str.startswith('Voyager2')]
```
Now let's plot the measures and color them based on their mission.
```
inst_resid_fig = plt.figure(figsize=(6, 6))
inst_resid_ax = inst_resid_fig.add_subplot(111)
# One scatter per mission; low alpha keeps overlapping '+' marks readable.
inst_resid_ax.scatter(final_galileo_df['sampleResidual'], final_galileo_df['lineResidual'], color='Green', marker='+', alpha=0.25, label='Galileo')
inst_resid_ax.scatter(final_voyager1_df['sampleResidual'], final_voyager1_df['lineResidual'], color='Red', marker='+', alpha=0.25, label='Voyager1')
inst_resid_ax.scatter(final_voyager2_df['sampleResidual'], final_voyager2_df['lineResidual'], color='Blue', marker='+', alpha=0.25, label='Voyager2')
inst_resid_ax.set_aspect('equal')
# Crosshair through the origin for reference.
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.legend()
inst_resid_fig.suptitle('Bundle Adjusted Measure Residuals by Mission')
inst_resid_ax.set_xlabel('Sample Residual')
inst_resid_ax.set_ylabel('Line Residual')
plt.show()
```
### What can you say about the residuals for the different missions based on our plot?
### Exercise: What are the descriptive statistics for the residual magnitude of the Galileo measures? What about for Voyager 1 and Voyager 2?
```
final_galileo_df['residualMag'].describe()
final_voyager1_df['residualMag'].describe()
# NOTE(review): only the last expression of a cell is displayed; wrap the
# first two in print(...) to actually see all three summaries.
final_voyager2_df['residualMag'].describe()
```
### Do you notice anything interesting about the residual magnitudes for the different instruments? How does this compare to what you noticed with the scatter plot?
We can even test if the measure residuals are normally distributed. The following cell performs a chi-squared test to see if the residual magnitudes could reasonably come from a normal distribution. This is important because it will tell us if we have large blunders in our network or systematic error from something like a bad sensor model.
```
# Statistics library
from scipy import stats

alpha = 1e-3  # significance level (99.9% confidence)
# D'Agostino-Pearson normality test. normaltest returns (statistic, p-value);
# the second value is the p-value, not the chi-squared statistic.
statistic, normal_test_result = stats.normaltest(final_voyager1_df['residualMag'])
print(f'Normality test p-value: {normal_test_result}')
# A p-value below alpha REJECTS the null hypothesis that the data are normal
# (the original cell had these two conclusions swapped).
if (normal_test_result < alpha):
    print("The residuals may not be normally distributed")
else:
    print("The residuals are consistent with a normal distribution")
```
## Analyzing the points
The information for control points is duplicated for each measure they have. So, the first step in looking at control point data is to extract only the data we want from the dataframe. This will make the dataframe easier to read and it will make things run quicker.
To do this, we're going to first extract all of the columns with point data. Then, we're going extract the first measure from each point. After all is said and done, we will have a dataframe with columns related to the point info and only one row for each point.
```
# Columns that describe control points (duplicated on every measure row).
point_columns = ['id',
'pointType',
'pointChoosername',
'pointDatetime',
'pointEditLock',
'pointIgnore',
'pointJigsawRejected',
'aprioriSurfPointSource',
'aprioriSurfPointSourceFile',
'aprioriRadiusSource',
'aprioriRadiusSourceFile',
'latitudeConstrained',
'longitudeConstrained',
'radiusConstrained',
'aprioriX',
'aprioriY',
'aprioriZ',
'aprioriCovar',
'adjustedX',
'adjustedY',
'adjustedZ',
'adjustedCovar',
'pointLog']
# Keep the first measure row of each point -> one row per control point.
final_points_df = final_net_df[point_columns].drop_duplicates('id')
final_points_df.describe()
```
Next, we're going to transform the point data so that it's more useful to us. This cell will take the (X, Y, Z) adjusted ground points and convert them to (lat, lon, radius) using a library called pyproj. pyproj is a very powerful projections library and can do many cartographic transformations and projections.
**Note: This cell will generate a warning because we are using old pyproj.Proj calls which will eventually need to change. For now we can ignore the warning.**
```
# Projection library for switching between rectangular and latitudinal
os.environ['PROJ_LIB'] = '/usgs/cpkgs/anaconda3_linux/envs/autocnet/share/proj'
import pyproj
# Compute the lat/lon/alt
# Triaxial Europa radii (a, b, c) in meters.
europa_radii = [1562600, 1560300, 1559500]
ecef = pyproj.Proj(proj='geocent', a=europa_radii[0], b=europa_radii[1], c=europa_radii[2])
lla = pyproj.Proj(proj='latlong', a=europa_radii[0], b=europa_radii[1], c=europa_radii[2])
# radians=True: the resulting lat/lon are in radians, not degrees.
lon, lat, alt = pyproj.transform(ecef, lla, final_points_df['adjustedX'].values, final_points_df['adjustedY'].values, final_points_df['adjustedZ'].values, radians=True)
# Store the data in the dataframe
final_points_df['latitude'] = lat
final_points_df['longitude'] = lon
final_points_df['altitude'] = alt
# We will also want the point radii
final_points_df['radius'] = np.sqrt(final_points_df['adjustedX']**2 + final_points_df['adjustedY']**2 + final_points_df['adjustedZ']**2)
```
Because of how we defined our projection, the latitude and longitude values will be in radians. Also, the longitude will be in 180 positive East. You can change this by modifying how you use pyproj but that is outside of this workshop.
```
final_points_df[["latitude", "longitude", "altitude", "radius"]].describe()
```
### Exercise: Convert the latitude and longitude from radians to degrees:
Similar to how we computed the residual magnitude, we want to compute the average residual magnitude for each point. The following cell goes back to our original dataframe, computes the mean point by point, and then saves all of the results in our new dataframe.
**Note: This cell can take a while to run because it has to re-access the dataframe for every point**
```
# Mean residual magnitude per point, computed with a single groupby and then
# mapped onto the point dataframe by id. This is vectorized and replaces the
# original per-point boolean re-filtering of the full dataframe, which was
# O(points x rows) and noted as slow.
mean_resid_by_id = final_net_df.groupby('id')['residualMag'].mean()
final_points_df["averageResidual"] = final_points_df['id'].map(mean_resid_by_id)
```
### Exercise: What is the 95th percentile for the average residuals?
## Plotting the points
Now that we have latitudes and longitudes for each point, we can generate some simple plots to look at them.
```
point_map = plt.figure(figsize=(10, 10))
point_ax = point_map.add_subplot(111)
point_ax.scatter(final_points_df["longitude"], final_points_df["latitude"], marker='+')
point_map.suptitle('Control Points')
point_ax.set_xlabel('Longitude')
point_ax.set_ylabel('Latitude')
plt.show()
```
It can also be helpful to color the points based on different values. The following cell draws the same plot but colors each point based on its average residual. Because the residuals are not uniformly distributed we also apply a logarithmic scale to the colors that you can see in the colorbar.
```
point_resid_map = plt.figure(figsize=(10, 10))
point_resid_ax = point_resid_map.add_subplot(111)
point_resid_norm = matplotlib.colors.LogNorm(vmax=final_points_df["averageResidual"].max())
point_resid_scatter = point_resid_ax.scatter(final_points_df["longitude"], final_points_df["latitude"], c=final_points_df["averageResidual"], alpha=0.5, norm=point_resid_norm, marker='+', cmap=plt.get_cmap('plasma'))
point_resid_cbar = plt.colorbar(point_resid_scatter)
point_resid_map.suptitle('Control Points')
point_resid_ax.set_xlabel('Longitude')
point_resid_ax.set_ylabel('Latitude')
point_resid_cbar.set_label('Average Residual Magnitude')
plt.show()
```
Plotting individual points can be helpful for getting a general idea of the distribution of the points, but it can be hard to interpret the data in areas where there are many points all on top of each other. So, let's combine nearby points and determine the residual based on the region.
To do this, we're going to bin the points into a regular grid across the latitude and longitude and then compute the mean within each bin.
**Try changing the grid_step value and re-running the two cells**
```
grid_step = 10
# Assign each point to the lower edge of its lat/lon grid cell (the greatest
# grid value not exceeding the coordinate).
final_points_df['lonBin'] = final_points_df['longitude'].apply(lambda x: [e for e in range(-180, 180, grid_step) if e <= x][-1])
final_points_df['latBin'] = final_points_df['latitude'].apply(lambda x: [e for e in range(-90, 90, grid_step) if e <= x][-1])
# Mean residual per occupied grid cell.
avg_resid_binned = final_points_df.groupby(['lonBin', 'latBin'])['averageResidual'].mean()
# Densify to the full grid; cells with no points default to 0.
filled_data = []
for lon_bin in range(-180, 180, grid_step):
    for lat_bin in range(-90, 90, grid_step):
        try:
            filled_data.append(avg_resid_binned.loc[lon_bin, lat_bin])
        except KeyError:
            # Fix: was a bare `except:`, which would also hide real errors
            # (typos, KeyboardInterrupt); only a missing cell is expected here.
            filled_data.append(0)
# Reshape lon-major, then transpose so rows correspond to latitude for imshow.
filled_data = np.array(filled_data).reshape((int(360/grid_step), int(180/grid_step))).T
avg_gridded = plt.figure(figsize=(10, 5))
avg_gridded_ax = avg_gridded.add_subplot(111)
avg_gridded_plot = avg_gridded_ax.imshow(filled_data, origin='lower', extent= [-180, 180, -90, 90], cmap=plt.get_cmap('plasma'))
avg_gridded_ax.scatter(final_points_df["longitude"], final_points_df["latitude"], color='black', marker='+', alpha=0.1)
avg_gridded_cbar = plt.colorbar(avg_gridded_plot)
avg_gridded.suptitle('Average Residual by lat/lon grid')
avg_gridded_ax.set_xlabel('Longitude')
avg_gridded_ax.set_ylabel('Latitude')
avg_gridded_cbar.set_label('Average Residual Magnitude')
plt.show()
```
## 3D Plotting
2D plotting either requires these simple equal area projections or converting to another projection via pyproj. Instead, let's look at our data in true 3D.
The following cell plots the same data as before but plots it in 3d instead of just a 2d projection
```
resid_fig_3d = plt.figure(figsize=(10, 10))
resid_ax_3d = resid_fig_3d.add_subplot(111, projection='3d')
resid_plot_3d = resid_ax_3d.scatter(final_points_df['adjustedX'], final_points_df['adjustedY'], final_points_df['adjustedZ'], c=final_points_df["averageResidual"], alpha=0.5, norm=point_resid_norm, marker='+', cmap=plt.get_cmap('plasma'))
resid_cbar_3d = plt.colorbar(resid_plot_3d)
resid_fig_3d.suptitle('3D Control Points')
resid_cbar_3d.set_label('Average Residual Magnitude (pix)')
plt.show()
```
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import pytz
# Only the capture time and vehicle id are needed for this analysis.
columns = ['Capture_time', 'Id']
data = pd.read_csv('evo_data_menor.csv', usecols=columns, nrows=500000)
data.head()
print(datetime.datetime.now())
# Collecting the unique vehicle ids
car_ids = list(data.Id.unique())
print(datetime.datetime.now())
# Removing uncommon (UUID-style) ids, which contain '-'
# Ex: 4c5865a3-4b03-40f6-a3a8-d4e94aae3b17
car_ids = [id for id in car_ids if id.find('-') == -1]
def str_to_datetime(df_time):
    """
    Parse timestamp strings into a single-column DataFrame of datetimes.

    Parameters
    ----------
    df_time : pandas.Series or iterable of str
        Strings to be converted to datetime.

    Returns
    ----------
    pandas.DataFrame
        Parsed datetimes for the possible Vancouver UTC offsets. Entries
        that match no known format become pd.NaT, so the result always
        aligns 1:1 with the input (the original silently dropped them,
        which misaligned the result when assigned back to a column).
    """
    # Standard Vancouver offset (-08:00) and its daylight-saving
    # counterpart (-07:00), with and without fractional seconds.
    format_string = ['%Y-%m-%d %H:%M:%S.%f-08:00', '%Y-%m-%d %H:%M:%S.%f-07:00',
                     '%Y-%m-%d %H:%M:%S-08:00', '%Y-%m-%d %H:%M:%S-07:00']
    date_list = []
    for date in df_time:
        for fmt in format_string:
            try:
                date_list.append(datetime.datetime.strptime(str(date), fmt))
                break
            except ValueError:  # wrong format -- try the next one
                pass
        else:
            # No format matched: keep a placeholder to preserve alignment.
            date_list.append(pd.NaT)
    return pd.DataFrame(date_list)
# Parse the capture timestamps into naive datetimes (Vancouver offsets).
data['Capture_time'] = str_to_datetime(data['Capture_time'])
data.head()
parked = 0
andando_weekdays = []
andando_weekends = []
# Sort chronologically. Fix: sort_index(by=...) was removed from pandas;
# sort_values(by=...) is the supported spelling. Then rebuild a contiguous index.
data = data.sort_values(by='Capture_time')
data.index = range(len(data))
print(datetime.datetime.now())
# Walk the whole dataframe to see how many cars are in use in each minute.
for i in range(1, len(data)):
    start_time_atual = int(data['Capture_time'].iloc[i].timestamp())
    start_time_anterior = int(data['Capture_time'].iloc[i-1].timestamp())
    # While still inside the same capture minute, count the parked cars.
    if (start_time_atual == start_time_anterior):
        parked += 1
    else:
        # Travelling cars = fleet size minus the cars currently parked.
        in_travel = len(car_ids) - parked
        porcentagem = (in_travel/len(car_ids))*100
        # strftime('%w') is 0 for Sunday; 1..5 covers Monday through Friday.
        if (int(datetime.datetime.fromtimestamp(start_time_anterior).strftime('%w')) > 0 and
            int(datetime.datetime.fromtimestamp(start_time_anterior).strftime('%w')) < 6):
            andando_weekdays.append([start_time_anterior, in_travel, porcentagem])
        else:
            andando_weekends.append([start_time_anterior, in_travel, porcentagem])
        parked = 0
print(datetime.datetime.now())
# Per-minute counts of travelling cars: [unix time, count, % of fleet].
dfIn_Travel_weekdays = pd.DataFrame(andando_weekdays, columns=['capture_time', 'total_in_travel', 'percentage'])
dfIn_Travel_weekends = pd.DataFrame(andando_weekends, columns=['capture_time', 'total_in_travel', 'percentage'])
dfIn_Travel_weekdays.head()
def from_timestamp_list(timestamp_list):
    """Convert unix timestamps into a one-column DataFrame of local datetimes."""
    converted = [datetime.datetime.fromtimestamp(int(ts)) for ts in timestamp_list]
    return pd.DataFrame(converted)
# Convert the data from unix timestamps back to datetime
dfWeekdays = dfIn_Travel_weekdays
dfWeekdays['capture_time'] = from_timestamp_list(dfWeekdays['capture_time'])
dfWeekends = dfIn_Travel_weekends
dfWeekends['capture_time'] = from_timestamp_list(dfWeekends['capture_time'])
# Plot of the percentage of cars in use on weekdays
plt.plot(dfWeekdays['capture_time'],dfWeekdays['percentage'])
plt.gcf().autofmt_xdate()
plt.show()
# Plot of the percentage of cars in use on weekends
plt.plot(dfWeekends['capture_time'],dfWeekends['percentage'])
plt.gcf().autofmt_xdate()
plt.show()
# Persist the per-minute counts so the expensive loop can be skipped later.
dfWeekends.to_csv('weekends.csv', index=False, encoding='utf-8')
dfWeekdays.to_csv('weekdays.csv', index=False, encoding='utf-8')
dfWeekdays = pd.read_csv('plots/weekdays.csv')
dfWeekends = pd.read_csv('plots/weekends.csv')
# Debug
dfWeekdays.capture_time = pd.to_datetime(dfWeekdays.capture_time)
dfWeekdays['minute'] = dfWeekdays.capture_time.dt.minute
dfWeekdays['hour'] = dfWeekdays.capture_time.dt.hour
# Important outlier worth inspecting (10:32 on weekdays)
dfWeekdays[(dfWeekdays.hour == 10) & (dfWeekdays.minute == 32)]
dfWeekdays['capture_time'] = pd.to_datetime(dfWeekdays['capture_time'])
dfWeekends['capture_time'] = pd.to_datetime(dfWeekends['capture_time'])
def media(df):
    """
    Average the minute-by-minute percentages over the whole dataset.

    Parameters
    ------------
    df : pandas.DataFrame
        Data to analyse, with a datetime 'capture_time' column and a
        'percentage' column.

    Returns
    ----------
    media : pandas.DataFrame
        One row per minute of the day: 'time' (HH:MM label), 'mean' and
        'std' of the percentages observed in that minute.
    """
    # Minutes elapsed since midnight identify each minute-of-day bucket.
    df['minute'] = df['capture_time'].dt.minute + df['capture_time'].dt.hour * 60
    df = df.sort_values(by=['minute', 'capture_time'])
    # groupby aggregates every bucket -- including the final one, which the
    # original hand-rolled loop silently dropped (it only emitted a bucket
    # when the next row changed minute, so the last group never flushed).
    # It also avoids DataFrame.append, removed in pandas 2.0.
    grouped = df.groupby('minute')['percentage'].agg(['mean', 'std'])
    labels = df.groupby('minute')['capture_time'].first().dt.strftime('%H:%M')
    media = pd.DataFrame({'time': labels.values,
                          'mean': grouped['mean'].values,
                          'std': grouped['std'].values})
    return media
# Average the percentages of each day, minute by minute
dfWeekdays = dfWeekdays.sort_values(by='capture_time')
mediaWeekdays = media(dfWeekdays)
dfWeekends = dfWeekends.sort_values(by='capture_time')
mediaWeekends = media(dfWeekends)
# Persist the aggregated results so this step can be skipped in later runs.
mediaWeekdays.to_csv('mediaWeekdays.csv', index=False, encoding='utf-8')
mediaWeekends.to_csv('mediaWeekends.csv', index=False, encoding='utf-8')
mediaWeekdays = pd.read_csv('plots/mediaWeekdays.csv')
mediaWeekends = pd.read_csv('plots/mediaWeekends.csv')
import numpy as np
# Plot of the average weekday percentages
fig, ax = plt.subplots()
# Curve of the cars currently in use
ax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean'], label='Carros Ocupados')
# One-standard-deviation band around the mean.
# Fix: alpha must lie within [0, 1]; the original alpha=150 is rejected by
# matplotlib's Artist.set_alpha, so 0.5 gives the intended translucency.
ax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']+mediaWeekdays['std'], alpha=0.5, c='gray', label='Desvio Padrão')
ax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']-mediaWeekdays['std'], alpha=0.5, c='gray')
# Re-label the x ticks as hours: 1440 minutes -> a tick every 120 min = 2 h.
ax.xaxis.set_ticks(np.arange(0, 1441, 120))
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
labels = range(0, 26, 2)
ax.set_xticklabels(labels)
# Legend and axis labels
plt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2)
plt.ylabel('Percentual')
plt.xlabel('Horário')
# Saving the plot
# plt.savefig('Weekdays_v2.pdf', bbox_inches='tight')
plt.show()
import numpy as np
# Plot of the average weekend percentages
fig, ax = plt.subplots()
# Curve of the cars currently reserved
ax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean'], label='Carros Reservados')
# One-standard-deviation band around the mean.
# Fix: alpha must lie within [0, 1]; the original alpha=150 is rejected by
# matplotlib's Artist.set_alpha, so 0.5 gives the intended translucency.
ax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']+mediaWeekends['std'], alpha=0.5, c='gray', label='Desvio Padrão')
ax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']-mediaWeekends['std'], alpha=0.5, c='gray')
# Re-label the x ticks as hours: 1440 minutes -> a tick every 120 min = 2 h.
ax.xaxis.set_ticks(np.arange(0, 1441, 120))
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
labels = range(0, 26, 2)
ax.set_xticklabels(labels)
# Legend and axis labels
plt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2)
plt.ylabel('Percentual')
plt.xlabel('Horário')
# Saving the plot
# plt.savefig('Weekends_v2.pdf', bbox_inches='tight')
plt.show()
# CSV criado a partir dos dados coletados do arquivo ModoApi_Data_Filter
dfTravels = pd.read_csv('travels.csv')
dfTravels['Start_time'] = str_to_datetime(dfTravels['Start_time'])
dfTravels['End_time'] = str_to_datetime(dfTravels['End_time'])
# The function must receive rows already split into weekday-only or
# weekend-only data.
def cont_reservas(dfDays):
"""Count, for each capture minute, how many reservations span that minute."""
# Collecting all of the capture minutes
datas = pd.to_datetime(dfDays['capture_time'])
datas = pd.DataFrame(datas)
dfReservas = pd.concat([dfTravels['Start_time'], dfTravels['End_time']], axis=1)
dfReservas = dfReservas.sort_values(by='Start_time')
# Outlier that was producing erroneous comparisons.
# NOTE(review): hard-coded row label for this specific dataset -- re-check
# if travels.csv ever changes.
dfReservas.drop(65240, axis=0, inplace=True)
cont_reservas = 0
reservas = []
# Helper holding the index of the nearest trip that spans the current time
proximo_start = 0
for i in range(len(datas)):
data = datas['capture_time'].iloc[i]
# Helper to avoid unnecessary analyses
# start_test = True
# Compare the current time against every reservation interval, counting it
# toward the percentage when the interval contains it
for j in range(proximo_start, len(dfReservas)):
# NOTE(review): dataset-specific positional skip -- verify if data changes.
if (j == 289): continue
if (dfReservas['Start_time'].iloc[j] <= data <= dfReservas['End_time'].iloc[j]):
cont_reservas += 1
# Avoids unnecessary comparisons with trips that ended before the time
# being analysed, on the idea that if a trip did not span the current
# time it will not span later ones either
# if (start_test) :
# if (proximo_start > 0): proximo_start = j - 1
# else: proximo_start = j
# start_test = False
# Avoid analysing trips that started after the current time
if (dfReservas['Start_time'].iloc[j] > data):
break
porcentagem = (cont_reservas/len(car_ids))*100
reservas.append([data, cont_reservas, porcentagem])
cont_reservas = 0
if (i % 100 == 0): print(str(i) + " "+str(proximo_start))
reservas = pd.DataFrame(reservas, columns=['datetime', 'total_reserves', 'percentage'])
return reservas
# Count reservations per capture minute for weekdays and weekends (slow).
dfR_Weekdays = cont_reservas(dfWeekdays)
dfR_Weekends = cont_reservas(dfWeekends)
# Cache the results to CSV...
dfR_Weekdays.to_csv('r_weekdays.csv', index=False, encoding='utf-8')
dfR_Weekends.to_csv('r_weekends.csv', index=False, encoding='utf-8')
# ...and reload the cached copies (note: read back from the plots/ folder).
dfR_Weekends = pd.read_csv('plots/r_weekends.csv')
dfR_Weekends['datetime']= pd.to_datetime(dfR_Weekends['datetime'])
dfR_Weekdays = pd.read_csv('plots/r_weekdays.csv')
dfR_Weekdays['datetime'] = pd.to_datetime(dfR_Weekdays['datetime'])
# Percentage of reserved cars on weekend days.
plt.plot(dfR_Weekends['datetime'],dfR_Weekends['percentage'])
plt.gcf().autofmt_xdate()
plt.show()
# Percentage of reserved cars on weekdays.
plt.plot(dfR_Weekdays['datetime'],dfR_Weekdays['percentage'])
plt.gcf().autofmt_xdate()
plt.show()
# Average the per-day percentages into a single daily profile; media() is
# defined elsewhere in the notebook — 32/20 are presumably the number of
# observed weekdays / weekend days (TODO confirm).
dfR_Weekdays = dfR_Weekdays.sort_values(by='datetime')
dfR_Weekdays['capture_time'] = dfR_Weekdays['datetime']
dfmediaR_Weekdays = media(dfR_Weekdays, 32)
dfR_Weekends = dfR_Weekends.sort_values(by='datetime')
dfR_Weekends['capture_time'] = dfR_Weekends['datetime']
dfmediaR_Weekends = media(dfR_Weekends, 20)
# Reload precomputed averages from disk.
dfmediaR_Weekdays = pd.read_csv('plots/media_r_weekdays.csv', encoding='utf-8')
dfmediaR_Weekends = pd.read_csv('plots/media_r_weekends.csv', encoding='utf-8')
import matplotlib
import numpy as np
matplotlib.rc('font', size=12)
# Side-by-side comparison: weekday (left) vs weekend (right) profiles of
# occupied vs reserved cars.  alpha must be a float in [0, 1]; the original
# alpha=150 was out of range, so the std-deviation bands now use alpha=0.5.
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(14,4.5)
# Weekdays: mean curve of occupied cars
ax1.plot(range(len(mediaWeekdays['time'])),mediaWeekdays['mean'], label='Carros Ocupados')
# One-standard-deviation band around the occupied-cars mean
ax1.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']+mediaWeekdays['std'], alpha=0.5, c='gray')
ax1.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']-mediaWeekdays['std'], alpha=0.5, c='gray')
# Weekdays: mean curve of reserved cars
ax1.plot(range(len(dfmediaR_Weekdays['time'])),dfmediaR_Weekdays['mean'], label='Carros Reservados', c='r', ls='--')
# One-standard-deviation band around the reserved-cars mean
ax1.plot(range(len(dfmediaR_Weekdays['time'])), dfmediaR_Weekdays['mean']+dfmediaR_Weekdays['std'], alpha=0.5, c='#FA8072', ls='--')
ax1.plot(range(len(dfmediaR_Weekdays['time'])), dfmediaR_Weekdays['mean']-dfmediaR_Weekdays['std'], alpha=0.5, c='#FA8072', ls='--')
# Hour ticks every 120 minutes (shown as 0..24h), percentage ticks every 10%
ax1.xaxis.set_ticks(np.arange(0, 1441, 120))
ax1.yaxis.set_ticks(np.arange(0, 110, 10))
fig.canvas.draw()
ax1.set_xticklabels(range(0, 26, 2))
# y axis from 0 to 100%
ax1.set_ylim([0,100])
# Legend and axis labels
ax1.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2)
ax1.set_ylabel('Percentual')
ax1.set_xlabel('Horário')
# Weekends: mean curve of occupied cars
ax2.plot(range(len(mediaWeekends['time'])),mediaWeekends['mean'], label='Carros Ocupados')
# One-standard-deviation band around the occupied-cars mean
ax2.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']+mediaWeekends['std'], alpha=0.5, c='gray')
ax2.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']-mediaWeekends['std'], alpha=0.5, c='gray')
# Weekends: mean curve of reserved cars
ax2.plot(range(len(dfmediaR_Weekends['time'])),dfmediaR_Weekends['mean'], label='Carros Reservados', c='r', ls='--')
# One-standard-deviation band around the reserved-cars mean
ax2.plot(range(len(dfmediaR_Weekends['time'])), dfmediaR_Weekends['mean']+dfmediaR_Weekends['std'], alpha=0.5, c='#FA8072', ls='--')
ax2.plot(range(len(dfmediaR_Weekends['time'])), dfmediaR_Weekends['mean']-dfmediaR_Weekends['std'], alpha=0.5, c='#FA8072', ls='--')
# Hour ticks every 120 minutes (shown as 0..24h), percentage ticks every 10%
ax2.xaxis.set_ticks(np.arange(0, 1441, 120))
ax2.yaxis.set_ticks(np.arange(0, 110, 10))
fig.canvas.draw()
ax2.set_xticklabels(range(0, 26, 2))
# y axis from 0 to 100%
ax2.set_ylim([0,100])
# Legend and axis labels
ax2.legend(bbox_to_anchor=(0.55, 0.99), loc=2, borderaxespad=0.1)
ax2.set_ylabel('Percentual')
ax2.set_xlabel('Horário')
plt.show()
# Uncomment to save instead of showing:
#plt.savefig('plots/ViagensPorHoras_Evo.pdf')
```
| github_jupyter |
```
# Core dependencies for the one-vs-all product-prediction pipeline.
import pandas as pd
from sklearn.tree import DecisionTreeClassifier as Tree
import re
# Silence the UndefinedMetricWarning raised when precision is computed with
# no samples predicted positive.
# NOTE(review): this blanket filter hides *all* warnings, not just that one.
import warnings
warnings.filterwarnings('ignore')
```
### Data transformations (from data analysis)
```
def transform(df, fillna=False):
    """Clean the Santander product dataset for model training.

    Drops identifier/near-constant columns, coerces numeric-looking object
    columns, converts S/N flags to booleans, handles missing values, one-hot
    encodes the remaining categoricals and drops constant columns.

    Note: mutates *df* in place (column deletions) and returns the cleaned
    frame.

    Args:
        df: raw DataFrame read from the Santander CSV.
        fillna: if True, fill NaNs with 0 instead of dropping the rows
            (needed for the test set, where no row may be dropped).

    Returns:
        The cleaned DataFrame.
    """
    # Remove identifier / leakage / uninformative columns (from data analysis).
    for col in ['ult_fec_cli_1t', 'conyuemp', 'tipodom', 'cod_prov',
                'pais_residencia', 'ncodpers', 'indrel', 'indrel_1mes',
                'ind_empleado', 'fecha_alta', 'fecha_dato']:
        del df[col]
    # Coerce numeric-looking columns; unparseable entries become NaN.
    # (pd.to_numeric replaces DataFrame.convert_objects, removed from pandas.)
    numerical_vars = ['age', 'antiguedad', 'renta']
    df[numerical_vars] = df[numerical_vars].apply(pd.to_numeric, errors='coerce')
    # Convert S/N flags to booleans.
    for var in ['indfall', 'indresi', 'indext']:
        df[var] = df[var] == 'S'
    # Handle missing values.
    if fillna:
        df = df.fillna(value=0)
    else:
        df = df.dropna()
    # One-hot encode the remaining categorical variables.
    categorical_vars = ['segmento', 'sexo', 'tiprel_1mes', 'canal_entrada', 'nomprov']
    df = pd.get_dummies(df, prefix=None, prefix_sep='_', dummy_na=False,
                        columns=categorical_vars, sparse=False, drop_first=False)
    # Remove (and report) columns that carry a single value, if any.
    for col in df.columns:
        if df[col].nunique() == 1:
            print(col)
            del df[col]
    return df
# Load a 2M-row sample of the training set and apply the cleaning above.
df_train = pd.read_csv('train_ver2.csv', nrows=2000000)
df_train = transform(df_train)
```
### First Shot at Prediction
After all required conversions have been made, I can make a first shot at predicting. The first question we need to ask is: what am I predicting?
I'm predicting consumption of a certain product. I have a total of 24 booleans that tell whether or not a customer consumed each product. These are my labels for a One-vs-All classification model.
```
# Separate the 24 product-ownership indicator columns ("ind_*_ult1");
# each becomes the target of its own one-vs-all classifier.
labels = [col for col in df_train.columns
          if col[:4] == 'ind_' and col[-4:] == 'ult1']
# Split into features X and targets y, then free the training frame.
X = df_train[df_train.columns.difference(labels)]
y = df_train[labels]
del df_train
# Load the test data.
X_test = pd.read_csv('test_ver2.csv')
# The submission report is keyed by customer id.
report = pd.DataFrame(X_test['ncodpers'])
classif_results = {}
# Apply the same cleaning as for training (fill NaNs so no test row is dropped).
X_test = transform(X_test, fillna=True)
# Align the one-hot-encoded test columns with the training columns: dummy
# categories unseen at train time are dropped and categories missing from the
# test set are added as all-zero columns, so the feature count matches what
# each classifier was fitted on.
X_test = X_test.reindex(columns=X.columns, fill_value=0)
# Train one decision tree per product; skip targets with a single class.
for label in labels:
    if y[label].nunique() != 1:
        clf = Tree()
        clf.fit(X, y[label])
        classif_results[label] = clf.predict(X_test)
# Free memory before assembling the report.
del X
del y
del X_test
# Map each boolean prediction vector to the product name (or '' if negative)...
def fn_name_labels(label, pred):
    return [label if x else '' for x in pred]
cf_list = [fn_name_labels(k, v) for k, v in classif_results.items()]
# ...and join the per-customer names into one whitespace-normalised string.
def fn_join_columns(names):
    return re.sub(r'\s+', ' ', ' '.join(names)).strip()
report['added_products'] = list(map(fn_join_columns, zip(*cf_list)))
# Peek at the first row (.ix was removed from pandas; use .iloc instead).
report['added_products'].iloc[0]
report.to_csv('round1.csv', header=True, index=False)
```
| github_jupyter |
# Activation Functions
This function introduces activation functions in TensorFlow
We start by loading the necessary libraries for this script.
```
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# TF1-style graph mode: clear any previously-built default graph so the
# notebook can be re-run from the top without duplicate-node errors.
# from tensorflow.python.framework import ops
# ops.reset_default_graph()
tf.reset_default_graph()
```
### Start a graph session
```
sess = tf.Session()  # TF1 session used to evaluate the activation ops below
```
### Initialize the X range values for plotting
```
# 100 evenly spaced x values in [-10, 10] used to plot every activation.
x_vals = np.linspace(start=-10., stop=10., num=100)
```
### Activation Functions:
ReLU activation
```
# ReLU: max(0, x) — the sample prints [0., 3., 10.]
print(sess.run(tf.nn.relu([-3., 3., 10.])))
y_relu = sess.run(tf.nn.relu(x_vals))
```
ReLU-6 activation
```
# ReLU-6: min(max(0, x), 6) — the sample prints [0., 3., 6.]
print(sess.run(tf.nn.relu6([-3., 3., 10.])))
y_relu6 = sess.run(tf.nn.relu6(x_vals))
```
ReLU-6 refers to the following function
\begin{equation}
\min\left(\max(0, x), 6\right)
\end{equation}
Sigmoid activation
```
# Sigmoid: 1 / (1 + exp(-x)), squashes values into (0, 1).
print(sess.run(tf.nn.sigmoid([-1., 0., 1.])))
y_sigmoid = sess.run(tf.nn.sigmoid(x_vals))
```
Hyper Tangent activation
```
# Hyperbolic tangent, squashes values into (-1, 1).
print(sess.run(tf.nn.tanh([-1., 0., 1.])))
y_tanh = sess.run(tf.nn.tanh(x_vals))
```
Softsign activation
```
# Softsign: x / (1 + |x|), a smoother tanh-like curve (see formula below).
print(sess.run(tf.nn.softsign([-1., 0., 1.])))
y_softsign = sess.run(tf.nn.softsign(x_vals))
```
softsign refers to the following function
\begin{equation}
\frac{x}{1 + |x|}
\end{equation}
<br>
<img src="http://tecmemo.wpblog.jp/wp-content/uploads/2017/01/activation_04.png" width=40%>
Softplus activation

```
# Softplus: log(exp(x) + 1), a smooth approximation of ReLU.
print(sess.run(tf.nn.softplus([-1., 0., 1.])))
y_softplus = sess.run(tf.nn.softplus(x_vals))
```
Softplus refers to the following function
\begin{equation}
\log\left(\exp(x) + 1\right)
\end{equation}
Exponential linear activation
```
# ELU: exp(x) - 1 for x < 0, identity for x >= 0 (see formula below).
print(sess.run(tf.nn.elu([-1., 0., 1.])))
y_elu = sess.run(tf.nn.elu(x_vals))
```
ELU refers to the following function
\begin{equation}\label{eq:}
f = \begin{cases}
\exp(x) - 1 &(x < 0 )\\
0 &(x \geq 0 )\\
\end{cases}
\end{equation}
### Plot the different functions
```
# First figure: the unbounded, ReLU-family activations.
plt.style.use('ggplot')
plt.plot(x_vals, y_softplus, 'r--', label='Softplus', linewidth=2)
plt.plot(x_vals, y_relu, 'b:', label='ReLU', linewidth=2)
plt.plot(x_vals, y_relu6, 'g-.', label='ReLU6', linewidth=2)
plt.plot(x_vals, y_elu, 'k-', label='ExpLU', linewidth=0.5)
plt.ylim([-1.5,7])
plt.legend(loc='upper left', shadow=True, edgecolor='k')
plt.show()
# Second figure: the saturating activations, bounded within about [-1, 1].
plt.plot(x_vals, y_sigmoid, 'r--', label='Sigmoid', linewidth=2)
plt.plot(x_vals, y_tanh, 'b:', label='Tanh', linewidth=2)
plt.plot(x_vals, y_softsign, 'g-.', label='Softsign', linewidth=2)
plt.ylim([-1.3,1.3])
plt.legend(loc='upper left', shadow=True, edgecolor='k')
plt.show()
```


| github_jupyter |
# The Penniless Pilgrim
I came across this TED Ed video with a riddle (by [Dan Finkel](https://mathforlove.com/who-am-i/dan-finkel/)) on YouTube:
```
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/6sBB-gRhfjE" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
```
It's simple enough: you're a traveller, without a penny to your name. You enter a town with a simple grid-based street layout, like this:
```
import networkx as nx
from matplotlib import pyplot as plt
import sys
# Grid labels: rows A..E (north to south), columns 1..5 (west to east).
AA = 'ABCDE'
BB = '12345'
def make_graph():
    """Build the 5x5 street-grid graph.

    Nodes are chessboard-style labels (row letter + column digit); each edge
    carries a 'direction' attribute, 'EW' for east-west streets and 'NS' for
    north-south ones.
    """
    grid = nx.Graph()
    for row in AA:
        for col in BB:
            grid.add_node(row + col)
    # East-west streets join horizontally adjacent intersections.
    for row in AA:
        for c1, c2 in zip(BB, BB[1:]):
            grid.add_edge(row + c1, row + c2, direction='EW')
    # North-south streets join vertically adjacent intersections.
    for r1, r2 in zip(AA, AA[1:]):
        for col in BB:
            grid.add_edge(r1 + col, r2 + col, direction='NS')
    return grid
g = make_graph()
# Plot layout: x is the column index, y counts down from the top so A1 lands
# top-left.  Use the AA/BB constants instead of repeating the literals so the
# layout stays in sync with the grid definition.
node_positions = {a + b: (j, len(AA) - i)
                  for (i, a) in enumerate(AA) for (j, b) in enumerate(BB)}
def draw_graph(g):
    """Draw the street grid: trodden streets red, the rest grey; the gate
    (A1), tourist information (A3) and the temple (E5) are shown in blue."""
    plt.figure(figsize=(4,4))
    nx.draw(g, pos=node_positions,
            edge_color=['r' if g[u][v].get('trodden') else 'grey' for (u,v) in g.edges],
            width=3,
            node_size=100,
            node_color=['b' if n in ('A1', 'A3', 'E5') else 'k' for n in g.nodes])
# The walk from the gate to tourist information (A1 -> A2 -> A3) is fixed,
# so mark those two streets as already used.
g['A1']['A2']['trodden'] = True
g['A2']['A3']['trodden'] = True
draw_graph(g)
```
You enter the town at the north-west gate, and walk two blocks to the tourist information office. Your goal is to reach the temple at the far (south-east) corner of town. At the tourist information, you learn that the town has a curious system of tolls levied on all trips along the town's streets:
* Your trip through town is taxed based on the route you take.
* You are not allowed to use the same street twice in a trip, but you *are* allowed to cross an intersection multiple times.
* Walking one block from west to east increases your tax bill by 2 silver.
* Walking one from east to west decreases your bill by 2.
* Walking one from north to south doubles you tax bill.
* Walking one from south to north halves your tax bill.
As you've already walked to tourist information, two blocks east of the gate, you currently owe 4 silver. You want to get to the temple, and **you have no money**. Can you get to the temple without ending up in debtors' prison?
One of the more direct routes, going due south and turning east at the southern wall, would cost 68 silver. A lot more than you have!
I must admit that I didn't spend a lot of time trying to figure out a solution before giving up and watching the rest of the video, which gives a nice and elegant path that end up costing you nothing.
### Python to the rescue
After the fact, I couldn't help but wonder if there are other free routes to the temple. If so, how many? Are there any on which you *make* money?
Thankfully, this is fairly easy to brute force on a capable computer.
If we describe the town layout as a graph `g` using (way overpowered) `networkx`, edges being streets and nodes, labelled chessboard-style, being intersections, we mark the paths we've already taken as *“trodden”*
```
# Re-mark the two streets already walked (idempotent repeat of the cell above).
g['A1']['A2']['trodden'] = True
g['A2']['A3']['trodden'] = True
```
and without too much effort we can figure out where we *could* go next from our current position, and what that would cost us. Add a bit of housekeeping to produce a new graph for every route with the trodden streets properly marked,
```
def possible_steps(g, pos, cost):
    """Yield (graph, destination, cost) for every untrodden street out of *pos*.

    East/west streets add/subtract 2 silver; north/south streets double/halve
    the bill.  Each yielded graph is a copy with the street just taken marked
    as trodden, so routes never reuse a street.
    """
    for dest, props in g[pos].items():
        if props.get('trodden'):
            continue
        if props['direction'] == 'NS':
            # Row letters grow to the south: southward doubles, northward halves.
            new_cost = cost * 2 if dest[0] > pos[0] else cost / 2
        else:
            # Column digits grow to the east: eastward +2, westward -2.
            new_cost = cost + 2 if dest[1] > pos[1] else cost - 2
        stepped = g.copy()
        stepped[pos][dest]['trodden'] = True
        yield stepped, dest, new_cost
```
… and all we have to do is walk the graph!
I'll be doing this depth-first, as it were, as there's no easy way to discard partial routes that I can be bothered to think of.
```
def walk_on(g, steps, cost, dest=AA[-1]+BB[-1]):
    """Depth-first enumeration of every legal route that reaches *dest*.

    Extends the partial route *steps* one street at a time via
    possible_steps(), yielding a (graph, steps, cost) triple for each
    completed route; partial routes are explored recursively.
    """
    here = steps[-1]
    for trodden_g, nxt, nxt_cost in possible_steps(g, here, cost):
        route = steps + [nxt]
        if nxt == dest:
            yield trodden_g, route, nxt_cost
        else:
            yield from walk_on(trodden_g, route, nxt_cost)
```
This only takes about ten minutes on a single core of my aging PC. It should be easily parallelizable, but that's not for the here and now.
```
# Exhaustively enumerate every legal route from tourist information (A3)
# to the temple, starting with the 4 silver already owed.
solutions = []
for solu in walk_on(g, ['A1', 'A2', 'A3'], 4):
    solutions.append(solu)
    # Progress counter, rewritten in place via the carriage return.
    sys.stdout.write(f'\r{len(solutions)}')
    sys.stdout.flush()
print(f'\n{len(solutions)} solutions found, min cost {min(c for g, s, c in solutions)}')
# Keep only routes we can afford (cost <= 0 means free or a refund).
optima = [(g, s, c) for (g, s, c) in solutions if c <= 0]
print(f'{len(optima)} routes free or better')
```
It turns out that of the 58192 allowed routes, we can afford a grand total of 6, some of which will, actually, give us a healthy tax refund of up to 4 shiny silver coins.
What do they look like (and are they correct)?
```
def explain_cost(steps):
    """Replay *steps* on a fresh graph and return a line-per-step breakdown
    of the running tax bill (used to double-check a route's cost)."""
    expl = ''
    g = make_graph()
    cost = 0
    for prev, step in zip(steps[:-1], steps[1:]):
        # Find the candidate move matching the recorded step; the for/else
        # fires only when no untrodden street leads there (invalid route).
        for new_g, next_step, new_cost in possible_steps(g, prev, cost):
            if next_step == step:
                g = new_g
                cost = new_cost
                break
        else:
            expl += 'ERROR\n'
            return expl
        expl += f'{prev} to {step} owing {cost}\n'
    return expl
def draw_path(g, s, c):
    """Draw route *s* on graph *g*: streets numbered in walking order, total
    cost *c* in the title and the step-by-step bill printed alongside."""
    draw_graph(g)
    nx.draw_networkx_edge_labels(g, pos=node_positions,
                   edge_labels={(u, v): str(i+1) for (i, (u,v)) in enumerate(zip(s[:-1], s[1:]))})
    plt.title(f'Cost: {c} silver')
    plt.figtext(1.1, 0, explain_cost(s))
# Draw every affordable route.
for (g, s, c) in optima:
    draw_path(g, s, c)
```
Reassuringly, this found the canonical route:
```
# Presumably the canonical route from the video — verify against the plot.
draw_path(*optima[5])
# Route lengths (intersections visited) of the affordable routes.
[len(s) for (g, s, c) in optima]
```
This is, also, the shortest route we can afford. However, it's not the best. The best route involves a minor detour down the pub at C2:
```
# Draw the best (most profitable) affordable route.
draw_path(*optima[4])
```
| github_jupyter |
# Bulk Labelling as a Notebook
This notebook contains a convenient pattern to cluster and label new text data. The end-goal is to discover intents that might be used in a virtual assistant setting. This can be especially useful in an early stage and is part of the "iterate on your data"-mindset.
## Dependencies
You'll need to install a few things to get started.
- [whatlies](https://rasahq.github.io/whatlies/)
- [human-learn](https://koaning.github.io/human-learn/)
You can install both tools by running this line in an empty cell;
```python
%pip install "whatlies[tfhub]" "human-learn"
```
We use `whatlies` to fetch embeddings and to handle the dimensionality reduction. We use `human-learn` for the interactive labelling interface. Feel free to check the documentation of both packages to learn more.
## Let's go
To get started we'll first import a few tools.
```
import pathlib
import numpy as np
from whatlies.language import CountVectorLanguage, UniversalSentenceLanguage, BytePairLanguage, SentenceTFMLanguage
from whatlies.transformers import Pca, Umap
```
Next we will load in some embedding frameworks. There can be very heavy, just so you know!
```
# Embedding backends; the pretrained models below are large downloads.
lang_cv = CountVectorLanguage(10)
lang_use = UniversalSentenceLanguage("large")
lang_bp = BytePairLanguage("en", dim=300, vs=200_000)
lang_brt = SentenceTFMLanguage('distilbert-base-nli-stsb-mean-tokens')
```
Next we'll load in the texts that we'd like to embed/cluster. Feel free to provide another file here.
```
# Pull candidate sentences out of nlu.md: drop '#' header lines, strip the
# " - " list-item prefix, and de-duplicate.
txt = pathlib.Path("nlu.md").read_text()
texts = list(set([t.replace(" - ", "") for t in txt.split("\n") if len(t) > 0 and t[0] != "#"]))
print(f"We're going to plot {len(texts)} texts.")
```
Keep in mind that it's better to start out with 1000 sentences or so. Much more might break the browser's memory in the next visual.
## Showing Clusters

The cell below will take the texts and have them pass through different language backends. After this they will be mapped to a two dimensional space by using [UMAP](https://umap-learn.readthedocs.io/en/latest/). It takes a while to plot everything (mainly because the universal sentence encoder and the transformer language models are heavy).
```
%%time
def make_plot(lang):
    """Embed all texts with *lang*, reduce to 2-D with UMAP, and return an
    interactive scatter titled with the backend's class name."""
    return (lang[texts]
        .transform(Umap(2))
        .plot_interactive(annot=False)
        .properties(width=200, height=200, title=type(lang).__name__))
# The charts compose side by side with the | operator.
make_plot(lang_cv) | make_plot(lang_bp) | make_plot(lang_use) | make_plot(lang_brt)
```
What you see are four charts. You should notice that certain clusters have appeared. For your usecase you might need to check which language backend makes the most sense.
## Note for Non-English
The only model shown here that is English specific is the universal sentence encoder (`lang_use`). All the other ones also support other languages. For more information check the [bytepair documentation](https://nlp.h-its.org/bpemb/) and the [sentence transformer documentation](https://www.sbert.net/docs/pretrained_models.html#multi-lingual-models).
## Towards Labelling
We'll now prepare a dataframe that we'll assign labels to. We'll do that by loading in the same text file but now into a pandas dataframe.
```
# Frame to label: 2-D UMAP coordinates of the USE embeddings per text,
# plus an initially empty label column.
df = lang_use[texts].transform(Umap(2)).to_dataframe().reset_index()
df.columns = ['text', 'd1', 'd2']
df['label'] = ''
df.shape[0]
```
We are now going to be labelling!
# Fancy interactive drawing!
We'll be using Vincent's infamous [human-learn library](https://koaning.github.io/human-learn/guide/drawing-features/custom-features.html) for this. First we'll need to instantiate some charts.
Next we get to draw! Drawing can be a bit tricky though, so pay attention.
1. You'll want to double-click to start drawing.
2. You can then click points together to form a polygon.
3. Next you need to double-click to stop drawing.
This allows you to draw polygons that can be used in the code below to fetch the examples that you're interested in.
## Rerun
This is where we will start labelling. That also means that we might re-run this cell after we've added labels.
```
from hulearn.experimental.interactive import InteractiveCharts
# Only the still-unlabelled rows are offered for polygon drawing.
charts = InteractiveCharts(df.loc[lambda d: d['label'] == ''], labels=['group'])
charts.add_chart(x='d1', y='d2')
```
We can now use this selection to retrieve a subset of rows. This is a quick verification to see if the points you select indeed belong to the same cluster.
```
from hulearn.preprocessing import InteractivePreprocessor
# Turn the drawn polygons into a 'group' feature; group != 0 means the row
# falls inside at least one polygon.
tfm = InteractivePreprocessor(json_desc=charts.data())
df.pipe(tfm.pandas_pipe).loc[lambda d: d['group'] != 0].head(10)
```
If you're confident that you'd like to assign a label, you can do so below.
```
# Name to attach to every point inside the drawn polygon(s).
label_name = 'weather'
# Rows whose 'group' is non-zero fall inside a drawn polygon.  (The original
# cell had a stray bare `idx` expression here, which raised NameError on a
# fresh run because the name was not yet assigned.)
idx = df.pipe(tfm.pandas_pipe).loc[lambda d: d['group'] != 0].index
# Column 3 is 'label' (columns: text, d1, d2, label) — positional indices
# coincide with labels here because df has a default RangeIndex.
df.iloc[idx, 3] = label_name
print(f"We just assigned {len(idx)} labels!")
```
That's it! You've just attached a label to a group of points!
## Rerun
You can now scroll up and start relabelling clusters that aren't assigned yet. Once you're confident that this works, you can export by running the final code below.
```
# Inspect and export the labelled data.
df.head()
df.to_csv("first_order_labelled.csv")
```
## Final Notes
There's a few things to mention.
1. This method of labelling is great when you're working on version 0 of something. It'll get you a whole lot of data fast but it won't be high quality data.
2. The use-case for this method might be at the start of design a virtual assistant. You've probably got data from social media that you'd like to use as a source of inspiration for intents. This is certainly a valid starting point but you should be aware that the language that folks use on a feedback form is different than the language used in a chatbox. Again, these labels are a reasonable starting point, but they should not be regarded as ground truth.
3. Labelling is only part of the goal here. Another big part is understanding the data. This is very much a qualitative/human task. You might be able to quickly label 1000 points in 5 minutes with this technique but you'll lack an understanding if you don't take the time for it.
| github_jupyter |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
<!--NAVIGATION-->
< [Visualization with Seaborn](04.14-Visualization-With-Seaborn.ipynb) | [Contents](Index.ipynb) | [Machine Learning](05.00-Machine-Learning.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/04.15-Further-Resources.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
# Further Resources
## Matplotlib Resources
A single chapter in a book can never hope to cover all the available features and plot types available in Matplotlib.
As with other packages we've seen, liberal use of IPython's tab-completion and help functions (see [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb)) can be very helpful when exploring Matplotlib's API.
In addition, Matplotlib’s [online documentation](http://matplotlib.org/) can be a helpful reference.
See in particular the [Matplotlib gallery](http://matplotlib.org/gallery.html) linked on that page: it shows thumbnails of hundreds of different plot types, each one linked to a page with the Python code snippet used to generate it.
In this way, you can visually inspect and learn about a wide range of different plotting styles and visualization techniques.
For a book-length treatment of Matplotlib, I would recommend [*Interactive Applications Using Matplotlib*](https://www.packtpub.com/application-development/interactive-applications-using-matplotlib), written by Matplotlib core developer Ben Root.
## Other Python Graphics Libraries
Although Matplotlib is the most prominent Python visualization library, there are other more modern tools that are worth exploring as well.
I'll mention a few of them briefly here:
- [Bokeh](http://bokeh.pydata.org) is a JavaScript visualization library with a Python frontend that creates highly interactive visualizations capable of handling very large and/or streaming datasets. The Python front-end outputs a JSON data structure that can be interpreted by the Bokeh JS engine.
- [Plotly](http://plot.ly) is the eponymous open source product of the Plotly company, and is similar in spirit to Bokeh. Because Plotly is the main product of a startup, it is receiving a high level of development effort. Use of the library is entirely free.
- [Vispy](http://vispy.org/) is an actively developed project focused on dynamic visualizations of very large datasets. Because it is built to target OpenGL and make use of efficient graphics processors in your computer, it is able to render some quite large and stunning visualizations.
- [Vega](https://vega.github.io/) and [Vega-Lite](https://vega.github.io/vega-lite) are declarative graphics representations, and are the product of years of research into the fundamental language of data visualization. The reference rendering implementation is JavaScript, but the API is language agnostic. There is a Python API under development in the [Altair](https://altair-viz.github.io/) package. Though as of summer 2016 it's not yet fully mature, I'm quite excited for the possibilities of this project to provide a common reference point for visualization in Python and other languages.
The visualization space in the Python community is very dynamic, and I fully expect this list to be out of date as soon as it is published.
Keep an eye out for what's coming in the future!
<!--NAVIGATION-->
< [Visualization with Seaborn](04.14-Visualization-With-Seaborn.ipynb) | [Contents](Index.ipynb) | [Machine Learning](05.00-Machine-Learning.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/04.15-Further-Resources.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
| github_jupyter |
# PyWRspice Wrapper Tutorial: Run simulation on remote SSH server
#### Prerequisite:
* You need to complete the *Tutorial.ipynb* notebook first.
Here we assume you are already famililar with running PyWRspice on a local computer.
```
# Add pyWRspice location to system path, if you haven't run setup.py
import sys
sys.path.append("../")
import numpy as np
import logging, importlib
from pyWRspice import script, simulation, remote
import matplotlib.pyplot as plt
%matplotlib inline
# Only show warnings and above from the wrapper's loggers.
logging.basicConfig(level=logging.WARNING)
```
### 0. Set up a connection to an SSH server
Assume you store login info into a variable ```ssh_login = (server_name, user_name, password)```
and specify local directory as ```local_dir```, and remote directory as ```remote_dir``` to store simulation related temporary files.
Set up a handler
```
# ssh_login = (server, user, password) must be defined beforehand (see text).
# local_dir/remote_dir hold the simulation's temporary files; `command` is
# the WRspice executable path on the remote server.
engine_remote = remote.WRWrapperSSH(ssh_login[0],ssh_login[1],ssh_login[2],
                local_dir=local_dir,
                remote_dir=remote_dir,
                command = "/usr/local/xictools/bin/wrspice")
```
## 1. Run a WRspice script one time
Let's try to run the same script from *Tutorial.ipynb*, this time on an SSH server.
```
# WRspice netlist template: {cap}, {dur} and {output_file} are filled in by
# the wrapper at run time.
script2 = """* Transient response of RLC circuit
.tran 50p 100n
* RLC model of a transmission line
R1 1 2 0.1
L1 2 3 1n
C1 3 0 {cap}p
R2 3 0 1e3
* Load impedance
Rload 3 0 50
* Pulse voltage source
V1 1 0 pulse(0 1 1n 1n 1n {dur}n)
*
.control
run
set filetype=binary
write {output_file} v(2) v(3)
.endc
"""
```
We then specify the values of ```cap``` and ```dur``` when execute the script with the ```run``` function, the same way we would do when running on local machine.
```
# Run the script once on the server with cap=30 pF and dur=40 ns.
dat2 = engine_remote.run(script2,cap=30, dur=40)
# Extract the data: row 0 is time, rows 1..2 the saved node voltages.
dat2 = dat2.to_array()
ts = dat2[0]
v2 = dat2[1]
v3 = dat2[2]
# Plot the data
fig = plt.figure(figsize=(12,6))
plt.plot(ts*1e9, v2, label="v(2)")
plt.plot(ts*1e9, v3, label="v(3)")
plt.xlabel("Time [ns]")
plt.ylabel("Voltage [V]")
plt.legend()
plt.show()
```
## 2. Run WRspice script with multiple parametric values in parallel
We can pass a list of values to one or more parameters and run them all in parallel, using multiprocessing, with the ```run_parallel()``` method, almost the same way as running on a local machine, except that we now have a few more options on how to handle the files.
Comparing to its local version, the remote function ```run_parallel``` has 2 new options:
* ```save_file```: The function create a series of files (to be explained later) on the local and remote machines, such as circuit files and output files (after execution). To remove these files, set ```save_file=True``` (default).
* ```read_raw```: By default (True), the function will read the output raw files into memory. If the output data can be too large, consider set ```read_raw=False```, then the returned value is the list of output filenames (to be manually imported later).
One can control how the function returns by the parameter ```reshape```: If True, return (params,values) whose shapes are the same (same as the local version). If False, return a pandas DataFrame object containing the params and results in the column ```result```.
```
# Show the run_parallel docstring (IPython help syntax).
engine_remote.run_parallel?
```
#### Simple case: The same way as local version.
```
# Sweep a 3x2 grid of (cap, dur) values; run_parallel fans the six
# simulations out over multiprocessing on the server.
params = {}
params["cap"] = [20,50,100]
params["dur"] = [40,60]
params3, dat3 = engine_remote.run_parallel(script2,save_file=False,**params)
```
Because ```reshape=True``` by default, the returned values are the same as in the local case.
```
# Examine the returned parameter values (grid-shaped arrays).
for k,v in params3.items():
    print("%s = %s" %(k,v))
print("")
# Shape of the returned data grid (3 cap values x 2 dur values).
dat3.shape
# Plot v(3) for every (cap, dur) combination.
fig = plt.figure(figsize=(12,6))
shape = dat3.shape
for i in range(shape[0]):
    for j in range(shape[1]):
        dat = dat3[i,j].to_array()
        ts = dat[0]
        v3 = dat[2]
        plt.plot(ts*1e9, v3, label="cap=%s[pF], dur=%s[ns]" %(params3["cap"][i,j],params3["dur"][i,j]))
plt.xlabel("Time [ns]")
plt.ylabel("Voltage [V]")
plt.legend()
plt.show()
```
#### A more controlled case: turn off ```reshape``` and ```read_raw```
```
# Same sweep, but keep the raw results on disk (read_raw=False) and return a
# flat DataFrame (reshape=False) instead of reshaped arrays.
params = {}
params["cap"] = [20,50,100]
params["dur"] = [40,60]
dat4 = engine_remote.run_parallel(script2,save_file=False,reshape=False,read_raw=False,**params)
```
Because in this case ```reshape=False```, the returned value is a pandas DataFrame with all the simulation parameters and output.
```
dat4
```
Because ```read_raw=False```, the returned output is a list of output raw filenames.
```
for fname in dat4["result"]:
print(fname)
```
So we need to do some extra steps to read the output data and reshape them. We can do so manually, or run the function ```reshape_results```.
```
params4, dat4r = engine_remote.reshape_results(dat4,params)
# Examine the returned parameter values
for k,v in params4.items():
print("%s = %s" %(k,v))
print("")
# Get the shape of the returned data
# Note that it is an array of output raw filenames
dat4r.shape
# Plot the data
# Plot each trace; with read_raw=False the array holds filenames, so every
# raw file is imported on demand via the RawFile class.
fig = plt.figure(figsize=(12,6))
shape = dat4r.shape
for i in range(shape[0]):
    for j in range(shape[1]):
        dat = simulation.RawFile(dat4r[i,j]).to_array() # Need to import the raw file using RawFile class
        ts = dat[0]
        v3 = dat[2]
        # BUGFIX: label with params4, this run's parameter grid; the old code
        # reused params3 from the previous example.
        plt.plot(ts*1e9, v3, label="cap=%s[pF], dur=%s[ns]" %(params4["cap"][i,j],params4["dur"][i,j]))
plt.xlabel("Time [ns]")
plt.ylabel("Voltage [V]")
plt.legend()
plt.show()
```
## 3. Run long simulation on server
The ways we have run the simulation so far are appropriate for rather light-weight simulation which is expected to be completed in an hour or so on the server. When running heavy simulation on the server, we want to have the simulation running while we can disconnect the SSH connection, then we can be back later to collect the output.
The way to do so is to break up the function ```run_parallel``` into multiple steps: prepare the files needed for the simulation on the server, then manually execute the simulation, then collect the result.
#### Prepare the files
The function ```prepare_parallel``` creates local and remote copies of the circuit files. It returns a configuration file containing information for execution.
If there are additional files needed for the simulation (e.g. input files), they have to be copied to the server by the function ```put```.
```
fconfig = engine_remote.prepare_parallel(script2,**params)
print(fconfig)
# Let's read the first line of fconfig. We can get the local path by function local_fname
with open(engine_remote.local_fname(fconfig),'r') as f:
print(f.readline())
```
#### Execute
As shown above, the command to execute the simulation is ```python run_parallel.py simconfig_20200107_173940.csv --processes=<num>```. How to do it (safely):
1. Manually SSH log in to the server
2. Change directory ```cd``` to the working directory ```engine_remote.remote_dir```
3. Create a separate session by running the command ```screen``` (or ```screen -S <name>``` to specify the screen name)
4. Run the above command: ```python run_parallel.py simconfig_20200107_173940.csv``` with optional ```--processes=<num>``` (```num``` is the number of processes in parallel, default is 64)
5. Then hit ```Ctrl + a``` and ```d``` to detach from the screen session
6. Now you can disconnect from the SSH server. The job will continue in the background.
#### Collect results
Two ways to check if the job is done:
* Manually log in to the server, change to ```remote_dir``` and check if the file ```finish_<fconfig>.txt``` exists (when the simulation is completed, it will create that file).
* Run the function ```get_results``` to automatically check and collect the output files.
```
dat5 = engine_remote.get_results(fconfig,timeout=100,read_raw=False)
```
Because we set ```read_raw=False```, the returned ```dat5``` is the same as ```dat4``` above. We need to run extra steps to reshape and read the output.
```
params5, dat5r = engine_remote.reshape_results(dat5,params)
# The results should be the same as params4 and dat4r above
# Finally, after analyzing the results, we need to remove the temp files (circuit and output files, etc)
engine_remote.remove_fconfig(fconfig, dest="both") # Set dest="local" or dest="remote" if necessary
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Airport-data" data-toc-modified-id="Airport-data-1"><span class="toc-item-num">1 </span>Airport data</a></span></li><li><span><a href="#Get-all-weather-data-..." data-toc-modified-id="Get-all-weather-data-...-2"><span class="toc-item-num">2 </span>Get all weather data ...</a></span></li><li><span><a href="#...-and-extract-weather-close-to-airports" data-toc-modified-id="...-and-extract-weather-close-to-airports-3"><span class="toc-item-num">3 </span>... and extract weather close to airports</a></span></li><li><span><a href="#Weather-patterns" data-toc-modified-id="Weather-patterns-4"><span class="toc-item-num">4 </span>Weather patterns</a></span></li><li><span><a href="#Correlations-from-top-10-pairs" data-toc-modified-id="Correlations-from-top-10-pairs-5"><span class="toc-item-num">5 </span>Correlations from top 10 pairs</a></span></li></ul></div>
# Airport data
Combine airport codes and top airports into a table with relevant info (name, wikipedia link, latitude, longitute).
```
import pandas as pd
# load top 50 airports
top50 = pd.read_csv("hw_5_data/top_airports.csv")
top50_mini = top50[["ICAO", "Airport"]]
# load aiport info
codes = pd.read_csv("hw_5_data/ICAO_airports.csv")
codes_mini = codes[["ident", "latitude_deg", "longitude_deg", "wikipedia_link"]]
codes_mini = codes_mini.rename(columns={'ident': "ICAO"})
# combine data into one table
data = pd.merge(top50_mini, codes_mini, how="left", on="ICAO")
```
Load this table into a database.
```
from sqlalchemy import *
# initiate engine
engine = create_engine("sqlite:///airport_info.db")
# convert pd to list
data.to_sql(name='airports', con=engine)
```
# Get all weather data ...
# ... and extract weather close to airports
```
## if need to reload data, just do this instead to save time!
# import xarray as xr
# airport_weather = xr.open_dataset('airport_weather.nc')
```
Get historical weather information (min/max temperature, relative humidity, perciptation) for 1990-2000 from dataset used in class (with xarray/netCDF4).
```
# load weather data!
import xarray as xr
# temp: tasmax/tasmin, RH: rhsmax/rhsmin, precipitation: pr
datatypes = ["tasmax", "tasmin", "rhsmax", "rhsmin", "pr"]
datatypes_str = ["air_temperature", "air_temperature", "relative_humidity", "relative_humidity", "precipitation"]
datatypes_nice = ["air_temperature_max", "air_temperature_min", "relative_humidity_max", "relative_humidity_min", "precipitation"]
storage = []
for i in range(len(datatypes)):
data_path = ("http://thredds.northwestknowledge.net:8080/"
f"thredds/dodsC/agg_macav2metdata_{datatypes[i]}"
"_BNU-ESM_r1i1p1_historical_1950_2005_CONUS_daily.nc"
)
storage.append(xr.open_dataset(data_path).rename({datatypes_str[i] : datatypes_nice[i]}))
# combine all data types
weather_data = xr.merge(storage)
# reduce to time window of interest
time_window = pd.date_range(start='1/1/1990', end='12/31/2000', freq='D')
weather_data = weather_data.sel(time=time_window)
```
Just keep weather data for coordinates closest to each airport.
```
# for each airport, find closest (long, lat) in weather data
import numpy as np
def find_closest(weather_long, weather_lat, this_long, this_lat):
    """
    Find the indices of the weather grid point closest to an airport's
    longitude and latitude coordinates.

    Parameters
    ----------
    weather_long : np array
        Vector of floats with longitude coordinates for all weather stations
        (in the 0..360 convention used by the weather dataset)
    weather_lat : np array
        Vector of floats with latitude coordinates for all weather stations
    this_long : float
        Longitude coordinate for this airport (in the -180..180 convention)
    this_lat : float
        Latitude coordinate for this airport

    Returns
    -------
    long_idx : int
        Index of the closest longitude in ``weather_long``
    lat_idx : int
        Index of the closest latitude in ``weather_lat``
    """
    # shift weather longitudes from [0, 360) to the airport convention
    weather_long_adjusted = weather_long - 360
    # find closest long idx:
    long_idx = np.abs(weather_long_adjusted - this_long).argmin()
    # find closest lat idx:
    lat_idx = np.abs(weather_lat - this_lat).argmin()
    return long_idx, lat_idx
# extract all of the long, lat coordinates from the weather data
weather_long = weather_data["lon"].values
weather_lat = weather_data["lat"].values
# extract all of the airport long, lat coordinates
airport_long = engine.execute("SELECT longitude_deg FROM airports").fetchall()
airport_lat = engine.execute("SELECT latitude_deg FROM airports").fetchall()
# get airport names
airport_ICAO = engine.execute("SELECT ICAO FROM airports").fetchall()
# identify the closest weather station for each airport
weather_idx = [find_closest(weather_long, weather_lat, airport_long[i][0], airport_lat[i][0])
for i in range(len(airport_long))] # (long, lat)
# pull out weather data closest to each airport and concat into a new xarray
storage = []
for i in range(len(airport_ICAO)):
port_data = weather_data.sel(lon=weather_long[weather_idx[i][0]],
lat=weather_lat[weather_idx[i][1]])
storage.append(port_data.assign_coords({"airport_ICAO": airport_ICAO[i][0]}).drop(["lon","lat","crs"]))
%%time
airport_weather = xr.concat(storage, "airport_ICAO", coords="different")
airport_weather.to_netcdf('airport_weather.nc')
airport_weather
```
This method of concatenating data is pretty slow... need to look for a better way to flatten the weather (long, lat) coordinates to restrict to the top airports.
# Weather patterns
I'll compare the temperature and precipitation at pairs of airports by correlating the timeseries. I'm assuming we're interested in comparisons between different airports, so I'll set the diagonal = 0.
```
import numpy as np
import matplotlib.pyplot as plt
def get_shifty_corr(variable, lag):
    """Calculate pairwise correlations between rows in
    variable matrix (with lag).

    Parameters
    ----------
    variable : np array or x array
        2D array (airports x time) for variable (e.g. max temperature, precipitation)
    lag : int
        Number of offset days between correlations

    Returns
    -------
    corrcoeffs : np array
        2D array (airports x airports) with pairwise correlations;
        columns shifted lag before rows
    """
    # get number of airports and timepoints
    n_ports, n_time = variable.shape
    # make array for correlation coefficients
    corrcoeffs = np.empty((n_ports, n_ports))
    for port0 in range(n_ports):
        for port1 in range(n_ports):
            # row series trimmed at the front, column series at the back,
            # so port1 leads port0 by `lag` samples
            corrcoeffs[port0, port1] = np.corrcoef(variable[port0, lag:],
                                                   variable[port1, :n_time-lag])[0, 1]
    # Zero the self-correlations. BUGFIX: previously used np.eye(50), which
    # only worked when there were exactly 50 airports.
    np.fill_diagonal(corrcoeffs, 0)
    return corrcoeffs
```
**This is the correlation of max temperature and precipitation between all airports (with no lag):**
```
fig = plt.figure(figsize=(15, 6))
ax = fig.add_subplot(1,2,1)
temp_corr0 = get_shifty_corr(airport_weather["air_temperature_max"], 0)
temp_heat = ax.imshow(temp_corr0, cmap="plasma", vmin=0.35, vmax=0.95)
ax.title.set_text("max air temperature")
fig.colorbar(temp_heat)
ax = fig.add_subplot(1,2,2)
prec_corr0 = get_shifty_corr(airport_weather["precipitation"], 0)
prec_heat = ax.imshow(prec_corr0, cmap="viridis", vmin=0, vmax=0.5)
ax.title.set_text("precipitation")
fig.colorbar(prec_heat)
```
**This is the correlation of max temperature between all airports with lags of 1, 3, or 7 days (columns = shifted before rows):**
```
daily_lag = [1, 3, 7]
n_lags = len(daily_lag)
temp_corr_lag = [get_shifty_corr(airport_weather["air_temperature_max"], d) for d in daily_lag]
fig = plt.figure(figsize=(15, 6))
for d in range(n_lags):
ax = fig.add_subplot(1, n_lags, d+1)
heat = ax.imshow(temp_corr_lag[d], cmap="plasma", vmin=0.35, vmax=0.95)
ax.set(xlabel="leading", ylabel="lagging", title=f"shift {daily_lag[d]}")
fig.colorbar(heat)
```
**This is the correlation of precipitation between all airports with lags of 1, 3, or 7 days (columns = shifted before rows):**
```
prec_corr_lag = [get_shifty_corr(airport_weather["precipitation"], d) for d in daily_lag]
fig = plt.figure(figsize=(15, 6))
for d in range(n_lags):
ax = fig.add_subplot(1, n_lags, d+1)
heat = ax.imshow(prec_corr_lag[d], cmap="viridis", vmin=0, vmax=0.5)
ax.set(xlabel="leading", ylabel="lagging", title=f"shift {daily_lag[d]}")
fig.colorbar(heat)
```
# Correlations from top 10 pairs
```
# For a given correlation matrix, find the long and lat coordinates of the top pairs
# For a given correlation matrix, find the long and lat coordinates of the top pairs
def pull_top_pairs(corrcoeffs, n_pairs, airport_long, airport_lat):
    """Find the top n_pairs; return values and
    (long, lat) coordinates of ports in pairs.

    Parameters
    ----------
    corrcoeffs : np array
        2D array (airports x airports) with pairwise correlations;
        columns shifted lag before rows
    n_pairs : int
        Number of top pairs
    airport_long : np array
        Vector of airport longitude coordinates
    airport_lat : np array
        Vector of airport latitude coordinates

    Returns
    -------
    top_values : np array
        Vector with top correlations
    top_long : np array
        Vector of tuples with long coordinates for airports in pairs
    top_lat : np array
        Vector of tuples with lat coordinates for airports in pairs
    """
    # get number of airports
    n_ports = corrcoeffs.shape[0]
    # BUGFIX: slice by n_pairs instead of a hard-coded 10, so the function
    # honors its parameter (and doesn't crash for n_pairs > 10).
    top_idx = np.argsort(-corrcoeffs, axis=None)[:n_pairs]  # ravelled index
    top_xy = np.unravel_index(top_idx, (n_ports, n_ports))  # get (row, col) index
    # get the corr for these pairs
    top_values = [corrcoeffs[top_xy[0][i], top_xy[1][i]]
                  for i in range(n_pairs)]
    # get longitude coords for pair
    top_long = [(airport_long[top_xy[0][i]][0], airport_long[top_xy[1][i]][0])
                for i in range(n_pairs)]
    # get latitude coords for pair
    top_lat = [(airport_lat[top_xy[0][i]][0], airport_lat[top_xy[1][i]][0])
               for i in range(n_pairs)]
    return top_values, top_long, top_lat
```
**Top temperature correlations as a function of distance:**
```
fig = plt.figure(figsize=(15, 10))
for d in range(n_lags):
# get top 10 pairs
top_values, top_long, top_lat = pull_top_pairs(temp_corr_lag[d], 10, airport_long, airport_lat)
# cartesian distance
cart_dist = [((long[0] - long[1]) ** 2 + (lat[0] - lat[1]) ** 2) ** 0.5
for long, lat in zip(top_long, top_lat)]
# longitude distance
long_dist = [np.abs(long[0] - long[1]) for long in top_long]
# plot value vs cart dist
ax = fig.add_subplot(2, n_lags, d+1)
ax.scatter(cart_dist, top_values, color='r')
ax.set(xlabel="distance between best pairs (~deg)",
ylabel="temperature correlation",
title=f"shift {daily_lag[d]}",
xlim=[0, 10],
ylim=[0.85, 1])
# plot value vs long dist
ax = fig.add_subplot(2, n_lags, d+1+n_lags)
ax.scatter(long_dist, top_values, color='r')
ax.set(xlabel="longitude difference between best pairs (deg)",
ylabel="temperature correlation",
title=f"shift {daily_lag[d]}",
xlim=[0, 10],
ylim=[0.85, 1])
```
Temperature correlations for the most tightly linked airport pairs decreased with longer lags (1 --> 7 days) but remained quite high overall (> 0.85), as expected given the relatively slow annual cycle in temperature. The best pairs spanned approximately the same range of distances for all lags. Airports with similar temperature trends were at similar latitudes: the distance range for the top pairs was very similar if measured as a function of longitude and latitude or just longitude alone.
(I used cartesian coordinates to approximate the distance between (long, lat) coordinates to simplify calculations. It would be more correct to calculate the distance along the sphere surface.)
**Top precipitation correlations as a function of distance:**
```
fig = plt.figure(figsize=(15, 10))
for d in range(n_lags):
# get top 10 pairs
top_values, top_long, top_lat = pull_top_pairs(prec_corr_lag[d], 10, airport_long, airport_lat)
# cartesian distance
cart_dist = [((long[0] - long[1]) ** 2 + (lat[0] - lat[1]) ** 2) ** 0.5
for long, lat in zip(top_long, top_lat)]
# longitude distance
long_dist = [np.abs(long[0] - long[1]) for long in top_long]
# plot value vs cart dist
ax = fig.add_subplot(2, n_lags, d+1)
ax.scatter(cart_dist, top_values, color='b')
ax.set(xlabel="distance between best pairs (~deg)",
ylabel="precipitation correlation",
title=f"shift {daily_lag[d]}",
xlim=[0, 11],
ylim=[0.1, 0.6])
# plot value vs long dist
ax = fig.add_subplot(2, n_lags, d+1+n_lags)
ax.scatter(long_dist, top_values, color='b')
ax.set(xlabel="longitude difference between best pairs (deg)",
ylabel="precipitation correlation",
title=f"shift {daily_lag[d]}",
xlim=[0, 11],
ylim=[0.1, 0.6])
```
Peak precipitation correlations fell from ~0.5 to ~0.1-0.2 within a couple of days, and the distance between the best pairs decreased from up to 10 deg to up to 2 deg. This is reasonable, since precipitation patterns are more local than temperature and can change daily.
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/ndwi_single.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi_single.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi_single.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
```
## Display Earth Engine data layers
```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
| github_jupyter |
# Check original file against published reports
## ADU / SPR
```
import intake
import numpy as np
import pandas as pd
import laplan
catalog = intake.open_catalog('../catalogs/*.yml')
bucket_name = "city-planning-entitlements"
start_date = "1/1/10"
end_date = "10/31/19"
# Let's throw our new master_pcts into the d1_step_by_step
#master_pcts = catalog.pcts2.read()
master_pcts = pd.read_parquet('s3://city-planning-entitlements/test_new_master_pcts.parquet')
```
### PCTS Reporting Module Results
```
def import_and_subset(name):
    """Read the published PCTS report for entitlement ``name`` and keep
    only case number and file date, renaming to match the ITA convention."""
    path = f'../data/pcts_{name}.xlsx'
    # the Excel report carries 4 header rows before the table starts
    df = pd.read_excel(path, skiprows=4)
    df = df[["CASE NUMBER", "FILE DATE"]]
    return df.rename(columns={"CASE NUMBER": "CASE_NBR"})
```
### ITA laplan function
```
# All prefixes and suffixes
# This is our old master_pcts
# All prefixes and suffixes
# This is our old master_pcts
def laplan_subset(name):
    """Subset the (old) master PCTS table to parent cases that carry the
    entitlement suffix ``name``, using the laplan helpers."""
    suffix = name.upper()
    subset = laplan.pcts.subset_pcts(
        master_pcts,
        start_date=start_date,
        end_date=end_date,
        get_dummies=True,
        verbose=False,
    )
    # parents only; dummy columns already mark the child entitlements
    subset = laplan.pcts.drop_child_cases(subset, keep_child_entitlements=False)
    return subset[subset[suffix] == True]
```
### ITA step-by-step in creating master_pcts
```
def ita_step_by_step(name):
    """Rebuild the master PCTS table step-by-step from the raw S3 tables and
    print unique parent-case counts for suffix ``name`` after each step,
    so the drop-off at each merge can be compared against other methods.
    Prints only; returns None."""
    name = name.upper()
    print(f"{name}: Creating master PCTS step-by-step")
    # raw PCTS tables pulled straight from S3
    case = pd.read_parquet(f's3://{bucket_name}/data/raw/tCASE.parquet')
    app = pd.read_parquet(f's3://{bucket_name}/data/raw/tAPLC.parquet')
    geo_info = pd.read_parquet(f's3://{bucket_name}/data/raw/tPROP_GEO_INFO.parquet')
    la_prop = pd.read_parquet(f's3://{bucket_name}/data/raw/tLA_PROP.parquet')
    # keep only the columns needed for the merges below
    app1 = app[['APLC_ID', 'PROJ_DESC_TXT']]
    geo_info1 = geo_info[['CASE_ID', 'PROP_ID']]
    # properties must carry an assessor parcel number
    la_prop1 = la_prop[la_prop.ASSR_PRCL_NBR.notna()][['PROP_ID', 'ASSR_PRCL_NBR']]
    # Subset by start/end date
    case2 = case[(case.CASE_FILE_RCV_DT >= start_date) &
                 (case.CASE_FILE_RCV_DT <= end_date)]
    # Subset by suffix (case numbers embed suffixes as "-XXX")
    case3 = case2[case2.CASE_NBR.str.contains(f"-{name}")]
    print(f'1-# unique cases (parents + child): {case3.CASE_NBR.nunique()}')
    # Keep parent cases only
    case4 = case3[case3.PARNT_CASE_ID.isna()]
    print(f'2-# unique cases (parents): {case4.CASE_NBR.nunique()}')
    # successive inner merges drop cases with no property geography or parcel;
    # application text is optional, hence the left merge
    m1 = pd.merge(case4, geo_info1, on = 'CASE_ID', how = 'inner', validate = '1:m')
    m2 = pd.merge(m1, la_prop1, on = 'PROP_ID', how = 'inner', validate = 'm:1')
    m3 = pd.merge(m2, app1, on = 'APLC_ID', how = 'left', validate = 'm:1')
    print(f'3-# unique cases (parents), with geo_info merged: {m1.CASE_NBR.nunique()}')
    print(f'4-# unique cases (parents), with la_prop merged: {m2.CASE_NBR.nunique()}')
    print(f'5-# unique cases (parents), with app merged: {m3.CASE_NBR.nunique()}')
```
### ITA D1 step-by-step for dashboard
```
prefix_list = laplan.pcts.VALID_PCTS_PREFIX
suffix_list = laplan.pcts.VALID_PCTS_SUFFIX
remove_prefix = ["ENV"]
remove_suffix = [
"EIR",
"IPRO",
"CA",
"CATEX",
"CPIO",
"CPU",
"FH",
"G",
"HD",
"HPOZ",
"ICO",
"K",
"LCP",
"NSO",
"S",
"SN",
"SP",
"ZAI",
"CRA",
"RFA",
]
prefix_list = [x for x in prefix_list if x not in remove_prefix]
suffix_list = [x for x in suffix_list if x not in remove_suffix]
def d1_step_by_step(name):
    """Replicate the D1 dashboard pipeline for entitlement suffix ``name``,
    printing unique case counts after each filtering step (laplan subset,
    tract merge, outlier removal). Prints only; returns None.

    Relies on module-level ``master_pcts``, ``prefix_list``, ``suffix_list``,
    ``start_date``, ``end_date`` and the intake ``catalog``.
    """
    name = name.upper()
    print(f"{name}: D1 step-by-step")
    # Load PCTS and subset to the prefix / suffix list we want
    pcts = laplan.pcts.subset_pcts(
        master_pcts,
        start_date = start_date, end_date = end_date,
        prefix_list=prefix_list, suffix_list=suffix_list,
        get_dummies=True, verbose=False,
    )
    # parents only, but keep the child entitlement dummy columns
    pcts = laplan.pcts.drop_child_cases(pcts, keep_child_entitlements=True)
    pcts = pcts[pcts[name]==True][["CASE_NBR", "CASE_ID", "AIN"]]
    print(f'1-# unique cases (parents) using laplan: {pcts.CASE_NBR.nunique()}')
    # Add on tract info
    # See which cases have AINs, but those AINs are not mapped onto tract GEOID
    parcel_to_tract = catalog.crosswalk_parcels_tracts.read()
    parcel_to_tract = parcel_to_tract[["AIN", "num_AIN", "GEOID"]]
    # inner merge drops cases whose AIN has no tract crosswalk entry
    pcts = pd.merge(
        pcts,
        parcel_to_tract,
        on="AIN",
        how="inner",
        validate="m:1",
    )
    print(f'2-# unique cases (parents), with tract merged in: {pcts.CASE_NBR.nunique()}')
    # Clean AIN data and get rid of outliers: cases touching more than
    # 20 parcels are treated as outliers and removed
    case_counts = pcts.CASE_ID.value_counts()
    big_cases = pcts[pcts.CASE_ID.isin(case_counts[case_counts > 20].index)]
    pcts = pcts[~pcts.CASE_ID.isin(big_cases.CASE_ID)]
    print(f'3-# unique cases (parents) removing outliers: {pcts.CASE_NBR.nunique()}')
```
## Comparisons
```
# Put functions all together
# Put functions all together
def comparison(suffix):
    """Print the unique parent-case counts for ``suffix`` from the DCP
    report versus the ITA pipelines, to highlight discrepancies."""
    dcp = import_and_subset(suffix)
    ita = laplan_subset(suffix)
    label = suffix.upper()
    print("Discrepancies in DCP vs ITA")
    print(f'DCP-{label} unique cases (parents) in PCTS report: {dcp.CASE_NBR.nunique()}')
    print(f'ITA-{label} unique cases (parents) with laplan, all prefixes/suffixes: {ita.CASE_NBR.nunique()}')
    #ita_step_by_step(suffix)
    d1_step_by_step(suffix)
comparison("1a")
comparison("2a")
comparison("5a")
#comparison("adu")
#comparison("spr")
```
| github_jupyter |
```
import pandas as pd
import numpy as np
# BUGFIX: sklearn.externals.joblib was removed in scikit-learn 0.23;
# import joblib directly (same API, same name).
import joblib
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.metrics import roc_curve
from sklearn import metrics
import time
## Preprocess
df_raw_data = pd.read_csv('reviews.csv',sep='|',header=0)
df_train = df_raw_data.iloc[df_raw_data.index %5 != 0]
df_test = df_raw_data.iloc[df_raw_data.index %5 == 0]
# preprocess X
# word of bag
word_bag = joblib.load('word_bag.pkl')
X_test = df_test.loc[:,'text'].tolist()
X_test = word_bag.transform(X_test)
X_train = df_train.loc[:,'text'].tolist()
X_train = word_bag.transform(X_train)
# preprocess y
y = df_test['label']
lec = LabelEncoder()
y_test = lec.fit_transform(y)
y = df_train['label']
lec = LabelEncoder()
y_train = lec.fit_transform(y)
# get the feature (v2)
from sklearn.feature_extraction.text import TfidfVectorizer
## Preprocess
df_raw_data = pd.read_csv('reviews.csv',sep='|',header=0)
df_train = df_raw_data.iloc[df_raw_data.index %5 != 0]
df_test = df_raw_data.iloc[df_raw_data.index %5 == 0]
## Preprocess
X_train = df_train.loc[:,'text'].tolist()
X_test = df_test.loc[:,'text'].tolist()
transformer = TfidfVectorizer()
#ngram_range=(1, 3)
X_train = transformer.fit_transform(X_train)
feature_tfidf_name = transformer.get_feature_names()
X_test = transformer.transform(X_test)
# preprocess y
y = df_test['label']
lec = LabelEncoder()
y_test = lec.fit_transform(y)
y = df_train['label']
lec = LabelEncoder()
y_train = lec.fit_transform(y)
joblib.dump(feature_tfidf_name,'tfidf_features.pkl')
joblib.dump(transformer,'final_model/Tfidf_n_3.pkl')
```
## Decision Tree
```
from sklearn.tree import DecisionTreeClassifier
tic = time.time()
clf = DecisionTreeClassifier(criterion='entropy',max_depth=50)
clf.fit(X_train,y_train)
tac = time.time()
print('training time:',tac-tic,'s')
joblib.dump(clf,'final_model/decision_tree.pkl')
# accurarcy
y_pred = clf.predict(X_train)
DT_train_ac = accuracy_score(y_train,y_pred)
y_pred = clf.predict(X_test)
DT_test_ac = accuracy_score(y_test,y_pred)
print('training accuracy:',DT_train_ac*100,'%')
print('testing accuracy:',DT_test_ac*100,'%')
# precision and recall
DT_p_score = precision_score(y_test,y_pred)
DT_r_score = recall_score(y_test,y_pred)
DT_f1_score = 2*(DT_p_score*DT_r_score)/(DT_p_score+DT_r_score)
print("precision: ",DT_p_score," recall: ",DT_r_score," f1: ",DT_f1_score)
# ROC
DT_fpr,DT_tpr, DT_thresholds = roc_curve(y_test,y_pred)
print(DT_fpr,DT_tpr, DT_thresholds)
```
## NN model
```
from sklearn.neural_network import MLPClassifier
tic = time.time()
clf = MLPClassifier(hidden_layer_sizes = (10,10),activation='logistic',max_iter=200,
learning_rate_init = 0.01,learning_rate='invscaling',
solver='adam',early_stopping=True,random_state=42)
clf.fit(X_train,y_train)
tac = time.time()
print('training time:',tac-tic,'s')
joblib.dump(clf,'final_model/nn_model.pkl')
# accurarcy
y_pred = clf.predict(X_train)
NN_train_ac = accuracy_score(y_train,y_pred)
y_pred = clf.predict(X_test)
NN_test_ac = accuracy_score(y_test,y_pred)
print('training accuracy:',NN_train_ac*100,'%')
print('testing accuracy:',NN_test_ac*100,'%')
# precision and recall
NN_p_score = precision_score(y_test,y_pred)
NN_r_score = recall_score(y_test,y_pred)
NN_f1_score = 2*(NN_p_score*NN_r_score)/(NN_p_score+NN_r_score)
print("precision: ",NN_p_score," recall: ",NN_r_score," f1: ",NN_f1_score)
# ROC
NN_fpr,NN_tpr, NN_thresholds = roc_curve(y_test,y_pred)
print(NN_fpr,NN_tpr, NN_thresholds)
```
## Logistic Regression
```
from sklearn.linear_model import LogisticRegression
tic = time.time()
clf = LogisticRegression()
clf.fit(X_train,y_train)
tac = time.time()
print('training time:',tac-tic,'s')
joblib.dump(clf,'final_model/logistic.pkl')
# accurarcy
y_pred = clf.predict(X_train)
LG_train_ac = accuracy_score(y_train,y_pred)
y_pred = clf.predict(X_test)
LG_test_ac = accuracy_score(y_test,y_pred)
print('training accuracy:',LG_train_ac*100,'%')
print('testing accuracy:',LG_test_ac*100,'%')
# precision and recall
LG_p_score = precision_score(y_test,y_pred)
LG_r_score = recall_score(y_test,y_pred)
LG_f1_score = 2*(LG_p_score*LG_r_score)/(LG_p_score+LG_r_score)
print("precision: ",LG_p_score," recall: ",LG_r_score," f1: ",LG_f1_score)
# ROC
LG_fpr,LG_tpr, LG_thresholds = roc_curve(y_test,y_pred)
print(LG_fpr,LG_tpr, LG_thresholds)
```
## Ensemble model
## RandomForest
```
from sklearn.ensemble import RandomForestClassifier
tic = time.time()
clf = RandomForestClassifier(n_jobs=-1)
clf.fit(X_train,y_train)
tac = time.time()
print('training time:',tac-tic,'s')
joblib.dump(clf,'final_model/random_forest.pkl')
# accurarcy
y_pred = clf.predict(X_train)
RF_train_ac = accuracy_score(y_train,y_pred)
y_pred = clf.predict(X_test)
RF_test_ac = accuracy_score(y_test,y_pred)
print('training accuracy:',RF_train_ac*100,'%')
print('testing accuracy:',RF_test_ac*100,'%')
# precision and recall
RF_p_score = precision_score(y_test,y_pred)
RF_r_score = recall_score(y_test,y_pred)
RF_f1_score = 2*(RF_p_score*RF_r_score)/(RF_p_score+RF_r_score)
print("precision: ",RF_p_score," recall: ",RF_r_score," f1: ",RF_f1_score)
# ROC
RF_fpr,RF_tpr, RF_thresholds = roc_curve(y_test,y_pred)
print(RF_fpr,RF_tpr, RF_thresholds)
```
## logistic+randomforest+NN
```
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
import time
tic = time.time()
clf1 = RandomForestClassifier(n_jobs=-1)
clf2 = LogisticRegression(n_jobs=-1,random_state=42)
clf3 = MLPClassifier(hidden_layer_sizes = (10,10),activation='logistic',max_iter=100,
learning_rate_init = 0.01,learning_rate='invscaling',
solver='adam',early_stopping=True,random_state=42)
eclf1 = VotingClassifier(estimators=[
('rf', clf1), ('lg', clf2),('nn',clf3)],n_jobs=-1,voting='soft')
eclf1.fit(X_train,y_train)
tac = time.time()
print('training time:',tac-tic,'s')
joblib.dump(eclf1,'final_model/ensemble.pkl')
# accuracy
y_pred = eclf1.predict(X_train)
ES_train_ac = accuracy_score(y_train,y_pred)
y_pred = eclf1.predict(X_test)
ES_test_ac = accuracy_score(y_test,y_pred)
# BUGFIX: report the ensemble's own scores; the old code printed the
# RandomForest accuracies (RF_train_ac / RF_test_ac) by mistake.
print('training accuracy:',ES_train_ac*100,'%')
print('testing accuracy:',ES_test_ac*100,'%')
# precision and recall
# BUGFIX: use ES_* names; the old code clobbered the RandomForest's
# RF_p_score / RF_r_score / RF_f1_score with ensemble values.
ES_p_score = precision_score(y_test,y_pred)
ES_r_score = recall_score(y_test,y_pred)
ES_f1_score = 2*(ES_p_score*ES_r_score)/(ES_p_score+ES_r_score)
print("precision: ",ES_p_score," recall: ",ES_r_score," f1: ",ES_f1_score)
```
## ROC curve
```
import matplotlib.pyplot as plt
# Load the fitted models back from disk
DT_clf = joblib.load('final_model/decision_tree.pkl')
NN_clf = joblib.load('final_model/nn_model.pkl')
LG_clf = joblib.load('final_model/logistic.pkl')
RF_clf = joblib.load('final_model/random_forest.pkl')
ES_clf = joblib.load('final_model/ensemble.pkl')
#calculate the fpr and tpr for all thresholds of the classification
#decision tree
DT_probs = DT_clf.predict_proba(X_test)
DT_preds = DT_probs[:,1]
DT_fpr, DT_tpr, DT_threshold = metrics.roc_curve(y_test,DT_preds)
DT_roc_auc = metrics.auc(DT_fpr, DT_tpr)
# logistic regression
LG_probs = LG_clf.predict_proba(X_test)
LG_preds = LG_probs[:,1]
LG_fpr, LG_tpr, LG_threshold = metrics.roc_curve(y_test,LG_preds)
LG_roc_auc = metrics.auc(LG_fpr, LG_tpr)
# neural network
NN_probs = NN_clf.predict_proba(X_test)
NN_preds = NN_probs[:,1]
NN_fpr, NN_tpr, NN_threshold = metrics.roc_curve(y_test,NN_preds)
NN_roc_auc = metrics.auc(NN_fpr, NN_tpr)
# random forest
RF_probs = RF_clf.predict_proba(X_test)
RF_preds = RF_probs[:,1]
RF_fpr, RF_tpr, RF_threshold = metrics.roc_curve(y_test,RF_preds)
RF_roc_auc = metrics.auc(RF_fpr, RF_tpr)
# ensemble
ES_probs = ES_clf.predict_proba(X_test)
ES_preds = ES_probs[:,1]
ES_fpr, ES_tpr, ES_threshold = metrics.roc_curve(y_test,ES_preds)
ES_roc_auc = metrics.auc(ES_fpr, ES_tpr)
# BUGFIX: this summary previously ran before any *_roc_auc was defined
# (NameError); it must come after all AUCs are computed.
print([DT_roc_auc*100,LG_roc_auc*100,NN_roc_auc*100,RF_roc_auc*100,ES_roc_auc*100])
# plot
plt.title('Receiver Operating Characteristic')
plt.plot(DT_fpr, DT_tpr, 'b', label = 'Decision_Tree_AUC = %0.2f' % DT_roc_auc)
plt.plot(LG_fpr, LG_tpr, 'm', label = 'Logistic_Regression_AUC = %0.2f' % LG_roc_auc)
plt.plot(NN_fpr, NN_tpr, 'g', label = 'Neural_net(10*10)_AUC = %0.2f' % NN_roc_auc)
plt.plot(RF_fpr, RF_tpr, 'c', label = 'Random_Forest_AUC = %0.2f' % RF_roc_auc)
plt.plot(ES_fpr, ES_tpr, 'y', label = 'Ensemble(logitistic+NN+Random_forest)_AUC = %0.2f' % ES_roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate (tpr)')
plt.xlabel('False Positive Rate (fpr)')
plt.show()
```
## Evaluate model
```
# Training accuracy (%) per model, reported in the order DT, LG, NN, RF, ES.
y_pred = DT_clf.predict(X_train)
DT_train_ac = accuracy_score(y_train,y_pred)*100
y_pred = NN_clf.predict(X_train)
NN_train_ac = accuracy_score(y_train,y_pred)*100
y_pred = LG_clf.predict(X_train)
LG_train_ac = accuracy_score(y_train,y_pred)*100
y_pred = RF_clf.predict(X_train)
RF_train_ac = accuracy_score(y_train,y_pred)*100
y_pred = ES_clf.predict(X_train)
ES_train_ac = accuracy_score(y_train,y_pred)*100
Training_ac = [DT_train_ac,LG_train_ac,NN_train_ac,RF_train_ac,ES_train_ac]
print(Training_ac)
# Test-set accuracy, precision, recall and F1 (%) per model.
y_pred = DT_clf.predict(X_test)
DT_test_ac = accuracy_score(y_test,y_pred)*100
DT_p_score = precision_score(y_test,y_pred)*100
DT_r_score = recall_score(y_test,y_pred)*100
DT_f1_score = 2*(DT_p_score*DT_r_score)/(DT_p_score+DT_r_score)
y_pred = LG_clf.predict(X_test)
LG_test_ac = accuracy_score(y_test,y_pred)*100
LG_p_score = precision_score(y_test,y_pred)*100
LG_r_score = recall_score(y_test,y_pred)*100
LG_f1_score = 2*(LG_p_score*LG_r_score)/(LG_p_score+LG_r_score)
y_pred = NN_clf.predict(X_test)
NN_test_ac = accuracy_score(y_test,y_pred)*100
NN_p_score = precision_score(y_test,y_pred)*100
NN_r_score = recall_score(y_test,y_pred)*100
NN_f1_score = 2*(NN_p_score*NN_r_score)/(NN_p_score+NN_r_score)
y_pred = RF_clf.predict(X_test)
RF_test_ac = accuracy_score(y_test,y_pred)*100  # bug fix: duplicated recomputation removed
RF_p_score = precision_score(y_test,y_pred)*100
RF_r_score = recall_score(y_test,y_pred)*100
RF_f1_score = 2*(RF_p_score*RF_r_score)/(RF_p_score+RF_r_score)
y_pred = ES_clf.predict(X_test)
ES_test_ac = accuracy_score(y_test,y_pred)*100  # bug fix: duplicated recomputation removed
ES_p_score = precision_score(y_test,y_pred)*100
ES_r_score = recall_score(y_test,y_pred)*100
ES_f1_score = 2*(ES_p_score*ES_r_score)/(ES_p_score+ES_r_score)
Testing_ac = [DT_test_ac,LG_test_ac,NN_test_ac,RF_test_ac,ES_test_ac]
print(Testing_ac)
p_score = [DT_p_score,LG_p_score,NN_p_score,RF_p_score,ES_p_score]
print(p_score)
r_score = [DT_r_score,LG_r_score,NN_r_score,RF_r_score,ES_r_score]
print(r_score)
f1_score = [DT_f1_score,LG_f1_score,NN_f1_score,RF_f1_score,ES_f1_score]
print(f1_score)
```
## Feature importance
```
# Rank TF-IDF features by random-forest importance, most important first.
values = sorted(zip(feature_tfidf_name, RF_clf.feature_importances_), key=lambda x: x[1] * -1)
# word -> importance in percent, for lookup by name below.
feature_imp = dict(zip(feature_tfidf_name, RF_clf.feature_importances_*100))
print(values[:50])
# Split the top-50 (word, importance) pairs into parallel lists.
top_50_imp_words = [word for word, _ in values[:50]]
top_50_imp_rate = [imp * 100 for _, imp in values[:50]]
print(top_50_imp_words)
print(top_50_imp_rate)
joblib.dump(top_50_imp_words,'top_50_imp_words.pkl')
# NOTE(review): loads 'top_50_words.pkl', a *different* file from the one
# dumped above — presumably a word list produced elsewhere; confirm.
top_50_words = joblib.load('top_50_words.pkl')
feature_imp[''] = 0  # fallback so missing/empty words map to zero importance
importances = [feature_imp[word] for word in top_50_words]
print(importances)
```
| github_jupyter |
# The Fuzzing Book
## Sitemap
While the chapters of this book can be read one after the other, there are many possible paths through the book. In this graph, an arrow _A_ → _B_ means that chapter _A_ is a prerequisite for chapter _B_. You can pick arbitrary paths in this graph to get to the topics that interest you most:
```
# ignore
from IPython.display import SVG
# ignore
SVG(filename='PICS/Sitemap.svg')
```
## [Table of Contents](index.ipynb)
### <a href="01_Intro.ipynb" title="Part I: Whetting Your Appetite (01_Intro) In this part, we introduce the topics of the book.">Part I: Whetting Your Appetite</a>
* <a href="Tours.ipynb" title="Tours through the Book (Tours) This book is massive. With 17,000 lines of code and 125,000 words of text, a printed version would cover more than 1,000 pages of text. Obviously, we do not assume that everybody wants to read everything.">Tours through the Book</a>
* <a href="Intro_Testing.ipynb" title="Introduction to Software Testing (Intro_Testing) Before we get to the central parts of the book, let us introduce essential concepts of software testing. Why is it necessary to test software at all? How does one test software? How can one tell whether a test has been successful? How does one know if one has tested enough? In this chapter, let us recall the most important concepts, and at the same time get acquainted with Python and interactive notebooks.">Introduction to Software Testing</a>
### <a href="02_Lexical_Fuzzing.ipynb" title="Part II: Lexical Fuzzing (02_Lexical_Fuzzing) This part introduces test generation at the lexical level, that is, composing sequences of characters.">Part II: Lexical Fuzzing</a>
* <a href="Fuzzer.ipynb" title="Fuzzing: Breaking Things with Random Inputs (Fuzzer) In this chapter, we'll start with one of the simplest test generation techniques. The key idea of random text generation, also known as fuzzing, is to feed a string of random characters into a program in the hope to uncover failures.">Fuzzing: Breaking Things with Random Inputs</a>
* <a href="Coverage.ipynb" title="Code Coverage (Coverage) In the previous chapter, we introduced basic fuzzing – that is, generating random inputs to test programs. How do we measure the effectiveness of these tests? One way would be to check the number (and seriousness) of bugs found; but if bugs are scarce, we need a proxy for the likelihood of a test to uncover a bug. In this chapter, we introduce the concept of code coverage, measuring which parts of a program are actually executed during a test run. Measuring such coverage is also crucial for test generators that attempt to cover as much code as possible.">Code Coverage</a>
* <a href="MutationFuzzer.ipynb" title="Mutation-Based Fuzzing (MutationFuzzer) Most randomly generated inputs are syntactically invalid and thus are quickly rejected by the processing program. To exercise functionality beyond input processing, we must increase chances to obtain valid inputs. One such way is so-called mutational fuzzing – that is, introducing small changes to existing inputs that may still keep the input valid, yet exercise new behavior. We show how to create such mutations, and how to guide them towards yet uncovered code, applying central concepts from the popular AFL fuzzer.">Mutation-Based Fuzzing</a>
* <a href="GreyboxFuzzer.ipynb" title="Greybox Fuzzing (GreyboxFuzzer) In the previous chapter, we have introduced mutation-based fuzzing, a technique that generates fuzz inputs by applying small mutations to given inputs. In this chapter, we show how to guide these mutations towards specific goals such as coverage. The algorithms in this book stem from the popular American Fuzzy Lop (AFL) fuzzer, in particular from its AFLFast and AFLGo flavors. We will explore the greybox fuzzing algorithm behind AFL and how we can exploit it to solve various problems for automated vulnerability detection.">Greybox Fuzzing</a>
* <a href="SearchBasedFuzzer.ipynb" title="Search-Based Fuzzing (SearchBasedFuzzer) Sometimes we are not only interested in fuzzing as many as possible diverse program inputs, but in deriving specific test inputs that achieve some objective, such as reaching specific statements in a program. When we have an idea of what we are looking for, then we can search for it. Search algorithms are at the core of computer science, but applying classic search algorithms like breadth or depth first search to search for tests is unrealistic, because these algorithms potentially require us to look at all possible inputs. However, domain-knowledge can be used to overcome this problem. For example, if we can estimate which of several program inputs is closer to the one we are looking for, then this information can guide us to reach the target quicker – this information is known as a heuristic. The way heuristics are applied systematically is captured in meta-heuristic search algorithms. The "meta" denotes that these algorithms are generic and can be instantiated differently to different problems. Meta-heuristics often take inspiration from processes observed in nature. For example, there are algorithms mimicking evolutionary processes, swarm intelligence, or chemical reactions. In general they are much more efficient than exhaustive search approaches such that they can be applied to vast search spaces – search spaces as vast as the domain of program inputs are no problem for them.">Search-Based Fuzzing</a>
* <a href="MutationAnalysis.ipynb" title="Mutation Analysis (MutationAnalysis) In the chapter on coverage, we showed how one can identify which parts of the program are executed by a program, and hence get a sense of the effectiveness of a set of test cases in covering the program structure. However, coverage alone may not be the best measure for the effectiveness of a test, as one can have great coverage without ever checking a result for correctness. In this chapter, we introduce another means for assessing the effectiveness of a test suite: After injecting mutations – artificial faults – into the code, we check whether a test suite can detect these artificial faults. The idea is that if it fails to detect such mutations, it will also miss real bugs.">Mutation Analysis</a>
### <a href="03_Syntactical_Fuzzing.ipynb" title="Part III: Syntactical Fuzzing (03_Syntactical_Fuzzing) This part introduces test generation at the syntactical level, that is, composing inputs from language structures.">Part III: Syntactical Fuzzing</a>
* <a href="Grammars.ipynb" title="Fuzzing with Grammars (Grammars) In the chapter on "Mutation-Based Fuzzing", we have seen how to use extra hints – such as sample input files – to speed up test generation. In this chapter, we take this idea one step further, by providing a specification of the legal inputs to a program. Specifying inputs via a grammar allows for very systematic and efficient test generation, in particular for complex input formats. Grammars also serve as the base for configuration fuzzing, API fuzzing, GUI fuzzing, and many more.">Fuzzing with Grammars</a>
* <a href="GrammarFuzzer.ipynb" title="Efficient Grammar Fuzzing (GrammarFuzzer) In the chapter on grammars, we have seen how to use grammars for very effective and efficient testing. In this chapter, we refine the previous string-based algorithm into a tree-based algorithm, which is much faster and allows for much more control over the production of fuzz inputs.">Efficient Grammar Fuzzing</a>
* <a href="GrammarCoverageFuzzer.ipynb" title="Grammar Coverage (GrammarCoverageFuzzer) Producing inputs from grammars gives all possible expansions of a rule the same likelihood. For producing a comprehensive test suite, however, it makes more sense to maximize variety – for instance, by not repeating the same expansions over and over again. In this chapter, we explore how to systematically cover elements of a grammar such that we maximize variety and do not miss out individual elements.">Grammar Coverage</a>
* <a href="Parser.ipynb" title="Parsing Inputs (Parser) In the chapter on Grammars, we discussed how grammars can be used to represent various languages. We also saw how grammars can be used to generate strings of the corresponding language. Grammars can also perform the reverse. That is, given a string, one can decompose the string into its constituent parts that correspond to the parts of grammar used to generate it – the derivation tree of that string. These parts (and parts from other similar strings) can later be recombined using the same grammar to produce new strings.">Parsing Inputs</a>
* <a href="ProbabilisticGrammarFuzzer.ipynb" title="Probabilistic Grammar Fuzzing (ProbabilisticGrammarFuzzer) Let us give grammars even more power by assigning probabilities to individual expansions. This allows us to control how many of each element should be produced, and thus allows us to target our generated tests towards specific functionality. We also show how to learn such probabilities from given sample inputs, and specifically direct our tests towards input features that are uncommon in these samples.">Probabilistic Grammar Fuzzing</a>
* <a href="GeneratorGrammarFuzzer.ipynb" title="Fuzzing with Generators (GeneratorGrammarFuzzer) In this chapter, we show how to extend grammars with functions – pieces of code that get executed during grammar expansion, and that can generate, check, or change elements produced. Adding functions to a grammar allows for very versatile test generation, bringing together the best of grammar generation and programming.">Fuzzing with Generators</a>
* <a href="GreyboxGrammarFuzzer.ipynb" title="Greybox Fuzzing with Grammars (GreyboxGrammarFuzzer) <!-- Previously, we have learned about mutational fuzzing, which generates new inputs by mutating seed inputs. Most mutational fuzzers represent inputs as a sequence of bytes and apply byte-level mutations to this byte sequence. Such byte-level mutations work great for compact file formats with a small number of structural constraints. However, most file formats impose a high-level structure on these byte sequences.">Greybox Fuzzing with Grammars</a>
* <a href="Reducer.ipynb" title="Reducing Failure-Inducing Inputs (Reducer) By construction, fuzzers create inputs that may be hard to read. This causes issues during debugging, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that automatically reduce and simplify failure-inducing inputs to a minimum in order to ease debugging.">Reducing Failure-Inducing Inputs</a>
### <a href="04_Semantical_Fuzzing.ipynb" title="Part IV: Semantical Fuzzing (04_Semantical_Fuzzing) This part introduces test generation techniques that take the semantics of the input into account, notably the behavior of the program that processes the input.">Part IV: Semantical Fuzzing</a>
* <a href="GrammarMiner.ipynb" title="Mining Input Grammars (GrammarMiner) So far, the grammars we have seen have been mostly specified manually – that is, you (or the person knowing the input format) had to design and write a grammar in the first place. While the grammars we have seen so far have been rather simple, creating a grammar for complex inputs can involve quite some effort. In this chapter, we therefore introduce techniques that automatically mine grammars from programs – by executing the programs and observing how they process which parts of the input. In conjunction with a grammar fuzzer, this allows us to 1. take a program, 2. extract its input grammar, and 3. fuzz it with high efficiency and effectiveness, using the concepts in this book.">Mining Input Grammars</a>
* <a href="InformationFlow.ipynb" title="Tracking Information Flow (InformationFlow) We have explored how one could generate better inputs that can penetrate deeper into the program in question. While doing so, we have relied on program crashes to tell us that we have succeeded in finding problems in the program. However, that is rather simplistic. What if the behavior of the program is simply incorrect, but does not lead to a crash? Can one do better?">Tracking Information Flow</a>
* <a href="ConcolicFuzzer.ipynb" title="Concolic Fuzzing (ConcolicFuzzer) We have previously seen how one can use dynamic taints to produce more intelligent test cases than simply looking for program crashes. We have also seen how one can use the taints to update the grammar, and hence focus more on the dangerous methods.">Concolic Fuzzing</a>
* <a href="SymbolicFuzzer.ipynb" title="Symbolic Fuzzing (SymbolicFuzzer) One of the problems with traditional methods of fuzzing is that they fail to exercise all the possible behaviors that a system can have, especially when the input space is large. Quite often the execution of a specific branch of execution may happen only with very specific inputs, which could represent an extremely small fraction of the input space. The traditional fuzzing methods relies on chance to produce inputs they need. However, relying on randomness to generate values that we want is a bad idea when the space to be explored is huge. For example, a function that accepts a string, even if one only considers the first $10$ characters, already has $2^{80}$ possible inputs. If one is looking for a specific string, random generation of values will take a few thousand years even in one of the super computers.">Symbolic Fuzzing</a>
* <a href="DynamicInvariants.ipynb" title="Mining Function Specifications (DynamicInvariants) When testing a program, one not only needs to cover its several behaviors; one also needs to check whether the result is as expected. In this chapter, we introduce a technique that allows us to mine function specifications from a set of given executions, resulting in abstract and formal descriptions of what the function expects and what it delivers.">Mining Function Specifications</a>
### <a href="05_Domain-Specific_Fuzzing.ipynb" title="Part V: Domain-Specific Fuzzing (05_Domain-Specific_Fuzzing) This part discusses test generation for a number of specific domains. For all these domains, we introduce fuzzers that generate inputs as well as miners that analyze the input structure.">Part V: Domain-Specific Fuzzing</a>
* <a href="ConfigurationFuzzer.ipynb" title="Testing Configurations (ConfigurationFuzzer) The behavior of a program is not only governed by its data. The configuration of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically test and cover software configurations. By automatically inferring configuration options, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover combinations of configuration options, quickly detecting unwanted interferences.">Testing Configurations</a>
* <a href="APIFuzzer.ipynb" title="Fuzzing APIs (APIFuzzer) So far, we have always generated system input, i.e. data that the program as a whole obtains via its input channels. However, we can also generate inputs that go directly into individual functions, gaining flexibility and speed in the process. In this chapter, we explore the use of grammars to synthesize code for function calls, which allows you to generate program code that very efficiently invokes functions directly.">Fuzzing APIs</a>
* <a href="Carver.ipynb" title="Carving Unit Tests (Carver) So far, we have always generated system input, i.e. data that the program as a whole obtains via its input channels. If we are interested in testing only a small set of functions, having to go through the system can be very inefficient. This chapter introduces a technique known as carving, which, given a system test, automatically extracts a set of unit tests that replicate the calls seen during the unit test. The key idea is to record such calls such that we can replay them later – as a whole or selectively. On top, we also explore how to synthesize API grammars from carved unit tests; this means that we can synthesize API tests without having to write a grammar at all.">Carving Unit Tests</a>
* <a href="WebFuzzer.ipynb" title="Testing Web Applications (WebFuzzer) In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), notably on Web interfaces. We set up a (vulnerable) Web server and demonstrate how to systematically explore its behavior – first with hand-written grammars, then with grammars automatically inferred from the user interface. We also show how to conduct systematic attacks on these servers, notably with code and SQL injection.">Testing Web Applications</a>
* <a href="GUIFuzzer.ipynb" title="Testing Graphical User Interfaces (GUIFuzzer) In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), abstracting from our previous examples on Web testing. Building on general means to extract user interface elements and to activate them, our techniques generalize to arbitrary graphical user interfaces, from rich Web applications to mobile apps, and systematically explore user interfaces through forms and navigation elements.">Testing Graphical User Interfaces</a>
### <a href="06_Managing_Fuzzing.ipynb" title="Part VI: Managing Fuzzing (06_Managing_Fuzzing) This part discusses how to manage fuzzing in the large.">Part VI: Managing Fuzzing</a>
* <a href="FuzzingInTheLarge.ipynb" title="Fuzzing in the Large (FuzzingInTheLarge) In the past chapters, we have always looked at fuzzing taking place on one machine for a few seconds only. In the real world, however, fuzzers are run on dozens or even thousands of machines; for hours, days and weeks; for one program or dozens of programs. In such contexts, one needs an infrastructure to collect failure data from the individual fuzzer runs, and to aggregate such data in a central repository. In this chapter, we will examine such an infrastructure, the FuzzManager framework from Mozilla.">Fuzzing in the Large</a>
* <a href="WhenToStopFuzzing.ipynb" title="When To Stop Fuzzing (WhenToStopFuzzing) In the past chapters, we have discussed several fuzzing techniques. Knowing what to do is important, but it is also important to know when to stop doing things. In this chapter, we will learn when to stop fuzzing – and use a prominent example for this purpose: The Enigma machine that was used in the second world war by the navy of Nazi Germany to encrypt communications, and how Alan Turing and I.J. Good used fuzzing techniques to crack ciphers for the Naval Enigma machine.">When To Stop Fuzzing</a>
### <a href="99_Appendices.ipynb" title="Appendices (99_Appendices) This part holds notebooks and modules that support other notebooks.">Appendices</a>
* <a href="PrototypingWithPython.ipynb" title="Prototyping with Python (PrototypingWithPython) This is the manuscript of Andreas Zeller's keynote "Coding Effective Testing Tools Within Minutes" at the TAIC PART 2020 conference.">Prototyping with Python</a>
* <a href="ExpectError.ipynb" title="Error Handling (ExpectError) The code in this notebook helps with handling errors. Normally, an error in notebook code causes the execution of the code to stop; while an infinite loop in notebook code causes the notebook to run without end. This notebook provides two classes to help address these concerns.">Error Handling</a>
* <a href="Timer.ipynb" title="Timer (Timer) The code in this notebook helps with measuring time.">Timer</a>
* <a href="ControlFlow.ipynb" title="Control Flow Graph (ControlFlow) The code in this notebook helps with obtaining the control flow graph of python functions.">Control Flow Graph</a>
* <a href="RailroadDiagrams.ipynb" title="Railroad Diagrams (RailroadDiagrams) The code in this notebook helps with drawing syntax-diagrams. It is a (slightly customized) copy of the excellent library from Tab Atkins jr., which unfortunately is not available as a Python package.">Railroad Diagrams</a>
| github_jupyter |
# Transfer Learning Template
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from torch.utils.data import DataLoader
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
```
# Allowed Parameters
These are allowed parameters, not defaults
Each of these values needs to be present in the injected parameters (the notebook will raise an exception if any of them is missing)
Papermill uses the cell tag "parameters" to inject the real parameters below this cell.
Enable cell-tag display in Jupyter to see which cell is tagged.
```
# The exact set of keys that the injected `parameters` dict must supply;
# the validation cell below raises if the supplied keys differ from this set.
required_parameters = {
    "experiment_name",
    "lr",
    "device",
    "seed",
    "dataset_seed",
    "n_shot",
    "n_query",
    "n_way",
    "train_k_factor",
    "val_k_factor",
    "test_k_factor",
    "n_epoch",
    "patience",
    "criteria_for_best",
    "x_net",
    "datasets",
    "torch_default_dtype",
    "NUM_LOGS_PER_EPOCH",
    "BEST_MODEL_PATH",
}
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
from steves_utils.ORACLE.utils_v2 import (
ALL_DISTANCES_FEET_NARROWED,
ALL_RUNS,
ALL_SERIAL_NUMBERS,
)
# Defaults used when this notebook is run directly (papermill normally
# injects the real parameters into the tagged cell below). Built as a single
# literal instead of one assignment per key.
standalone_parameters = {
    "experiment_name": "STANDALONE PTN",
    "lr": 0.001,
    "device": "cuda",
    "seed": 1337,
    "dataset_seed": 1337,
    "n_way": 8,
    "n_shot": 3,
    "n_query": 2,
    "train_k_factor": 1,
    "val_k_factor": 2,
    "test_k_factor": 2,
    "n_epoch": 50,
    "patience": 10,
    "criteria_for_best": "source_loss",
    # One source and one target dataset for the transfer-learning setup.
    "datasets": [
        {
            "labels": ALL_SERIAL_NUMBERS,
            "domains": ALL_DISTANCES_FEET_NARROWED,
            "num_examples_per_domain_per_label": 100,
            "pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"),
            "source_or_target_dataset": "source",
            "x_transforms": ["unit_mag", "minus_two"],
            "episode_transforms": [],
            "domain_prefix": "ORACLE_",
        },
        {
            "labels": ALL_NODES,
            "domains": ALL_DAYS,
            "num_examples_per_domain_per_label": 100,
            "pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
            "source_or_target_dataset": "target",
            "x_transforms": ["unit_power", "times_zero"],
            "episode_transforms": [],
            "domain_prefix": "CORES_",
        },
    ],
    "torch_default_dtype": "torch.float32",
    # Layer-by-layer spec consumed by build_sequential.
    "x_net": [
        {"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
        {"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm2d", "kargs": {"num_features":256}},
        {"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm2d", "kargs": {"num_features":80}},
        {"class": "Flatten", "kargs": {}},
        {"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}},  # 80 units per IQ pair
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm1d", "kargs": {"num_features":256}},
        {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
    ],
    # Parameters relevant to results; these basically never need to change.
    "NUM_LOGS_PER_EPOCH": 10,
    "BEST_MODEL_PATH": "./best_model.pth",
}
# Parameters
# This is the papermill-injected cell: it overrides standalone_parameters
# above and must contain exactly the keys in required_parameters.
parameters = {
    "experiment_name": "tl_1_cores-metehan",
    "device": "cuda",
    "lr": 0.001,
    "seed": 1337,
    "dataset_seed": 1337,
    "n_shot": 3,
    "n_query": 2,
    "train_k_factor": 3,
    "val_k_factor": 2,
    "test_k_factor": 2,
    "torch_default_dtype": "torch.float32",
    "n_epoch": 50,
    "patience": 3,
    "criteria_for_best": "target_loss",
    # Layer-by-layer spec consumed by build_sequential (same architecture as
    # the standalone default, with lists instead of tuples from JSON round-trip).
    "x_net": [
        {"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
        {
            "class": "Conv2d",
            "kargs": {
                "in_channels": 1,
                "out_channels": 256,
                "kernel_size": [1, 7],
                "bias": False,
                "padding": [0, 3],
            },
        },
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm2d", "kargs": {"num_features": 256}},
        {
            "class": "Conv2d",
            "kargs": {
                "in_channels": 256,
                "out_channels": 80,
                "kernel_size": [2, 7],
                "bias": True,
                "padding": [0, 3],
            },
        },
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm2d", "kargs": {"num_features": 80}},
        {"class": "Flatten", "kargs": {}},
        {"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm1d", "kargs": {"num_features": 256}},
        {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
    ],
    "NUM_LOGS_PER_EPOCH": 10,
    "BEST_MODEL_PATH": "./best_model.pth",
    "n_way": 19,
    "datasets": [
        {
            # Source dataset (CORES). NOTE(review): 58 labels listed here but
            # n_way is 19, so episodes presumably subsample classes — confirm.
            "labels": [
                "1-10.",
                "1-11.",
                "1-15.",
                "1-16.",
                "1-17.",
                "1-18.",
                "1-19.",
                "10-4.",
                "10-7.",
                "11-1.",
                "11-14.",
                "11-17.",
                "11-20.",
                "11-7.",
                "13-20.",
                "13-8.",
                "14-10.",
                "14-11.",
                "14-14.",
                "14-7.",
                "15-1.",
                "15-20.",
                "16-1.",
                "16-16.",
                "17-10.",
                "17-11.",
                "17-2.",
                "19-1.",
                "19-16.",
                "19-19.",
                "19-20.",
                "19-3.",
                "2-10.",
                "2-11.",
                "2-17.",
                "2-18.",
                "2-20.",
                "2-3.",
                "2-4.",
                "2-5.",
                "2-6.",
                "2-7.",
                "2-8.",
                "3-13.",
                "3-18.",
                "3-3.",
                "4-1.",
                "4-10.",
                "4-11.",
                "4-19.",
                "5-5.",
                "6-15.",
                "7-10.",
                "7-14.",
                "8-18.",
                "8-20.",
                "8-3.",
                "8-8.",
            ],
            "domains": [1, 2, 3, 4, 5],
            "num_examples_per_domain_per_label": 100,
            "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl",
            "source_or_target_dataset": "source",
            "x_transforms": [],
            "episode_transforms": [],
            "domain_prefix": "CORES_",
        },
        {
            # Target dataset (Metehan); integer class labels 0..18 match n_way=19.
            "labels": [
                0,
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                8,
                9,
                10,
                11,
                12,
                13,
                14,
                15,
                16,
                17,
                18,
            ],
            "domains": [0, 1, 2],
            "num_examples_per_domain_per_label": 100,
            "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/metehan.stratified_ds.2022A.pkl",
            "source_or_target_dataset": "target",
            "x_transforms": [],
            "episode_transforms": [],
            "domain_prefix": "Metehan_",
        },
    ],
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
    print("parameters not injected, running with standalone_parameters")
    parameters = standalone_parameters

# Guard against papermill failing to inject the `parameters` cell.
# Idiom fix: `x not in ...` instead of `not x in ...`.
if 'parameters' not in locals() and 'parameters' not in globals():
    raise Exception("Parameter injection failed")

# Use an attribute-style dict for convenient access (p.lr, p.seed, ...).
p = EasyDict(parameters)

# Fail fast if the supplied keys differ from the required set, reporting
# both the unexpected and the missing keys before raising.
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
    print("Parameters are incorrect")
    if len(supplied_keys - required_parameters) > 0:
        print("Shouldn't have:", str(supplied_keys - required_parameters))
    if len(required_parameters - supplied_keys) > 0:
        print("Need to have:", str(required_parameters - supplied_keys))
    raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
p.domains_source = []
p.domains_target = []
train_original_source = []
val_original_source = []
test_original_source = []
train_original_target = []
val_original_target = []
test_original_target = []
# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag
# global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag
def add_dataset(
    labels,
    domains,
    pickle_path,
    x_transforms,
    episode_transforms,
    domain_prefix,
    num_examples_per_domain_per_label,
    source_or_target_dataset:str,
    iterator_seed=p.seed,
    dataset_seed=p.dataset_seed,
    n_shot=p.n_shot,
    n_way=p.n_way,
    n_query=p.n_query,
    train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
):
    """Build episodic train/val/test iterables for one dataset and append
    them to the module-level source or target accumulator lists.

    Side effects: extends train/val/test_original_{source,target} and
    p.domains_{source,target} with domain-prefixed domain names.

    Raises:
        Exception: if episode_transforms is non-empty (not implemented), or
            source_or_target_dataset is not "source"/"target".
    """
    # Chain the requested sample-level transforms; None means "no transform".
    if x_transforms == []:
        x_transform = None
    else:
        x_transform = get_chained_transform(x_transforms)
    # Custom episode transforms are not supported; the only episode transform
    # applied is the domain-prefixing lambda below.  (The original assigned
    # None here and then unconditionally overwrote it -- dead code removed.)
    if episode_transforms != []:
        raise Exception("episode_transforms not implemented")
    # Prefix domain ids so domains from different datasets cannot collide.
    # _prefix is bound as a default arg to avoid the late-binding-closure pitfall.
    episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])
    eaf = Episodic_Accessor_Factory(
        labels=labels,
        domains=domains,
        num_examples_per_domain_per_label=num_examples_per_domain_per_label,
        iterator_seed=iterator_seed,
        dataset_seed=dataset_seed,
        n_shot=n_shot,
        n_way=n_way,
        n_query=n_query,
        train_val_test_k_factors=train_val_test_k_factors,
        pickle_path=pickle_path,
        x_transform_func=x_transform,
    )
    train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()
    # Lazily apply the domain-prefix transform to every episode of each split.
    train = Lazy_Iterable_Wrapper(train, episode_transform)
    val = Lazy_Iterable_Wrapper(val, episode_transform)
    test = Lazy_Iterable_Wrapper(test, episode_transform)
    if source_or_target_dataset == "source":
        train_original_source.append(train)
        val_original_source.append(val)
        test_original_source.append(test)
        p.domains_source.extend(
            [domain_prefix + str(u) for u in domains]
        )
    elif source_or_target_dataset == "target":
        train_original_target.append(train)
        val_original_target.append(val)
        test_original_target.append(test)
        p.domains_target.extend(
            [domain_prefix + str(u) for u in domains]
        )
    else:
        raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}")
# Instantiate every configured dataset; each call appends its episodic
# iterables to the source/target accumulator lists defined above.
for ds in p.datasets:
    add_dataset(**ds)
# from steves_utils.CORES.utils import (
# ALL_NODES,
# ALL_NODES_MINIMUM_1000_EXAMPLES,
# ALL_DAYS
# )
# add_dataset(
# labels=ALL_NODES,
# domains = ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"cores_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle1_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle2_{u}"
# )
# add_dataset(
# labels=list(range(19)),
# domains = [0,1,2],
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"met_{u}"
# )
# # from steves_utils.wisig.utils import (
# # ALL_NODES_MINIMUM_100_EXAMPLES,
# # ALL_NODES_MINIMUM_500_EXAMPLES,
# # ALL_NODES_MINIMUM_1000_EXAMPLES,
# # ALL_DAYS
# # )
# import steves_utils.wisig.utils as wisig
# add_dataset(
# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,
# domains = wisig.ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"wisig_{u}"
# )
###################################
# Build the dataset
###################################
# Merge the per-dataset iterables into single seeded streams per split.
train_original_source = Iterable_Aggregator(train_original_source, p.seed)
val_original_source = Iterable_Aggregator(val_original_source, p.seed)
test_original_source = Iterable_Aggregator(test_original_source, p.seed)
train_original_target = Iterable_Aggregator(train_original_target, p.seed)
val_original_target = Iterable_Aggregator(val_original_target, p.seed)
test_original_target = Iterable_Aggregator(test_original_target, p.seed)
# For CNN We only use X and Y. And we only train on the source.
# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. Finally wrap them in a dataloader
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
# Nested lookup structure: datasets.<source|target>.<original|processed>.<split>
datasets = EasyDict({
    "source": {
        "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
        "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
    },
    "target": {
        "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
        "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
    },
})
from steves_utils.transforms import get_average_magnitude, get_average_power
# Sanity prints: the distinct (prefixed) domain names in each val stream, and
# one support-set tensor from the processed source train stream.
print(set([u for u,_ in val_original_source]))
print(set([u for u,_ in val_original_target]))
s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))
print(s_x)
# for ds in [
# train_processed_source,
# val_processed_source,
# test_processed_source,
# train_processed_target,
# val_processed_target,
# test_processed_target
# ]:
# for s_x, s_y, q_x, q_y, _ in ds:
# for X in (s_x, q_x):
# for x in X:
# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)
# assert np.isclose(get_average_power(x.numpy()), 1.0)
###################################
# Build the model
###################################
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
# The jig drives training with early stopping (patience) and checkpoints the
# best model to p.BEST_MODEL_PATH.
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
    train_iterable=datasets.source.processed.train,
    source_val_iterable=datasets.source.processed.val,
    target_val_iterable=datasets.target.processed.val,
    num_epochs=p.n_epoch,
    num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
    patience=p.patience,
    optimizer=optimizer,
    criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
# Per-domain confusion is computed over the combined source+target *original*
# val streams, since those still carry the (domain, episode) tuples.
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
    per_domain_accuracy[domain] = {
        "accuracy": accuracy,
        "source?": domain in p.domains_source
    }
# Do an independent accuracy assesment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
# Bundle the config, metrics, and training history into one record.
experiment = {
    "experiment_name": p.experiment_name,
    "parameters": dict(p),
    "results": {
        "source_test_label_accuracy": source_test_label_accuracy,
        "source_test_label_loss": source_test_label_loss,
        "target_test_label_accuracy": target_test_label_accuracy,
        "target_test_label_loss": target_test_label_loss,
        "source_val_label_accuracy": source_val_label_accuracy,
        "source_val_label_loss": source_val_label_loss,
        "target_val_label_accuracy": target_val_label_accuracy,
        "target_val_label_loss": target_val_label_loss,
        "total_epochs_trained": total_epochs_trained,
        "total_experiment_time_secs": total_experiment_time_secs,
        "confusion": confusion,
        "per_domain_accuracy": per_domain_accuracy,
    },
    "history": history,
    "dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
# Render the summary plots and tables for the notebook output.
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
# Verify the record is JSON-serializable (raises if not).
json.dumps(experiment)
```
| github_jupyter |
# Applying DMD for transient modeling, surrogates and Uncertainty Quantification.
## 2D LRA Benchmark:
In this test case, a control rod ejection in the 2D well known LRA benchmark has been simulated by Detran (developed by J. A. Roberts). The objective here is to build a data-driven, yet physics-revealing time-dependent surrogate model(s). The linearity inherited from the connection to Koopman theory will facilitate a forward/backward uncertainty propagation.
First of all, let's make the necessary imports: we import the DMD class from the PyDMD package (developed by mathLab), we set up matplotlib for the notebook, and we import numpy.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
import scipy as sp
from pydmd import DMD_jov
import pickle
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
#plt.rcParams['mathtext.fontset'] = 'custom'
#plt.rcParams['mathtext.rm'] = 'Times New Roman'
#plt.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
#plt.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.labelsize'] = 20
#plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['xtick.labelsize'] = 18
plt.rcParams['ytick.labelsize'] = 18
plt.rcParams['legend.fontsize'] = 18
plt.rcParams['legend.fancybox'] = False
plt.rcParams['legend.frameon'] = False
plt.rcParams['figure.titlesize'] = 20
plt.rcParams['axes.autolimit_mode'] = 'round_numbers'
plt.rcParams['axes.xmargin'] = 0
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['text.usetex'] = True
plt.rcParams['savefig.bbox'] = 'tight'
```
We load the Detran simulation data of the Transient LRA Benchmark
```
A = pickle.load(open('../inputs/diffusion2x2_ref_with_mesh_temps.p','rb'),encoding='latin')
kappa = 3.204e-11
#%% Plots of raw data
t = np.array(A['times']) # time
mp = np.array(A['meshpower']).T # mesh-dependent powers
mp = mp * kappa
p = np.array(A['power'])# total power
c = p[0]/sp.sum(mp,axis=0)[0]# use to reconstruct power from mesh power
maxtemp = A['maxtemp']#
np.where(p==max(p))
#mp = mp * c
```
Build the surrogates using a batch of DMD's
```
#%% DMD analysis
import time
et_0 = time.time()
# Time step
dt = t[1]-t[0]
# Chop the time domain into discrete patches
time_interval = [1.36,1.5,max(t)]
# Define desire subspace size for each patch
r = [10,13,40]
#r = [50,1e5,15]
#step=[10,1,1]
optimal=['Jov',False,False]
# Perform dmd; time_index[i]:time_index[i+1] bounds patch i in the time array.
# np.sum replaces sp.sum, which was removed from modern SciPy.
time_index = [0]
for i in range(len(time_interval)):
    time_index.append(np.sum(t <= time_interval[i]))
F_norm = 0.0
results={}
for i in range(len(time_interval)):
    start, stop = time_index[i], time_index[i+1]
    t_i = t[start:stop]
    dmd = DMD_jov(svd_rank=r[i],opt=optimal[i])
    fuel_idx = mp[:, 0]>0                  # pick out fuel mesh
    tmp_reduced = mp[fuel_idx, start:stop] # extract fuel data
    tmp_full = 0*mp[:, start:stop]         # initialize full data
    dmd.fit(tmp_reduced)                   # do the fit
    tmp_full[fuel_idx] = dmd.reconstructed_data.real
    results[i]={}
    results[i]['dmd'] = dmd
    results[i]['t'] = t_i # All the coming lines can be ommitted except p_dmd
    results[i]['Phi'] = dmd.modes
    results[i]['eigs'] = dmd.eigs
    results[i]['mp_dmd'] = tmp_full.copy()#dmd.reconstructed_data
    # Total power reconstructed from the DMD mesh powers.  (A dead sp.zeros
    # pre-allocation was removed: its value was immediately overwritten.)
    results[i]['p_dmd'] = c*np.sum(tmp_full, axis=0)
    # Frobenius-norm residual of the fit on this patch.
    F_norm_tmp = np.linalg.norm(tmp_reduced-dmd.reconstructed_data.real)
    print("patch {} norm = {:.2e}".format(i, F_norm_tmp))
    F_norm += F_norm_tmp**2
et = time.time() - et_0
F_norm = np.sqrt(F_norm)
print("final norm is {:.2e}".format(F_norm))
print('elapsed time = ', et)
#for mode in dmd.modes.T:
# plt.plot(x, mode.real)
# plt.title('Modes')
#plt.show()T.real)
#plt.pcolor(xgrid, tgrid, ((mp[start:stop, :].T-dmd.reconstructed_data).T).real)
#fig = plt.colorbar()
# Markers used to distinguish the DMD patches in all three panels.
markers = ['o', '^', 's', 'v']
fig5=plt.figure(figsize=(15,5))
# Plot the surrogate and reference on a linear plot
ax1=fig5.add_subplot(1,3,1)
plt.plot(t, p, 'k-', label='reference')
for k in range(len(time_interval)):
    plt.plot(results[k]['t'], results[k]['p_dmd'].real, marker=markers[k], ls='', mfc='w', label='interval '+str(k))
plt.axis([0, 3, 0, 5000])
plt.xlabel('t (s)')
plt.ylabel('power (W/cm$^3$)')
plt.legend(loc="upper right")
# Plot the surrogate and reference on a log plot. Put the derivative on the other axis.
ax2=fig5.add_subplot(1,3,2)
plt.semilogy(t, p, 'k-', label='reference')
for k in range(len(time_interval)):
    plt.semilogy(results[k]['t'], results[k]['p_dmd'].real, marker=markers[k], ls='', mfc='w', label='interval '+str(k))
plt.xlabel('t (s)')
plt.ylabel('power (W/cm$^3$)')
# Power derivative and its sign masks (computed but currently only used by
# the commented-out overlay below).
dpdt = np.gradient(p, t)
idx_pos = dpdt>0
idx_neg = dpdt<0
#ax2left = ax2.twinx()
#plt.semilogy(t, abs(dpdt), 'r:', label='derivative')
plt.legend()
#plt.legend()
# Plot the error
ax2=fig5.add_subplot(1,3,3)
t_start = 0
for k in range(len(time_interval)):
    t_end = t_start + len(results[k]['t'])
    ref = p[t_start:t_end]
    # Percent error of the surrogate power against the reference on this patch.
    err = abs(results[k]['p_dmd'].real-ref)/ref*100
    plt.semilogy(t[t_start:t_end], err, marker=markers[k],
                 ls='', mfc='w', label='interval '+str(k))
    t_start = t_end
plt.xlabel('t (s)')
plt.ylabel('absolute error in power (\%)')
plt.legend()
plt.tight_layout()
plt.savefig('../images/corepower.pdf')
#plt.semilogy(t[idx_pos], dpdt[idx_pos], 'r.', ms=2)
d2pdt2 = np.gradient(dpdt, t)
ff = abs(p)
# Overlay the patch boundaries as vertical lines on the |power| trace.
plt.plot(t, ff, 'r-',
         [time_interval[0],time_interval[0]], [min(ff), max(ff)],
         [time_interval[1], time_interval[1]], [min(ff), max(ff)],
         [1, 3], [6e3, 6e3])
#plt.axis([1.3, 1.6, min(ff)/5, max(ff)/5])
# (notebook inspections) time of the late power peak and mesh extent
t[p==max(p[t>1.6])]
t[143]
np.sqrt(mp.shape[0])*7.5
# Reshape the flat mesh-power snapshots into 22x22 maps (Fortran order) for
# the DMD reconstruction, the reference, and the percent-error field E.
Xdmd_2D=np.reshape(np.concatenate((results[0]['mp_dmd'],results[1]['mp_dmd'],results[2]['mp_dmd']),axis=1),(22,22,-1),order='F')
mp_2D=np.reshape(mp,(22,22,-1),order='F')
X_lim = 21*7.5
X,Y=np.linspace(0,X_lim,22),np.linspace(0,X_lim,22)
xgrid,ygrid=np.meshgrid(X,Y)
Xdmd_2D[:,:,0].shape,mp_2D[:,:,0].shape
E = abs(mp_2D.real-Xdmd_2D.real)/mp_2D.real*100
# Zero out the error where the reference is zero (avoids division artifacts).
E[mp_2D==0]=0
## print('t=0')
fig = plt.figure(figsize=(15,12.75))
# Snapshot indices to display, one row of panels per snapshot.
steps = 0, 143, 200
color = 'inferno'
for i in range(len(steps)):
    # Column 1: reference mesh power.
    ax1=fig.add_subplot(3,3,3*i+1)
    ax1.set_aspect('equal')
    # Shared color scale across reference and DMD panels of this row.
    vmax = max(np.max(mp_2D[:,:,steps[i]]), np.max(Xdmd_2D[:,:,steps[i]].real))
    vmin = 0.0#min(np.min(mp_2D[:,:,steps[i]]>0), np.min(Xdmd_2D[:,:,steps[i]].real>0))
    plot = plt.pcolor(xgrid, ygrid, mp_2D[:,:,steps[i]].real.T,cmap=color,
                      vmin=vmin, vmax=vmax, rasterized=True, linewidth=0)
    plot.set_edgecolor('face')
    cbar = fig.colorbar(plot)
    cbar.formatter.set_powerlimits((0, 0))
    cbar.update_ticks()
    if i == 0:
        plt.title('Reference')
    plt.xlabel('x (cm)')
    plt.ylabel('t = {:.2f} s \ny (cm)'.format(t[steps[i]]))
    plt.axis([0, 135, 0, 135])
    # Column 2: DMD reconstruction.
    ax2=fig.add_subplot(3,3,3*i+2)
    ax2.set_aspect('equal')
    plt.axis([0, 135, 0, 135])
    plot=plt.pcolor(xgrid, ygrid, Xdmd_2D[:,:,steps[i]].real.T,cmap=color,
                    vmin=0, vmax=vmax, rasterized=True, linewidth=0)
    cbar = fig.colorbar(plot)
    cbar.formatter.set_powerlimits((0, 0))
    cbar.update_ticks()
    if i == 0:
        plt.title('DMD')
    # Column 3: percent-error map.
    ax3=fig.add_subplot(3,3,3*i+3)
    ax3.set_aspect('equal')
    plt.axis([0, 135, 0, 135])
    plt.pcolor(xgrid, ygrid, E[:,:,steps[i]].T,cmap=color,
               rasterized=True, linewidth=0)
    plt.colorbar()
    if i == 0:
        plt.title('Relative Error (\%)')
plt.tight_layout()
plt.savefig('../images/meshpower.pdf')
# (notebook inspections) initial power and normalization checks
p[0]
p[0]/sum(mp[:, 0])*17550.0
sum(mp[:,0]), p[0]*17550.0
```
| github_jupyter |
# [SOLUTION] Attention Basics
In this notebook, we look at how attention is implemented. We will focus on implementing attention in isolation from a larger model. That's because when implementing attention in a real-world model, a lot of the focus goes into piping the data and juggling the various vectors rather than the concepts of attention themselves.
We will implement attention scoring as well as calculating an attention context vector.
## Attention Scoring
### Inputs to the scoring function
Let's start by looking at the inputs we'll give to the scoring function. We will assume we're in the first step in the decoding phase. The first input to the scoring function is the hidden state of the decoder (assuming a toy RNN with three hidden nodes -- not usable in real life, but easier to illustrate):
```
dec_hidden_state = [5,1,20]
```
Let's visualize this vector:
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Let's visualize our decoder hidden state
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(dec_hidden_state)), annot=True, cmap=sns.light_palette("purple", as_cmap=True), linewidths=1)
```
Our first scoring function will score a single annotation (encoder hidden state), which looks like this:
```
annotation = [3,12,45] #e.g. Encoder hidden state
# Let's visualize the single annotation
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(annotation)), annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
```
### IMPLEMENT: Scoring a Single Annotation
Let's calculate the dot product of a single annotation. Numpy's [dot()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) is a good candidate for this operation
```
def single_dot_attention_score(dec_hidden_state, enc_hidden_state):
    """Score one annotation against the decoder hidden state.

    The score is simply the inner (dot) product of the two vectors.
    """
    # For 1-D inputs, np.inner is exactly the dot product.
    score = np.inner(dec_hidden_state, enc_hidden_state)
    return score
single_dot_attention_score(dec_hidden_state, annotation)
```
### Annotations Matrix
Let's now look at scoring all the annotations at once. To do that, here's our annotation matrix:
```
annotations = np.transpose([[3,12,45], [59,2,5], [1,43,5], [4,3,45.3]])
```
And it can be visualized like this (each column is a hidden state of an encoder time step):
```
# Let's visualize our annotation (each column is an annotation)
ax = sns.heatmap(annotations, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
```
### IMPLEMENT: Scoring All Annotations at Once
Let's calculate the scores of all the annotations in one step using matrix multiplication. Let's continue to use the dot scoring method
<img src="images/scoring_functions.png" />
To do that, we'll have to transpose `dec_hidden_state` and [matrix multiply](https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html) it with `annotations`.
```
def dot_attention_score(dec_hidden_state, annotations):
    """Score every annotation at once.

    Each column of `annotations` is one encoder hidden state; the result is
    the row vector of dot-product scores h^T @ A.
    """
    h_t = np.transpose(dec_hidden_state)
    return h_t @ annotations
attention_weights_raw = dot_attention_score(dec_hidden_state, annotations)
attention_weights_raw
```
Looking at these scores, can you guess which of the four vectors will get the most attention from the decoder at this time step?
## Softmax
Now that we have our scores, let's apply softmax:
<img src="images/softmax.png" />
```
def softmax(x):
    """Numerically stable softmax over axis 0.

    Subtracting the column max before exponentiating prevents overflow,
    which removes the need for np.float128 -- that extended-precision dtype
    is unavailable on many platforms (e.g. Windows and macOS/ARM builds),
    so the original raised AttributeError there.
    """
    x = np.asarray(x, dtype=np.float64)
    # exp(x - max) / sum(exp(x - max)) is mathematically identical to
    # exp(x) / sum(exp(x)) but never overflows.
    e_x = np.exp(x - np.max(x, axis=0))
    return e_x / e_x.sum(axis=0)
attention_weights = softmax(attention_weights_raw)
attention_weights
```
Even when knowing which annotation will get the most focus, it's interesting to see how drastic softmax makes the end score become. The first and last annotation had the respective scores of 927 and 929. But after softmax, the attention they'll get is 0.119 and 0.880 respectively.
# Applying the scores back on the annotations
Now that we have our scores, let's multiply each annotation by its score to proceed closer to the attention context vector. This is the multiplication part of this formula (we'll tackle the summation part in the latter cells)
<img src="images/Context_vector.png" />
```
def apply_attention_scores(attention_weights, annotations):
    """Weight each annotation (column) by its attention score.

    Relies on numpy broadcasting: a length-n weight vector scales the n
    columns of the annotation matrix element-wise.
    """
    weighted = np.multiply(attention_weights, annotations)
    return weighted
applied_attention = apply_attention_scores(attention_weights, annotations)
applied_attention
```
Let's visualize how the context vector looks now that we've applied the attention scores back on it:
```
# Let's visualize our annotations after applying attention to them
ax = sns.heatmap(applied_attention, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
```
Contrast this with the raw annotations visualized earlier in the notebook, and we can see that the second and third annotations (columns) have been nearly wiped out. The first annotation maintains some of its value, and the fourth annotation is the most pronounced.
# Calculating the Attention Context Vector
All that remains to produce our attention context vector now is to sum up the four columns to produce a single attention context vector
```
def calculate_attention_vector(applied_attention):
    """Sum the weighted annotations column-wise into one context vector."""
    # axis=1 collapses the columns (one per annotation) into a single vector.
    return np.asarray(applied_attention).sum(axis=1)
attention_vector = calculate_attention_vector(applied_attention)
attention_vector
# Let's visualize the attention context vector
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(attention_vector)), annot=True, cmap=sns.light_palette("Blue", as_cmap=True), linewidths=1)
```
Now that we have the context vector, we can concatenate it with the hidden state and pass it through a hidden layer to produce the result of this decoding time step.
| github_jupyter |
```
import h5py
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('presentation')
from shabanipy.jj.plotting_general import plot_inplane_vs_bias, plot_inplane_vs_Ic_Rn, plot_inplane_vs_IcRn
#: Name of the sample that must appear in the measurement name usually of the form "{Wafer}-{Piece}_{Design}-{Iteration}_{Junction}_{Cooldown}"
SAMPLE_NAME = "{Wafer}-{Piece}_{Design}-{Iteration}"
SAMPLE_ID = "{Wafer}-{Piece}_{Design}-{Iteration}_{Junction}_{Cooldown}"
#: hdf5 file number
FILE_NUM = ''
#: Path to store generated files
PATH = (f"/Users/bh/Desktop/Code/Topological JJ/Samples/{SAMPLE_NAME}/{SAMPLE_ID}")
#: Name of generated processed data file
PROCESSED_DATA_NAME = (f"{PATH}/Data/{SAMPLE_ID}_processed-data-{FILE_NUM}.hdf5")
# NOTE(review): SAMPLE_NAME / SAMPLE_ID / FILE_NUM are template placeholders;
# fill them in before running this notebook.
h = h5py.File(PROCESSED_DATA_NAME, 'r')
# field_y = 'In-plane Field - Y
field_z = 'In-plane Field - Z'  # HDF5 group for the swept in-plane field axis
vg = 'Vg::'                     # HDF5 group for the gate-voltage setting
f = h['Data'][f'{field_z}'][f'{vg}']
#[f'{field_y}']
# Pull the measurement arrays out of the processed-data group.
in_field = np.array(f['Vector Magnet - Field Y'])
v_drop = np.array(f["Voltage Drop"])
scaled_v_drop = np.array(f["ScaledVoltage"])
bias = np.array(f["Bias"])
# Numerical differential resistance dV/dI from the scaled voltage and bias.
dVdI = np.diff(np.array(f["ScaledVoltage"]))/np.diff(np.array(f["Bias"]))
dR = np.array(f["dR"])
# Color map of |dR| vs (in-plane field, bias); optional kwargs left commented.
plot_inplane_vs_bias(in_field, bias, np.abs(dR)
                     # savgol_windowl = 3, savgol_polyorder = 1,
                     # cvmax = , cvmin = ,
                     # bias_limits = ,
                     # in_field_limits = ,
                     # fig_size = ,
                     )
plt.savefig(f"Figs/In-plane Field/inplane_vs_bias__{SAMPLE_NAME}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
# plt.savefig(f"Figs/In-plane Field/inplane_vs_bias__{SAMPLE_NAME}_field-y:{field_y[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
"""Voltage threshold in V above which the junction is not considered to carry a
supercurrent anymore. Used in the determination of the critical current. Usually of the order of a couple e-5 or e-4.
Default is 1e-4."""
ic_voltage_threshold =
"""Positive bias value above which the data can be used to extract the
normal resistance. Default is 10e-6."""
high_bias_threshold =
# Extract and plot critical current Ic and normal resistance Rn vs in-plane
# field; optional styling kwargs left commented as template placeholders.
plot_inplane_vs_Ic_Rn(in_field, bias, scaled_v_drop,
                      ic_voltage_threshold = ic_voltage_threshold,
                      high_bias_threshold = high_bias_threshold,
                      # savgol_windowl = 3, savgol_polyorder = 1,
                      # ic_limits = ,
                      # rn_limits = ,
                      # in_field_limits = ,
                      # fig_size = ,
                      )
plt.savefig(f"Figs/In-plane Field/inplane_vs_Ic_Rn__{SAMPLE_NAME}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
# plt.savefig(f"Figs/In-plane Field/inplane_vs_Ic_Rn__{SAMPLE_NAME}_field-y:{field_y[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
# Plot the Ic*Rn product vs in-plane field with the same thresholds.
plot_inplane_vs_IcRn(in_field, bias, scaled_v_drop,
                     ic_voltage_threshold = ic_voltage_threshold,
                     high_bias_threshold = high_bias_threshold,
                     # savgol_windowl = 3, savgol_polyorder = 1,
                     # icrn_limits = ,
                     # in_field_limits = ,
                     # fig_size = ,)
plt.savefig(f"Figs/In-plane Field/inplane_vs_IcRn__{SAMPLE_NAME}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
# plt.savefig(f"Figs/In-plane Field/inplane_vs_IcRn__{SAMPLE_NAME}_field-y:{field_y[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
```
| github_jupyter |
# WOR Forecasting
In this section is introduced the basic classes and functions to make Forecast by applying the Wor Methodology
```
import os
from dcapy import dca
from datetime import date
import numpy as np
```
WOR forecasting is an empirical method to estimate the trend of water production with respect to the cumulative oil production.
Generally, you can determine the linear relationship between the WOR (Water-Oil Ratio) and Np (Cumulative Oil Production) on a semi-log plot when producing at a constant rate of total fluids.
$
WOR = \frac{q_{w}}{q_{o}}
$
## Simple Functions to convert Bsw to Wor
```
# Convert a set of BSW (water-cut) values to WOR and back again.
bsw_values = [0.01, 0.01, 0.1, 0.5, 0.8, 0.9, 0.95, 0.99]
wor_values = dca.bsw_to_wor(bsw_values)
dca.wor_to_bsw(wor_values)  # round-trip back to BSW
```
## Wor Forecasting function
The parameters required to define a WOR model are:
+ **Slope**: It is the relationship between the WOR and Np. It is defined as $\frac{d(log(WOR))}{d Np}$
+ **Fluid Rate**: Total fluid rate production target
+ **Ti**: Initial Time
+ **WOR initial**: The Wor value at the initial time
```
# Forecast with a constant total-fluid rate over ten time steps.
time_axis = np.arange(0, 10, 1)
wor_slope = 3e-6                      # d(log WOR)/dNp
initial_wor = dca.bsw_to_wor(0.5)     # start at 50% water cut
rates = [5000 for _ in range(10)]     # one rate value per time step
forecast_1 = dca.wor_forecast(time_axis, rates, wor_slope, initial_wor)
print(forecast_1)
```
In this case you have to pass an array of desired rates whose length is equal to that of the time array. That means you can pass a fluid-rate array with different values.
```
# Forecast with a rate change halfway through the period.
steps = np.arange(0, 10, 1)
decline_slope = 3e-5
wor_start = dca.bsw_to_wor(0.5)
stepped_rates = [5000] * 5 + [6000] * 5  # 5000 bbl/d for 5 steps, then 6000
result = dca.wor_forecast(steps, stepped_rates, decline_slope, wor_start)
print(result)
```
## Wor Class
Like the Arps class, the Wor class has the same advantages described before. In this case you can pass the initial bsw directly, and it will internally be converted to a WOR value.
```
# Build a Wor declination model; the bsw argument is converted to WOR internally.
w1 = dca.Wor(bsw=0.5, slope=3.5e-6, ti=0, fluid_rate=1000)
print(type(w1))
```
The forecast method is also present with the same parameters as seen in Arps class
```
# Run the forecast over the first five time steps with the model's own fluid rate.
fr = w1.forecast(start=0, end=5)
print(fr)
```
If you want to change the fluid rate you can pass a different value when calling the `forecast` method
```
# Override the fluid rate stored on the instance for this single forecast call.
fr = w1.forecast(start=0, end=10, fluid_rate=2000)
print(fr)
```
## Multiple Values
You can create Wor instances with multiple values for each of the parameters. This will create additional iterations according to the number of cases and the broadcast shape.
```
# Three initial water cuts -> three forecast iterations via broadcasting.
initial_bsw_cases = [0.4, 0.5, 0.6]
w2 = dca.Wor(bsw=initial_bsw_cases, slope=3.5e-6, ti=0, fluid_rate=1000)
fr = w2.forecast(start=0, end=4, fluid_rate=2000)
print(fr)
```
As each fluid-rate case can be an array with multiple values, you can pass a 2D array to run more than one iteration.
```
# Case 1: two iterations, each with a single (constant) fluid rate.
w3 = dca.Wor(bsw=0.4, slope=3.5e-6, ti=0, fluid_rate=[[1000], [2000]])
fr = w3.forecast(start=0, end=4)
print(fr)

# Case 2: two iterations, each with its own per-step fluid-rate schedule.
rate_schedules = [[1000, 1200, 1300, 1250],
                  [2000, 2200, 2300, 2250]]
w4 = dca.Wor(bsw=0.4, slope=3.5e-6, ti=0, fluid_rate=rate_schedules)
fr = w4.forecast(start=0, end=4)
print(fr)
```
## Wor with Dates
```
# Wor model anchored to a calendar date; forecast at daily, monthly,
# and annual output frequencies over increasingly long horizons.
w1 = dca.Wor(bsw=0.5, slope=3e-5, fluid_rate=4000, ti=date(2021, 1, 1))
print(w1)
start_date = date(2021, 1, 1)
for end_date, freq in [(date(2021, 1, 10), 'D'),
                       (date(2022, 1, 1), 'M'),
                       (date(2024, 1, 1), 'A')]:
    fr = w1.forecast(start=start_date, end=end_date, freq_output=freq)
    print(fr)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jads-nl/intro-to-python/blob/develop/00_intro/00_content.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# An Introduction to Python and Programming
This book is a *thorough* introduction to programming in [Python <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.python.org/).
It teaches the concepts behind and the syntax of the core Python language as defined by the [Python Software Foundation <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.python.org/psf/) in the official [language reference <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://docs.python.org/3/reference/index.html). Furthermore, it introduces commonly used functionalities from the [standard library <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://docs.python.org/3/library/index.html) and popular third-party libraries like [numpy <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.numpy.org/), [pandas <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://pandas.pydata.org/), [matplotlib <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://matplotlib.org/), and others.
<img src="https://github.com/jads-nl/intro-to-python/blob/develop/00_intro/static/logo.png?raw=1" width="15%" align="left">
## Prerequisites
There are *no* prerequisites for reading this book.
## Objective
The **main goal** of this introduction is to **prepare** the student **for further studies** in the "field" of **data science**.
### Why data science?
The term **[data science <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Data_science)** is rather vague and does *not* refer to an academic discipline. Instead, the term was popularized by the tech industry, who also coined non-meaningful job titles such as "[rockstar](https://www.quora.com/Why-are-engineers-called-rockstars-and-ninjas)" or "[ninja developers](https://www.quora.com/Why-are-engineers-called-rockstars-and-ninjas)." Most *serious* definitions describe the field as being **multi-disciplinary** *integrating* scientific methods, algorithms, and systems thinking to extract knowledge from structured and unstructured data, *and* also emphasize the importance of **[domain knowledge <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Domain_knowledge)**.
Recently, this integration aspect feeds back into the academic world. The [MIT](https://www.mit.edu/), for example, created the new [Stephen A. Schwarzman College of Computing](http://computing.mit.edu) for [artificial intelligence <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Artificial_intelligence) with a 1 billion dollar initial investment where students undergo a "bilingual" curriculum with half the classes in quantitative and method-centric fields and the other half in domains such as biology, business, chemistry, politics, (art) history, or linguistics (cf., the [official Q&As](http://computing.mit.edu/faq/) or this [NYT article](https://www.nytimes.com/2018/10/15/technology/mit-college-artificial-intelligence.html)). Their strategists see a future where programming skills are just as naturally embedded into students' curricula as are nowadays subjects like calculus, statistics, or academic writing. Then, programming literacy is not just another "nice to have" skill but a prerequisite, or an enabler, to understanding more advanced topics in the actual domains studied.
## Installation
To "read" this book in the most meaningful way, a working installation of **Python 3.8** with [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) is needed.
For a tutorial on how to install Python on your computer, follow the instructions in the [README.md](https://github.com/webartifex/intro-to-python/blob/develop/README.md#installation) file in the project's [GitHub repository <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_gh.png?raw=1">](https://github.com/webartifex/intro-to-python). If you cannot install Python on your own machine, you may open the book interactively in the cloud with [Binder <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_mb.png?raw=1">](https://mybinder.org/v2/gh/webartifex/intro-to-python/develop?urlpath=lab).
## Jupyter Notebooks
The document you are viewing is a so-called [Jupyter notebook](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html), a file format introduced by the [Jupyter Project](https://jupyter.org/).
"Jupyter" is an [acronym <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Acronym) derived from the names of the three major programming languages **[Julia](https://julialang.org/)**, **[Python <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.python.org)**, and **[R](https://www.r-project.org/)**, all of which play significant roles in the world of data science. The Jupyter Project's idea is to serve as an integrating platform such that different programming languages and software packages can be used together within the same project.
Jupyter notebooks have become a de-facto standard for communicating and exchanging results in the data science community - both in academia and business - and provide an alternative to command-line interface (CLI or "terminal") based ways of running Python code. As an example for the latter case, we could start the default [Python interpreter <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://docs.python.org/3/tutorial/interpreter.html) that comes with every installation by typing the `python` command into a CLI (or `poetry run python` if the project is managed with the [poetry](https://python-poetry.org/docs/) CLI tool as explained in the [README.md](https://github.com/webartifex/intro-to-python/blob/develop/README.md#alternative-installation-for-instructors) file). Then, as the screenshot below shows, we could execute Python code like `1 + 2` or `print("Hello World")` line by line simply by typing it following the `>>>` **prompt** and pressing the **Enter** key. For an introductory course, however, this would be rather tedious and probably scare off many beginners.
<img src="https://github.com/jads-nl/intro-to-python/blob/develop/00_intro/static/cli_example.png?raw=1" width="50%">
One reason for the popularity of Jupyter notebooks is that they allow mixing text with code in the same document. Text may be formatted with the [Markdown <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_gh.png?raw=1">](https://guides.github.com/features/mastering-markdown/) language and mathematical formulas typeset with [LaTeX](https://www.overleaf.com/learn/latex/Free_online_introduction_to_LaTeX_%28part_1%29). Moreover, we may include pictures, plots, and even videos. Because of these features, the notebooks developed for this book come in a self-contained "tutorial" style enabling students to simply read them from top to bottom while executing the code snippets.
Other ways of running Python code are to use the [IPython <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://ipython.org/) CLI tool instead of the default interpreter or a full-fledged [Integrated Development Environment <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Integrated_development_environment) (e.g., the commercial [PyCharm](https://www.jetbrains.com/pycharm/) or the free [Spyder <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_gh.png?raw=1">](https://github.com/spyder-ide/spyder) that comes with the Anaconda Distribution).
### Markdown Cells vs. Code Cells
A Jupyter notebook consists of cells that have a type associated with them. So far, only cells of type "Markdown" have been used, which is the default way to present formatted text.
The cells below are examples of "Code" cells containing actual Python code: They calculate the sum of `1` and `2` and print out `"Hello World"` when executed, respectively. To edit an existing code cell, enter into it with a mouse click. You are "in" a code cell if its frame is highlighted in blue. We call that the **edit mode**.
There is also a **command mode** that you reach by hitting the **Escape** key. That un-highlights the frame. You are now "out" of but still "on" the cell. If you were already in command mode, hitting the Escape key does *nothing*.
Using the **Enter** and **Escape** keys, you can now switch between the two modes.
To **execute**, or "run," a code cell, hold down the **Control** key and press **Enter**. Note how you do *not* go to the subsequent cell if you keep re-executing the cell you are on. Alternatively, you can hold the **Shift** key and press **Enter**, which executes a cell *and* places your focus on the subsequent cell or creates a new one if there is none.
```
1 + 2  # an expression: Jupyter displays its value below the cell
print("Hello World")  # a statement with a side effect: writes text to the screen
```
Similarly, a Markdown cell is also in either edit or command mode. For example, double-click on the text you are reading: This puts you into edit mode. Now, you could change the formatting (e.g., print a word in *italics* or **bold**) and "execute" the cell to render the text as specified.
To change a cell's type, choose either "Code" or "Markdown" in the navigation bar at the top. Alternatively, you can press either the **Y** or **M** key in command mode.
Sometimes, a code cell starts with an exclamation mark `!`. Then, the Jupyter notebook behaves as if the following command were typed directly into a terminal. The cell below asks the `python` CLI to show its version number and is *not* Python code but a command in the [Shell <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Shell_%28computing%29) language. The `!` is useful to execute short CLI commands without leaving a Jupyter notebook.
```
!python --version
```
## Programming vs. Computer Science vs. IT
In this book, **programming** is defined as
- a *structured* way of *problem-solving*
- by *expressing* the steps of a *computation* or *process*
- and thereby *documenting* the process in a formal way.
Programming is always *concrete* and based on a *particular case*. It exhibits elements of an *art* or a *craft* as we hear programmers call code "beautiful" or "ugly" or talk about the "expressive" power of an application.
That is different from **computer science**, which is
- a field of study comparable to applied *mathematics* that
- asks *abstract* questions (e.g., "Is something computable at all?"),
- develops and analyses *algorithms* and *data structures*,
- and *proves* the *correctness* of a program.
In a sense, a computer scientist does not need to know a programming language to work, and many computer scientists only know how to produce "ugly" looking code in the eyes of professional programmers.
**IT** or **information technology** is a term that has many meanings to different people. Often, it has something to do with hardware or physical devices, both of which are out of scope for programmers and computer scientists. Sometimes, it refers to a [support function](https://en.wikipedia.org/wiki/Value_chain#Support_activities) within a company. Many computer scientists and programmers are more than happy if their printer and internet connection work as they often do not know a lot more about that than "non-technical" people.
## Why Python?
### What is Python?
Here is a brief history of and some background on Python (cf., also this [TechRepublic article](https://www.techrepublic.com/article/python-is-eating-the-world-how-one-developers-side-project-became-the-hottest-programming-language-on-the-planet/) for a more elaborate story):
- [Guido van Rossum <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Guido_van_Rossum) (Python’s **[Benevolent Dictator for Life <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Benevolent_dictator_for_life)**) was bored during a week around Christmas 1989 and started Python as a hobby project "that would keep \[him\] occupied" for some days
- the idea was to create a **general-purpose** scripting **language** that would allow fast *prototyping* and would *run on every operating system*
- Python grew through the 90s as van Rossum promoted it via his "Computer Programming for Everybody" initiative that had the *goal to encourage a basic level of coding literacy* as an equal knowledge alongside English literacy and math skills
- to become more independent from its creator, the next major version **Python 2** - released in 2000 and still in heavy use as of today - was **open-source** from the get-go which attracted a *large and global community of programmers* that *contributed* their expertise and best practices in their free time to make Python even better
- **Python 3** resulted from a significant overhaul of the language in 2008 taking into account the *learnings from almost two decades*, streamlining the language, and getting ready for the age of **big data**
- the language is named after the sketch comedy group [Monty Python <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Monty_Python)
#### Summary
Python is a **general-purpose** programming **language** that allows for *fast development*, is *easy to read*, **open-source**, long-established, unifies the knowledge of *hundreds of thousands of experts* around the world, runs on basically every machine, and can handle the complexities of applications involving **big data**.
### Why open-source?
Couldn't a company like Google, Facebook, or Microsoft come up with a better programming language? The following is an argument on why this can likely not be the case.
Wouldn't it be weird if professors and scholars of English literature and language studies dictated how we'd have to speak in day-to-day casual conversations or how authors of poesy and novels should use language constructs to achieve a particular type of mood? If you agree with that premise, it makes sense to assume that even programming languages should evolve in a "natural" way as users *use* the language over time and in *new* and *unpredictable* contexts creating new conventions.
Loose *communities* are the primary building block around which open-source software projects are built. Someone - like Guido - starts a project and makes it free to use for anybody (e.g., on a code-sharing platform like [GitHub <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_gh.png?raw=1">](https://github.com/)). People find it useful enough to solve one of their daily problems and start using it. They see how a project could be improved and provide new use cases (e.g., via the popularized concept of a [pull request <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_gh.png?raw=1">](https://help.github.com/articles/about-pull-requests/)). The project grows both in lines of code and people using it. After a while, people start local user groups to share their same interests and meet regularly (e.g., this is a big market for companies like [Meetup](https://www.meetup.com/) or non-profits like [PyData <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://pydata.org/)). Out of these local and usually monthly meetups grow yearly conferences on the country or even continental level (e.g., the original [PyCon <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://us.pycon.org/) in the US, [EuroPython <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://europython.eu/), or [PyCon.DE <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://de.pycon.org/)). 
The content presented at these conferences is made publicly available via GitHub and YouTube (e.g., [PyCon 2019 <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.youtube.com/channel/UCxs2IIVXaEHHA4BtTiWZ2mQ) or [EuroPython <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](http://europython.tv/)) and serves as references on what people are working on and introductions to the endless number of specialized fields.
While these communities are somewhat loose and continuously changing, smaller in-groups, often democratically organized and elected (e.g., the [Python Software Foundation <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.python.org/psf/)), take care of, for example, the development of the "core" Python language itself.
Python itself is just a specification (i.e., a set of rules) as to what is allowed and what not: It must first be implemented (c.f., next section below). The current version of Python can always be looked up in the [Python Language Reference <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://docs.python.org/3/reference/index.html). To make changes to that, anyone can make a so-called **[Python Enhancement Proposal <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.python.org/dev/peps/)**, or **PEP** for short, where it needs to be specified what exact changes are to be made and argued why that is a good thing to do. These PEPs are reviewed by the [core developers <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://devguide.python.org/coredev/) and interested people and are then either accepted, modified, or rejected if, for example, the change introduces internal inconsistencies. This process is similar to the **double-blind peer review** established in academia, just a lot more transparent. Many of the contributors even held or hold positions in academia, one more indicator of the high quality standards in the Python community. To learn more about PEPs, check out [PEP 1 <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://www.python.org/dev/peps/pep-0001/) that describes the entire process.
In total, no one single entity can control how the language evolves, and the users' needs and ideas always feed back to the language specification via a quality controlled and "democratic" process.
Besides being **free** as in "free beer," a major benefit of open-source is that one can always *look up how something works in detail*: That is the literal meaning of *open* source and a difference to commercial languages (e.g., [MATLAB](https://www.mathworks.com/products/matlab.html)) as a programmer can always continue to *study best practices* or find out how things are implemented. Along this way, many *errors are uncovered*, as well. Furthermore, if one runs an open-source application, one can be reasonably sure that no bad people built in a "backdoor." [Free software <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Free_software) is consequently free of charge but brings *many other freedoms* with it, most notably the freedom to change the code.
### Isn't C a lot faster?
The default Python implementation is written in the C language and called CPython. This is also what the Anaconda Distribution uses.
[C <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/C_%28programming_language%29) and [C++ <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/C%2B%2B) (cf., this [introduction](https://www.learncpp.com/)) are wide-spread and long-established (i.e., since the 1970s) programming languages employed in many mission-critical software systems (e.g., operating systems themselves, low latency databases and web servers, nuclear reactor control systems, airplanes, ...). They are fast, mainly because the programmer not only needs to come up with the **business logic** but also manage the computer's memory.
In contrast, Python automatically manages the memory for the programmer. So, speed here is a trade-off between application run time and engineering/development time. Often, the program's run time is not that important: For example, what if C needs 0.001 seconds in a case where Python needs 0.1 seconds to do the same thing? When the requirements change and computing speed becomes an issue, the Python community offers many third-party libraries - usually also written in C - where specific problems can be solved in near-C time.
#### Summary
While it is true that a language like C is a lot faster than Python when it comes to *pure* **computation time**, this does not matter in many cases as the *significantly shorter* **development cycles** are the more significant cost factor in a rapidly changing world.
### Who uses it?
<img src="https://github.com/jads-nl/intro-to-python/blob/develop/00_intro/static/example_python_users.png?raw=1" width="70%">
While ad-hominem arguments are usually not the best kind of reasoning, we briefly look at some examples of who uses Python and leave it up to the reader to decide if this is convincing or not:
- **[Massachusetts Institute of Technology](https://www.mit.edu/)**
- teaches Python in its [introductory course](https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-0001-introduction-to-computer-science-and-programming-in-python-fall-2016/) to computer science independent of the student's major
- replaced the infamous course on the [Scheme](https://groups.csail.mit.edu/mac/projects/scheme/) language (cf., [source <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_hn.png?raw=1">](https://news.ycombinator.com/item?id=602307))
- **[Google](https://www.google.com/)**
- used the strategy "Python where we can, C++ where we must" from its early days on to stay flexible in a rapidly changing environment (cf., [source <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_so.png?raw=1">](https://stackoverflow.com/questions/2560310/heavy-usage-of-python-at-google))
- the very first web-crawler was written in Java and so difficult to maintain that it was rewritten in Python right away (cf., [source](https://www.amazon.com/Plex-Google-Thinks-Works-Shapes/dp/1416596585/ref=sr_1_1?ie=UTF8&qid=1539101827&sr=8-1&keywords=in+the+plex))
- Guido van Rossum was hired by Google from 2005 to 2012 to advance the language there
- **[NASA](https://www.nasa.gov/)** open-sources many of its projects, often written in Python and regarding analyses with big data (cf., [source](https://code.nasa.gov/language/python/))
- **[Facebook](https://facebook.com/)** uses Python besides C++ and its legacy PHP (a language for building websites; the "cool kid" from the early 2000s)
- **[Instagram](https://instagram.com/)** operates the largest installation of the popular **web framework [Django](https://www.djangoproject.com/)** (cf., [source](https://instagram-engineering.com/web-service-efficiency-at-instagram-with-python-4976d078e366))
- **[Spotify](https://spotify.com/)** bases its data science on Python (cf., [source](https://labs.spotify.com/2013/03/20/how-we-use-python-at-spotify/))
- **[Netflix](https://netflix.com/)** also runs its predictive models on Python (cf., [source](https://medium.com/netflix-techblog/python-at-netflix-86b6028b3b3e))
- **[Dropbox](https://dropbox.com/)** "stole" Guido van Rossum from Google to help scale the platform (cf., [source](https://medium.com/dropbox-makers/guido-van-rossum-on-finding-his-way-e018e8b5f6b1))
- **[JPMorgan Chase](https://www.jpmorganchase.com/)** requires new employees to learn Python as part of the onboarding process starting with the 2018 intake (cf., [source](https://www.ft.com/content/4c17d6ce-c8b2-11e8-ba8f-ee390057b8c9?segmentId=a7371401-027d-d8bf-8a7f-2a746e767d56))
As images tell more than words, here are two plots of popular languages' "market shares" based on the number of questions asked on [Stack Overflow <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_so.png?raw=1">](https://stackoverflow.blog/2017/09/06/incredible-growth-python/), the most relevant platform for answering programming-related questions: As of late 2017, Python surpassed [Java](https://www.java.com/en/), heavily used in big corporates, and [JavaScript](https://developer.mozilla.org/en-US/docs/Web/JavaScript), the "language of the internet" that does everything in web browsers, in popularity. Two blog posts from "technical" people explain this in more depth to the layman: [Stack Overflow <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_so.png?raw=1">](https://stackoverflow.blog/2017/09/14/python-growing-quickly/) and [DataCamp](https://www.datacamp.com/community/blog/python-scientific-computing-case).
<img src="https://github.com/jads-nl/intro-to-python/blob/develop/00_intro/static/growth_of_major_programming_languages.png?raw=1" width="50%">
As the graph below shows, neither Google's very own language **[Go](https://golang.org/)** nor **[R](https://www.r-project.org/)**, a domain-specific language in the niche of statistics, can compete with Python's year-to-year growth.
<img src="https://github.com/jads-nl/intro-to-python/blob/develop/00_intro/static/growth_of_smaller_programming_languages.png?raw=1" width="50%">
[IEEE Spectrum](https://spectrum.ieee.org/computing/software/the-top-programming-languages-2019) provides a more recent comparison of programming languages' popularity. Even news and media outlets notice the recent popularity of Python: [Economist](https://www.economist.com/graphic-detail/2018/07/26/python-is-becoming-the-worlds-most-popular-coding-language), [Huffington Post](https://www.huffingtonpost.com/entry/why-python-is-the-best-programming-language-with-which_us_59ef8f62e4b04809c05011b9), [TechRepublic](https://www.techrepublic.com/article/why-python-is-so-popular-with-developers-3-reasons-the-language-has-exploded/), and [QZ](https://qz.com/1408660/the-rise-of-python-as-seen-through-a-decade-of-stack-overflow/).
## How to learn Programming
### ABC Rule
**A**lways **b**e **c**oding.
Programming is more than just writing code into a text file. It means reading through parts of the [documentation <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_py.png?raw=1">](https://docs.python.org/), blogs with best practices, and tutorials, or researching problems on [Stack Overflow <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_so.png?raw=1">](https://stackoverflow.com/) while trying to implement features in the application at hand. Also, it means using command-line tools to automate some part of the work or manage different versions of a program, for example, with **[git](https://git-scm.com/)**. In short, programming involves a lot of "muscle memory," which can only be built and kept up through near-daily usage.
Further, many aspects of software architecture and best practices can only be understood after having implemented some requirements for the very first time. Coding also means "breaking" things to find out what makes them work in the first place.
Therefore, coding is learned best by just doing it for some time on a daily or at least a regular basis and not right before some task is due, just like learning a "real" language.
### The Maker's Schedule
[Y Combinator <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_hn.png?raw=1">](https://www.ycombinator.com/) co-founder [Paul Graham <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_wiki.png?raw=1">](https://en.wikipedia.org/wiki/Paul_Graham_%28programmer%29) wrote a very popular and often cited [article](http://www.paulgraham.com/makersschedule.html) where he divides every person into belonging to one of two groups:
- **Managers**: People that need to organize things and command others (e.g., a "boss" or manager). Their schedule is usually organized by the hour or even 30-minute intervals.
- **Makers**: People that create things (e.g., programmers, artists, or writers). Such people think in half days or full days.
Have you ever wondered why so many tech people work during nights and sleep at "weird" times? The reason is that many programming-related tasks require a "flow" state in one's mind that is hard to achieve when one can get interrupted, even if it is only for one short question. Graham describes that only knowing that one has an appointment in three hours can cause a programmer to not get into a flow state.
As a result, do not set aside a certain amount of time for learning something but rather plan in an *entire evening* or a *rainy Sunday* where you can work on a problem in an *open end* setting. And do not be surprised anymore to hear "I looked at it over the weekend" from a programmer.
### Phase Iteration
When being asked the above question, most programmers answer something that can be classified into one of two broader groups.
**1) Toy Problem, Case Study, or Prototype**: Pick some problem, break it down into smaller sub-problems, and solve them with an end in mind.
**2) Books, Video Tutorials, and Courses**: Research the best book, blog, video, or tutorial for something and work it through from start to end.
The truth is that you need to iterate between these two phases.
Building a prototype always reveals issues no book or tutorial can think of before. Data is never as clean as it should be. An algorithm from a textbook must be adapted to a peculiar aspect of a case study. It is essential to learn to "ship a product" because only then will one have looked at all the aspects.
The major downside of this approach is that one likely learns bad "patterns" overfitted to the case at hand, and one does not get the big picture or mental concepts behind a solution. This gap can be filled in by well-written books: For example, check the Python/programming books offered by [Packt](https://www.packtpub.com/packt/offers/free-learning/) or [O’Reilly](https://www.oreilly.com/).
## Contents
**Part A: Expressing Logic**
- What is a programming language? What kind of words exist?
- *Chapter 1*: [Elements of a Program <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/01_elements/00_content.ipynb)
- *Chapter 2*: [Functions & Modularization <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/02_functions/00_content.ipynb)
- What is the flow of execution? How can we form sentences from words?
- *Chapter 3*: [Conditionals & Exceptions <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/03_conditionals/00_content.ipynb)
- *Chapter 4*: [Recursion & Looping <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/04_iteration/00_content.ipynb)
**Part B: Managing Data and Memory**
- How is data stored in memory?
- *Chapter 5*: [Numbers & Bits <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/05_numbers/00_content.ipynb)
- *Chapter 6*: [Text & Bytes <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/06_text/00_content.ipynb)
- *Chapter 7*: [Sequential Data <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/07_sequences/00_content.ipynb)
- *Chapter 8*: [Map, Filter, & Reduce <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/08_mfr/00_content.ipynb)
- *Chapter 9*: [Mappings & Sets <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/09_mappings/00_content.ipynb)
- *Chapter 10*: Arrays & Dataframes
- How can we create custom data types?
- *Chapter 11*: [Classes & Instances <img height="12" style="display: inline-block" src="https://github.com/jads-nl/intro-to-python/blob/develop/static/link/to_nb.png?raw=1">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/11_classes/00_content.ipynb)
## xkcd Comic
As with every good book, there has to be a [xkcd](https://xkcd.com/353/) comic somewhere.
```
import antigravity
```
<img src="https://github.com/jads-nl/intro-to-python/blob/develop/00_intro/static/xkcd.png?raw=1" width="30%">
| github_jupyter |
```
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
from numpy import array
# Lane-line detection pipeline: load image -> grayscale -> Gaussian blur ->
# Canny edges -> keep only edges inside a triangular region of interest ->
# probabilistic Hough transform -> draw the detected segments over the edges.
origin_image=mpimg.imread("canny-edge-detection-test.jpg")
plt.figure()
# plt.subplot(1,3,1)
# plt.imshow(image)
image=array(origin_image)
# Image dimensions: rows are y, columns are x.
ysize = image.shape[0]
xsize = image.shape[1]
# Vertices of the triangular region of interest, in pixel coordinates
# (hard-coded for this test image's resolution -- TODO confirm for others).
left_bottom = [10,540]
right_bottom = [900,540]
apex = [480, 280]
# Fit lines (y=Ax+B) to identify the 3 sided region of interest
# np.polyfit() returns the coefficients [A, B] of the fit
fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)
# Find the region inside the lines
# (image y grows downward, so "below" the two side lines means YY > line(x))
XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))
region_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \
                    (YY > (XX*fit_right[0] + fit_right[1])) & \
                    (YY < (XX*fit_bottom[0] + fit_bottom[1]))
gray_image=cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# plt.subplot(1,3,2)
# plt.imshow(gray_image)
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray_image,(kernel_size, kernel_size),0)
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Zero out every edge pixel that falls outside the region of interest.
edges[~region_thresholds] = False
# plt.subplot(1,3,3)
# plt.imshow(edges)
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 1               # distance resolution of the Hough grid, in pixels
theta = np.pi/180     # angular resolution of the Hough grid, in radians
threshold = 15        # minimum number of votes for an accepted line
min_line_length = 40  # minimum length (pixels) of an accepted segment
max_line_gap = 20     # maximum gap (pixels) between joinable segments
line_image = np.copy(image)*0 #creating a blank to draw lines on
# Run Hough on edge detected image
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
# Iterate over the output "lines" and draw lines on the blank
for line in lines:
    for x1,y1,x2,y2 in line:
        cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# Create a "color" binary image to combine with line image
color_edges = np.dstack((edges, edges, edges))
# Draw the lines on the edge image
combo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
plt.imshow(combo)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# Read in and grayscale the image
# Note: in the previous example we were reading a .jpg
# Here we read a .png and convert to 0,255 bytescale
image = mpimg.imread("canny-edge-detection-test.jpg")
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Next we'll create a masked edges image using cv2.fillPoly()
mask = np.zeros_like(edges)
ignore_mask_color = 255
# This time we are defining a four sided polygon to mask
imshape = image.shape
vertices = np.array([[(0,imshape[0]),(450, 290), (490, 290), (imshape[1],imshape[0])]], dtype=np.int32)
# Fill the polygon region of the all-zero mask image with 255.
cv2.fillPoly(mask, vertices, ignore_mask_color)
# Bitwise-AND the edge image with the filled mask: pixels outside the region
# of interest become 0; edge pixels inside it were already 255 and stay 255,
# and every other pixel is 0.
masked_edges = cv2.bitwise_and(edges, mask)
# Try tuning the parameters below and observe the effects.
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 #minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
line_image = np.copy(image)*0 # creating a blank to draw lines on
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
# The Hough output is only a collection of segment endpoints, so we connect
# each endpoint pair with cv2.line to render the segments we actually want.
# Iterate over the output "lines" and draw lines on a blank image
for line in lines:
    for x1,y1,x2,y2 in line:
        cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# Create a "color" binary image to combine with line image
# 'edges' is a 2D array with a single value per pixel, whereas a real image
# stores [R,G,B] per pixel; np.dstack stacks the edge map into 3 identical
# channels so it can be blended with the cv2.line output.
color_edges = np.dstack((edges, edges, edges))
# Draw the lines on the edge image
lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
plt.imshow(lines_edges)
```
| github_jupyter |
# Bayesian Regression Using NumPyro
In this tutorial, we will explore how to do bayesian regression in NumPyro, using a simple example adapted from Statistical Rethinking [[1](#References)]. In particular, we would like to explore the following:
- Write a simple model using the `sample` NumPyro primitive.
- Run inference using MCMC in NumPyro, in particular, using the No U-Turn Sampler (NUTS) to get a posterior distribution over our regression parameters of interest.
- Learn about inference utilities such as `Predictive` and `log_likelihood`.
- Learn how we can use effect-handlers in NumPyro to generate execution traces from the model, condition on sample statements, seed models with RNG seeds, etc., and use this to implement various utilities that will be useful for MCMC. e.g. computing model log likelihood, generating empirical distribution over the posterior predictive, etc.
## Tutorial Outline:
1. [Dataset](#Dataset)
2. [Regression Model to Predict Divorce Rate](#Regression-Model-to-Predict-Divorce-Rate)
- [Model-1: Predictor-Marriage Rate](#Model-1:-Predictor---Marriage-Rate)
- [Posterior Distribution over the Regression Parameters](#Posterior-Distribution-over-the-Regression-Parameters)
- [Posterior Predictive Distribution](#Posterior-Predictive-Distribution)
- [Predictive Utility With Effect Handlers](#Predictive-Utility-With-Effect-Handlers)
- [Model Predictive Density](#Model-Predictive-Density)
- [Model-2: Predictor-Median Age of Marriage](#Model-2:-Predictor---Median-Age-of-Marriage)
- [Model-3: Predictor-Marriage Rate and Median Age of Marriage](#Model-3:-Predictor---Marriage-Rate-and-Median-Age-of-Marriage)
- [Divorce Rate Residuals by State](#Divorce-Rate-Residuals-by-State)
3. [Regression Model with Measurement Error](#Regression-Model-with-Measurement-Error)
- [Effect of Incorporating Measurement Noise on Residuals](#Effect-of-Incorporating-Measurement-Noise-on-Residuals)
4. [References](#References)
```
!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
import os
from IPython.display import set_matplotlib_formats
import jax.numpy as jnp
from jax import random, vmap
from jax.scipy.special import logsumexp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpyro
from numpyro.diagnostics import hpdi
import numpyro.distributions as dist
from numpyro import handlers
from numpyro.infer import MCMC, NUTS
plt.style.use("bmh")
# Render figures as SVG when this notebook is built into the Sphinx docs.
if "NUMPYRO_SPHINXBUILD" in os.environ:
    set_matplotlib_formats("svg")
# This tutorial is written against the NumPyro 0.8.0 API.
assert numpyro.__version__.startswith("0.8.0")
```
## Dataset
For this example, we will use the `WaffleDivorce` dataset from Chapter 05, Statistical Rethinking [[1](#References)]. The dataset contains divorce rates in each of the 50 states in the USA, along with predictors such as population, median age of marriage, whether it is a Southern state and, curiously, number of Waffle Houses.
```
# WaffleDivorce dataset (Statistical Rethinking, ch. 5); the CSV is
# semicolon-separated, hence sep=";".
DATASET_URL = "https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/WaffleDivorce.csv"
dset = pd.read_csv(DATASET_URL, sep=";")
dset
```
Let us plot the pair-wise relationship amongst the main variables in the dataset, using `seaborn.pairplot`.
```
# Main columns to compare pair-wise.  Named `plot_vars` so the built-in
# `vars()` function is not shadowed.
plot_vars = [
    "Population",
    "MedianAgeMarriage",
    "Marriage",
    "WaffleHouses",
    "South",
    "Divorce",
]
sns.pairplot(dset, x_vars=plot_vars, y_vars=plot_vars, palette="husl");
```
From the plots above, we can clearly observe that there is a relationship between divorce rates and marriage rates in a state (as might be expected), and also between divorce rates and median age of marriage.
There is also a weak relationship between number of Waffle Houses and divorce rates, which is not obvious from the plot above, but will be clearer if we regress `Divorce` against `WaffleHouse` and plot the results.
```
# Regress Divorce on WaffleHouses to make the (spurious) association visible.
sns.regplot(x="WaffleHouses", y="Divorce", data=dset);
```
This is an example of a spurious association. We do not expect the number of Waffle Houses in a state to affect the divorce rate, but it is likely correlated with other factors that have an effect on the divorce rate. We will not delve into this spurious association in this tutorial, but the interested reader is encouraged to read Chapters 5 and 6 of [[1](#References)] which explores the problem of causal association in the presence of multiple predictors.
For simplicity, we will primarily focus on marriage rate and the median age of marriage as our predictors for divorce rate throughout the remaining tutorial.
## Regression Model to Predict Divorce Rate
Let us now write a regression model in *NumPyro* to predict the divorce rate as a linear function of marriage rate and median age of marriage in each of the states.
First, note that our predictor variables have somewhat different scales. It is a good practice to standardize our predictors and response variables to mean `0` and standard deviation `1`, which should result in [faster inference](https://mc-stan.org/docs/2_19/stan-users-guide/standardizing-predictors-and-outputs.html).
```
def standardize(x):
    """Return *x* rescaled to zero mean and unit standard deviation."""
    return (x - x.mean()) / x.std()

# Standardized copies of the predictors and the response (mean 0, std 1),
# which the tutorial notes should result in faster inference.
dset["AgeScaled"] = dset.MedianAgeMarriage.pipe(standardize)
dset["MarriageScaled"] = dset.Marriage.pipe(standardize)
dset["DivorceScaled"] = dset.Divorce.pipe(standardize)
```
We write the NumPyro model as follows. While the code should largely be self-explanatory, take note of the following:
- In NumPyro, *model* code is any Python callable which can optionally accept additional arguments and keywords. For HMC which we will be using for this tutorial, these arguments and keywords remain static during inference, but we can reuse the same model to generate [predictions](#Posterior-Predictive-Distribution) on new data.
- In addition to regular Python statements, the model code also contains primitives like `sample`. These primitives can be interpreted with various side-effects using effect handlers. For more on effect handlers, refer to [[3](#References)], [[4](#References)]. For now, just remember that a `sample` statement makes this a stochastic function that samples some latent parameters from a *prior distribution*. Our goal is to infer the *posterior distribution* of these parameters conditioned on observed data.
- The reason why we have kept our predictors as optional keyword arguments is to be able to reuse the same model as we vary the set of predictors. Likewise, the reason why the response variable is optional is that we would like to reuse this model to sample from the posterior predictive distribution. See the [section](#Posterior-Predictive-Distribution) on plotting the posterior predictive distribution, as an example.
```
def model(marriage=None, age=None, divorce=None):
    """Linear regression of (standardized) divorce rate on optional predictors.

    Predictors are optional keyword arguments so the same model can be run
    with marriage rate only, median age only, or both; ``divorce`` is optional
    so the model can also be used to sample from the predictive distribution.
    Sample-site order (a, bM, bA, sigma, obs) matches the original exactly.
    """
    intercept = numpyro.sample("a", dist.Normal(0.0, 0.2))
    marriage_term = 0.0
    age_term = 0.0
    if marriage is not None:
        slope_marriage = numpyro.sample("bM", dist.Normal(0.0, 0.5))
        marriage_term = slope_marriage * marriage
    if age is not None:
        slope_age = numpyro.sample("bA", dist.Normal(0.0, 0.5))
        age_term = slope_age * age
    noise = numpyro.sample("sigma", dist.Exponential(1.0))
    expected = intercept + marriage_term + age_term
    numpyro.sample("obs", dist.Normal(expected, noise), obs=divorce)
```
### Model 1: Predictor - Marriage Rate
We first try to model the divorce rate as depending on a single variable, marriage rate. As mentioned above, we can use the same `model` code as earlier, but only pass values for `marriage` and `divorce` keyword arguments. We will use the No U-Turn Sampler (see [[5](#References)] for more details on the NUTS algorithm) to run inference on this simple model.
The Hamiltonian Monte Carlo (or, the NUTS) implementation in NumPyro takes in a potential energy function. This is the negative log joint density for the model. Therefore, for our model description above, we need to construct a function which given the parameter values returns the potential energy (or negative log joint density). Additionally, the verlet integrator in HMC (or, NUTS) returns sample values simulated using Hamiltonian dynamics in the unconstrained space. As such, continuous variables with bounded support need to be transformed into unconstrained space using bijective transforms. We also need to transform these samples back to their constrained support before returning these values to the user. Thankfully, this is handled on the backend for us, within a convenience class for doing [MCMC inference](https://numpyro.readthedocs.io/en/latest/mcmc.html#numpyro.mcmc.MCMC) that has the following methods:
- `run(...)`: runs warmup, adapts steps size and mass matrix, and does sampling using the sample from the warmup phase.
- `print_summary()`: print diagnostic information like quantiles, effective sample size, and the Gelman-Rubin diagnostic.
- `get_samples()`: gets samples from the posterior distribution.
Note the following:
- JAX uses functional PRNGs. Unlike other languages / frameworks which maintain a global random state, in JAX, every call to a sampler requires an [explicit PRNGKey](https://github.com/google/jax#random-numbers-are-different). We will split our initial random seed for subsequent operations, so that we do not accidentally reuse the same seed.
- We run inference with the `NUTS` sampler. To run vanilla HMC, we can instead use the [HMC](https://numpyro.readthedocs.io/en/latest/mcmc.html#numpyro.mcmc.HMC) class.
```
# Start from this source of randomness. We will split keys for subsequent operations.
rng_key = random.PRNGKey(0)
rng_key, rng_key_ = random.split(rng_key)
# Run NUTS.
kernel = NUTS(model)
num_samples = 2000
mcmc = MCMC(kernel, num_warmup=1000, num_samples=num_samples)
# Model 1: condition only on marriage rate and the observed divorce rate.
mcmc.run(
    rng_key_, marriage=dset.MarriageScaled.values, divorce=dset.DivorceScaled.values
)
mcmc.print_summary()
# dict of posterior samples for "a", "bM", "sigma"; leading axis is num_samples.
samples_1 = mcmc.get_samples()
```
#### Posterior Distribution over the Regression Parameters
We notice that the progress bar gives us online statistics on the acceptance probability, step size and number of steps taken per sample while running NUTS. In particular, during warmup, we adapt the step size and mass matrix to achieve a certain target acceptance probability which is 0.8, by default. We were able to successfully adapt our step size to achieve this target in the warmup phase.
During warmup, the aim is to adapt hyper-parameters such as step size and mass matrix (the HMC algorithm is very sensitive to these hyper-parameters), and to reach the typical set (see [[6](#References)] for more details). If there are any issues in the model specification, the first signal to notice would be low acceptance probabilities or very high number of steps. We use the sample from the end of the warmup phase to seed the MCMC chain (denoted by the second `sample` progress bar) from which we generate the desired number of samples from our target distribution.
At the end of inference, NumPyro prints the mean, std and 90% CI values for each of the latent parameters. Note that since we standardized our predictors and response variable, we would expect the intercept to have mean 0, as can be seen here. It also prints other convergence diagnostics on the latent parameters in the model, including [effective sample size](https://numpyro.readthedocs.io/en/latest/diagnostics.html#numpyro.diagnostics.effective_sample_size) and the [gelman rubin diagnostic](https://numpyro.readthedocs.io/en/latest/diagnostics.html#numpyro.diagnostics.gelman_rubin) ($\hat{R}$). The value for these diagnostics indicates that the chain has converged to the target distribution. In our case, the "target distribution" is the posterior distribution over the latent parameters that we are interested in. Note that this is often worth verifying with multiple chains for more complicated models. In the end, `samples_1` is a collection (in our case, a `dict` since `init_samples` was a `dict`) containing samples from the posterior distribution for each of the latent parameters in the model.
To look at our regression fit, let us plot the regression line using our posterior estimates for the regression parameters, along with the 90% Credibility Interval (CI). Note that the [hpdi](https://numpyro.readthedocs.io/en/latest/diagnostics.html#numpyro.diagnostics.hpdi) function in NumPyro's diagnostics module can be used to compute CI. In the functions below, note that the collected samples from the posterior are all along the leading axis.
```
def plot_regression(x, y_mean, y_hpdi, observations=None):
    """Plot observations, the mean regression line, and its HPDI band.

    Args:
        x: 1D array of predictor values (in any order; sorted internally).
        y_mean: mean of the regression line at each ``x``.
        y_hpdi: array of shape ``(2, len(x))`` with lower/upper HPDI bounds.
        observations: observed response values aligned with ``x``.  Defaults
            to the global ``dset.DivorceScaled`` so existing 3-argument
            callers keep their original behavior.

    Returns:
        The matplotlib ``Axes`` the plot was drawn on.
    """
    if observations is None:
        observations = dset.DivorceScaled.values
    # Sort values for plotting by x axis
    idx = jnp.argsort(x)
    marriage = x[idx]
    mean = y_mean[idx]
    hpdi = y_hpdi[:, idx]
    divorce = observations[idx]
    # Plot
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
    ax.plot(marriage, mean)
    ax.plot(marriage, divorce, "o")
    ax.fill_between(marriage, hpdi[0], hpdi[1], alpha=0.3, interpolate=True)
    return ax
# Compute empirical posterior distribution over mu
# mu = a + bM * MarriageScaled, broadcast over all posterior samples;
# result has shape (num_samples, number_of_states).
posterior_mu = (
    jnp.expand_dims(samples_1["a"], -1)
    + jnp.expand_dims(samples_1["bM"], -1) * dset.MarriageScaled.values
)
mean_mu = jnp.mean(posterior_mu, axis=0)
# 90% highest posterior density interval of the regression line.
hpdi_mu = hpdi(posterior_mu, 0.9)
ax = plot_regression(dset.MarriageScaled.values, mean_mu, hpdi_mu)
ax.set(
    xlabel="Marriage rate", ylabel="Divorce rate", title="Regression line with 90% CI"
);
```
We can see from the plot, that the CI broadens towards the tails where the data is relatively sparse, as can be expected.
#### Prior Predictive Distribution
Let us check that we have set sensible priors by sampling from the prior predictive distribution. NumPyro provides a handy [Predictive](http://num.pyro.ai/en/latest/utilities.html#numpyro.infer.util.Predictive) utility for this purpose.
```
from numpyro.infer import Predictive
rng_key, rng_key_ = random.split(rng_key)
# Draw 100 parameter sets from the priors and generate "obs" for each,
# to sanity-check that the priors produce plausible data.
prior_predictive = Predictive(model, num_samples=100)
prior_predictions = prior_predictive(rng_key_, marriage=dset.MarriageScaled.values)[
    "obs"
]
mean_prior_pred = jnp.mean(prior_predictions, axis=0)
hpdi_prior_pred = hpdi(prior_predictions, 0.9)
ax = plot_regression(dset.MarriageScaled.values, mean_prior_pred, hpdi_prior_pred)
ax.set(xlabel="Marriage rate", ylabel="Divorce rate", title="Predictions with 90% CI");
```
#### Posterior Predictive Distribution
Let us now look at the posterior predictive distribution to see how our predictive distribution looks with respect to the observed divorce rates. To get samples from the posterior predictive distribution, we need to run the model by substituting the latent parameters with samples from the posterior. Note that by default we generate a single prediction for each sample from the joint posterior distribution, but this can be controlled using the `num_samples` argument.
```
rng_key, rng_key_ = random.split(rng_key)
# Substitute posterior samples into the model: one "obs" draw per sample.
predictive = Predictive(model, samples_1)
predictions = predictive(rng_key_, marriage=dset.MarriageScaled.values)["obs"]
df = dset.filter(["Location"])
df["Mean Predictions"] = jnp.mean(predictions, axis=0)
df.head()
```
#### Predictive Utility With Effect Handlers
To remove the magic behind `Predictive`, let us see how we can combine [effect handlers](https://numpyro.readthedocs.io/en/latest/handlers.html) with the [vmap](https://github.com/google/jax#auto-vectorization-with-vmap) JAX primitive to implement our own simplified predictive utility function that can do vectorized predictions.
```
def predict(rng_key, post_samples, model, *args, **kwargs):
    """Run *model* once with latents fixed to *post_samples*; return "obs".

    The model is conditioned on the given posterior sample first, then
    seeded with *rng_key*, and finally traced so the recorded value of the
    "obs" site can be extracted.
    """
    conditioned = handlers.condition(model, post_samples)
    seeded = handlers.seed(conditioned, rng_key)
    exec_trace = handlers.trace(seeded).get_trace(*args, **kwargs)
    return exec_trace["obs"]["value"]
# vectorize predictions via vmap
# Maps over (rng_key, posterior sample) pairs along the leading axis,
# producing one row of predictions per posterior sample.
predict_fn = vmap(
    lambda rng_key, samples: predict(
        rng_key, samples, model, marriage=dset.MarriageScaled.values
    )
)
```
Note the use of the `condition`, `seed` and `trace` effect handlers in the `predict` function.
- The `seed` effect-handler is used to wrap a stochastic function with an initial `PRNGKey` seed. When a sample statement inside the model is called, it uses the existing seed to sample from a distribution but this effect-handler also splits the existing key to ensure that future `sample` calls in the model use the newly split key instead. This is to prevent us from having to explicitly pass in a `PRNGKey` to each `sample` statement in the model.
- The `condition` effect handler conditions the latent sample sites to certain values. In our case, we are conditioning on values from the posterior distribution returned by MCMC.
- The `trace` effect handler runs the model and records the execution trace within an `OrderedDict`. This trace object contains execution metadata that is useful for computing quantities such as the log joint density.
It should be clear now that the `predict` function simply runs the model by substituting the latent parameters with samples from the posterior (generated by the `mcmc` function) to generate predictions. Note the use of JAX's auto-vectorization transform called [vmap](https://github.com/google/jax#auto-vectorization-with-vmap) to vectorize predictions. Note that if we didn't use `vmap`, we would have to use a native for loop which for each sample which is much slower. Each draw from the posterior can be used to get predictions over all the 50 states. When we vectorize this over all the samples from the posterior using `vmap`, we will get a `predictions_1` array of shape `(num_samples, 50)`. We can then compute the mean and 90% CI of these samples to plot the posterior predictive distribution. We note that our mean predictions match those obtained from the `Predictive` utility class.
```
# Using the same key as we used for Predictive - note that the results are identical.
predictions_1 = predict_fn(random.split(rng_key_, num_samples), samples_1)
mean_pred = jnp.mean(predictions_1, axis=0)
df = dset.filter(["Location"])
df["Mean Predictions"] = mean_pred
df.head()
# 90% interval of the posterior predictive for each state.
hpdi_pred = hpdi(predictions_1, 0.9)
ax = plot_regression(dset.MarriageScaled.values, mean_pred, hpdi_pred)
ax.set(xlabel="Marriage rate", ylabel="Divorce rate", title="Predictions with 90% CI");
```
We have used the same `plot_regression` function as earlier. We notice that our CI for the predictive distribution is much broader as compared to the last plot due to the additional noise introduced by the `sigma` parameter. Most data points lie well within the 90% CI, which indicates a good fit.
#### Posterior Predictive Density
Likewise, making use of effect-handlers and `vmap`, we can also compute the log likelihood for this model given the dataset, and the log posterior predictive density [[6](#References)] which is given by
$$ \log \prod_{i=1}^{n} \int p(y_i \mid \theta)\, p_{post}(\theta)\, d\theta
\approx \sum_{i=1}^{n} \log \frac{\sum_s p(y_i \mid \theta^{s})}{S} \\
= \sum_{i=1}^{n} \left( \log \sum_s p(y_i \mid \theta^{s}) - \log S \right)
$$
Here, $i$ indexes the observed data points $y$ and $s$ indexes the posterior samples over the latent parameters $\theta$. If the posterior predictive density for a model has a comparatively high value, it indicates that the observed data-points have higher probability under the given model.
```
def log_likelihood(rng_key, params, model, *args, **kwargs):
    """Return the pointwise log likelihood of the "obs" site under *params*.

    The model is conditioned on one set of latent values (*params*) and
    traced; the log probability of the recorded "obs" value under its
    distribution is returned.  ``rng_key`` is not referenced in the body
    (every latent site is conditioned, so nothing is sampled) but is kept
    so the signature matches the vmap-ed caller in ``log_pred_density``.
    """
    model = handlers.condition(model, params)
    model_trace = handlers.trace(model).get_trace(*args, **kwargs)
    obs_node = model_trace["obs"]
    return obs_node["fn"].log_prob(obs_node["value"])
def log_pred_density(rng_key, params, model, *args, **kwargs):
    """Compute the log posterior predictive density of the data.

    For each of the posterior draws in `params`, the pointwise log likelihood
    is evaluated (vectorized over draws with `vmap`); the draws are then
    averaged in log space via `logsumexp(...) - log(n)` and the result is
    summed over the observed data points.
    """
    # Number of posterior draws: leading dimension of any latent parameter.
    num_draws = list(params.values())[0].shape[0]
    pointwise_ll = vmap(
        lambda key, draw: log_likelihood(key, draw, model, *args, **kwargs)
    )(random.split(rng_key, num_draws), params)
    # logsumexp over the draw axis approximates the integral over theta.
    return (logsumexp(pointwise_ll, 0) - jnp.log(num_draws)).sum()
```
Note that NumPyro provides the [log_likelihood](http://num.pyro.ai/en/latest/utilities.html#log-likelihood) utility function that can be used directly for computing `log likelihood` as in the first function for any general model. In this tutorial, we would like to emphasize that there is nothing magical about such utility functions, and you can roll out your own inference utilities using NumPyro's effect handling stack.
```
# Report the log posterior predictive density for Model 1 (marriage-rate predictor).
rng_key, rng_key_ = random.split(rng_key)
print(
    "Log posterior predictive density: {}".format(
        log_pred_density(
            rng_key_,
            samples_1,
            model,
            marriage=dset.MarriageScaled.values,
            divorce=dset.DivorceScaled.values,
        )
    )
)
```
### Model 2: Predictor - Median Age of Marriage
We will now model the divorce rate as a function of the median age of marriage. The computations are mostly a reproduction of what we did for Model 1. Notice the following:
- Divorce rate is inversely related to the age of marriage. Hence states where the median age of marriage is low will likely have a higher divorce rate.
- We get a higher log likelihood as compared to Model 1, indicating that median age of marriage is likely a much better predictor of divorce rate.
```
# Model 2: regress divorce rate on (standardized) median age of marriage.
rng_key, rng_key_ = random.split(rng_key)
mcmc.run(rng_key_, age=dset.AgeScaled.values, divorce=dset.DivorceScaled.values)
mcmc.print_summary()
samples_2 = mcmc.get_samples()
# Regression mean per posterior draw: mu = a + bA * age (broadcast over states).
posterior_mu = (
    jnp.expand_dims(samples_2["a"], -1)
    + jnp.expand_dims(samples_2["bA"], -1) * dset.AgeScaled.values
)
mean_mu = jnp.mean(posterior_mu, axis=0)
hpdi_mu = hpdi(posterior_mu, 0.9)
ax = plot_regression(dset.AgeScaled.values, mean_mu, hpdi_mu)
ax.set(
    xlabel="Median marriage age",
    ylabel="Divorce rate",
    title="Regression line with 90% CI",
);
# Posterior predictive samples (includes observation noise via sigma).
rng_key, rng_key_ = random.split(rng_key)
predictions_2 = Predictive(model, samples_2)(rng_key_, age=dset.AgeScaled.values)["obs"]
mean_pred = jnp.mean(predictions_2, axis=0)
hpdi_pred = hpdi(predictions_2, 0.9)
ax = plot_regression(dset.AgeScaled.values, mean_pred, hpdi_pred)
ax.set(xlabel="Median Age", ylabel="Divorce rate", title="Predictions with 90% CI");
# Log posterior predictive density for Model 2.
rng_key, rng_key_ = random.split(rng_key)
print(
    "Log posterior predictive density: {}".format(
        log_pred_density(
            rng_key_,
            samples_2,
            model,
            age=dset.AgeScaled.values,
            divorce=dset.DivorceScaled.values,
        )
    )
)
```
### Model 3: Predictor - Marriage Rate and Median Age of Marriage
Finally, we will also model divorce rate as depending on both marriage rate as well as the median age of marriage. Note that the model's posterior predictive density is similar to Model 2 which likely indicates that the marginal information from marriage rate in predicting divorce rate is low when the median age of marriage is already known.
```
# Model 3: use both marriage rate and median age of marriage as predictors.
rng_key, rng_key_ = random.split(rng_key)
mcmc.run(
    rng_key_,
    marriage=dset.MarriageScaled.values,
    age=dset.AgeScaled.values,
    divorce=dset.DivorceScaled.values,
)
mcmc.print_summary()
samples_3 = mcmc.get_samples()
# Log posterior predictive density for Model 3, for comparison with Models 1-2.
rng_key, rng_key_ = random.split(rng_key)
print(
    "Log posterior predictive density: {}".format(
        log_pred_density(
            rng_key_,
            samples_3,
            model,
            marriage=dset.MarriageScaled.values,
            age=dset.AgeScaled.values,
            divorce=dset.DivorceScaled.values,
        )
    )
)
```
### Divorce Rate Residuals by State
The regression plots above shows that the observed divorce rates for many states differs considerably from the mean regression line. To dig deeper into how the last model (Model 3) under-predicts or over-predicts for each of the states, we will plot the posterior predictive and residuals (`Observed divorce rate - Predicted divorce rate`) for each of the states.
```
# Predictions for Model 3.
rng_key, rng_key_ = random.split(rng_key)
predictions_3 = Predictive(model, samples_3)(
    rng_key_, marriage=dset.MarriageScaled.values, age=dset.AgeScaled.values
)["obs"]
y = jnp.arange(50)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 16))
pred_mean = jnp.mean(predictions_3, axis=0)
pred_hpdi = hpdi(predictions_3, 0.9)
# Residuals = observed - predicted, per posterior draw and per state.
residuals_3 = dset.DivorceScaled.values - predictions_3
residuals_mean = jnp.mean(residuals_3, axis=0)
residuals_hpdi = hpdi(residuals_3, 0.9)
# Sort states by mean residual so both panels share the same y ordering.
idx = jnp.argsort(residuals_mean)
# Plot posterior predictive
ax[0].plot(jnp.zeros(50), y, "--")
ax[0].errorbar(
    pred_mean[idx],
    y,
    xerr=pred_hpdi[1, idx] - pred_mean[idx],
    marker="o",
    ms=5,
    mew=4,
    ls="none",
    alpha=0.8,
)
ax[0].plot(dset.DivorceScaled.values[idx], y, marker="o", ls="none", color="gray")
ax[0].set(
    xlabel="Posterior Predictive (red) vs. Actuals (gray)",
    ylabel="State",
    title="Posterior Predictive with 90% CI",
)
ax[0].set_yticks(y)
ax[0].set_yticklabels(dset.Loc.values[idx], fontsize=10)
# Plot residuals. The residual statistics computed above are reused here;
# the original cell recomputed residuals_3/residuals_mean/residuals_hpdi
# identically at this point, which was redundant.
err = residuals_hpdi[1] - residuals_mean
ax[1].plot(jnp.zeros(50), y, "--")
ax[1].errorbar(
    residuals_mean[idx], y, xerr=err[idx], marker="o", ms=5, mew=4, ls="none", alpha=0.8
)
ax[1].set(xlabel="Residuals", ylabel="State", title="Residuals with 90% CI")
ax[1].set_yticks(y)
ax[1].set_yticklabels(dset.Loc.values[idx], fontsize=10);
```
The plot on the left shows the mean predictions with 90% CI for each of the states using Model 3. The gray markers indicate the actual observed divorce rates. The right plot shows the residuals for each of the states, and both these plots are sorted by the residuals, i.e. at the bottom, we are looking at states where the model predictions are higher than the observed rates, whereas at the top, the reverse is true.
Overall, the model fit seems good because most observed data points lie within a 90% CI around the mean predictions. However, notice how the model over-predicts by a large margin for states like Idaho (bottom left), and on the other end under-predicts for states like Maine (top right). This is likely indicative of other factors that we are missing out in our model that affect divorce rate across different states. Even ignoring other socio-political variables, one such factor that we have not yet modeled is the measurement noise given by `Divorce SE` in the dataset. We will explore this in the next section.
## Regression Model with Measurement Error
Note that in our previous models, each data point influences the regression line equally. Is this well justified? We will build on the previous model to incorporate measurement error given by `Divorce SE` variable in the dataset. Incorporating measurement noise will be useful in ensuring that observations that have higher confidence (i.e. lower measurement noise) have a greater impact on the regression line. On the other hand, this will also help us better model outliers with high measurement errors. For more details on modeling errors due to measurement noise, refer to Chapter 14 of [[1](#References)].
To do this, we will reuse Model 3, with the only change that the final observed value has a measurement error given by `divorce_sd` (notice that this has to be standardized since the `divorce` variable itself has been standardized to mean 0 and std 1).
```
def model_se(marriage, age, divorce_sd, divorce=None):
    """Model 3 extended with measurement error.

    The latent per-state divorce rate follows the same linear regression as
    Model 3; the recorded value is that latent rate observed through Gaussian
    noise with known (standardized) scale `divorce_sd`.

    Sample-site order is identical to the original model so PRNG draws match.
    """
    a = numpyro.sample("a", dist.Normal(0.0, 0.2))
    bM = numpyro.sample("bM", dist.Normal(0.0, 0.5))
    bA = numpyro.sample("bA", dist.Normal(0.0, 0.5))
    sigma = numpyro.sample("sigma", dist.Exponential(1.0))
    # Linear predictor for the latent (true) divorce rate.
    mu = a + bM * marriage + bA * age
    divorce_rate = numpyro.sample("divorce_rate", dist.Normal(mu, sigma))
    # Observed rate = latent rate corrupted by known measurement noise.
    numpyro.sample("obs", dist.Normal(divorce_rate, divorce_sd), obs=divorce)
# Standardize
# Divorce SE is rescaled by the same std as the divorce variable so the
# measurement noise lives on the standardized scale used by the model.
dset["DivorceScaledSD"] = dset["Divorce SE"] / jnp.std(dset.Divorce.values)
rng_key, rng_key_ = random.split(rng_key)
# Higher target_accept_prob (0.9) gives smaller NUTS steps for this harder posterior.
kernel = NUTS(model_se, target_accept_prob=0.9)
mcmc = MCMC(kernel, num_warmup=1000, num_samples=3000)
mcmc.run(
    rng_key_,
    marriage=dset.MarriageScaled.values,
    age=dset.AgeScaled.values,
    divorce_sd=dset.DivorceScaledSD.values,
    divorce=dset.DivorceScaled.values,
)
mcmc.print_summary()
samples_4 = mcmc.get_samples()
```
### Effect of Incorporating Measurement Noise on Residuals
Notice that our values for the regression coefficients is very similar to Model 3. However, introducing measurement noise allows us to more closely match our predictive distribution to the observed values. We can see this if we plot the residuals as earlier.
```
# Posterior predictive for Model 4 (with measurement error).
rng_key, rng_key_ = random.split(rng_key)
predictions_4 = Predictive(model_se, samples_4)(
    rng_key_,
    marriage=dset.MarriageScaled.values,
    age=dset.AgeScaled.values,
    divorce_sd=dset.DivorceScaledSD.values,
)["obs"]
sd = dset.DivorceScaledSD.values
# Residuals = observed - predicted, per posterior draw and per state.
residuals_4 = dset.DivorceScaled.values - predictions_4
residuals_mean = jnp.mean(residuals_4, axis=0)
residuals_hpdi = hpdi(residuals_4, 0.9)
err = residuals_hpdi[1] - residuals_mean
# Sort states by mean residual for a readable y-axis.
idx = jnp.argsort(residuals_mean)
y = jnp.arange(50)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 16))
# Plot Residuals
ax.plot(jnp.zeros(50), y, "--")
ax.errorbar(
    residuals_mean[idx], y, xerr=err[idx], marker="o", ms=5, mew=4, ls="none", alpha=0.8
)
# Plot SD
ax.errorbar(residuals_mean[idx], y, xerr=sd[idx], ls="none", color="orange", alpha=0.9)
# Plot earlier mean residual
ax.plot(
    jnp.mean(dset.DivorceScaled.values - predictions_3, 0)[idx],
    y,
    ls="none",
    marker="o",
    ms=6,
    color="black",
    alpha=0.6,
)
ax.set(xlabel="Residuals", ylabel="State", title="Residuals with 90% CI")
ax.set_yticks(y)
ax.set_yticklabels(dset.Loc.values[idx], fontsize=10)
ax.text(
    -2.8,
    -7,
    "Residuals (with error-bars) from current model (in red). "
    "Black marker \nshows residuals from the previous model (Model 3). "
    "Measurement \nerror is indicated by orange bar.",
);
```
The plot above shows the residuals for each of the states, along with the measurement noise given by the inner error bar. The black markers are the mean residuals from our earlier Model 3. Notice how having an additional degree of freedom to model the measurement noise has shrunk the residuals. In particular, for Idaho and Maine, our predictions are now much closer to the observed values after incorporating measurement noise in the model.
To better see how measurement noise affects the movement of the regression line, let us plot the residuals with respect to the measurement noise.
```
# Compare mean residuals of Model 3 vs Model 4 against measurement noise.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 6))
x = dset.DivorceScaledSD.values
y1 = jnp.mean(residuals_3, 0)
y2 = jnp.mean(residuals_4, 0)
ax.plot(x, y1, ls="none", marker="o")
ax.plot(x, y2, ls="none", marker="o")
# Dashed segment links each state's pair of residuals to show the shrinkage.
for i, (j, k) in enumerate(zip(y1, y2)):
    ax.plot([x[i], x[i]], [j, k], "--", color="gray")
ax.set(
    xlabel="Measurement Noise",
    ylabel="Residual",
    title="Mean residuals (Model 4: red, Model 3: blue)",
);
```
The plot above shows what has happened in more detail - the regression line itself has moved to ensure a better fit for observations with low measurement noise (left of the plot) where the residuals have shrunk very close to 0. That is to say that data points with low measurement error have a concomitantly higher contribution in determining the regression line. On the other hand, for states with high measurement error (right of the plot), incorporating measurement noise allows us to move our posterior distribution mass closer to the observations resulting in a shrinkage of residuals as well.
## References
1. McElreath, R. (2016). Statistical Rethinking: A Bayesian Course with Examples in R and Stan CRC Press.
2. Stan Development Team. [Stan User's Guide](https://mc-stan.org/docs/2_19/stan-users-guide/index.html)
3. Goodman, N.D., and StuhlMueller, A. (2014). [The Design and Implementation of Probabilistic Programming Languages](http://dippl.org/)
4. Pyro Development Team. [Poutine: A Guide to Programming with Effect Handlers in Pyro](http://pyro.ai/examples/effect_handlers.html)
5. Hoffman, M.D., Gelman, A. (2011). The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo.
6. Betancourt, M. (2017). A Conceptual Introduction to Hamiltonian Monte Carlo.
7. JAX Development Team (2018). [Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more](https://github.com/google/jax)
8. Gelman, A., Hwang, J., and Vehtari A. [Understanding predictive information criteria for Bayesian models](https://arxiv.org/pdf/1307.5928.pdf)
| github_jupyter |
# Reasoning in LTN
This tutorial defines and illustrates reasoning in LTN. It expects basic familiarity with other parts of LTN.
### Logical Consequence in LTN
The essence of reasoning is to determine if a closed formula $\phi$ is the logical consequence of a knowledgebase $(\mathcal{K},\mathcal{G}_\theta,\Theta)$, where $\mathcal{K}$ denotes the set of rules in the knowledgebase and $\mathcal{G}_\theta$ denotes a grounding that depends on some parameters $\theta \in \Theta$.
The notion of logical consequence is adapted to Real Logic as follows:
- In classical logic (boolean truth values), a formula $\phi$ is the logical consequence of a knowledgebase $\mathcal{K}$ if for every interpretation (or model) that verifies every formula in $\mathcal{K}$, $\phi$ is verified;
- In Real Logic (fuzzy truth values), a formula $\phi$ is the logical consequence of $(\mathcal{K},\mathcal{G}_\theta,\Theta)$ if for every grounding $\mathcal{G}_\theta$ such that $\mathrm{SatAgg}_{\phi'\in\mathcal{K}}\mathcal{G}_{\theta}(\phi') \geq q $, we have $\mathcal{G}_\theta(\phi)\geq q$, where $q$ is a fixed satisfaction threshold.
Logical consequence in Real Logic, by direct application of the definition, requires querying the truth value of $\phi$ for a potentially infinite set of groundings.
We therefore, in practice, consider the following directions:
1. **Reasoning by brave inference**: one seeks to verify if for all the grounded theories that *maximally satisfy* $\mathcal{K}$,
the grounding of $\phi$ gives a truth value greater than a threshold $q$.
This often requires to check an infinite number of groundings.
Instead, one can approximate the search for these grounded theories by running the optimization w.r.t. the knowledgebase satisfiability multiple times and checking these groundings only.
2. **Reasoning by refutation**: one seeks to find out a counterexample of a grounding that does satisfy the knowledgebase $\mathcal{K}$ but not the formula $\phi$ (given the threshold $q$). A directed search for such examples is performed using a different learning objective.
In this tutorial, we illustrate the second option, **reasoning by refutation**.
```
import logictensornetworks as ltn
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
```
### Example
We illustrate reasoning on the following toy example:
$$
(A \lor B) \models_q A \ ?
$$
where $A$ and $B$ are two propositional variables, and $\frac{1}{2} < q < 1$ is the satisfaction threshold.
We define $\mathcal{K}=\{A \lor B\}$ and $\phi=A$.
```
# Two propositional variables with trainable fuzzy truth values (init 0.).
A = ltn.proposition(0.,trainable=True)
B = ltn.proposition(0.,trainable=True)
# Fuzzy disjunction grounded as the probabilistic sum.
Or = ltn.Wrapper_Connective(ltn.fuzzy_ops.Or_ProbSum())
def axioms():
    # Satisfaction level of the knowledgebase K = {A or B}.
    return Or(A,B)
def phi():
    # Query formula phi = A.
    return A
```
### Reasoning by Refutation
The goal is to find a grounding that satisfies $\mathcal{K}$ but does not satisfy $\phi$. One can perform a directed search for such a counterexample by minimizing $\mathcal{G}_\theta(\phi)$ while imposing a constraint that invalidates results where $\mathcal{G}_\theta(\mathcal{K})<q$.
Let us define $\mathrm{penalty}(\mathcal{G}_\theta,q)=\begin{cases}
c \ \text{if}\ \mathcal{G}_\theta(\mathcal{K}) < q,\\
0 \ \text{otherwise},
\end{cases}$ where $c>1$ and set the objective:
$$
\mathcal{G}^\ast = \mathrm{argmin}_{\mathcal{G}_\theta} (\mathcal{G}_\theta(\phi) + \mathrm{penalty}(\mathcal{G}_\theta,q))
$$
The penalty $c$ ($>1$) is higher than any potential reduction in $\mathcal{G}(\phi)$ ($\leq 1$). $\mathcal{G}^\ast$ should satisfy in priority $\mathcal{G}^*(\mathcal{K}) \geq q$ before reducing $\mathcal{G}^*(\phi)$.
- If $\mathcal{G}^\ast(\mathcal{K}) < q$ : Then for all $\mathcal{G}_\theta$, $\mathcal{G}_\theta(\mathcal{K}) < q$ and therefore $(\mathcal{K},\mathcal{G}(\ \cdot\mid \theta), \Theta)\models_q\phi$.
- If $\mathcal{G}^\ast(\mathcal{K}) \geq q \ \text{and}\ \mathcal{G}^\ast(\phi) \geq q$ : Then for all $\mathcal{G}_\theta$ with $\mathcal{G}_\theta(\mathcal{K}) \geq q$, we have that $\mathcal{G}_\theta(\phi) \geq \mathcal{G}^\ast(\phi) \geq q$ and therefore $(\mathcal{K},\mathcal{G}(\ \cdot\mid\theta),\Theta)\models_q\phi$.
- If $\mathcal{G}^\ast(\mathcal{K}) \geq q \ \text{and}\ \mathcal{G}^\ast(\phi) < q$ : Then $(\mathcal{K},\mathcal{G}(\ \cdot\mid\theta),\Theta) \nvDash_q\phi$.
### Soft constraint
However, as $\mathrm{penalty}(\mathcal{G}_\theta,q)$ is a constant function on the continuous parts of its domain (zero gradients), it cannot be used directly as an objective to reach via gradient descent optimization. Instead, one should approximate the penalty with a soft constraint.
We use $\mathtt{elu}(\alpha,\beta (q-\mathcal{G}_\theta(\mathcal{K})))=\begin{cases}
\beta (q-\mathcal{G}_\theta(\mathcal{K}))\ &\text{if}\ \mathcal{G}_\theta(\mathcal{K}) \leq q,\\
\alpha (e^{q-\mathcal{G}_\theta(\mathcal{K})}-1) \ &\text{otherwise},
\end{cases}$ where $\alpha \geq 0$ and $\beta \geq 0$ are two hyper-parameters:
- When $\mathcal{G}_\theta(\mathcal{K}) < q$, the penalty is linear in $(q-\mathcal{G}_\theta(\mathcal{K}))$ with a slope of $\beta$.
Setting $\beta$ high, the gradients for $\mathcal{G}_\theta(\mathcal{K})$ will be high in absolute value if the knowledgebase in not satisfied; the minimizer will prioritize increasing the satisfaction of the knowledgebase.
- When $\mathcal{G}_\theta(\mathcal{K}) > q$, the penalty is a negative exponential that converges to $-\alpha$.
Setting $\alpha$ low but non zero ensures that, while the penalty plays an insignificant role when the knowledgebase is satisfied, the gradients do not vanish.
```
trainable_variables = [A,B]
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# hyperparameters of the soft constraint
alpha = 0.05
beta = 10
# satisfaction threshold
q = 0.95
# Directed search for a counterexample: minimize sat(phi) subject to the
# soft penalty that keeps the knowledgebase satisfied above threshold q.
for epoch in range(4000):
    with tf.GradientTape() as tape:
        sat_KB = axioms()
        sat_phi = phi()
        # elu soft constraint: steep (slope beta) when sat_KB < q, small
        # negative (toward -alpha) when sat_KB > q so gradients don't vanish.
        penalty = tf.keras.activations.elu(beta*(q-sat_KB),alpha=alpha)
        loss = sat_phi + penalty
    grads = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(grads, trainable_variables))
    if epoch%400 == 0:
        print("Epoch %d: Sat Level Knowledgebase %.3f Sat Level phi %.3f"%(epoch, axioms(), phi()))
print("Training finished at Epoch %d with Sat Level Knowledgebase %.3f Sat Level phi %.3f"%(epoch, axioms(), phi()))
```
At the end of training, the optimizer has found a grounding that satisfies $A \lor B$ but not $A$ (given the satisfaction threshold $q=0.95$). This is a counterexample to the logical consequence, proving that $A \lor B \nvDash A$
| github_jupyter |
# Customizing datasets in fastai
```
from fastai import *
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
```
In this tutorial, we'll see how to create custom subclasses of [`ItemBase`](/core.html#ItemBase) or [`ItemList`](/data_block.html#ItemList) while retaining everything the fastai library has to offer. To allow basic functions to work consistently across various applications, the fastai library delegates several tasks to one of those specific objects, and we'll see here which methods you have to implement to be able to have everything work properly. But first let's take a step back to see where you'll use your end result.
## Links with the data block API
The data block API works by allowing you to pick a class that is responsible to get your items and another class that is charged with getting your targets. Combined together, they create a pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) that is then wrapped inside a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). The training set, validation set and maybe test set are then all put in a [`DataBunch`](/basic_data.html#DataBunch).
The data block API allows you to mix and match what class your inputs have, what class your targets have, how to do the split between train and validation set, then how to create the [`DataBunch`](/basic_data.html#DataBunch), but if you have a very specific kind of input/target, the fastai classes might not be sufficient for you. This tutorial is there to explain what is needed to create a new class of items and what methods are important to implement or override.
It goes in two phases: first we focus on what you need to create a custom [`ItemBase`](/core.html#ItemBase) class (which is the type of your inputs/targets) then on how to create your custom [`ItemList`](/data_block.html#ItemList) (which is basically a set of [`ItemBase`](/core.html#ItemBase)) while highlighting which methods are called by the library.
## Creating a custom [`ItemBase`](/core.html#ItemBase) subclass
The fastai library contains three basic type of [`ItemBase`](/core.html#ItemBase) that you might want to subclass:
- [`Image`](/vision.image.html#Image) for vision applications
- [`Text`](/text.data.html#Text) for text applications
- [`TabularLine`](/tabular.data.html#TabularLine) for tabular applications
Whether you decide to create your own item class or to subclass one of the above, here is what you need to implement:
### Basic attributes
Those are the most important attributes your custom [`ItemBase`](/core.html#ItemBase) needs as they're used everywhere in the fastai library:
- `ItemBase.data` is the thing that is passed to pytorch when you want to create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). This is what needs to be fed to your model. Note that it might be different from the representation of your item since you might want something that is more understandable.
- `ItemBase.obj` is the thing that truly represents the underlying object behind your item. It should be sufficient to create a copy of your item. For instance, when creating the test set, the basic label is the `obj` attribute of the first label (or y) in the training set.
- `__str__` representation: if applicable, this is what will be displayed when the fastai library has to show your item.
If we take the example of a [`MultiCategory`](/core.html#MultiCategory) object `o` for instance:
- `o.obj` is the list of tags that object has
- `o.data` is a tensor where the tags are one-hot encoded
- `str(o)` returns the tags separated by ;
If you want to code the way data augmentation should be applied to your custom `Item`, you should write an `apply_tfms` method. This is what will be called if you apply a [`transform`](/vision.transform.html#vision.transform) block in the data block API.
### Advanced show methods
If you want to use methods such a `data.show_batch()` or `learn.show_results()` with a brand new kind of [`ItemBase`](/core.html#ItemBase) you will need to implement two other methods. In both cases, the generic function will grab the tensors of inputs, targets and predictions (if applicable), reconstruct the corespoding [`ItemBase`](/core.html#ItemBase) (see below) but it will delegate to the [`ItemBase`](/core.html#ItemBase) the way to display the results.
``` python
def show_xys(self, xs, ys, **kwargs)->None:
def show_xyzs(self, xs, ys, zs, **kwargs)->None:
```
In both cases `xs` and `ys` represent the inputs and the targets, in the second case `zs` represent the predictions. They are lists of the same length that depend on the `rows` argument you passed. The kwargs are passed from `data.show_batch()` / `learn.show_results()`. As an example, here is the source code of those methods in [`Image`](/vision.image.html#Image):
``` python
def show_xys(self, xs, ys, figsize:Tuple[int,int]=(9,10), **kwargs):
"Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
rows = int(math.sqrt(len(xs)))
fig, axs = plt.subplots(rows,rows,figsize=figsize)
for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
xs[i].show(ax=ax, y=ys[i], **kwargs)
plt.tight_layout()
def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
"""Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
`kwargs` are passed to the show method."""
figsize = ifnone(figsize, (6,3*len(xs)))
fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], y=y, **kwargs)
x.show(ax=axs[i,1], y=z, **kwargs)
```
### Example: ImageTuple
For cycleGANs, we need to create a custom type of items since we feed the model tuples of images. Let's look at how to code this. The basis is to code the `obj` and [`data`](/vision.data.html#vision.data) attributes. We do that in the init. The object is the tuple of images and the data their underlying tensors normalized between -1 and 1.
```
class ImageTuple(ItemBase):
    """An item holding a pair of images that are fed jointly to the model."""
    def __init__(self, img1, img2):
        self.img1 = img1
        self.img2 = img2
        # `obj` is the human-readable representation; `data` is what pytorch
        # receives: the underlying tensors rescaled from [0, 1] to [-1, 1].
        self.obj = (img1, img2)
        self.data = [2 * img1.data - 1, 2 * img2.data - 1]
```
Then we want to apply data augmentation to our tuple of images. That's done by writing and `apply_tfms` method as we saw before. Here we just pass that call to the two underlying images then update the data.
```
    def apply_tfms(self, tfms, **kwargs):
        """Apply transforms `tfms` to both images, then refresh `data`.

        Each underlying image handles its own augmentation; `data` must be
        rebuilt afterwards so the [-1, 1]-scaled tensors stay in sync.
        """
        self.img1 = self.img1.apply_tfms(tfms, **kwargs)
        self.img2 = self.img2.apply_tfms(tfms, **kwargs)
        self.data = [-1+2*self.img1.data,-1+2*self.img2.data]
        return self
```
We define a last method to stack the two images next ot each other, which we will use later for a customized `show_batch`/ `show_results` behavior.
```
def to_one(self): return Image(0.5+torch.cat(self.data,2)/2)
```
This is all your need to create your custom [`ItemBase`](/core.html#ItemBase). You won't be able to use it until you have put it inside your custom [`ItemList`](/data_block.html#ItemList) though, so you should continue reading the next section.
## Creating a custom [`ItemList`](/data_block.html#ItemList) subclass
This is the main class that allows you to group your inputs or your targets in the data block API. You can then use any of the splitting or labelling methods before creating a [`DataBunch`](/basic_data.html#DataBunch). To make sure everything is properly working, here is what you need to know.
### Class variables
Whether you're directly subclassing [`ItemList`](/data_block.html#ItemList) or one of the particular fastai ones, make sure to know the content of the following three variables as you may need to adjust them:
- `_bunch` contains the name of the class that will be used to create a [`DataBunch`](/basic_data.html#DataBunch)
- `_processor` contains a class (or a list of classes) of [`PreProcessor`](/data_block.html#PreProcessor) that will then be used as the default to create processor for this [`ItemList`](/data_block.html#ItemList)
- `_label_cls` contains the class that will be used to create the labels by default
`_label_cls` is the first to be used in the data block API, in the labelling function. If this variable is set to `None`, the label class will be guessed between [`CategoryList`](/data_block.html#CategoryList), [`MultiCategoryList`](/data_block.html#MultiCategoryList) and [`FloatList`](/data_block.html#FloatList) depending on the type of the first item. The default can be overriden by passing a `label_cls` in the kwargs of the labelling function.
`_processor` is the second to be used. The processors are called at the end of the labelling to apply some kind of function on your items. The default processor of the inputs can be overriden by passing a `processor` in the kwargs when creating the [`ItemList`](/data_block.html#ItemList), the default processor of the targets can be overriden by passing a `processor` in the kwargs of the labelling function.
Processors are useful for pre-processing some data, but you also need to put in their state any variable you want to save for the call of `data.export()` before creating a [`Learner`](/basic_train.html#Learner) object for inference: the state of the [`ItemList`](/data_block.html#ItemList) isn't saved there, only their processors. For instance `SegmentationProcessor` only reason to exist is to save the dataset classes, and during the process call, it doesn't do anything apart from setting the `classes` and `c` attributes to its dataset.
``` python
class SegmentationProcessor(PreProcessor):
def __init__(self, ds:ItemList): self.classes = ds.classes
def process(self, ds:ItemList): ds.classes,ds.c = self.classes,len(self.classes)
```
`_bunch` is the last class variable usd in the data block. When you type the final `databunch()`, the data block API calls the `_bunch.create` method with the `_bunch` of the inputs.
### Keeping \_\_init\_\_ arguments
If you pass additional arguments in your `__init__` call that you save in the state of your [`ItemList`](/data_block.html#ItemList), be wary to also pass them along in the `new` method as this one is used to create your training and validation set when splitting. The basic scheme is:
``` python
class MyCustomItemList(ItemList):
def __init__(self, items, my_arg, **kwargs):
self.my_arg = my_arg
super().__init__(items, **kwargs)
def new(self, items, **kwargs):
return super().new(items, self.my_arg, **kwargs)
```
Be sure to keep the kwargs as is, as they contain all the additional stuff you can pass to an [`ItemList`](/data_block.html#ItemList).
### Important methods
#### - get
The most important method you have to implement is `get`: this one tells your custom [`ItemList`](/data_block.html#ItemList) how to generate an [`ItemBase`](/core.html#ItemBase) from the thing stored in its `items` array. For instance an [`ImageItemList`](/vision.data.html#ImageItemList) has the following `get` method:
``` python
def get(self, i):
fn = super().get(i)
res = self.open(fn)
self.sizes[i] = res.size
return res
```
The first line basically looks at `self.items[i]` (which is a filename). The second line opens it since the `open`method is just
``` python
def open(self, fn): return open_image(fn)
```
The third line is there for [`ImagePoints`](/vision.image.html#ImagePoints) or [`ImageBBox`](/vision.image.html#ImageBBox) targets that require the size of the input [`Image`](/vision.image.html#Image) to be created. Note that if you are building a custom target class and you need the size of an image, you should call `self.x.size[i]`.
```
jekyll_note("""If you just want to customize the way an `Image` is opened, subclass `Image` and just change the
`open` method.""")
```
#### - reconstruct
This is the method that is called in `data.show_batch()`, `learn.predict()` or `learn.show_results()` to transform a pytorch tensor back in an [`ItemBase`](/core.html#ItemBase). In a way, it does the opposite of calling `ItemBase.data`. It should take a tensor `t` and return the same king of thing as the `get` method.
In some situations ([`ImagePoints`](/vision.image.html#ImagePoints), [`ImageBBox`](/vision.image.html#ImageBBox) for instance) you need to have a look at the corresponding input to rebuild your item. In this case, you should have a second argument called `x` (don't change that name). For instance, here is the `reconstruct` method of [`PointsItemList`](/vision.data.html#PointsItemList):
```python
def reconstruct(self, t, x): return ImagePoints(FlowField(x.size, t), scale=False)
```
#### - analyze_pred
This is the method that is called in `learn.predict()` or `learn.show_results()` to transform predictions in an output tensor suitable for `reconstruct`. For instance we may need to take the maximum argument (for [`Category`](/core.html#Category)) or the predictions greater than a certain threshold (for [`MultiCategory`](/core.html#MultiCategory)). It should take a tensor, along with optional kwargs and return a tensor.
For instance, here is the `analyze_pred` method of [`MultiCategoryList`](/data_block.html#MultiCategoryList):
```python
def analyze_pred(self, pred, thresh:float=0.5): return (pred >= thresh).float()
```
`thresh` can then be passed as kwarg during the calls to `learn.predict()` or `learn.show_results()`.
### Advanced show methods
If you want to use methods such as `data.show_batch()` or `learn.show_results()` with a brand new kind of [`ItemBase`](/core.html#ItemBase) you will need to implement two other methods. In both cases, the generic function will grab the tensors of inputs, targets and predictions (if applicable), reconstruct the corresponding items (as seen before) but it will delegate to the [`ItemList`](/data_block.html#ItemList) the way to display the results.
``` python
def show_xys(self, xs, ys, **kwargs)->None:
def show_xyzs(self, xs, ys, zs, **kwargs)->None:
```
In both cases `xs` and `ys` represent the inputs and the targets, in the second case `zs` represent the predictions. They are lists of the same length that depend on the `rows` argument you passed. The kwargs are passed from `data.show_batch()` / `learn.show_results()`. As an example, here is the source code of those methods in [`ImageItemList`](/vision.data.html#ImageItemList):
``` python
def show_xys(self, xs, ys, figsize:Tuple[int,int]=(9,10), **kwargs):
"Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
rows = int(math.sqrt(len(xs)))
fig, axs = plt.subplots(rows,rows,figsize=figsize)
for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
xs[i].show(ax=ax, y=ys[i], **kwargs)
plt.tight_layout()
def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
"""Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
`kwargs` are passed to the show method."""
figsize = ifnone(figsize, (6,3*len(xs)))
fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], y=y, **kwargs)
x.show(ax=axs[i,1], y=z, **kwargs)
```
Linked to this method is the class variable `_show_square` of an [`ItemList`](/data_block.html#ItemList). It defaults to `False` but if it's `True`, the `show_batch` method will send `rows * rows` `xs` and `ys` to `show_xys` (so that it shows a square of inputs/targets), like here for images.
### Example: ImageTupleList
Continuing our custom item example, we create a custom [`ItemList`](/data_block.html#ItemList) class that will wrap those `ImageTuple` properly. The first thing is to write a custom `__init__` method (since we need two lists of filenames here) which means we also have to change the `new` method.
```
class ImageTupleList(ImageItemList):
    """ItemList of `ImageTuple`s built from two lists of filenames."""
    def __init__(self, items, itemsB=None, **kwargs):
        # itemsB holds the filenames for the second image of each tuple.
        self.itemsB = itemsB
        super().__init__(items, **kwargs)
    def new(self, items, **kwargs):
        # Propagate itemsB so that copies made by splits/labelling keep it.
        return super().new(items, itemsB=self.itemsB, **kwargs)
```
We then specify how to get one item. Here we pass the image in the first list of items, and pick one randomly in the second list.
```
def get(self, i):
    """Return an `ImageTuple`: item `i` from the main list paired with a random image from `itemsB`."""
    # First image comes from the main items list.
    img1 = super().get(i)
    # Second image is drawn uniformly at random from the B list.
    fn = self.itemsB[random.randint(0, len(self.itemsB)-1)]
    return ImageTuple(img1, open_image(fn))
```
We also add a custom factory method to directly create an `ImageTupleList` from two folders.
```
@classmethod
def from_folders(cls, path, folderA, folderB, **kwargs):
    """Factory: build an `ImageTupleList` from two image folders under `path`."""
    # Collect filenames of the second folder, then build the list from the first.
    itemsB = ImageItemList.from_folder(path/folderB).items
    res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs)
    # Keep the common parent directory as the list's path.
    res.path = path
    return res
```
Finally, we have to specify how to reconstruct the `ImageTuple` from tensors if we want `show_batch` to work. We recreate the images and denormalize.
```
def reconstruct(self, t:Tensor):
    """Rebuild an `ImageTuple` from a tensor pair, denormalizing with t/2 + 0.5."""
    return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
```
There is no need to write a `analyze_preds` method since the default behavior (returning the output tensor) is what we need here. However `show_results` won't work properly unless the target (which we don't really care about here) has the right `reconstruct` method: the fastai library uses the `reconstruct` method of the target on the outputs. That's why we create another custom [`ItemList`](/data_block.html#ItemList) with just that `reconstruct` method. The first line is to reconstruct our dummy targets, and the second one is the same as in `ImageTupleList`.
```
class TargetTupleList(ItemList):
    """Dummy target list whose only job is to `reconstruct` outputs for `show_results`."""
    def reconstruct(self, t:Tensor):
        # Dummy (empty) targets arrive as 0-d tensors; pass them through unchanged.
        if len(t.size()) == 0: return t
        # Otherwise rebuild the image pair, denormalizing with t/2 + 0.5.
        return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
```
To make sure our `ImageTupleList` uses that for labelling, we pass it in `_label_cls` and this is what the result looks like.
```
class ImageTupleList(ImageItemList):
    """ItemList of `ImageTuple`s built from two lists of filenames.

    Targets are labelled with `TargetTupleList` so that `show_results`
    can reconstruct the output tuples.
    """
    # Class used to create labels from this list.
    _label_cls=TargetTupleList
    def __init__(self, items, itemsB=None, **kwargs):
        # itemsB holds the filenames for the second image of each tuple.
        self.itemsB = itemsB
        super().__init__(items, **kwargs)
    def new(self, items, **kwargs):
        # Propagate itemsB so that copies made by splits/labelling keep it.
        return super().new(items, itemsB=self.itemsB, **kwargs)
    def get(self, i):
        # Pair item i from the main list with a random image from itemsB.
        img1 = super().get(i)
        fn = self.itemsB[random.randint(0, len(self.itemsB)-1)]
        return ImageTuple(img1, open_image(fn))
    def reconstruct(self, t:Tensor):
        # Rebuild the image pair from tensors, denormalizing with t/2 + 0.5.
        return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
    @classmethod
    def from_folders(cls, path, folderA, folderB, **kwargs):
        # Factory: folderA provides the main items, folderB the second images.
        itemsB = ImageItemList.from_folder(path/folderB).items
        res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs)
        # Keep the common parent directory as the list's path.
        res.path = path
        return res
```
Lastly, we want to customize the behavior of `show_batch` and `show_results`. Remember the `to_one` method just puts the two images next to each other.
```
def show_xys(self, xs, ys, figsize:Tuple[int,int]=(12,6), **kwargs):
    "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
    # Lay the tuples out on a square grid of rows x rows axes.
    rows = int(math.sqrt(len(xs)))
    fig, axs = plt.subplots(rows,rows,figsize=figsize)
    # plt.subplots returns a bare Axes (not an array) when rows == 1.
    for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
        # to_one() concatenates the two images side by side for display.
        xs[i].to_one().show(ax=ax, **kwargs)
    plt.tight_layout()

def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
    """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
    `kwargs` are passed to the show method."""
    # Default figure size scales with the number of rows shown.
    figsize = ifnone(figsize, (12,3*len(xs)))
    # Two columns: input on the left, prediction on the right (ys unused here).
    fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
    fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
    for i,(x,z) in enumerate(zip(xs,zs)):
        x.to_one().show(ax=axs[i,0], **kwargs)
        z.to_one().show(ax=axs[i,1], **kwargs)
```
| github_jupyter |
# Creating a Real-Time Inferencing Service
You've spent a lot of time in this course training and registering machine learning models. Now it's time to deploy a model as a real-time service that clients can use to get predictions from new data.
## Connect to Your Workspace
The first thing you need to do is to connect to your workspace using the Azure ML SDK.
> **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate.
```
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
```
## Deploy a Model as a Web Service
You have trained and registered a machine learning model that classifies patients based on the likelihood of them having diabetes. This model could be used in a production environment such as a doctor's surgery where only patients deemed to be at risk need to be subjected to a clinical test for diabetes. To support this scenario, you will deploy the model as a web service.
First, let's determine what models you have registered in the workspace.
```
from azureml.core import Model
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
Right, now let's get the model that we want to deploy. By default, if we specify a model name, the latest version will be returned.
```
model = ws.models['diabetes_model']
print(model.name, 'version', model.version)
```
We're going to create a web service to host this model, and this will require some code and configuration files; so let's create a folder for those.
```
import os
folder_name = 'diabetes_service'
# Create a folder for the web service files
experiment_folder = './' + folder_name
os.makedirs(folder_name, exist_ok=True)
print(folder_name, 'folder created.')
```
The web service where we deploy the model will need some Python code to load the input data, get the model from the workspace, and generate and return predictions. We'll save this code in an *entry script* that will be deployed to the web service:
```
%%writefile $folder_name/score_diabetes.py
import json
import joblib
import numpy as np
from azureml.core.model import Model
# Called when the service is loaded
def init():
    """Load the registered 'diabetes_model' into a module global; called once at service start."""
    global model
    # Get the path to the deployed model file and load it
    model_path = Model.get_model_path('diabetes_model')
    model = joblib.load(model_path)

# Called when a request is received
def run(raw_data):
    """Score a request and return predicted class names as a JSON string.

    raw_data: JSON document of the form {"data": [[...feature values...], ...]}
    (this shape matches the client calls shown later in the notebook).
    """
    # Get the input data as a numpy array
    data = np.array(json.loads(raw_data)['data'])
    # Get a prediction from the model
    predictions = model.predict(data)
    # Get the corresponding classname for each prediction (0 or 1)
    classnames = ['not-diabetic', 'diabetic']
    predicted_classes = []
    for prediction in predictions:
        predicted_classes.append(classnames[prediction])
    # Return the predictions as JSON
    return json.dumps(predicted_classes)
```
The web service will be hosted in a container, and the container will need to install any required Python dependencies when it gets initialized. In this case, our scoring code requires **scikit-learn**, so we'll create a .yml file that tells the container host to install this into the environment.
```
from azureml.core.conda_dependencies import CondaDependencies
# Add the dependencies for our model (AzureML defaults is already included)
myenv = CondaDependencies()
myenv.add_conda_package('scikit-learn')
# Save the environment config as a .yml file
env_file = folder_name + "/diabetes_env.yml"
with open(env_file,"w") as f:
f.write(myenv.serialize_to_string())
print("Saved dependency info in", env_file)
# Print the .yml file
with open(env_file,"r") as f:
print(f.read())
```
Now you're ready to deploy. We'll deploy the container as a service named **diabetes-service**. The deployment process includes the following steps:
1. Define an inference configuration, which includes the scoring and environment files required to load and use the model.
2. Define a deployment configuration that defines the execution environment in which the service will be hosted. In this case, an Azure Container Instance.
3. Deploy the model as a web service.
4. Verify the status of the deployed service.
> **More Information**: For more details about model deployment, and options for target execution environments, see the [documentation](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-and-where).
Deployment will take some time as it first runs a process to create a container image, and then runs a process to create a web service based on the image. When deployment has completed successfully, you'll see a status of **Healthy**.
```
from azureml.core.webservice import AciWebservice
from azureml.core.model import InferenceConfig
# Configure the scoring environment
inference_config = InferenceConfig(runtime= "python",
source_directory = folder_name,
entry_script="score_diabetes.py",
conda_file="diabetes_env.yml")
deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1)
service_name = "diabetes-service"
service = Model.deploy(ws, service_name, [model], inference_config, deployment_config)
service.wait_for_deployment(True)
print(service.state)
```
Hopefully, the deployment has been successful and you can see a status of **Healthy**. If not, you can use the following code to check the status and get the service logs to help you troubleshoot.
```
print(service.state)
print(service.get_logs())
# If you need to make a change and redeploy, you may need to delete unhealthy service using the following code:
#service.delete()
```
Take a look at your workspace in [Azure ML Studio](https://ml.azure.com) and view the **Endpoints** page, which shows the deployed services in your workspace.
You can also retrieve the names of web services in your workspace by running the following code:
```
for webservice_name in ws.webservices:
print(webservice_name)
```
## Use the Web Service
With the service deployed, now you can consume it from a client application.
```
import json
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22]]
print ('Patient: {}'.format(x_new[0]))
# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})
# Call the web service, passing the input data (the web service will also accept the data in binary format)
predictions = service.run(input_data = input_json)
# Get the predicted class - it'll be the first (and only) one.
predicted_classes = json.loads(predictions)
print(predicted_classes[0])
```
You can also send multiple patient observations to the service, and get back a prediction for each one.
```
import json
# This time our input is an array of two feature arrays
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
[0,148,58,11,179,39.19207553,0.160829008,45]]
# Convert the array or arrays to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})
# Call the web service, passing the input data
predictions = service.run(input_data = input_json)
# Get the predicted classes.
predicted_classes = json.loads(predictions)
for i in range(len(x_new)):
print ("Patient {}".format(x_new[i]), predicted_classes[i] )
```
The code above uses the Azure ML SDK to connect to the containerized web service and use it to generate predictions from your diabetes classification model. In production, a model is likely to be consumed by business applications that do not use the Azure ML SDK, but simply make HTTP requests to the web service.
Let's determine the URL to which these applications must submit their requests:
```
endpoint = service.scoring_uri
print(endpoint)
```
Now that you know the endpoint URI, an application can simply make an HTTP request, sending the patient data in JSON (or binary) format, and receive back the predicted class(es).
```
import requests
import json
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
[0,148,58,11,179,39.19207553,0.160829008,45]]
# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})
# Set the content type
headers = { 'Content-Type':'application/json' }
predictions = requests.post(endpoint, input_json, headers = headers)
predicted_classes = json.loads(predictions.json())
for i in range(len(x_new)):
print ("Patient {}".format(x_new[i]), predicted_classes[i] )
```
You've deployed your web service as an Azure Container Instance (ACI) service that requires no authentication. This is fine for development and testing, but for production you should consider deploying to an Azure Kubernetes Service (AKS) cluster and enabling authentication. This would require REST requests to include an **Authorization** header.
## Delete the Service
When you no longer need your service, you should delete it to avoid incurring unnecessary charges.
```
service.delete()
print ('Service deleted.')
```
For more information about publishing a model as a service, see the [documentation](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-and-where)
| github_jupyter |
# Extensisq methods & Lotka-Volterra problem
The extensisq methods are compared to the explicit runge kutta methods of scipy on the Lotka-Volterra problem (predator prey model). This problem was copied from the solve_ivp page in scipy's reference manual.
## Problem definition
The parameters of this problem are defined as additional arguments `arg` to the derivative function.
```
def lotkavolterra(t, z, a, b, c, d):
    """Right-hand side of the Lotka-Volterra predator-prey ODE system.

    Parameters
    ----------
    t : float
        Time (unused; required by the solve_ivp callback signature).
    z : sequence of two floats
        Current state [prey, predator].
    a, b, c, d : float
        Model parameters (prey growth, predation, predator death,
        predator reproduction).

    Returns
    -------
    list of two floats
        [d(prey)/dt, d(predator)/dt].
    """
    prey, pred = z
    dprey_dt = a*prey - b*prey*pred
    dpred_dt = -c*pred + d*prey*pred
    return [dprey_dt, dpred_dt]
problem = {'fun' : lotkavolterra,
'y0' : [10., 5.],
't_span' : [0., 15.],
'args' : (1.5, 1, 3, 1)}
```
## Reference solution
First a reference solution is created by solving this problem with low tolerance.
```
from scipy.integrate import solve_ivp
reference = solve_ivp(**problem, atol=1e-12, rtol=1e-12, method='DOP853', dense_output=True)
```
## Solution plot
This solution has derivatives that change rapidly.
```
%matplotlib notebook
import matplotlib.pyplot as plt
plt.figure()
plt.plot(reference.t, reference.y.T)
plt.title('Lotka-Volterra')
plt.legend(('prey', 'predator'))
plt.show()
```
## Efficiency plot
Let's solve this problem with the explicit runge kutta methods of scipy (`RK45` and `DOP853`) and those of extensisq (`Ts45`, `BS45`, `BS45_i`, `CK45` and `CK45_o`) at a few absolute tolerance values and make a plot to compare their efficiency. The bottom left corner of that plot is the efficiency sweet spot: low error and few function evaluations.
```
import numpy as np
from extensisq import *
methods = ['RK45', 'DOP853', Ts45, BS45, BS45_i, CK45, CK45_o]
tolerances = np.logspace(-4, -9, 6)
plt.figure()
for method in methods:
name = method if isinstance(method, str) else method.__name__
e = []
n = []
for tol in tolerances:
sol = solve_ivp(**problem, rtol=1e-13, atol=tol, method=method,
dense_output=True) # only to separate BS45 and BS45_i
err = sol.y - reference.sol(sol.t)
e.append(np.linalg.norm(err))
n.append(sol.nfev)
if name == 'RK45':
style = '--k.'
elif name == 'DOP853':
style = '-k.'
else:
style = '.:'
plt.loglog(e, n, style, label=name)
plt.legend()
plt.xlabel(r'||error||')
plt.ylabel('nr of function evaluations')
plt.title('efficiency')
plt.show()
```
## Discussion
The efficiency graph shows:
* `RK45` has the poorest efficiency of all considered methods.
* `Ts45` is quite similar to `RK45`, but just a bit better.
* `BS45` and `BS45_i` are the most efficient fifth order methods for lower (tighter) tolerances. These two methods have exactly the same accuracy, but `BS45` needs more evaluations for its accurate interpolant. That interpolant is not used in this case. It was only enabled, by setting `dense_output=True`, to show the difference with respect to `BS45_i`.
* `CK45` and `CK45_o` are the most efficient methods at higher (looser) tolerances. The performance at lower tolerance is similar to `Ts45`.
* `DOP853` is a higher order method (eighth). Typically, it is more efficient at lower tolerance, but for this problem and these tolerances it does not work so well.
These observation may not be valid for other problems.
| github_jupyter |
# Creation of synthetic data for the Wisconsin Breast Cancer data set using a Variational AutoEncoder. Tested using a logistic regression model.
## Aim
To test a Variational AutoEncoder (VAE) for synthesising data that can be used to train a logistic regression machine learning model.
## Data
Raw data is available at:
https://www.kaggle.com/uciml/breast-cancer-wisconsin-data
## Basic methods description
* Create synthetic data by use of a Variational AutoEncoder
Kingma, D.P. and Welling, M. (2013) Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114,2013.
* Train logistic regression model on synthetic data and test against held-back raw data
## Code & results
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
# Turn warnings off for notebook publication
import warnings
warnings.filterwarnings("ignore")
```
### Import Data
```
def load_data():
    """Load the Wisconsin Breast Cancer data set from './wisconsin.csv'.

    Inputs
    ------
    None

    Returns
    -------
    data: full DataFrame (features plus boolean 'malignant' label column)
    X: NumPy array of features
    y: NumPy array of labels (True = malignant)
    X_col_names: list of feature column names

    Note: the docstring previously documented only three return values;
    the function returns four. A dead `malignant = pd.DataFrame()` line
    has also been removed.
    """
    # Load data and drop 'id' column
    data = pd.read_csv('./wisconsin.csv')
    data.drop('id', axis=1, inplace=True)

    # Replace the 'diagnosis' column with a boolean 'malignant' column
    # (appended, so it ends up in the last column place)
    data['malignant'] = data['diagnosis'] == 'M'
    data.drop('diagnosis', axis=1, inplace=True)

    # Split data into X and y
    X = data.drop(['malignant'], axis=1)
    y = data['malignant']

    # Get column names and convert to NumPy arrays
    X_col_names = list(X)
    X = X.values
    y = y.values

    return data, X, y, X_col_names
```
### Data processing
Split X and y into training and test sets
```
def split_into_train_test(X, y, test_proportion=0.25):
    """Randomly partition X and y into shuffled training and test sets.

    Inputs
    ------
    X, y: NumPy arrays
    test_proportion: fraction of samples placed in the test set (default 0.25)

    Returns
    -------
    X_train, X_test, y_train, y_test: NumPy arrays
    """
    # Delegate shuffling and splitting to scikit-learn; it already returns
    # the four arrays in the order our callers expect.
    return train_test_split(X, y, shuffle=True, test_size=test_proportion)
```
Standardise data
```
def standardise_data(X_train, X_test):
    """Standardise feature arrays using the *training* set statistics.

    Both arrays are shifted and scaled by the mean and standard deviation
    of the training set, so the test data is transformed consistently
    without leaking information from it.

    Inputs
    ------
    X_train, X_test: NumPy arrays

    Returns
    -------
    X_train_std, X_test_std: standardised NumPy arrays
    """
    train_mean = X_train.mean(axis=0)
    train_sd = X_train.std(axis=0)
    X_train_std = (X_train - train_mean) / train_sd
    X_test_std = (X_test - train_mean) / train_sd
    return X_train_std, X_test_std
```
### Calculate accuracy measures
```
def calculate_diagnostic_performance(actual, predicted):
    """Calculate sensitivity, specificity and related diagnostic measures.

    Inputs
    ------
    actual, predicted: NumPy arrays (1 = +ve, 0 = -ve)

    Returns
    -------
    A dictionary of results:

    1) accuracy: proportion of test results that are correct
    2) sensitivity: proportion of true +ve identified
    3) specificity: proportion of true -ve identified
    4) positive likelihood: increased probability of true +ve if test +ve
    5) negative likelihood: reduced probability of true +ve if test -ve
    6) false positive rate: proportion of false +ves in true -ve patients
    7) false negative rate: proportion of false -ves in true +ve patients
    8) positive predictive value: chance of true +ve if test +ve
    9) negative predictive value: chance of true -ve if test -ve
    10) actual positive rate: proportion of actual values that are +ve
    11) predicted positive rate: proportion of predicted values that are +ve
    12) recall: same as sensitivity
    13) precision: the proportion of predicted +ve that are true +ve
    14) f1 = 2 * ((precision * recall) / (precision + recall))

    * false positive rate is the percentage of healthy individuals who
      incorrectly receive a positive test result
    * false negative rate is the percentage of diseased individuals who
      incorrectly receive a negative test result
    """
    # Boolean masks for each outcome class
    actual_positives = actual == 1
    actual_negatives = actual == 0
    test_positives = predicted == 1
    test_negatives = predicted == 0
    test_correct = actual == predicted
    accuracy = test_correct.mean()
    true_positives = actual_positives & test_positives
    false_positives = actual_negatives & test_positives
    true_negatives = actual_negatives & test_negatives
    sensitivity = true_positives.sum() / actual_positives.sum()
    specificity = np.sum(true_negatives) / np.sum(actual_negatives)
    # NOTE(review): likelihood ratios divide by zero when specificity
    # is exactly 1 (or 0) -- unchanged from the original behaviour.
    positive_likelihood = sensitivity / (1 - specificity)
    negative_likelihood = (1 - sensitivity) / specificity
    false_postive_rate = 1 - specificity
    false_negative_rate = 1 - sensitivity
    positive_predictive_value = true_positives.sum() / test_positives.sum()
    negative_predicitive_value = true_negatives.sum() / test_negatives.sum()
    actual_positive_rate = actual.mean()
    predicted_positive_rate = predicted.mean()
    recall = sensitivity
    precision = \
        true_positives.sum() / (true_positives.sum() + false_positives.sum())
    f1 = 2 * ((precision * recall) / (precision + recall))

    # Add results to dictionary. (A duplicated 'false_postive_rate'
    # assignment has been removed; the misspelled keys are kept because
    # downstream code reads them.)
    results = dict()
    results['accuracy'] = accuracy
    results['sensitivity'] = sensitivity
    results['specificity'] = specificity
    results['positive_likelihood'] = positive_likelihood
    results['negative_likelihood'] = negative_likelihood
    results['false_postive_rate'] = false_postive_rate
    results['false_negative_rate'] = false_negative_rate
    results['positive_predictive_value'] = positive_predictive_value
    results['negative_predicitive_value'] = negative_predicitive_value
    results['actual_positive_rate'] = actual_positive_rate
    results['predicted_positive_rate'] = predicted_positive_rate
    results['recall'] = recall
    results['precision'] = precision
    results['f1'] = f1
    return results
```
### Logistic Regression Model
```
def fit_and_test_logistic_regression_model(X_train, X_test, y_train, y_test):
    """Fit a logistic regression model and test it.

    Returns a dictionary of accuracy measures (calls on
    `calculate_diagnostic_performance` to calculate results).

    Inputs
    ------
    X_train, X_test: NumPy arrays of features
    y_train, y_test: NumPy arrays of labels

    Returns
    -------
    A dictionary of accuracy results.
    """
    # Fit logistic regression model
    lr = LogisticRegression(C=0.1)
    lr.fit(X_train, y_train)

    # Predict test set labels.
    # BUG FIX: previously predicted on the module-level global `X_test_std`
    # rather than the `X_test` argument, silently coupling this function to
    # the calling notebook cell.
    y_pred = lr.predict(X_test)

    # Get accuracy results
    accuracy_results = calculate_diagnostic_performance(y_test, y_pred)

    return accuracy_results
```
### Synthetic Data Method - Variational AutoEncoder
```
def sampling(args):
    """
    Reparameterization trick by sampling from an isotropic unit Gaussian.
    Instead of sampling from Q(z|X), sample epsilon = N(0,I)
    z = z_mean + sqrt(var) * epsilon

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    import tensorflow
    from tensorflow.keras import backend as K

    z_mean, z_log_var = args
    # Batch size is dynamic (symbolic shape); latent dim is static.
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    epsilon = K.random_normal(shape=(batch, dim))
    # exp(0.5 * log(var)) = standard deviation
    sample = z_mean + K.exp(0.5 * z_log_var) * epsilon
    return sample
def make_synthetic_data_vae(X_original, y_original,
                            batch_size=256,
                            latent_dim=8,
                            epochs=10000,
                            learning_rate=2e-5,
                            dropout=0.25,
                            number_of_samples=1000):
    """
    Synthetic data generation with a Variational AutoEncoder.

    Two VAEs are trained: one on the positive-label examples and one on
    the negative-label examples. `number_of_samples` synthetic examples
    are then drawn from each decoder, giving a balanced synthetic set.
    (The docstring previously referenced a PCA helper by mistake.)

    Inputs
    ------
    X_original, y_original: NumPy arrays of original data
    batch_size: training batch size
    latent_dim: dimension of the VAE latent space
    epochs: number of training epochs
    learning_rate: Adam optimizer learning rate
    dropout: dropout rate for encoder/decoder hidden layers
    number_of_samples: number of synthetic samples to generate per class

    Returns
    -------
    X_synthetic: NumPy array
    y_synthetic: NumPy array
    """
    import tensorflow
    from tensorflow.keras import layers
    from tensorflow.keras.models import Model
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras import backend as K
    from tensorflow.keras.losses import mean_squared_error

    # Standardise X.
    # BUG FIX: `std` was previously computed with .mean(axis=0), so the
    # data was never correctly standardised (and the de-standardisation
    # below was wrong as well).
    mean = X_original.mean(axis=0)
    std = X_original.std(axis=0)
    X_std = (X_original - mean) / std

    # network parameters
    input_shape = X_original.shape[1]
    intermediate_dim = X_original.shape[1]

    # Split the training data into positive and negative
    mask = y_original == 1
    X_train_pos = X_std[mask]
    mask = y_original == 0
    X_train_neg = X_std[mask]

    # Set up list for positive and negative synthetic data sets
    synthetic_X_sets = []

    # Run fit twice: once for positive label examples, once for negative
    for training_set in [X_train_pos, X_train_neg]:

        # Clear Tensorflow
        K.clear_session()

        # VAE model = encoder + decoder
        # build encoder model
        inputs = layers.Input(shape=input_shape, name='encoder_input')
        encode_dense_1 = layers.Dense(
            intermediate_dim, activation='relu')(inputs)
        dropout_encoder_layer_1 = layers.Dropout(dropout)(encode_dense_1)
        encode_dense_2 = layers.Dense(
            intermediate_dim, activation='relu')(dropout_encoder_layer_1)
        z_mean = layers.Dense(latent_dim, name='z_mean')(encode_dense_2)
        z_log_var = layers.Dense(latent_dim, name='z_log_var')(encode_dense_2)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = layers.Lambda(
            sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')

        # build decoder model
        latent_inputs = layers.Input(shape=(latent_dim,), name='z_sampling')
        decode_dense_1 = layers.Dense(
            intermediate_dim, activation='relu')(latent_inputs)
        dropout_decoder_layer_1 = layers.Dropout(dropout)(decode_dense_1)
        decode_dense_2 = layers.Dense(
            intermediate_dim, activation='relu')(dropout_decoder_layer_1)
        outputs = layers.Dense(input_shape)(decode_dense_2)

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')

        # instantiate VAE model
        outputs = decoder(encoder(inputs)[2])
        vae = Model(inputs, outputs, name='vae_mlp')

        # Train the autoencoder
        optimizer = Adam(lr=learning_rate)
        # VAE loss = mse_loss or xent_loss + kl_loss
        vae.compile(optimizer=optimizer, loss=mean_squared_error)

        # Train the autoencoder
        vae.fit(training_set, training_set,
                batch_size=batch_size,
                shuffle=True,
                epochs=epochs,
                verbose=0)

        # Produce synthetic data: decode random latent-space samples,
        # then de-standardise back to the original feature scale.
        z_new = np.random.normal(size=(number_of_samples, latent_dim))
        reconst = decoder.predict(np.array(z_new))
        reconst = mean + (reconst * std)
        synthetic_X_sets.append(reconst)

        # Clear models
        K.clear_session()
        del encoder
        del decoder
        del vae

    # Combine positive and negative and shuffle rows
    X_synthetic = np.concatenate(
        (synthetic_X_sets[0], synthetic_X_sets[1]), axis=0)
    y_synthetic_pos = np.ones((number_of_samples, 1))
    y_synthetic_neg = np.zeros((number_of_samples, 1))
    y_synthetic = np.concatenate((y_synthetic_pos, y_synthetic_neg), axis=0)

    # Randomise order of X, y (shuffle X and y together, then split)
    synthetic = np.concatenate((X_synthetic, y_synthetic), axis=1)
    shuffle_index = np.random.permutation(np.arange(X_synthetic.shape[0]))
    synthetic = synthetic[shuffle_index]
    X_synthetic = synthetic[:, 0:-1]
    y_synthetic = synthetic[:, -1]

    return X_synthetic, y_synthetic
```
### Main code
```
# Load data
original_data, X, y, X_col_names = load_data()
# Set up results DataFrame
results = pd.DataFrame()
```
Fitting classification model to raw data
```
# Set number of replicate runs
number_of_runs = 30
# Set up lists for results
accuracy_measure_names = []
accuracy_measure_data = []
for run in range(number_of_runs):
# Print progress
print (run + 1, end=' ')
# Split training and test set
X_train, X_test, y_train, y_test = split_into_train_test(X, y)
# Standardise data
X_train_std, X_test_std = standardise_data(X_train, X_test)
# Get accuracy of fitted model
accuracy = fit_and_test_logistic_regression_model(
X_train_std, X_test_std, y_train, y_test)
# Get accuracy measure names if not previously done
if len(accuracy_measure_names) == 0:
for key, value in accuracy.items():
accuracy_measure_names.append(key)
# Get accuracy values
run_accuracy_results = []
for key, value in accuracy.items():
run_accuracy_results.append(value)
# Add results to results list
accuracy_measure_data.append(run_accuracy_results)
# Strore mean and sem in results DataFrame
accuracy_array = np.array(accuracy_measure_data)
results['raw_mean'] = accuracy_array.mean(axis=0)
results['raw_sem'] = accuracy_array.std(axis=0)/np.sqrt(number_of_runs)
results.index = accuracy_measure_names
```
Fitting classification model to synthetic data
```
# Set number of replicate runs
number_of_runs = 30

# BUG FIX: reset the results accumulator. Without this, the 30 raw-data
# runs from the previous cell remain in `accuracy_measure_data`, so the
# 'vae' mean/sem below would average raw-data and synthetic-data results.
accuracy_measure_data = []

for run in range(number_of_runs):
    # Print progress
    print(run + 1, end=' ')
    # Generate a fresh synthetic data set from the original data
    X_synthetic, y_synthetic = \
        make_synthetic_data_vae(X, y)
    # Split training and test set (test data comes from the raw data)
    X_train, X_test, y_train, y_test = split_into_train_test(X, y)
    # Standardise data (scaling fitted on the synthetic training data)
    X_train_std, X_test_std = standardise_data(X_synthetic, X_test)
    # Get accuracy of model trained on synthetic data, tested on raw data
    accuracy = fit_and_test_logistic_regression_model(
        X_train_std, X_test_std, y_synthetic, y_test)
    # Get accuracy measure names if not previously done
    if len(accuracy_measure_names) == 0:
        for key, value in accuracy.items():
            accuracy_measure_names.append(key)
    # Get accuracy values
    run_accuracy_results = []
    for key, value in accuracy.items():
        run_accuracy_results.append(value)
    # Add results to results list
    accuracy_measure_data.append(run_accuracy_results)

# Store mean and sem in results DataFrame
accuracy_array = np.array(accuracy_measure_data)
results['vae_mean'] = accuracy_array.mean(axis=0)
results['vae_sem'] = accuracy_array.std(axis=0)/np.sqrt(number_of_runs)
```
Save last synthetic data set
```
# Create a data frame with id
synth_df = pd.DataFrame()
synth_df['id'] = np.arange(y_synthetic.shape[0])
# Transfer X values to DataFrame
synth_df=pd.concat([synth_df,
pd.DataFrame(X_synthetic, columns=X_col_names)],
axis=1)
# Add a 'M' or 'B' diagnosis
y_list = list(y_synthetic)
diagnosis = ['M' if y==1 else 'B' for y in y_list]
synth_df['diagnosis'] = diagnosis
# Shuffle data
synth_df = synth_df.sample(frac=1.0)
# Save data
synth_df.to_csv('./Output/synthetic_data_vae.csv', index=False)
```
### Show results
```
results
```
## Compare raw and synthetic data means and standard deviations
```
# Process synthetic data
synth_df.drop('id', axis=1, inplace=True)
malignant = pd.DataFrame()
synth_df['malignant'] = synth_df['diagnosis'] == 'M'
synth_df.drop('diagnosis', axis=1, inplace=True)
descriptive_stats = pd.DataFrame()
descriptive_stats['Original M mean'] = \
original_data[original_data['malignant']==True].mean()
descriptive_stats['Synthetic M mean'] = \
synth_df[synth_df['malignant']==True].mean()
descriptive_stats['Original B mean'] = \
original_data[original_data['malignant']==False].mean()
descriptive_stats['Synthetic B mean'] = \
synth_df[synth_df['malignant']==False].mean()
descriptive_stats['Original M std'] = \
original_data[original_data['malignant']==True].std()
descriptive_stats['Synthetic M std'] = \
synth_df[synth_df['malignant']==True].std()
descriptive_stats['Original B std'] = \
original_data[original_data['malignant']==False].std()
descriptive_stats['Synthetic B std'] = \
synth_df[synth_df['malignant']==False].std()
descriptive_stats
```
| github_jupyter |
**MNIST Handwritten Digit Classification Dataset**
```
# example of loading the mnist dataset and visually inspecting it
from tensorflow.keras.datasets import mnist
from matplotlib import pyplot as plt
# load dataset (downloads on first use; 60k train / 10k test 28x28 images)
(trainX, trainy), (testX, testy) = mnist.load_data()
# summarize loaded dataset
print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape))
print('Test: X=%s, y=%s' % (testX.shape, testy.shape))
# plot first few images in a 3x3 grid
for i in range(9):
	# define subplot (3 rows, 3 columns, position i+1)
	plt.subplot(330 + 1 + i)
	# plot raw pixel data
	plt.imshow(trainX[i], cmap=plt.get_cmap('gray'))
# show the figure
plt.show()
```
From the above output we can see that MNIST has 60,000 training images of handwritten digits and 10,000 test images for evaluation. The images are square, 28×28 pixels.
The task is to classify a given image of a handwritten digit into one of 10 classes representing integer values from 0 to 9
```
# baseline cnn model for mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
```
**Model Evaluation Methodology**
**How to Develop a Baseline Model**
There are 5 steps in creating a baseline model: loading the dataset, preparing the dataset, defining the model, evaluating the model, and presenting the results.
**Load Dataset**
we are going to load the images and reshape the data arrays to have a single color channel.
```
# load train and test dataset
def load_dataset():
    """Load MNIST and prepare it for a CNN.

    Returns (trainX, trainY, testX, testY) where each image has an explicit
    single grey-scale channel and the labels are one-hot encoded.
    """
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    # give every 28x28 image an explicit single colour channel
    train_images = train_images.reshape((train_images.shape[0], 28, 28, 1))
    test_images = test_images.reshape((test_images.shape[0], 28, 28, 1))
    # turn integer class labels into 10-element one-hot vectors
    train_labels = to_categorical(train_labels)
    test_labels = to_categorical(test_labels)
    return train_images, train_labels, test_images, test_labels
```
there are 10 classes and that classes are represented as unique integers.
We can, therefore, use a one hot encoding for the class element of each sample, transforming the integer into a 10 element binary vector with a 1 for the index of the class value, and 0 values for all other classes.so i have done 1 hot encoding in the above code
**Prepare Pixel Data**
We know that the pixel values for each image in the dataset are unsigned integers in the range between black and white, or 0 and 255
```
# scale pixels
def prep_pixels(train, test):
    """Convert image arrays to float32 and scale pixel values into [0, 1].

    Returns the normalised (train, test) pair; the inputs are not modified.
    """
    normalised = [images.astype('float32') / 255.0 for images in (train, test)]
    return normalised[0], normalised[1]
```
**Define Model**
The model has two main aspects: the feature extraction front end comprised of convolutional and pooling layers, and the classifier backend that will make a prediction.
For the convolutional front-end, we can start with a single convolutional layer with a small filter size (3,3) and a modest number of filters (32) followed by a max pooling layer. The filter maps can then be flattened to provide features to the classifier.
```
# define cnn model
def define_model():
    """Build and compile the baseline CNN.

    A single conv/pool feature extractor feeds a dense classifier head
    with 10 softmax outputs (one per digit class).
    """
    cnn = Sequential([
        Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform',
               input_shape=(28, 28, 1)),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(100, activation='relu', kernel_initializer='he_uniform'),
        Dense(10, activation='softmax'),
    ])
    # stochastic gradient descent with momentum, as in the baseline recipe
    cnn.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9),
                loss='categorical_crossentropy', metrics=['accuracy'])
    return cnn
```
**Evaluate Model**
After the model is defined, we need to evaluate it
```
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
    """Cross-validate a freshly built model over n_folds folds.

    Returns (scores, histories): per-fold test accuracy and the per-fold
    Keras training history objects.
    """
    scores = []
    histories = []
    splitter = KFold(n_folds, shuffle=True, random_state=1)
    for train_ix, test_ix in splitter.split(dataX):
        # a brand-new model per fold so folds do not contaminate each other
        fold_model = define_model()
        fold_trainX, fold_trainY = dataX[train_ix], dataY[train_ix]
        fold_testX, fold_testY = dataX[test_ix], dataY[test_ix]
        history = fold_model.fit(fold_trainX, fold_trainY, epochs=10,
                                 batch_size=32,
                                 validation_data=(fold_testX, fold_testY),
                                 verbose=0)
        _, acc = fold_model.evaluate(fold_testX, fold_testY, verbose=0)
        print('> %.3f' % (acc * 100.0))
        scores.append(acc)
        histories.append(history)
    return scores, histories
```
**Present Results**
There are two key aspects to present: the diagnostics of the learning behavior of the model during training and the estimation of the model performance. These can be implemented using separate functions.
```
# plot diagnostic learning curves
def summarize_diagnostics(histories):
    """Plot train/validation loss and accuracy curves for every fold.

    All folds are overlaid on a shared two-row figure: loss on top,
    accuracy below.
    """
    # iterate directly instead of `for i in range(len(histories))`
    for history in histories:
        # plot loss
        plt.subplot(2, 1, 1)
        plt.title('Cross Entropy Loss')
        plt.plot(history.history['loss'], color='blue', label='train')
        plt.plot(history.history['val_loss'], color='orange', label='test')
        # plot accuracy
        plt.subplot(2, 1, 2)
        plt.title('Classification Accuracy')
        plt.plot(history.history['accuracy'], color='blue', label='train')
        plt.plot(history.history['val_accuracy'], color='orange', label='test')
    plt.show()
# summarize model performance
def summarize_performance(scores):
    """Print mean/std of fold accuracies and show a box-and-whisker plot."""
    pct_mean = mean(scores) * 100
    pct_std = std(scores) * 100
    print('Accuracy: mean=%.3f std=%.3f, n=%d' % (pct_mean, pct_std, len(scores)))
    plt.boxplot(scores)
    plt.show()
```
**Complete Example**
We need a function that will drive the test harness.
```
# run the test harness for evaluating a model
def run_test_harness():
	"""Drive the full experiment: load, normalise, cross-validate, report."""
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# evaluate model (5-fold cross-validation)
	scores, histories = evaluate_model(trainX, trainY)
	# learning curves
	summarize_diagnostics(histories)
	# summarize estimated performance
	summarize_performance(scores)
# entry point, run the test harness
run_test_harness()
```
**How to Develop an Improved Model**
Look at areas of model configuration that often result in an improvement.
The first is a change to the learning algorithm, and the second is an increase in the depth of the model
**Improvement to Learning**
approach that can rapidly accelerate the learning of a model and can result in large performance improvements is batch normalization.
We will evaluate the effect that batch normalization has on our baseline model.
So i am going to use the same set of code that i have used earlier for load & train & test the dataset
scaling the pixel
defining the CNN model
evaluate using K -FOLD CROSS VALIDATION
Plotting & summarising
In the above steps, only the code for the 3rd step (defining the CNN model) has been changed, to include the code required for batch normalisation.
The full code to improve the model is presented below.
```
# cnn model with batch normalization for mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import BatchNormalization
# load train and test dataset
def load_dataset():
	"""Load MNIST, add a single-channel axis, and one-hot encode the labels."""
	# load dataset
	(trainX, trainY), (testX, testY) = mnist.load_data()
	# reshape dataset to have a single channel
	trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
	testX = testX.reshape((testX.shape[0], 28, 28, 1))
	# one hot encode target values
	trainY = to_categorical(trainY)
	testY = to_categorical(testY)
	return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
	"""Convert images to float32 and scale pixel values into [0, 1]."""
	# convert from integers to floats
	train_norm = train.astype('float32')
	test_norm = test.astype('float32')
	# normalize to range 0-1
	train_norm = train_norm / 255.0
	test_norm = test_norm / 255.0
	# return normalized images
	return train_norm, test_norm
# define cnn model
def define_model():
	"""Baseline CNN plus BatchNormalization after the conv and dense layers."""
	model = Sequential()
	model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
	model.add(BatchNormalization())
	model.add(MaxPooling2D((2, 2)))
	model.add(Flatten())
	model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
	model.add(BatchNormalization())
	model.add(Dense(10, activation='softmax'))
	# compile model
	opt = SGD(learning_rate=0.01, momentum=0.9)
	model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
	return model
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
	"""Cross-validate a fresh model per fold; return (scores, histories)."""
	scores, histories = list(), list()
	# prepare cross validation
	kfold = KFold(n_folds, shuffle=True, random_state=1)
	# enumerate splits
	for train_ix, test_ix in kfold.split(dataX):
		# define model
		model = define_model()
		# select rows for train and test
		trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
		# fit model
		history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
		# evaluate model
		_, acc = model.evaluate(testX, testY, verbose=0)
		print('> %.3f' % (acc * 100.0))
		# stores scores
		scores.append(acc)
		histories.append(history)
	return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
	"""Overlay per-fold train/validation loss and accuracy curves."""
	for i in range(len(histories)):
		# plot loss
		plt.subplot(2, 1, 1)
		plt.title('Cross Entropy Loss')
		plt.plot(histories[i].history['loss'], color='blue', label='train')
		plt.plot(histories[i].history['val_loss'], color='orange', label='test')
		# plot accuracy
		plt.subplot(2, 1, 2)
		plt.title('Classification Accuracy')
		plt.plot(histories[i].history['accuracy'], color='blue', label='train')
		plt.plot(histories[i].history['val_accuracy'], color='orange', label='test')
	plt.show()
# summarize model performance
def summarize_performance(scores):
	"""Print mean/std of fold accuracies and show a box plot."""
	# print summary
	print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
	# box and whisker plots of results
	plt.boxplot(scores)
	plt.show()
# run the test harness for evaluating a model
def run_test_harness():
	"""Drive the batch-normalised experiment end to end."""
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# evaluate model
	scores, histories = evaluate_model(trainX, trainY)
	# learning curves
	summarize_diagnostics(histories)
	# summarize estimated performance
	summarize_performance(scores)
# entry point, run the test harness
run_test_harness()
```
From the above we can see that initially the accuracy was 98.65, and after batch normalisation (improving the model) the accuracy obtained is 98.757.
Further, I am going to try to improve the model by increasing the model depth.
**Increase in Model Depth**
There are many ways to change the model configuration in order to explore improvements over the baseline model.
We can increase the depth of the feature extractor part of the model, following a VGG-like pattern of adding more convolutional and pooling layers with the same sized filter, while increasing the number of filters. In this case, we will add a double convolutional layer with 64 filters each, followed by another max pooling layer.
```
# deeper cnn model for mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
# load train and test dataset
def load_dataset():
	"""Load MNIST, add a single-channel axis, and one-hot encode the labels."""
	# load dataset
	(trainX, trainY), (testX, testY) = mnist.load_data()
	# reshape dataset to have a single channel
	trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
	testX = testX.reshape((testX.shape[0], 28, 28, 1))
	# one hot encode target values
	trainY = to_categorical(trainY)
	testY = to_categorical(testY)
	return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
	"""Convert images to float32 and scale pixel values into [0, 1]."""
	# convert from integers to floats
	train_norm = train.astype('float32')
	test_norm = test.astype('float32')
	# normalize to range 0-1
	train_norm = train_norm / 255.0
	test_norm = test_norm / 255.0
	# return normalized images
	return train_norm, test_norm
# define cnn model
def define_model():
	"""Deeper VGG-like CNN: a second conv block with two 64-filter layers."""
	model = Sequential()
	model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
	model.add(MaxPooling2D((2, 2)))
	model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
	model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
	model.add(MaxPooling2D((2, 2)))
	model.add(Flatten())
	model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
	model.add(Dense(10, activation='softmax'))
	# compile model
	opt = SGD(learning_rate=0.01, momentum=0.9)
	model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
	return model
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
	"""Cross-validate a fresh model per fold; return (scores, histories)."""
	scores, histories = list(), list()
	# prepare cross validation
	kfold = KFold(n_folds, shuffle=True, random_state=1)
	# enumerate splits
	for train_ix, test_ix in kfold.split(dataX):
		# define model
		model = define_model()
		# select rows for train and test
		trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
		# fit model
		history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
		# evaluate model
		_, acc = model.evaluate(testX, testY, verbose=0)
		print('> %.3f' % (acc * 100.0))
		# stores scores
		scores.append(acc)
		histories.append(history)
	return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
	"""Overlay per-fold train/validation loss and accuracy curves."""
	for i in range(len(histories)):
		# plot loss
		plt.subplot(2, 1, 1)
		plt.title('Cross Entropy Loss')
		plt.plot(histories[i].history['loss'], color='blue', label='train')
		plt.plot(histories[i].history['val_loss'], color='orange', label='test')
		# plot accuracy
		plt.subplot(2, 1, 2)
		plt.title('Classification Accuracy')
		plt.plot(histories[i].history['accuracy'], color='blue', label='train')
		plt.plot(histories[i].history['val_accuracy'], color='orange', label='test')
	plt.show()
# summarize model performance
def summarize_performance(scores):
	"""Print mean/std of fold accuracies and show a box plot."""
	# print summary
	print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
	# box and whisker plots of results
	plt.boxplot(scores)
	plt.show()
# run the test harness for evaluating a model
def run_test_harness():
	"""Drive the deeper-CNN experiment end to end."""
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# evaluate model
	scores, histories = evaluate_model(trainX, trainY)
	# learning curves
	summarize_diagnostics(histories)
	# summarize estimated performance
	summarize_performance(scores)
# entry point, run the test harness
run_test_harness()
```
**How to Finalize the Model and Make Predictions**
The process of model improvement may continue for as long as we have ideas and the time and resources to test them out.
First, we will finalize our model by fitting a model on the entire training dataset and saving the model to file for later use. We will then load the model and evaluate its performance on the hold-out test dataset to get an idea of how well the chosen model actually performs in practice. Finally, we will use the saved model to make a prediction on a single image.
**Save Final Model**
A final model is typically fit on all available data, such as the combination of all train and test dataset.
```
# save the final model to file
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
# load train and test dataset
def load_dataset():
	"""Load MNIST, add a single-channel axis, and one-hot encode the labels."""
	# load dataset
	(trainX, trainY), (testX, testY) = mnist.load_data()
	# reshape dataset to have a single channel
	trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
	testX = testX.reshape((testX.shape[0], 28, 28, 1))
	# one hot encode target values
	trainY = to_categorical(trainY)
	testY = to_categorical(testY)
	return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
	"""Convert images to float32 and scale pixel values into [0, 1]."""
	# convert from integers to floats
	train_norm = train.astype('float32')
	test_norm = test.astype('float32')
	# normalize to range 0-1
	train_norm = train_norm / 255.0
	test_norm = test_norm / 255.0
	# return normalized images
	return train_norm, test_norm
# define cnn model
def define_model():
	"""Deeper VGG-like CNN chosen as the final model."""
	model = Sequential()
	model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
	model.add(MaxPooling2D((2, 2)))
	model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
	model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
	model.add(MaxPooling2D((2, 2)))
	model.add(Flatten())
	model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
	model.add(Dense(10, activation='softmax'))
	# compile model
	opt = SGD(learning_rate=0.01, momentum=0.9)
	model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
	return model
# run the test harness for evaluating a model
def run_test_harness():
	"""Fit the final model on the full training set and save it to disk."""
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# define model
	model = define_model()
	# fit model on the entire training set (no validation split)
	model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=0)
	# save model for later evaluation / prediction
	model.save('final_model.h5')
# entry point, run the test harness
run_test_harness()
```
**Evaluate Final Model**
We can now load the final model and evaluate it on the hold out test dataset.
This is something we might do if we were interested in presenting the performance of the chosen model to project stakeholders.
```
# evaluate the deep model on the test dataset
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import to_categorical
# load train and test dataset
def load_dataset():
	"""Load MNIST, add a single-channel axis, and one-hot encode the labels."""
	# load dataset
	(trainX, trainY), (testX, testY) = mnist.load_data()
	# reshape dataset to have a single channel
	trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
	testX = testX.reshape((testX.shape[0], 28, 28, 1))
	# one hot encode target values
	trainY = to_categorical(trainY)
	testY = to_categorical(testY)
	return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
	"""Convert images to float32 and scale pixel values into [0, 1]."""
	# convert from integers to floats
	train_norm = train.astype('float32')
	test_norm = test.astype('float32')
	# normalize to range 0-1
	train_norm = train_norm / 255.0
	test_norm = test_norm / 255.0
	# return normalized images
	return train_norm, test_norm
# run the test harness for evaluating a model
def run_test_harness():
	"""Load the saved final model and report accuracy on the hold-out test set."""
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# load model saved by the previous cell
	model = load_model('final_model.h5')
	# evaluate model on test dataset
	_, acc = model.evaluate(testX, testY, verbose=0)
	print('> %.3f' % (acc * 100.0))
# entry point, run the test harness
run_test_harness()
```
Make Prediction
We can use our saved model to make a prediction on new images.
The model assumes that new images are grayscale, that they have been aligned so that one image contains one centered handwritten digit, and that the size of the image is square with the size 28×28 pixels.
```
from google.colab import files
uploaded = files.upload()
```
i have uploaded the sample_image2 we apply the saved model on that image & we will check whether it classifies the image & predicts it correctly
```
# make a prediction for a new image.
from numpy import argmax
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import load_model

# load and prepare the image
def load_image(filename):
	"""Load an image file as a (1, 28, 28, 1) float32 array scaled to [0, 1]."""
	# load the image as 28x28 grayscale; `grayscale=True` is deprecated in
	# modern Keras — `color_mode` is the supported spelling
	img = load_img(filename, color_mode='grayscale', target_size=(28, 28))
	# convert to array
	img = img_to_array(img)
	# reshape into a single sample with 1 channel
	img = img.reshape(1, 28, 28, 1)
	# prepare pixel data (same scaling the model was trained with)
	img = img.astype('float32')
	img = img / 255.0
	return img

# load an image and predict the class
def run_example():
	"""Load sample_image2.png and print the digit predicted by the saved model."""
	# load the image
	img = load_image('sample_image2.png')
	# load model
	model = load_model('final_model.h5')
	# predict the class (argmax over the 10 softmax outputs)
	predict_value = model.predict(img)
	digit = argmax(predict_value)
	print(digit)

# entry point, run the example
run_example()
```
so the sample image 2 that i have uploaded has been predicted properly
```
from google.colab import files
uploaded = files.upload()
```
i have uploaded the sample_image1 which we apply the saved model on that image & we will check whether it classifies the image & predicts it correctly
```
# make a prediction for a new image.
from numpy import argmax
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import load_model
# load and prepare the image
def load_image(filename):
	"""Load an image file as a (1, 28, 28, 1) float32 array scaled to [0, 1]."""
	# load the image (NOTE(review): `grayscale=True` is deprecated in newer
	# Keras; `color_mode='grayscale'` is the supported argument)
	img = load_img(filename, grayscale=True, target_size=(28, 28))
	# convert to array
	img = img_to_array(img)
	# reshape into a single sample with 1 channel
	img = img.reshape(1, 28, 28, 1)
	# prepare pixel data
	img = img.astype('float32')
	img = img / 255.0
	return img
# load an image and predict the class
def run_example():
	"""Load sample_image1.png and print the digit predicted by the saved model."""
	# load the image
	img = load_image('sample_image1.png')
	# load model
	model = load_model('final_model.h5')
	# predict the class
	predict_value = model.predict(img)
	digit = argmax(predict_value)
	print(digit)
# entry point, run the example
run_example()
```
so the sample image 1 that i have uploaded has been predicted properly
```
from google.colab import files
uploaded = files.upload()
```
```
# make a prediction for a new image.
from numpy import argmax
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import load_model
# load and prepare the image
def load_image(filename):
	"""Load an image file as a (1, 28, 28, 1) float32 array scaled to [0, 1]."""
	# load the image (NOTE(review): `grayscale=True` is deprecated in newer
	# Keras; `color_mode='grayscale'` is the supported argument)
	img = load_img(filename, grayscale=True, target_size=(28, 28))
	# convert to array
	img = img_to_array(img)
	# reshape into a single sample with 1 channel
	img = img.reshape(1, 28, 28, 1)
	# prepare pixel data
	img = img.astype('float32')
	img = img / 255.0
	return img
# load an image and predict the class
def run_example():
	"""Load sample_image3.png and print the digit predicted by the saved model."""
	# load the image
	img = load_image('sample_image3.png')
	# load model
	model = load_model('final_model.h5')
	# predict the class
	predict_value = model.predict(img)
	digit = argmax(predict_value)
	print(digit)
# entry point, run the example
run_example()
# upload the next sample image via the Colab file picker
from google.colab import files
uploaded = files.upload()
```
```
# make a prediction for a new image.
from numpy import argmax
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import load_model
# load and prepare the image
def load_image(filename):
	"""Load an image file as a (1, 28, 28, 1) float32 array scaled to [0, 1]."""
	# load the image (NOTE(review): `grayscale=True` is deprecated in newer
	# Keras; `color_mode='grayscale'` is the supported argument)
	img = load_img(filename, grayscale=True, target_size=(28, 28))
	# convert to array
	img = img_to_array(img)
	# reshape into a single sample with 1 channel
	img = img.reshape(1, 28, 28, 1)
	# prepare pixel data
	img = img.astype('float32')
	img = img / 255.0
	return img
# load an image and predict the class
def run_example():
	"""Load sample_image4.png and print the digit predicted by the saved model."""
	# load the image
	img = load_image('sample_image4.png')
	# load model
	model = load_model('final_model.h5')
	# predict the class
	predict_value = model.predict(img)
	digit = argmax(predict_value)
	print(digit)
# entry point, run the example
run_example()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/bhuiyanmobasshir94/Cow-weight-and-Breed-Prediction/blob/main/notebooks/031_dec.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import pandas as pd
import sys
import os
import PIL
import PIL.Image
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import pathlib
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
## Globals
# Only the first (YT_IMAGE_TO_TAKE - 1) YouTube frames per animal are kept
# when building the training table below.
YT_IMAGE_TO_TAKE = 4
images_dataset_url = "https://cv-datasets-2021.s3.amazonaws.com/images.tar.gz"
images_data_dir = tf.keras.utils.get_file(origin=images_dataset_url,
                                   fname='images',
                                   untar=True)
images_data_dir = pathlib.Path(images_data_dir)
yt_images_dataset_url = "https://cv-datasets-2021.s3.amazonaws.com/yt_images.tar.gz"
yt_images_data_dir = tf.keras.utils.get_file(origin=yt_images_dataset_url,
                                   fname='yt_images',
                                   untar=True)
yt_images_data_dir = pathlib.Path(yt_images_data_dir)
# Remove macOS resource-fork files ("._*") that would break image loading.
if sys.platform == 'darwin':
  os.system(f"dot_clean {images_data_dir}")
  os.system(f"dot_clean {yt_images_data_dir}")
elif sys.platform.startswith("lin"):
  os.system(f"cd {images_data_dir} && find . -type f -name '._*' -delete")
  os.system(f"cd {yt_images_data_dir} && find . -type f -name '._*' -delete")
image_count = len(list(images_data_dir.glob('*/*.jpg')))
print(image_count)
yt_image_count = len(list(yt_images_data_dir.glob('*/*.jpg')))
print(yt_image_count)
# Animal-level labels (sku, teeth, age, breed, height, weight)
df = pd.read_csv("https://cv-datasets-2021.s3.amazonaws.com/dataset.csv")
df.shape
df.columns
df.head(2)
images = list(images_data_dir.glob('*/*.jpg'))
yt_images = list(yt_images_data_dir.glob('*/*.jpg'))
def print_size_range(image_paths):
  """Print the min/max width and height over a list of image file paths.

  Prints zeros for an empty list, matching the original scan's output.
  """
  sizes = [PIL.Image.open(str(p)).size for p in image_paths]
  widths = [w for w, _ in sizes] or [0]
  heights = [h for _, h in sizes] or [0]
  print(f"min_height: {min(heights)}")
  print(f"min_width: {min(widths)}")
  print(f"max_height: {max(heights)}")
  print(f"max_width: {max(widths)}")

# Same statistics for both image sources, replacing two copy-pasted scan
# loops with a single helper.
print_size_range(images)
print_size_range(yt_images)
f_df = pd.DataFrame(columns = ['file_path', 'teeth', 'age_in_year', 'breed', 'height_in_inch', 'weight_in_kg'])
# Build one row per image file, repeating the animal-level labels for every
# image of that animal.  NOTE(review): DataFrame.append is deprecated in
# pandas >= 1.4; collecting dicts and calling pd.concat once would be both
# faster and future-proof.
for index, row in df.iterrows():
  images = list(images_data_dir.glob(f"{row['sku']}/*.jpg"))
  yt_images = list(yt_images_data_dir.glob(f"{row['sku']}/*.jpg"))
  for image in images:
    f_df = f_df.append({'file_path' : image, 'teeth' : row['teeth'], 'age_in_year' : row['age_in_year'], 'breed': row['breed'], 'height_in_inch': row['height_in_inch'], 'weight_in_kg': row['weight_in_kg']},
                    ignore_index = True)
  # keep at most YT_IMAGE_TO_TAKE - 1 YouTube frames per animal
  for idx, image in enumerate(yt_images):
    if idx == (YT_IMAGE_TO_TAKE - 1):
      break
    f_df = f_df.append({'file_path' : image, 'teeth' : row['teeth'], 'age_in_year' : row['age_in_year'], 'breed': row['breed'], 'height_in_inch': row['height_in_inch'], 'weight_in_kg': row['weight_in_kg']},
                    ignore_index = True)
f_df.shape
f_df.head(1)
def label_encode(df):
  """Label-encode the teeth, breed and age_in_year columns in place.

  Prints each encoder's classes (in the order teeth, breed, age_in_year)
  so the integer codes can be interpreted, then returns the DataFrame.
  """
  encoders = {}
  for column in ('teeth', 'breed', 'age_in_year'):
    enc = preprocessing.LabelEncoder()
    df[column] = enc.fit_transform(df[column])
    encoders[column] = enc
  for column in ('teeth', 'breed', 'age_in_year'):
    print(encoders[column].classes_)
  return df
def inverse_transform(le, series=()):
  """Map encoded labels back to their original classes via *le*.

  Parameters
  ----------
  le : a fitted encoder exposing ``inverse_transform``
  series : sequence of encoded labels (default: empty)

  The default was previously a mutable list (``[]``); an immutable tuple
  avoids the shared-mutable-default pitfall and is backward compatible.
  """
  return le.inverse_transform(series)
f_df = label_encode(f_df)
# train_df, valid_test_df = train_test_split(f_df, test_size=0.3)
# validation_df, test_df = train_test_split(valid_test_df, test_size=0.3)
# print(f"train_df: {train_df.shape}")
# print(f"validation_df: {validation_df.shape}")
# print(f"test_df: {test_df.shape}")
# Simple 90/10 train/test split (the three-way split above was abandoned).
train_df, test_df = train_test_split(f_df, test_size=0.1)
print(f"train_df: {train_df.shape}")
print(f"test_df: {test_df.shape}")
# min_height: 450
# min_width: 800
# input: [image, teeth]
# output: [age_in_year, breed, height_in_inch, weight_in_kg]
# class CustomDataGen(tf.keras.utils.Sequence):
# def __init__(self, df, X_col, y_col,
# batch_size,
# input_size=(450, 800, 3), # (input_height, input_width, input_channel)
# shuffle=True):
# self.df = df.copy()
# self.X_col = X_col
# self.y_col = y_col
# self.batch_size = batch_size
# self.input_size = input_size
# self.shuffle = shuffle
# self.n = len(self.df)
# # self.n_teeth = df[X_col['teeth']].max()
# # self.n_breed = df[y_col['breed']].nunique()
# def on_epoch_end(self):
# if self.shuffle:
# self.df = self.df.sample(frac=1).reset_index(drop=True)
# def __get_input(self, path, target_size):
# image = tf.keras.preprocessing.image.load_img(path)
# image_arr = tf.keras.preprocessing.image.img_to_array(image)
# # image_arr = image_arr[ymin:ymin+h, xmin:xmin+w]
# image_arr = tf.image.resize(image_arr,(target_size[0], target_size[1])).numpy()
# return image_arr/255.
# def __get_output(self, label, num_classes):
# return tf.keras.utils.to_categorical(label, num_classes=num_classes)
# def __get_data(self, batches):
# # Generates data containing batch_size samples
# path_batch = batches[self.X_col['file_path']]
# # teeth_batch = batches[self.X_col['teeth']]
# # breed_batch = batches[self.y_col['breed']]
# weight_in_kg_batch = batches[self.y_col['weight_in_kg']]
# height_in_inch_batch = batches[self.y_col['height_in_inch']]
# age_in_year_batch = batches[self.y_col['age_in_year']]
# X0 = np.asarray([self.__get_input(x, self.input_size) for x in path_batch])
# # y0_batch = np.asarray([self.__get_output(y, self.n_teeth) for y in teeth_batch])
# # y1_batch = np.asarray([self.__get_output(y, self.n_breed) for y in breed_batch])
# y0 = np.asarray([tf.cast(y, tf.float32) for y in weight_in_kg_batch])
# y1 = np.asarray([tf.cast(y, tf.float32) for y in height_in_inch_batch])
# y2 = np.asarray([tf.cast(y, tf.float32) for y in age_in_year_batch])
# return X0, tuple([y0, y1, y2])
# def __getitem__(self, index):
# batches = self.df[index * self.batch_size:(index + 1) * self.batch_size]
# X, y = self.__get_data(batches)
# return X, y
# def __len__(self):
# return self.n // self.batch_size
# traingen = CustomDataGen(train_df,
# X_col={'file_path':'file_path', 'teeth': 'teeth'},
# y_col={'breed': 'breed', 'weight_in_kg': 'weight_in_kg', 'height_in_inch': 'height_in_inch', 'age_in_year': 'age_in_year'},
# batch_size=128, input_size=(450, 800, 3))
# testgen = CustomDataGen(test_df,
# X_col={'file_path':'file_path', 'teeth': 'teeth'},
# y_col={'breed': 'breed', 'weight_in_kg': 'weight_in_kg', 'height_in_inch': 'height_in_inch', 'age_in_year': 'age_in_year'},
# batch_size=128, input_size=(450, 800, 3))
# validgen = CustomDataGen(validation_df,
# X_col={'file_path':'file_path', 'teeth': 'teeth'},
# y_col={'breed': 'breed', 'weight_in_kg': 'weight_in_kg', 'height_in_inch': 'height_in_inch', 'age_in_year': 'age_in_year'},
# batch_size=128, input_size=(450, 800, 3))
def __get_input(path, target_size):
  """Load one image, resize it to target_size[:2], and scale to [0, 1]."""
  loaded = tf.keras.preprocessing.image.load_img(path)
  pixels = tf.keras.preprocessing.image.img_to_array(loaded)
  resized = tf.image.resize(pixels, (target_size[0], target_size[1])).numpy()
  return resized / 255.
def data_loader(df, image_size=(450, 800, 3)):
    """Turn a dataframe of cattle records into model inputs and targets.

    Returns ((X0, X1), (y0, y1, y2, y3)):
      X0 — resized images loaded from ``df.file_path``,
      X1 — one-hot encoded teeth,
      y0/y1 — weight (kg) and height (inch) regression targets,
      y2/y3 — one-hot encoded age and breed classes.
    Shapes are printed as each array is built.
    """
    weight = tf.cast(df.weight_in_kg, tf.float32)
    print(weight.shape)
    height = tf.cast(df.height_in_inch, tf.float32)
    print(height.shape)
    age_onehot = keras.utils.to_categorical(df.age_in_year)
    print(age_onehot.shape)
    breed_onehot = keras.utils.to_categorical(df.breed)
    print(breed_onehot.shape)
    images = [__get_input(p, image_size) for p in df.file_path]
    image_tensor = tf.cast(images, tf.float32)
    print(image_tensor.shape)
    teeth_onehot = keras.utils.to_categorical(df.teeth)
    print(teeth_onehot.shape)
    return (image_tensor, teeth_onehot), (weight, height, age_onehot, breed_onehot)
# Build inputs/targets at 150x150 (the CNN below expects this shape).
(X0, X1), (y0, y1, y2, y3) = data_loader(train_df, (150, 150, 3))

# Shared convolutional trunk over the single image input.
input0 = keras.Input(shape=(150, 150, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(input0)
x = layers.MaxPooling2D(2)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(2)(x)
x = layers.Conv2D(64, 3, activation="relu")(x)
x = layers.GlobalMaxPooling2D()(x)

# Four heads: two regressions (weight, height) and two classifications
# (age: 3 classes, breed: 8 classes).
out_a = keras.layers.Dense(1, activation='linear', name='wt_rg')(x)
out_b = keras.layers.Dense(1, activation='linear', name='ht_rg')(x)
out_c = keras.layers.Dense(3, activation='softmax', name='ag_3cls')(x)
out_d = keras.layers.Dense(8, activation='softmax', name='brd_8cls')(x)
encoder = keras.Model(inputs=input0, outputs=[out_a, out_b, out_c, out_d], name="encoder")

encoder.compile(
    loss={
        "wt_rg": tf.keras.losses.MeanSquaredError(),
        "ht_rg": tf.keras.losses.MeanSquaredError(),
        "ag_3cls": tf.keras.losses.CategoricalCrossentropy(),
        "brd_8cls": tf.keras.losses.CategoricalCrossentropy()
    },
    metrics={
        "wt_rg": 'mse',
        "ht_rg": 'mse',
        "ag_3cls": 'accuracy',
        "brd_8cls": 'accuracy'
    },
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001)
)
encoder.fit(X0, [y0, y1, y2, y3], epochs=30, verbose=2, batch_size=32, validation_split=0.2)
keras.utils.plot_model(encoder, "encoder.png", show_shapes=True)

(tX0, tX1), (ty0, ty1, ty2, ty3) = data_loader(test_df, (150, 150, 3))
test_scores = encoder.evaluate(tX0, [ty0, ty1, ty2, ty3], verbose=2)
print("Test loss:", test_scores[0])          # total (summed) loss over all heads
# NOTE: test_scores[1] is the weight-head loss, not an accuracy — label fixed.
print("Weight-head loss:", test_scores[1])

# BUG FIX: the model has a single image input and FOUR outputs; the original
# passed [tX0, tX1] (two inputs) and unpacked only three predictions, which
# raises at runtime.  Predict on one expanded image and unpack all four heads.
p0, p1, p2, p3 = encoder.predict(tf.expand_dims(tX0[0], 0))
print(p0); ty0[0]
print(p1); ty1[0]
print(p2.argmax()); ty2[0].argmax()
print(p3.argmax()); ty3[0].argmax()
Cattle are commonly raised as livestock for meat (beef or veal, see beef cattle), for milk (see dairy cattle), and for hides, which are used to make leather. They are used as riding animals and draft animals (oxen or bullocks, which pull carts, plows and other implements). Another product of cattle is their dung, which can be used to create manure or fuel.
```
| github_jupyter |
# Python for Geosciences
Nikolay Koldunov
koldunovn@gmail.com
This is part of [**Python for Geosciences**](https://github.com/koldunovn/python_for_geosciences) notes.
# Why python?
## - It's easy to learn, easy to read and fast to develop
It is considered to be the language of choice for beginners, and proper code formatting is part of the design of the language. This is especially useful when you remember that we are scientists, not programmers. What we need is a language that can be learned quickly, but is at the same time powerful enough to satisfy our needs.
## - It's free and opensource.
You will be able to use your scripts even if your institute does not have enough money to buy expensive software (MATLAB or IDL). You can make changes in the code, or at least have the possibility to look at the source code if you suspect that there is a bug.
## - It's multiplatform
You can find it on many systems, so you are not tied to Windows, Mac or Linux. It sounds great, but that is not always the case: some modules will work only on a limited number of operating systems (e.g. PyNGL, pyFerret).
## - It's general purpose language
You can use it not only for data processing and visualization, but also for system administration, web development, database programming and so on. It is relatively easy to make your code run in parallel mode. Last but not least — if you ever decide to leave academia, your chances on the job market are much better with some Python skills.
# Downsides:
## - There is a smaller legacy code base
FORTRAN and Matlab are used for decades, and have a lot of libraries for all kinds of scientific needs. Although now main functionality is covered by python modules, there are still many specific areas where no python solution is available. This problem can be partly solved by python's integration with other languages ([MLabWrap](http://mlabwrap.sourceforge.net/), [F2py](http://www.f2py.com/)).
## - It's slow
... if you don't use vectorization or [Cython](http://www.cython.org/) or [numba](https://numba.pydata.org/) when loops are inevitable, or [dask](https://dask.pydata.org/en/latest/) when you have to work with parallel code. Critical parts still can be written in FORTRAN or C.
More reading on this topic:
* [10 Reasons Python Rocks for Research (And a Few Reasons it Doesn’t)](http://www.stat.washington.edu/~hoytak/blog/whypython.html)
* [I used Matlab. Now I use Python](http://stevetjoa.com/305/)
* [Eight Advantages of Python Over Matlab](http://phillipmfeldman.org/Python/Advantages_of_Python_Over_Matlab.html)
# Python in Earth Sciences
Lin, J. W.-B. (2012). [**Why Python Is the Next Wave in Earth Sciences Computing**](http://journals.ametsoc.org/doi/full/10.1175/BAMS-D-12-00148.1). *Bulletin of the American Meteorological Society*, 93(12), 1823–1824. doi:10.1175/BAMS-D-12-00148.1
- Though it has been around for two decades, it exploded into use in the atmospheric sciences just a few years ago after the development community converged upon the standard scientific packages (e.g., array handling) needed for atmospheric sciences work.
- Much more robust and flexible workflow. Everything from data download to data analysis, visualization and finally writing a paper can be done in one environment.
- Ability to access innovations from industries outside of the Earth sciences (cloud computing, big data, mobile computing).
- Institutional support includes groups at Lawrence Livermore National Laboratory’s Program for Climate Model Diagnosis and Intercomparison, NCAR’s Computer Information Systems Laboratory, and the British Atmospheric Data Centre.
## Further reading:
* [Lectures on Scientific Computing with Python.](https://github.com/jrjohansson/scientific-python-lectures#online-read-only-versions)
* [Python Scientific Lecture Notes](http://scipy-lectures.github.io/)
* [NumPy for Matlab Users](http://wiki.scipy.org/NumPy_for_Matlab_Users)
* [Python data tools just keep getting better](http://strata.oreilly.com/2013/03/python-data-tools-just-keep-getting-better.html)
* [Third Symposium on Advances in Modeling and Analysis Using Python](http://annual.ametsoc.org/2013/index.cfm/programs-and-events/conferences-and-symposia/third-symposium-on-advances-in-modeling-and-analysis-using-python/)
| github_jupyter |
References:
- http://www.diva-portal.org/smash/get/diva2:1382324/FULLTEXT01.pdf
- https://stackabuse.com/hierarchical-clustering-with-python-and-scikit-learn/
```
import numpy as np
import pandas as pd
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
from sklearn.model_selection import train_test_split
from numpy import sqrt, array, random, argsort
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage, centroid, fcluster
import scipy.cluster.hierarchy as shc
from scipy.spatial.distance import cdist, pdist
from sklearn.neighbors import NearestCentroid
#from google.colab import drive
#drive.mount('/content/gdrive')
# Crop-recommendation dataset; drop the crop label and rainfall so only the
# numeric soil/weather features remain for clustering.
df = pd.read_csv("https://raw.githubusercontent.com/AIML-Makgeolli/CpE-AIDL/main/thesis_database/Crop_recommendation.csv")
df_train = df.drop(['label','rainfall'], axis = 1)
```
Declarations
```
# Single-feature frames, each later paired against soil pH (`y`) for
# two-dimensional hierarchical clustering.
X_N= df_train[['N']]            # nitrogen
X_P= df_train[['P']]            # phosphorus
X_K= df_train[['K']]            # potassium
X_temp= df_train[['temperature']]
X_moist= df_train[['humidity']]
y = df_train[['ph']]            # clustering partner for every feature above
```
Nitrogen and pH
```
class hierarchical():
    """Agglomerative (Ward) clustering of a 2-column (feature, pH) dataset,
    with dendrogram plotting, internal cluster-quality metrics and
    distance-percentile outlier flagging.

    Usage order matters: input_train() -> cluster_fit() -> outlier()
    -> cluster_graph(); later methods read state set by earlier ones.
    """

    def __init__(self):
        return

    def input_train(self, X_in, y_in):
        # Keep only the 70% training split (random_state=42 for
        # reproducibility) and store it as a 2-column numpy array.
        self.X = X_in
        self.y = y_in
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.y,test_size=0.3, random_state=42)
        self.data = pd.concat([X_train, y_train], axis=1).to_numpy()
        return self.data

    def dendograms(self):
        # Plot the Ward-linkage dendrogram of the stored data.
        plt.figure(figsize=(7, 5))
        plt.title("Dendograms")
        dend = shc.dendrogram(shc.linkage(self.data, method='ward'))

    def cluster_fit(self, clust):
        # Fit agglomerative clustering with `clust` clusters and print three
        # internal validity indices.
        # NOTE(review): `affinity` is deprecated (renamed `metric`) in newer
        # scikit-learn — confirm the pinned sklearn version.
        self.cluster = AgglomerativeClustering(n_clusters = clust, affinity ='euclidean', linkage='ward')
        self.res = self.cluster.fit_predict(self.data)
        self.labels = self.cluster.labels_
        print(self.labels)
        print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(self.data, self.labels))
        print("Calinski-Harabasz Index: %0.3f" % metrics.calinski_harabasz_score(self.data, self.labels))
        print("Davies-Bouldin Index: %0.3f" % metrics.davies_bouldin_score(self.data, self.labels))
        return self.res

    def outlier(self,threshold):
        # Flag points whose distance to their own cluster centroid exceeds the
        # `threshold`-th percentile of all such distances.
        clf = NearestCentroid()
        clf.fit(self.data, self.res)
        self.centroids = clf.centroids_
        self.points = np.empty((0,len(self.data[0])), float)
        self.distances = np.empty((0,len(self.data[0])), float)
        # points and distances are appended cluster-by-cluster in the SAME
        # order, so the boolean mask below lines up between the two arrays.
        for i, center_elem in enumerate(self.centroids):
            self.distances = np.append(self.distances, cdist([center_elem],self.data[self.res == i], 'euclidean'))
            self.points = np.append(self.points, self.data[self.res == i], axis=0)
        percentile = threshold
        self.outliers = self.points[np.where(self.distances > np.percentile(self.distances, percentile))]
        outliers_df = pd.DataFrame(self.outliers,columns =['X','y'])
        return outliers_df

    def cluster_graph(self):
        # Scatter the data coloured by cluster, circle the outliers in green
        # and mark the centroids in blue.
        plt.figure(figsize=(7, 5))
        plt.scatter(self.data[:,0], self.data[:,1], c=self.cluster.labels_, cmap='rainbow')
        plt.scatter(*zip(*self.outliers),marker="o",facecolor="None",edgecolor="g",s=70);
        plt.scatter(*zip(*self.centroids),marker="o",facecolor="b",edgecolor="b",s=20);
# One shared instance, re-trained for every feature/pH pair below.
hierarchical_test = hierarchical()
```
Nitrogen and pH
```
# Nitrogen vs pH: 3 clusters, flag points beyond the 80th distance
# percentile as outliers, then plot.
hierarchical_test.input_train(X_N,y)
hierarchical_test.dendograms()
hierarchical_test.cluster_fit(3)
hierarchical_test.outlier(80)
hierarchical_test.cluster_graph()
```
Phosphorus and pH
```
# Phosphorus vs pH: same pipeline (3 clusters, 80th-percentile outliers).
hierarchical_test.input_train(X_P,y)
hierarchical_test.dendograms()
hierarchical_test.cluster_fit(3)
hierarchical_test.outlier(80)
hierarchical_test.cluster_graph()
```
Potassium and pH
```
# Potassium vs pH: same pipeline (3 clusters, 80th-percentile outliers).
hierarchical_test.input_train(X_K,y)
hierarchical_test.dendograms()
hierarchical_test.cluster_fit(3)
hierarchical_test.outlier(80)
hierarchical_test.cluster_graph()
```
Temperature and pH
```
# Temperature vs pH: same pipeline (3 clusters, 80th-percentile outliers).
hierarchical_test.input_train(X_temp,y)
hierarchical_test.dendograms()
hierarchical_test.cluster_fit(3)
hierarchical_test.outlier(80)
hierarchical_test.cluster_graph()
```
Moisture and pH
```
# Humidity (moisture) vs pH: same pipeline (3 clusters, 80th-percentile outliers).
hierarchical_test.input_train(X_moist,y)
hierarchical_test.dendograms()
hierarchical_test.cluster_fit(3)
hierarchical_test.outlier(80)
hierarchical_test.cluster_graph()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, BaggingClassifier,AdaBoostClassifier,GradientBoostingClassifier
from sklearn.linear_model import LinearRegression,LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import RFE
from collections import Counter
from imblearn.over_sampling import SMOTE
import warnings
warnings.filterwarnings('ignore')
# Load the cervical-cancer risk-factor dataset and normalise column names.
df=pd.read_csv("datasets/cervical_cancer.csv")
pd.set_option('display.max_columns', 40)
df.head(20)
df.columns
df.columns=['Age', 'No_of_sex_partner', 'First_sexual_intercourse',\
'No_pregnancies','Smokes', 'Smokes_yrs', 'Smokes_packs_yr', 'Hormonal_Contraceptives',\
'Hormonal_Contraceptives_years','IUD', 'IUD_years', 'STDs', 'STDs_number', 'STDs_condylomatosis',\
'STDs_cervical_condylomatosis', 'STDs_vaginal_condylomatosis', 'STDs_vulvo_perineal_condylomatosis',\
'STDs_syphilis', 'STDs_pelvic_inflammatory_disease', 'STDs_genital_herpes', 'STDs_molluscum_contagiosum',\
'STDs_AIDS', 'STDs_HIV', 'STDs_Hepatitis_B', 'STDs_HPV', 'STDs_No_of_diagnosis', 'STD_Time_since_first_diagnosis',\
'STDs_Time_since_last_diagnosis', 'Dx_Cancer', 'Dx_CIN', 'Dx_HPV', 'Dx', 'Hinselmann','Schiller' ,'Citology', 'Biopsy']
df.info()
df.shape
## replace ? with NaN
df = df.replace('?', np.NaN)
# Visualise the per-column missing-value percentage.
plt.figure(figsize=(10,10))
np.round(df.isnull().sum()/df.shape[0]*100).sort_values().plot(kind='bar')
# Drop the two mostly-missing diagnosis-time columns, then rows missing
# Smokes or First_sexual_intercourse.
df=df.drop(['STD_Time_since_first_diagnosis','STDs_Time_since_last_diagnosis'],axis=1)
df=df.drop(df.index[df.Smokes.isnull()] | df.index[df.First_sexual_intercourse.isnull()])
# Feature bookkeeping: everything except the Biopsy target, split into
# categorical and numerical lists (targets get removed as they are imputed).
x_features=list(df.columns)
x_features.remove('Biopsy')
x_features_categorical=[
'Smokes','Hormonal_Contraceptives','IUD','STDs','STDs_condylomatosis','STDs_cervical_condylomatosis','STDs_vaginal_condylomatosis','STDs_vulvo_perineal_condylomatosis','STDs_syphilis','STDs_pelvic_inflammatory_disease','STDs_genital_herpes','STDs_molluscum_contagiosum','STDs_AIDS','STDs_HIV','STDs_Hepatitis_B','STDs_HPV','Dx_Cancer','Dx_CIN','Dx_HPV','Dx']
x_features_categorical
x_features_numerical=[i for i in x_features if i not in x_features_categorical]
x_features_numerical
# Impute the missing IUD values with a decision-tree classifier: temporarily
# fill the OTHER features' gaps (mode for categoricals, median for
# numericals), one-hot encode, train on rows where IUD is known, predict
# the rest, and store the completed column in df_impute.
df_iud=df.copy()
x_features_categorical.remove('IUD')
for i in x_features_categorical:
    df_iud[i]=df_iud[i].fillna(df_iud[i].mode()[0])
for i in x_features_numerical:
    df_iud[i]=df_iud[i].fillna(df_iud[i].median())
df_iud=df_iud.astype('float')
# Recode categorical 0/1 as strings so get_dummies one-hot encodes them.
df_iud[x_features_categorical]=df_iud[x_features_categorical].replace(0,'no')
df_iud[x_features_categorical]=df_iud[x_features_categorical].replace(1,'yes')
df_iud=pd.get_dummies(df_iud)
train_iud=df_iud[df_iud.IUD.isnull()==False]
test_iud=df_iud[df_iud.IUD.isnull()]
train_iud_x=train_iud.drop('IUD',axis=1)
train_iud_y=train_iud['IUD']
test_iud_x=test_iud.drop('IUD',axis=1)
test_iud_y=test_iud['IUD']
dt=DecisionTreeClassifier()
iud_model=dt.fit(train_iud_x,train_iud_y)
test_iud['IUD']=iud_model.predict(test_iud_x)
iud_complete=pd.concat([train_iud,test_iud],axis=0)
# df_impute accumulates all completed columns; sort_index() restores the
# original row order before assignment.
df_impute=df.copy()
df_impute['IUD']=iud_complete['IUD'].sort_index()
# --- Decision-tree imputation of the remaining incomplete columns ------------
# The original code repeated the same ~20-line section once per column; the
# shared logic is factored into one helper plus a driver loop.  Per column:
#   1. remove the target from its feature list (categorical or numerical),
#   2. temporarily fill the OTHER predictors' gaps (mode / median),
#   3. one-hot encode categoricals; split rows by target present/missing,
#   4. fit a decision tree on the known rows and predict the missing ones,
#   5. write the completed column back into df_impute in original row order.
def _impute_with_tree(base_df, target, cat_features, num_features, numeric_target):
    """Return `target` as a complete Series: observed values plus
    decision-tree predictions for the rows where it was missing.

    base_df        : frame holding `target` and its predictors (Biopsy dropped).
    cat_features   : categorical predictor names (target already removed).
    num_features   : numerical predictor names (target already removed).
    numeric_target : True -> DecisionTreeRegressor, False -> DecisionTreeClassifier.
    """
    work = base_df.copy()
    # Fill predictor gaps so the tree can train; the target keeps its NaNs.
    for col in cat_features:
        work[col] = work[col].fillna(work[col].mode()[0])
    for col in num_features:
        work[col] = work[col].fillna(work[col].median())
    work = work.astype('float')
    # Recode categorical 0/1 as strings so get_dummies one-hot encodes them;
    # the numeric target column passes through get_dummies untouched.
    work[cat_features] = work[cat_features].replace(0, 'no')
    work[cat_features] = work[cat_features].replace(1, 'yes')
    work = pd.get_dummies(work)
    known = work[work[target].isnull() == False]
    unknown = work[work[target].isnull()].copy()
    tree = DecisionTreeRegressor() if numeric_target else DecisionTreeClassifier()
    tree.fit(known.drop(target, axis=1), known[target])
    unknown[target] = tree.predict(unknown.drop(target, axis=1))
    # sort_index() restores the original row order for assignment back.
    return pd.concat([known, unknown], axis=0)[target].sort_index()

# (target column, numeric-target?) in the SAME order the original script
# processed them — order matters because each completed column becomes a
# fully-observed predictor for the columns imputed after it.
for _target, _numeric in [
        ('Hormonal_Contraceptives', False),
        ('Hormonal_Contraceptives_years', True),
        ('STDs', False),
        ('STDs_number', True),
        ('STDs_condylomatosis', False),
        ('STDs_cervical_condylomatosis', False),
        ('STDs_vaginal_condylomatosis', False),
        ('STDs_vulvo_perineal_condylomatosis', False),
        ('STDs_syphilis', False),
        ('STDs_pelvic_inflammatory_disease', False),
        ('STDs_genital_herpes', False),
        ('STDs_molluscum_contagiosum', False),
        ('STDs_AIDS', False),
        ('STDs_HIV', False),
        ('STDs_Hepatitis_B', False)]:
    if _numeric:
        x_features_numerical.remove(_target)
    else:
        x_features_categorical.remove(_target)
    df_impute[_target] = _impute_with_tree(
        df_impute.drop(['Biopsy'], axis=1), _target,
        x_features_categorical, x_features_numerical, _numeric)
df_std_hpv=df_impute.drop(['Biopsy'],axis=1)
x_features_categorical.remove('STDs_HPV')
for i in x_features_categorical:
df_std_hpv[i]=df_std_hpv[i].fillna(df_std_hpv[i].mode()[0])
for i in x_features_numerical:
df_std_hpv[i]=df_std_hpv[i].fillna(df_std_hpv[i].median())
df_std_hpv=df_std_hpv.astype('float')
df_std_hpv[x_features_categorical]=df_std_hpv[x_features_categorical].replace(0,'no')
df_std_hpv[x_features_categorical]=df_std_hpv[x_features_categorical].replace(1,'yes')
df_std_hpv=pd.get_dummies(df_std_hpv)
train_std_hpv=df_std_hpv[df_std_hpv.STDs_HPV.isnull()==False]
test_std_hpv=df_std_hpv[df_std_hpv.STDs_HPV.isnull()]
train_std_hpv_x=train_std_hpv.drop(['STDs_HPV'],axis=1)
train_std_hpv_y=train_std_hpv['STDs_HPV']
test_std_hpv_x=test_std_hpv.drop(['STDs_HPV'],axis=1)
test_std_hpv_y=test_std_hpv['STDs_HPV']
dt=DecisionTreeClassifier()
std_model_hpv=dt.fit(train_std_hpv_x,train_std_hpv_y)
test_std_hpv['STDs_HPV']=std_model_hpv.predict(test_std_hpv_x)
std_hpv_complete=pd.concat([train_std_hpv,test_std_hpv],axis=0)
df_impute['STDs_HPV']=std_hpv_complete['STDs_HPV'].sort_index()
df_no_preg=df_impute.drop(['Biopsy'],axis=1)
x_features_numerical.remove('No_pregnancies')
for i in x_features_numerical:
df_no_preg[i]=df_no_preg[i].fillna(df_no_preg[i].median())
for i in x_features_categorical:
df_no_preg[i]=df_no_preg[i].fillna(df_no_preg[i].mode()[0])
df_no_preg=df_no_preg.astype('float')
df_no_preg[x_features_categorical]=df_no_preg[x_features_categorical].replace(0,'no')
df_no_preg[x_features_categorical]=df_no_preg[x_features_categorical].replace(1,'yes')
df_no_preg=pd.get_dummies(df_no_preg)
train_no_preg=df_no_preg[df_no_preg.No_pregnancies.isnull()==False]
test_no_preg=df_no_preg[df_no_preg.No_pregnancies.isnull()]
train_no_preg_x=train_no_preg.drop(['No_pregnancies'],axis=1)
train_no_preg_y=train_no_preg['No_pregnancies']
test_no_preg_x=test_no_preg.drop(['No_pregnancies'],axis=1)
test_no_preg_y=test_no_preg['No_pregnancies']
dt=DecisionTreeRegressor()
model_no_preg=dt.fit(train_no_preg_x,train_no_preg_y)
test_no_preg['No_pregnancies']=model_no_preg.predict(test_no_preg_x)
no_preg_complete=pd.concat([train_no_preg,test_no_preg],axis=0)
df_impute['No_pregnancies']=no_preg_complete['No_pregnancies'].sort_index()
df_no_sexptnr=df_impute.drop(['Biopsy'],axis=1)
x_features_numerical.remove('No_of_sex_partner')
for i in x_features_numerical:
df_no_sexptnr[i]=df_no_sexptnr[i].fillna(df_no_sexptnr[i].median())
for i in x_features_categorical:
df_no_sexptnr[i]=df_no_sexptnr[i].fillna(df_no_sexptnr[i].mode()[0])
df_no_sexptnr=df_no_sexptnr.astype('float')
df_no_sexptnr[x_features_categorical]=df_no_sexptnr[x_features_categorical].replace(0,'no')
df_no_sexptnr[x_features_categorical]=df_no_sexptnr[x_features_categorical].replace(1,'yes')
df_no_sexptnr=pd.get_dummies(df_no_sexptnr)
train_no_sexptnr=df_no_sexptnr[df_no_sexptnr.No_of_sex_partner.isnull()==False]
test_no_sexptnr=df_no_sexptnr[df_no_sexptnr.No_of_sex_partner.isnull()]
train_no_sexptnr_x=train_no_sexptnr.drop(['No_of_sex_partner'],axis=1)
train_no_sexptnr_y=train_no_sexptnr['No_of_sex_partner']
test_no_sexptnr_x=test_no_sexptnr.drop(['No_of_sex_partner'],axis=1)
test_no_sexptnr_y=test_no_sexptnr['No_of_sex_partner']
dt=DecisionTreeRegressor()
model_no_sexptnr=dt.fit(train_no_sexptnr_x,train_no_sexptnr_y)
test_no_sexptnr['No_of_sex_partner']=model_no_sexptnr.predict(test_no_sexptnr_x)
no_sexptnr_complete=pd.concat([train_no_sexptnr,test_no_sexptnr],axis=0)
df_impute['No_of_sex_partner']=no_sexptnr_complete['No_of_sex_partner'].sort_index()
df_impute.isnull().sum()
df_impute[['Age','No_pregnancies', 'No_of_sex_partner',
'First_sexual_intercourse',
'Smokes_yrs',
'Smokes_packs_yr',
'STDs_No_of_diagnosis', 'Hormonal_Contraceptives_years', 'IUD_years', 'STDs_number']].describe()
# Persist the fully-imputed dataset, then reload it for the modelling phase.
df_impute.to_csv('datasets/df_imputation.csv')
df = pd.read_csv('datasets/df_imputation.csv', index_col=0) #df_imputation is the new CSV file that doesn't have any null values.
#Again manually segregating categorical and numerical colmuns
x_features_categorical = ['Smokes','Hormonal_Contraceptives','IUD','STDs','STDs_condylomatosis','STDs_cervical_condylomatosis',
'STDs_vaginal_condylomatosis','STDs_vulvo_perineal_condylomatosis','STDs_syphilis',
'STDs_pelvic_inflammatory_disease','STDs_genital_herpes','STDs_molluscum_contagiosum','STDs_AIDS',
'STDs_HIV','STDs_Hepatitis_B','STDs_HPV','Dx_Cancer','Dx_CIN','Dx_HPV','Dx', 'Hinselmann', 'Citology', 'Biopsy']
x_features_numerical = [x for x in df.columns if x not in x_features_categorical]
impute = df.copy()
impute=df.astype('float')
plt.figure(figsize = (12,8))
plt.pie(impute['Biopsy'].value_counts(), labels = ['NO', 'YES'], autopct = '%1.1f%%', labeldistance=1.1, textprops = {'fontsize': 20})
plt.title('Biopsy Percentage', fontsize=20)
plt.show()
print("Count Plots of Categorical Columns");print()
for i in impute[x_features_categorical]:
print('*'*100)
sns.countplot(impute[i])
plt.title(i)
plt.show()
print("Density Plots");print()
for i in impute[x_features_numerical]:
print('*'*100)
sns.distplot(impute[i])
plt.title(i)
plt.show()
numerical=['Age','No_of_sex_partner','First_sexual_intercourse','No_pregnancies','Smokes_yrs','Smokes_packs_yr',
'Hormonal_Contraceptives_years','IUD_years'] # --> Choosing the proper numerical features
df_copy = df.copy()
df_copy[numerical]=df_copy[numerical].astype('float64')
df_copy[numerical].plot(kind='bar',subplots=True, layout=(4,4), fontsize=8, figsize=(14,14))
# Cap outliers to the Tukey fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR) per numerical column.
desc = df_copy[numerical].describe().T
IQR = desc['75%'] - desc['25%']
lower_bound = desc['25%'] - (IQR * 1.5)  # renamed from min/max: don't shadow builtins
upper_bound = desc['75%'] + (IQR * 1.5)
for i in numerical:
    print('range of', i, 'b/w', lower_bound[i], 'and', upper_bound[i])
for i in numerical:
    # .clip caps both tails in one pass and avoids the chained-indexing
    # assignment (df_copy[i][mask] = v) that triggers SettingWithCopyWarning
    # and may silently fail to write back.
    df_copy[i] = df_copy[i].clip(lower=lower_bound[i], upper=upper_bound[i])
df_copy[numerical].plot(kind='bar',subplots=True, layout=(4,4), fontsize=8, figsize=(14,14))
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
#from catboost import CatBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import tree
#import lightgbm as lgb
#from lightgbm import LGBMClassifier
import xgboost as xgb
from sklearn.model_selection import train_test_split
from scipy import interp
from sklearn.metrics import classification_report, accuracy_score, auc
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn import ensemble
our_anomaly_detector = ensemble.IsolationForest(contamination = 0.1, random_state=42)
our_anomaly_detector.fit(df[numerical])
training_predictions = our_anomaly_detector.predict(df[numerical])
print(len(training_predictions))
outlier_label = []
outlier_label = list(training_predictions)
anomaly_iso = outlier_label.count(-1)
print(anomaly_iso)
normal_iso = outlier_label.count(1)
print(normal_iso)
df = df.astype('float64')
x = df.drop('Biopsy', axis=1)
y = df['Biopsy']
SS = StandardScaler()
df_scaled = pd.DataFrame(SS.fit_transform(x), columns = x.columns) # as scaling mandotory for KNN model
x_train,x_test,y_train,y_test = train_test_split(x,y, test_size = 0.3, random_state = 1)
x_train1,x_test1,y_train,y_test = train_test_split(df_scaled,y, test_size = 0.3, random_state = 1)
l= [] #List to store the various model metrics
def models_lr(x,y):
    """Fit LogisticRegression on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = LogisticRegression().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'LogisticRegression'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC must use class-1 probabilities; the original discarded
    # predict_proba's result and scored hard 0/1 predictions instead.
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_lr(x_train,y_train))
def models_dt(x,y):
    """Fit DecisionTreeClassifier on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = DecisionTreeClassifier().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'Decision Tree'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_dt(x_train,y_train))
def models_rf(x,y):
    """Fit RandomForestClassifier on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = RandomForestClassifier().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'Random Forest'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_rf(x_train,y_train))
def models_nb(x,y):
    """Fit GaussianNB on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = GaussianNB().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'GaussianNB'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_nb(x_train,y_train))
def models_knn(x,y):
    """Fit KNeighborsClassifier on scaled (x, y); evaluate on the scaled test split x_test1."""
    mod = {}
    model = KNeighborsClassifier().fit(x,y)
    ypred = model.predict(x_test1)
    mod['Model'] = 'KNN'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test1)[:, 1])
    return mod
l.append(models_knn(x_train1,y_train))
def models_ada(x,y):
    """Fit AdaBoostClassifier on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = AdaBoostClassifier(n_estimators=100, random_state=0).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'AdaBoostClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_ada(x_train,y_train))
def models_xg(x,y):
    """Fit XGBClassifier on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = xgb.XGBClassifier(objective="binary:logistic", random_state=42, eval_metric="auc").fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'XGBClassifier'  # fixed label typo ('XGBClasssifier'), now consistent with the post-sampling run
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_xg(x_train,y_train))
def models_gbc(x,y):
    """Fit GradientBoostingClassifier on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = GradientBoostingClassifier(loss='exponential', learning_rate=0.03, n_estimators=75 , max_depth=6).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'GradientBoostingClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_gbc(x_train,y_train))
def models_svm(x,y):
    """Fit an RBF SVC on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    # probability=True is required for predict_proba below.
    model = SVC(kernel='rbf', probability=True).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'SupportVectorClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_svm(x_train,y_train))
def models_etc(x,y):
    """Fit ExtraTreesClassifier on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = ExtraTreesClassifier(n_estimators=250, random_state=0).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'ExtraTreesClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_etc(x_train,y_train))
from sklearn.naive_bayes import BernoulliNB
def models_bnb(x,y):
    """Fit BernoulliNB on (x, y); return train/test metrics vs the global test split."""
    mod = {}
    model = BernoulliNB().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'BernoulliNB'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l.append(models_bnb(x_train,y_train))
base_df = pd.DataFrame(l)
base_df
knneig = KNeighborsClassifier(n_neighbors=10)
knneig.fit(x_train1, y_train)
pred_knneigh = knneig.predict(x_test1)
score_knneigh_before = accuracy_score(y_test, pred_knneigh)
print("Score KNeighnors :",score_knneigh_before)
print(classification_report(y_test, pred_knneigh))
# q = 0
# while q < len(outlier_label):
# if outlier_label[q] == -1:
# df.drop(q, axis = 0, inplace = True)
# q+=1
from imblearn.over_sampling import (RandomOverSampler,SMOTE,ADASYN)
x_train_s, y_train_s = ADASYN(random_state=42).fit_resample(x_train, y_train.ravel())
print(sorted(Counter(y_train_s).items()))
l_final = [] #--> New list for storing metrics of base models
def models_dt(x,y):
    """Fit DecisionTreeClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = DecisionTreeClassifier().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'Decision Tree After Sampling'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_dt(x_train_s,y_train_s))
def models_rf(x,y):
    """Fit RandomForestClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = RandomForestClassifier().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'Random Forest After Sampling'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_rf(x_train_s,y_train_s))
def models_lr(x,y):
    """Fit LogisticRegression on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = LogisticRegression().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'LogisticRegression'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_lr(x_train_s,y_train_s))
def models_nb(x,y):
    """Fit GaussianNB on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = GaussianNB().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'GaussianNB'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_nb(x_train_s,y_train_s))
def models_knn(x,y):
    """Fit KNeighborsClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = KNeighborsClassifier().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'KNN'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_knn(x_train_s,y_train_s))
def models_ada(x,y):
    """Fit AdaBoostClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = AdaBoostClassifier(n_estimators=100, random_state=0).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'AdaBoostClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_ada(x_train_s,y_train_s))
def models_xg(x,y):
    """Fit XGBClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = xgb.XGBClassifier(objective="binary:logistic", random_state=42, eval_metric="auc").fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'XGBClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_xg(x_train_s,y_train_s))
def models_gbc(x,y):
    """Fit GradientBoostingClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = GradientBoostingClassifier(loss='exponential', learning_rate=0.03, n_estimators=75 , max_depth=6).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'GradientBoostingClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_gbc(x_train_s,y_train_s))
def models_svm(x,y):
    """Fit an RBF SVC on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    # probability=True is required for predict_proba below.
    model = SVC(kernel='rbf', probability=True).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'SupportVectorClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_svm(x_train_s,y_train_s))
def models_etc(x,y):
    """Fit ExtraTreesClassifier on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = ExtraTreesClassifier(n_estimators=250, random_state=0).fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'ExtraTreesClassifier'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_etc(x_train_s,y_train_s))
from sklearn.naive_bayes import BernoulliNB
def models_bnb(x,y):
    """Fit BernoulliNB on the oversampled (x, y); evaluate on the global test split."""
    mod = {}
    model = BernoulliNB().fit(x,y)
    ypred = model.predict(x_test)
    mod['Model'] = 'BernoulliNB'
    mod['Train_Score'] = model.score(x, y)  # score on the data the model was fit with
    mod['Test_accuracy'] = metrics.accuracy_score(y_test,ypred)
    mod['f1score'] = metrics.f1_score(y_test,ypred)
    mod['recall'] = metrics.recall_score(y_test, ypred)
    mod['precision'] = metrics.precision_score(y_test, ypred)
    # ROC-AUC from class-1 probabilities (original scored hard labels).
    mod['roc_auc'] = metrics.roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])
    return mod
l_final.append(models_bnb(x_train_s,y_train_s))
final_model = pd.DataFrame(l_final)
final_model
# KNN after oversampling. x_train_s comes from the *unscaled* split, so the
# model must be evaluated on the matching unscaled x_test — the original
# predicted on the scaled x_test1, mixing two different feature spaces.
knneig = KNeighborsClassifier(n_neighbors=10)
knneig.fit(x_train_s, y_train_s)
pred_knneigh = knneig.predict(x_test)
score_knneigh_before = accuracy_score(y_test, pred_knneigh)
print("Score KNeighnors :",score_knneigh_before)
print(classification_report(y_test, pred_knneigh))
rfc = RandomForestClassifier(n_estimators=100,random_state = 42)
rfc.fit(x_train_s, y_train_s)
rfc_pred = rfc.predict(x_test)
print(accuracy_score(y_test,rfc_pred))
print(classification_report(y_test,rfc_pred))
logmodel = LogisticRegression()
logmodel.fit(x_train_s,y_train_s)
predictions = logmodel.predict(x_test)
print(accuracy_score(y_test, predictions))
print(classification_report(y_test,predictions))
gbc = GradientBoostingClassifier()
gbc.fit(x_train_s,y_train_s)
predictions = gbc.predict(x_test)
print(accuracy_score(y_test, predictions))
print(classification_report(y_test,predictions))
ada = AdaBoostClassifier()
ada.fit(x_train_s,y_train_s)
predictions = ada.predict(x_test)
print(accuracy_score(y_test, predictions))
print(classification_report(y_test,predictions))
dt = DecisionTreeClassifier()
dt.fit(x_train_s,y_train_s)
predictions = dt.predict(x_test)
print(accuracy_score(y_test, predictions))
print(classification_report(y_test,predictions))
r_probs = [0 for _ in range(len(y_test))]
KNN_probs = knneig.predict_proba(x_test1)
RF_probs = rfc.predict_proba(x_test)
GBC_probs = gbc.predict_proba(x_test)
DT_probs = dt.predict_proba(x_test)
LR_probs = logmodel.predict_proba(x_test)
ADA_probs = ada.predict_proba(x_test)
KNN_probs = KNN_probs[:, 1]
RF_probs = RF_probs[:, 1]
GBC_probs = GBC_probs[:, 1]
LR_probs = LR_probs[:, 1]
DT_probs = DT_probs[:, 1]
ADA_probs = ADA_probs[:, 1]
r_auc = roc_auc_score(y_test, r_probs)
KNN_auc = roc_auc_score(y_test, KNN_probs)
RF_auc = roc_auc_score(y_test, RF_probs)
GBC_auc = roc_auc_score(y_test, GBC_probs)
LR_auc = roc_auc_score(y_test, LR_probs)
DT_auc = roc_auc_score(y_test, DT_probs)
ADA_auc = roc_auc_score(y_test, ADA_probs)
r_fpr, r_tpr, _ = roc_curve(y_test, r_probs)
KNN_fpr, KNN_tpr, _ = roc_curve(y_test, KNN_probs)
RF_fpr, RF_tpr, _ = roc_curve(y_test, RF_probs)
GBC_fpr, GBC_tpr, _ = roc_curve(y_test, GBC_probs)
LR_fpr, LR_tpr, _ = roc_curve(y_test, LR_probs)
DT_fpr, DT_tpr, _ = roc_curve(y_test, DT_probs)
ADA_fpr, ADA_tpr, _ = roc_curve(y_test, ADA_probs)
plt.figure(figsize=(10,6))
plt.plot(r_fpr, r_tpr, linestyle='--')
#plt.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest (AUROC = %0.3f)' % rf_auc)
plt.plot(KNN_fpr, KNN_tpr, label='KNN (AUROC = %0.3f)' % KNN_auc)
plt.plot(RF_fpr, RF_tpr, label='RF (AUROC = %0.3f)' % RF_auc)
plt.plot(GBC_fpr, GBC_tpr, label='GBC (AUROC = %0.3f)' % GBC_auc)
plt.plot(LR_fpr, LR_tpr, label='LR (AUROC = %0.3f)' % LR_auc)
plt.plot(DT_fpr, DT_tpr, label='DT (AUROC = %0.3f)' % DT_auc)
plt.plot(ADA_fpr, ADA_tpr, label='ADA (AUROC = %0.3f)' % ADA_auc)
# Title
plt.title('ROC Plot (After Oversampling)')
# Axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# Show legend
plt.legend() #
# Show plot
plt.show()
```
## ANN
```
import tensorflow as tf
model= tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=200,activation='relu',input_shape= (33,)))
model.add(tf.keras.layers.Dense(units=200,activation='relu'))
model.add(tf.keras.layers.Dense(units=1,activation='sigmoid'))
model.summary()
```
## After Outlier
```
from sklearn import metrics
# Binary classifier: sigmoid output + binary cross-entropy.
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
epochs_hist = model.fit(x_train,y_train,epochs=50,batch_size=20)
model.metrics_names
y_pred=model.predict(x_test)
y_pred = (y_pred>0.5)  # threshold predicted probabilities at 0.5
plt.plot(epochs_hist.history['loss'])
# TF2/Keras records the metric under 'accuracy' (the old 'acc' key raises KeyError).
plt.plot(epochs_hist.history['accuracy'])
plt.xlabel('Epochs')
plt.ylabel('percentage')
plt.legend(['loss','accuracy'])
plt.title('Loss and Accuracy plot')
from sklearn.metrics import confusion_matrix,classification_report
cm = confusion_matrix(y_test,y_pred)
sns.heatmap(cm,annot=True)
print(classification_report(y_test,y_pred))
```
## After Oversampling
```
# Retrain the same ANN on the oversampled training data.
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
epochs_hist = model.fit(x_train_s,y_train_s,epochs=50,batch_size=20)
y_pred=model.predict(x_test)
y_pred = (y_pred>0.5)  # threshold predicted probabilities at 0.5
plt.plot(epochs_hist.history['loss'])
# TF2/Keras records the metric under 'accuracy' (the old 'acc' key raises KeyError).
plt.plot(epochs_hist.history['accuracy'])
plt.xlabel('Epochs')
plt.ylabel('percentage')
plt.legend(['loss','accuracy'])
plt.title('Loss and Accuracy plot')
from sklearn.metrics import confusion_matrix,classification_report
cm = confusion_matrix(y_test,y_pred)
sns.heatmap(cm,annot=True)
print(classification_report(y_test,y_pred))
```
# Feature Scaling
| github_jupyter |
```
import afqinsight.nn.tf_models as nn
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from afqinsight.datasets import AFQDataset
from afqinsight.nn.tf_models import cnn_lenet, mlp4, cnn_vgg, lstm1v0, lstm1, lstm2, blstm1, blstm2, lstm_fcn, cnn_resnet
from sklearn.impute import SimpleImputer
import os.path
# Harmonization
from sklearn.model_selection import train_test_split
from neurocombat_sklearn import CombatModel
import pandas as pd
from sklearn.utils import shuffle, resample
from afqinsight.augmentation import jitter, time_warp, scaling
import tempfile
afq_dataset = AFQDataset.from_files(
fn_nodes="../data/raw/combined_tract_profiles.csv",
fn_subjects="../data/raw/participants_updated_id.csv",
dwi_metrics=["dki_fa", "dki_md", "dki_mk"],
index_col="subject_id",
target_cols=["age", "dl_qc_score", "scan_site_id"],
label_encode_cols=["scan_site_id"]
)
afq_dataset.drop_target_na()
print(len(afq_dataset.subjects))
print(afq_dataset.X.shape)
print(afq_dataset.y.shape)
full_dataset = list(afq_dataset.as_tensorflow_dataset().as_numpy_iterator())
X = np.concatenate([xx[0][None] for xx in full_dataset], 0)
y = np.array([yy[1][0] for yy in full_dataset])
qc = np.array([yy[1][1] for yy in full_dataset])
site = np.array([yy[1][2] for yy in full_dataset])
X = X[qc>0]
y = y[qc>0]
site = site[qc>0]
# Split the data into train and test sets:
X_train, X_test, y_train, y_test, site_train, site_test = train_test_split(X, y, site, test_size=0.2, random_state=42)
imputer = SimpleImputer(strategy="median")
# Impute train and test separately:
X_train = np.concatenate([imputer.fit_transform(X_train[..., ii])[:, :, None] for ii in range(X_train.shape[-1])], -1)
X_test = np.concatenate([imputer.fit_transform(X_test[..., ii])[:, :, None] for ii in range(X_test.shape[-1])], -1)
# Combat
X_train = np.concatenate([CombatModel().fit_transform(X_train[..., ii], site_train[:, None], None, None)[:, :, None] for ii in range(X_train.shape[-1])], -1)
X_test = np.concatenate([CombatModel().fit_transform(X_test[..., ii], site_test[:, None], None, None)[:, :, None] for ii in range(X_test.shape[-1])], -1)
n_epochs = 1000
# EarlyStopping
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0.001,
mode="min",
patience=100
)
# ReduceLROnPlateau
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.5,
patience=20,
verbose=1,
)
def augment_this(X, y, rounds=1):
    """Augment (X, y) with jitter, scaling and time-warp, channel by channel.

    Each round appends one augmented copy of X and a copy of y, so the
    result has (rounds + 1) * len(X) samples.  The original default
    ``rounds=n_round`` referenced an undefined name and raised NameError
    at definition time; a concrete default of 1 is used instead.
    """
    new_X = X[:]
    new_y = y[:]
    for _ in range(rounds):
        aug_X = np.zeros_like(X)
        # Do each channel separately:
        for channel in range(aug_X.shape[-1]):
            this_X = X[..., channel][..., np.newaxis]
            # sigma scaled to the channel's mean keeps perturbations relative.
            this_X = jitter(this_X, sigma=np.mean(this_X)/25)
            this_X = scaling(this_X, sigma=np.mean(this_X)/25)
            this_X = time_warp(this_X, sigma=np.mean(this_X)/25)
            aug_X[..., channel] = this_X[...,0]
        new_X = np.concatenate([new_X, aug_X])
        new_y = np.concatenate([new_y, y])
    return new_X, new_y
# Generate evaluation results, training history, number of epochs
# Generate evaluation results, training history, number of epochs
def model_augmentation(model_name, name_str, lr, X_train, y_train, X_test, y_test, n_round):
    """Train ``model_name`` on augmented data and return its test metrics.

    Compiles the model with MSE loss, augments the training set ``n_round``
    times, trains with early stopping / LR reduction / checkpointing, then
    restores the best weights and evaluates on (X_test, y_test).
    Returns a tidy DataFrame with one row per metric (MSE, RMSE, MAE, coef).
    Note: the original built its result from undefined names
    (site_*/eval_*/coef*); it now reports the metrics computed in this call.
    """
    model = model_name(input_shape=(100, 72), n_classes=1, output_activation=None, verbose=True)
    model.compile(loss='mean_squared_error',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                  metrics=['mean_squared_error',
                           tf.keras.metrics.RootMeanSquaredError(name='rmse'),
                           'mean_absolute_error'])
    ckpt_filepath = tempfile.NamedTemporaryFile().name + '.h5'
    ckpt = tf.keras.callbacks.ModelCheckpoint(
        filepath = ckpt_filepath,
        monitor="val_loss",
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        mode="auto",
    )
    # Honor the requested number of augmentation rounds (was silently ignored).
    X_train, y_train = augment_this(X_train, y_train, rounds=n_round)
    log = tf.keras.callbacks.CSVLogger(filename=(name_str + '.csv'), append=True)
    callbacks = [early_stopping, ckpt, reduce_lr, log]
    model.fit(X_train, y_train, epochs=n_epochs, batch_size=128,
              validation_split=0.2, callbacks=callbacks)
    model.load_weights(ckpt_filepath)  # restore the best (lowest val_loss) weights
    y_predict = model.predict(X_test)
    y_predict = y_predict.reshape(y_test.shape)
    coef = np.corrcoef(y_test, y_predict)[0,1] ** 2  # squared Pearson r (R^2)
    evaluation = model.evaluate(X_test, y_test)  # [loss, mse, rmse, mae]
    # Results
    result = {'Model': [name_str] * 4,
              'Rounds': [n_round] * 4,
              'Metric': ['MSE', 'RMSE', 'MAE', 'coef'],
              'Value': [evaluation[1], evaluation[2], evaluation[3], coef]}
    df = pd.DataFrame(result)
    return df
# Run the ResNet experiment for several augmentation-round settings and merge
# the per-run metric frames.  (The original lines were syntactically invalid:
# a bare `def`, a stray `+` after the for-header, and a call that did not
# match model_augmentation's signature.)
resnet_runs = []
for run in range(10):
    resnet_runs.append(
        model_augmentation(cnn_resnet, 'cnn_resnet', 0.01,
                           X_train, y_train, X_test, y_test, n_round=run + 1))
df_resnet = pd.concat(resnet_runs, ignore_index=True)
```
| github_jupyter |
# 06_Feed-forward_Neural_Networks
In this notebook, we will see how to define simple feed-forward neural networks.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(777) # reproducibility
```
## Neural Networks
A typical training procedure for a neural network is as follows:
- Define the neural network that has some learnable parameters (or weights)
- Iterate over a dataset of inputs
- Process input through the network
- Compute the loss (how far is the output from being correct)
- Propagate gradients back into the network’s parameters
- Update the weights of the network, typically using an optimizer.
We will look at all the above processes with a concrete example, MNIST.
### Define the network
First of all, we need a new feed-forward neural network for performing image classification on MNIST.
In PyTorch, you can build your own neural network using the `torch.nn`package:
```
# Hyper-parameters
input_size = 784
hidden_size = 256
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# Device configuration
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# Fully connected neural network with one hidden layer
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    """Two-layer MLP: input -> hidden (ReLU) -> raw class scores.

    The output is left as unnormalized logits; the softmax is applied
    later inside the cross-entropy loss.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        # nn.Linear(in_features, out_features, bias=True) is a fully
        # connected (affine) layer; nn.ReLU() is the element-wise
        # rectifier placed between the two affine layers.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Chain the layers: affine -> ReLU -> affine.
        return self.fc2(self.relu(self.fc1(x)))
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
print(model)
```
You just have to define the `forward` function, and the `backward` function (where gradients are computed) is automatically defined for you using `autograd`.
The architecture of the above `NeuralNet` is as follows:
<img src="images/nn_architecture.png" width="500">
Here, x and y are the input, target (true label) values, respectively.
The learnable parameters of a model are returned by `model.parameters()`.
```
params = list(model.parameters())
print(len(params))
print(params[0].size()) # fc1's .weight
```
### Loss function and Optimizer
A loss function takes the (output, target) pair of inputs, and computes a value that estimates how far away the output is from the target.
There are several different loss functions under the nn package.
We use `nn.CrossEntropyLoss()`.
```
input = torch.randn(1, 784) # a random input, for example
output = model(input) # output: (batch_size, num_classes)
print(output)
target = torch.tensor([0]) # a dummy target, for example. target: (batch_size) where 0 <= each element < num_classes
criterion = nn.CrossEntropyLoss()
loss = criterion(output, target)
print(loss)
```
Furthermore, PyTorch supports several optimizers from `torch.optim`.
We use an Adam optimizer.
```
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
### DataLoader
```
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='./data',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# plot one example
print(train_dataset.train_data.size()) # (60000, 28, 28)
print(train_dataset.train_labels.size()) # (60000)
idx = 0
plt.title('%d' % train_dataset.train_labels[idx].item())
plt.imshow(train_dataset.train_data[idx,:,:].numpy(), cmap='gray')
```
### Train the network
```
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
running_loss = 0.0
for i, (images, labels) in enumerate(train_loader):
# Move tensors to the configured device
images = images.reshape(-1, input_size).to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# zero the parameter gradients
optimizer.zero_grad()
# backward + optimize
loss.backward()
optimizer.step()
running_loss += loss.item()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, running_loss / 100))
running_loss = 0.0
```
### Test the network
```
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.reshape(-1, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
```
### Save/Load the network parameters
```
# Save the model checkpoint
torch.save(model.state_dict(), './data/nn_model.ckpt')
# Load the model checkpoint if needed
# new_model = NeuralNet(input_size, hidden_size, num_classes).to(device)
# new_model.load_state_dict(torch.load('./data/nn_model.ckpt'))
```
## Practice: CIFAR10
<img src="images/cifar10.png" width="400">
The CIFAR-10 dataset has the following specification:
- The images in CIFAR-10 are of size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
- CIFAR-10 has the ten classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’, ‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’.
You have to define a feed-forward neural network with two hidden layers for performing image classification on the CIFAR-10 dataset as well as train and test the network.
```
# Hyper-parameters
input_size = 3*32*32
hidden1_size = 512
hidden2_size = 128
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# Device configuration
device = torch.device('cpu')
# transform images to tensors of normalized range [-1, 1]
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True, num_workers=2)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
shuffle=False, num_workers=2)
# Write the code to define a neural network with two hidden layers.
class Net(nn.Module):
    """Practice skeleton: an MLP with two hidden layers for CIFAR-10.

    Intended layer sizes: input_size -> hidden1_size -> hidden2_size
    -> num_classes. The bodies are deliberately left for the student.
    """
    def __init__(self, input_size, hidden1_size, hidden2_size, num_classes):
        super(Net, self).__init__()
        # ============ YOUR CODE HERE ============
        # ========================================
    def forward(self, x):
        # ============ YOUR CODE HERE ============
        return x # DUMMY: returns the input unchanged until implemented
        # ========================================
model = Net(input_size, hidden1_size, hidden2_size, num_classes).to(device)
print(model)
# Write the code to train and test the network.
# ============ YOUR CODE HERE ============
# ========================================
```
| github_jupyter |
## 7. Fourier-transzformációs módszer, FFTMethod
A kiértékelés a lépései:
**betöltés → előfeldolgozás → IFFT → ablakolás → FFT → fázis**
A programban is hasonló nevű a függvényeket kell meghívni. Az ajánlott sorrend a függvények hívásában a fenti folyamatábra, mivel nem garantált, hogy a tengelyek helyesen fognak transzformálódni tetszőleges sorrendű függvényhívások után.
A bemutatást szimulált példákkal kezdem, majd rátérek egy mért interferogram kiértékelésére is.
```
import numpy as np
import matplotlib.pyplot as plt
import pysprint as ps
g = ps.Generator(1, 4, 2.5, 1500, GDD=400, TOD=400, FOD=1000, pulse_width=4, resolution=0.05)
g.generate_freq()
```
#### 7.1 Automatikus kiértékelés
```
f = ps.FFTMethod(*g.data)
f.autorun(reference_point=2.5, order=4)
```
Egy másik automatikus kiértékelés (ugyan azon az interferogramon), ezúttal csak a fázist kapjuk meg. Ennek a fázisgrafikonnak a széleit kivágjuk a `slice` függvénnyel, majd a `fit` metódust használva számolhatjuk a diszperziós együtthatókat.
```
f2 = ps.FFTMethod(*g.data)
phase = f.autorun(show_graph=False, enable_printing=False)
print(type(phase))
phase.slice(1.1, 3.9)
phase.fit(reference_point=2.5, order=4);
```
Bár látható volt, hogy a program jól határozta meg a Gauss ablakfüggvény paramétereit és ezáltal a diszperziós együtthatókat is, de jelzett, hogy a kivágandó csúcs túl közel van az origóhoz, így jobb ha azt manuálisan állítjuk be. Nézzük meg a fázist az illesztett görbével:
```
phase.plot()
```
Majd az illesztési hiba:
```
phase.errorplot(percent=True)
```
#### 7.2 Manuális kiértékelés
Nézzünk meg egy manuális kiértékelést. Itt a nekem meglévő interferogramot fogom használni, ami enyhén szólva sem ideális a Fourier-transzformációs kiértékeléshez, de megpróbálom a legtöbb használható információt kihozni belőle. Mivel már előre tudom hogy hogyan érdemes az ablakfüggvényt beállítani, így itt az ún. `inplace=False` argumentumot fogom használni. Alapvetően minden függvény amit meghívunk `inplace=True` módon hajtódik végre, azaz megváltoztatja magát az objektumot. Így működik pl. a python listáknál az `append` függvény:
```python
>>> a = []
>>> a.append(1)
>>> print(a)
[1]
```
A csomag során sok függvénynél lehetőség van megadni az `inplace=False` argumentumot, ami nem változtatja meg magát az objektumot, hanem visszaad egy új másolatot belőle, és kért függvényt azon a másolaton fogja végrehajtani. Ennek két előnye van: Az eredeti objektum (és így vele minden eredetileg betöltött adatsor) megmarad, és anélkül hogy újra és újra betöltenénk más objektumba az adatokat, ezért elég belőle egy. A második előny pedig abból adódik, hogy megengedi a műveletek láncolását, ahogy az alábbi példa mutatja. ([fluent interfacing](https://en.wikipedia.org/wiki/Fluent_interface) and [method cascading](https://en.wikipedia.org/wiki/Method_cascading)) Itt a szokásos kiértékelési lépéseket hajtottam végre. Az utolsó függvény amit meghívtam rajta, az a `build_phase`, ami egy fázist ad vissza, ezért a hosszú láncolat után az lesz a visszatérített érték (ezt elneveztem `phase3`-nak).
```
f3 = ps.FFTMethod.parse_raw('datasets/ifg.trt', skiprows=8, meta_len=8, decimal=",", delimiter=";")
phase3 = (
f3.chdomain(inplace=False)
.ifft(inplace=False)
.window(at=145, fwhm=240, window_order=16, inplace=False)
.apply_window(inplace=False)
.fft(inplace=False)
.build_phase()
)
```
Itt a jobb olvashatóság miatt minden új függvénynél új sort kezdtem és zárójelbe tettem. Ezek nélkül így festene:
```
phase4 = f3.chdomain(inplace=False).ifft(inplace=False).window(at=145, fwhm=240, window_order=16, plot=False, inplace=False).apply_window(inplace=False).fft(inplace=False).build_phase()
```
Mivel nem volt ideális az interferogram vizsgáljuk meg milyen fázist kaptunk vissza.
```
phase3.plot()
```
Itt észrevehető, hogy vannak olyan részei a görbének, amely valóban tartalmazza a minta fázistulajdonságait. Vágjuk ki ezt a részt a `slice` függvénnyel.
```
phase3.slice(1.71, 2.72)
phase3.plot()
```
Ezután végezzük el az illesztést a `fit` függvénnyel:
```
phase3.fit(reference_point=2.355, order=3);
```
A kapott diszperziós együtthatók valóban jó közelítéssel tükrözik a mintára jellemző (már egyéb módszerekkel meghatározott) koefficienseket. Vizsgáljuk meg az illesztési hibát is:
```
phase3.errorplot(percent=True)
```
Ugyan ez a kiértékelés hagyományosan, az `inplace=False` paraméterek nélkül így néz ki:
```
f4 = ps.FFTMethod.parse_raw('datasets/ifg.trt', skiprows=8, meta_len=8, decimal=",", delimiter=";")
f4.chdomain()
f4.ifft()
f4.window(at=145, fwhm=240, window_order=16, plot=False)
f4.apply_window()
f4.fft()
phase4 = f4.build_phase()
phase4.slice(1.71, 2.72)
phase4.fit(2.355, 3);
```
Próbáljuk meg az impulzus időbeli alakját kiszámolni. Ehhez a `get_pulse_shape_from_file` függvényt fogom használni, aminek a tárgykar spektrumát adom meg.
```
x_t, y_t = f4.get_pulse_shape_from_file("datasets/sam.trt", truncate=True, chdomain=True, skiprows=8, decimal=",", sep=";")
plt.plot(x_t, y_t)
plt.grid()
plt.xlabel("t [fs]");
```
Mivel a használt interferogram nem volt ideális, így itt az impulzus alakját nem lehetett tökéletesen visszakapni.
Alapértelmezetten néhány dolog el van rejtve a felhasználó elől. Az előző `get_pulse_shape_from_file` függvényt újra lefuttatom, ezúttal teljes logging outputtal. Ezt szinte soha nem kell használnunk, itt is csak a magyarázat miatt van létjogosultsága.
```
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
x_t, y_t = f4.get_pulse_shape_from_file("datasets/sam.trt", truncate=True, chdomain=True, skiprows=8, decimal=",", sep=";")
```
Látható, hogy *2.322* és *2.719 PHz* között 287 adatpontnál sikerült kiszámítani a
$I(t) = |\mathcal{F}^{-1}\{\sqrt{|I_{tárgy}(\omega)|}\cdot e^{-i\Phi{(\omega)}}\}|^2$
kifejezés értékét. Ez annak köszönhető, hogy a kiszámolt fázist elég nagy tartományban nem tudtuk felhasználni (eredetileg a *1.71 - 2.72* *PHz* tartományt vágtuk ki), illetve az transzformációk során behozott numerikus hiba is közrejátszott.
#### 7.3 NUFFT
Végül a Non-uniform FFT használata. Itt teljesen ugyanazt hajtom végre, mint fentebb, csak `usenifft=True` argumentummal.
```
# csak visszaállítom a log szintet az alapértelmezettre, hogy ne árassza el a képernyőt
logger.setLevel(logging.ERROR)
f5 = ps.FFTMethod.parse_raw('datasets/ifg.trt', skiprows=8, meta_len=8, decimal=",", delimiter=";")
f5.chdomain()
f5.ifft(usenifft=True)
f5.window(at=155, fwhm=260, window_order=16, plot=False)
f5.apply_window()
f5.fft()
phase6 = f5.build_phase()
phase6.slice(None, 2.49)
phase6.fit(2.355, 3);
phase6.plot()
```
A szimulációk alapján a NUFFT valamivel pontatlanabb eredményt ad, mint az interpoláció + FFT.
| github_jupyter |
```
# our usual things!
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# weather in Champaign!
w = pd.read_csv("/Users/jillnaiman1/Downloads/2018_ChampaignWeather.csv")
w
# sort by date
w.sort_values(by='DATE') # w is our pandas dataframe, sort_values is a pandas call
type(w['DATE'])
w['DATE'] = pd.to_datetime(w['DATE']) # changing to datetime format
# lets just look at 1 station
mask = w['NAME'] == 'CHAMPAIGN 3 S, IL US'
mask
# minium temperature during a day of 2018
plt.plot(w['DATE'][mask], w['TMIN'][mask], label='Min Temp')
plt.plot(w['DATE'][mask], w['TMAX'][mask], label='Max Temp')
# label our axes
plt.xlabel('Date')
plt.ylabel('Temp in F')
plt.legend()
# make our plots a bit bigger
plt.rcParams['figure.dpi'] = 100
```
# Histograms & Rolling Averages
```
mean_temp = 0.5*(w['TMIN']+w['TMAX'])[mask]
import ipywidgets # interactivity
# make our data look less noisy with rolling averages
# Interactive plot: raw daily mean temperature overlaid with a
# rolling-mean smoothed curve; the slider picks the window length.
@ipywidgets.interact(window=(1,40,1))
def make_plot(window):
    """Plot raw vs. rolling-average mean daily temperature.

    window: rolling window length in samples (slider-controlled).
    Relies on the notebook-global `mean_temp` pandas Series.
    """
    mean_temp_avg = mean_temp.rolling(window=window).mean()
    plt.plot(mean_temp, marker='.', linewidth=0.5, alpha=0.5)
    plt.plot(mean_temp_avg, marker='.', linewidth=1.5)
    plt.xlabel('Date')
    plt.ylabel('Mean Daily Temp in F')
w.keys()
precp = w['PRCP'][mask]
# we want to format our dates correctly
import matplotlib.dates as mdates
set_ind = False
for k in w.keys():
if k.find('DATE') != -1: # have we indexed by date yet?
set_ind = True
if set_ind: w.set_index('DATE', inplace=True)
#w['PRCP']
#w
names = ['SteveBob', 'Jerry', 'Frank']
for n in names:
print(n.find('Bob'))
# because we have re-indexed, lets redefine our arrays
mask = w['NAME'] == 'CHAMPAIGN 3 S, IL US'
mean_temp = 0.5*(w['TMIN']+w['TMAX'])[mask]
precp = w['PRCP'][mask]
@ipywidgets.interact(window=(1,60,2))
def make_plot(window):
    """Side-by-side rolling summaries: centered rolling-mean temperature
    (left) and centered rolling-sum rainfall (right).

    window: rolling window length in samples (slider-controlled).
    Relies on the notebook-global `mean_temp` and `precp` Series.
    """
    fig, ax = plt.subplots(1,2, figsize=(10,4))
    # (2) This was right-handed binning
    #mean_temp_avg = mean_temp.rolling(window=window).mean()
    mean_temp_avg = mean_temp.rolling(window=window, center=True).mean()
    mean_temp.plot(ax=ax[0]) # using pandas to plot
    mean_temp_avg.plot(ax=ax[0])
    # (1) We tried this, but its not highlighting what
    # we want
    #precp_avg = precp.rolling(window=window).mean()
    # (2) This was right-handed binning
    #precp_avg = precp.rolling(window=window).sum()
    # Centered rolling *sum* highlights wet spells better than a mean.
    precp_avg = precp.rolling(window=window, center=True).sum()
    precp.plot(ax=ax[1], marker='.', linewidth=0.5, alpha=0.5)
    precp_avg.plot(ax=ax[1], marker='.', linewidth=1.5)
    ax[1].set_xlabel('Date')
    ax[1].set_ylabel('Daily rainfall in inches')
    ax[0].set_xlabel('Date')
    ax[0].set_ylabel('Mean Daily Temp in F')
precp.rolling?
# now lets look at a binning example for our rainfall data
# this is a strict histogram/rebinning exercise, NOT smoothing
# Note: rolling averages/sums as we've been using, is somewhere
# between smoothing & binning
@ipywidgets.interact(window=(1,60,1), day_bins=(1,100,5))
def make_plot(window,day_bins):
    """Contrast rolling smoothing (left, temperature) with strict
    resampling/binning (right, rainfall summed over `day_bins`-day bins).

    window: rolling-mean window length; day_bins: resample bin width in days.
    Relies on the notebook-global `mean_temp` and `precp` Series.
    """
    fig, ax = plt.subplots(1,2,figsize=(10,4))
    mean_temp_avg = mean_temp.rolling(window=window,center=True).mean()
    mean_temp.plot(ax=ax[0])
    mean_temp_avg.plot(ax=ax[0])
    precp.plot(ax=ax[1], marker='.', linewidth=0.5, alpha=0.5)
    # resample() rebins into non-overlapping windows (a true histogram),
    # unlike rolling(), whose windows overlap.
    precp_resampled = precp.resample(str(day_bins)+'D').sum()
    # day_bins = 5 => '5D', resampling by months is 'M'
    precp_resampled.plot(ax=ax[1], marker='.')
    ax[1].set_xlabel('Date')
    ax[1].set_ylabel('Summed Rainfall over ' + str(day_bins) + 'days, in Inches')
    ax[0].set_xlabel('Date')
    ax[0].set_ylabel('Mean Daily Temp in F')
```
## Take aways
* rolling averages => smoothing-lite, or fancy binning => like on our left temp plot
* on the right rainfall plot => HISTOGRAM
* so, in binning or histogramming we are more "truthful" to the original data, where in smoothing we can "double count" data points across bins
## Quick look at windowing
Toy example first
```
# window of 10 bins, constant data in the window
npoints = 10
x = np.arange(0,npoints)
y = np.repeat(1,npoints)
plt.plot(x,y,'o', label='Original Data')
# so lets say we really want to highlight the center
# bins
plt.plot(x,y*np.bartlett(npoints),'o', label='Bartlett')
# also another type of window
plt.plot(x,y*np.hamming(npoints),'o',label='Hamming')
plt.legend()
# plot available windows
windows_avail = [None,'boxcar','triang','blackman','hamming',
'bartlett','parzen', 'bohman',
'blackmanharris','nuttall','barthann']
@ipywidgets.interact(window=(1,100,1), window_type=windows_avail)
def make_plot(window, window_type):
    """Plot raw mean temperature plus a centered rolling mean computed
    with the selected weighting window.

    window: window length; window_type: a name from `windows_avail`
    (None means uniform weights). Relies on the notebook-global `mean_temp`.
    """
    plt.plot(mean_temp)
    mean_temp_avg = mean_temp.rolling(window=window,center=True,win_type=window_type).mean()
    plt.plot(mean_temp_avg)
```
# Similar binning & Smoothing in 2D
```
ufos = pd.read_csv("/Users/jillnaiman1/Downloads/ufo-scrubbed-geocoded-time-standardized-00.csv",
names = ["date", "city", "state", "country",
"shape", "duration_seconds", "duration",
"comment", "report_date", "latitude", "longitude"],
parse_dates = ["date", "report_date"])
# if you get a memory error, don't panic!
ufos
# quick plot
plt.plot(ufos['longitude'], ufos['latitude'],'.')
# colormaps
import matplotlib.cm as cm
plt.colormaps()
plt.scatter(ufos['longitude'],ufos['latitude'],c=np.log10(ufos['duration_seconds']))
plt.scatter(ufos['longitude'][0:10], ufos['latitude'][0:10],c=ufos['duration_seconds'][0:10])
# our data is hard to see, lets try some rebinning in 2D
plt.hexbin(ufos["longitude"], ufos["latitude"], ufos["duration_seconds"], gridsize=32,bins='log')
# almost the exact same thing we did with histograms before in 1D for our rainfall data
# can also smooth 2D images
import PIL.Image as Image
im = Image.open('/Users/jillnaiman1/Downloads/stitch_reworked.png')
fig,ax = plt.subplots(figsize=(5,5))
ax.imshow(im)
import PIL.ImageFilter as ImageFilter
myFilter = ImageFilter.GaussianBlur(radius=1)
smoothed_image = im.filter(myFilter)
fig,ax = plt.subplots(figsize=(5,5))
ax.imshow(smoothed_image)
data_im = np.array(im)
np.unique(data_im)
data_sm = np.array(smoothed_image)
np.unique(data_sm)
```
| github_jupyter |
# Lab 2 - Logistic Regression (LR) with MNIST
This lab corresponds to Module 2 of the "Deep Learning Explained" course. We assume that you have successfully completed Lab 1 (Downloading the MNIST data).
In this lab we will build and train a Multiclass Logistic Regression model using the MNIST data.
## Introduction
**Problem**:
Optical Character Recognition (OCR) is a hot research area and there is a great demand for automation. The MNIST data is comprised of hand-written digits with little background noise making it a nice dataset to create, experiment and learn deep learning models with reasonably small computing resources.
**Goal**:
Our goal is to train a classifier that will identify the digits in the MNIST dataset.
**Approach**:
There are 4 stages in this lab:
- **Data reading**: We will use the CNTK Text reader.
- **Data preprocessing**: Covered in part A (suggested extension section).
- **Model creation**: Multiclass Logistic Regression model.
- **Train-Test-Predict**: This is the same workflow introduced in the lectures
## Logistic Regression
[Logistic Regression](https://en.wikipedia.org/wiki/Logistic_regression) (LR) is a fundamental machine learning technique that uses a linear weighted combination of features and generates probability-based predictions of different classes.
There are two basic forms of LR: **Binary LR** (with a single output that can predict two classes) and **multiclass LR** (with multiple outputs, each of which is used to predict a single class).

In **Binary Logistic Regression** (see top of figure above), the input features are each scaled by an associated weight and summed together. The sum is passed through a squashing (aka activation) function and generates an output in [0,1]. This output value is then compared with a threshold (such as 0.5) to produce a binary label (0 or 1), predicting 1 of 2 classes. This technique supports only classification problems with two output classes, hence the name binary LR. In the binary LR example shown above, the [sigmoid][] function is used as the squashing function.
[sigmoid]: https://en.wikipedia.org/wiki/Sigmoid_function
In **Multiclass Logistic Regression** (see bottom of figure above), 2 or more output nodes are used, one for each output class to be predicted. Each summation node uses its own set of weights to scale the input features and sum them together. Instead of passing the summed output of the weighted input features through a sigmoid squashing function, the output is often passed through a [softmax][] function (which in addition to squashing, like the sigmoid, the softmax normalizes each nodes' output value using the sum of all unnormalized nodes). (Details in the context of MNIST image to follow)
We will use multiclass LR for classifying the MNIST digits (0-9) using 10 output nodes (1 for each of our output classes). In our approach, we will move the softmax function out of the model and into our Loss function used in training (details to follow).
[softmax]: https://en.wikipedia.org/wiki/Softmax_function
```
# Import the relevant components
from IPython.display import Image
from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import cntk as C
%matplotlib inline
```
In the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU).
```
# Select the right target device when this notebook is being tested:
if 'TEST_DEVICE' in os.environ:
if os.environ['TEST_DEVICE'] == 'cpu':
C.device.try_set_default_device(C.device.cpu())
else:
C.device.try_set_default_device(C.device.gpu(0))
# Test for CNTK version
#if not C.__version__ == "2.0":
# raise Exception("this lab is designed to work with 2.0. Current Version: " + C.__version__)
```
## Initialization
```
# Ensure we always get the same amount of randomness
np.random.seed(0)
C.cntk_py.set_fixed_random_seed(1)
C.cntk_py.force_deterministic_algorithms()
# Define the data dimensions
input_dim = 256 #784
num_output_classes = 11 #10
```
## Data reading
There are different ways one can read data into CNTK. The easiest way is to load the data in memory using NumPy / SciPy / Pandas readers. However, this can be done only for small data sets. Since deep learning requires large amount of data we have chosen in this course to show how to leverage built-in distributed readers that can scale to terabytes of data with little extra effort.
We are using the MNIST data you have downloaded using Lab 1 DataLoader notebook. The dataset has 60,000 training images and 10,000 test images with each image being 28 x 28 pixels. Thus the number of features is equal to 784 (= 28 x 28 pixels), 1 per pixel. The variable `num_output_classes` is set to 10 corresponding to the number of digits (0-9) in the dataset.
In Lab 1, the data was downloaded and written to 2 CTF (CNTK Text Format) files, 1 for training, and 1 for testing. Each line of these text files takes the form:
|labels 0 0 0 1 0 0 0 0 0 0 |features 0 0 0 0 ...
(784 integers each representing a pixel)
We are going to use the image pixels corresponding to the integer stream named "features". We define a `create_reader` function to read the training and test data using the [CTF deserializer](https://cntk.ai/pythondocs/cntk.io.html?highlight=ctfdeserializer#cntk.io.CTFDeserializer). The labels are [1-hot encoded](https://en.wikipedia.org/wiki/One-hot). Refer to Lab 1 for data format visualizations.
```
# Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file
# Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file
def create_reader(path, is_training, input_dim, num_label_classes):
    """Build a MinibatchSource over the CTF file at `path`.

    Exposes two dense streams, 'labels' and 'features'. Training readers
    shuffle and sweep the data indefinitely; test readers make a single,
    in-order pass.
    """
    streams = C.io.StreamDefs(
        labels=C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False),
        features=C.io.StreamDef(field='features', shape=input_dim, is_sparse=False),
    )
    max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, streams),
                                randomize=is_training, max_sweeps=max_sweeps)
# Ensure the training and test data is generated and available for this lab.
# We search in two locations in the toolkit for the cached MNIST data set.
data_found = False
for data_dir in [os.path.join(".", "PLAID")]:
train_file = os.path.join(data_dir, "train_log.txt")
test_file = os.path.join(data_dir, "test_log.txt")
if os.path.isfile(train_file) and os.path.isfile(test_file):
data_found = True
break
if not data_found:
raise ValueError("Please generate the data by completing Lab1_MNIST_DataLoader")
print("Data directory is {0}".format(data_dir))
```
# Model Creation
A multiclass logistic regression (LR) network is a simple building block that has been effectively powering many ML
applications in the past decade. The figure below summarizes the model in the context of the MNIST data.

LR is a simple linear model that takes as input, a vector of numbers describing the properties of what we are classifying (also known as a feature vector, $\bf \vec{x}$, the pixels in the input MNIST digit image) and emits the *evidence* ($z$). For each of the 10 digits, there is a vector of weights corresponding to the input pixels as show in the figure. These 10 weight vectors define the weight matrix ($\bf {W}$) with dimension of 10 x 784. Each feature in the input layer is connected with a summation node by a corresponding weight $w$ (individual weight values from the $\bf{W}$ matrix). Note there are 10 such nodes, 1 corresponding to each digit to be classified.
The first step is to compute the evidence for an observation.
$$\vec{z} = \textbf{W} \bf \vec{x}^T + \vec{b}$$
where $\bf{W}$ is the weight matrix of dimension 10 x 784 and $\vec{b}$ is known as the *bias* vector with length 10, one for each digit.
The evidence ($\vec{z}$) is not squashed (hence no activation). Instead the output is normalized using a [softmax](https://en.wikipedia.org/wiki/Softmax_function) function such that all the outputs add up to a value of 1, thus lending a probabilistic interpretation to the prediction. In CNTK, we use the softmax operation combined with the cross entropy error as our Loss Function for training.
```
#2nd submission, we use 2, 400 respectively
#3rd submission, we use 4, 100 respectively
#4rd submission, we use 4, 200 respectively
num_hidden_layers = 8
hidden_layers_dim = 400
```
Network input and output:
- **input** variable (a key CNTK concept):
>An **input** variable is a container in which we fill different observations, in this case image pixels, during model learning (a.k.a.training) and model evaluation (a.k.a. testing). Thus, the shape of the `input` must match the shape of the data that will be provided. For example, when data are images each of height 10 pixels and width 5 pixels, the input feature dimension will be 50 (representing the total number of image pixels).
**Knowledge Check:** What is the input dimension of your chosen model? This is fundamental to our understanding of variables in a network or model representation in CNTK.
```
input = C.input_variable(input_dim)
label = C.input_variable(num_output_classes)
```
## Logistic Regression network setup
The CNTK Layers module provides a Dense function that creates a fully connected layer which performs the above operations of weighted input summing and bias addition.
```
def create_model(features):
with C.layers.default_options(init = C.layers.glorot_uniform(), activation = C.ops.relu):
h = features
for _ in range(num_hidden_layers):
h = C.layers.Dense(hidden_layers_dim)(h)
r = C.layers.Dense(num_output_classes, activation = None)(h)
return r
```
`z` will be used to represent the output of a network.
```
# Scale the input to 0-1 range by dividing each pixel by 255.
#input_s = input/255
z = create_model(input)
```
## Training
Below, we define the **Loss** function, which is used to guide weight changes during training.
As explained in the lectures, we use the `softmax` function to map the accumulated evidences or activations to a probability distribution over the classes (Details of the [softmax function][] and other [activation][] functions).
[softmax function]: http://cntk.ai/pythondocs/cntk.ops.html#cntk.ops.softmax
[activation]: https://github.com/Microsoft/CNTK/wiki/Activation-Functions
We minimize the cross-entropy between the label and predicted probability by the network.
```
loss = C.cross_entropy_with_softmax(z, label)
```
#### Evaluation
Below, we define the **Evaluation** (or metric) function that is used to report a measurement of how well our model is performing.
For this problem, we choose the **classification_error()** function as our metric, which returns the average error over the associated samples (treating a match as "1", where the model's prediction matches the "ground truth" label, and a non-match as "0").
```
label_error = C.classification_error(z, label)
```
### Configure training
The trainer strives to reduce the `loss` function by different optimization approaches, [Stochastic Gradient Descent][] (`sgd`) being one of the most popular. Typically, one would start with random initialization of the model parameters. The `sgd` optimizer would calculate the `loss` or error between the predicted label against the corresponding ground-truth label and using [gradient-decent][] generate a new set model parameters in a single iteration.
The aforementioned model parameter update using a single observation at a time is attractive since it does not require the entire data set (all observation) to be loaded in memory and also requires gradient computation over fewer datapoints, thus allowing for training on large data sets. However, the updates generated using a single observation sample at a time can vary wildly between iterations. An intermediate ground is to load a small set of observations and use an average of the `loss` or error from that set to update the model parameters. This subset is called a *minibatch*.
With minibatches, we sample observations from the larger training dataset. We repeat the process of model parameters update using different combination of training samples and over a period of time minimize the `loss` (and the error metric). When the incremental error rates are no longer changing significantly or after a preset number of maximum minibatches to train, we claim that our model is trained.
One of the key optimization parameters is called the `learning_rate`. For now, we can think of it as a scaling factor that modulates how much we change the parameters in any iteration.
With this information, we are ready to create our trainer.
[optimization]: https://en.wikipedia.org/wiki/Category:Convex_optimization
[Stochastic Gradient Descent]: https://en.wikipedia.org/wiki/Stochastic_gradient_descent
[gradient-decent]: http://www.statisticsviews.com/details/feature/5722691/Getting-to-the-Bottom-of-Regression-with-Gradient-Descent.html
```
# Instantiate the trainer object to drive the model training
learning_rate = 0.2
lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)
learner = C.sgd(z.parameters, lr_schedule)
trainer = C.Trainer(z, (loss, label_error), [learner])
```
First let us create some helper functions that will be needed to visualize different functions associated with training.
```
# Define a utility function to compute the moving average sum.
# A more efficient implementation is possible with np.cumsum() function
def moving_average(a, w=5):
    """Smooth the sequence `a` with a trailing window of `w` values.

    The first `w` entries are passed through unchanged; entry `idx >= w`
    becomes the mean of a[idx-w:idx]. A sequence shorter than the window
    is returned as a copy.
    """
    if len(a) < w:
        return a[:]  # copy, so the caller's list is never aliased
    smoothed = []
    for idx, val in enumerate(a):
        if idx < w:
            smoothed.append(val)
        else:
            smoothed.append(sum(a[idx - w:idx]) / w)
    return smoothed
# Defines a utility that prints the training progress
def print_training_progress(trainer, mb, frequency, verbose=1):
    """Report training loss/error every `frequency` minibatches.

    Returns (mb, loss, error); loss and error are the string "NA" on
    non-reporting iterations, so the trainer is only queried when
    mb is a multiple of `frequency`.
    """
    loss_val = "NA"
    err_val = "NA"
    if mb % frequency != 0:
        return mb, loss_val, err_val
    loss_val = trainer.previous_minibatch_loss_average
    err_val = trainer.previous_minibatch_evaluation_average
    if verbose:
        print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, loss_val, err_val*100))
    return mb, loss_val, err_val
```
<a id='Run the trainer'></a>
### Run the trainer
We are now ready to train our fully connected neural net. We want to decide what data we need to feed into the training engine.
In this example, each iteration of the optimizer will work on `minibatch_size` sized samples. We would like to train on all 60000 observations. Additionally we will make multiple passes through the data specified by the variable `num_sweeps_to_train_with`. With these parameters we can proceed with training our simple feed forward network.
```
# Initialize the parameters for the trainer
minibatch_size = 64
num_samples_per_sweep = 60000
num_sweeps_to_train_with = 10
# Total optimizer steps = samples per sweep * sweeps / minibatch size
num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
# Create the reader to training data set
reader_train = create_reader(train_file, True, input_dim, num_output_classes)
# Map the data streams to the input and labels.
# NOTE(review): `input` and `label` are the CNTK variables defined earlier
# in the notebook (`input` shadows the Python builtin).
input_map = {
    label : reader_train.streams.labels,
    input : reader_train.streams.features
}
# Run the trainer on and perform model training
training_progress_output_freq = 500
plotdata = {"batchsize":[], "loss":[], "error":[]}
for i in range(0, int(num_minibatches_to_train)):
    # Read a mini batch from the training data file
    data = reader_train.next_minibatch(minibatch_size, input_map = input_map)
    trainer.train_minibatch(data)
    batchsize, loss, error = print_training_progress(trainer, i, training_progress_output_freq, verbose=1)
    # Record a point only on reporting iterations (non-"NA" values)
    if not (loss == "NA" or error =="NA"):
        plotdata["batchsize"].append(batchsize)
        plotdata["loss"].append(loss)
        plotdata["error"].append(error)
```
Let us plot the errors over the different training minibatches. Note that as we progress in our training, the loss decreases though we do see some intermediate bumps.
```
# Compute the moving average loss to smooth out the noise in SGD
plotdata["avgloss"] = moving_average(plotdata["loss"])
plotdata["avgerror"] = moving_average(plotdata["error"])
# Plot the training loss and the training error
import matplotlib.pyplot as plt
plt.figure(1)
plt.subplot(211)
plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
plt.xlabel('Minibatch number')
plt.ylabel('Loss')
plt.title('Minibatch run vs. Training loss')
# NOTE(review): plt.show() here finalizes figure 1, so the subplot(212)
# below likely lands in a fresh figure — confirm this is the intended
# notebook rendering (two separate plots).
plt.show()
plt.subplot(212)
plt.plot(plotdata["batchsize"], plotdata["avgerror"], 'r--')
plt.xlabel('Minibatch number')
plt.ylabel('Label Prediction Error')
plt.title('Minibatch run vs. Label Prediction Error')
plt.show()
```
## Evaluation / Testing
Now that we have trained the network, let us evaluate the trained network on the test data. This is done using `trainer.test_minibatch`.
```
# Route the raw network activations through softmax to get class probabilities
out = C.softmax(z)
# Read the data for evaluation
reader_eval = create_reader(test_file, False, input_dim, num_output_classes)
eval_minibatch_size = 1
# NOTE(review): `input` is the CNTK input variable defined earlier in the
# notebook (shadows the Python builtin)
eval_input_map = {input: reader_eval.streams.features}
num_samples = 659
num_minibatches_to_test = num_samples // eval_minibatch_size
test_result = 0.0
results=[]
for i in range(num_minibatches_to_test):
    data = reader_eval.next_minibatch(eval_minibatch_size, input_map = eval_input_map)
    #img_label = data[label].asarray()
    img_data = data[input].asarray()
    predicted_label_prob = [out.eval(img_data[i]) for i in range(len(img_data))]
    # Predicted class = argmax over the softmax probabilities
    pred = [np.argmax(predicted_label_prob[i]) for i in range(len(predicted_label_prob))]
    #print(predicted_label_prob)
    results.extend(pred )
    #print(results)
# Save all predicted labels to a CSV named after the network architecture
np.savetxt(str(num_hidden_layers)+"x"+str(hidden_layers_dim)+"_log.csv", np.array(results).astype(int), fmt='%i', delimiter=",")
```
We have so far been dealing with aggregate measures of error. Let us now get the probabilities associated with individual data points. For each observation, the `eval` function returns the probability distribution across all the classes. The classifier is trained to recognize digits, hence has 10 classes. First let us route the network output through a `softmax` function. This maps the aggregated activations across the network to probabilities across the 10 classes.
Let us test a small minibatch sample from the test data.
As you can see above, our model is not yet perfect.
Let us visualize one of the test images and its associated label. Do they match?
**Suggested Explorations**
A. Change the `minibatch_size` parameter (from 64) to 128 and then to 512 during training. What is the observed average test error rate (rounded to 2nd decimal place) with each new model?
B. Increase the number of sweeps. How does the test error change?
C. Can you change the network to reduce the training error rate? When do you see *overfitting* happening?
D. Let's now add more features to our model. We will add the square of the input values as additional features. You will take the input pixels and scale them by 255. Use `C.square` and `C.splice` functions to create a new model. Use this model to perform classification. Note: use the original setting for the rest of the notebook
E. Now add sqrt as another set of features to the model. Use this model to perform classification.
| github_jupyter |
# GeoNet FDSN webservice with Obspy demo - Station Service
This demo introduces some simple code that requests data using [GeoNet's FDSN webservices](http://www.geonet.org.nz/data/tools/FDSN) and the [obspy module](https://github.com/obspy/obspy/wiki) in python. This notebook uses Python 3.
### Getting Started - Import Modules
```
from obspy import UTCDateTime
from obspy.clients.fdsn import Client as FDSN_Client
from obspy import read_inventory
```
### Define GeoNet FDSN client
```
# Connect to GeoNet's FDSN web service endpoint
client = FDSN_Client("GEONET")
```
## Accessing Station Metadata
Use the **station** service to access station metadata from GeoNet stations.
Note that the metadata provided is predominantly associated with the data types available from the FDSN archive, and therefore does not include things such as geodetic station information.
This example gets all stations that are operating at the time of the Kaikoura earthquake and that are located within a 0.5 degrees radius of the epicentre. It lists the station codes and plots them on a map.
```
# Stations within 0.5 degrees of the Kaikoura epicentre that were
# operating during the requested time window
inventory = client.get_stations(latitude=-42.693,longitude=173.022,maxradius=0.5, starttime = "2016-11-13 11:05:00.000",endtime = "2016-11-14 11:00:00.000")
print(inventory)
# Plot on a local-scale map; `_ =` suppresses the figure repr in the notebook
_=inventory.plot(projection="local")
```
The following examples dive into retrieving different information from the inventory object. This object is based on FDSN stationXML and therefore can provide much the same information.
To get all available information into the inventory you will want to request data down to the response level. The default requests information just to a station level. For more information, see the [obspy inventory class](http://docs.obspy.org/packages/obspy.core.inventory.html#module-obspy.core.inventory).
This example gets data from a station, KUZ, and prints a summary of the inventory contents
```
# Request station KUZ down to response level so the full metadata
# (channels + instrument responses) is included in the inventory
inventory = client.get_stations(station="KUZ",level="response",
                                starttime = "2016-11-13 11:05:00.000",endtime = "2016-11-14 11:00:00.000")
print(inventory)
```
Now, we can look at more information, such as specifics about the station. Such as the time it opened and location.
```
# Drill into the inventory hierarchy: inventory -> network -> station
network = inventory[0]
station = network[0] # equivalent to inventory[0][0]
num_channels = len(station)  # a station iterates over its channels
print(station)
```
We can drill down even further into a particular channel and look at the time it was operating for, whether it was continuously recording, the sample rate and some basic sensor information.
```
# One level deeper: a single channel of the station
channel = station[0] # equivalent to inventory[0][0][0]
print(channel)
```
This channel states that there is response information available, so we can look at a summary of the response and plot it.
```
# Instrument response of this channel; plot from 0.001 Hz with
# velocity output units
resp = channel.response
print(resp)
resp.plot(0.001,output="VEL",label='KUZ HHZ')
```
| github_jupyter |
```
import pandas as pd
from shapely.ops import unary_union
import shapely
import geopandas as gpd
from shapely.geometry import Polygon
from shapely.geometry import Point
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import requests
import re
import glob
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
def download(des_loc,
             http_page,
             str1,
             str2):
    """
    @ author: Shervan Gharari
    @ Github: https://github.com/ShervanGharari/shapefile_standardization
    @ author's email id: sh.gharari@gmail.com
    @license: MIT
    Scrape an HTTP index page for links whose names contain both str1 and
    str2, download every matching file, and save it under des_loc.
    Arguments
    ---------
    des_loc: string, destination folder (including the trailing path
        separator) where the downloaded files are written
    http_page: string, URL of the page whose anchor links are scanned
    str1: string, a part of the link name to filter
    str2: string, a second part of the link name to filter
    Returns
    -------
    None
    Side effects
    -------
    Downloads the matching files from the website and saves them in
    des_loc; progress and HTTP status information is printed.
    """
    # first get all the anchor links in the page
    req = Request(http_page)
    html_page = urlopen(req)
    soup = BeautifulSoup(html_page, "lxml")
    links = []
    for link in soup.findAll('a'):
        links.append(link.get('href'))
    # keep only the links whose names contain both filter substrings
    link_to_download = []
    for link in links:
        # if "hillslope" in link and "clean" in link: # links that have cat_pfaf and Basins in them
        if str1 in link and str2 in link: # links that have cat_pfaf and Basins in them
            link_to_download.append(link)
            print(link)
    # create full URLs to download
    urls =[]
    for file_name in link_to_download:
        urls.append(http_page+file_name) # link of the page + file names
        print(http_page+file_name)
    print(urls)
    # loop to download the data
    for url in urls:
        name = url.split('/')[-1] # get the name of the file at the end of the url to download
        r = requests.get(url) # download the URL
        # print the specification of the download
        print(r.status_code, r.headers['content-type'], r.encoding)
        # if download successful the status code is 200; then save the file, else report failure
        if r.status_code == 200:
            print('download was successful for '+url)
            with open(des_loc+name, 'wb') as f:
                f.write(r.content)
        else:
            print('download was not successful for '+url)
```
## Download section and input path
### A list of IDs based on the 2-digit pfaf code is provided for download, along with the path where the downloads are saved and the website to download the model from
```
# the 2 digit pfaf code for the shapefile to be processed
# list of IDs for downloading the processing
IDs = ['11', '12', '13', '14', '15', '16', '17', '18',
       '21', '22', '23', '24', '25', '26', '27', '28', '29',
       '31', '32', '33', '34', '35', '36',
       '41', '42', '43', '44', '45', '46', '47', '48', '49',
       '51', '52', '53', '54', '55', '56', '57',
       '61', '62', '63', '64', '65', '66', '67',
       '71', '72', '73', '74', '75', '76', '77', '78',
       '81', '82', '83', '84', '85', '86',
       '91']
# location of files online
http_path = 'XXX/for_martyn/' # link to the page that the data exists
# path is the location where all the shapefiles and manipulated shapefiles are saved
path = '/Users/shg096/Desktop/MERIT_Hydro/'
# STEP - prepare the folder and subfolders for download; the subfolders
# hold the raw downloads plus the intermediate/fixed processing stages
for sub in ('cat', 'riv', 'hill', 'cat_step_0', 'cat_step_1',
            'cat_fixed', 'ERA5int', 'hill_fixed', 'hill_step_0'):
    if not os.path.exists(path + sub):
        os.mkdir(path + sub)
# download the catchment, river and coastal-hillslope shapefiles for each ID
for ID in IDs:
    download(path + 'cat/',
             http_path + 'MERIT_Hydro_v07_Basins_v01_bugfix1/pfaf_level_02/',
             'cat',
             ID)
    download(path + 'riv/',
             http_path + 'MERIT_Hydro_v07_Basins_v01_bugfix1/pfaf_level_02/',
             'riv',
             ID)
    # BUG FIX: original line was `if ID != '49'` (missing colon -> SyntaxError)
    if ID != '49':  # there is no coastal hillslope for basin 49
        download(path + 'hill/',
                 http_path + 'coastal_hillslopes/',
                 'hill',
                 ID)
```
| github_jupyter |
```
from keras import applications
from keras.models import Sequential, Model
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, Activation, Reshape
from keras.callbacks import CSVLogger
import tensorflow as tf
from scipy.ndimage import imread
import numpy as np
import random
from keras.layers import GRU, CuDNNGRU, LSTM, Input
from keras.layers import Conv1D, MaxPooling1D
from keras.layers.advanced_activations import LeakyReLU
from keras import backend as K
import keras
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.backend.tensorflow_backend import set_session
from keras import optimizers
import h5py
from sklearn.preprocessing import MinMaxScaler
import os
import pandas as pd
import matplotlib.pyplot as plt
import h5py
# Load the preprocessed BTC dataset: scaled input/output windows plus
# the unscaled originals and their timestamps
with h5py.File('../Data/' + ''.join(['BTC.h5']), 'r') as hf:
    datas = hf['inputs'].value
    labels = hf['outputs'].value
    input_times = hf['input_times'].value
    output_times = hf['output_times'].value
    original_inputs = hf['original_inputs'].value
    original_outputs = hf['original_outputs'].value
    original_datas = hf['original_datas'].value
# Scaler used later to map predictions back to price units
scaler=MinMaxScaler((-1, 1))
#split training validation
# training_size = int(0.8* datas.shape[0])
# All but the final window is used for training; the last window is
# held out for validation/plotting
training_size = datas.shape[0] - 1
training_datas = datas[:training_size,:,:]
training_labels = labels[:training_size,:,:]
validation_datas = datas[training_size:,:,:]
validation_labels = labels[training_size:,:,:]
validation_original_outputs = original_outputs[training_size:,:,:]
validation_original_inputs = original_inputs[training_size:,:,:]
validation_input_times = input_times[training_size:,:,:]
validation_output_times = output_times[training_size:,:,:]
validation_size = datas.shape[0] - training_size
# Split labels per feature channel (0/1/2, used below as close/high/low)
# to match the three output heads of the model
training_labels = [np.array(training_labels[:, :, 0]).reshape((training_size, -1)),
                   np.array(training_labels[:, :, 1]).reshape((training_size, -1)),
                   np.array(training_labels[:, :, 2]).reshape((training_size, -1))]
validation_labels = [np.array(validation_labels[:, :, 0]).reshape((validation_size, -1)),
                     np.array(validation_labels[:, :, 1]).reshape((validation_size, -1)),
                     np.array(validation_labels[:, :, 2]).reshape((validation_size, -1))]
# Pin GPU selection and quiet TensorFlow logging
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Grow GPU memory on demand instead of reserving it all upfront
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
# Concatenate the raw input and output windows along the time axis
# so actual prices can be plotted as one continuous series
ground_true = np.append(validation_original_inputs,validation_original_outputs, axis=1)
ground_true_times = np.append(validation_input_times,validation_output_times, axis=1)
print(ground_true_times.shape)
print(ground_true.shape)
# Model hyperparameters derived from the data shape
step_size = datas.shape[1]
batch_size = 8
n_features = datas.shape[2]
epochs = 1
output_size = 12
units = 150
# Legacy single-output GRU model, kept commented out for reference
# model = Sequential()
# model.add(GRU(units=units, activation=None, input_shape=(step_size,nb_features),return_sequences=False))
# model.add(Activation('tanh'))
# model.add(Dropout(0.2))
# model.add(Dense(output_size, activation="linear"))
# model.add(LeakyReLU(alpha=0.001))
# model.load_weights('../weights/BTC_GRU_1_tanh_relu-49-0.00001.hdf5')
# model.compile(loss='mape', optimizer='adam')
# Two stacked GRU layers with dropout feeding three dense heads
# (close / high / low), each predicting output_size future steps
input_layer = Input(shape=(step_size, n_features))
layer_1 = GRU(units=units, return_sequences=True)(input_layer)
layer_1 = Dropout(0.5)(layer_1)
layer_2 = GRU(units=units, return_sequences=False)(layer_1)
layer_2 = Dropout(0.5)(layer_2)
output_1 = Dense(output_size, activation="tanh", name="close_dense")(layer_2)
output_2 = Dense(output_size, activation="tanh", name="high_dense")(layer_2)
output_3 = Dense(output_size, activation="tanh", name="low_dense")(layer_2)
model = Model(inputs=input_layer, outputs=[output_1, output_2, output_3])
# Restore pretrained weights; training is not run in this notebook
model.load_weights('../weights/BTC_GRU_1_tanh_relu-209-0.00000034.hdf5')
model.compile(optimizer="adam", loss=["mse", "mse", "mse"], loss_weights=[0.001, 0.001, 0.001])
predicted = np.array(model.predict(validation_datas))
print(predicted.shape)
# Flatten (heads, batch, steps) into rows of [close, high, low]
# NOTE(review): this reshape reorders elements without a transpose —
# confirm the resulting column/row correspondence is the intended one
predicted = predicted.reshape((predicted.shape[1] * predicted.shape[2], predicted.shape[0]))
predicted_inverted = []
predicted.shape
# Fit the scaler on the raw full series so inverse_transform maps the
# model's (-1, 1) outputs back to price units
scaler.fit(original_datas.reshape(-1, n_features))
# predicted_inverted.append(scaler.inverse_transform(predicted))
predicted_inverted = scaler.inverse_transform(predicted[:, :])
print(np.array(predicted_inverted).shape)
#get only the close data
ground_true = ground_true[:, :, :].reshape(-1, n_features)
ground_true_times = ground_true_times.reshape(-1)
# Timestamps are stored as unix seconds
ground_true_times = pd.to_datetime(ground_true_times, unit='s')
# since we are appending in the first dimension
# predicted_inverted = np.array(predicted_inverted)[0,:,:].reshape(-1)
print(np.array(predicted_inverted).shape)
validation_output_times = pd.to_datetime(validation_output_times.reshape(-1), unit='s')
predicted_inverted[:, 0]
validation_output_times.shape
# Actual prices per timestamp (columns follow the feature order 0/1/2)
ground_true_df = pd.DataFrame()
ground_true_df['times'] = ground_true_times
ground_true_df['close'] = ground_true[:, 0]
ground_true_df['high'] = ground_true[:, 1]
ground_true_df['low'] = ground_true[:, 2]
ground_true_df.set_index('times').reset_index()
ground_true_df.shape
# Predicted prices per forecast timestamp
prediction_df = pd.DataFrame()
prediction_df['times'] = validation_output_times
prediction_df['close'] = predicted_inverted[:, 0]
prediction_df['high'] = predicted_inverted[:, 1]
prediction_df['low'] = predicted_inverted[:, 2]
prediction_df.shape
# Keep only predictions from July 2018 onward for the plot
prediction_df = prediction_df.loc[(prediction_df["times"].dt.year == 2018 )&(prediction_df["times"].dt.month >= 7 ),: ]
# ground_true_df = ground_true_df.loc[(ground_true_df["times"].dt.year >= 2017 )&(ground_true_df["times"].dt.month > 7 ),:]
ground_true_df = ground_true_df.loc[:,:]
# Plot actual close against predicted close/high/low and save the figure
start_idx = 350
plt.figure(figsize=(20,10))
plt.plot(ground_true_df.times[start_idx:],ground_true_df.close[start_idx:], label = 'Actual Close')
# plt.plot(ground_true_df.times[start_idx:],ground_true_df.high[start_idx:], label = 'Actual High')
# plt.plot(ground_true_df.times[start_idx:],ground_true_df.low[start_idx:], label = 'Actual Low')
plt.plot(prediction_df.times,prediction_df.high,'g-', label='Predicted High')
plt.plot(prediction_df.times,prediction_df.close,'r-', label='Predicted Close')
plt.plot(prediction_df.times,prediction_df.low,'b-', label='Predicted Low')
plt.legend(loc='upper left')
plt.grid()
plt.title("Predicted USD for last 7 days from " + str(ground_true_df["times"].dt.date.iloc[-12]) + " to " + str(ground_true_df["times"].dt.date.iloc[-1]))
plt.savefig('../Results/BTC/New/BTC_close_GRU_1_tanh_relu_result.png')
plt.show()
# Quantify prediction quality on the close channel
from sklearn.metrics import mean_squared_error
mean_squared_error(validation_original_outputs[:,:,0].reshape(-1),predicted_inverted[:, 0])
```
| github_jupyter |
Thanks to @christofhenkel, @abhishek and @iezepov for their great work:
https://www.kaggle.com/christofhenkel/how-to-preprocessing-for-glove-part2-usage
https://www.kaggle.com/abhishek/pytorch-bert-inference
https://www.kaggle.com/iezepov/starter-gensim-word-embeddings
```
import sys
package_dir = "../input/ppbert/pytorch-pretrained-bert/pytorch-pretrained-BERT"
sys.path.append(package_dir)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import fastai
from fastai.train import Learner
from fastai.train import DataBunch
from fastai.callbacks import TrainingPhase, GeneralScheduler
from fastai.basic_data import DatasetType
import fastprogress
from fastprogress import force_console_behavior
import numpy as np
from pprint import pprint
import pandas as pd
import os
import time
import gc
import random
from tqdm._tqdm_notebook import tqdm_notebook as tqdm
from keras.preprocessing import text, sequence
import torch
from torch import nn
from torch.utils import data
from torch.nn import functional as F
import torch.utils.data
from tqdm import tqdm
import warnings
from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification, BertAdam
from pytorch_pretrained_bert import BertConfig
from nltk.tokenize.treebank import TreebankWordTokenizer
from gensim.models import KeyedVectors
def convert_lines(example, max_seq_length,tokenizer):
    """Convert an iterable of texts into fixed-length BERT id sequences.

    Each text is tokenized, truncated to fit [CLS]/[SEP], wrapped with
    those markers, converted to ids and right-padded with zeros. Returns
    an array of shape (len(example), max_seq_length).
    """
    max_seq_length -= 2  # reserve room for the [CLS] and [SEP] markers
    ids_per_text = []
    n_truncated = 0      # counted but (as in the original) not returned
    for doc in tqdm(example):
        toks = tokenizer.tokenize(doc)
        if len(toks) > max_seq_length:
            toks = toks[:max_seq_length]
            n_truncated += 1
        padding = [0] * (max_seq_length - len(toks))
        ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + toks + ["[SEP]"]) + padding
        ids_per_text.append(ids)
    return np.array(ids_per_text)
def is_interactive():
    """Heuristic: True when running interactively (no SHLVL shell variable)."""
    return os.environ.get('SHLVL') is None
def seed_everything(seed=123):
    """Seed python, numpy and torch RNGs (and fix cuDNN) for reproducibility."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Deterministic cuDNN kernels trade some speed for repeatable results
    torch.backends.cudnn.deterministic = True
def get_coefs(word, *arr):
    """Pair an embedding-file word with its vector parsed as float32."""
    vector = np.asarray(arr, dtype='float32')
    return word, vector
def load_embeddings(path):
    """Load a pretrained gensim KeyedVectors embedding from `path`."""
    #with open(path,'rb') as f:
    emb_arr = KeyedVectors.load(path)
    return emb_arr
def build_matrix(word_index, path):
    """Build an embedding matrix aligned with `word_index`.

    For each word (up to the module-level max_features) the lookup tries
    the word as-is, then lowercased, then title-cased; words missing in
    all three forms stay zero rows and are returned separately.
    """
    embedding_index = load_embeddings(path)
    embedding_matrix = np.zeros((max_features + 1, 300))
    unknown_words = []
    for word, i in word_index.items():
        if i > max_features:
            continue
        for candidate in (word, word.lower(), word.title()):
            try:
                embedding_matrix[i] = embedding_index[candidate]
                break
            except KeyError:
                continue
        else:
            unknown_words.append(word)
    return embedding_matrix, unknown_words
def sigmoid(x):
    """Logistic function mapping real-valued scores into (0, 1)."""
    return 1.0 / (np.exp(-x) + 1.0)
class SpatialDropout(nn.Dropout2d):
    """Dropout that zeroes entire embedding channels across all timesteps.

    Input is (N, T, K); the tensor is reshaped so Dropout2d treats each of
    the K feature dimensions as a channel, then restored to (N, T, K).
    """
    def forward(self, x):
        # (N, T, K) -> (N, K, 1, T): channels-first view for Dropout2d
        channels_first = x.unsqueeze(2).permute(0, 3, 2, 1)
        dropped = super(SpatialDropout, self).forward(channels_first)
        # back to (N, T, K)
        return dropped.permute(0, 3, 2, 1).squeeze(2)
def train_model(learn,test,output_dim,lr=0.001,
                batch_size=512, n_epochs=4,
                enable_checkpoint_ensemble=True):
    """Train a fastai Learner for n_epochs, predicting on `test` after
    each epoch, and return the (optionally ensemble-weighted) test
    predictions of shape (len(test), output_dim).

    lr decays by a factor of 0.6 each epoch; when
    enable_checkpoint_ensemble is True the per-epoch predictions are
    averaged with weights 2**epoch (later epochs count more).
    """
    all_test_preds = []
    checkpoint_weights = [2 ** epoch for epoch in range(n_epochs)]
    test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
    n = len(learn.data.train_dl)
    # One TrainingPhase per epoch with a geometrically decayed lr
    phases = [(TrainingPhase(n).schedule_hp('lr', lr * (0.6**(i)))) for i in range(n_epochs)]
    sched = GeneralScheduler(learn, phases)
    learn.callbacks.append(sched)
    for epoch in range(n_epochs):
        learn.fit(1)
        test_preds = np.zeros((len(test), output_dim))
        for i, x_batch in enumerate(test_loader):
            X = x_batch[0].cuda()
            # sigmoid converts raw logits to probabilities
            y_pred = sigmoid(learn.model(X).detach().cpu().numpy())
            test_preds[i * batch_size:(i+1) * batch_size, :] = y_pred
        all_test_preds.append(test_preds)
    if enable_checkpoint_ensemble:
        test_preds = np.average(all_test_preds, weights=checkpoint_weights, axis=0)
    else:
        test_preds = all_test_preds[-1]
    return test_preds
def handle_punctuation(x):
    """Strip characters in remove_dict, then space-isolate those in isolate_dict."""
    return x.translate(remove_dict).translate(isolate_dict)
def handle_contractions(x):
    """Split text into word tokens using the module-level Treebank tokenizer."""
    return tokenizer.tokenize(x)
def fix_quote(x):
    """Drop a leading apostrophe from each token and join with spaces."""
    cleaned = []
    for token in x:
        if token.startswith("'"):
            token = token[1:]
        cleaned.append(token)
    return ' '.join(cleaned)
def preprocess(x):
    """Full text cleanup: punctuation handling, tokenization, quote fixing."""
    return fix_quote(handle_contractions(handle_punctuation(x)))
class SequenceBucketCollator():
    """DataLoader collate_fn that trims each batch's padded sequences to a
    per-batch length chosen by `choose_length` (sequence bucketing).

    sequence_index / length_index / label_index identify which elements of
    each dataset item are the token sequence, its length, and the label.
    """
    def __init__(self, choose_length, sequence_index, length_index, label_index=None):
        self.choose_length = choose_length
        self.sequence_index = sequence_index
        self.length_index = length_index
        self.label_index = label_index
    def __call__(self, batch):
        # Stack per-field: list of items -> list of batched tensors
        batch = [torch.stack(x) for x in list(zip(*batch))]
        sequences = batch[self.sequence_index]
        lengths = batch[self.length_index]
        length = self.choose_length(lengths)
        # Keep the trailing columns of the maxlen-padded sequences.
        # NOTE(review): values maxlen..1 compared with `< length` select
        # length-1 columns, not length — confirm against how `length` is
        # computed by the caller (relies on module-level `maxlen`).
        mask = torch.arange(start=maxlen, end=0, step=-1) < length
        padded_sequences = sequences[:, mask]
        batch[self.sequence_index] = padded_sequences
        if self.label_index is not None:
            return [x for i, x in enumerate(batch) if i != self.label_index], batch[self.label_index]
        return batch
class NeuralNet(nn.Module):
    """Two-layer BiLSTM text classifier producing one main logit plus
    num_aux_targets auxiliary logits, on top of frozen pretrained
    embeddings (uses module-level max_features / LSTM_UNITS /
    DENSE_HIDDEN_UNITS constants)."""
    def __init__(self, embedding_matrix, num_aux_targets):
        super(NeuralNet, self).__init__()
        embed_size = embedding_matrix.shape[1]
        # Embedding layer initialised from the pretrained matrix and frozen
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        self.embedding_dropout = SpatialDropout(0.3)
        self.lstm1 = nn.LSTM(embed_size, LSTM_UNITS, bidirectional=True, batch_first=True)
        self.lstm2 = nn.LSTM(LSTM_UNITS * 2, LSTM_UNITS, bidirectional=True, batch_first=True)
        self.linear1 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS)
        self.linear2 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS)
        self.linear_out = nn.Linear(DENSE_HIDDEN_UNITS, 1)
        self.linear_aux_out = nn.Linear(DENSE_HIDDEN_UNITS, num_aux_targets)
    def forward(self, x, lengths=None):
        # `lengths` is accepted for collator compatibility but unused here
        h_embedding = self.embedding(x.long())
        h_embedding = self.embedding_dropout(h_embedding)
        h_lstm1, _ = self.lstm1(h_embedding)
        h_lstm2, _ = self.lstm2(h_lstm1)
        # global average pooling over the time axis
        avg_pool = torch.mean(h_lstm2, 1)
        # global max pooling over the time axis
        max_pool, _ = torch.max(h_lstm2, 1)
        h_conc = torch.cat((max_pool, avg_pool), 1)
        h_conc_linear1 = F.relu(self.linear1(h_conc))
        h_conc_linear2 = F.relu(self.linear2(h_conc))
        # residual-style sum of pooled features and both dense branches
        hidden = h_conc + h_conc_linear1 + h_conc_linear2
        result = self.linear_out(hidden)
        aux_result = self.linear_aux_out(hidden)
        # column 0 = main target logit, remaining columns = aux targets
        out = torch.cat([result, aux_result], 1)
        return out
def custom_loss(data, targets):
    """Sample-weighted BCE on the main target plus BCE on the aux targets.

    targets[:, 0] is the main label, targets[:, 1] its per-sample weight,
    targets[:, 2:] the auxiliary labels; module-level loss_weight scales
    the main term.
    """
    main_loss = nn.BCEWithLogitsLoss(weight=targets[:, 1:2])(data[:, :1], targets[:, :1])
    aux_loss = nn.BCEWithLogitsLoss()(data[:, 1:], targets[:, 2:])
    return main_loss * loss_weight + aux_loss
def reduce_mem_usage(df):
    """Downcast each column of df to the smallest dtype holding its values.

    Integer and float columns are narrowed based on their observed
    min/max; object columns become pandas categoricals. The frame is
    modified in place and returned; memory usage is printed before/after.
    Note: float16 narrowing can lose precision on high-magnitude values.
    """
    start_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    for col in df.columns:
        col_type = df[col].dtype
        if col_type == object:
            df[col] = df[col].astype('category')
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        if str(col_type)[:3] == 'int':
            # smallest integer type whose range strictly contains [min, max]
            for candidate in (np.int8, np.int16, np.int32, np.int64):
                info = np.iinfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            # smallest float type; fall back to float64 otherwise
            for candidate in (np.float16, np.float32):
                info = np.finfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
warnings.filterwarnings(action='once')
device = torch.device('cuda')
# BERT input length and inference batch size
MAX_SEQUENCE_LENGTH = 300
SEED = 1234
BATCH_SIZE = 512
BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/'
# Seed numpy/torch and fix cuDNN for reproducibility
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
bert_config = BertConfig('../input/arti-bert-inference/bert/bert_config.json')
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None,do_lower_case=True)
# Enable DataFrame.progress_apply with a tqdm bar
tqdm.pandas()
CRAWL_EMBEDDING_PATH = '../input/gensim-embeddings-dataset/crawl-300d-2M.gensim'
GLOVE_EMBEDDING_PATH = '../input/gensim-embeddings-dataset/glove.840B.300d.gensim'
# LSTM-model hyperparameters
NUM_MODELS = 2
LSTM_UNITS = 128
DENSE_HIDDEN_UNITS = 4 * LSTM_UNITS
MAX_LEN = 220
# In batch (non-interactive) runs, disable progress bars entirely
if not is_interactive():
    def nop(it, *a, **k):
        return it
    tqdm = nop
    fastprogress.fastprogress.NO_BAR = True
    master_bar, progress_bar = force_console_behavior()
    fastai.basic_train.master_bar, fastai.basic_train.progress_bar = master_bar, progress_bar
seed_everything()
```
**BERT Part**
```
# Load the competition test split and run the fine-tuned BERT classifier.
test_df = pd.read_csv("../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv")
test_df['comment_text'] = test_df['comment_text'].astype(str)
# Tokenize + pad every comment into MAX_SEQUENCE_LENGTH BERT input ids.
X_test = convert_lines(test_df["comment_text"].fillna("DUMMY_VALUE"), MAX_SEQUENCE_LENGTH, tokenizer)
model = BertForSequenceClassification(bert_config, num_labels=1)
model.load_state_dict(torch.load("../input/arti-bert-inference/bert/bert_pytorch.bin"))
model.to(device)
# Inference only: freeze all weights and put dropout/batchnorm in eval mode.
for param in model.parameters():
    param.requires_grad = False
model.eval()
test_preds = np.zeros((len(X_test)))
test = torch.utils.data.TensorDataset(torch.tensor(X_test, dtype=torch.long))
test_loader = torch.utils.data.DataLoader(test, batch_size=512, shuffle=False)
tk0 = tqdm(test_loader)
for i, (x_batch,) in enumerate(tk0):
    # attention_mask: 1 for real tokens (id > 0), 0 for padding.
    pred = model(x_batch.to(device), attention_mask=(x_batch > 0).to(device), labels=None)
    # NOTE: batch slice size (512) must match the DataLoader batch_size above.
    test_preds[i * 512:(i + 1) * 512] = pred[:, 0].detach().cpu().squeeze().numpy()
# Raw logits -> probabilities.
test_pred = torch.sigmoid(torch.tensor(test_preds)).numpy().ravel()
submission_bert = pd.DataFrame.from_dict({
    'id': test_df['id'],
    'prediction': test_pred
})
```
**LSTM Part**
```
# Load training data, downcasting dtypes to save memory.
train_df = reduce_mem_usage(pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv'))
# Punctuation/symbols to surround with spaces so they tokenize separately.
symbols_to_isolate = '.,?!-;*"…:—()%#$&_/@\・ω+=”“[]^–>\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\x96\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁'
# Emoji/control/exotic characters deleted from the text entirely.
symbols_to_delete = '\n🍕\r🐵😑\xa0\ue014\t\uf818\uf04a\xad😢🐶️\uf0e0😜😎👊\u200b\u200e😁عدويهصقأناخلىبمغر😍💖💵Е👎😀😂\u202a\u202c🔥😄🏻💥ᴍʏʀᴇɴᴅᴏᴀᴋʜᴜʟᴛᴄᴘʙғᴊᴡɢ😋👏שלוםבי😱‼\x81エンジ故障\u2009🚌ᴵ͞🌟😊😳😧🙀😐😕\u200f👍😮😃😘אעכח💩💯⛽🚄🏼ஜ😖ᴠ🚲‐😟😈💪🙏🎯🌹😇💔😡\x7f👌ἐὶήιὲκἀίῃἴξ🙄H😠\ufeff\u2028😉😤⛺🙂\u3000تحكسة👮💙فزط😏🍾🎉😞\u2008🏾😅😭👻😥😔😓🏽🎆🍻🍽🎶🌺🤔😪\x08‑🐰🐇🐱🙆😨🙃💕𝘊𝘦𝘳𝘢𝘵𝘰𝘤𝘺𝘴𝘪𝘧𝘮𝘣💗💚地獄谷улкнПоАН🐾🐕😆ה🔗🚽歌舞伎🙈😴🏿🤗🇺🇸мυтѕ⤵🏆🎃😩\u200a🌠🐟💫💰💎эпрд\x95🖐🙅⛲🍰🤐👆🙌\u2002💛🙁👀🙊🙉\u2004ˢᵒʳʸᴼᴷᴺʷᵗʰᵉᵘ\x13🚬🤓\ue602😵άοόςέὸתמדףנרךצט😒͝🆕👅👥👄🔄🔤👉👤👶👲🔛🎓\uf0b7\uf04c\x9f\x10成都😣⏺😌🤑🌏😯ех😲Ἰᾶὁ💞🚓🔔📚🏀👐\u202d💤🍇\ue613小土豆🏡❔⁉\u202f👠》कर्मा🇹🇼🌸蔡英文🌞🎲レクサス😛外国人关系Сб💋💀🎄💜🤢َِьыгя不是\x9c\x9d🗑\u2005💃📣👿༼つ༽😰ḷЗз▱ц🤣卖温哥华议会下降你失去所有的钱加拿大坏税骗子🐝ツ🎅\x85🍺آإشء🎵🌎͟ἔ油别克🤡🤥😬🤧й\u2003🚀🤴ʲшчИОРФДЯМюж😝🖑ὐύύ特殊作戦群щ💨圆明园קℐ🏈😺🌍⏏ệ🍔🐮🍁🍆🍑🌮🌯🤦\u200d𝓒𝓲𝓿𝓵안영하세요ЖљКћ🍀😫🤤ῦ我出生在了可以说普通话汉语好极🎼🕺🍸🥂🗽🎇🎊🆘🤠👩🖒🚪天一家⚲\u2006⚭⚆⬭⬯⏖新✀╌🇫🇷🇩🇪🇮🇬🇧😷🇨🇦ХШ🌐\x1f杀鸡给猴看ʁ𝗪𝗵𝗲𝗻𝘆𝗼𝘂𝗿𝗮𝗹𝗶𝘇𝗯𝘁𝗰𝘀𝘅𝗽𝘄𝗱📺ϖ\u2000үսᴦᎥһͺ\u2007հ\u2001ɩye൦lƽh𝐓𝐡𝐞𝐫𝐮𝐝𝐚𝐃𝐜𝐩𝐭𝐢𝐨𝐧Ƅᴨןᑯ໐ΤᏧ௦Іᴑ܁𝐬𝐰𝐲𝐛𝐦𝐯𝐑𝐙𝐣𝐇𝐂𝐘𝟎ԜТᗞ౦〔Ꭻ𝐳𝐔𝐱𝟔𝟓𝐅🐋ffi💘💓ё𝘥𝘯𝘶💐🌋🌄🌅𝙬𝙖𝙨𝙤𝙣𝙡𝙮𝙘𝙠𝙚𝙙𝙜𝙧𝙥𝙩𝙪𝙗𝙞𝙝𝙛👺🐷ℋ𝐀𝐥𝐪🚶𝙢Ἱ🤘ͦ💸ج패티W𝙇ᵻ👂👃ɜ🎫\uf0a7БУі🚢🚂ગુજરાતીῆ🏃𝓬𝓻𝓴𝓮𝓽𝓼☘﴾̯﴿₽\ue807𝑻𝒆𝒍𝒕𝒉𝒓𝒖𝒂𝒏𝒅𝒔𝒎𝒗𝒊👽😙\u200cЛ‒🎾👹⎌🏒⛸公寓养宠物吗🏄🐀🚑🤷操美𝒑𝒚𝒐𝑴🤙🐒欢迎来到阿拉斯ספ𝙫🐈𝒌𝙊𝙭𝙆𝙋𝙍𝘼𝙅ﷻ🦄巨收赢得白鬼愤怒要买额ẽ🚗🐳𝟏𝐟𝟖𝟑𝟕𝒄𝟗𝐠𝙄𝙃👇锟斤拷𝗢𝟳𝟱𝟬⦁マルハニチロ株式社⛷한국어ㄸㅓ니͜ʖ𝘿𝙔₵𝒩ℯ𝒾𝓁𝒶𝓉𝓇𝓊𝓃𝓈𝓅ℴ𝒻𝒽𝓀𝓌𝒸𝓎𝙏ζ𝙟𝘃𝗺𝟮𝟭𝟯𝟲👋🦊多伦🐽🎻🎹⛓🏹🍷🦆为和中友谊祝贺与其想象对法如直接问用自己猜本传教士没积唯认识基督徒曾经让相信耶稣复活死怪他但当们聊些政治题时候战胜因圣把全堂结婚孩恐惧且栗谓这样还♾🎸🤕🤒⛑🎁批判检讨🏝🦁🙋😶쥐스탱트뤼도석유가격인상이경제황을렵게만들지않록잘관리해야합다캐나에서대마초와화약금의품런성분갈때는반드시허된사용🔫👁凸ὰ💲🗯𝙈Ἄ𝒇𝒈𝒘𝒃𝑬𝑶𝕾𝖙𝖗𝖆𝖎𝖌𝖍𝖕𝖊𝖔𝖑𝖉𝖓𝖐𝖜𝖞𝖚𝖇𝕿𝖘𝖄𝖛𝖒𝖋𝖂𝕴𝖟𝖈𝕸👑🚿💡知彼百\uf005𝙀𝒛𝑲𝑳𝑾𝒋𝟒😦𝙒𝘾𝘽🏐𝘩𝘨ὼṑ𝑱𝑹𝑫𝑵𝑪🇰🇵👾ᓇᒧᔭᐃᐧᐦᑳᐨᓃᓂᑲᐸᑭᑎᓀᐣ🐄🎈🔨🐎🤞🐸💟🎰🌝🛳点击查版🍭𝑥𝑦𝑧NG👣\uf020っ🏉ф💭🎥Ξ🐴👨🤳🦍\x0b🍩𝑯𝒒😗𝟐🏂👳🍗🕉🐲چی𝑮𝗕𝗴🍒ꜥⲣⲏ🐑⏰鉄リ事件ї💊「」\uf203\uf09a\uf222\ue608\uf202\uf099\uf469\ue607\uf410\ue600燻製シ虚偽屁理屈Г𝑩𝑰𝒀𝑺🌤𝗳𝗜𝗙𝗦𝗧🍊ὺἈἡχῖΛ⤏🇳𝒙ψՁմեռայինրւդձ冬至ὀ𝒁🔹🤚🍎𝑷🐂💅𝘬𝘱𝘸𝘷𝘐𝘭𝘓𝘖𝘹𝘲𝘫کΒώ💢ΜΟΝΑΕ🇱♲𝝈↴💒⊘Ȼ🚴🖕🖤🥘📍👈➕🚫🎨🌑🐻𝐎𝐍𝐊𝑭🤖🎎😼🕷grntidufbk𝟰🇴🇭🇻🇲𝗞𝗭𝗘𝗤👼📉🍟🍦🌈🔭《🐊🐍\uf10aლڡ🐦\U0001f92f\U0001f92a🐡💳ἱ🙇𝗸𝗟𝗠𝗷🥜さようなら🔼'
tokenizer = TreebankWordTokenizer()   # NOTE: this name is rebound to a Keras Tokenizer later
# str.translate tables: isolated chars -> ' c ', deleted chars -> ''.
isolate_dict = {ord(c):f' {c} ' for c in symbols_to_isolate}
remove_dict = {ord(c):f'' for c in symbols_to_delete}
# Clean the raw comment text (preprocess is defined elsewhere in the kernel).
x_train = train_df['comment_text'].progress_apply(preprocess)
# Auxiliary toxicity-subtype targets trained alongside the main label.
y_aux_train = train_df[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']]
x_test = test_df['comment_text'].progress_apply(preprocess)
# Identity subgroups used for the competition's bias-aware sample weighting.
identity_columns = [
    'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',
    'muslim', 'black', 'white', 'psychiatric_or_mental_illness']
# Sample weights: every example counts 1/4, plus 1/4 each for (a) mentioning
# any identity, (b) toxic background with no identity mention, (c) non-toxic
# background that does mention an identity.
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `int` is exactly what the old alias resolved to, so behavior is unchanged.
# Overall
weights = np.ones((len(x_train),)) / 4
# Subgroup
weights += (train_df[identity_columns].fillna(0).values >= 0.5).sum(axis=1).astype(bool).astype(int) / 4
# Background Positive, Subgroup Negative
weights += (((train_df['target'].values >= 0.5).astype(bool).astype(int) +
             (train_df[identity_columns].fillna(0).values < 0.5).sum(axis=1).astype(bool).astype(int)) > 1).astype(bool).astype(int) / 4
# Background Negative, Subgroup Positive
weights += (((train_df['target'].values < 0.5).astype(bool).astype(int) +
             (train_df[identity_columns].fillna(0).values >= 0.5).sum(axis=1).astype(bool).astype(int)) > 1).astype(bool).astype(int) / 4
loss_weight = 1.0 / weights.mean()
# Targets stacked with per-sample weights: column 0 = (target >= 0.5), column 1 = weight.
y_train = np.vstack([(train_df['target'].values >= 0.5).astype(int), weights]).T
max_features = 410047   # vocabulary cap for the Keras tokenizer
# Word-level tokenizer over the combined train+test corpus; no filtering and
# case preserved, since the pretrained embeddings are case-sensitive.
tokenizer = text.Tokenizer(num_words = max_features, filters='',lower=False)
tokenizer.fit_on_texts(list(x_train) + list(x_test))
# Look up each vocabulary word in the two pretrained embedding sets.
crawl_matrix, unknown_words_crawl = build_matrix(tokenizer.word_index, CRAWL_EMBEDDING_PATH)
print('n unknown words (crawl): ', len(unknown_words_crawl))
glove_matrix, unknown_words_glove = build_matrix(tokenizer.word_index, GLOVE_EMBEDDING_PATH)
print('n unknown words (glove): ', len(unknown_words_glove))
max_features = max_features or len(tokenizer.word_index) + 1  # fallback only if the cap were falsy
max_features
# Concatenate crawl + glove vectors: one (300+300)-dim embedding per word,
# per the embedding file names — confirm against build_matrix.
embedding_matrix = np.concatenate([crawl_matrix, glove_matrix], axis=-1)
embedding_matrix.shape
# Free the per-source matrices; only the concatenated one is needed.
del crawl_matrix
del glove_matrix
gc.collect()
# Targets: [binary label, sample weight] followed by 6 auxiliary columns.
y_train_torch = torch.tensor(np.hstack([y_train, y_aux_train]), dtype=torch.float32)
# Text -> integer id sequences, then pad/truncate to a fixed length.
x_train = tokenizer.texts_to_sequences(x_train)
x_test = tokenizer.texts_to_sequences(x_test)
lengths = torch.from_numpy(np.array([len(x) for x in x_train]))  # pre-pad lengths, fed to the collator
maxlen = 300
x_train_padded = torch.from_numpy(sequence.pad_sequences(x_train, maxlen=maxlen))
test_lengths = torch.from_numpy(np.array([len(x) for x in x_test]))
x_test_padded = torch.from_numpy(sequence.pad_sequences(x_test, maxlen=maxlen))
batch_size = 512
test_dataset = data.TensorDataset(x_test_padded, test_lengths)
train_dataset = data.TensorDataset(x_train_padded, lengths, y_train_torch)
# Two-sample validation set: presumably a placeholder because fastai's
# Learner requires a valid_dl — confirm against training helper.
valid_dataset = data.Subset(train_dataset, indices=[0, 1])
# Collators receive the batch lengths and the max-length callback; 'lenghts'
# is just a (misspelled) lambda parameter name, harmless.
train_collator = SequenceBucketCollator(lambda lenghts: lenghts.max(),
                                        sequence_index=0,
                                        length_index=1,
                                        label_index=2)
test_collator = SequenceBucketCollator(lambda lenghts: lenghts.max(), sequence_index=0, length_index=1)
train_loader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_collator)
valid_loader = data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, collate_fn=train_collator)
test_loader = data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=test_collator)
databunch = DataBunch(train_dl=train_loader, valid_dl=valid_loader, collate_fn=train_collator)
# Train NUM_MODELS LSTMs from different seeds and average their test
# predictions (a simple seed ensemble).
all_test_preds = []
for model_idx in range(NUM_MODELS):
    print('Model ', model_idx)
    seed_everything(1 + model_idx)
    model = NeuralNet(embedding_matrix, y_aux_train.shape[-1])
    learn = Learner(databunch, model, loss_func=custom_loss)
    test_preds = train_model(learn,test_dataset,output_dim=7)
    all_test_preds.append(test_preds)
# Column 0 of the model output is the main toxicity score.
submission_lstm = pd.DataFrame.from_dict({
    'id': test_df['id'],
    'prediction': np.mean(all_test_preds, axis=0)[:, 0]
})
```
**Blending part**
```
# Blend BERT and LSTM predictions.  Note the nested averaging gives an
# effective weighting of 0.25*BERT + 0.75*LSTM, not an equal blend.
submission = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/sample_submission.csv')
submission['prediction'] = ((submission_bert.prediction + submission_lstm.prediction)/2 + submission_lstm.prediction)/2
submission.to_csv('submission.csv', index=False)
```
| github_jupyter |
# Univariate Plots
(understanding each attribute of dataset independently)
## Histogram
* Histograms group data into bins and provides a count of the number of observations in each bin.
* From the shape of the bins we can quickly get a feeling for whether an attribute is Gaussian, skewed, or even has an exponential distribution.
* It can also help us spot possible outliers.
```
# Load the Pima Indians diabetes CSV (no header row, so column names are
# supplied explicitly) and draw one histogram per attribute.
from matplotlib import pyplot
from pandas import read_csv
filename = 'pima-indians-diabetes.data.csv'
names = ['preg','plas','pres','skin','test','mass','pedi','age','class']
data = read_csv(filename,names=names)
data.hist()
pyplot.show()
```
## Density Plots
* It is also another way to get idea of the distribution of each attribute
* It looks like an abstracted histogram with a smooth curve drawn through the top of each bin.
```
# One density (smoothed-histogram) plot per attribute, laid out on a 3x3
# grid; sharex=False gives each attribute its own x-axis scale.
data.plot(kind='density',subplots=True,layout=(3,3),sharex=False)
pyplot.show()
```
## Box and Whisker Plots
* Boxplots summarize the distribution of each attribute,drawing a line for the median and a box around the 25th and 75th percentiles.
* The whiskers give an idea of the spread of the data and dots outside of the whiskers show candidate outlier values
```
# One box-and-whisker plot per attribute on a 3x3 grid; independent axes so
# attributes with very different ranges remain readable.
data.plot(kind='box',subplots=True,layout=(3,3),sharex=False,sharey=False)
pyplot.show()
```
# Multivariate Plots
(It shows the interactions between multiple variables in our dataset)
## Correlation Matrix Plot
* It gives an indication of how related the changes are between two variables.
* If two variables change in the same direction they are **positively correlated**.
* If they change in opposite directions together then they are **negatively related**.
* Algorithms like *linear and logistic regression* can have poor performance if there are highly correlated input variables in our data.
```
import numpy as np

# Correlation matrix heat map, first with attribute-name tick labels...
corr = data.corr()
fig = pyplot.figure()
axes = fig.add_subplot(111)
heat = axes.matshow(corr, vmin=-1, vmax=1)
fig.colorbar(heat)
tick_positions = np.arange(0, 9, 1)   # one tick per attribute (9 columns)
axes.set_xticks(tick_positions)
axes.set_yticks(tick_positions)
axes.set_xticklabels(names)
axes.set_yticklabels(names)
pyplot.show()

# ...then the same heat map with the default numeric axis labels.
fig = pyplot.figure()
axes = fig.add_subplot(111)
heat = axes.matshow(corr, vmin=-1, vmax=1)
fig.colorbar(heat)
pyplot.show()
```
## Scatter Plot Matrix
* It shows the relationship between two variables as dots in two dimensions,one axis for each attribute
```
# Pairwise scatter plots of all attributes; the diagonal shows each
# attribute's own distribution.
from pandas.plotting import scatter_matrix
scatter_matrix(data)
pyplot.show()
```
## Summary
* Learnt how to analyse data using various plots like histograms, density plots, correlation matrix and scatter plot matrix
| github_jupyter |
<a href="https://www.nvidia.com/en-us/deep-learning-ai/education/"> <img src="images/DLI Header.png" alt="Header" style="width: 400px;"/> </a>
<a href="https://www.mayoclinic.org/"><img src="images/mayologo.png" alt="Mayo Logo"></a>
# Medical Image Classification Using the MedNIST Dataset
### Special thanks to <a href="https://www.mayo.edu/research/labs/radiology-informatics/overview">Dr. Bradley J. Erickson M.D., Ph.D.</a> - Department of Radiology, Mayo Clinic
#### Acknowledgements: <a href="http://www.cancerimagingarchive.net/">The Cancer Imaging Archive (TCIA)</a>; <a href ="http://rsnachallenges.cloudapp.net/competitions/4">Radiological Society of North America</a>; <a href= "http://openaccess.thecvf.com/content_cvpr_2017/papers/Wang_ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf">National Institute of Health</a>
## Introduction
The use of Artificial Intelligence (AI), and deep Convolutional Neural Networks (CNNs) in particular, has led to improvements in the speed of radiological image processing and diagnosis. This speed-up has not come at the price of accuracy; cutting-edge algorithms are comparable to the current standard of care. The best human experts still outperform AI, so the technologies being developed serve as a complement to doctors and researchers, not as their replacement. Thus, it's important that those using these new tools attain some familiarity with their inner workings.
## Outline
<ul>
<li>Discussion of deep learning frameworks</li>
<li>Creating a dataset for training and testing</li>
<li>Transforming and partitioning data</li>
<li>Architecting a CNN</li>
<li>Training the model</li>
<li>Testing on new images</li>
<li>Exercises</li>
</ul>
## Deep Learning and Frameworks
A generic deep neural network consists of a series of <em>layers</em>, running between the input and output layers. Each layer is comprised of <em>nodes</em>, which store intermediate numerical values. The values from each layer are fed to the next after linear transformation by a tensor of <em>weights</em> and a nonlinear <em>activation function</em>. This overall structure is called the <em>architecture</em>.
In <em>supervised learning</em>, which we will be studying here, each input datum (X value) is provided along with a target output or label (Y value). The inputs can be very general types of data: images, sentences, video clips, etc. The outputs are often things like image classes, text sentiments, or object locations.
The X values are mapped through the network to outputs (Y predictions). The Y predictions are compared to the actual Y values, and the difference between them is quantified through a <em>loss function</em>. An <em>optimizer</em> varies the weights of the network over several iterations through the dataset, or <em>epochs</em>, in order to minimize the loss. This process is called <em>training</em> the network.
By providing a large and detailed training dataset, creating an adequately complex network architecture, and training for a sufficient amount of time, the model should be able to predict the correct label for inputs that it has never seen before. Feeding new input into a trained model and making use of its predictions is known as <em>deployment</em>.
The overhead to create and train networks with standard programming libraries is quite large. Fortunately, deep learning enthusiasts have done the heavy lifting in this process by creating specialized libraries, or <em>frameworks</em>, that allow us to condense what would be thousands of lines of tangled code down into a few dozen straightforward and readable ones. The framework used in this lab is PyTorch, which is on the beginner-friendly side while having its own technical advantages of interest to power users, too. Other popular frameworks include TensorFlow, MS Cognitive Toolkit, and MXNet, each of which has unique tradeoffs between ease of use, flexibility, speed, and accuracy.
There are also higher-level frameworks called <em>wrappers</em> that can be set up with simpler code or even graphical interfaces, that in turn are able to switch between several different lower-level frameworks with the setting of a single toggle. A popular code-based wrapper is Keras, while DIGITS is a graphical one.
In the code below, we load the PyTorch framework and other useful libraries. Due to the mathematically intensive nature of training, the code runs much faster on a GPU than on a conventional CPU, so we set parameters allowing for GPU acceleration.
### Using Jupyter
Simply press `Shift+Enter` or the "Run" button in the toolbar above while a cell is highlighted to execute the code contained within. Code can be stopped by pressing the "Stop" button next to the "Run" button. Sometimes, a markdown cell will get switched to editing mode, though changes cannot actually be made. Pressing `Shift+Enter` will switch it back to a readable form.
### Code Block 1
```
import numpy as np
import random
import os
import time
%matplotlib inline
import matplotlib.pyplot as mp
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as om
import torchvision as tv
import torch.utils.data as dat
# Prefer the GPU when CUDA is available; fall back to CPU with a warning.
if torch.cuda.is_available():             # Make sure GPU is available
    dev = torch.device("cuda:0")
    kwar = {'num_workers': 8, 'pin_memory': True}   # DataLoader kwargs suited to GPU transfer
    cpu = torch.device("cpu")
else:
    print("Warning: CUDA not found, CPU only.")
    dev = torch.device("cpu")
    kwar = {}
    cpu = torch.device("cpu")
# Fix all random number generator seeds for reproducibility.
seed = 551
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
```
## Loading Data and Creating Datasets
Due to cost, privacy restrictions, and the rarity of certain conditions, gathering medical datasets can be particularly challenging. Once gathered from disparate sources, these data will need to be standardized for training. Preparing data at this level is beyond the scope of this short introductory primer. We have gathered images from several sets at TCIA, the RSNA Bone Age Challenge, and the NIH Chest X-ray dataset and standardized them to the same size.
The code below examines our image set, organizes the filenames, and displays some statistics about them.
### Code Block 2
```
# Scan the data directory: one subdirectory per image class; build the flat
# list of file paths and the parallel list of integer labels.
dataDir = 'resized'               # The main data directory
classNames = os.listdir(dataDir)  # Each type of image can be found in its own subdirectory
numClass = len(classNames)        # Number of types = number of subdirectories
imageFiles = [[os.path.join(dataDir,classNames[i],x) for x in os.listdir(os.path.join(dataDir,classNames[i]))]
              for i in range(numClass)]  # A nested list of filenames
numEach = [len(imageFiles[i]) for i in range(numClass)]  # A count of each type of image
imageFilesList = []               # Created an un-nested list of filenames
imageClass = []                   # The labels -- the type of each individual image in the list
for i in range(numClass):
    imageFilesList.extend(imageFiles[i])
    imageClass.extend([i]*numEach[i])   # label = class index, repeated once per image
numTotal = len(imageClass)        # Total number of images
imageWidth, imageHeight = Image.open(imageFilesList[0]).size  # The dimensions of each image
# (assumes every image shares the first image's dimensions — TODO confirm)
print("There are",numTotal,"images in",numClass,"distinct categories")
print("Label names:",classNames)
print("Label counts:",numEach)
print("Image dimensions:",imageWidth,"x",imageHeight)
```
Because it is comparable to the <a href="http://yann.lecun.com/exdb/mnist/">MNIST dataset</a>, which has 70,000 total 28 x 28 images of handwritten digits from 0 - 9, we call this the MedNIST dataset. Notice, however, that the data aren't perfectly balanced. We'll address that while training the model.
As the saying goes, a picture is worth 1,000 ± 32 statistics, so let's examine a few random sample images. The following cell can be run repeatedly.
### Code Block 3
```
# Show a 3x3 grid of randomly chosen images labeled with their class names.
mp.subplots(3,3,figsize=(8,8))
for i,k in enumerate(np.random.randint(numTotal, size=9)):  # Take a random sample of 9 images and
    im = Image.open(imageFilesList[k])                      # plot and label them
    arr = np.array(im)
    mp.subplot(3,3,i+1)
    mp.xlabel(classNames[imageClass[k]])
    # Fixed 0-255 grayscale range so images are comparable across subplots.
    mp.imshow(arr,cmap='gray',vmin=0,vmax=255)
mp.tight_layout()
mp.show()
```
## Transforming Data and Partitioning into Training, Validation, and Testing Sets
Depending on the images shown, you may notice a few things. There are definitely higher and lower quality images. Also, some images have a different scale - the background isn't black but gray. Because there's a smaller difference between pixels, our model might have a harder time extracting information from them. Thus, to increase the contrast, we first rescale every image so the pixel values run from 0 to 1.
Next, we subtract the mean pixel value of each individual image from the rest. The network could in principle learn to do this through training. However, the activation functions tend to be most sensitive / nonlinear near zero. Therefore, shifting our data to have an average input value of zero will improve the sensitivity and stability of initial training steps and tends to speed things up a little. While it doesn't matter much for the simple model we use here, such tricks can make a noticeable difference in complex models.
Also, before doing any of these, we'll need to convert the JPEG images into tensors. We define a function below that combines all these steps.
### Code Block 4
```
toTensor = tv.transforms.ToTensor()

def scaleImage(x):
    """Convert a PIL image to a tensor, min-max rescale its values to [0, 1]
    (skipped for constant images), then shift the result to zero mean."""
    t = toTensor(x)
    lo, hi = t.min(), t.max()
    if lo < hi:
        # Non-empty image: stretch the contrast to span the full [0, 1] range.
        t = (t - lo) / (hi - lo)
    # Center at zero so early activations sit in their most sensitive region.
    return t - t.mean()
```
With the image-to-tensor transformation function defined, we now create a master tensor out of all these images. We also create a tensor for the labels. Execution of this code takes a moment. We double check the final range of scaled pixel values and verify that the mean is (practically) zero.
### Code Block 5
```
# Build the master image tensor (stacked, scaled images) and label tensor.
imageTensor = torch.stack([scaleImage(Image.open(x)) for x in imageFilesList])  # Load, scale, and stack image (X) tensor
classTensor = torch.tensor(imageClass)  # Create label (Y) tensor
# Sanity check: print min/max/mean of the scaled stack (mean should be ~0).
print("Rescaled min pixel value = {:1.3}; Max = {:1.3}; Mean = {:1.3}"
      .format(imageTensor.min().item(),imageTensor.max().item(),imageTensor.mean().item()))
```
With everything in order so far, we move on to partitioning these master tensors into three datasets.
Because a model may have millions of free parameters, it is quite possible for it to <em>overfit</em> to the data provided. That is, it may adjust its weights to the precise values needed to predict every given image correctly, yet fail to recognize even slight variations on the original images, let alone brand new ones.
A common solution is to separate data into a training set, used to minimize the loss function, and a validation set, evaluated separately during training without directly affecting the model's weights. However, the validation set may be used to modify the <em>hyperparameters</em> (parameters outside the model governing its training), select the best model every epoch, or indirectly impact the training in some other way. For this reason, a third, independent testing set is usually created for final evaluation once the training is complete.
Because the more data the model sees, the more accurate it tends to become, we usually reserve relatively small fractions for validation and testing.
PyTorch has a built in <a href="https://pytorch.org/docs/stable/_modules/torch/utils/data/dataset.html">Dataset</a> object that can simplify these steps when working with more complex types of data, but in this case, they would require more effort than they save.
The code below will randomly assign approximately 10% of the indices to lists corresponding to the validation and testing sets. Once this is done, we can create these datasets by slicing the master image and label tensors using these lists.
### Code Block 6
```
validFrac = 0.1   # fraction of images reserved for validation
testFrac = 0.1    # fraction of images reserved for testing
validList = []
testList = []
trainList = []
# One uniform draw per image decides which split it lands in.
for idx in range(numTotal):
    draw = np.random.random()
    if draw < validFrac:
        validList.append(idx)
    elif draw < validFrac + testFrac:
        testList.append(idx)
    else:
        trainList.append(idx)
# Count the membership of each split.
nTrain, nValid, nTest = len(trainList), len(validList), len(testList)
print("Training images =", nTrain, "Validation =", nValid, "Testing =", nTest)
```
If we're satisfied with the breakdown into training, validation, and testing, we can now use these lists to slice the master tensors with the code below. If not, we can rerun the cell above with different fractions set aside.
### Code Block 7
```
# Convert the index lists to tensors and slice the master image/label
# tensors into the three datasets.
trainIds = torch.tensor(trainList)  # Slice the big image and label tensors up into
validIds = torch.tensor(validList)  # training, validation, and testing tensors
testIds = torch.tensor(testList)
trainX = imageTensor[trainIds,:,:,:]
trainY = classTensor[trainIds]
validX = imageTensor[validIds,:,:,:]
validY = classTensor[validIds]
testX = imageTensor[testIds,:,:,:]
testY = classTensor[testIds]
```
## Model Architecture
The details of the architecture are explained in the comments within the code, but here we give an overview of the two types of layers encountered.
The first is the <em>convolutional</em> layer. When interpreting an image, the eye first identifies edges and boundaries. Then, one can make out curves, shapes, and more complex structures at higher levels of abstraction. By only combining information from nearby pixels at first, a series of convolutional layers mimics this organic process. The size of the convolution is how many adjacent pixels are weighted and added up when moving to the next layer, and we can apply multiple convolutions to every pixel in an image (or in a higher layer). Pictured below is a single 3 × 3 convolution. The value of each pixel of the convolutional kernel - these are the weights that are trained - is multiplied with the corresponding pixel value within the neighborhood of the original, central image pixel. These products are summed up, and the total is placed in the central pixel (node, to use the nomenclature) of the new layer. The process is repeated for each pixel and each convolution within the layer. Several convolutional layers can be stacked on top of each other; this has the effect of finding increasingly complex features.
<img src="images/Convolution.png" width="600" alt="Convolution">
After several convolutional layers, it is typical to have a few fully connected layers. First, all the information from the last layer is "flattened" into a vector. In a fully connected layer, there are weights connecting every single node (place to store a value) of the input layer to every single node of the output layer - no special preference is given to neighboring nodes as in the pixels of a convolutional layer. The weights multiply the values in the nodes of the input layer, are summed together, and then placed in a node of the output layer. This is repeated for each node in the output layer.
Now, there is one additional step in each of these that has been omitted: application of the activation function. If the linear function `y = 3x + 2` is composed with `z = 4y - 7`, then z is still a linear function of x: `z = 12x + 1`. The same thing is true for linear functions in higher dimensions (multiplication by weights and summing, as we have been doing). Without the activation function, no matter how many layers we stack together, it could ultimately be replaced by a single one. To avoid this, at each output layer, we apply a nonlinear activation function. This need not be the same function at every layer. In this architecture, we choose <a href="http://image-net.org/challenges/posters/JKU_EN_RGB_Schwarz_poster.pdf">ELU</a> functions, but there are many other popular options, such as <a href="https://en.wikipedia.org/wiki/Rectifier_(neural_networks)">ReLU</a>.
### Code Block 8
```
class MedNet(nn.Module):
def __init__(self,xDim,yDim,numC): # Pass image dimensions and number of labels when initializing a model
super(MedNet,self).__init__() # Extends the basic nn.Module to the MedNet class
# The parameters here define the architecture of the convolutional portion of the CNN. Each image pixel
# has numConvs convolutions applied to it, and convSize is the number of surrounding pixels included
# in each convolution. Lastly, the numNodesToFC formula calculates the final, remaining nodes at the last
# level of convolutions so that this can be "flattened" and fed into the fully connected layers subsequently.
# Each convolution makes the image a little smaller (convolutions do not, by default, "hang over" the edges
# of the image), and this makes the effective image dimension decreases.
numConvs1 = 8
convSize1 = 3
numConvs2 = 16
convSize2 = 3
numConvs3 = 32
convSize3 = 3
numConvs4 = 64
convSize4 = 3
# nn.Conv2d(channels in, channels out, convolution height/width)
# 1 channel -- grayscale -- feeds into the first convolution. The same number output from one layer must be
# fed into the next. These variables actually store the weights between layers for the model.
self.cnv1 = nn.Conv2d(1, numConvs1, convSize1)
self.cnv2 = nn.Conv2d(numConvs1, numConvs2, convSize2)
self.mp1 = nn.MaxPool2d((2,2))
self.bn1 = nn.BatchNorm2d(16)
self.dp1 = nn.Dropout(.1)
self.cnv3 = nn.Conv2d(numConvs2, numConvs3, convSize3)
self.cnv4 = nn.Conv2d(numConvs3, numConvs4, convSize4)
self.dp2 = nn.Dropout(.1)
self.mp2 = nn.MaxPool2d((2,2))
self.bn2 = nn.BatchNorm2d(64)
numNodesToFC = int(numConvs4 * (xDim / 4 - 3) * (yDim / 4 - 3))
# These parameters define the number of output nodes of each fully connected layer.
# Each layer must output the same number of nodes as the next layer begins with.
# The final layer must have output nodes equal to the number of labels used.
fcSize1 = 100
# fcSize2 = 20
# nn.Linear(nodes in, nodes out)
# Stores the weights between the fully connected layers
self.ful1 = nn.Linear(numNodesToFC,fcSize1)
# self.ful2 = nn.Linear(fcSize1, fcSize2)
self.ful3 = nn.Linear(fcSize1,numC)
def forward(self,x):
# This defines the steps used in the computation of output from input.
# It makes uses of the weights defined in the __init__ method.
# Each assignment of x here is the result of feeding the input up through one layer.
# Here we use the activation function elu, which is a smoother version of the popular relu function.
x = F.elu(self.cnv1(x)) # Feed through first convolutional layer, then apply activation
x = F.elu(self.cnv2(x)) # Feed through second convolutional layer, apply activation
x = F.elu(self.mp1(x))
x = F.elu(self.bn1(x))
x = F.elu(self.dp1(x))
x = F.elu(self.cnv3(x)) # Feed through thirst convolutional layer, then apply activation
x = F.elu(self.cnv4(x)) # Feed through fourth convolutional layer, apply activation
x = F.elu(self.mp2(x))
x = F.elu(self.bn2(x))
x = F.elu(self.dp2(x))
x = x.view(-1,self.num_flat_features(x)) # Flatten convolutional layer into fully connected layer
x = F.relu(self.ful1(x)) # Feed through first fully connected layer, apply activation
# x = F.elu(self.ful2(x)) # Feed through second FC layer, apply output
x = self.ful3(x) # Final FC layer to output. No activation, because it's used to calculate loss
return x
def num_flat_features(self, x):
    """Return the number of features per sample: the product of every
    dimension of *x* except dimension 0 (the batch dimension)."""
    count = 1
    for dim in x.size()[1:]:  # skip dim 0, which indexes samples in the batch
        count *= dim
    return count
```
With the architecture defined, we create an instance of the model. This single line is separated out so that we can continue or repeat the training code below without resetting the model from scratch, if needed.
### Code Block 9
```
model = MedNet(imageWidth,imageHeight,numClass).to(dev) # Instantiate the network and move its weights to the chosen device
model # Bare expression: in a notebook this echoes the model's layer summary
```
## Training the Model
Now, it's time to train the model. The next code block does so.
First, we define the hyperparameters of the training. The learning rate reflects how much the model is updated per batch. If it is too small, the training proceeds slowly. If it's too large, the weights will be adjusted too much and miss the true minimum loss, or even become unstable. An epoch is a full run through the training data. Some models require thousands of epochs to train; this one will produce high accuracy with just a handful.
We use validation data to prevent overtraining in our model. The training and validation data are drawn from the same set of data; therefore, the model ought to have similar loss for both. Thus, we set a limit on how much larger the validation loss can be than the training loss. Because random fluctuation might account for some discrepancy, we require a few epochs pass with high validation loss before halting.
The memory overhead required to feed large datasets through the model can be prohibitive. <em>Batches</em> are an important workaround for this problem. By loading smaller data subsets onto the GPU and training off of them, we can not only save memory, but also speed up the training by making more adjustments to the model per epoch. Smaller batches generally require smaller learning rates to avoid instability, however, so there is some tradeoff.
Imagine that a dataset had only a handful of examples of a particular label. The model could still achieve high accuracy overall while totally ignoring these. Using weights in the loss function, with larger weights for less numerous classes, is one strategy to combat this. If a class is particularly tiny, however, it is preferable to use data augmentation to generate new images rather than using weights alone, which are equivalent to feeding the same image over and over again through the network.
Now, we move on to the actual training loop. The first real step is to shuffle the data before slicing it into batches. Once again, PyTorch provides a <a href="https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader">DataLoader</a> class that can automate this, but it's about the same difficulty in this example to implement by hand.
Next, we iterate through the batches. We zero out the accumulated information in the optimizer, feed the batch through the model, and compute the loss for a batch. We use the <a href="https://en.wikipedia.org/wiki/Cross_entropy">cross entropy</a>, a common metric for classifiers. This loss is added to a running total for the epoch, and then we <em>backpropagate</em> it. Backpropagation is a mathematical determination of how much each weight in the model should be changed relative to the others to reduce the loss. The optimizer then takes a step and updates the weights.
After all the training batches are complete, the same process happens for the validation data, without the backpropagation and optimization steps. The average loss is calculated, and we compare the validation loss relative to the training loss to test for overfitting.
Run the cell to train the model.
### Code Block 10
```
# --- Training hyperparameters, class weights, and optimizer ---
learnRate = 0.001 # Define a learning rate.
maxEpochs = 20 # Maximum training epochs
t2vRatio = 1.2 # Maximum allowed ratio of validation to training loss
t2vEpochs = 3 # Number of consecutive epochs before halting if validation loss exceeds above limit
batchSize = 300 # Batch size. Going too large will cause an out-of-memory error.
trainBats = nTrain // batchSize # Number of training batches per epoch. Round down to simplify last batch
validBats = nValid // batchSize # Validation batches. Round down
# -(-a // b) is the ceiling-division idiom in pure Python
testBats = -(-nTest // batchSize) # Testing batches. Round up to include all
CEweights = torch.zeros(numClass) # This takes into account the imbalanced dataset.
for i in trainY.tolist(): # By making rarer images count more to the loss,
CEweights[i].add_(1) # we prevent the model from ignoring them (counts per class).
CEweights = 1. / CEweights.clamp_(min=1.) # Weights should be inversely related to count (clamp avoids divide-by-zero)
CEweights = (CEweights * numClass / CEweights.sum()).to(dev) # The weights average to 1
opti = om.Adam(model.parameters(), lr = learnRate) # Initialize an optimizer; 'om' is presumably torch.optim, aliased earlier in the notebook
# Main training loop: one pass over shuffled training batches per epoch, then a
# validation pass (no backprop) used for the early-stopping check.
for i in range(maxEpochs):
    model.train()                       # Set model to training mode
    epochLoss = 0.
    permute = torch.randperm(nTrain)    # Shuffle data to randomize batches
    trainX = trainX[permute,:,:,:]
    trainY = trainY[permute]
    for j in range(trainBats):          # Iterate over batches
        opti.zero_grad()                # Zero out gradient accumulated in optimizer
        batX = trainX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev)  # Slice shuffled data into batches
        batY = trainY[j*batchSize:(j+1)*batchSize].to(dev)        # .to(dev) moves these batches to the GPU
        yOut = model(batX)              # Evaluate predictions
        loss = F.cross_entropy(yOut, batY,weight=CEweights)       # Compute class-weighted loss
        epochLoss += loss.item()        # Add loss
        loss.backward()                 # Backpropagate loss
        opti.step()                     # Update model weights using optimizer
    validLoss = 0.
    permute = torch.randperm(nValid)    # We go through the exact same steps, without backprop / optimization
    validX = validX[permute,:,:,:]      # in order to evaluate the validation loss
    validY = validY[permute]
    model.eval()                        # Set model to evaluation mode
    with torch.no_grad():               # Temporarily turn off gradient tracking
        for j in range(validBats):
            # No opti.zero_grad() here: inside torch.no_grad() no gradients
            # are produced, so the original call was a no-op and is removed.
            batX = validX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev)
            batY = validY[j*batchSize:(j+1)*batchSize].to(dev)
            yOut = model(batX)
            validLoss += F.cross_entropy(yOut, batY,weight=CEweights).item()
    epochLoss /= trainBats              # Average loss over batches and print
    validLoss /= validBats
    print("Epoch = {:-3}; Training loss = {:.4f}; Validation loss = {:.4f}".format(i,epochLoss,validLoss))
    if validLoss > t2vRatio * epochLoss:
        t2vEpochs -= 1                  # Test if validation loss exceeds halting threshold
        if t2vEpochs < 1:
            print("Validation loss too high; halting to prevent overfitting")
            break
# Fill the confusion matrix from the test set: rows = actual class, cols = predicted class.
confuseMtx = np.zeros((numClass,numClass),dtype=int)  # Create empty confusion matrix
model.eval()
with torch.no_grad():
    permute = torch.randperm(nTest)     # Shuffle test data
    testX = testX[permute,:,:,:]
    testY = testY[permute]
    for j in range(testBats):           # Iterate over test batches
        batX = testX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev)
        batY = testY[j*batchSize:(j+1)*batchSize].to(dev)
        yOut = model(batX)              # Pass test batch through model
        pred = yOut.max(1,keepdim=True)[1]  # Generate predictions by finding the max Y values
        # Glue actual and predicted together into (row, col) pairs. The inner
        # loop variable is named 'pair' (the original reused 'j', shadowing the
        # batch index above).
        for pair in torch.cat((batY.view_as(pred), pred),dim=1).tolist():
            confuseMtx[pair[0],pair[1]] += 1  # increment confusion matrix
correct = sum([confuseMtx[i,i] for i in range(numClass)])  # Sum over diagonal elements to count correct predictions
print("Correct predictions: ",correct,"of",nTest)
print("Confusion Matrix:")
print(confuseMtx)
print(classNames)
print(f'{nTest-correct}')
print(f'{correct/nTest}')
```
It is most likely that training was halted early to prevent overfitting. Still, the final loss should be roughly 0.01, a huge improvement on the random guessing that the model begins with. (These are slightly dependent on how the random numbers pan out based on how many times earlier cells were executed)
You may have also noticed that the training loss is quite a bit larger than the validation loss for the first several training steps. <b>Based on what you've learned about the training process, can you explain why this happens?</b>
## Testing the Model on New Data
With the model fully trained, it's time to apply it to generate predictions from the test dataset. The model outputs a 6 element vector for each image. The individual values of this vector can be thought of, roughly, as relative probabilities that the image belongs in each class. Thus, we consider the class with the maximum value to be the prediction of the model.
We'll use these predictions to generate a confusion matrix. Despite its name, the confusion matrix is easily understood. The rows in the matrix represent the correct classifications, while the columns represent the predictions of the model. When the row and the column agree (i.e., along the diagonal), the model predicted correctly.
The short code snippet below iterates through the test batches and fills the confusion matrix.
### Code Block 11
```
# Repeat the full train/validate/test cycle 100 times to measure the
# distribution of test-set error counts. This duplicates Code Block 10;
# indentation was lost in this export -- everything down through
# train_run_errors.append(...) runs inside the 'for tr' loop.
train_run_errors = []
for tr in range(100):
torch.cuda.empty_cache()
model = MedNet(imageWidth,imageHeight,numClass).to(dev)
learnRate = 0.001 # Define a learning rate.
maxEpochs = 20 # Maximum training epochs
t2vRatio = 1.2 # Maximum allowed ratio of validation to training loss
t2vEpochs = 3 # Number of consecutive epochs before halting if validation loss exceeds above limit
batchSize = 300 # Batch size. Going too large will cause an out-of-memory error.
trainBats = nTrain // batchSize # Number of training batches per epoch. Round down to simplify last batch
validBats = nValid // batchSize # Validation batches. Round down
testBats = -(-nTest // batchSize) # Testing batches. Round up to include all
CEweights = torch.zeros(numClass) # This takes into account the imbalanced dataset.
for i in trainY.tolist(): # By making rarer images count more to the loss,
CEweights[i].add_(1) # we prevent the model from ignoring them.
CEweights = 1. / CEweights.clamp_(min=1.) # Weights should be inversely related to count
CEweights = (CEweights * numClass / CEweights.sum()).to(dev) # The weights average to 1
opti = om.Adam(model.parameters(), lr = learnRate) # Initialize an optimizer
for i in range(maxEpochs):
model.train() # Set model to training mode
epochLoss = 0.
permute = torch.randperm(nTrain) # Shuffle data to randomize batches
trainX = trainX[permute,:,:,:]
trainY = trainY[permute]
for j in range(trainBats): # Iterate over batches
opti.zero_grad() # Zero out gradient accumulated in optimizer
batX = trainX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev) # Slice shuffled data into batches
batY = trainY[j*batchSize:(j+1)*batchSize].to(dev) # .to(dev) moves these batches to the GPU
yOut = model(batX) # Evaluate predictions
loss = F.cross_entropy(yOut, batY,weight=CEweights) # Compute loss
epochLoss += loss.item() # Add loss
loss.backward() # Backpropagate loss
opti.step() # Update model weights using optimizer
validLoss = 0.
permute = torch.randperm(nValid) # We go through the exact same steps, without backprop / optimization
validX = validX[permute,:,:,:] # in order to evaluate the validation loss
validY = validY[permute]
model.eval() # Set model to evaluation mode
with torch.no_grad(): # Temporarily turn off gradient descent
for j in range(validBats):
opti.zero_grad() # no-op inside torch.no_grad(); kept as in Code Block 10
batX = validX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev)
batY = validY[j*batchSize:(j+1)*batchSize].to(dev)
yOut = model(batX)
validLoss += F.cross_entropy(yOut, batY,weight=CEweights).item()
epochLoss /= trainBats # Average loss over batches and print
validLoss /= validBats
print("Epoch = {:-3}; Training loss = {:.4f}; Validation loss = {:.4f}".format(i,epochLoss,validLoss))
if validLoss > t2vRatio * epochLoss:
t2vEpochs -= 1 # Test if validation loss exceeds halting threshold
if t2vEpochs < 1:
print("Validation loss too high; halting to prevent overfitting")
break
confuseMtx = np.zeros((numClass,numClass),dtype=int) # Create empty confusion matrix
model.eval()
with torch.no_grad():
permute = torch.randperm(nTest) # Shuffle test data
testX = testX[permute,:,:,:]
testY = testY[permute]
for j in range(testBats): # Iterate over test batches
batX = testX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev)
batY = testY[j*batchSize:(j+1)*batchSize].to(dev)
yOut = model(batX) # Pass test batch through model
pred = yOut.max(1,keepdim=True)[1] # Generate predictions by finding the max Y values
for j in torch.cat((batY.view_as(pred), pred),dim=1).tolist(): # Glue together Actual and Predicted to
confuseMtx[j[0],j[1]] += 1 # make (row, col) pairs, and increment confusion matrix
correct = sum([confuseMtx[i,i] for i in range(numClass)]) # Sum over diagonal elements to count correct predictions
train_run_errors.append(nTest-correct) # Record how many test images this run got wrong
# Summary statistics over the 100 runs (bare expressions echo in a notebook)
train_run_errors = np.array(train_run_errors)
train_run_errors.mean()
train_run_errors.sort()
# Mean after discarding the 6 largest error counts (array is sorted ascending)
train_run_errors[:-6].mean()
# 'mp' is presumably matplotlib.pyplot, aliased in an earlier cell -- confirm
mp.hist(train_run_errors)
mp.boxplot(train_run_errors)
```
You're likely to see 99%+ accuracy. Not bad for a fairly minimal model that trains in just a couple minutes. Now look at the confusion matrix. Notice that some mistakes are more common than others - <b>does this type of confusion make sense to you?</b>
Before we get to the exercises, let's take a look at some of the images that confused the model. This cell can be rerun to produce more examples.
### Code Block 12
```
def scaleBack(x): # Pass a tensor, return a numpy array from 0 to 1
"""Min-max normalize a single-image tensor to [0, 1] and return it as a 2-D numpy array. 'cpu' is presumably a torch device object defined in an earlier cell -- confirm."""
if(x.min() < x.max()): # Assuming the image isn't empty, rescale so its values run from 0 to 1
x = (x - x.min())/(x.max() - x.min())
return x[0].to(cpu).numpy() # Remove channel (grayscale anyway)
# Display up to 9 misclassified test images in a 3x3 grid, labeled with the
# (incorrect) class the model predicted.
model.eval()
mp.subplots(3,3,figsize=(8,8))
imagesLeft = 9 # Stop collecting examples once 9 have been plotted
permute = torch.randperm(nTest) # Shuffle test data
testX = testX[permute,:,:,:]
testY = testY[permute]
for j in range(testBats): # Iterate over test batches
batX = testX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev)
batY = testY[j*batchSize:(j+1)*batchSize].to(dev)
yOut = model(batX) # Pass test batch through model
pred = yOut.max(1)[1].tolist() # Generate predictions by finding the max Y values
for i, y in enumerate(batY.tolist()):
if imagesLeft and y != pred[i]: # Compare the actual y value to the prediction
imagesLeft -= 1
mp.subplot(3,3,9-imagesLeft)
mp.xlabel(classNames[pred[i]]) # Label image with what the model thinks it is
mp.imshow(scaleBack(batX[i]),cmap='gray',vmin=0,vmax=1)
mp.tight_layout()
mp.show()
# NOTE(review): 'pwd' and 'ls' are IPython shell auto-magics; they only work in
# a notebook/IPython session, not in plain Python.
pwd
ls
# First export attempt: input tensor still on the GPU ('cuda')
permute = torch.randperm(nTest, device='cuda')
x = testX[permute,:,:,:]
torch.onnx.export(model,
x,
"model_opti.onnx",
export_params=True,
opset_version=10,
do_constant_folding=True,
input_names = ['input'],
output_names = ['output'],
dynamic_axes={'input' : {0 : 'batch_size'},
'output' : {0 : 'batch_size'}})
# Input to the model
permute = torch.randperm(nTest) # Shuffle test data
x = testX[permute,:,:,:]
model.to('cpu') # Second export: move model to CPU first
# torch_out = model(x)
# Export the model
torch.onnx.export(model, # model being run
x, # model input (or a tuple for multiple inputs)
"MedNet.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ['input'], # the model's input names
output_names = ['output'], # the model's output names
dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes
'output' : {0 : 'batch_size'}})
```
Some of these images are indeed confusing, even for a human observer. Yet other ones are harder to fathom - why did a model with 99% accuracy misclassify what is so obviously a hand, for example? The field of <a href="https://medium.com/@jrzech/what-are-radiological-deep-learning-models-actually-learning-f97a546c5b98">interpretability is beginning to explore these questions</a>.
## Exercises
It may be useful to restart the kernel (under Kernel menu) to clear the memory between exercises, or even to copy the notebook (under File menu) and do each exercise in a clean copy. Because time may be limited, scan through the exercises and start with the ones that most appeal to you. Exercises 1, 2, and 3 are particularly recommended if you are new to deep learning.
<ol>
<li>Without resetting the kernel, increase <code>t2vRatio</code> in code block 10 and continue the training. Can you improve the final accuracy observed on the test dataset this way? If so, does it improve as much as the change in training loss would seem to indicate? What might this suggest about the relative value of architecting to training?</li><hr>
<li>Reset the model by running code block 9, then modify the hyperparameters and retrain the model in code block 10. The most interesting ones are the learning rate (larger or smaller) and the batch size (smaller works well; when increasing the batch size, you will run out of memory somewhere in the low thousands). Note the effects on training. Can you make it converge faster than the default values?</li><hr>
<li>Modify the architecture in code block 8 and note the effects on the training speed and final accuracy
<ol>
<li> Easy: Change the number of convolutions, the size of the convolutions, and the number of fully connected layers</li>
<li> Medium: Add additional convolutional and/or fully connected layers. Use the existing code for reference.</li>
<li> Hard: Add <a href="https://pytorch.org/docs/stable/_modules/torch/nn/modules/batchnorm.html#BatchNorm1d">batchnorm</a>, <a href="https://pytorch.org/docs/stable/_modules/torch/nn/modules/pooling.html#MaxPool2d">maxpool</a>, and/or <a href="https://pytorch.org/docs/stable/_modules/torch/nn/modules/dropout#Dropout">dropout</a> layers.</li>
</ol>
</li><hr>
<li>Unbalance the classes. This is a common problem in medical imaging, and can be done by adding a single line of code in code block 2: <code>imageFiles[5] = imageFiles[5][:-NNN]</code> where <code>NNN</code> is the number images from the final class to remove. You could also replace the 5 with 0 - 4, instead. Insert this line between <code>imageFiles = ...</code> and <code>numEach = ...</code> How small of a set can you have while still getting good results for this class in the confusion matrix? This exercise combines well with the next one.</li><hr>
<li>Remove the weights for the loss function by adding <code>CEweights = torch.ones(numClass).to(dev)</code> after the line <code>opti = ...</code> in code block 10. What effect does this have in the confusion matrix when identifying the rarer image class? You could also implement custom weights by using <code>CEweights = torch.tensor([a,b,c,d,e,f]).to(dev)</code> where <code>a ... f</code> are floating point numbers. In this case, note the effects of having one or more relatively large weights.</li><hr>
<li>Remove one or both of the modifications to the tensor from code block 4. Note the effects on the early training and final accuracy.</li><hr>
<li><b>Final challenge:</b> By using experience gained from the previous exercises, you can adjust the architecture and training to make a more accurate final model. Can your improved model make fewer than 10 mistakes on the testing set?</li>
</ol>
<a href="hints.txt">Hints and partial solutions to the exercises</a>
| github_jupyter |
# python-sonic - Programming Music with Python, Sonic Pi or Supercollider
Python-Sonic is a simple Python interface for Sonic Pi, which is a real great music software created by Sam Aaron (http://sonic-pi.net).
At the moment Python-Sonic works with Sonic Pi. It is planned, that it will work with Supercollider, too.
If you like it, use it. If you have some suggestions, tell me (gkvoelkl@nelson-games.de).
## Installation
* First you need Python 3 (https://www.python.org, ) - Python 3.5 should work, because it's the development environment
* Then Sonic Pi (https://sonic-pi.net) - That makes the sound
* Modul python-osc (https://pypi.python.org/pypi/python-osc) - Connection between Python and Sonic Pi Server
* And this modul python-sonic - simply copy the source
Or try
That should work.
## Limitations
* You have to start _Sonic Pi_ first before you can use it with python-sonic
* Only the notes from C2 to C6
## Changelog
|Version | |
|--------------|------------------------------------------------------------------------------------------|
| 0.2.0 | Some changes for Sonic Pi 2.11. Simpler multi-threading with decorator *@in_thread*. Messaging with *cue* and *sync*. |
| 0.3.0 | OSC Communication |
## Examples
Many of the examples are inspired from the help menu in *Sonic Pi*.
```
from psonic import *
```
The first sound
```
play(70) #play MIDI note 70
```
Some more notes
```
play(72)
sleep(1)
play(75)
sleep(1)
play(79)
```
In more traditional music notation
```
play(C5)
sleep(0.5)
play(D5)
sleep(0.5)
play(G5)
```
Play sharp notes like *F#* or dimished ones like *Eb*
```
play(Fs5)
sleep(0.5)
play(Eb5)
```
Play louder (parameter amp) or from a different direction (parameter pan)
```
play(72,amp=2)
sleep(0.5)
play(74,pan=-1) #left
```
Different synthesizer sounds
```
use_synth(SAW)
play(38)
sleep(0.25)
play(50)
sleep(0.5)
use_synth(PROPHET)
play(57)
sleep(0.25)
```
ADSR *(Attack, Decay, Sustain and Release)* Envelope
```
play (60, attack=0.5, decay=1, sustain_level=0.4, sustain=2, release=0.5)
sleep(4)
```
Play some samples
```
sample(AMBI_LUNAR_LAND, amp=0.5)
sample(LOOP_AMEN,pan=-1)
sleep(0.877)
sample(LOOP_AMEN,pan=1)
sample(LOOP_AMEN,rate=0.5)
sample(LOOP_AMEN,rate=1.5)
sample(LOOP_AMEN,rate=-1)#back
sample(DRUM_CYMBAL_OPEN,attack=0.01,sustain=0.3,release=0.1)
sample(LOOP_AMEN,start=0.5,finish=0.8,rate=-0.2,attack=0.3,release=1)
```
Play some random notes
```
import random
for i in range(5):
play(random.randrange(50, 100))
sleep(0.5)
for i in range(3):
play(random.choice([C5,E5,G5]))
sleep(1)
```
Sample slicing
```
from psonic import *
number_of_pieces = 8
for i in range(16):
s = random.randrange(0,number_of_pieces)/number_of_pieces #sample starts at 0.0 and finishes at 1.0
f = s + (1.0/number_of_pieces)
sample(LOOP_AMEN,beat_stretch=2,start=s,finish=f)
sleep(2.0/number_of_pieces)
```
An infinite loop and if
```
while True:
if one_in(2):
sample(DRUM_HEAVY_KICK)
sleep(0.5)
else:
sample(DRUM_CYMBAL_CLOSED)
sleep(0.25)
```
If you want to hear more than one sound at a time, use Threads.
```
import random
from psonic import *
from threading import Thread
def bass_sound():
c = chord(E3, MAJOR7)
while True:
use_synth(PROPHET)
play(random.choice(c), release=0.6)
sleep(0.5)
def snare_sound():
while True:
sample(ELEC_SNARE)
sleep(1)
bass_thread = Thread(target=bass_sound)
snare_thread = Thread(target=snare_sound)
bass_thread.start()
snare_thread.start()
while True:
pass
```
The functions *bass_sound* and *snare_sound* each have their own thread. You can hear them running.
```
from psonic import *
from threading import Thread, Condition
from random import choice
def random_riff(condition):
use_synth(PROPHET)
sc = scale(E3, MINOR)
while True:
s = random.choice([0.125,0.25,0.5])
with condition:
condition.wait() #Wait for message
for i in range(8):
r = random.choice([0.125, 0.25, 1, 2])
n = random.choice(sc)
co = random.randint(30,100)
play(n, release = r, cutoff = co)
sleep(s)
def drums(condition):
while True:
with condition:
condition.notifyAll() #Message to threads
for i in range(16):
r = random.randrange(1,10)
sample(DRUM_BASS_HARD, rate=r)
sleep(0.125)
condition = Condition()
random_riff_thread = Thread(name='consumer1', target=random_riff, args=(condition,))
drums_thread = Thread(name='producer', target=drums, args=(condition,))
random_riff_thread.start()
drums_thread.start()
input("Press Enter to continue...")
```
To synchronize the threads, so that they play a note at the same time, you can use *Condition*. One function sends a message with *condition.notifyAll*; the other waits until the message arrives with *condition.wait*.
More simple with decorator __@in_thread__
```
from psonic import *
from random import choice
tick = Message()
@in_thread
def random_riff():
use_synth(PROPHET)
sc = scale(E3, MINOR)
while True:
s = random.choice([0.125,0.25,0.5])
tick.sync()
for i in range(8):
r = random.choice([0.125, 0.25, 1, 2])
n = random.choice(sc)
co = random.randint(30,100)
play(n, release = r, cutoff = co)
sleep(s)
@in_thread
def drums():
while True:
tick.cue()
for i in range(16):
r = random.randrange(1,10)
sample(DRUM_BASS_HARD, rate=r)
sleep(0.125)
random_riff()
drums()
input("Press Enter to continue...")
from psonic import *
tick = Message()
@in_thread
def metronom():
while True:
tick.cue()
sleep(1)
@in_thread
def instrument():
while True:
tick.sync()
sample(DRUM_HEAVY_KICK)
metronom()
instrument()
while True:
pass
```
Play a list of notes
```
from psonic import *
play ([64, 67, 71], amp = 0.3)
sleep(1)
play ([E4, G4, B4])
sleep(1)
```
Play chords
```
play(chord(E4, MINOR))
sleep(1)
play(chord(E4, MAJOR))
sleep(1)
play(chord(E4, MINOR7))
sleep(1)
play(chord(E4, DOM7))
sleep(1)
```
Play arpeggios
```
play_pattern( chord(E4, 'm7'))
play_pattern_timed( chord(E4, 'm7'), 0.25)
play_pattern_timed(chord(E4, 'dim'), [0.25, 0.5])
```
Play scales
```
play_pattern_timed(scale(C3, MAJOR), 0.125, release = 0.1)
play_pattern_timed(scale(C3, MAJOR, num_octaves = 2), 0.125, release = 0.1)
play_pattern_timed(scale(C3, MAJOR_PENTATONIC, num_octaves = 2), 0.125, release = 0.1)
```
The function *scale* returns a list with all notes of a scale. So you can use list methods or functions. For example, to play arpeggios descending or shuffled.
```
import random
from psonic import *
s = scale(C3, MAJOR)
s
s.reverse()
play_pattern_timed(s, 0.125, release = 0.1)
random.shuffle(s)
play_pattern_timed(s, 0.125, release = 0.1)
```
### Live Loop
One of the best in SONIC PI is the _Live Loop_. While a loop is playing music you can change it and hear the change. Let's try it in Python, too.
```
from psonic import *
from threading import Thread
def my_loop():
play(60)
sleep(1)
def looper():
while True:
my_loop()
looper_thread = Thread(name='looper', target=looper)
looper_thread.start()
input("Press Enter to continue...")
```
Now change the function *my_loop* and you can hear it.
```
def my_loop():
use_synth(TB303)
play (60, release= 0.3)
sleep (0.25)
def my_loop():
use_synth(TB303)
play (chord(E3, MINOR), release= 0.3)
sleep(0.5)
def my_loop():
use_synth(TB303)
sample(DRUM_BASS_HARD, rate = random.uniform(0.5, 2))
play(random.choice(chord(E3, MINOR)), release= 0.2, cutoff=random.randrange(60, 130))
sleep(0.25)
```
To stop the sound you have to end the kernel. In IPython with Kernel --> Restart
Now with two live loops which are synch.
```
from psonic import *
from threading import Thread, Condition
from random import choice
def loop_foo():
play (E4, release = 0.5)
sleep (0.5)
def loop_bar():
sample (DRUM_SNARE_SOFT)
sleep (1)
def live_loop_1(condition):
while True:
with condition:
condition.notifyAll() #Message to threads
loop_foo()
def live_loop_2(condition):
while True:
with condition:
condition.wait() #Wait for message
loop_bar()
condition = Condition()
live_thread_1 = Thread(name='producer', target=live_loop_1, args=(condition,))
live_thread_2 = Thread(name='consumer1', target=live_loop_2, args=(condition,))
live_thread_1.start()
live_thread_2.start()
input("Press Enter to continue...")
def loop_foo():
play (A4, release = 0.5)
sleep (0.5)
def loop_bar():
sample (DRUM_HEAVY_KICK)
sleep (0.125)
```
It would be nice if we could stop the loop with a simple command. With a stop event it works.
```
from psonic import *
from threading import Thread, Condition, Event
def loop_foo():
play (E4, release = 0.5)
sleep (0.5)
def loop_bar():
sample (DRUM_SNARE_SOFT)
sleep (1)
def live_loop_1(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.notifyAll() #Message to threads
loop_foo()
def live_loop_2(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
loop_bar()
condition = Condition()
stop_event = Event()
live_thread_1 = Thread(name='producer', target=live_loop_1, args=(condition,stop_event))
live_thread_2 = Thread(name='consumer1', target=live_loop_2, args=(condition,stop_event))
live_thread_1.start()
live_thread_2.start()
input("Press Enter to continue...")
stop_event.set()
```
More complex live loops
```
sc = Ring(scale(E3, MINOR_PENTATONIC))
def loop_foo():
play (next(sc), release= 0.1)
sleep (0.125)
sc2 = Ring(scale(E3,MINOR_PENTATONIC,num_octaves=2))
def loop_bar():
use_synth(DSAW)
play (next(sc2), release= 0.25)
sleep (0.25)
```
Now a simple structure with four live loops
```
import random
from psonic import *
from threading import Thread, Condition, Event
# Four placeholder "live" functions -- redefine them while the loops run to
# change the music -- plus one producer and three consumer loop drivers.
def live_1():
pass
def live_2():
pass
def live_3():
pass
def live_4():
pass
# Producer: wakes all waiting consumers each beat via the shared Condition.
# NOTE(review): notifyAll() is a deprecated alias of notify_all() since Python 3.10.
def live_loop_1(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.notifyAll() #Message to threads
live_1()
# Consumers: block until the producer signals, then play their part.
def live_loop_2(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
live_2()
def live_loop_3(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
live_3()
def live_loop_4(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
live_4()
# Wire up one producer and three consumer threads sharing one Condition, then start them.
condition = Condition()
stop_event = Event()
live_thread_1 = Thread(name='producer', target=live_loop_1, args=(condition,stop_event))
live_thread_2 = Thread(name='consumer1', target=live_loop_2, args=(condition,stop_event))
live_thread_3 = Thread(name='consumer2', target=live_loop_3, args=(condition,stop_event))
# BUG FIX: this thread originally targeted live_loop_3 again, so live_4 never ran.
live_thread_4 = Thread(name='consumer3', target=live_loop_4, args=(condition,stop_event))
live_thread_1.start()
live_thread_2.start()
live_thread_3.start()
live_thread_4.start()
input("Press Enter to continue...")
```
After starting the loops you can change them
```
def live_1():
sample(BD_HAUS,amp=2)
sleep(0.5)
pass
def live_2():
#sample(AMBI_CHOIR, rate=0.4)
#sleep(1)
pass
def live_3():
use_synth(TB303)
play(E2, release=4,cutoff=120,cutoff_attack=1)
sleep(4)
def live_4():
notes = scale(E3, MINOR_PENTATONIC, num_octaves=2)
for i in range(8):
play(random.choice(notes),release=0.1,amp=1.5)
sleep(0.125)
```
And stop.
```
stop_event.set()
```
### Creating Sound
```
from psonic import *
synth(SINE, note=D4)
synth(SQUARE, note=D4)
synth(TRI, note=D4, amp=0.4)
detune = 0.7
synth(SQUARE, note = E4)
synth(SQUARE, note = E4+detune)
detune=0.1 # Amplitude shaping
synth(SQUARE, note = E2, release = 2)
synth(SQUARE, note = E2+detune, amp = 2, release = 2)
synth(GNOISE, release = 2, amp = 1, cutoff = 60)
synth(GNOISE, release = 0.5, amp = 1, cutoff = 100)
synth(NOISE, release = 0.2, amp = 1, cutoff = 90)
```
### Next Step
Using FX *Not implemented yet*
```
from psonic import *
with Fx(SLICER):
synth(PROPHET,note=E2,release=8,cutoff=80)
synth(PROPHET,note=E2+4,release=8,cutoff=80)
with Fx(SLICER, phase=0.125, probability=0.6,prob_pos=1):
synth(TB303, note=E2, cutoff_attack=8, release=8)
synth(TB303, note=E3, cutoff_attack=4, release=8)
synth(TB303, note=E4, cutoff_attack=2, release=8)
```
## OSC Communication (Sonic Pi Ver. 3.x or better)
In Sonic Pi version 3 or better you can work with messages.
```
from psonic import *
```
First you need a program in the Sonic Pi server that receives messages. You can write it in the GUI or send one with Python.
```
run("""live_loop :foo do
use_real_time
a, b, c = sync "/osc/trigger/prophet"
synth :prophet, note: a, cutoff: b, sustain: c
end """)
```
Now send a message to Sonic Pi.
```
send_message('/trigger/prophet', 70, 100, 8)
stop()
```
## More Examples
```
from psonic import *
#Inspired by Steve Reich Clapping Music
clapping = [1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0]
for i in range(13):
for j in range(4):
for k in range(12):
if clapping[k] ==1 : sample(DRUM_SNARE_SOFT,pan=-0.5)
if clapping[(i+k)%12] == 1: sample(DRUM_HEAVY_KICK,pan=0.5)
sleep (0.25)
```
## Projects that use Python-Sonic
Raspberry Pi sonic-track.py a Sonic-pi Motion Track Demo https://github.com/pageauc/sonic-track
## Sources
Joe Armstrong: Connecting Erlang to the Sonic Pi http://joearms.github.io/2015/01/05/Connecting-Erlang-to-Sonic-Pi.html
Joe Armstrong: Controlling Sound with OSC Messages http://joearms.github.io/2016/01/29/Controlling-Sound-with-OSC-Messages.html
..
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/image_displacement.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_displacement.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/image_displacement.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_displacement.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package if it is not already importable
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab (Colab lacks
# ipyleaflet support, so the folium backend is used there).
# NOTE(review): the bare 'except' below swallows every error, not just ImportError.
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine; falls back to the interactive
# authentication flow if no stored credentials exist.
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
# Build the interactive map, centered on the continental US (lat 40, lon -100).
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
import math
# Load the two images to be registered.
image1 = ee.Image('SKYSAT/GEN-A/PUBLIC/ORTHO/MULTISPECTRAL/s01_20150502T082736Z')
image2 = ee.Image('SKYSAT/GEN-A/PUBLIC/ORTHO/MULTISPECTRAL/s01_20150305T081019Z')
# Use bicubic resampling during registration.
image1Orig = image1.resample('bicubic')
image2Orig = image2.resample('bicubic')
# Choose to register using only the 'R' band.
image1RedBAnd = image1Orig.select('R')
image2RedBAnd = image2Orig.select('R')
# Determine the displacement by matching only the 'R' bands.
# displacement() yields per-pixel 'dx'/'dy' offsets of this image
# relative to the reference, searching up to maxOffset.
displacement = image2RedBAnd.displacement(**{
    'referenceImage': image1RedBAnd,
    'maxOffset': 50.0,
    'patchWidth': 100.0
})
# Compute image offset magnitude and direction from the dx/dy bands.
offset = displacement.select('dx').hypot(displacement.select('dy'))
angle = displacement.select('dx').atan2(displacement.select('dy'))
# Display offset distance and angle.
Map.addLayer(offset, {'min':0, 'max': 20}, 'offset')
Map.addLayer(angle, {'min': -math.pi, 'max': math.pi}, 'angle')
Map.setCenter(37.44,0.58, 15)
# Use the computed displacement to register all original bands.
registered = image2Orig.displace(displacement)
# Show the results of co-registering the images.
visParams = {'bands': ['R', 'G', 'B'], 'max': 4000}
Map.addLayer(image1Orig, visParams, 'Reference')
Map.addLayer(image2Orig, visParams, 'BefOre Registration')
Map.addLayer(registered, visParams, 'After Registration')
# register() performs the displacement + displace steps in a single call.
alsoRegistered = image2Orig.register(**{
    'referenceImage': image1Orig,
    'maxOffset': 50.0,
    'patchWidth': 100.0
})
Map.addLayer(alsoRegistered, visParams, 'Also Registered')
```
## Display Earth Engine data layers
```
# Add a control widget so the individual layers can be toggled, then render.
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
| github_jupyter |
[Loss Function](https://www.bualabs.com/archives/2673/what-is-loss-function-cost-function-error-function-loss-function-how-cost-function-work-machine-learning-ep-1/) หรือ Cost Function คือ การคำนวน Error ว่า yhat ที่โมเดลทำนายออกมา ต่างจาก y ของจริง อยู่เท่าไร แล้วหาค่าเฉลี่ย เพื่อที่จะนำมาหา Gradient ของ Loss ขึ้นกับ Weight ต่าง ๆ ด้วย Backpropagation แล้วใช้อัลกอริทึม [Gradient Descent](https://www.bualabs.com/archives/631/what-is-gradient-descent-in-deep-learning-what-is-stochastic-gradient-descent-sgd-optimization-ep-1/) ทำให้ Loss น้อยลง ในการเทรนรอบถัดไป
ในเคสนี้เราจะพูดถึง Loss Function สำหรับงาน Classification (Discrete ค่าไม่ต่อเนื่อง) ที่เป็นที่นิยมมากที่สุด ได้แก่ Cross Entropy Loss
* yhat เป็น Probability ที่ออกมาจากโมเดลที่ Layer สุดท้ายเป็น [Softmax Function](https://www.bualabs.com/archives/1819/what-is-softmax-function-how-to-use-softmax-function-benefit-of-softmax/)
* y เป็นข้อมูลที่อยู่ในรูปแบบ [One Hot Encoding](https://www.bualabs.com/archives/1902/what-is-one-hot-encoding-benefit-one-hot-encoding-why-one-hot-encoding-in-machine-learning/)
# 0. Import
```
import torch
from torch import tensor
import matplotlib.pyplot as plt
```
# 1. Data
เราจะสร้างข้อมูลตัวอย่างขึ้นมา Dog = 0, Cat = 1, Rat = 2
## y
สมมติค่า y จากข้อมูลตัวอย่าง ที่เราต้องการจริง ๆ เป็นดังนี้
```
# Labels for ten samples: Dog = 0, Cat = 1, Rat = 2.
y = tensor([0, 1, 2, 0, 0, 1, 0, 2, 2, 1])
y
# n samples, c classes (largest label + 1).
n, c = len(y), y.max() + 1
# One-hot encode: start from zeros and scatter a 1 into each row's label column.
y_onehot = torch.zeros(n, c)
y_onehot.scatter_(1, y.unsqueeze(1), 1.)
y_onehot
```
## yhat
สมมติว่า โมเดลเราทำนายออกมาได้ดังนี้
```
# Raw model scores (logits): one row per sample, columns = Dog(0), Cat(1), Rat(2).
yhat = tensor([[3., 2., 1.],
               [5., 6., 2.],
               [0., 0., 5.],
               [2., 3., 1.],
               [5., 4., 3.],
               [1., 0., 3.],
               [5., 3., 2.],
               [2., 2., 4.],
               [8., 5., 3.],
               [3., 4., 0.]])
```
เราจะใช้ [Softmax Function จาก ep ที่แล้ว](https://www.bualabs.com/archives/1819/what-is-softmax-function-how-to-use-softmax-function-benefit-of-softmax/) แล้วเติม log เอาไว้สำหรับใช้ในขั้นตอนถัดไป
$$\hbox{softmax(x)}_{i} = \frac{e^{x_{i}}}{\sum_{0 \leq j \leq n-1} e^{x_{j}}}$$
```
def log_softmax(z):
    """Naive log-softmax over the last axis: softmax first, then log.

    Shifting by the row maximum keeps exp() from overflowing, but taking
    .log() of the probabilities afterwards is still less accurate than
    computing the log directly (see the optimized variants further below).
    """
    shifted = z - z.max(-1, keepdim=True)[0]
    exp_shifted = shifted.exp()
    probs = exp_shifted / exp_shifted.sum(-1, keepdim=True)
    return probs.log()
```
yhat กลายเป็น Probability ของ 3 Category
```
# Log-probabilities for each sample over the three classes.
log_softmax(yhat)
```
## argmax เปรียบเทียบ y และ yhat
argmax ใช้หาตำแหน่งที่ มีค่ามากที่สุด ในที่นี้ เราสนใจค่ามากที่สุดใน มิติที่ 1
```
# Predicted class per sample (index of the largest score along dim 1), then the labels.
yhat.argmax(1)
y
```
ตรงกัน 7 อัน
```
# Count how many predictions agree with the labels (7 of the 10 samples here).
(yhat.argmax(1) == y).sum()
```
# 2. Cross Entropy Loss
Cross Entropy Loss (Logistic Regression) หรือ Log Loss คือ การคำนวน Error ว่า yhat ต่างจาก y อยู่เท่าไร ด้วยการนำ Probability มาคำนวน หมายถึง ทายถูก แต่ไม่มั่นใจก็จะ Loss มาก หรือ ยิ่งทายผิด แต่มั่นใจมาก ก็จะ Loss มาก โดยคำนวนทั้ง Batch แล้วหาค่าเฉลี่ย
* p(x) มีค่าระหว่าง 0 ถึง 1 (ทำให้ผ่าน log แล้วติดลบ เมื่อเจอกับเครื่องหมายลบด้านหน้า จะกลายเป็นบวก)
* Cross Entropy Loss มีค่าระหว่าง 0 ถึง Infinity (ถ้าเป็น 0 คือไม่ Error เลย)
# 2.1 สูตร Cross Entropy Loss
เรียกว่า Negative Log Likelihood
$$ NLL = -\sum x\, \log p(x) $$
เนื่องจาก ค่า $x$ อยู่ในรูป One Hot Encoding เราสามารถเขียนใหม่ได้เป็น $-\log(p_{i})$ โดย i เป็น Index ของ y ที่เราต้องการ
## 2.2 โค้ด Negative Log Likelihood
```
def nll(log_probs, target):
    """Mean negative log-likelihood.

    log_probs: (n, classes) log-probabilities; target: (n,) class indices.
    Picks each row's log-probability of its target class, negates, and averages.
    """
    row_idx = torch.arange(log_probs.size()[0])
    picked = log_probs[row_idx, target]
    return -picked.mean()
```
## 2.3 การใช้งาน Negative Log Likelihood
```
# Mean cross-entropy loss of the model scores against the labels.
loss = nll(log_softmax(yhat), y)
loss
```
## 2.4 Optimize
เนื่องจาก
$$\log \left ( \frac{a}{b} \right ) = \log(a) - \log(b)$$
ทำให้เราแยก เศษและส่วน ออกเป็น 2 ก่อนลบกัน
และถ้า x ใหญ่เกินไป เมื่อนำมา exp จะทำให้ nan ได้ จากสูตรด้านล่าง
$$\log \left ( \sum_{j=1}^{n} e^{x_{j}} \right ) = \log \left ( e^{a} \sum_{j=1}^{n} e^{x_{j}-a} \right ) = a + \log \left ( \sum_{j=1}^{n} e^{x_{j}-a} \right )$$
a คือ max(x) เราสามารถ exp(x-a) ให้ x เป็นค่าติดลบให้หมด เมื่อ exp จะได้ไม่เกิน 1 แล้วค่อยไปบวก a กลับทีหลังได้
จาก 2 สูตรด้านบน เราสามารถ Optimize โค้ด ได้ดังนี้
```
def log_softmax2(z):
    """Numerically stable log-softmax: z - logsumexp(z), with the row max factored out.

    Uses log(sum(exp(z))) = m + log(sum(exp(z - m))) so exp() never overflows.
    """
    peak = z.max(-1, keepdim=True)[0]
    log_denom = (z - peak).exp().sum(-1, keepdim=True).log() + peak
    return z - log_denom
```
หรือ
```
def log_softmax3(z):
    """Log-softmax via torch's built-in, numerically stable logsumexp."""
    lse = z.logsumexp(-1, keepdim=True)
    return z - lse
```
### เปรียบเทียบผลลัพธ์กับ PyTorch
```
import torch.nn.functional as F
# PyTorch's reference cross-entropy (softmax + NLL fused), then the three
# hand-written variants — all four should print the same value.
F.cross_entropy(yhat, y)
nll(log_softmax(yhat), y)
nll(log_softmax2(yhat), y)
nll(log_softmax3(yhat), y)
```
ผลลัพธ์ถูกต้อง ตรงกับ PyTorch F.cross_entropy
## 2.5 พล็อตกราฟ
เราจะสมมติว่า Dog = 0, Cat = 1 และในข้อมูลตัวอย่างมีแต่ Dog (0) อย่างเดียว เราจะลองดูว่าพล็อตกราฟไล่ตั้งแต่ ความน่าจะเป็น 0-100%
เราจะสร้างข้อมูลตัวอย่างขึ้นมา ให้ y เป็น 0 จำนวน 100 ตัว แทนรูปภาพ Dog 100 รูป เราจะได้เอาไว้พล็อตกราฟ
```
# 100 samples, all labelled Dog (class 0).
y = torch.zeros(100)
y[:10]
```
yhat คือ Output ของโมเดลว่า ความน่าจะเป็นรูป Dog (Column 0) และความน่าจะเป็นรูป Cat (Column 1) เราจะไล่ข้อมูลตั้งแต่ (หมา 0% แมว 100%) ไปยัง (หมา 100% แมว 0%)
```
# P(dog) in column 0 ramps linearly 0.00 ... 0.99; P(cat) in column 1 is its
# complement, so each row is a valid 2-class probability distribution.
yhat = torch.zeros(100, 2)
yhat[:, 0] = torch.arange(0., 1., 0.01)
yhat[:, 1] = 1.0 - yhat[:, 0]
yhat[:10]
```
คำนวนค่าความน่าจะเป็น ของทั้ง 2 Class เอาไว้พล็อตกราฟ
```
# Collapse the two per-class probabilities into one predicted value
# (0 = Dog, 1 = Cat) for plotting; .t() on a 1-D tensor is a no-op.
classes = torch.tensor([0., 1.])
yhat_classes = yhat @ classes.t()
yhat_classes[:10]
```
Log ค่า Probability (ของจริงจะมาจาก Softmax ตามตัวอย่างด้านบน) เตรียมไว้เข้าสูตร
```
# yhat here already holds probabilities, so just take the log
# (in a real pipeline this would come from log-softmax as above).
log_probs = yhat.log()
log_probs[:10]
```
Negative Log Likelihood
```
# Per-sample negative log-likelihood — no .mean(), so one loss value
# per sample is kept for the plot below.
loss = -(log_probs[torch.arange(log_probs.size()[0]), y.long()])
loss[:10]
```
### พล็อตกราฟ y, yhat, loss และ log loss
* ข้อมูลตัวอย่าง y ที่สมมติว่าเท่ากับ 0 อย่างเดียว (เส้นสีแดง) เทียบกับ yhat ที่ทำนายไล่ตั้งแต่ 1 ไปถึง 0 (ทายผิดไล่ไปถึงทายถูก เส้นสีเขียว)
* สังเกต Loss สีส้ม เริ่มจากซ้ายสุด Ground Truth เท่ากับ 0 (เส้นสีแดง) แต่โมเดลทายผิด ทายว่าเป็น 1 (เส้นสีเขียว) ด้วยความมั่นใจ 100% ทำให้ Loss พุ่งขึ้นถึง Infinity
* เลื่อนมาตรงกลาง Loss จะลดลงอย่างรวดเร็ว เมื่อโมเดลทายผิด แต่ไม่ได้มั่นใจเต็มร้อย
* ด้านขวา Loss ลดลงเรื่อย ๆ จนเป็น 0 เมื่อโมเดลทายถูก ว่าเป็น 0 ด้วยความมั่นใจ 100%
* Log of Loss คือเปลี่ยน Loss ที่อยู่ช่วง Infinity ถึง 0 เป็น Log Scale จะได้ช่วง Infinity ถึง -Infinity จะได้ Balance ดูง่ายขึ้น
```
# Plot y, yhat, the per-sample loss and its log, all against P(dog) on the x-axis.
fig,ax = plt.subplots(figsize=(9, 9))
ax.scatter(yhat[:,0].numpy(), loss.log(), label="Log of Loss")
ax.scatter(yhat[:,0].numpy(), loss, label="Loss")
ax.plot(yhat[:,0].numpy(), yhat_classes.numpy(), label="yhat", color='green')
ax.plot(yhat[:,0].numpy(), y.numpy(), label="y", color='red')
ax.grid(True)
ax.legend(loc='upper right')
```
# 3. Loss Function อื่น ๆ
เราจำเป็นต้องเข้าใจความเป็นมา และกลไกการทำงานภายใน ของ Loss Function เนื่องจากเมื่อเราต้องการออกแบบโมเดล ในการแก้ปัญหาที่ซับซ้อนมากขึ้น เราต้องออกแบบ Loss Function ให้เข้ากับงานนั้นด้วย เช่น อาจจะเอาหลาย ๆ Loss Function เช่น [Regression Loss](https://www.bualabs.com/archives/1928/what-is-mean-absolute-error-mae-mean-squared-error-mse-root-mean-squared-error-rmse-loss-function-ep-2/) มาผสมกัน แล้ว Weight น้ำหนัก รวมเป็น Loss ที่เราต้องการ เป็นต้น
```
```
| github_jupyter |
# Example: CanvasXpress circular Chart No. 6
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/circular-6.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="circular6",
data={
"z": {
"chr": [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
6,
6,
6,
6,
6,
6,
6,
6,
6,
7,
7,
7,
7,
7,
7,
7,
7,
8,
8,
8,
8,
8,
8,
8,
8,
9,
9,
9,
9,
9,
9,
9,
9,
10,
10,
10,
10,
10,
10,
10,
11,
11,
11,
11,
11,
11,
11,
12,
12,
12,
12,
12,
12,
12,
13,
13,
13,
13,
13,
13,
14,
14,
14,
14,
14,
14,
15,
15,
15,
15,
15,
15,
16,
16,
16,
16,
16,
17,
17,
17,
17,
18,
18,
18,
18,
19,
19,
19,
19,
20,
20,
20,
20,
21,
21,
21,
22,
22,
22,
"X",
"X",
"X",
"X",
"X",
"X",
"X",
"X",
"Y",
"Y",
"Y",
"X",
3,
19,
5,
11,
10,
15,
18,
11,
13,
14,
21,
1,
7,
14,
"Y",
21,
2,
18,
1,
7,
9,
7,
19,
19,
20,
20,
9,
"X",
16,
8,
20,
"X",
18
],
"pos": [
176158308,
195792629,
229516707,
127588847,
79643728,
185593801,
9679485,
244523632,
236753568,
128133434,
228644565,
150003054,
219011541,
168916847,
26949439,
102746811,
2474221,
209897353,
113021141,
77762431,
163020942,
171034774,
213334477,
97455775,
83291531,
143519956,
122953780,
134434993,
6501153,
36509633,
134712403,
16094381,
159112661,
16092021,
29530674,
98680615,
19640420,
108401923,
143243174,
16342895,
42326293,
115086153,
86673182,
138017594,
40287060,
133573077,
138457582,
17843222,
54643446,
31433785,
74774102,
178335068,
56846964,
539920,
95028169,
121007542,
131105053,
79720263,
48227800,
142747889,
62543189,
50598801,
33328141,
158733438,
47107967,
5246518,
131713113,
12326167,
58372056,
28321194,
108652542,
103359699,
103536939,
56208609,
87012547,
3341929,
124836752,
59833292,
39064309,
31063538,
67409926,
10777547,
48520782,
18875793,
81484304,
35095469,
120807273,
36875340,
126128712,
100677585,
118570992,
9612077,
77867215,
19151335,
53602699,
49087920,
38708284,
113120818,
101439886,
75343477,
26249259,
54093637,
20596380,
98938748,
40533585,
89574094,
80301557,
56696139,
106845694,
10555451,
101114606,
50732192,
17458821,
9173140,
86898750,
76472186,
16266789,
93249681,
87911171,
9404454,
56147990,
54904212,
87210495,
20386568,
32880981,
14002843,
12161519,
39664472,
73880383,
47714897,
868308,
66004051,
24127310,
54211025,
15902150,
8721825,
46962668,
39093389,
55603291,
41233282,
63103970,
10443615,
6248945,
24491648,
19429871,
4170936,
40286870,
54989240,
39471767,
44811148,
28873711,
7738820,
20461957,
23030024,
7678949,
113205707,
9117671,
55230156,
73702110,
105064343,
7484814,
6345698,
45891043,
14020510,
2971362,
29256349,
146673737,
141355615,
31567310,
3395353,
2464888,
68581686,
60314299,
58307384,
26528612,
38283186,
43483316,
41830860,
160212793,
83692467,
39167742,
8309133,
26927848,
197477698,
65796042,
145369367,
91838386,
59033170,
31843957,
33440512,
47490053,
49915055,
1878719,
93047574,
145870982,
75626142,
35819134,
60862499,
18121170,
49128537
],
"Annt1": [
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2"
],
"Annt2": [
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:3",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1"
],
"Annt3": [
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:4",
"Desc:4",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:4",
"Desc:1",
"Desc:3",
"Desc:1",
"Desc:4",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:4",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:4",
"Desc:2",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:4",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:4",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:3",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:3",
"Desc:3",
"Desc:1",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:2",
"Desc:3",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:4",
"Desc:3",
"Desc:3",
"Desc:4",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:2",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:4",
"Desc:1",
"Desc:4",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:4",
"Desc:1",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:3",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:4",
"Desc:4",
"Desc:1"
],
"Annt4": [
"Desc:4",
"Desc:2",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:5",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:4",
"Desc:3",
"Desc:3",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:5",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:2",
"Desc:5",
"Desc:3",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:1",
"Desc:3",
"Desc:3",
"Desc:5",
"Desc:4",
"Desc:1",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:3",
"Desc:2",
"Desc:5",
"Desc:5",
"Desc:4",
"Desc:4",
"Desc:5",
"Desc:1",
"Desc:4",
"Desc:3",
"Desc:5",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:2",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:1",
"Desc:5",
"Desc:4",
"Desc:5",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:5",
"Desc:1",
"Desc:4",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:5",
"Desc:1",
"Desc:4",
"Desc:5",
"Desc:4",
"Desc:1",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:5",
"Desc:5",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:5",
"Desc:1",
"Desc:2",
"Desc:5",
"Desc:2",
"Desc:2",
"Desc:5",
"Desc:5",
"Desc:3",
"Desc:5",
"Desc:4",
"Desc:5",
"Desc:4",
"Desc:3",
"Desc:3",
"Desc:3",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:5",
"Desc:2",
"Desc:5",
"Desc:4",
"Desc:4",
"Desc:4",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:1",
"Desc:2",
"Desc:5",
"Desc:5",
"Desc:5",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:4",
"Desc:4",
"Desc:3",
"Desc:5",
"Desc:5",
"Desc:2",
"Desc:4",
"Desc:2",
"Desc:5",
"Desc:5",
"Desc:5",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:5",
"Desc:5",
"Desc:5",
"Desc:2",
"Desc:3",
"Desc:5",
"Desc:1",
"Desc:2",
"Desc:1",
"Desc:4",
"Desc:4",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:1",
"Desc:2",
"Desc:4",
"Desc:3",
"Desc:5",
"Desc:3",
"Desc:5",
"Desc:3",
"Desc:1",
"Desc:1",
"Desc:3",
"Desc:4",
"Desc:2",
"Desc:1",
"Desc:5",
"Desc:3",
"Desc:4",
"Desc:4",
"Desc:5",
"Desc:1",
"Desc:2",
"Desc:2",
"Desc:3",
"Desc:5",
"Desc:3",
"Desc:2",
"Desc:2"
]
},
"x": {
"Factor1": [
"Lev:2",
"Lev:1",
"Lev:1",
"Lev:1",
"Lev:2",
"Lev:1",
"Lev:1",
"Lev:1",
"Lev:2",
"Lev:1"
],
"Factor2": [
"Lev:1",
"Lev:2",
"Lev:2",
"Lev:1",
"Lev:1",
"Lev:3",
"Lev:3",
"Lev:1",
"Lev:1",
"Lev:2"
],
"Factor3": [
"Lev:4",
"Lev:1",
"Lev:2",
"Lev:1",
"Lev:4",
"Lev:4",
"Lev:1",
"Lev:4",
"Lev:2",
"Lev:2"
],
"Factor4": [
"Lev:1",
"Lev:4",
"Lev:2",
"Lev:4",
"Lev:5",
"Lev:2",
"Lev:2",
"Lev:1",
"Lev:4",
"Lev:3"
]
},
"y": {
"vars": [
"V1",
"V2",
"V3",
"V4",
"V5",
"V6",
"V7",
"V8",
"V9",
"V10",
"V11",
"V12",
"V13",
"V14",
"V15",
"V16",
"V17",
"V18",
"V19",
"V20",
"V21",
"V22",
"V23",
"V24",
"V25",
"V26",
"V27",
"V28",
"V29",
"V30",
"V31",
"V32",
"V33",
"V34",
"V35",
"V36",
"V37",
"V38",
"V39",
"V40",
"V41",
"V42",
"V43",
"V44",
"V45",
"V46",
"V47",
"V48",
"V49",
"V50",
"V51",
"V52",
"V53",
"V54",
"V55",
"V56",
"V57",
"V58",
"V59",
"V60",
"V61",
"V62",
"V63",
"V64",
"V65",
"V66",
"V67",
"V68",
"V69",
"V70",
"V71",
"V72",
"V73",
"V74",
"V75",
"V76",
"V77",
"V78",
"V79",
"V80",
"V81",
"V82",
"V83",
"V84",
"V85",
"V86",
"V87",
"V88",
"V89",
"V90",
"V91",
"V92",
"V93",
"V94",
"V95",
"V96",
"V97",
"V98",
"V99",
"V100",
"V101",
"V102",
"V103",
"V104",
"V105",
"V106",
"V107",
"V108",
"V109",
"V110",
"V111",
"V112",
"V113",
"V114",
"V115",
"V116",
"V117",
"V118",
"V119",
"V120",
"V121",
"V122",
"V123",
"V124",
"V125",
"V126",
"V127",
"V128",
"V129",
"V130",
"V131",
"V132",
"V133",
"V134",
"V135",
"V136",
"V137",
"V138",
"V139",
"V140",
"V141",
"V142",
"V143",
"V144",
"V145",
"V146",
"V147",
"V148",
"V149",
"V150",
"V151",
"V152",
"V153",
"V154",
"V155",
"V156",
"V157",
"V158",
"V159",
"V160",
"V161",
"V162",
"V163",
"V164",
"V165",
"V166",
"V167",
"V168",
"V169",
"V170",
"V171",
"V172",
"V173",
"V174",
"V175",
"V176",
"V177",
"V178",
"V179",
"V180",
"V181",
"V182",
"V183",
"V184",
"V185",
"V186",
"V187",
"V188",
"V189",
"V190",
"V191",
"V192",
"V193",
"V194",
"V195",
"V196",
"V197",
"V198",
"V199",
"V200"
],
"smps": [
"S1",
"S2",
"S3",
"S4",
"S5",
"S6",
"S7",
"S8",
"S9",
"S10"
],
"data": [
[
52.79,
24.71,
14.35,
22.23,
42.42,
12.38,
19.18,
19.6,
51.81,
20.2
],
[
53.39,
28.1,
8.02,
24.12,
21.36,
28.89,
16.29,
27.44,
38.8,
18.3
],
[
31.11,
13.84,
16.32,
7.62,
29.04,
2.66,
11.83,
8.6,
52.39,
8.55
],
[
42.48,
6.54,
14.22,
6.19,
51.77,
6.26,
6.4,
4.32,
47.32,
3.13
],
[
21.44,
8.39,
17.61,
1.59,
42.14,
6.91,
14.92,
5.04,
30.25,
20.55
],
[
47.6,
25.45,
7.53,
6.22,
40.27,
10.96,
28.39,
19.22,
37.05,
11.5
],
[
22.9,
23.06,
3.38,
25.36,
31.83,
1.15,
25.07,
10.77,
23.24,
24.39
],
[
35.39,
3.88,
19.33,
20.16,
42.14,
21.13,
25.74,
24.42,
25.39,
2.43
],
[
52.37,
6.73,
13.85,
11.98,
21.89,
4.12,
24.02,
19,
25.97,
23.68
],
[
52.58,
23.67,
7.66,
21.44,
47.43,
4.87,
22.18,
20.36,
52.05,
2.86
],
[
31.75,
16.88,
21.22,
1.41,
45.79,
26.28,
17.74,
9.64,
22.42,
9.45
],
[
24.37,
4.16,
24.06,
23.13,
41.6,
27.75,
13.84,
7.34,
45.81,
16.44
],
[
46.31,
15.54,
6.64,
22.44,
52.65,
14.93,
5.5,
24.09,
22.05,
9.59
],
[
35.7,
19.79,
2.5,
26.23,
34.06,
23.88,
16.45,
7.96,
24.18,
3.22
],
[
35.47,
5.46,
8.68,
2.91,
22.42,
6.71,
1.83,
22.89,
28.1,
14.21
],
[
52.58,
27.38,
27.51,
14.4,
47.27,
15.92,
13.59,
26.11,
33,
23.06
],
[
38.08,
15.55,
28.65,
28.74,
47.06,
16.09,
20.78,
26.79,
43.36,
22.28
],
[
53.38,
7.1,
26.57,
11.79,
40.73,
26.29,
26.21,
5.92,
53.16,
6.62
],
[
29.9,
13.26,
7.23,
5.38,
53.67,
28.59,
25.78,
21.36,
52.78,
13.46
],
[
52.6,
19.51,
14.72,
15.76,
52.13,
11.8,
1.05,
20.58,
29.03,
17.05
],
[
32.48,
24.14,
21.4,
11.61,
32.99,
7.31,
10.01,
27.88,
43.2,
1.14
],
[
30.62,
26.5,
26.38,
24.57,
21.88,
20.69,
13.17,
13.96,
41.89,
28.89
],
[
43.32,
18.91,
11.68,
22.23,
26.85,
11.23,
25.33,
2.87,
46.3,
9.78
],
[
22.35,
22.74,
27.62,
12.85,
36.96,
1.77,
26.33,
2.53,
27.88,
26.44
],
[
52.04,
3.77,
14.24,
14.65,
28.39,
14.11,
25.47,
21.81,
42.57,
18.74
],
[
47.25,
13.18,
26.74,
17.95,
53.01,
26.92,
1.61,
4.51,
30.27,
7.34
],
[
25.46,
16.02,
25.93,
11.33,
53.77,
17.54,
1.39,
24.03,
23.83,
13.23
],
[
22.81,
10.33,
24.77,
26.01,
28.95,
23.99,
12.01,
17.69,
35.64,
12.08
],
[
49.27,
26.71,
6.64,
14.77,
51.25,
28.48,
1.55,
10.43,
31.01,
17.18
],
[
42.41,
10.58,
15.27,
14.52,
40.17,
22.83,
12.05,
1.56,
22.99,
27.03
],
[
45.11,
18.03,
2.03,
22.29,
47.98,
18.79,
1.27,
3.41,
30.1,
9.83
],
[
52.17,
16.1,
19.34,
5.16,
42.09,
20.79,
20.52,
1.7,
29.79,
28.78
],
[
42.62,
6.73,
9.88,
20.52,
32.33,
23.13,
27.96,
18.97,
47.05,
26.31
],
[
34.15,
27.79,
1.84,
8.72,
23.68,
13.74,
8.7,
13.29,
45.97,
10.63
],
[
53.41,
8.13,
27.7,
1.16,
33.81,
17.28,
17.65,
3.52,
26.6,
1.81
],
[
46.08,
16.98,
2.9,
6.39,
48.8,
14.27,
1.16,
19.95,
26.05,
16.55
],
[
47.68,
20.6,
26.02,
2.24,
30.25,
5.2,
27.69,
27.32,
25.77,
1.83
],
[
43.9,
17.09,
3.2,
27.51,
21.37,
25.93,
26.64,
20.24,
33.7,
17.47
],
[
48.23,
13.08,
5.52,
7.6,
49.08,
28.7,
8.77,
13.11,
32.41,
2.72
],
[
33.18,
9.4,
10.42,
21.24,
44.66,
25.64,
12.85,
7.75,
21.55,
28.84
],
[
42.38,
23.57,
21.18,
25.08,
43.04,
11.07,
14.21,
1.42,
32.97,
21.7
],
[
30.55,
3.97,
4.38,
28.78,
39.17,
12.88,
4.53,
18.51,
48.28,
7.76
],
[
41.59,
14.67,
21.58,
15.97,
32.76,
6.12,
26.85,
15.79,
41.7,
7.31
],
[
46,
15.58,
27.91,
1.88,
31.55,
28.62,
14.72,
15.09,
52.69,
8.05
],
[
52.78,
22.19,
15.16,
1.41,
45.68,
9.69,
12.1,
7.3,
21.85,
3.27
],
[
47.42,
27.04,
15.18,
26.67,
23.72,
24.41,
28.73,
22.77,
22.13,
8.03
],
[
25.76,
1.63,
11.07,
19.24,
29.78,
9.65,
21.95,
13.94,
48.78,
7.68
],
[
48.6,
27.58,
20.39,
19.72,
35.11,
28.69,
23.7,
1.95,
33.49,
27.96
],
[
41.85,
15.79,
7.88,
16.83,
52.66,
16.14,
5.35,
18.82,
27.15,
5.45
],
[
45.62,
21.21,
11.66,
5.16,
22.28,
11.81,
16.28,
15.32,
33.85,
22.43
],
[
46.29,
25.75,
19.88,
20.76,
22.11,
21.46,
7.11,
26.73,
44.82,
3.51
],
[
24.36,
8.99,
9.74,
27.87,
32.85,
27.9,
18.32,
14.07,
37.25,
17.46
],
[
36.43,
20.25,
1.86,
10.53,
21.23,
4.09,
19.13,
18.67,
36.86,
19.04
],
[
25.61,
11.06,
9.71,
17.51,
32.42,
5.87,
28.71,
7.12,
44.66,
3.21
],
[
28.35,
16.72,
6.6,
21.55,
39.72,
5.16,
9.52,
6.6,
41.89,
4.98
],
[
31.16,
24.99,
5.19,
8.29,
32.85,
5.62,
21.49,
16.94,
48.36,
26.06
],
[
28.1,
14.02,
18.97,
24.52,
48.45,
18.57,
11.57,
26.8,
23.16,
7.75
],
[
34.22,
18.8,
4.05,
5.68,
22.38,
6.06,
19,
9.99,
38.28,
12.62
],
[
50,
5.17,
13.58,
27.32,
48.93,
12.52,
12.53,
3.9,
27.92,
13.57
],
[
51.8,
14.39,
27.67,
17.76,
49.12,
18.48,
5.37,
21.47,
26.36,
5.42
],
[
21.06,
27.43,
4.66,
4.66,
43.69,
23.29,
10.97,
24.48,
39.68,
24.51
],
[
38.43,
26.14,
9.59,
21.03,
21.09,
20.03,
19.43,
26.53,
35.09,
19.22
],
[
52.81,
25.31,
18.03,
8.39,
47.76,
23.69,
26.31,
26.5,
37.19,
25.91
],
[
25.14,
7.08,
17.93,
13.86,
21.06,
23.32,
27.77,
1.05,
51.25,
19.22
],
[
48.36,
8.08,
18.26,
16.15,
28.64,
13.73,
13.87,
16.39,
42.95,
1.75
],
[
37.43,
11.5,
23.1,
10.51,
48.75,
5.03,
28.38,
18.39,
27.03,
17.35
],
[
22.67,
19.74,
20.84,
15.24,
40.62,
18.13,
5.79,
8.72,
45.6,
27.13
],
[
39.33,
19.08,
25.75,
14.02,
38.13,
13.18,
25.47,
2.38,
33.72,
3.71
],
[
32.02,
13.58,
25.5,
3.88,
22.28,
5.56,
8.13,
18.99,
32.71,
4.26
],
[
26.62,
3.28,
25.59,
18.33,
27.65,
15.9,
20.44,
28.41,
46.91,
6.13
],
[
51.91,
26.1,
2.84,
28.74,
31.25,
23.36,
12.53,
15.14,
51.49,
10.04
],
[
48.71,
21.97,
15.89,
28.65,
49.3,
21.22,
3.76,
20.03,
42.07,
18.88
],
[
23.13,
2.08,
10.52,
21.58,
48.12,
17.61,
4.93,
15.71,
26.94,
28.32
],
[
25.16,
27.29,
27.77,
21.57,
53.14,
19.33,
6.46,
15.55,
38.21,
22.02
],
[
27.01,
18.7,
18.35,
25.85,
34.58,
16.19,
13.52,
21.68,
33.73,
7.28
],
[
40.99,
15.97,
19.43,
22.44,
46.51,
27.81,
11.62,
2.95,
44.24,
27.83
],
[
27.63,
20.4,
23.63,
18.05,
39.83,
27.58,
26.87,
8.77,
34.69,
3.6
],
[
27.43,
2.53,
1.74,
26.48,
22.16,
14.38,
7.54,
11.17,
43.99,
15.86
],
[
37.72,
24.1,
13.48,
1.62,
31.68,
24.96,
23.16,
12.29,
25.18,
16.59
],
[
46.47,
23.57,
6.71,
11.72,
53.77,
7.37,
1.13,
20.3,
22.93,
6.53
],
[
49.41,
28.3,
16.59,
15.22,
27.74,
6.38,
3.01,
20.2,
38.05,
2.12
],
[
52.69,
14.29,
4.48,
5.06,
38.24,
20.31,
13.41,
10.79,
35.45,
9.82
],
[
48.98,
21.97,
22.63,
3.21,
46.84,
28.64,
5.27,
15.32,
23.21,
17.51
],
[
25.76,
3.48,
16.51,
15.99,
40.09,
17.21,
22.1,
24.21,
22.85,
26.39
],
[
29.57,
22.65,
14.76,
4.48,
47.37,
12.4,
21.85,
12.72,
25.18,
11.03
],
[
22.01,
6.11,
22.28,
15.93,
46.41,
6.62,
21.88,
8.61,
23.99,
15.67
],
[
23.29,
24.59,
2.47,
21.52,
23.92,
11.13,
14.74,
17.02,
33.5,
22.62
],
[
30.62,
18.08,
2.31,
19.1,
45.56,
27.75,
3.24,
9.69,
42.93,
4.19
],
[
24.13,
1.35,
11.88,
25.51,
48.22,
1.37,
28.94,
5.28,
38.25,
15.38
],
[
49.63,
5.33,
18.6,
20.61,
22.34,
11.06,
2.22,
16.54,
53.47,
8.68
],
[
42.2,
19.69,
4.01,
26.61,
34.98,
13.31,
4.99,
26.61,
47,
22.4
],
[
48.24,
17.15,
28.34,
10.62,
30.8,
15.28,
21.08,
5.84,
49.72,
13.17
],
[
51.71,
3.67,
25.57,
13.12,
38.31,
8.22,
22.73,
13.4,
47.61,
1.33
],
[
41.29,
6.55,
21.66,
3.17,
36.62,
8.21,
19.98,
23.25,
50.76,
6.85
],
[
40.45,
27.37,
2.53,
13.66,
28.2,
14.32,
5.53,
14.67,
45.83,
15.08
],
[
42.23,
15.24,
10.62,
15.37,
33.92,
1.51,
8.22,
23.53,
49.44,
23.54
],
[
30.88,
19.84,
8.42,
12.73,
24.76,
15.13,
23.73,
25.79,
48.92,
25.5
],
[
52.89,
23.31,
3.3,
21.71,
29.33,
10.32,
6.42,
23.83,
25.11,
19.65
],
[
41.38,
7.44,
4.04,
17.54,
22.94,
1.33,
8.61,
15.45,
49.55,
15.54
],
[
36.81,
21.34,
3.01,
7.09,
40.02,
20.06,
21.23,
20.02,
31.02,
17.84
],
[
52.29,
5.38,
9.99,
27.28,
29.16,
1.64,
7.27,
7.86,
23.61,
19.17
],
[
48.66,
10.33,
15.84,
12.52,
31.17,
15.37,
9.89,
16.43,
25.28,
27.01
],
[
22.81,
20.15,
13.89,
6.64,
30.87,
28.08,
11.89,
7.24,
44.44,
4.9
],
[
34.43,
7.32,
7.79,
12.2,
33.55,
16.55,
13.96,
9.8,
51.31,
3.19
],
[
39.06,
17.33,
9.56,
20.28,
40.86,
17.91,
22.71,
13.64,
31.37,
6.01
],
[
45.03,
5.67,
22.87,
12.66,
29.29,
4.14,
7.56,
16.01,
36.65,
12.56
],
[
51.77,
4.1,
26,
3.96,
32.23,
5.26,
11.71,
17.96,
39.59,
20.08
],
[
49.81,
12.7,
1.62,
11.22,
52.71,
22.16,
13.26,
15,
42.64,
9.27
],
[
43.15,
20.12,
28.12,
8.84,
26.77,
20.31,
9.84,
15.72,
24.4,
27.82
],
[
41.2,
20.39,
7.76,
8.74,
39.75,
20.92,
15.3,
10.16,
50.94,
15.1
],
[
43.31,
21.41,
24.5,
14.25,
42.58,
10.01,
9.79,
11.6,
24.88,
27.58
],
[
38.03,
9.96,
18.37,
3.06,
28.42,
2.99,
1.76,
4.72,
39.11,
21.21
],
[
38.78,
11.89,
15.05,
21.22,
38.79,
15.56,
7.43,
16.35,
24.81,
13.12
],
[
29.68,
28.51,
27.6,
27.92,
44.52,
18.12,
25.71,
16.97,
36.77,
10.44
],
[
52.9,
19.04,
28.71,
10.37,
27.04,
18.85,
28.06,
18.31,
31.31,
27.66
],
[
50.77,
21.27,
28.56,
5.12,
35.39,
8.15,
11.73,
8.75,
43.82,
26.65
],
[
28.09,
21.14,
17.46,
27.36,
51.84,
26.42,
13.57,
18.96,
33.6,
19.64
],
[
29.81,
7.84,
27.68,
22.15,
34.79,
18.01,
26.49,
15.11,
44.04,
4
],
[
43.29,
24.01,
13.9,
27.21,
42.29,
17.94,
16.38,
25.6,
49.04,
14.41
],
[
31.46,
9.34,
8.84,
19.22,
52.91,
13.62,
6.53,
26.15,
24.57,
13.06
],
[
42.03,
21.07,
27.35,
24.6,
45.33,
24.58,
5.59,
9.23,
51.35,
1.02
],
[
41.69,
22.28,
12.03,
7.11,
32.98,
5.01,
15.36,
3.16,
22.33,
3.7
],
[
47.62,
11.61,
28.85,
10.52,
36.34,
19.35,
22.81,
15.83,
42.17,
25.63
],
[
30.29,
9.92,
2.32,
4.99,
34.83,
23.06,
17.59,
10.86,
45.17,
16.3
],
[
49.02,
26.6,
15.79,
20.37,
52.66,
28.84,
19.57,
24.66,
42.02,
20.58
],
[
37.26,
10.71,
22.96,
5.43,
29.59,
14.85,
16.53,
24,
52.94,
26.8
],
[
33.25,
2.41,
17.29,
27.27,
32.39,
28.05,
24.16,
20.49,
28.8,
11.27
],
[
39.38,
6.45,
20.91,
9.61,
45.47,
18.77,
9.13,
13.85,
31.49,
21.25
],
[
51.05,
15.04,
20.68,
8.44,
48.1,
19.95,
19.61,
28.13,
35.33,
17.97
],
[
29.34,
16.32,
6.32,
2.27,
53.69,
14.11,
18.18,
14.14,
35.59,
6.2
],
[
36.38,
12.11,
8.04,
28.07,
24.38,
23.47,
22.72,
27.67,
49.74,
5.53
],
[
29.6,
10.14,
24.5,
8.97,
48.86,
20.44,
1.83,
3.96,
21.76,
9.1
],
[
52.36,
21.32,
23.69,
20.39,
46.22,
24.85,
21.1,
24.07,
30.68,
11.32
],
[
22.61,
13.82,
28.27,
3.5,
32.82,
12.1,
28.91,
10.63,
52.58,
25.55
],
[
25.18,
27.88,
26.97,
24.2,
53.01,
23.7,
22.25,
10.12,
29.71,
5.07
],
[
23.97,
27.01,
9.14,
11.7,
23.19,
12.18,
20.88,
25.48,
38.24,
20.58
],
[
49.63,
13.67,
1.34,
17.56,
50.43,
7.5,
4.14,
12.52,
48.7,
22.08
],
[
51.08,
10.04,
18.23,
14.37,
44.22,
1.55,
7.89,
23.5,
24.09,
8.86
],
[
32.88,
4.6,
4.6,
3.62,
48.38,
2.13,
28.81,
7.23,
25.57,
8.73
],
[
32.27,
17.45,
28.26,
1.66,
39.41,
28.36,
2.61,
23.5,
26.42,
26.57
],
[
45.43,
1.89,
19.53,
14.48,
31.9,
20.54,
1.01,
23.49,
53.54,
11.51
],
[
53.68,
22.09,
15.49,
6.19,
40.87,
25.97,
25.33,
1.17,
31.83,
23.54
],
[
36.4,
3.93,
11.53,
12.81,
28.34,
2.62,
4.94,
21.85,
31.44,
15.91
],
[
38.8,
19.03,
24.23,
9.15,
30.01,
18.03,
19.11,
5.56,
45.77,
28.97
],
[
50.44,
24.58,
14.49,
11.83,
53.21,
13.68,
9.97,
19.76,
35.37,
5.13
],
[
31.63,
16.26,
23.55,
20.4,
27.16,
1.02,
21.76,
12.51,
23.73,
10.42
],
[
38.24,
7.25,
5.81,
19.28,
30.62,
16.17,
19.11,
3.93,
49.73,
19.5
],
[
46.77,
12.63,
25.33,
27.77,
23.56,
28.56,
18.59,
10.73,
25.1,
2.19
],
[
49.79,
2.32,
1.86,
26.89,
41.33,
17.48,
14.26,
15.09,
51.34,
19.84
],
[
26.78,
2.77,
19.56,
18.37,
45.22,
16.96,
3.91,
25.44,
35.53,
8.37
],
[
51.84,
11.17,
21.27,
1.05,
32.11,
2.47,
16.07,
2.64,
41.37,
4.85
],
[
41.1,
27.01,
22.43,
1.42,
22.96,
18.39,
21.03,
3.31,
47.51,
12.6
],
[
46.15,
23.08,
15.63,
10.09,
29.78,
16.61,
11.71,
5.25,
28.76,
15.49
],
[
48.44,
19.05,
12.58,
4.44,
30.65,
17.2,
22.99,
13.83,
24.92,
25.95
],
[
43.75,
27.45,
7.52,
28.08,
48.03,
7.82,
28.79,
13.96,
43.92,
1.08
],
[
23.27,
9.73,
6.63,
7.57,
46.95,
5.47,
8.81,
27.18,
43.93,
20.68
],
[
46.11,
9.8,
19.72,
1.68,
39.37,
8.94,
7.18,
22.96,
43.29,
16.61
],
[
37.81,
28.13,
27.44,
14.8,
38.41,
6.19,
12.98,
15.88,
34.2,
21.84
],
[
25.45,
7.63,
13.02,
13.04,
45.67,
25.06,
18.63,
5.5,
24.81,
10.08
],
[
31.85,
12.55,
10.13,
13.15,
23.25,
16.16,
20.33,
27.88,
36.94,
3.71
],
[
38.29,
16.24,
22.73,
14.31,
43.97,
10.44,
26.83,
20.28,
38.77,
2.73
],
[
27.41,
10.64,
18.83,
16.97,
31.26,
13.18,
2.64,
5.84,
35.93,
24.41
],
[
53.9,
1.2,
28.76,
5.34,
32.91,
18.14,
1.6,
27.94,
41.53,
16.48
],
[
42.34,
8.83,
28.06,
1.11,
21.38,
14.28,
28.54,
14.8,
45.92,
5.65
],
[
22.59,
27.42,
2.06,
3.08,
42.51,
18.3,
21.8,
10.97,
28.17,
9.76
],
[
24.03,
5.37,
3.06,
24.75,
26.88,
17.01,
7.32,
6.12,
53.62,
19.39
],
[
25.21,
12.38,
4.06,
8.5,
23.66,
26.27,
6.5,
13.97,
52.23,
2.53
],
[
21.43,
9.02,
11.43,
24.84,
45.26,
14.65,
1.01,
2.52,
21.9,
16.26
],
[
45.57,
11.08,
20.8,
26.15,
29.2,
26.35,
9.27,
15.34,
34.89,
28.51
],
[
46.2,
11.07,
12.05,
16.71,
45.23,
6.56,
27.86,
17.12,
51.75,
6.62
],
[
21.35,
20.14,
19.06,
22.41,
36.04,
14.14,
1.79,
19.66,
25.71,
23.23
],
[
49.33,
24.28,
22.3,
27.59,
53.76,
26.28,
18.32,
7.68,
47.18,
1.02
],
[
21.78,
17.01,
23.26,
23.39,
25.3,
4.18,
2.08,
10.78,
33.78,
1.32
],
[
30.89,
8.35,
1.4,
25.68,
28.98,
22.62,
18.18,
8.84,
37.04,
2.99
],
[
21.55,
11.85,
24.33,
8.5,
38.4,
9.96,
10.91,
19.72,
41.95,
12.44
],
[
52.66,
1.12,
23.65,
27.21,
37.26,
27.38,
7.5,
17.98,
52.83,
13.38
],
[
34.72,
18.76,
28.1,
18.17,
23.51,
15.65,
7.27,
23.02,
30.09,
18.72
],
[
30.5,
4.8,
3.22,
16.88,
31,
28.99,
26.39,
24.91,
45.3,
11.19
],
[
43.95,
11.96,
13.07,
25,
31.82,
21.9,
17.47,
15.41,
39.76,
10.66
],
[
48.1,
1.12,
2.47,
21.34,
43.53,
25.06,
16.5,
6.3,
28.09,
25.49
],
[
29.38,
11.34,
22.09,
6.79,
21.35,
4.75,
1.38,
8.5,
33.73,
15.17
],
[
21.45,
3.14,
13.84,
21.62,
34.39,
1.32,
23.04,
9.6,
33.36,
12.83
],
[
27.55,
13.14,
1.92,
23.19,
34.43,
21.65,
6.65,
8.66,
23.27,
13.41
],
[
42,
8.04,
10.01,
22.62,
31.02,
26.42,
10.56,
6.07,
52.1,
22.73
],
[
50.68,
11.8,
20.5,
13.2,
49.5,
16.36,
23.47,
1.26,
52.11,
6.87
],
[
46.9,
6.11,
9.83,
5.76,
33.14,
20.6,
22.2,
10.86,
24.28,
7.22
],
[
23.49,
20.91,
13.9,
3.77,
41.37,
16.67,
20.85,
14.56,
41.59,
2.48
],
[
39.56,
7.5,
22.68,
14.88,
26.3,
26.24,
22.45,
27.49,
33.75,
12.28
],
[
41,
21.73,
26.46,
24.01,
37.7,
19.38,
23.42,
12.84,
32.01,
5.28
],
[
28.02,
16.13,
9.13,
22.94,
29.45,
20.2,
15.4,
13.69,
45.03,
21.07
],
[
26.45,
14.49,
20.43,
7.38,
24.85,
25.36,
2.69,
4.91,
46.74,
18.85
],
[
35.11,
7.17,
22.37,
7.18,
35.22,
17.4,
13.75,
25.76,
37.96,
14.03
],
[
31.8,
28.62,
4.03,
27.35,
34.46,
4.35,
14.3,
8.43,
31.49,
27.2
],
[
52.7,
5.97,
5.7,
10.52,
44.97,
8.57,
15.36,
6.99,
47.65,
17.3
],
[
50.45,
22.98,
24.95,
20.9,
22.78,
22.91,
17.43,
21.92,
25.3,
24.46
],
[
21.86,
20.42,
18.33,
5.72,
29.44,
28.62,
7.23,
5.17,
37.31,
3.12
],
[
48.8,
13.89,
15.33,
11.12,
48.7,
27.98,
19.88,
14.3,
42.36,
19.42
],
[
28.11,
9.97,
25.52,
3.68,
48.55,
2.26,
20.76,
9.64,
42.63,
9.84
],
[
30.79,
2.41,
13.33,
23.13,
52.03,
4.8,
13.08,
26.53,
41.04,
18.68
],
[
45.36,
25.66,
7.86,
3.99,
45.06,
21.64,
4.39,
23.03,
23,
14.41
]
]
}
},
config={
"arcSegmentsSeparation": 3,
"circularAnchors2Align": "inside",
"circularAnchorsAlign": "outside",
"circularCenterProportion": 0.5,
"circularLabelsAlign": "inside",
"colorScheme": "Tableau",
"colors": [
"#332288",
"#6699CC",
"#88CCEE",
"#44AA99",
"#117733",
"#999933",
"#DDCC77",
"#661100",
"#CC6677",
"#AA4466",
"#882255",
"#AA4499"
],
"connections": [
[
"rgb(0,0,255)",
1,
17615830,
13,
60500000,
100000000,
20000000
],
[
"rgb(0,255,0)",
1,
2300000,
8,
13650000,
40000000,
80000000
],
[
"rgb(120,0,255)",
3,
71800000,
17,
6800000,
50000000,
25000000
],
[
"rgb(0,40,255)",
7,
71800000,
12,
5520000,
200000000,
80000000
],
[
"rgb(80,0,55)",
4,
8430000,
22,
6600000,
100000000,
50000000
],
[
"rgb(0,55,140)",
4,
3100000,
14,
64100000,
58000000,
10000000
],
[
"rgb(255,0,0)",
2,
94840000,
20,
6243500,
70000000,
30000000
]
],
"graphType": "Circular",
"ringGraphType": [
"heatmap",
"stacked"
],
"ringsOrder": [
"chromosomes",
"Annt1",
"Lev:1",
"anchors",
"labels",
"ideogram",
"anchors2",
"Lev:4"
],
"segregateSamplesBy": [
"Factor4"
],
"showIdeogram": True,
"title": "Custom Plotting Order"
},
width=713,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="circular_6.html")
```
| github_jupyter |
# Found in translation/interpreting (MoTra21)
In this paper we classify written, spoken, translated and interpreted texts using
competing sets of features, with an SVM
```
import time
import nltk
from sklearn import svm
import sklearn
from sklearn.svm import SVC
import glob
from lexical_diversity import lex_div as ld
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import cross_val_score
```
```
## KLD preselection
# translation versus original written (written originals)
# KLD-preselected term table: SITR (written translations) vs. ORG (written
# originals). Each row of "terms" is [term, s1, s2, s3], sorted in descending
# order of s1. NOTE(review): s1 is presumably the per-term KLD contribution
# and s2/s3 relative frequency / significance values — TODO confirm against
# the script that generated these tables.
tvow = {
"title": "SITR (written) vs. ORG (written)",
"terms": [
["this", 0.01395132, 0.01711909, 0.00000000],
["we", 0.01053052, 0.01862605, 0.00043504],
["gentlemen", 0.00675825, 0.00132613, 0.00000000],
["ladies", 0.00675825, 0.00132613, 0.00000000],
["that", 0.00565117, 0.02322731, 0.02922500],
["also", 0.00521316, 0.00528441, 0.00000202],
["you", 0.00471150, 0.00415922, 0.00877080],
["regard", 0.00363400, 0.00126585, 0.00001542],
["european", 0.00307788, 0.00608813, 0.02442100],
["in", 0.00263547, 0.02670337, 0.13595000],
["however", 0.00262847, 0.00261207, 0.00055496],
["like", 0.00240968, 0.00317467, 0.00441320],
["here", 0.00226540, 0.00168780, 0.00088326],
["must", 0.00221923, 0.00492274, 0.06620900],
["therefore", 0.00191655, 0.00174808, 0.00261950],
["which", 0.00188422, 0.00584701, 0.15325000],
["i", 0.00184087, 0.01209588, 0.12773000],
["eulex", 0.00174076, 0.00034158, 0.03430800],
["is", 0.00169917, 0.02081617, 0.51649000],
["important", 0.00169504, 0.00170789, 0.00448390],
["social", 0.00165145, 0.00130603, 0.08807500],
["president-in-office", 0.00163836, 0.00032149, 0.00015225],
["whether", 0.00161815, 0.00120557, 0.01290900],
["kosovo", 0.00159914, 0.00076353, 0.07778200],
["things", 0.00154647, 0.00066306, 0.01012600],
["opinion", 0.00146664, 0.00072334, 0.00571210],
["policy", 0.00146277, 0.00170789, 0.04552100],
["once", 0.00145597, 0.00102473, 0.00644990],
["micro-entities", 0.00143357, 0.00028130, 0.05340300],
["would", 0.00140190, 0.00490265, 0.06362100],
["regulations", 0.00135180, 0.00056260, 0.01564300],
["thing", 0.00135180, 0.00056260, 0.01689600],
["wine", 0.00133117, 0.00026121, 0.07972300],
["again", 0.00131423, 0.00130603, 0.00955600],
["gambling", 0.00123471, 0.00048223, 0.12219000],
["guantánamo", 0.00123471, 0.00048223, 0.02762900],
["fellow", 0.00123471, 0.00048223, 0.00155680],
["dialogue", 0.00120883, 0.00052241, 0.03431900],
["company", 0.00120883, 0.00052241, 0.11371000],
["treaty", 0.00117067, 0.00134622, 0.13536000],
["because", 0.00114853, 0.00196910, 0.08543800],
["signal", 0.00112638, 0.00022102, 0.00277350],
["have", 0.00112320, 0.01030762, 0.28088000],
["question", 0.00111624, 0.00092427, 0.01523000],
["germany", 0.00108657, 0.00044204, 0.01126700],
["want", 0.00108465, 0.00140650, 0.05764100],
["clear", 0.00107931, 0.00164761, 0.04204500],
["order", 0.00107623, 0.00104483, 0.03524100],
["german", 0.00106910, 0.00048223, 0.03376500],
["know", 0.00106122, 0.00106492, 0.04006100],
["with", 0.00102508, 0.00729370, 0.12851000],
["fundamental", 0.00102430, 0.00074343, 0.04723000],
["decisive", 0.00102398, 0.00020093, 0.00320750],
["method", 0.00102398, 0.00020093, 0.04281500],
["redundancies", 0.00102398, 0.00020093, 0.13355000],
["credibility", 0.00102398, 0.00020093, 0.00943270],
["chancellor", 0.00102398, 0.00020093, 0.04239600],
["wines", 0.00102398, 0.00020093, 0.10225000],
["believe", 0.00100082, 0.00136631, 0.02579700],
["something", 0.00099757, 0.00070325, 0.06523800],
["need", 0.00099105, 0.00287327, 0.19003000],
["been", 0.00098955, 0.00361671, 0.12982000],
["parliament", 0.00098435, 0.00223030, 0.19163000],
["austria", 0.00098022, 0.00030139, 0.02045900],
["who", 0.00096951, 0.00297374, 0.14869000],
["rights", 0.00096620, 0.00198919, 0.07099800],
["few", 0.00096319, 0.00056260, 0.00990810],
["great", 0.00095528, 0.00080371, 0.11022000],
["together", 0.00094730, 0.00062288, 0.01834300],
["democrats", 0.00094211, 0.00040186, 0.01698100],
["not", 0.00093834, 0.00980530, 0.27992000],
["people", 0.00092445, 0.00319476, 0.25933000],
["contrary", 0.00092158, 0.00018084, 0.00705680],
["steps", 0.00092158, 0.00018084, 0.00574860],
["introduce", 0.00092158, 0.00018084, 0.00372150],
["about", 0.00091875, 0.00233077, 0.14106000],
["are", 0.00091793, 0.00972493, 0.33488000],
["service", 0.00090540, 0.00078362, 0.20498000],
["say", 0.00090360, 0.00124576, 0.09488700],
["namely", 0.00089867, 0.00034158, 0.03508100],
["mission", 0.00089482, 0.00028130, 0.07246900],
["progress", 0.00089128, 0.00060278, 0.02106900],
["reason", 0.00088998, 0.00066306, 0.00700210],
["can", 0.00087214, 0.00327513, 0.19702000],
["am", 0.00087046, 0.00172798, 0.17837000],
["talk", 0.00086631, 0.00042195, 0.01970300],
["course", 0.00086240, 0.00122566, 0.10684000],
["said", 0.00086240, 0.00122566, 0.05517000],
["thank", 0.00084040, 0.00086399, 0.00576800],
["made", 0.00082622, 0.00144668, 0.10811000],
["unfortunately", 0.00082559, 0.00048223, 0.02032000],
["quickly", 0.00082559, 0.00048223, 0.03195600],
["seat", 0.00081918, 0.00016074, 0.04701400],
["merkel", 0.00081918, 0.00016074, 0.03316600],
["decrees", 0.00081918, 0.00016074, 0.08171400],
["austrian", 0.00081918, 0.00016074, 0.02413900],
["camp", 0.00081918, 0.00016074, 0.04907100],
["syria", 0.00081918, 0.00016074, 0.15943000],
["ought", 0.00081918, 0.00016074, 0.01429600],
["previous", 0.00081918, 0.00016074, 0.00531000],
["greens", 0.00081918, 0.00016074, 0.00637020],
["these", 0.00081785, 0.00261207, 0.10962000],
["already", 0.00081480, 0.00066306, 0.02822700],
["point", 0.00081245, 0.00100464, 0.14201000],
["during", 0.00081084, 0.00054251, 0.02659300],
["alliance", 0.00081054, 0.00026121, 0.01279600],
["your", 0.00080832, 0.00082381, 0.09844500],
["at", 0.00079908, 0.00387792, 0.55385000],
["subject", 0.00078169, 0.00056260, 0.01782400],
["then", 0.00076587, 0.00122566, 0.09960600],
["particular", 0.00075995, 0.00100464, 0.07123800],
["represents", 0.00073629, 0.00038176, 0.02217700],
["opportunities", 0.00073342, 0.00034158, 0.02649000],
["chance", 0.00072751, 0.00024111, 0.01132200],
["malmström", 0.00072751, 0.00024111, 0.06483500],
["only", 0.00072644, 0.00196910, 0.12706000],
["example", 0.00071820, 0.00098455, 0.05974400],
["gsp", 0.00071678, 0.00014065, 0.08666000],
["america", 0.00071678, 0.00014065, 0.01920200],
["documents", 0.00071678, 0.00014065, 0.04198600],
["car", 0.00071678, 0.00014065, 0.01735900],
["transposition", 0.00071678, 0.00014065, 0.07560600],
["comment", 0.00071678, 0.00014065, 0.00943510],
["applies", 0.00071678, 0.00014065, 0.01849800],
["group", 0.00071247, 0.00138641, 0.18386000],
["legislative", 0.00070774, 0.00044204, 0.06497100],
["result", 0.00070458, 0.00056260, 0.02959800],
["matter", 0.00070458, 0.00056260, 0.03219500],
["us", 0.00068375, 0.00235086, 0.14378000],
["case", 0.00067698, 0.00096446, 0.08257000],
["otherwise", 0.00067590, 0.00028130, 0.03273900],
["protection", 0.00066855, 0.00098455, 0.11076000],
["sense", 0.00066630, 0.00032149, 0.02008600],
["mentioned", 0.00065040, 0.00042195, 0.06257900],
["uighurs", 0.00064589, 0.00022102, 0.06915200],
["media", 0.00064589, 0.00022102, 0.08560800],
["particularly", 0.00062830, 0.00074343, 0.06479900],
["as", 0.00062204, 0.00845908, 0.28377000],
["terms", 0.00062000, 0.00044204, 0.04773700],
["incorporated", 0.00061439, 0.00012056, 0.03290900],
["debates", 0.00061439, 0.00012056, 0.01133000],
["consideration", 0.00061439, 0.00012056, 0.01493400],
["played", 0.00061439, 0.00012056, 0.00851350],
["separatism", 0.00061439, 0.00012056, 0.15943000],
["eeas", 0.00061439, 0.00012056, 0.07967700],
["adopt", 0.00061439, 0.00012056, 0.02331500],
["langen", 0.00061439, 0.00012056, 0.03763100],
["internet", 0.00061103, 0.00034158, 0.16080000],
["represent", 0.00060442, 0.00026121, 0.04336800],
["implement", 0.00060056, 0.00030139, 0.05357500],
["extent", 0.00060056, 0.00030139, 0.10183000],
["pleased", 0.00060056, 0.00030139, 0.06866300],
["partners", 0.00060056, 0.00030139, 0.08306600],
["relating", 0.00060056, 0.00030139, 0.02232200],
["will", 0.00058648, 0.00651008, 0.38870000],
["quite", 0.00056993, 0.00056260, 0.08209900],
["democracy", 0.00056993, 0.00056260, 0.20140000],
["connection", 0.00056587, 0.00020093, 0.04240600],
["ban", 0.00056587, 0.00020093, 0.06918600],
["fund", 0.00056335, 0.00066306, 0.46497000],
["able", 0.00055655, 0.00090418, 0.09375700],
["what", 0.00055655, 0.00285318, 0.31158000],
["process", 0.00054232, 0.00070325, 0.18886000],
["against", 0.00053544, 0.00126585, 0.22221000],
["give", 0.00053088, 0.00062288, 0.10980000],
["questions", 0.00052350, 0.00046214, 0.08952900],
["cannot", 0.00052067, 0.00132613, 0.25883000],
["situation", 0.00051548, 0.00076353, 0.19654000],
["peace", 0.00051378, 0.00034158, 0.15058000],
["aspect", 0.00051199, 0.00010046, 0.03394500],
["arrive", 0.00051199, 0.00010046, 0.04695000],
["frequency", 0.00051199, 0.00010046, 0.04297600],
["italian", 0.00051199, 0.00010046, 0.06263300],
["react", 0.00051199, 0.00010046, 0.01526600],
["session", 0.00051199, 0.00010046, 0.02304400],
["dtp", 0.00051199, 0.00010046, 0.15943000],
["rail", 0.00051199, 0.00010046, 0.05243300],
["malaysia", 0.00051199, 0.00010046, 0.15943000],
["approval", 0.00051199, 0.00010046, 0.01647700],
["prove", 0.00051199, 0.00010046, 0.03374800],
["fraud", 0.00051199, 0.00010046, 0.15943000],
["house", 0.00050041, 0.00068316, 0.36691000],
["really", 0.00049990, 0.00080371, 0.06425600],
["actually", 0.00049990, 0.00080371, 0.21074000],
["make", 0.00049866, 0.00178826, 0.11869000],
["a", 0.00049786, 0.02109747, 0.58171000],
["take", 0.00049334, 0.00144668, 0.08622100],
["internal", 0.00049282, 0.00042195, 0.11967000],
["currently", 0.00049140, 0.00050232, 0.08607300],
["starting", 0.00048766, 0.00018084, 0.06897400],
["yes", 0.00048766, 0.00018084, 0.05608200],
["factor", 0.00048766, 0.00018084, 0.02434200],
["adjustment", 0.00048766, 0.00018084, 0.18879000],
["end", 0.00048573, 0.00084390, 0.17023000],
["illegal", 0.00047801, 0.00052241, 0.05672300],
["procedure", 0.00047491, 0.00044204, 0.11012000],
["thus", 0.00047366, 0.00026121, 0.02822200],
["discussions", 0.00047366, 0.00026121, 0.08783800],
["message", 0.00047366, 0.00026121, 0.03924600],
["beginning", 0.00046647, 0.00022102, 0.02856800],
["discussed", 0.00046647, 0.00022102, 0.07867300],
["demands", 0.00046647, 0.00022102, 0.13342000],
["negotiations", 0.00046260, 0.00038176, 0.11352000],
["within", 0.00046080, 0.00078362, 0.26257000],
["consider", 0.00045840, 0.00032149, 0.06071100],
["return", 0.00045840, 0.00032149, 0.07435500],
["committee", 0.00044904, 0.00112520, 0.14525000],
["instead", 0.00044570, 0.00048223, 0.18665000],
["lot", 0.00044361, 0.00040186, 0.03833300],
["solidarity", 0.00044361, 0.00040186, 0.30948000],
["members", 0.00043523, 0.00086399, 0.27660000],
["local", 0.00043360, 0.00028130, 0.06865600],
["talking", 0.00043360, 0.00028130, 0.06528400],
["had", 0.00043345, 0.00102473, 0.20477000],
["joint", 0.00043306, 0.00034158, 0.07330300],
["problems", 0.00042695, 0.00062288, 0.13146000],
["too", 0.00042381, 0.00090418, 0.21940000],
["itself", 0.00042247, 0.00052241, 0.17476000],
["very", 0.00041716, 0.00245133, 0.29312000],
["europe", 0.00041610, 0.00225040, 0.47060000],
["time", 0.00041605, 0.00164761, 0.34898000],
["opposition", 0.00041279, 0.00024111, 0.16789000],
["online", 0.00041279, 0.00024111, 0.20989000],
["calling", 0.00041279, 0.00024111, 0.06335300],
["behind", 0.00041264, 0.00036167, 0.08424200],
["addition", 0.00041264, 0.00036167, 0.04619300],
["accept", 0.00041264, 0.00036167, 0.08557000],
["greece", 0.00041244, 0.00054251, 0.50075000],
["promote", 0.00041157, 0.00016074, 0.09586400],
["explain", 0.00041157, 0.00016074, 0.08766300],
["combating", 0.00041157, 0.00016074, 0.18824000],
["christian", 0.00041157, 0.00016074, 0.05222200],
["laid", 0.00040959, 0.00008037, 0.03197300],
["penalties", 0.00040959, 0.00008037, 0.07634400],
["hearing", 0.00040959, 0.00008037, 0.05171000],
["traditional", 0.00040959, 0.00008037, 0.06684900],
["engage", 0.00040959, 0.00008037, 0.10094000],
["copyright", 0.00040959, 0.00008037, 0.04721700],
["housing", 0.00040959, 0.00008037, 0.05919300],
["arguments", 0.00040959, 0.00008037, 0.06178200],
["orientation", 0.00040959, 0.00008037, 0.08892900],
["reporting", 0.00040959, 0.00008037, 0.11438000],
["regulate", 0.00040959, 0.00008037, 0.09144100],
["human", 0.00040503, 0.00114529, 0.19312000],
["everyone", 0.00040454, 0.00030139, 0.06085100],
["insurance", 0.00040037, 0.00020093, 0.18009000],
["absolutely", 0.00040037, 0.00020093, 0.04854800],
["structure", 0.00040037, 0.00020093, 0.09398600],
["carefully", 0.00040037, 0.00020093, 0.07218200],
["sometimes", 0.00040037, 0.00020093, 0.08652200]
]
}
# KLD-preselected term table: SITR (spoken interpretations) vs. ORG (spoken
# originals). Same layout as tvow above: each row of "terms" is
# [term, s1, s2, s3], sorted in descending order of s1. NOTE(review): s1 is
# presumably the per-term KLD contribution and s2/s3 relative frequency /
# significance values — TODO confirm against the script that generated
# these tables. Rows include spoken-language fillers ("euh", "hum", "hm").
ivos = {
"title": "SITR (spoken) vs. ORG (spoken)",
"terms": [
["euh", 0.03960763, 0.04218045, 0.00000000],
["hum", 0.00698372, 0.00375940, 0.00001041],
["we", 0.00596984, 0.02483083, 0.02644400],
["be", 0.00460203, 0.01135338, 0.00547490],
["kosovo", 0.00363414, 0.00069549, 0.00526050],
["'t", 0.00317923, 0.00535714, 0.01024100],
["need", 0.00315935, 0.00411654, 0.00293900],
["'re", 0.00312006, 0.00413534, 0.00359710],
["can", 0.00306121, 0.00518797, 0.00601240],
["going", 0.00264991, 0.00225564, 0.00377160],
["policy", 0.00259379, 0.00146617, 0.00915090],
["gentlemen", 0.00239302, 0.00065789, 0.00002205],
["talking", 0.00238054, 0.00131579, 0.00445210],
["external", 0.00235728, 0.00045113, 0.00149750],
["have", 0.00233189, 0.01122180, 0.10449000],
["ladies", 0.00228686, 0.00069549, 0.00006145],
["gambling", 0.00216084, 0.00041353, 0.04878800],
["are", 0.00215417, 0.00845865, 0.13382000],
["guantánamo", 0.00206262, 0.00039474, 0.01344400],
["now", 0.00205627, 0.00419173, 0.08461300],
["has", 0.00202141, 0.00469925, 0.05163700],
["citizens", 0.00201887, 0.00122180, 0.04574100],
["'s", 0.00195831, 0.01033835, 0.10421000],
["something", 0.00194038, 0.00140977, 0.00111650],
["europe", 0.00192766, 0.00195489, 0.02778800],
["a", 0.00190769, 0.02016917, 0.30463000],
["peace", 0.00176892, 0.00045113, 0.07242000],
["crisis", 0.00165246, 0.00127820, 0.07221400],
["micro", 0.00157152, 0.00030075, 0.06122100],
["really", 0.00154350, 0.00184211, 0.02499000],
["these", 0.00147660, 0.00306391, 0.06105100],
["arctic", 0.00147330, 0.00028195, 0.07270700],
["social", 0.00145241, 0.00109023, 0.15607000],
["wine", 0.00137508, 0.00026316, 0.08263800],
["eulex", 0.00137508, 0.00026316, 0.02612200],
["so", 0.00136696, 0.00486842, 0.11456000],
["freedom", 0.00134374, 0.00046992, 0.01827800],
["austria", 0.00127686, 0.00024436, 0.00402600],
["against", 0.00126790, 0.00114662, 0.02897300],
["mean", 0.00126591, 0.00088346, 0.06794400],
["there", 0.00124564, 0.00569549, 0.21563000],
["obviously", 0.00120633, 0.00054511, 0.00863260],
["germany", 0.00119494, 0.00043233, 0.01396700],
["european", 0.00116613, 0.00445489, 0.12981000],
["think", 0.00110828, 0.00443609, 0.10853000],
["council", 0.00109536, 0.00193609, 0.09522600],
["transfer", 0.00108042, 0.00020677, 0.03513100],
["dialogue", 0.00107833, 0.00050752, 0.09429100],
["got", 0.00106633, 0.00140977, 0.08530700],
["should", 0.00104067, 0.00342105, 0.12440000],
["rights", 0.00100295, 0.00165414, 0.05575900],
["human", 0.00100177, 0.00103383, 0.06221200],
["hm", 0.00100021, 0.00082707, 0.16249000],
["greece", 0.00099450, 0.00045113, 0.18266000],
["president-in-office", 0.00098340, 0.00028195, 0.00294290],
["shouldn", 0.00098056, 0.00050752, 0.02400800],
["then", 0.00095598, 0.00148496, 0.07346400],
["future", 0.00092064, 0.00095865, 0.02827300],
["financial", 0.00091838, 0.00097744, 0.04161500],
["if", 0.00090971, 0.00366541, 0.34595000],
["german", 0.00090718, 0.00035714, 0.07650100],
["s", 0.00090551, 0.00065789, 0.05154400],
["border", 0.00089963, 0.00026316, 0.00719300],
["certain", 0.00088651, 0.00069549, 0.04223500],
["detainees", 0.00088398, 0.00016917, 0.02963400],
["eurozone", 0.00088398, 0.00016917, 0.09689100],
["corruption", 0.00088398, 0.00016917, 0.03999200],
["pact", 0.00088398, 0.00016917, 0.07985900],
["service", 0.00087832, 0.00060150, 0.02732100],
["illegal", 0.00084598, 0.00030075, 0.03595500],
["clear", 0.00082946, 0.00110902, 0.09575200],
["means", 0.00082513, 0.00071429, 0.08311900],
["talk", 0.00082513, 0.00071429, 0.05385800],
["media", 0.00081686, 0.00024436, 0.06365100],
["market", 0.00079575, 0.00122180, 0.06558300],
["like", 0.00079183, 0.00229323, 0.07310500],
["things", 0.00079015, 0.00107143, 0.03889200],
["minority", 0.00078576, 0.00015038, 0.00386270],
["merkel", 0.00078576, 0.00015038, 0.03238400],
["austrian", 0.00078576, 0.00015038, 0.01600500],
["redundancies", 0.00078576, 0.00015038, 0.11032000],
["resolution", 0.00077615, 0.00056391, 0.03879100],
["data", 0.00077615, 0.00056391, 0.08171900],
["turkey", 0.00077215, 0.00028195, 0.07406600],
["justice", 0.00077215, 0.00028195, 0.02172700],
["same", 0.00076085, 0.00078947, 0.05429300],
["taken", 0.00075664, 0.00082707, 0.21130000],
["lot", 0.00075261, 0.00088346, 0.08247200],
["motion", 0.00073520, 0.00022556, 0.03229600],
["'d", 0.00073079, 0.00129699, 0.02472500],
["people", 0.00072993, 0.00422932, 0.38731000],
["comes", 0.00072943, 0.00069549, 0.02460500],
["solidarity", 0.00071889, 0.00033835, 0.24116000],
["fund", 0.00071726, 0.00056391, 0.18754000],
["government", 0.00071354, 0.00082707, 0.30528000],
["community", 0.00069140, 0.00041353, 0.10879000],
["regardless", 0.00068754, 0.00013158, 0.05607200],
["serbia", 0.00068754, 0.00013158, 0.04735900],
["considerations", 0.00068754, 0.00013158, 0.02276700],
["international", 0.00068225, 0.00069549, 0.11971000],
["situation", 0.00067159, 0.00084586, 0.15583000],
["kind", 0.00066915, 0.00054511, 0.10928000],
["violence", 0.00065624, 0.00031955, 0.13030000],
["china", 0.00065624, 0.00031955, 0.06901900],
["policies", 0.00065624, 0.00031955, 0.16789000],
["croatia", 0.00065479, 0.00020677, 0.16094000],
["colleagues", 0.00065422, 0.00120301, 0.06007500],
["with", 0.00064291, 0.00644737, 0.13583000],
["guarantee", 0.00062823, 0.00024436, 0.02654400],
["for", 0.00062165, 0.01077068, 0.24142000],
["fundamental", 0.00060057, 0.00035714, 0.02081300],
["step", 0.00059692, 0.00045113, 0.01373200],
["summit", 0.00059482, 0.00030075, 0.11075000],
["arab", 0.00058932, 0.00011278, 0.03103500],
["gsp", 0.00058932, 0.00011278, 0.09039200],
["incorporated", 0.00058932, 0.00011278, 0.04483800],
["pornography", 0.00058932, 0.00011278, 0.07908000],
["path", 0.00058932, 0.00011278, 0.03185300],
["react", 0.00058932, 0.00011278, 0.03180000],
["regards", 0.00058932, 0.00011278, 0.01314600],
["applies", 0.00058932, 0.00011278, 0.00969780],
["origin", 0.00058932, 0.00011278, 0.01651700],
["waiting", 0.00058932, 0.00011278, 0.01457900],
["said", 0.00058169, 0.00140977, 0.16987000],
["question", 0.00057997, 0.00112782, 0.04002900],
["resolved", 0.00057580, 0.00018797, 0.03481300],
["iran", 0.00057580, 0.00018797, 0.02953600],
["epp", 0.00057580, 0.00018797, 0.03160100],
["adjustment", 0.00057580, 0.00018797, 0.09919800],
["chinese", 0.00057580, 0.00018797, 0.07496200],
["afghanistan", 0.00057580, 0.00018797, 0.12620000],
["ready", 0.00057580, 0.00018797, 0.01595500],
["minorities", 0.00057098, 0.00026316, 0.05626900],
["online", 0.00057098, 0.00026316, 0.14655000],
["country", 0.00056127, 0.00092105, 0.38118000],
["whether", 0.00055995, 0.00090226, 0.11708000],
["possible", 0.00055407, 0.00078947, 0.12690000],
["up", 0.00053996, 0.00212406, 0.10071000],
["strategy", 0.00052969, 0.00035714, 0.17243000],
["euro", 0.00052969, 0.00035714, 0.07489400],
["states", 0.00051703, 0.00233083, 0.25204000],
["problems", 0.00051519, 0.00075188, 0.16840000],
["system", 0.00051344, 0.00065789, 0.12513000],
["civil", 0.00050977, 0.00030075, 0.01437100],
["won", 0.00050778, 0.00039474, 0.15635000],
["fighting", 0.00050774, 0.00024436, 0.02073800],
["signal", 0.00050774, 0.00024436, 0.20075000],
["swedish", 0.00050774, 0.00024436, 0.22400000],
["smes", 0.00050010, 0.00041353, 0.34871000],
["unity", 0.00049845, 0.00016917, 0.05247000],
["former", 0.00049845, 0.00016917, 0.09462300],
["dairy", 0.00049845, 0.00016917, 0.08302300],
["other", 0.00049482, 0.00182331, 0.18230000],
["region", 0.00049166, 0.00031955, 0.12420000],
["decisions", 0.00049166, 0.00031955, 0.05622300],
["speculation", 0.00049110, 0.00009398, 0.11465000],
["quartet", 0.00049110, 0.00009398, 0.04340400],
["membership", 0.00049110, 0.00009398, 0.02128000],
["reinfeldt", 0.00049110, 0.00009398, 0.09043600],
["dtp", 0.00049110, 0.00009398, 0.15940000],
["employees", 0.00049110, 0.00009398, 0.07172000],
["deserves", 0.00049110, 0.00009398, 0.03248500],
["malaysia", 0.00049110, 0.00009398, 0.15940000],
["peaceful", 0.00049110, 0.00009398, 0.02728900],
["separatism", 0.00049110, 0.00009398, 0.15940000],
["palestinians", 0.00049110, 0.00009398, 0.04977000],
["existence", 0.00049110, 0.00009398, 0.01551900],
["torture", 0.00049110, 0.00009398, 0.05693100],
["south", 0.00049020, 0.00020677, 0.06394100],
["fight", 0.00049020, 0.00020677, 0.02791300],
["structural", 0.00049020, 0.00020677, 0.04122900],
["influence", 0.00049020, 0.00020677, 0.05267800],
["favour", 0.00048902, 0.00045113, 0.06286900],
["responsibility", 0.00048191, 0.00048872, 0.10281000],
["calling", 0.00047809, 0.00033835, 0.06777800],
["interest", 0.00047752, 0.00052632, 0.04114400],
["wants", 0.00047611, 0.00026316, 0.06613800],
["banks", 0.00047611, 0.00026316, 0.18713000],
["hamas", 0.00047611, 0.00026316, 0.55874000],
["cases", 0.00047611, 0.00026316, 0.03022100],
["deal", 0.00047608, 0.00054511, 0.03943400],
["after", 0.00047505, 0.00056391, 0.15886000],
["thing", 0.00047439, 0.00065789, 0.05530600],
["sure", 0.00046573, 0.00097744, 0.20605000],
["sort", 0.00044827, 0.00041353, 0.06543600],
["military", 0.00044612, 0.00022556, 0.28371000],
["therefore", 0.00044469, 0.00077068, 0.08073500],
["individual", 0.00043916, 0.00030075, 0.13141000],
["towards", 0.00043896, 0.00046992, 0.03245900],
["always", 0.00043509, 0.00052632, 0.06240000],
["set", 0.00043482, 0.00060150, 0.13341000],
["great", 0.00043450, 0.00058271, 0.11045000],
["effects", 0.00042380, 0.00018797, 0.09255300],
["task", 0.00042380, 0.00018797, 0.04240600],
["roma", 0.00042299, 0.00015038, 0.26522000],
["prisoners", 0.00042299, 0.00015038, 0.09167000],
["take", 0.00042278, 0.00133459, 0.17778000],
["dear", 0.00041903, 0.00024436, 0.05192400],
["here", 0.00041843, 0.00186090, 0.13236000],
["being", 0.00041419, 0.00165414, 0.43106000],
["involved", 0.00040751, 0.00037594, 0.09075900],
["position", 0.00040751, 0.00037594, 0.13326000],
["still", 0.00040713, 0.00073308, 0.28810000],
["politics", 0.00040088, 0.00026316, 0.32770000],
["budget", 0.00039852, 0.00043233, 0.19639000],
["act", 0.00039587, 0.00046992, 0.15614000],
["don", 0.00039436, 0.00152256, 0.31144000],
["remind", 0.00039288, 0.00007519, 0.03341400],
["decrees", 0.00039288, 0.00007519, 0.08392600],
["mexico", 0.00039288, 0.00007519, 0.15940000],
["greek", 0.00039288, 0.00007519, 0.03689900],
["florenz", 0.00039288, 0.00007519, 0.02522900],
["belarus", 0.00039288, 0.00007519, 0.15940000],
["journalists", 0.00039288, 0.00007519, 0.09591000],
["bosnia", 0.00039288, 0.00007519, 0.08988100],
["ultimately", 0.00039288, 0.00007519, 0.02715500],
["langen", 0.00039288, 0.00007519, 0.05544100],
["violations", 0.00039288, 0.00007519, 0.06813000],
["quickly", 0.00038808, 0.00028195, 0.15480000],
["globalisation", 0.00038628, 0.00020677, 0.28176000],
["entities", 0.00038628, 0.00020677, 0.25151000],
["direction", 0.00038628, 0.00020677, 0.02494300],
["economic", 0.00038540, 0.00107143, 0.53376000],
["help", 0.00038204, 0.00080827, 0.24730000],
["live", 0.00037877, 0.00030075, 0.13346000],
["haven", 0.00037877, 0.00030075, 0.18950000],
["israel", 0.00037877, 0.00030075, 0.66719000],
["good", 0.00037744, 0.00140977, 0.22628000],
["would", 0.00037249, 0.00323308, 0.29239000],
["area", 0.00036457, 0.00063910, 0.13225000],
["partners", 0.00036366, 0.00022556, 0.25846000],
["internet", 0.00036295, 0.00035714, 0.24287000],
["presidency", 0.00036151, 0.00060150, 0.24366000],
["difficult", 0.00036015, 0.00058271, 0.25790000],
["everything", 0.00036015, 0.00037594, 0.22185000],
["immediately", 0.00035944, 0.00016917, 0.06266200],
["application", 0.00035944, 0.00016917, 0.16907000],
["implement", 0.00035944, 0.00016917, 0.07810300],
["developments", 0.00035944, 0.00016917, 0.04401100],
["suffering", 0.00035944, 0.00016917, 0.10158000],
["quite", 0.00035892, 0.00056391, 0.11396000],
["not", 0.00035808, 0.00601504, 0.48961000],
["they", 0.00035570, 0.00488722, 0.65931000],
["achieve", 0.00035544, 0.00046992, 0.32708000],
["none", 0.00034976, 0.00013158, 0.05371500],
["assume", 0.00034976, 0.00013158, 0.04553600],
["decisive", 0.00034976, 0.00013158, 0.08279500],
["accounts", 0.00034976, 0.00013158, 0.08787400],
["hasn", 0.00034879, 0.00024436, 0.10585000],
["burden", 0.00034879, 0.00024436, 0.17489000]
]
}
owvt = {
"title": "ORG (written) vs. SITR (written)",
"terms": [
["and", 0.00960090, 0.03562200, 0.00224550],
["eu", 0.00692414, 0.00488063, 0.00043077],
["to", 0.00623391, 0.04048055, 0.02807700],
["uk", 0.00368134, 0.00086129, 0.00006078],
["sri", 0.00356908, 0.00068461, 0.01355400],
["but", 0.00310011, 0.00483647, 0.00122370],
["on", 0.00257630, 0.01020295, 0.13891000],
["lanka", 0.00241777, 0.00046377, 0.01519500],
["ltte", 0.00218750, 0.00041960, 0.05155200],
["britain", 0.00196322, 0.00050794, 0.00232440],
["by", 0.00191546, 0.00583026, 0.01039500],
["'s", 0.00184886, 0.00373225, 0.04232300],
["capital", 0.00181136, 0.00055211, 0.02766000],
["member", 0.00177253, 0.00320223, 0.11940000],
["across", 0.00174368, 0.00059628, 0.00285810],
["debt", 0.00174368, 0.00059628, 0.05600200],
["mrls", 0.00172698, 0.00033126, 0.15962000],
["those", 0.00169945, 0.00178883, 0.02922700],
["its", 0.00166869, 0.00278262, 0.02764400],
["british", 0.00165240, 0.00044169, 0.00280400],
["schengen", 0.00162173, 0.00050794, 0.07640100],
["children", 0.00156147, 0.00092754, 0.07471200],
["sovereign", 0.00155004, 0.00041960, 0.00406740],
["welcome", 0.00149580, 0.00099379, 0.00991630],
["visa", 0.00146314, 0.00057419, 0.11699000],
["trading", 0.00144839, 0.00039752, 0.00459020],
["some", 0.00143826, 0.00189925, 0.01402800],
["health", 0.00141765, 0.00081712, 0.22798000],
["may", 0.00139295, 0.00117047, 0.00598600],
["bank", 0.00139253, 0.00050794, 0.00616290],
["review", 0.00138158, 0.00026501, 0.00145920],
["e-money", 0.00138158, 0.00026501, 0.08895800],
["tamil", 0.00138158, 0.00026501, 0.10616000],
["timber", 0.00138158, 0.00026501, 0.13994000],
["travel", 0.00138111, 0.00055211, 0.10882000],
["membership", 0.00134752, 0.00037543, 0.01217800],
["cut", 0.00134752, 0.00037543, 0.02324800],
["economies", 0.00134752, 0.00037543, 0.00539170],
["when", 0.00130412, 0.00207593, 0.03304900],
["systems", 0.00127979, 0.00064045, 0.03040200],
["electricity", 0.00126645, 0.00024293, 0.00976480],
["profiling", 0.00126645, 0.00024293, 0.15962000],
["eurozone", 0.00126645, 0.00024293, 0.02058800],
["death", 0.00126645, 0.00024293, 0.00168400],
["ireland", 0.00121982, 0.00050794, 0.12647000],
["where", 0.00120641, 0.00119255, 0.05085300],
["own", 0.00117263, 0.00145756, 0.09212000],
["such", 0.00117124, 0.00192134, 0.03242800],
["legislation", 0.00115758, 0.00072878, 0.05764000],
["lankan", 0.00115132, 0.00022084, 0.01815600],
["imf", 0.00115132, 0.00022084, 0.01110900],
["kingdom", 0.00115132, 0.00022084, 0.00803850],
["islamic", 0.00115132, 0.00022084, 0.01893200],
["firm", 0.00115132, 0.00022084, 0.00130560],
["england", 0.00115132, 0.00022084, 0.00285080],
["beef", 0.00115132, 0.00022084, 0.11243000],
["wales", 0.00115132, 0.00022084, 0.05193700],
["while", 0.00114933, 0.00083920, 0.01268200],
["their", 0.00111230, 0.00335682, 0.14261000],
["needs", 0.00109674, 0.00094963, 0.00150940],
["so", 0.00109316, 0.00249553, 0.20793000],
["system", 0.00107999, 0.00139131, 0.08261500],
["support", 0.00107772, 0.00165632, 0.04740600],
["arrest", 0.00105045, 0.00030918, 0.01384800],
["could", 0.00104805, 0.00117047, 0.11704000],
["regime", 0.00103619, 0.00019876, 0.01048500],
["commit", 0.00103619, 0.00019876, 0.02662400],
["banking", 0.00103619, 0.00019876, 0.02058300],
["warrant", 0.00103619, 0.00019876, 0.00978190],
["crimes", 0.00103619, 0.00019876, 0.01799500],
["minor", 0.00103619, 0.00019876, 0.07386800],
["israel", 0.00102180, 0.00081712, 0.06791000],
["between", 0.00101499, 0.00103796, 0.03237100],
["public", 0.00099579, 0.00088337, 0.08658200],
["consumers", 0.00098930, 0.00064045, 0.15580000],
["asylum", 0.00098560, 0.00044169, 0.16540000],
["young", 0.00098560, 0.00044169, 0.08878400],
["states", 0.00098071, 0.00311389, 0.20279000],
["farmers", 0.00097099, 0.00057419, 0.09895200],
["aid", 0.00093470, 0.00046377, 0.05410000],
["humanitarian", 0.00093470, 0.00046377, 0.05648100],
["full", 0.00092716, 0.00061836, 0.02685700],
["indeed", 0.00092716, 0.00061836, 0.07214400],
["extra", 0.00092105, 0.00017667, 0.00394070],
["refused", 0.00092105, 0.00017667, 0.02245600],
["poorest", 0.00092105, 0.00017667, 0.00748940],
["laboratory", 0.00092105, 0.00017667, 0.15962000],
["general", 0.00090591, 0.00055211, 0.05304200],
["eastern", 0.00089974, 0.00033126, 0.12326000],
["price", 0.00089499, 0.00037543, 0.00597400],
["mechanism", 0.00089499, 0.00037543, 0.04070400],
["countries", 0.00087946, 0.00178883, 0.20607000],
["workers", 0.00087294, 0.00070670, 0.08853200],
["real", 0.00086604, 0.00050794, 0.01130000],
["they", 0.00086499, 0.00379850, 0.19935000],
["care", 0.00086318, 0.00044169, 0.09461300],
["his", 0.00086275, 0.00099379, 0.05980600],
["liberal", 0.00085809, 0.00026501, 0.03825300],
["both", 0.00082468, 0.00088337, 0.08190000],
["years", 0.00081570, 0.00143548, 0.15359000],
["looks", 0.00080592, 0.00015459, 0.00738310],
["vis", 0.00080592, 0.00015459, 0.15962000],
["commend", 0.00080592, 0.00015459, 0.02372400],
["underlying", 0.00080592, 0.00015459, 0.00472790],
["gay", 0.00080592, 0.00015459, 0.15962000],
["race", 0.00080592, 0.00015459, 0.03034500],
["operating", 0.00080592, 0.00015459, 0.00474870],
["terrible", 0.00080592, 0.00015459, 0.01336400],
["common", 0.00077034, 0.00090546, 0.20545000],
["reduce", 0.00076404, 0.00024293, 0.01515700],
["fiscal", 0.00076404, 0.00024293, 0.01613300],
["most", 0.00076289, 0.00106005, 0.19334000],
["agency", 0.00076221, 0.00037543, 0.18557000],
["forward", 0.00075506, 0.00068461, 0.07918900],
["justice", 0.00074661, 0.00055211, 0.09529300],
["vital", 0.00073920, 0.00033126, 0.01723900],
["long-term", 0.00073157, 0.00028710, 0.01080000],
["recognise", 0.00072400, 0.00039752, 0.04702100],
["or", 0.00069890, 0.00324639, 0.47882000],
["used", 0.00069223, 0.00070670, 0.04066400],
["markets", 0.00069223, 0.00070670, 0.08073800],
["any", 0.00069101, 0.00136923, 0.05404300],
["shadows", 0.00069079, 0.00013251, 0.00958950],
["deliver", 0.00069079, 0.00013251, 0.01512500],
["suspect", 0.00069079, 0.00013251, 0.07565400],
["conduct", 0.00069079, 0.00013251, 0.00917600],
["recession", 0.00069079, 0.00013251, 0.02585600],
["families", 0.00069079, 0.00013251, 0.01829000],
["excuse", 0.00069079, 0.00013251, 0.01014300],
["poor", 0.00069079, 0.00013251, 0.01835500],
["glad", 0.00069079, 0.00013251, 0.04595600],
["london", 0.00069079, 0.00013251, 0.00815520],
["fee", 0.00069079, 0.00013251, 0.06084100],
["sadly", 0.00069079, 0.00013251, 0.03773700],
["irish", 0.00069054, 0.00035335, 0.13403000],
["the", 0.00069008, 0.07775888, 0.37829000],
["world", 0.00067480, 0.00108213, 0.09473000],
["secure", 0.00067167, 0.00022084, 0.03283900],
["seeking", 0.00067167, 0.00022084, 0.04644800],
["macedonia", 0.00067167, 0.00022084, 0.17389000],
["banks", 0.00066897, 0.00057419, 0.11501000],
["operators", 0.00066364, 0.00030918, 0.10097000],
["access", 0.00065659, 0.00046377, 0.03402100],
["agenda", 0.00065659, 0.00046377, 0.15424000],
["framework", 0.00064998, 0.00064045, 0.12441000],
["was", 0.00064596, 0.00282679, 0.17714000],
["jobs", 0.00064553, 0.00066253, 0.15948000],
["interest", 0.00064279, 0.00048585, 0.09284600],
["money", 0.00062804, 0.00090546, 0.10613000],
["services", 0.00062237, 0.00053002, 0.07822900],
["fair", 0.00061476, 0.00055211, 0.07082300],
["standards", 0.00059885, 0.00061836, 0.15451000],
["global", 0.00059885, 0.00061836, 0.09567500],
["more", 0.00059593, 0.00293721, 0.20248000],
["benefits", 0.00059052, 0.00035335, 0.10176000],
["economy", 0.00058983, 0.00068461, 0.09245500],
["told", 0.00058982, 0.00028710, 0.00733570],
["cooperation", 0.00058632, 0.00072878, 0.33848000],
["work", 0.00058551, 0.00145756, 0.19123000],
["get", 0.00058425, 0.00077295, 0.09034500],
["use", 0.00058365, 0.00079504, 0.12336000],
["commitments", 0.00058123, 0.00019876, 0.03432900],
["month", 0.00058123, 0.00019876, 0.02528100],
["equivalent", 0.00058123, 0.00019876, 0.07688000],
["northern", 0.00058123, 0.00019876, 0.15202000],
["whilst", 0.00058123, 0.00019876, 0.06963000],
["lukashenko", 0.00058123, 0.00019876, 0.14499000],
["many", 0.00058107, 0.00174466, 0.24745000],
["third", 0.00057580, 0.00048585, 0.12276000],
["everybody", 0.00057566, 0.00011042, 0.01887000],
["clarity", 0.00057566, 0.00011042, 0.05418700],
["chain", 0.00057566, 0.00011042, 0.01899500],
["lastly", 0.00057566, 0.00011042, 0.01284000],
["maybe", 0.00057566, 0.00011042, 0.01663300],
["investments", 0.00057566, 0.00011042, 0.01823900],
["frankly", 0.00057566, 0.00011042, 0.01601000],
["post", 0.00057566, 0.00011042, 0.01450900],
["rate", 0.00057566, 0.00011042, 0.01515300],
["failing", 0.00057033, 0.00024293, 0.04582000],
["seek", 0.00057033, 0.00024293, 0.00887600],
["medicines", 0.00057033, 0.00024293, 0.20931000],
["short-term", 0.00057033, 0.00024293, 0.03125600],
["over", 0.00056441, 0.00123672, 0.19464000],
["credit", 0.00055205, 0.00030918, 0.10260000],
["resources", 0.00055205, 0.00030918, 0.15391000],
["under", 0.00054872, 0.00103796, 0.19014000],
["change", 0.00054461, 0.00097171, 0.24638000],
["less", 0.00053934, 0.00041960, 0.09825200],
["come", 0.00053866, 0.00081712, 0.09809300],
["state", 0.00053784, 0.00134714, 0.08044600],
["an", 0.00052796, 0.00408560, 0.34540000],
["animal", 0.00052617, 0.00033126, 0.43848000],
["them", 0.00052369, 0.00154590, 0.40272000],
["proposed", 0.00052126, 0.00046377, 0.07103600],
["attacks", 0.00051791, 0.00026501, 0.07841600],
["belarus", 0.00051791, 0.00026501, 0.27334000],
["colleagues", 0.00051791, 0.00026501, 0.05888900],
["gaza", 0.00050968, 0.00050794, 0.23557000],
["products", 0.00050748, 0.00035335, 0.31976000],
["since", 0.00050554, 0.00053002, 0.08439400],
["out", 0.00050464, 0.00167841, 0.26686000],
["there", 0.00049653, 0.00430644, 0.25915000],
["rules", 0.00049601, 0.00061836, 0.30057000],
["labour", 0.00049351, 0.00037543, 0.07335200],
["stability", 0.00049351, 0.00037543, 0.12922000],
["cuba", 0.00049302, 0.00017667, 0.22124000],
["discipline", 0.00049302, 0.00017667, 0.05298300],
["relations", 0.00049302, 0.00017667, 0.10779000],
["sources", 0.00049302, 0.00017667, 0.05298700],
["record", 0.00049302, 0.00017667, 0.09184600],
["turkish", 0.00049302, 0.00017667, 0.07753500],
["compromises", 0.00049302, 0.00017667, 0.13970000],
["regret", 0.00049302, 0.00017667, 0.07353600],
["alde", 0.00049302, 0.00017667, 0.09756400],
["welfare", 0.00049280, 0.00022084, 0.38266000],
["patients", 0.00049280, 0.00022084, 0.23850000],
["projects", 0.00049280, 0.00022084, 0.10579000],
["urge", 0.00048549, 0.00028710, 0.08580200],
["regarding", 0.00048549, 0.00028710, 0.09579900],
["europol", 0.00048549, 0.00028710, 0.30561000],
["businesses", 0.00048283, 0.00039752, 0.17997000],
["government", 0.00047919, 0.00112630, 0.10428000],
["financial", 0.00046883, 0.00136923, 0.20056000],
["food", 0.00046804, 0.00044169, 0.16545000],
["he", 0.00046716, 0.00099379, 0.29353000],
["lives", 0.00046358, 0.00030918, 0.13064000],
["join", 0.00046358, 0.00030918, 0.08651700],
["address", 0.00046358, 0.00030918, 0.05182900],
["pay", 0.00046358, 0.00030918, 0.14578000],
["civilians", 0.00046358, 0.00030918, 0.09292500],
["dimension", 0.00046053, 0.00008834, 0.02648400],
["deaths", 0.00046053, 0.00008834, 0.06614400],
["helping", 0.00046053, 0.00008834, 0.04280800],
["s", 0.00046053, 0.00008834, 0.10251000],
["solvency", 0.00046053, 0.00008834, 0.09885700],
["professionals", 0.00046053, 0.00008834, 0.15962000],
["business", 0.00045339, 0.00053002, 0.16599000],
["look", 0.00044866, 0.00066253, 0.11425000],
["powers", 0.00044809, 0.00024293, 0.15572000],
["expected", 0.00044809, 0.00024293, 0.09303600],
["practices", 0.00044809, 0.00024293, 0.08129600],
["coming", 0.00044809, 0.00024293, 0.01714900],
["investment", 0.00044809, 0.00024293, 0.03333700],
["confidence", 0.00044809, 0.00024293, 0.03762300],
["past", 0.00044797, 0.00033126, 0.06629100],
["un", 0.00044797, 0.00033126, 0.08095200],
["value", 0.00043647, 0.00035335, 0.10649000],
["might", 0.00043647, 0.00035335, 0.03910600],
["partnership", 0.00043647, 0.00035335, 0.19236000],
["week", 0.00042782, 0.00037543, 0.07779300]
]
}
osvow = {
"title": "ORG (spoken) vs. ORG (written)",
"terms": [
["euh", 0.09633409, 0.02102153, 0.00000000],
["you", 0.02140486, 0.00972162, 0.00000000],
["'ve", 0.01855840, 0.00362672, 0.00000000],
["'t", 0.01569518, 0.00345881, 0.00000000],
["we", 0.01484792, 0.02082004, 0.00000864],
["i", 0.01264441, 0.01811679, 0.00000189],
["that", 0.01224740, 0.02709963, 0.00000829],
["thank", 0.01148094, 0.00401289, 0.00000000],
["'re", 0.01109373, 0.00236744, 0.00000000],
["'s", 0.01061868, 0.00899963, 0.00000152],
["'m", 0.00746077, 0.00147755, 0.00000000],
["think", 0.00717772, 0.00369388, 0.00000050],
["don", 0.00621711, 0.00125928, 0.00000000],
["this", 0.00587547, 0.01304611, 0.00059879],
["very", 0.00563114, 0.00500353, 0.00008040],
["about", 0.00476960, 0.00413043, 0.00003830],
["'d", 0.00409951, 0.00085631, 0.00000002],
["hum", 0.00369391, 0.00092347, 0.00001696],
["because", 0.00310904, 0.00287115, 0.00011263],
["want", 0.00307503, 0.00223312, 0.00057311],
["'ll", 0.00306671, 0.00062124, 0.00000064],
["she", 0.00301866, 0.00082273, 0.03661300],
["and", 0.00301455, 0.03776151, 0.12378000],
["iraq", 0.00285131, 0.00048692, 0.15954000],
["got", 0.00275224, 0.00080594, 0.00000716],
["it", 0.00263665, 0.01314685, 0.08638600],
["at", 0.00262916, 0.00493636, 0.02568800],
["so", 0.00248249, 0.00396252, 0.00379450],
["but", 0.00235001, 0.00634675, 0.03569600],
["know", 0.00223549, 0.00152792, 0.00055865],
["what", 0.00220259, 0.00379462, 0.02912100],
["our", 0.00208613, 0.00505390, 0.07023200],
["say", 0.00200169, 0.00172941, 0.00086378],
["had", 0.00198240, 0.00174620, 0.00303480],
["here", 0.00196746, 0.00157829, 0.00201660],
["important", 0.00193871, 0.00181336, 0.00719570],
["people", 0.00185406, 0.00372746, 0.08619100],
["terms", 0.00173362, 0.00080594, 0.00284980],
["yes", 0.00172380, 0.00047013, 0.00331470],
["ow", 0.00167146, 0.00028544, 0.15954000],
["your", 0.00165132, 0.00115853, 0.01135400],
["just", 0.00159858, 0.00189731, 0.02235900],
["all", 0.00158511, 0.00465093, 0.09555000],
["isn", 0.00156228, 0.00030223, 0.00096069],
["evening", 0.00154635, 0.00028544, 0.00069755],
["actually", 0.00153624, 0.00125928, 0.02904800],
["hm", 0.00148506, 0.00033581, 0.00565710],
["together", 0.00142870, 0.00078915, 0.01041100],
["house", 0.00138741, 0.00105779, 0.01101400],
["course", 0.00136894, 0.00146076, 0.06251700],
["things", 0.00135968, 0.00062124, 0.00628620],
["feed", 0.00135183, 0.00050371, 0.02554300],
["parliament", 0.00132912, 0.00241781, 0.14269000],
["look", 0.00131865, 0.00136002, 0.04925800],
["much", 0.00129427, 0.00194768, 0.02192300],
["gonna", 0.00128514, 0.00023506, 0.00371930],
["cod", 0.00127817, 0.00021827, 0.15954000],
["they", 0.00125688, 0.00463414, 0.08616100],
["doesn", 0.00123565, 0.00025186, 0.00079774],
["colleagues", 0.00119095, 0.00080594, 0.00503380],
["well", 0.00117644, 0.00169583, 0.04906800],
["back", 0.00116608, 0.00110816, 0.00657190],
["v", 0.00115920, 0.00020148, 0.05599000],
["pacific", 0.00115920, 0.00020148, 0.11441000],
["reporting", 0.00114569, 0.00021827, 0.02038900],
["issue", 0.00114178, 0.00112495, 0.02611600],
["me", 0.00114166, 0.00112495, 0.02004500],
["particularly", 0.00112747, 0.00095705, 0.02402900],
["w", 0.00112104, 0.00021827, 0.03731300],
["car", 0.00112104, 0.00021827, 0.01927500],
["f", 0.00110605, 0.00020148, 0.04316300],
["big", 0.00109218, 0.00048692, 0.02509900],
["emas", 0.00108153, 0.00018469, 0.08251700],
["fourty", 0.00108153, 0.00018469, 0.01143400],
["didn", 0.00107803, 0.00021827, 0.00119650],
["report", 0.00105981, 0.00213238, 0.14089000],
["misses", 0.00104605, 0.00031902, 0.00596590],
["whole", 0.00102287, 0.00077236, 0.02984400],
["going", 0.00101139, 0.00094026, 0.01373700],
["can", 0.00100714, 0.00335807, 0.13918000],
["wasn", 0.00100440, 0.00020148, 0.00319530],
["looking", 0.00100335, 0.00067161, 0.04127700],
["commissioner", 0.00099902, 0.00179657, 0.06215700],
["pcbs", 0.00098321, 0.00016790, 0.10350000],
["was", 0.00097989, 0.00347560, 0.10372000],
["somebody", 0.00097963, 0.00018469, 0.00296020],
["perhaps", 0.00095137, 0.00077236, 0.02477400],
["t", 0.00094234, 0.00018469, 0.02061700],
["debates", 0.00094234, 0.00018469, 0.00620460],
["now", 0.00091925, 0.00292152, 0.18000000],
["m", 0.00089684, 0.00016790, 0.03233700],
["debate", 0.00089486, 0.00119212, 0.03304300],
["committee", 0.00088985, 0.00136002, 0.13202000],
["work", 0.00088841, 0.00201484, 0.13383000],
["really", 0.00088715, 0.00099063, 0.03063000],
["cause", 0.00088166, 0.00033581, 0.03076600],
["if", 0.00088070, 0.00305584, 0.10138000],
["working", 0.00086555, 0.00073878, 0.05344600],
["were", 0.00085258, 0.00157829, 0.15017000],
["point", 0.00081658, 0.00100742, 0.06185900],
["sure", 0.00080547, 0.00068840, 0.05025400],
["go", 0.00080083, 0.00097384, 0.11144000],
["get", 0.00079965, 0.00124249, 0.02846200],
["there", 0.00079143, 0.00485241, 0.35193000],
["those", 0.00078886, 0.00230028, 0.09897500],
["coming", 0.00078602, 0.00062124, 0.03293400],
["copyright", 0.00077520, 0.00015111, 0.10797000],
["into", 0.00075835, 0.00196447, 0.20363000],
["give", 0.00075454, 0.00072199, 0.06712900],
["p", 0.00074833, 0.00013432, 0.00264070],
["referendums", 0.00074833, 0.00013432, 0.04047600],
["question", 0.00074557, 0.00077236, 0.10911000],
["who", 0.00074465, 0.00283757, 0.08520800],
["thing", 0.00073032, 0.00038618, 0.06339800],
["good", 0.00072687, 0.00115853, 0.07400800],
["some", 0.00071198, 0.00236744, 0.43026000],
["referendum", 0.00071107, 0.00050371, 0.24000000],
["pleased", 0.00070999, 0.00033581, 0.04711600],
["doha", 0.00070405, 0.00013432, 0.05987200],
["tonight", 0.00070109, 0.00040297, 0.11332000],
["documents", 0.00070077, 0.00015111, 0.13042000],
["morning", 0.00069123, 0.00028544, 0.02571700],
["indeed", 0.00066840, 0.00100742, 0.02679100],
["won", 0.00065535, 0.00015111, 0.00471150],
["last", 0.00065065, 0.00125928, 0.03806300],
["o", 0.00064154, 0.00021827, 0.18245000],
["trying", 0.00064011, 0.00038618, 0.02052600],
["anti-discrimination", 0.00063498, 0.00011753, 0.03531100],
["tell", 0.00063399, 0.00026865, 0.02286900],
["regard", 0.00063027, 0.00041976, 0.16020000],
["moment", 0.00062934, 0.00050371, 0.06180300],
["syria", 0.00061799, 0.00013432, 0.13214000],
["previous", 0.00061092, 0.00013432, 0.01125800],
["lot", 0.00060867, 0.00047013, 0.01996700],
["hearing", 0.00060823, 0.00011753, 0.01049700],
["my", 0.00060135, 0.00256892, 0.16913000],
["fraud", 0.00059660, 0.00011753, 0.15954000],
["approval", 0.00059660, 0.00011753, 0.01470700],
["consideration", 0.00058590, 0.00011753, 0.01518500],
["irish", 0.00058563, 0.00067161, 0.36750000],
["treaty", 0.00057796, 0.00105779, 0.38244000],
["parliaments", 0.00057151, 0.00025186, 0.04652100],
["talking", 0.00056937, 0.00033581, 0.01162100],
["find", 0.00056786, 0.00058766, 0.11860000],
["happy", 0.00056522, 0.00020148, 0.03693300],
["proposal", 0.00056239, 0.00114174, 0.16646000],
["putting", 0.00055298, 0.00028544, 0.03176300],
["british", 0.00055204, 0.00075557, 0.15419000],
["process", 0.00054587, 0.00070519, 0.20001000],
["where", 0.00054505, 0.00154471, 0.24447000],
["different", 0.00054476, 0.00065482, 0.28669000],
["please", 0.00054440, 0.00031902, 0.07251300],
["introduce", 0.00054256, 0.00011753, 0.01354600],
["saying", 0.00052678, 0.00043655, 0.03568700],
["n", 0.00052486, 0.00010074, 0.01934700],
["syrian", 0.00051748, 0.00018469, 0.23235000],
["haven", 0.00051615, 0.00011753, 0.01961100],
["r", 0.00051595, 0.00018469, 0.10157000],
["negotiate", 0.00051297, 0.00010074, 0.01176600],
["aren", 0.00051297, 0.00010074, 0.01742100],
["damage", 0.00051293, 0.00018469, 0.02896300],
["ratification", 0.00050998, 0.00018469, 0.04152000],
["already", 0.00050993, 0.00053729, 0.08866500],
["property", 0.00050569, 0.00018469, 0.07145700],
["said", 0.00050335, 0.00104100, 0.09534600],
["couldn", 0.00050220, 0.00010074, 0.02641700],
["again", 0.00050164, 0.00092347, 0.06917800],
["bit", 0.00049841, 0.00033581, 0.05331200],
["feel", 0.00049211, 0.00030223, 0.05013400],
["companies", 0.00049098, 0.00055408, 0.14581000],
["something", 0.00048743, 0.00050371, 0.09667500],
["talk", 0.00048076, 0.00030223, 0.09304000],
["minister", 0.00047877, 0.00047013, 0.12789000],
["pointed", 0.00047495, 0.00010074, 0.00995720],
["chair", 0.00046571, 0.00021827, 0.02603900],
["shouldn", 0.00046409, 0.00011753, 0.01487700],
["re", 0.00045993, 0.00010074, 0.02425800],
["let", 0.00045911, 0.00080594, 0.13327000],
["ha", 0.00045313, 0.00010074, 0.03224400],
["see", 0.00045310, 0.00130965, 0.17785000],
["getting", 0.00045261, 0.00031902, 0.04547400],
["issues", 0.00045253, 0.00087310, 0.21956000],
["corporate", 0.00044925, 0.00016790, 0.08274700],
["hear", 0.00044804, 0.00040297, 0.03771700],
["place", 0.00044592, 0.00082273, 0.10003000],
["legislation", 0.00044464, 0.00100742, 0.09331700],
["interesting", 0.00044399, 0.00016790, 0.04159900],
["mechanisms", 0.00044146, 0.00016790, 0.06627600],
["doing", 0.00043747, 0.00055408, 0.15853000],
["relation", 0.00043464, 0.00036939, 0.07493300],
["times", 0.00043270, 0.00036939, 0.08097800],
["ought", 0.00042453, 0.00010074, 0.04648300],
["him", 0.00042157, 0.00033581, 0.12452000],
["worked", 0.00042062, 0.00033581, 0.18668000],
["laid", 0.00041850, 0.00008395, 0.02876500],
["th", 0.00041850, 0.00008395, 0.01634300],
["programs", 0.00041850, 0.00008395, 0.02360000],
["weren", 0.00041850, 0.00008395, 0.03443300],
["seen", 0.00041778, 0.00047013, 0.18235000],
["maybe", 0.00041076, 0.00030223, 0.05281100],
["ireland", 0.00040883, 0.00075557, 0.21383000],
["engage", 0.00040874, 0.00008395, 0.05462800],
["wouldn", 0.00040874, 0.00008395, 0.01550600],
["complex", 0.00040874, 0.00008395, 0.02358000],
["delighted", 0.00040655, 0.00020148, 0.04400500],
["man", 0.00040472, 0.00020148, 0.13691000],
["also", 0.00040453, 0.00282078, 0.48669000],
["her", 0.00040236, 0.00043655, 0.35098000],
["steps", 0.00040224, 0.00010074, 0.00911790],
["traditional", 0.00039991, 0.00008395, 0.03912400],
["attitude", 0.00039991, 0.00008395, 0.05296500],
["traffic", 0.00039991, 0.00008395, 0.04128600],
["regulate", 0.00039991, 0.00008395, 0.02987900],
["organisations", 0.00039836, 0.00023506, 0.30728000],
["side", 0.00039708, 0.00026865, 0.12711000],
["done", 0.00039524, 0.00095705, 0.17565000],
["session", 0.00039185, 0.00008395, 0.01329000],
["prove", 0.00039185, 0.00008395, 0.01411600],
["bring", 0.00039032, 0.00060445, 0.15287000],
["us", 0.00038876, 0.00216596, 0.28295000],
["program", 0.00038445, 0.00008395, 0.05644400],
["aspect", 0.00038445, 0.00008395, 0.04174500],
["vat", 0.00038328, 0.00015111, 0.09417700],
["heard", 0.00038327, 0.00040297, 0.08657100],
["food", 0.00038325, 0.00067161, 0.29133000],
["implications", 0.00038214, 0.00015111, 0.08327400],
["covered", 0.00037879, 0.00015111, 0.09390100],
["minutes", 0.00037879, 0.00015111, 0.07411900],
["room", 0.00037770, 0.00015111, 0.03297600],
["forward", 0.00037671, 0.00092347, 0.30466000],
["comes", 0.00037206, 0.00031902, 0.09716900],
["came", 0.00036525, 0.00028544, 0.10075000],
["making", 0.00036518, 0.00047013, 0.11632000],
["women", 0.00036368, 0.00028544, 0.11160000],
["hasn", 0.00035981, 0.00008395, 0.04191500],
["tha", 0.00035981, 0.00008395, 0.01643900],
["chamber", 0.00035630, 0.00025186, 0.10152000],
["local", 0.00035208, 0.00025186, 0.10303000],
["up", 0.00035096, 0.00176299, 0.28776000],
["went", 0.00034832, 0.00018469, 0.07536300],
["absolutely", 0.00034782, 0.00018469, 0.03060000],
["few", 0.00034775, 0.00033581, 0.11599000],
["challenges", 0.00034732, 0.00018469, 0.11891000],
["sometimes", 0.00034633, 0.00018469, 0.06390600],
["talked", 0.00034633, 0.00018469, 0.06261300],
["points", 0.00034566, 0.00038618, 0.09212700],
["beginning", 0.00034298, 0.00018469, 0.08968000],
["particular", 0.00034123, 0.00078915, 0.20631000],
["able", 0.00034086, 0.00078915, 0.24134000],
["context", 0.00033501, 0.00030223, 0.16236000]
]
}
osvi = {
"title": "ORG (spoken) vs. SITR (spoken)",
"terms": [
["i", 0.00691985, 0.01811679, 0.00690470],
["the", 0.00627184, 0.06612042, 0.07359200],
["of", 0.00607479, 0.03238860, 0.06737900],
["on", 0.00470029, 0.00987273, 0.00390810],
["and", 0.00370189, 0.03776151, 0.05203800],
["our", 0.00330152, 0.00505390, 0.00909200],
["my", 0.00325766, 0.00256892, 0.00017945],
["british", 0.00315225, 0.00075557, 0.00924450],
["feed", 0.00256161, 0.00050371, 0.00322400],
["iraq", 0.00247622, 0.00048692, 0.15954000],
["by", 0.00238901, 0.00359314, 0.00645790],
["that", 0.00224544, 0.02709963, 0.08061100],
["some", 0.00221019, 0.00236744, 0.07720800],
["report", 0.00213053, 0.00213238, 0.03347100],
["work", 0.00196264, 0.00201484, 0.01405200],
["believe", 0.00194179, 0.00085631, 0.00235510],
["was", 0.00192274, 0.00347560, 0.00603850],
["it", 0.00187846, 0.01314685, 0.13321000],
["irish", 0.00187670, 0.00067161, 0.02146800],
["food", 0.00187670, 0.00067161, 0.00891010],
["all", 0.00187448, 0.00465093, 0.11016000],
["health", 0.00186025, 0.00075557, 0.04231900],
["their", 0.00181168, 0.00315659, 0.01448400],
["ireland", 0.00172099, 0.00075557, 0.00540750],
["she", 0.00170703, 0.00082273, 0.07769700],
["as", 0.00168282, 0.00634675, 0.18602000],
["in", 0.00160689, 0.02313711, 0.15992000],
["his", 0.00155837, 0.00104100, 0.01494700],
["recognise", 0.00153697, 0.00030223, 0.00016274],
["actually", 0.00153360, 0.00125928, 0.03373800],
["had", 0.00150735, 0.00174620, 0.02900300],
["tonight", 0.00148367, 0.00040297, 0.00567320],
["at", 0.00147261, 0.00493636, 0.03092100],
["ow", 0.00145158, 0.00028544, 0.15954000],
["enforcement", 0.00145158, 0.00028544, 0.01617200],
["even", 0.00144587, 0.00097384, 0.00036536],
["last", 0.00141570, 0.00125928, 0.00112020],
["referendum", 0.00140753, 0.00050371, 0.09726700],
["legislation", 0.00139506, 0.00100742, 0.02270500],
["indeed", 0.00139506, 0.00100742, 0.00525960],
["may", 0.00137548, 0.00083952, 0.01725400],
["britain", 0.00136619, 0.00026865, 0.00193260],
["relation", 0.00133111, 0.00036939, 0.00186350],
["proposal", 0.00130039, 0.00114174, 0.07566400],
["chamber", 0.00128081, 0.00025186, 0.00672820],
["kingdom", 0.00128081, 0.00025186, 0.00080461],
["accountability", 0.00128081, 0.00025186, 0.02431600],
["but", 0.00125895, 0.00634675, 0.18852000],
["record", 0.00125550, 0.00035260, 0.01633600],
["those", 0.00125006, 0.00230028, 0.03952600],
["me", 0.00120356, 0.00112495, 0.03329900],
["commitments", 0.00119542, 0.00023506, 0.00406020],
["members", 0.00117203, 0.00065482, 0.03983200],
["business", 0.00114883, 0.00070519, 0.06787900],
["regulation", 0.00112102, 0.00063803, 0.03306800],
["cod", 0.00111003, 0.00021827, 0.15954000],
["chair", 0.00111003, 0.00021827, 0.00199080],
["face", 0.00111003, 0.00021827, 0.00058163],
["requirements", 0.00110579, 0.00031902, 0.00549030],
["from", 0.00105322, 0.00402969, 0.05717500],
["issue", 0.00104706, 0.00112495, 0.02917200],
["house", 0.00104583, 0.00105779, 0.00529700],
["earlier", 0.00103179, 0.00030223, 0.00857950],
["into", 0.00103120, 0.00196447, 0.18329000],
["an", 0.00103109, 0.00357635, 0.04809700],
["v", 0.00102464, 0.00020148, 0.05599000],
["chain", 0.00102464, 0.00020148, 0.02803100],
["pacific", 0.00102464, 0.00020148, 0.11441000],
["where", 0.00099843, 0.00154471, 0.06653000],
["consumer", 0.00096082, 0.00038618, 0.05640600],
["emas", 0.00093926, 0.00018469, 0.08251700],
["fourty", 0.00093926, 0.00018469, 0.01143400],
["industries", 0.00093926, 0.00018469, 0.01155500],
["syrian", 0.00093926, 0.00018469, 0.15954000],
["him", 0.00093835, 0.00033581, 0.01081300],
["services", 0.00093116, 0.00065482, 0.01258400],
["perhaps", 0.00091411, 0.00077236, 0.06221600],
["course", 0.00091310, 0.00146076, 0.05615300],
["farmers", 0.00090716, 0.00053729, 0.03105200],
["place", 0.00086638, 0.00082273, 0.01612800],
["vote", 0.00085776, 0.00052050, 0.04428800],
["commend", 0.00085387, 0.00016790, 0.00639830],
["corporate", 0.00085387, 0.00016790, 0.04773800],
["pcbs", 0.00085387, 0.00016790, 0.10350000],
["professionals", 0.00085387, 0.00016790, 0.13578000],
["looking", 0.00084263, 0.00067161, 0.05727600],
["forward", 0.00084113, 0.00092347, 0.13585000],
["debate", 0.00082656, 0.00119212, 0.04922500],
["very", 0.00082300, 0.00500353, 0.37943000],
["this", 0.00081751, 0.01304611, 0.22339000],
["he", 0.00079263, 0.00117532, 0.23405000],
["back", 0.00078200, 0.00110816, 0.09418600],
["to", 0.00077970, 0.03742570, 0.61131000],
["systems", 0.00077961, 0.00033581, 0.03656100],
["consumers", 0.00077919, 0.00052050, 0.07285000],
["over", 0.00077786, 0.00120891, 0.04992000],
["many", 0.00077694, 0.00141039, 0.12153000],
["years", 0.00077076, 0.00122570, 0.01619900],
["tribute", 0.00076848, 0.00015111, 0.01105700],
["dossier", 0.00076848, 0.00015111, 0.01322200],
["refused", 0.00076848, 0.00015111, 0.00790590],
["committee", 0.00075734, 0.00136002, 0.24660000],
["yet", 0.00075310, 0.00058766, 0.03011700],
["second", 0.00073047, 0.00041976, 0.04168000],
["let", 0.00072521, 0.00080594, 0.05975300],
["staff", 0.00072088, 0.00031902, 0.09156600],
["public", 0.00071608, 0.00062124, 0.08559200],
["best", 0.00071346, 0.00047013, 0.09452000],
["did", 0.00071000, 0.00057087, 0.04028500],
["member", 0.00070502, 0.00253534, 0.29929000],
["use", 0.00069722, 0.00070519, 0.06085800],
["welcome", 0.00069722, 0.00070519, 0.06775300],
["suggested", 0.00068310, 0.00013432, 0.01732100],
["increasing", 0.00068310, 0.00013432, 0.00861660],
["religion", 0.00068310, 0.00013432, 0.02844600],
["referendums", 0.00068310, 0.00013432, 0.04047600],
["ensuring", 0.00068310, 0.00013432, 0.00352260],
["stakeholders", 0.00068310, 0.00013432, 0.05108400],
["working", 0.00067290, 0.00073878, 0.14935000],
["support", 0.00067082, 0.00151113, 0.30934000],
["existing", 0.00066826, 0.00036939, 0.04453000],
["maybe", 0.00066309, 0.00030223, 0.00799430],
["ways", 0.00066309, 0.00030223, 0.09035300],
["coming", 0.00065975, 0.00062124, 0.06074600],
["legal", 0.00064371, 0.00052050, 0.22590000],
["particular", 0.00064143, 0.00078915, 0.09200700],
["you", 0.00063748, 0.00972162, 0.38216000],
["proud", 0.00062008, 0.00025186, 0.06442000],
["while", 0.00060629, 0.00028544, 0.08553300],
["crucial", 0.00060629, 0.00028544, 0.03828500],
["global", 0.00060135, 0.00050371, 0.14136000],
["authorities", 0.00060084, 0.00040297, 0.10854000],
["package", 0.00060084, 0.00040297, 0.14665000],
["inclusion", 0.00059771, 0.00011753, 0.02882300],
["assets", 0.00059771, 0.00011753, 0.08907500],
["argument", 0.00059771, 0.00011753, 0.01014900],
["core", 0.00059771, 0.00011753, 0.00545350],
["ii", 0.00059771, 0.00011753, 0.02624600],
["industry", 0.00058318, 0.00065482, 0.05199900],
["who", 0.00057917, 0.00283757, 0.09889100],
["am", 0.00057744, 0.00047013, 0.03550000],
["'ve", 0.00056947, 0.00362672, 0.22194000],
["initiative", 0.00056697, 0.00033581, 0.03942700],
["access", 0.00055968, 0.00048692, 0.08611000],
["document", 0.00055947, 0.00023506, 0.03803400],
["organisations", 0.00055947, 0.00023506, 0.15736000],
["issues", 0.00055730, 0.00087310, 0.15715000],
["lives", 0.00055056, 0.00026865, 0.09254000],
["trade", 0.00054254, 0.00057087, 0.16363000],
["must", 0.00053601, 0.00114174, 0.11569000],
["levels", 0.00053572, 0.00018469, 0.03271700],
["damage", 0.00053572, 0.00018469, 0.04718400],
["families", 0.00053572, 0.00018469, 0.03214400],
["due", 0.00053572, 0.00018469, 0.01464800],
["amongst", 0.00053572, 0.00018469, 0.02568400],
["r", 0.00053572, 0.00018469, 0.07969000],
["does", 0.00052956, 0.00052050, 0.12909000],
["particularly", 0.00052451, 0.00095705, 0.17990000],
["do", 0.00051916, 0.00313980, 0.18838000],
["making", 0.00051872, 0.00047013, 0.10956000],
["recognised", 0.00051232, 0.00010074, 0.01591000],
["caught", 0.00051232, 0.00010074, 0.00992190],
["hopefully", 0.00051232, 0.00010074, 0.04082000],
["collective", 0.00051232, 0.00010074, 0.03245600],
["voted", 0.00051232, 0.00010074, 0.00986450],
["spend", 0.00051232, 0.00010074, 0.00847940],
["setting", 0.00051232, 0.00010074, 0.06293900],
["invest", 0.00051232, 0.00010074, 0.00767940],
["such", 0.00051227, 0.00078915, 0.18284000],
["times", 0.00050924, 0.00036939, 0.05259900],
["across", 0.00050397, 0.00048692, 0.06500500],
["costs", 0.00050388, 0.00028544, 0.06730600],
["projects", 0.00050009, 0.00021827, 0.07731300],
["range", 0.00050009, 0.00021827, 0.01576900],
["emissions", 0.00050009, 0.00021827, 0.02820900],
["regulators", 0.00050009, 0.00021827, 0.18011000],
["elected", 0.00050009, 0.00021827, 0.03443300],
["balance", 0.00049598, 0.00025186, 0.04070400],
["her", 0.00049383, 0.00043655, 0.17878000],
["off", 0.00048765, 0.00038618, 0.03352600],
["find", 0.00048351, 0.00058766, 0.20710000],
["agreement", 0.00048161, 0.00097384, 0.22919000],
["rapporteur", 0.00047850, 0.00045334, 0.11404000],
["come", 0.00047084, 0.00100742, 0.10596000],
["pay", 0.00046959, 0.00030223, 0.15129000],
["relationship", 0.00046918, 0.00016790, 0.17219000],
["cars", 0.00046918, 0.00016790, 0.06629800],
["recession", 0.00046918, 0.00016790, 0.04648900],
["constituency", 0.00046918, 0.00016790, 0.04377600],
["voters", 0.00046918, 0.00016790, 0.07774000],
["year", 0.00046584, 0.00080594, 0.08560700],
["sector", 0.00046482, 0.00047013, 0.07982500],
["your", 0.00046127, 0.00115853, 0.24488000],
["union", 0.00045947, 0.00183015, 0.31279000],
["serious", 0.00045320, 0.00041976, 0.12912000],
["around", 0.00044500, 0.00036939, 0.04765400],
["uk", 0.00044206, 0.00020148, 0.08279400],
["delighted", 0.00044206, 0.00020148, 0.04902100],
["probably", 0.00044206, 0.00020148, 0.02373400],
["stage", 0.00044206, 0.00020148, 0.01519300],
["cross-border", 0.00044206, 0.00020148, 0.05814500],
["bring", 0.00043040, 0.00060445, 0.12795000],
["areas", 0.00042810, 0.00038618, 0.14303000],
["shadows", 0.00042694, 0.00008395, 0.01513400],
["consistent", 0.00042694, 0.00008395, 0.02793000],
["regime", 0.00042694, 0.00008395, 0.05994200],
["list", 0.00042694, 0.00008395, 0.05056000],
["gay", 0.00042694, 0.00008395, 0.08148000],
["submitted", 0.00042694, 0.00008395, 0.04358900],
["extra", 0.00042694, 0.00008395, 0.02063700],
["sovereign", 0.00042694, 0.00008395, 0.03451100],
["plant", 0.00042694, 0.00008395, 0.04500100],
["book", 0.00042694, 0.00008395, 0.06163400],
["faith", 0.00042694, 0.00008395, 0.01527400],
["nothing", 0.00042252, 0.00028544, 0.03061500],
["aid", 0.00042252, 0.00028544, 0.14530000],
["came", 0.00042252, 0.00028544, 0.05934600],
["which", 0.00042247, 0.00396252, 0.19304000],
["worked", 0.00042132, 0.00033581, 0.10076000],
["between", 0.00042053, 0.00094026, 0.07552500],
["hear", 0.00041340, 0.00040297, 0.04456000],
["will", 0.00040978, 0.00465093, 0.30212000],
["effect", 0.00040450, 0.00025186, 0.03132000],
["local", 0.00040450, 0.00025186, 0.09355100],
["strongly", 0.00040416, 0.00015111, 0.02095300],
["speech", 0.00040416, 0.00015111, 0.06264300],
["caused", 0.00040416, 0.00015111, 0.06784000],
["mutual", 0.00040416, 0.00015111, 0.15871000],
["interim", 0.00040416, 0.00015111, 0.16240000],
["early", 0.00040416, 0.00015111, 0.02721300],
["frankly", 0.00040416, 0.00015111, 0.02198900],
["takes", 0.00040416, 0.00015111, 0.05460400],
["detail", 0.00040416, 0.00015111, 0.12731000],
["wider", 0.00040416, 0.00015111, 0.09766800],
["important", 0.00040231, 0.00181336, 0.44057000],
["whole", 0.00040225, 0.00077236, 0.18329000],
["concerns", 0.00039839, 0.00030223, 0.21993000],
["what", 0.00039728, 0.00379462, 0.46166000],
["car", 0.00039068, 0.00021827, 0.05315300],
["despite", 0.00039068, 0.00021827, 0.03474000],
["comments", 0.00039068, 0.00021827, 0.07040000],
["history", 0.00039068, 0.00021827, 0.14876000],
["took", 0.00039068, 0.00021827, 0.17571000],
["safety", 0.00038788, 0.00036939, 0.18726000],
["jobs", 0.00038613, 0.00052050, 0.22132000],
["care", 0.00038552, 0.00018469, 0.15517000],
["met", 0.00038552, 0.00018469, 0.03357100],
["congratulate", 0.00038552, 0.00018469, 0.02386500],
["needed", 0.00038552, 0.00018469, 0.07083600],
["done", 0.00038105, 0.00095705, 0.18788000]
]
}
owvos = {
"title": "ORG (written) vs. ORG (spoken)",
"terms": [
["the", 0.01721251, 0.07775888, 0.00002504],
["is", 0.00776894, 0.01961087, 0.00056669],
["of", 0.00746952, 0.03747709, 0.00775670],
["eu", 0.00687078, 0.00488063, 0.00067786],
["not", 0.00572750, 0.00914291, 0.00052560],
["for", 0.00537463, 0.01375853, 0.00207860],
["must", 0.00531162, 0.00353349, 0.00010846],
["to", 0.00434510, 0.04048055, 0.01574500],
["sri", 0.00425140, 0.00068461, 0.01355400],
["by", 0.00383010, 0.00583026, 0.00317260],
["has", 0.00356932, 0.00552108, 0.00136990],
["be", 0.00353305, 0.01073298, 0.01227800],
["its", 0.00328165, 0.00278262, 0.00249940],
["are", 0.00317841, 0.00907665, 0.05370300],
["lanka", 0.00287998, 0.00046377, 0.01519500],
["a", 0.00274743, 0.02073717, 0.06314100],
["ltte", 0.00260570, 0.00041960, 0.05155200],
["asylum", 0.00257168, 0.00044169, 0.03474900],
["as", 0.00255230, 0.00801661, 0.01788900],
["system", 0.00245497, 0.00139131, 0.00244890],
["in", 0.00242062, 0.02484486, 0.17096000],
["capital", 0.00239068, 0.00055211, 0.00638730],
["such", 0.00230355, 0.00192134, 0.00091774],
["will", 0.00224467, 0.00609527, 0.03850000],
["species", 0.00224257, 0.00039752, 0.15962000],
["membership", 0.00218488, 0.00037543, 0.00309050],
["international", 0.00213672, 0.00123672, 0.00145580],
["however", 0.00211161, 0.00123672, 0.00105140],
["israel", 0.00207558, 0.00081712, 0.17266000],
["mrls", 0.00205713, 0.00033126, 0.15962000],
["economic", 0.00202849, 0.00185508, 0.00829330],
["europe", 0.00194527, 0.00196550, 0.02234500],
["justice", 0.00193105, 0.00055211, 0.00317890],
["states", 0.00190975, 0.00311389, 0.04346700],
["arrest", 0.00189230, 0.00030918, 0.00327730],
["financial", 0.00189104, 0.00136923, 0.01261300],
["crisis", 0.00170937, 0.00130297, 0.03362200],
["uk", 0.00168222, 0.00086129, 0.01004100],
["tamil", 0.00164570, 0.00026501, 0.10616000],
["e-money", 0.00164570, 0.00026501, 0.08895800],
["timber", 0.00164570, 0.00026501, 0.13994000],
["banks", 0.00159561, 0.00057419, 0.04189300],
["would", 0.00157290, 0.00397518, 0.26236000],
["countries", 0.00155503, 0.00178883, 0.02580000],
["debt", 0.00155244, 0.00059628, 0.06251700],
["profiling", 0.00150856, 0.00024293, 0.15962000],
["china", 0.00149503, 0.00055211, 0.05556400],
["belarus", 0.00148909, 0.00026501, 0.13829000],
["am", 0.00148656, 0.00119255, 0.02352400],
["citizens", 0.00148493, 0.00103796, 0.04590700],
["children", 0.00146991, 0.00092754, 0.18075000],
["failing", 0.00143280, 0.00024293, 0.01421700],
["government", 0.00142990, 0.00112630, 0.03235300],
["turkey", 0.00140302, 0.00044169, 0.05713200],
["general", 0.00137431, 0.00055211, 0.05360800],
["lankan", 0.00137142, 0.00022084, 0.01815600],
["macedonia", 0.00134411, 0.00022084, 0.13382000],
["eurozone", 0.00132250, 0.00024293, 0.02058800],
["state", 0.00131037, 0.00134714, 0.03095100],
["requirement", 0.00123797, 0.00022084, 0.00676830],
["while", 0.00121956, 0.00083920, 0.00534840],
["lukashenko", 0.00120711, 0.00019876, 0.07970600],
["warrant", 0.00120711, 0.00019876, 0.00978190],
["gaza", 0.00120552, 0.00050794, 0.06862800],
["should", 0.00115451, 0.00348932, 0.20642000],
["market", 0.00114556, 0.00139131, 0.05334500],
["civilians", 0.00114441, 0.00030918, 0.01434000],
["visa", 0.00111976, 0.00057419, 0.08763200],
["schengen", 0.00109918, 0.00050794, 0.08870000],
["trading", 0.00108955, 0.00039752, 0.01356700],
["euro", 0.00105113, 0.00053002, 0.03811100],
["travel", 0.00104720, 0.00055211, 0.13726000],
["cuba", 0.00104613, 0.00017667, 0.15962000],
["detention", 0.00104613, 0.00017667, 0.01611200],
["including", 0.00104362, 0.00070670, 0.00648640],
["resolution", 0.00104340, 0.00066253, 0.02213600],
["discipline", 0.00102452, 0.00017667, 0.02680500],
["policy", 0.00102272, 0.00090546, 0.05213200],
["member", 0.00101986, 0.00320223, 0.22584000],
["turkish", 0.00100488, 0.00017667, 0.04286300],
["dairy", 0.00100055, 0.00028710, 0.10247000],
["economies", 0.00099913, 0.00037543, 0.01245400],
["agency", 0.00099384, 0.00037543, 0.13221000],
["country", 0.00098867, 0.00112630, 0.11568000],
["council", 0.00098302, 0.00187717, 0.09643300],
["repression", 0.00097033, 0.00017667, 0.03031500],
["ensure", 0.00096744, 0.00103796, 0.03865900],
["which", 0.00096138, 0.00461562, 0.15226000],
["party", 0.00093665, 0.00046377, 0.03985200],
["markets", 0.00091857, 0.00070670, 0.05737900],
["programme", 0.00091297, 0.00026501, 0.01075900],
["external", 0.00090887, 0.00022084, 0.03164000],
["bank", 0.00090775, 0.00050794, 0.05041800],
["sovereign", 0.00090562, 0.00041960, 0.02564500],
["responsible", 0.00087789, 0.00030918, 0.04584800],
["energy", 0.00087202, 0.00081712, 0.32737000],
["security", 0.00086818, 0.00079504, 0.03364400],
["mechanism", 0.00085945, 0.00037543, 0.03969500],
["illegally", 0.00085307, 0.00015459, 0.07992900],
["transaction", 0.00085307, 0.00015459, 0.04369600],
["future", 0.00084792, 0.00092754, 0.01639800],
["common", 0.00084730, 0.00090546, 0.19811000],
["with", 0.00084063, 0.00658113, 0.20422000],
["strategy", 0.00083835, 0.00046377, 0.12758000],
["immediate", 0.00082219, 0.00033126, 0.01517800],
["action", 0.00081512, 0.00077295, 0.15647000],
["former", 0.00079984, 0.00024293, 0.02553500],
["path", 0.00077494, 0.00015459, 0.00447200],
["humanitarian", 0.00077051, 0.00046377, 0.06907800],
["proposed", 0.00077014, 0.00046377, 0.02059000],
["hamas", 0.00076249, 0.00035335, 0.29829000],
["against", 0.00076163, 0.00092754, 0.14536000],
["under", 0.00076079, 0.00103796, 0.13320000],
["money", 0.00075976, 0.00090546, 0.08351700],
["workers", 0.00075926, 0.00070670, 0.27007000],
["or", 0.00075674, 0.00324639, 0.22885000],
["european", 0.00075638, 0.00419602, 0.26119000],
["both", 0.00075500, 0.00088337, 0.15196000],
["price", 0.00075380, 0.00037543, 0.07707800],
["standards", 0.00074840, 0.00061836, 0.18395000],
["means", 0.00074824, 0.00068461, 0.04488100],
["islamic", 0.00074653, 0.00022084, 0.10443000],
["war", 0.00074597, 0.00037543, 0.06668500],
["cannot", 0.00074388, 0.00099379, 0.14373000],
["an", 0.00074319, 0.00408560, 0.40488000],
["competitiveness", 0.00073576, 0.00022084, 0.02954400],
["area", 0.00073272, 0.00081712, 0.08831600],
["does", 0.00072603, 0.00092754, 0.07423800],
["fear", 0.00072317, 0.00022084, 0.04826200],
["liberal", 0.00072251, 0.00026501, 0.17966000],
["corruption", 0.00072108, 0.00015459, 0.01552800],
["basel", 0.00071998, 0.00013251, 0.14024000],
["minority", 0.00071357, 0.00015459, 0.00755560],
["attacks", 0.00071281, 0.00026501, 0.09840300],
["especially", 0.00071132, 0.00041960, 0.03609900],
["freedom", 0.00070520, 0.00030918, 0.09112800],
["president", 0.00070183, 0.00375433, 0.33492000],
["framework", 0.00069941, 0.00064045, 0.04171600],
["full", 0.00069588, 0.00061836, 0.03011500],
["become", 0.00069524, 0.00046377, 0.06111300],
["policies", 0.00069305, 0.00033126, 0.06783500],
["risk", 0.00068852, 0.00053002, 0.05575200],
["mexico", 0.00068010, 0.00013251, 0.08549400],
["illegal", 0.00067563, 0.00026501, 0.18367000],
["more", 0.00067044, 0.00293721, 0.35061000],
["human", 0.00066885, 0.00088337, 0.13371000],
["nation", 0.00066373, 0.00028710, 0.01022100],
["crimes", 0.00064652, 0.00019876, 0.06350300],
["commit", 0.00064408, 0.00019876, 0.08276500],
["death", 0.00064075, 0.00024293, 0.01895900],
["fiscal", 0.00063944, 0.00024293, 0.05132000],
["electricity", 0.00063944, 0.00024293, 0.08610300],
["credible", 0.00063932, 0.00019876, 0.05123100],
["migrants", 0.00063932, 0.00019876, 0.08871200],
["clear", 0.00063838, 0.00101588, 0.14792000],
["guidelines", 0.00063699, 0.00019876, 0.09038900],
["fair", 0.00063570, 0.00055211, 0.14698000],
["any", 0.00063487, 0.00136923, 0.08848700],
["programmes", 0.00062803, 0.00019876, 0.03123300],
["data", 0.00062744, 0.00050794, 0.22088000],
["terrorist", 0.00062344, 0.00024293, 0.15777000],
["information", 0.00061846, 0.00083920, 0.14213000],
["own", 0.00060528, 0.00145756, 0.31862000],
["serbia", 0.00060052, 0.00013251, 0.03818000],
["political", 0.00059770, 0.00099379, 0.06485000],
["value", 0.00059379, 0.00035335, 0.10955000],
["agencies", 0.00059226, 0.00035335, 0.11919000],
["developing", 0.00059112, 0.00035335, 0.08158400],
["single", 0.00058417, 0.00055211, 0.02668100],
["efforts", 0.00058065, 0.00037543, 0.05021700],
["same", 0.00057533, 0.00070670, 0.07765700],
["exploitation", 0.00057480, 0.00011042, 0.07917700],
["achievement", 0.00057480, 0.00011042, 0.03034900],
["administration", 0.00057480, 0.00011042, 0.03237200],
["still", 0.00057203, 0.00081712, 0.03295200],
["players", 0.00056290, 0.00011042, 0.02475800],
["systems", 0.00056154, 0.00064045, 0.12691000],
["only", 0.00056005, 0.00150173, 0.32289000],
["england", 0.00055555, 0.00022084, 0.04345500],
["wales", 0.00055555, 0.00022084, 0.09104600],
["secure", 0.00055441, 0.00022084, 0.02693100],
["firm", 0.00055328, 0.00022084, 0.03441900],
["bosnia", 0.00055203, 0.00011042, 0.11022000],
["via", 0.00055203, 0.00011042, 0.01585200],
["beef", 0.00055104, 0.00022084, 0.18105000],
["added", 0.00054994, 0.00022084, 0.09732700],
["welfare", 0.00054994, 0.00022084, 0.17347000],
["imf", 0.00054777, 0.00022084, 0.07990100],
["other", 0.00054685, 0.00185508, 0.12918000],
["regret", 0.00054496, 0.00017667, 0.03983400],
["western", 0.00054249, 0.00022084, 0.04804400],
["operators", 0.00054151, 0.00030918, 0.30374000],
["provision", 0.00054089, 0.00017667, 0.04790900],
["outstanding", 0.00054089, 0.00017667, 0.04677800],
["join", 0.00054021, 0.00030918, 0.03334600],
["resources", 0.00053767, 0.00030918, 0.09647000],
["palestinians", 0.00053279, 0.00011042, 0.02274900],
["south", 0.00053162, 0.00022084, 0.12820000],
["budgets", 0.00053125, 0.00017667, 0.02295300],
["eastern", 0.00053098, 0.00033126, 0.16491000],
["may", 0.00053023, 0.00117047, 0.15116000],
["world", 0.00052874, 0.00108213, 0.11270000],
["than", 0.00052475, 0.00132506, 0.24280000],
["concern", 0.00052455, 0.00048585, 0.22289000],
["greece", 0.00052325, 0.00030918, 0.18090000],
["monetary", 0.00052229, 0.00017667, 0.04406300],
["budget", 0.00052206, 0.00048585, 0.15231000],
["care", 0.00052123, 0.00044169, 0.26232000],
["crime", 0.00051960, 0.00035335, 0.09367900],
["rights", 0.00051841, 0.00139131, 0.33272000],
["available", 0.00051717, 0.00041960, 0.03879200],
["essential", 0.00051667, 0.00037543, 0.15083000],
["step", 0.00051277, 0.00041960, 0.01563700],
["kosovo", 0.00051274, 0.00015459, 0.10056000],
["group", 0.00051214, 0.00094963, 0.26642000],
["short-term", 0.00051129, 0.00024293, 0.06370800],
["forces", 0.00050853, 0.00024293, 0.04544900],
["many", 0.00050632, 0.00174466, 0.22310000],
["border", 0.00050461, 0.00017667, 0.00659670],
["high", 0.00050344, 0.00059628, 0.09846200],
["minorities", 0.00049814, 0.00024293, 0.09655400],
["employees", 0.00049490, 0.00011042, 0.05040800],
["laws", 0.00048722, 0.00026501, 0.04477300],
["urge", 0.00047476, 0.00028710, 0.05435000],
["minor", 0.00047300, 0.00019876, 0.14942000],
["based", 0.00047206, 0.00046377, 0.13903000],
["national", 0.00047189, 0.00101588, 0.16652000],
["month", 0.00047010, 0.00019876, 0.19871000],
["young", 0.00046892, 0.00044169, 0.41259000],
["permanent", 0.00046820, 0.00019876, 0.17748000],
["fail", 0.00046820, 0.00019876, 0.04596000],
["reform", 0.00046751, 0.00044169, 0.09587700],
["civil", 0.00046653, 0.00028710, 0.02263400],
["purpose", 0.00046542, 0.00019876, 0.04073900],
["provided", 0.00046494, 0.00030918, 0.12874000],
["welcome", 0.00046459, 0.00099379, 0.26340000],
["immigration", 0.00046361, 0.00019876, 0.06774700],
["cut", 0.00046233, 0.00037543, 0.13901000],
["positive", 0.00046179, 0.00030918, 0.15983000],
["seems", 0.00046158, 0.00039752, 0.13867000],
["stability", 0.00046069, 0.00037543, 0.25470000],
["on", 0.00045975, 0.01020295, 0.53251000],
["suffering", 0.00045835, 0.00019876, 0.05535400],
["most", 0.00045708, 0.00106005, 0.26715000],
["race", 0.00045131, 0.00015459, 0.07985100],
["protectionism", 0.00044787, 0.00015459, 0.09102900],
["needs", 0.00044786, 0.00094963, 0.24175000],
["therefore", 0.00044708, 0.00077295, 0.07928100],
["grant", 0.00044588, 0.00008834, 0.07036100],
["prosperity", 0.00044454, 0.00015459, 0.02653000]
]
}
# The five contrast dictionaries gathered above; owvt, tvow, ivos, osvi and
# osvow are defined in earlier cells of this notebook.
dics = [owvt,tvow,ivos,osvi,osvow]
# For each dictionary keep only words whose last column is at most the
# threshold `thres`.
# NOTE(review): the original comment said "higher than a given threshold",
# but the code filters t[-1] <= thres, i.e. *lower* values pass -- confirm
# which direction is intended.
kld_words = []
kld_words_scores = []
thres = .05
freqs = [] # array for frequencies
for dic in dics:
    terms = dic['terms']
    kld_words += [t[0] for t in terms if t[-1]<=thres] # selected kld words
    freqs += [(t[1],t[-2],t[0]) for t in terms if t[-1]<=thres] # frequencies
    # NOTE(review): this line uses a strict '<' while the two lines above use
    # '<='; presumably all three should agree -- confirm before relying on it.
    kld_words_scores += [(t[2],t[0]) for t in terms if t[-1]<thres] # kld words with scores
len(kld_words)
# Sort frequencies in descending order
freqs.sort()
freqs.reverse()
# Preview some of the selected kld words
print(kld_words[:30])
# Now we take the corpora's directories (hard-coded local path -- adapt to
# your machine before running).
directory = '/Users/yuribizzoni/Downloads/Translation/B7/'
# Originals
# ewo = English Written Originals (two corpora concatenated)
ewo = glob.glob(directory+"epuds-parallel/words/*.en") + glob.glob(directory+"txt_translationese/en/originals/*") #english wo
# eso = English Spoken Originals
eso = glob.glob(directory+"1 interpreting data/ORG_SP_EN_korrigiert/*")#english spoken originals
print(len(ewo),len(eso))
# Translations
# ewt = English Written Translations
ewt = glob.glob(directory+"txt_translationese/en/translations_all/*")
print(len(ewt))
# Interpreting transcripts
# ei = English Interpreting (presumably simultaneous interpreting from
# German, going by the SI_DE_EN directory name -- confirm)
ei = glob.glob(directory+"1 interpreting data/SI_DE_EN_korrigiert/*")#english in
print(len(ei))
# some very useful functions
# cleaning a little bit the transcripts (can be made richer)
# Translation table that deletes transcript markup characters in one pass.
_CLEAN_TABLE = str.maketrans('', '', '/[]#')

def clean(hm):
    """Strip transcript markup characters ('/', '[', ']', '#') from a string.

    A single str.translate pass replaces the original chain of four
    str.replace calls; the result is identical but the string is scanned
    once instead of four times. Note that translate only handles single
    characters, so multi-character fillers (e.g. 'euh') would still need a
    separate replace/regex step.
    """
    return hm.translate(_CLEAN_TABLE)
#from text to features: for the more traditional features
def featureit(text):
    """Return classic stylometric features for a PoS-tagged text.

    `text` is a sequence of (token, pos_tag) pairs, e.g. the output of
    nltk.pos_tag with the universal tagset. Returns a dict with:
      - 'ttr':     type/token ratio of the tokens (computed via the `ld`
                   module imported elsewhere in this notebook)
      - 'density': lexical density, the (add-one smoothed) share of
                   content-word tags among all tags
    """
    tokens = [pair[0] for pair in text]
    tags = [pair[1] for pair in text]
    content_tags = {"VERB", "ADV", "ADJ", "NOUN"}  # content (non-function) PoS tags
    n_content = sum(1 for tag in tags if tag in content_tags)
    # +1 on numerator and denominator, as in the original formulation,
    # so an empty text still yields a defined density of 1.0
    density = (n_content + 1) / (len(tags) + 1)
    return {'ttr': ld.ttr(tokens), 'density': density}
# keeping only instances of chosen words
# keeping only instances of chosen words
def featureit_words(text, wordlist):
    """Filter `text` (an iterable of tokens) down to tokens found in `wordlist`.

    Token order and duplicates are preserved. `wordlist` is copied into a
    set first, so each membership test is O(1) instead of O(len(wordlist));
    this matters when filtering whole corpora against the kld word list,
    which is itself a plain Python list.
    """
    wanted = set(wordlist)
    return [w for w in text if w in wanted]
%%time
## Build the Y labels and the raw texts from which we will make our X
minamo = 110 # max number of documents taken per class
maxlen = 40000 # maximal number of characters kept per document
# class 0 = interpreted, 1 = spoken originals, 2 = translations, 3 = written originals
classes = [ei, eso, ewt, ewo]
texts_,Y_ = [],[]
latins = [] # texts that needed the latin1 fallback (kept for inspection)
lens = [] # original document lengths, before truncation
class_number=0
for each_class in classes:
    print(class_number)
    # feature every data point (one file path = one document)
    for point in each_class[:minamo]:
        #print(point)
        # read the text; try utf-16 first, fall back to latin1
        try:
            text = open(point, encoding='utf-16').read()#.split()
        except:
            # NOTE(review): bare except also hides non-encoding errors
            # (e.g. missing files) -- confirm this best-effort is intended.
            text = open(point, encoding='latin1').read()
            latins.append(text)
            print("reading latin")
        lens.append(len(text))
        text = text[:maxlen] # truncate so documents are comparable in size
        text = nltk.wordpunct_tokenize(clean(text).lower())# clean and tokenize
        # keep only documents with more than 10 tokens
        if len(text)>10:
            text = nltk.pos_tag(text, tagset='universal') # PoS tagging
            texts_.append(text)#+[1/class_number])
            Y_.append(class_number)
        else: print("Not enough chars")
    class_number+=1
#
# sanity check: total size and per-class counts of the labels
len(Y_),Y_.count(0), Y_.count(1), Y_.count(2), Y_.count(3)
```
```
# Explicit way of taking only classes 2 and 3
# (written translations vs. written originals).
# If we want all 4 classes, set start = 0 etc.
start = Y_.count(0)+Y_.count(1)
classize = start+Y_.count(2)+Y_.count(3)
texts = texts_[start:classize]
Y = Y_[start:classize]
len(texts), len(Y)
#
## Feature Selection
#title = "min freq 20" #"Kld_unigrams" #"Pos_ngrams+Kld_unigrams"#
X,c=[],0
for text in texts:
    # drop transcript noise tokens before extracting features
    text = [e for e in text if e[0]!='breath' and e[0]!='noise']
    #print(text[:3], Y[c])
    c+=1
    # EXTRACT THE FEATURES: GATEWAY -- keep exactly one option uncommented.
    # Option 1 (active): the featureset is the whole text:
    features = " ".join([e[0] for e in text])
    # Option 2: both kld words and traditional features:
    #features = " ".join(featureit_words([e[0] for e in text], kld_words))
    # Option 3: only traditional features:
    #features = featureit(text)
    # Option 4: both words and Parts of Speech in the feature set:
    #features = [e[0] for e in text]
    #features += " ".join([w[1] for w in text])##
    # In any case, we end up adding the featureset to the X array:
    X.append(features)
# THEN WE MAKE THE VECTORIZER
# in this instance: the top 9000 unigrams. See the CountVectorizer docs for
# more options.
vectorizer = CountVectorizer(max_features=9000, ngram_range=(1,1))#, min_df=.5)
# ATTENTION: if using "featureit" (Option 3), use the following instead:
#vectorizer = DictVectorizer()
# Another possible vectorizer:
#vectorizer = TfidfVectorizer(max_features=4000, ngram_range=(1,1))#, min_df=10)
# Finally, we fit-transform X with the vectorizer, i.e. apply the feature
# selection we defined above to X.
X = vectorizer.fit_transform(X)
# A sample of the resulting feature names:
print(vectorizer.get_feature_names()[:100])
X.toarray()
# The number of distinct features in the vectorizer right now:
print(len(vectorizer.get_feature_names()))
# Verify length and shape (Y and X should have the same length)
len(Y), X.shape # so, these are our X and Y rn
```
```
import pandas as pd
from sklearn.feature_selection import mutual_info_classif
# Rank every vectorizer feature by its mutual information with the labels.
res = zip(mutual_info_classif(X, Y, discrete_features='auto'),vectorizer.get_feature_names())
sorted_features = sorted(res, key=lambda x: x[0]) # ascending information gain
# Keep the 400 most informative features (the tail of the ascending sort).
threshold=-400
selected = [e[1] for e in sorted_features[threshold:]]
toprint = [e for e in sorted_features[threshold:]]
len(selected)
```
You can go deeper with the information gain (IG) analysis in the next cell. Since it is convoluted and we did not get much out of it in the end, it was converted to a markdown cell and is still unpolished.
# Deeper IG analysis (unpolished; kept for reference).
# NOTE(review): `boh` and `bohed_` are not defined anywhere in this
# notebook -- presumably `boh` held (score, feature) pairs from a previous
# run and `bohed_` should be `bohed`; confirm before running.
bohed = [e for e in boh]
ig = [round(b[0],3) for b in bohed_] # information-gain scores, rounded
fea = [b[1] for b in bohed_] # feature names
df = pd.DataFrame(dict(feature=fea,ig=ig))
print(df.to_latex(index=False))
# Re-extract the features, mixing PoS tags and words.
X=[]
for text in texts:
    # extract the features again
    text = [e[1] for e in text]+[e[0] for e in text] #pos and word mixed
    #features = " ".join(featureit_words(text, selected))
    X.append(" ".join(text))
# restrict the vectorizer to the selected feature vocabulary
vectorizer = CountVectorizer(max_features=None, ngram_range=(1,3), vocabulary=fea)
X = vectorizer.fit_transform(X)
# print the indices where the class label increases (class boundaries)
iss=[]
for i in range(1,len(Y)):
    y = Y[i]
    if y>Y[i-1]: print(i)
x2arr = X.toarray()
# Per-class totals of each selected feature; rows 0-49 / 50-99 / 100-148 /
# 149-end are assumed to be the four classes -- confirm against the
# boundary indices printed above.
i2f, s2f, t2f, w2f = [],[],[],[]
for i in range(len(fea)):
    feat = fea[i]
    print(feat)
    c = X.getcol(i).toarray()
    # NOTE(review): `c` (the column array) is immediately rebound by the
    # tuple unpacking on the next line -- works, but fragile naming.
    a,b,c,d = c[:50], c[50:100], c[100:149], c[149:]
    #i2f.append(a[0]), s2f.append(b[0]), t2f.append(c[0]), w2f.append(d[0])
    #i2f.append(np.mean(a)), s2f.append(np.mean(b)), t2f.append(np.mean(c)), w2f.append(np.mean(d))
    i2f.append(np.sum(a)), s2f.append(np.sum(b)), t2f.append(np.sum(c)), w2f.append(np.sum(d))
ig = [round(b[0],3) for b in bohed_]
fea = [b[1] for b in bohed_]
df = pd.DataFrame(dict(feature=fea, inte=i2f, spo=s2f, tra=t2f, wro=w2f, ig=ig))
print(df.to_latex(index=False))
# Here begins the Classification
```
# CLF (classifier) will be our linear-kerneled Support Vector Machine
clf = SVC(kernel='linear', random_state=0)
# we simply cross-validate X on Y with our machine, on 10 splits, using a
# weighted f1 score
scores = cross_val_score(clf, X, Y, cv=10, scoring='f1_weighted')
print(scores)
np.mean(scores), np.std(scores)
%%time
# The following cell brings it "all together" (without cross-validation):
# we split the data in train and test, fit the model and generate a number
# of useful performance metrics from it.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=.1, random_state=42)
print(x_train.shape)
clf = SVC(kernel='linear', random_state=0)
clf.fit(x_train, y_train)
predicted = clf.predict(x_test)
gold = y_test
from sklearn.metrics import precision_recall_fscore_support as pr
# weighted-average precision/recall/f1 (support is None when average is set)
bPrecis, bRecall, bFscore, bSupport = pr(gold, predicted, average='weighted')
print(bPrecis, bRecall, bFscore, bSupport)
# Visualization: confusion matrix, normalized over predicted labels
from sklearn import metrics
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2;
# newer versions use ConfusionMatrixDisplay.from_estimator instead.
from sklearn.metrics import plot_confusion_matrix
from matplotlib import pyplot as plt
title = ''
classifier = clf
X_test = x_test
class_names = "WrO Tra SpO Si".split()
disp = plot_confusion_matrix(classifier, X_test, y_test,
                             display_labels=class_names,
                             cmap=plt.cm.Blues,
                             normalize='pred')
disp.ax_.set_title(title)
print(title)
#plt.savefig("Kld_500_top"+'.png')
plt.show()
```
# Rank features by the absolute value of their SVM coefficient.
# NOTE(review): `names` is only assigned in a later cell
# (names = vectorizer.get_feature_names()) -- run that cell first,
# otherwise this one raises NameError.
c=0
coff = clf.coef_ #clf.steps[1][1].coef_
coff = coff.data # non-zero coefficient values of the sparse matrix
tosort = []
for el in coff:
    tup=(abs(el),names[c])
    c+=1
    tosort.append(tup)
    #for feature in el: print(round(feature),3)
    #print(round(el[0],3), round(el[1],3), round(el[2],3))#, round(el[3],3))
tosort.sort()
tosort.reverse()
# print features from largest to smallest |coefficient|
for el in tosort: print(el)
```
# Feature Exploration
# We try to gather the most representative features used by the SVM to tell
# one class from another (one coefficient row per class pair).
values_array = clf.coef_.toarray()
used=[]
c=0
classes = set(Y)
names = vectorizer.get_feature_names()
classmap = {2:'Tra', 3:'Wr orig'} ## uncomment if focusing on written
#classmap = {0:'Int',1:'Sp orig'} ## uncomment if focusing on spoken
#classmap = {0:'Int', 1:'Sp orig', 2:'Tra', 3:'Wr orig'} ## uncomment if using all 4 classes
for y in classes:
    for y_ in classes:
        # visit each unordered class pair exactly once
        if y!=y_ and (y_,y) not in used:
            print("\n",classmap[y], "vs.", classmap[y_],"&",end=" ")
            used.append((y,y_))
            values = values_array[c]
            values_and_names = [(round(values[i],4), names[i]) for i in range(len(names))]
            values_and_names.sort()
            # the 10 most negative coefficients for this class pair
            top=values_and_names[:10]
            # printed as LaTeX table cells; [1:] drops the leading '-' sign
            for el in top: print(el[1],"("+str(round(el[0],3))[1:]+")",end=", ")
            print("\\\\")
            c+=1
# writing it out
# NOTE(review): `out` is used below but its open() is commented out --
# uncomment the next line (with a real path) before running this part.
#out = open('/Users/yuribizzoni/Documents/.txt','w')
for el in values_and_names[:1000]:
    out.write(el[1]+"("+str(el[0])[:]+")"+"\n")
out.close()
print(disp.confusion_matrix)
np.mean(scores), np.std(scores)
```
| github_jupyter |
# Exploring different Coastline options in Magics
This notebook will help you discover many possibilities for designing the background of your maps in Magics.
From your workstation:
load magics
module swap(or load) Magics/new
jupyter notebook
load this notebook
**mcoast** controls the background of our maps. Here you can set things like the colours of land and sea, coastline resolution and colour, and also the grid, rivers, borders, cities etc.
List of all **mcoast** parameters you can find [here](https://confluence.ecmwf.int/display/MAGP/Coastlines "Coastlines parameters")
### Import Magics and define non coastline paramters
To start, we will define some non-coastline parameters, which we will not change later.
```
import Magics.macro as magics
# Use a named map area from Magics' built-in area library (central Europe).
projection = magics.mmap(
    subpage_map_library_area = "on",
    subpage_map_area_name = 'central_europe'
)
```
As with all Magics functions, default is something you can start with. But if you don't like it, *everything* can be changed
```
# Default coastlines: no parameters needed for a first map.
coast = magics.mcoast()
magics.plot(projection, coast)
```
### High resolution coastline and dash gridlines
In Magics we can fully control how land and sea look like.
Coastline resolution can be 'low', 'medium' or 'high', or if you want to leave Magics to decide, you can set it as 'automatic'.
Land and sea can be shaded or not, and you can set the colour.
You can choose to have gridlines or not, their frequency, style, colour, thickness, labels...
```
# High-resolution coastline, shaded land, dashed labelled grid.
coast = magics.mcoast(
    map_coastline_style = "solid",
    map_coastline_colour = "tan",
    map_coastline_resolution = "high",
    map_coastline_land_shade = "on",
    map_coastline_land_shade_colour = "cream",
    map_grid = "on",
    map_grid_colour = "tan",
    map_grid_latitude_increment = 5.00,
    map_grid_longitude_increment = 10.00,
    map_grid_line_style = "dash",
    map_label = "on",
    map_label_colour = "charcoal",
    map_label_height = 0.35,
    map_label_latitude_frequency = 2,   # label every 2nd gridline
    map_label_longitude_frequency = 2,
    map_label_blanking = "off"
)
magics.plot(projection, coast)
```
### Administrative boundaries, cities and rivers
```
# National boundaries in red, selected administrative boundaries in orange,
# plus cities and rivers on shaded land; grid switched off.
coast = magics.mcoast(
    map_boundaries = "on",
    map_boundaries_colour = "red",
    map_coastline_resolution = "high",
    map_coastline_land_shade_colour = "cream",
    map_cities = "on",
    map_grid = "off",
    map_coastline_land_shade = "on",
    map_coastline_colour = "tan",
    map_administrative_boundaries = "on",
    # ISO3 codes of the countries whose admin boundaries are drawn
    map_administrative_boundaries_countries_list = ["FRA", "ESP", "GBR"],
    map_administrative_boundaries_colour = "orange",
    map_rivers = "on"
)
magics.plot(projection, coast)
```
### Grid lines, boundaries and rivers
```
# Dotted gridlines every 2 degrees (anchored at latitude 0), with
# boundaries and rivers.
coast = magics.mcoast(
    map_boundaries = "on",
    map_boundaries_colour = "red",
    map_coastline_resolution = "high",
    map_coastline_colour = "tan",
    map_coastline_land_shade = "on",
    map_coastline_land_shade_colour = "cream",
    map_grid = "on",
    map_grid_line_style = "dot",
    map_grid_colour = "tan",
    map_grid_latitude_increment = 2.00,
    map_grid_longitude_increment = 2.00,
    map_grid_latitude_reference = 0.00,
    map_rivers = "on"
)
magics.plot(projection, coast)
```
### Sea, lakes and rivers
```
# Shade the sea instead of the land; coastline and rivers in 'sky'.
coast = magics.mcoast(
    map_coastline_sea_shade_colour = "sky",
    map_coastline_resolution = "high",
    map_rivers_colour = "sky",
    map_grid = "off",
    map_coastline_land_shade = "off",
    map_coastline_colour = "sky",
    map_coastline_sea_shade = "on",
    map_rivers = "on")
magics.plot(projection, coast)
```
| github_jupyter |
# Benchmarking with Argo Worfklows & Vegeta
In this notebook we will dive into how you can run benchmarking with batch processing using Argo Workflows, Seldon Core and Vegeta.
Dependencies:
* Seldon core installed as per the docs with Istio as an ingress
* Argo Workflows installed in cluster (and argo CLI for commands)
## Setup
### Install Seldon Core
Use the notebook to [set-up Seldon Core with Ambassador or Istio Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).
Note: If running with KIND you need to make sure do follow [these steps](https://github.com/argoproj/argo-workflows/issues/2376#issuecomment-595593237) as workaround to the `/.../docker.sock` known issue.
### Install Argo Workflows
You can follow the instructions from the official [Argo Workflows Documentation](https://github.com/argoproj/argo#quickstart).
Download the right CLI for your environment by following the documentation (https://github.com/argoproj/argo-workflows/releases/tag/v3.0.8)
You also need to make sure that argo has permissions to create seldon deployments - for this you can just create a default-admin rolebinding as follows:
Set up the RBAC so the argo workflow is able to create seldon deployments.
Set up the configmap in order for it to work in KIND and other environments where Docker may not be the main runtime (see https://github.com/argoproj/argo-workflows/issues/5243#issuecomment-792993742)
### Create Benchmark Argo Workflow
In order to create a benchmark, we created a simple argo workflow template so you can leverage the power of the helm charts.
Before we dive into the contents of the full helm chart, let's first give it a try with some of the settings.
We will run a batch job that will set up a Seldon Deployment with 1 replicas and 4 cpus (with 100 max workers) to send requests.
```
# Render the benchmark workflow helm chart with our parameters and pipe the
# manifest straight into `argo submit`: one SeldonDeployment (1 replica,
# 5 server workers), benchmarked over REST and gRPC, with and without the
# orchestrator, 30s per run.
!helm template seldon-benchmark-workflow ../../../helm-charts/seldon-benchmark-workflow/ \
    --set workflow.namespace=argo \
    --set workflow.name=seldon-benchmark-process \
    --set workflow.parallelism=2 \
    --set seldonDeployment.name=sklearn \
    --set seldonDeployment.replicas="1" \
    --set seldonDeployment.serverWorkers="5" \
    --set seldonDeployment.serverThreads=1 \
    --set seldonDeployment.modelUri="gs://seldon-models/v1.11.0-dev/sklearn/iris" \
    --set seldonDeployment.server="SKLEARN_SERVER" \
    --set seldonDeployment.apiType="rest|grpc" \
    --set seldonDeployment.requests.cpu="2000Mi" \
    --set seldonDeployment.limits.cpu="2000Mi" \
    --set seldonDeployment.disableOrchestrator="true|false" \
    --set benchmark.cpu="2" \
    --set benchmark.concurrency="1" \
    --set benchmark.duration="30s" \
    --set benchmark.rate=0 \
    --set benchmark.data='\{"data": {"ndarray": [[0\,1\,2\,3]]\}\}' \
    | argo submit -
# Follow the workflow's progress and inspect its result.
!argo list -n argo
!argo logs -f seldon-benchmark-process -n argo
!argo get seldon-benchmark-process -n argo
```
## Process the results
We can now print the results in a consumable format.
## Deeper Analysis
Now that we have all the parameters, we can do a deeper analysis
```
import sys
# make the repo's e2e testing helpers importable
sys.path.append("../../../testing/scripts")
import pandas as pd
from seldon_e2e_utils import bench_results_from_output_logs
# Parse the vegeta output logs of the finished workflow into a dataframe.
results = bench_results_from_output_logs("seldon-benchmark-process", namespace="argo")
df = pd.DataFrame.from_dict(results)
df.head()
# Clean up the workflow once the results have been collected.
!argo delete seldon-benchmark-process -n argo || echo "Argo workflow already deleted or not exists"
```
| github_jupyter |
# Part 1: Introducing txtai
[txtai](https://github.com/neuml/txtai) builds an AI-powered index over sections of text. txtai supports building text indices to perform similarity searches and create extractive question-answering based systems.
NeuML uses txtai and/or the concepts behind it to power all of our Natural Language Processing (NLP) applications. Example applications:
- [paperai](https://github.com/neuml/paperai) - AI-powered literature discovery and review engine for medical/scientific papers
- [tldrstory](https://github.com/neuml/tldrstory) - AI-powered understanding of headlines and story text
- [neuspo](https://neuspo.com) - a fact-driven, real-time sports event and news site
- [codequestion](https://github.com/neuml/codequestion) - Ask coding questions directly from the terminal
txtai is built on the following stack:
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
- [transformers](https://github.com/huggingface/transformers)
- [faiss](https://github.com/facebookresearch/faiss)
- Python 3.6+
This notebook gives an overview of txtai and how to run similarity searches.
# Install dependencies
Install txtai and all dependencies
```
%%capture
!pip install git+https://github.com/neuml/txtai
```
# Create an Embeddings instance
The Embeddings instance is the main entrypoint for txtai. An Embeddings instance defines the method used to tokenize and convert a text section into an embeddings vector.
```
%%capture
from txtai.embeddings import Embeddings
# Create embeddings model, backed by sentence-transformers & transformers
embeddings = Embeddings({"method": "transformers", "path": "sentence-transformers/bert-base-nli-mean-tokens"})
```
# Running similarity queries
An embedding instance relies on the underlying transformer model to build text embeddings. The following example shows how to use a transformers Embeddings instance to run similarity searches for a list of different concepts.
```
import numpy as np
sections = ["US tops 5 million confirmed virus cases",
"Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg",
"Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
"The National Park Service warns against sacrificing slower friends in a bear attack",
"Maine man wins $1M from $25 lottery ticket",
"Make huge profits without work, earn up to $100,000 a day"]
print("%-20s %s" % ("Query", "Best Match"))
print("-" * 50)
for query in ("feel good story", "climate change", "health", "war", "wildlife", "asia", "north america", "dishonest junk"):
# Get index of best section that best matches query
uid = np.argmax(embeddings.similarity(query, sections))
print("%-20s %s" % (query, sections[uid]))
```
The example above shows for almost all of the queries, the actual text isn't stored in the list of text sections. This is the true power of transformer models over token based search. What you get out of the box is 🔥🔥🔥!
# Building an Embeddings index
For small lists of texts, the method above works. But for larger repositories of documents, it doesn't make sense to tokenize and convert to embeddings on each query. txtai supports building pre-computed indices which significantly improve performance.
Building on the previous example, the following example runs an index method to build and store the text embeddings. In this case, only the query is converted to an embeddings vector each search.
```
# Create an index for the list of sections
embeddings.index([(uid, text, None) for uid, text in enumerate(sections)])
print("%-20s %s" % ("Query", "Best Match"))
print("-" * 50)
# Run an embeddings search for each query
for query in ("feel good story", "climate change", "health", "war", "wildlife", "asia", "north america", "dishonest junk"):
# Extract uid of first result
# search result format: (uid, score)
uid = embeddings.search(query, 1)[0][0]
# Print section
print("%-20s %s" % (query, sections[uid]))
```
# Embeddings load/save
Embeddings indices can be saved to disk and reloaded. At this time, indices are not incrementally created, the index needs a full rebuild to incorporate new data. But that enhancement is in the backlog.
```
embeddings.save("index")
embeddings = Embeddings()
embeddings.load("index")
uid = embeddings.search("climate change", 1)[0][0]
print(sections[uid])
!ls index
```
# Embedding methods
Embeddings supports two methods for creating text vectors, the sentence-transformers library and word embeddings vectors. Both methods have their merits as shown below:
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
- Creates a single embeddings vector via mean pooling of vectors generated by the transformers library.
- Supports models stored on Hugging Face's model hub or stored locally.
- See sentence-transformers for details on how to create custom models, which can be kept local or uploaded to Hugging Face's model hub.
- Base models require significant compute capability (GPU preferred). Possible to build smaller/lighter weight models that tradeoff accuracy for speed.
- word embeddings
- Creates a single embeddings vector via BM25 scoring of each word component. See this [Medium article](https://towardsdatascience.com/building-a-sentence-embedding-index-with-fasttext-and-bm25-f07e7148d240) for the logic behind this method.
- Backed by the [pymagnitude](https://github.com/plasticityai/magnitude) library. Pre-trained word vectors can be installed from the referenced link.
- See [vectors.py](https://github.com/neuml/txtai/blob/master/src/python/txtai/vectors.py) for code that can build word vectors for custom datasets.
- Significantly better performance with default models. For larger datasets, it offers a good tradeoff of speed and accuracy.
# Next
In part 2 of this series, we'll look at how to use txtai to run extractive searches
| github_jupyter |
```
# IMPORT OUR DEPENDENCIES:
#To create our randomly-selected coordinates:
import random
import requests
import numpy as np
#To hold our data and create dataframes:
import pandas as pd
#Our API keys, and citipy (newly installed for project), to import the city weather-data.
from config import api_key
from citipy import citipy
#And to plot our data:
import matplotlib.pyplot as plt
import matplotlib
#Last, for any formating of plots (they may be needed):
import seaborn
# GETTING A RANDOM SET OF COORDINATES TO USE FOR CALLING ON CITY WEATHER:
# First we define the Latitude & Longitude Zones use numpy to select coordinates at random, given the ranges:
lat_zone = np.arange(-90,90,15)
lon_zone = np.arange(-200,200,15)
# DATAFRAME TO HOLD OUR COORDINATES:
# Create our list/Pandas DataFrame & calling it "cities" which will hold the coordinates to their city.
cities_df = pd.DataFrame()
cities_df["Latitude"] = ""
cities_df["Longitude"] = ""
# COORDINATE SELECTION!
# First, using a coordinate systen we will assign "X" for latitude, and "Y" for long.
# For both latitude "X" & longitude "Y", we randomly select **(50 ?) unique coordinates:
# **For the random sample we collect, we will assign "lats" for X and "lons" for y.
# We then will create/append the lists, "lat_samples" and "lon_samples", to use in dataframes.
for x in lat_zone:
for y in lon_zone:
x_values = list(np.arange(x,x+15,0.01))
y_values = list(np.arange(y,y+15,0.01))
lats = random.sample(x_values,50)
lons = random.sample(y_values,50)
lat_samples = [(x+dec_lat) for dec_lat in lats]
lon_samples = [y+dec_lon for dec_lon in lons]
cities_df = cities_df.append(pd.DataFrame.from_dict({"Latitude":lat_samples,
"Longitude":lon_samples}))
# We then place the coordinates into our "cities" dataframe that was created above.
cities_df = cities_df.reset_index(drop=True)
# IS THIS LINE NECC ??
cities_df.shape
# USING CITIPY MODULE TO TIE COORDINATES TO A CORRESPONDING/NEARBY CITY:
cities_df["Closest City name"] = ""
cities_df["Closest Country code"] = ""
# Map every coordinate pair to its nearest known city and country via citipy.
# NOTE: DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
# .at[] is the supported scalar setter and behaves identically here.
for index, row in cities_df.iterrows():
    city = citipy.nearest_city(row["Latitude"], row["Longitude"])
    cities_df.at[index, "Closest City name"] = city.city_name
    cities_df.at[index, "Closest Country code"] = city.country_code
# CLEANING THE DATAFRAME: ELIMINATE COORDINATE-SETS THAT DON'T YIELD NEARBY CITIES:
# First we create a new data frame that eliminates coordinates that aren't near any city:
# ..Calling it "clean_cities":
clean_cities_df = cities_df.drop(['Latitude', 'Longitude'],axis=1)
clean_cities_df
# Next we filter for any possible duplicates (cities that come twice)
clean_cities_df = clean_cities_df.drop_duplicates()
# **Neccesary?
clean_cities_df.shape
# CREATING OUR SET OF CITIES WE WILL MAKE AN API CALL WITH
# Creation of our random sample set of cities from our "clean" data frame (above).
# Now we use a sample size of 500 in order to return their weather data.
# ** We will call this group of 500, "selected_cities".
selected_cities = clean_cities_df.sample(500)
selected_cities = selected_cities.reset_index(drop=True)
# USING API CALLS TO GATHER WEATHER INFO ON OUR SELECTED CITIES:
# We use Openweathermap to make our API CALLS:
# Set up format for the calls:
base_url = "http://api.openweathermap.org/data/2.5/weather"
app_id = api_key
params = { "appid" :app_id,"units":"metric" }
# NOW enter the call data, url formatting, variables we want to collect &
# interate through for our "selected_cities" group:
def encrypt_key(input_url):
    """Mask the API key embedded in a request URL before printing.

    The OpenWeatherMap key occupies characters 53-84 of the request URL,
    so that fixed slice is replaced with the placeholder "<YourKey>".
    Assumes the URL layout produced by `base_url` plus `params` above —
    TODO confirm the offsets if the base URL ever changes.
    """
    head, tail = input_url[:53], input_url[85:]
    return f"{head}<YourKey>{tail}"
# Fetch the current weather for each selected city and write the fields we
# need back onto the dataframe.
# NOTE: DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
# .at[] is the supported replacement for scalar assignment by label.
for index, row in selected_cities.iterrows():
    params["q"] = f'{row["Closest City name"]},{row["Closest Country code"]}'
    print(f"Retrieving weather information for {params['q']}")
    city_weather_resp = requests.get(base_url, params)
    # Print the request URL with the API key masked out.
    print(encrypt_key(city_weather_resp.url))
    city_weather_resp = city_weather_resp.json()
    # .get(..., {}) keeps a missing response section from raising KeyError;
    # absent fields simply become NaN and are dropped later by dropna().
    selected_cities.at[index, "Latitude"] = city_weather_resp.get("coord", {}).get("lat")
    selected_cities.at[index, "Longitude"] = city_weather_resp.get("coord", {}).get("lon")
    selected_cities.at[index, "Temperature"] = city_weather_resp.get("main", {}).get("temp_max")
    selected_cities.at[index, "Wind speed"] = city_weather_resp.get("wind", {}).get("speed")
    selected_cities.at[index, "Humidity"] = city_weather_resp.get("main", {}).get("humidity")
    selected_cities.at[index, "Cloudiness"] = city_weather_resp.get("clouds", {}).get("all")
# POST CALL-RETREIVING: CLEAN UP DATA (When needed) AND EXPORT OUR DATA TO CSV:
selected_cities = selected_cities.dropna()
selected_cities.shape
selected_cities.to_csv("City_Weather_data.csv")
```
| github_jupyter |
# Streaming Sample: Cosmos DB ChangeFeed - Databricks
In this notebook, you read a live stream of tweets that are stored in Cosmos DB by leveraging Apache Spark to read the Cosmos DB Change Feed, and run transformations on the data in a Databricks cluster.
## prerequisites:
- Databricks Cluster (Spark)
- Cosmos DB Spark Connector (azure-cosmosdb-spark)
- Create a library using maven coordinates. Simply type `azure-cosmosdb-spark_2.2.0` into the search box and search for it, or create the library by uploading the jar file, which can be downloaded from the Maven central repository
- Azure Cosmos DB Collection
## Test Feed Generator
- https://github.com/tknandu/TwitterCosmosDBFeed
## LINKS
- [Working with the change feed support in Azure Cosmos DB](https://docs.microsoft.com/en-us/azure/cosmos-db/change-feed)
- [Twitter with Spark and Azure Cosmos DB Change Feed Sample](https://github.com/Azure/azure-cosmosdb-spark/blob/master/samples/notebooks/Twitter%20with%20Spark%20and%20Azure%20Cosmos%20DB%20Change%20Feed.ipynb)
- [Stream Processing Changes using Azure Cosmos DB Change Feed and Apache Spark](https://github.com/Azure/azure-cosmosdb-spark/wiki/Stream-Processing-Changes-using-Azure-Cosmos-DB-Change-Feed-and-Apache-Spark)
- https://github.com/tknandu/TwitterCosmosDBFeed
## Configure Connection to Cosmos DB Change Feed using azure-cosmosdb-spark
The parameters below connect to the Cosmos DB Change Feed; for more information, please refer to Change Feed Test Runs.
```
# Adding variables
# --- Change Feed read settings ---
rollingChangeFeed = False        # keep a single (non-rolling) change feed query
startFromTheBeginning = False    # False: only read changes made after the query starts
useNextToken = True              # resume from the stored continuation token
database = "changefeedsource"
collection = "tweet_new"
# Connection config consumed by azure-cosmosdb-spark when reading the feed.
# SECURITY NOTE(review): the account endpoint and master key are hardcoded
# below; rotate this key and load it from a secret store rather than
# committing it to the notebook.
tweetsConfig = {
    "Endpoint" : "https://dbstreamdemo.documents.azure.com:443/",
    "Masterkey" : "ekRLXkETPJ93s6XZz4YubZOw1mjSnoO5Bhz1Gk29bVxCbtgtKmiyRz4SogOSxLOGTouXbwlaAHcHOzct4JVwtQ==",
    #"Database" : database,
    #"Collection" : collection,
    "Database" : "changefeedsource",
    "Collection" : "tweet_new",
    "ReadChangeFeed" : "true",
    "ChangeFeedQueryName" : database + collection + " ",
    "ChangeFeedStartFromTheBeginning" : str(startFromTheBeginning),
    "ChangeFeedUseNextToken" : str(useNextToken),
    "RollingChangeFeed" : str(rollingChangeFeed),
    #"ChangeFeedCheckpointLocation" : "./changefeedcheckpointlocation",
    "SamplingRatio" : "1.0"
}# Adding
```
## Read a DataFrame
```
# Read a DataFrame
# SparkSession available as 'spark'.
tweets = spark.read.format("com.microsoft.azure.cosmosdb.spark").options(**tweetsConfig).load()
```
##Get the number of tweets
This provides the count of tweets; it will start off 0 and then continue growing as you re-run the cell below.
```
# Get the number of tweets
tweets.count()
# display(tweets)
# tweets.printSchema()
```
## Create tweets TempView
This way we can run SQL statements within the notebook
```
# Create tweets TempView
# This way we can run SQL statements within the notebook
tweets.createOrReplaceTempView("tweets")
%sql
select count(1) from tweets
```
## Show various attributes of the first 20 tweets
```
%sql
select
id,
created_at,
user.screen_name,
user.location,
text,
retweet_count,
entities.hashtags,
entities.user_mentions,
favorited,
source
from tweets
limit 20
```
## Determine Top 10 hashtags for the tweets
```
%sql
select concat(concat((dense_rank() OVER (PARTITION BY 1 ORDER BY tweets DESC)-1), '. '), text) as hashtags, tweets
from (
select hashtags.text, count(distinct id) as tweets
from (
select
explode(entities.hashtags) as hashtags,
id
from tweets
) a
group by hashtags.text
order by tweets desc
limit 10
) b
```
# [APPENDIX] Connnecting to Cosmos DB using pydocumentdb
```
# Import Necessary Libraries
import pydocumentdb
from pydocumentdb import document_client
from pydocumentdb import documents
import datetime
# Configuring the connection policy (allowing for endpoint discovery)
connectionPolicy = documents.ConnectionPolicy()
connectionPolicy.EnableEndpointDiscovery
connectionPolicy.PreferredLocations = ["Japan East", "Japan West"]
# Set keys to connect to Cosmos DB
masterKey = 'b3KPBHQvWTD8prYsQDiHlaM8kDzBholipD1sgshjT60ayDK9WkvRAT0Qywsi5FkcyKsYcvF4iIrUEBBzaZwJKw=='
host = 'https://videoanalytics.documents.azure.com:443/'
client = document_client.DocumentClient(host, {'masterKey': masterKey}, connectionPolicy)
# Configure Database and Collections
databaseId = 'asset'
collectionId = 'meta'
# Configurations the Cosmos DB client will use to connect to the database and collection
dbLink = 'dbs/' + databaseId
collLink = dbLink + '/colls/' + collectionId
# Set query parameter
#querystr = "SELECT c.City FROM c WHERE c.State='WA'"
querystr= "SELECT * FROM c"
# Query documents
query = client.QueryDocuments(collLink, querystr, options=None, partition_key=None)
# Query for partitioned collections
# query = client.QueryDocuments(collLink, query, options= { 'enableCrossPartitionQuery': True }, partition_key=None)
# Push into list `elements`
elements = list(query)
print(elements)
```
| github_jupyter |
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import shutil
import time
import copy
#to transform images and to convert it in order to form to tensors
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test' : transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
}
import io
import zipfile
!unzip /content/jpgtraintest.zip
image_datasets = {x: datasets.ImageFolder(os.path.join('/content/jpgtraintest', x), data_transforms[x]) for x in ['train', 'test']}
image_datasets['train']
image_datasets['test']
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=16,
shuffle=True,
num_workers=4)
for x in ['train', 'test']}
#HSN: data loader: python iterable dataset
dataloaders
class_names = image_datasets['train'].classes
class_names
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#to use GPU version
device
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor with ImageNet normalization undone.

    `inp` is assumed to be a torch tensor normalized with the ImageNet
    mean/std used in `data_transforms` — TODO confirm; it is converted to
    an (H, W, C) numpy array, de-normalized, clipped to [0, 1] and shown.
    """
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    # torch stores images channel-first; matplotlib expects channel-last.
    img = inp.numpy().transpose((1, 2, 0))
    img = np.clip(img * imagenet_std + imagenet_mean, 0, 1)
    plt.figure(figsize=(20, 20))
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI backend a moment to draw
inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs)
#iter(dataloader) creates an object of class _DataLoaderIter and, in the loop, creates same object n times and retrieve the first batch only.
imshow(out, title=[class_names[x] for x in classes])
def train_model(model, criterion, optimizer, scheduler, num_epochs=20):
    """Train `model` and return it loaded with the best 'test'-phase weights.

    Runs a standard train/eval loop over the module-level `dataloaders`
    for `num_epochs` epochs, tracking the best test accuracy and restoring
    the matching state_dict before returning.

    Relies on the module-level globals `dataloaders`, `dataset_sizes`
    and `device`.
    """
    since = time.time()
    best_acc = 0.0
    # Deep-copy so later training steps cannot mutate the snapshot.
    best_model = copy.deepcopy(model.state_dict())
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in ['train', 'test']:
            if phase == 'train':
                # Decay the LR per the scheduler (StepLR: x0.1 every 7 epochs).
                # NOTE(review): PyTorch >= 1.1 expects scheduler.step() after
                # the optimizer updates; kept here to preserve the original
                # schedule exactly.
                scheduler.step()
                model.train()   # enable dropout/batchnorm training behavior
            else:
                model.eval()    # dropout/batchnorm switch to eval behavior
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()  # clear gradients from the previous step
                # Track gradients only during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # loss.item() is the batch mean; scale by batch size so the
                # epoch average stays correct for a short final batch.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc:{:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Keep the weights with the best held-out accuracy.
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    # Fixed format specs: '{:0f}' and '{:4f}' printed full float precision;
    # '{:.0f}' / '{:.4f}' were clearly intended.
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val acc: {:.4f}'.format(best_acc))
    model.load_state_dict(best_model)
    return model
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
model_ft = models.resnet101(pretrained=True)
#HSN: ResNet-101 model from Deep Residual Learning for Image Recognition
num_frts = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_frts, len(class_names))
#HSN: in-feature for fc layer of resnet101
model_ft = model_ft.to(device)
#HSN: Model saved to CUDA:0 GPU
criterion = nn.CrossEntropyLoss()
#HSN: setting criterion as "training a classification problem with n number of classes".
#optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
optimizer_ft = optim.Adagrad(model_ft.parameters(), lr=0.001)
#HSN: Implements Adagrad algorithm with Learning rate 0.001
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
#HSN: Decays the learning rate of each parameter group by gamma every step_size epochs. This decay can happen simultaneously with other changes to the learning rate from outside this scheduler.
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20)
torch.save(model_ft, 'x.pth')
model = None
def load_model():
global model
filepath = 'x.pth'
model = torch.load(filepath)
class_names = ['without_mask','with_mask']
import pickle
import numpy as np
from flask import Flask, request
model = None
app = Flask(__name__)
model = None
def load_model():
    """Load the fine-tuned ResNet checkpoint from 'x.pth' into the global `model`.

    NOTE(review): torch.load of a whole pickled model requires the class
    definitions used at save time to be importable, and 'x.pth' must exist
    in the working directory — confirm before serving.
    """
    global model
    filepath = 'x.pth'
    model = torch.load(filepath)
# Index order must match the ImageFolder class order used at training time
# — TODO confirm the 'without_mask'/'with_mask' ordering against class_names
# computed from image_datasets above.
class_names = ['without_mask','with_mask']
@app.route('/')
def home_endpoint():
return 'Hello World!'
@app.route('/predict', methods=['POST'])
def get_prediction():
    """Predict for a single sample posted as a JSON array of features."""
    # Works only for a single sample
    if request.method == 'POST':
        data = request.get_json() # Get data posted as a json
        data = np.array(data)[np.newaxis, :] # converts shape from (4,) to (1, 4)
        # NOTE(review): `model` is loaded with torch.load and torch modules
        # have no .predict() method — this line will raise AttributeError.
        # Presumably it should convert `data` to a tensor and call
        # `model(...)`; confirm and fix.
        prediction = model.predict(data) # runs globally loaded model on the data
        return str(prediction[0])
if __name__ == '__main__':
load_model() # load model at the beginning once only
app.run(host='0.0.0.0', port=80)
```
| github_jupyter |
```
#default_exp data.core
#export
from fastai2.torch_basics import *
from fastai2.data.load import *
from nbdev.showdoc import *
```
# Data core
> Core functionality for gathering data
The classes here provide functionality for applying a list of transforms to a set of items (`TfmdLists`, `Datasets`) or a `DataLoader` (`TfmdDl`) as well as the base class used to gather the data for model training: `DataLoaders`.
## TfmdDL -
```
#export
@typedispatch
def show_batch(x, y, samples, ctxs=None, max_n=9, **kwargs):
    """Type-dispatched display of up to `max_n` decoded `samples`.

    `x`/`y` are the batch input/target and participate only in type
    dispatch; `ctxs` (plot axes, text contexts, ...) default to an
    infinite stream of `None`s when not supplied.
    """
    if ctxs is None: ctxs = Inf.nones
    if hasattr(samples[0], 'show'):
        # A whole sample knows how to show itself.
        ctxs = [s.show(ctx=c, **kwargs) for s,c,_ in zip(samples,ctxs,range(max_n))]
    else:
        # Otherwise show each part of every sample on the same contexts.
        for i in range_of(samples[0]):
            ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    return ctxs
```
`show_batch` is a type-dispatched function that is responsible for showing decoded `samples`. `x` and `y` are the input and the target in the batch to be shown, and are passed along to dispatch on their types. There is a different implementation of `show_batch` if `x` is a `TensorImage` or a `TensorText` for instance (see vision.core or text.data for more details). `ctxs` can be passed but the function is responsible to create them if necessary. `kwargs` depend on the specific implementation.
```
#export
@typedispatch
def show_results(x, y, samples, outs, ctxs=None, max_n=9, **kwargs):
    """Type-dispatched display of decoded `samples` next to predictions `outs`.

    `x`/`y` participate only in type dispatch. Each part of the samples is
    shown first, then each part of the outputs, reusing the same `ctxs`.
    """
    if ctxs is None: ctxs = Inf.nones
    for i in range(len(samples[0])):
        ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    for i in range(len(outs[0])):
        ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
    return ctxs
```
`show_results` is a type-dispatched function that is responsible for showing decoded `samples` and their corresponding `outs`. Like in `show_batch`, `x` and `y` are the input and the target in the batch to be shown, and are passed along to dispatch on their types. `ctxs` can be passed but the function is responsible to create them if necessary. `kwargs` depend on the specific implementation.
```
#export
_all_ = ["show_batch", "show_results"]
#export
_batch_tfms = ('after_item','before_batch','after_batch')
#export
@log_args(but_as=DataLoader.__init__)
@delegates()
class TfmdDL(DataLoader):
    "Transformed `DataLoader`"
    def __init__(self, dataset, bs=64, shuffle=False, num_workers=None, verbose=False, do_setup=True, **kwargs):
        # Wrap each batch-event callback (after_item/before_batch/after_batch)
        # in a `Pipeline`, then optionally run its setup against this loader.
        if num_workers is None: num_workers = min(16, defaults.cpus)
        for nm in _batch_tfms: kwargs[nm] = Pipeline(kwargs.get(nm,None))
        super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs)
        if do_setup:
            for nm in _batch_tfms:
                pv(f"Setting up {nm}: {kwargs[nm]}", verbose)
                kwargs[nm].setup(self)
    def _one_pass(self):
        # Run one item through the pipeline to discover the number of model
        # inputs (`_n_inp`) and the types to restore after batching (`_types`).
        b = self.do_batch([self.do_item(0)])
        if self.device is not None: b = to_device(b, self.device)
        its = self.after_batch(b)
        self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1
        self._types = explode_types(its)
    def _retain_dl(self,b):
        # Lazily compute `_types` on first use, then reapply them to `b`.
        if not getattr(self, '_types', None): self._one_pass()
        return retain_types(b, typs=self._types)
    @delegates(DataLoader.new)
    def new(self, dataset=None, cls=None, **kwargs):
        # Copy of self (possibly over a new dataset) sharing `_n_inp`/`_types`.
        res = super().new(dataset, cls, do_setup=False, **kwargs)
        # NOTE(review): both branches perform the same assignment; the `if`
        # only decides whether `_one_pass` must populate the attributes first.
        if not hasattr(self, '_n_inp') or not hasattr(self, '_types'):
            self._one_pass()
            res._n_inp,res._types = self._n_inp,self._types
        else: res._n_inp,res._types = self._n_inp,self._types
        return res
    def before_iter(self):
        super().before_iter()
        # Propagate the dataset's split index (train/valid) to each batch
        # pipeline so split-specific transforms fire on the right split.
        split_idx = getattr(self.dataset, 'split_idx', None)
        for nm in _batch_tfms:
            f = getattr(self,nm)
            if isinstance(f,Pipeline): f.split_idx=split_idx
    def decode(self, b): return self.before_batch.decode(to_cpu(self.after_batch.decode(self._retain_dl(b))))
    def decode_batch(self, b, max_n=9, full=True): return self._decode_batch(self.decode(b), max_n, full)
    def _decode_batch(self, b, max_n=9, full=True):
        # Split the batch into at most `max_n` samples and decode each one
        # through the item pipeline and (optionally fully) the dataset.
        f = self.after_item.decode
        f = compose(f, partial(getattr(self.dataset,'decode',noop), full = full))
        return L(batch_to_samples(b, max_n=max_n)).map(f)
    def _pre_show_batch(self, b, max_n=9):
        "Decode `b` to be ready for `show_batch`"
        b = self.decode(b)
        if hasattr(b, 'show'): return b,None,None
        its = self._decode_batch(b, max_n, full=False)
        if not is_listy(b): b,its = [b],L((o,) for o in its)
        return detuplify(b[:self.n_inp]),detuplify(b[self.n_inp:]),its
    def show_batch(self, b=None, max_n=9, ctxs=None, show=True, unique=False, **kwargs):
        # `unique=True` shows the same item repeatedly (useful to inspect
        # augmentation) by forcing every sampled index to 0.
        if unique:
            old_get_idxs = self.get_idxs
            self.get_idxs = lambda: Inf.zeros
        if b is None: b = self.one_batch()
        if not show: return self._pre_show_batch(b, max_n=max_n)
        show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
        if unique: self.get_idxs = old_get_idxs
    def show_results(self, b, out, max_n=9, ctxs=None, show=True, **kwargs):
        # Pair the decoded inputs/targets with the decoded predictions `out`.
        x,y,its = self.show_batch(b, max_n=max_n, show=False)
        b_out = type(b)(b[:self.n_inp] + (tuple(out) if is_listy(out) else (out,)))
        x1,y1,outs = self.show_batch(b_out, max_n=max_n, show=False)
        res = (x,x1,None,None) if its is None else (x, y, its, outs.itemgot(slice(self.n_inp,None)))
        if not show: return res
        show_results(*res, ctxs=ctxs, max_n=max_n, **kwargs)
    @property
    def n_inp(self):
        # Number of model inputs: delegate to the dataset when it knows,
        # otherwise infer via a single pipeline pass.
        if hasattr(self.dataset, 'n_inp'): return self.dataset.n_inp
        if not hasattr(self, '_n_inp'): self._one_pass()
        return self._n_inp
    def to(self, device):
        # Move the loader and any tensor parameters of its batch transforms
        # (declared via a transform's `parameters` attribute) to `device`.
        self.device = device
        for tfm in self.after_batch.fs:
            for a in L(getattr(tfm, 'parameters', None)): setattr(tfm, a, getattr(tfm, a).to(device))
        return self
```
A `TfmdDL` is a `DataLoader` that creates `Pipeline` from a list of `Transform`s for the callbacks `after_item`, `before_batch` and `after_batch`. As a result, it can decode or show a processed `batch`.
```
add_docs(TfmdDL,
decode="Decode `b` using `tfms`",
decode_batch="Decode `b` entirely",
new="Create a new version of self with a few changed attributes",
show_batch="Show `b` (defaults to `one_batch`), a list of lists of pipeline outputs (i.e. output of a `DataLoader`)",
show_results="Show each item of `b` and `out`",
before_iter="override",
to="Put self and its transforms state on `device`")
class _Category(int, ShowTitle): pass
#Test retain type
class NegTfm(Transform):
def encodes(self, x): return torch.neg(x)
def decodes(self, x): return torch.neg(x)
tdl = TfmdDL([(TensorImage([1]),)] * 4, after_batch=NegTfm(), bs=4, num_workers=4)
b = tdl.one_batch()
test_eq(type(b[0]), TensorImage)
b = (tensor([1.,1.,1.,1.]),)
test_eq(type(tdl.decode_batch(b)[0][0]), TensorImage)
class A(Transform):
def encodes(self, x): return x
def decodes(self, x): return TitledInt(x)
@Transform
def f(x)->None: return Tuple((x,x))
start = torch.arange(50)
test_eq_type(f(2), Tuple((2,2)))
a = A()
tdl = TfmdDL(start, after_item=lambda x: (a(x), f(x)), bs=4)
x,y = tdl.one_batch()
test_eq(type(y), Tuple)
s = tdl.decode_batch((x,y))
test_eq(type(s[0][1]), Tuple)
tdl = TfmdDL(torch.arange(0,50), after_item=A(), after_batch=NegTfm(), bs=4)
test_eq(tdl.dataset[0], start[0])
test_eq(len(tdl), (50-1)//4+1)
test_eq(tdl.bs, 4)
test_stdout(tdl.show_batch, '0\n1\n2\n3')
test_stdout(partial(tdl.show_batch, unique=True), '0\n0\n0\n0')
class B(Transform):
parameters = 'a'
def __init__(self): self.a = torch.tensor(0.)
def encodes(self, x): x
tdl = TfmdDL([(TensorImage([1]),)] * 4, after_batch=B(), bs=4)
test_eq(tdl.after_batch.fs[0].a.device, torch.device('cpu'))
tdl.to(default_device())
test_eq(tdl.after_batch.fs[0].a.device, default_device())
```
### Methods
```
show_doc(TfmdDL.one_batch)
tfm = NegTfm()
tdl = TfmdDL(start, after_batch=tfm, bs=4)
b = tdl.one_batch()
test_eq(tensor([0,-1,-2,-3]), b)
show_doc(TfmdDL.decode)
test_eq(tdl.decode(b), tensor(0,1,2,3))
show_doc(TfmdDL.decode_batch)
test_eq(tdl.decode_batch(b), [0,1,2,3])
show_doc(TfmdDL.show_batch)
show_doc(TfmdDL.to)
```
## DataLoaders -
```
# export
@docs
class DataLoaders(GetAttr):
    "Basic wrapper around several `DataLoader`s."
    _default='train'
    def __init__(self, *loaders, path='.', device=None):
        self.loaders,self.path = list(loaders),Path(path)
        # Assigning `device` moves every contained loader (see the setter).
        self.device = device
    def __getitem__(self, i): return self.loaders[i]
    def new_empty(self):
        # Same loaders/transforms over empty datasets (used for inference).
        loaders = [dl.new(dl.dataset.new_empty()) for dl in self.loaders]
        return type(self)(*loaders, path=self.path, device=self.device)
    def _set(i, self, v): self.loaders[i] = v
    # By convention loaders[0] is training and loaders[1] is validation.
    train ,valid = add_props(lambda i,x: x[i], _set)
    train_ds,valid_ds = add_props(lambda i,x: x[i].dataset)
    @property
    def device(self): return self._device
    @device.setter
    def device(self, d):
        for dl in self.loaders: dl.to(d)
        self._device = d
    def to(self, device):
        self.device = device
        return self
    def cuda(self): return self.to(device=default_device())
    def cpu(self): return self.to(device=torch.device('cpu'))
    @classmethod
    def from_dsets(cls, *ds, path='.', bs=64, device=None, dl_type=TfmdDL, **kwargs):
        # shuffle/drop_last default to True for the first (training) dataset
        # only; any kwarg may be a tuple giving one value per dataset.
        default = (True,) + (False,) * (len(ds)-1)
        defaults = {'shuffle': default, 'drop_last': default}
        for nm in _batch_tfms:
            if nm in kwargs: kwargs[nm] = Pipeline(kwargs[nm])
        kwargs = merge(defaults, {k: tuplify(v, match=ds) for k,v in kwargs.items()})
        kwargs = [{k: v[i] for k,v in kwargs.items()} for i in range_of(ds)]
        return cls(*[dl_type(d, bs=bs, **k) for d,k in zip(ds, kwargs)], path=path, device=device)
    @classmethod
    def from_dblock(cls, dblock, source, path='.', bs=64, val_bs=None, shuffle_train=True, device=None, **kwargs):
        return dblock.dataloaders(source, path=path, bs=bs, val_bs=val_bs, shuffle_train=shuffle_train, device=device, **kwargs)
    _docs=dict(__getitem__="Retrieve `DataLoader` at `i` (`0` is training, `1` is validation)",
               train="Training `DataLoader`",
               valid="Validation `DataLoader`",
               train_ds="Training `Dataset`",
               valid_ds="Validation `Dataset`",
               to="Use `device`",
               cuda="Use the gpu if available",
               cpu="Use the cpu",
               new_empty="Create a new empty version of `self` with the same transforms",
               from_dblock="Create a dataloaders from a given `dblock`")
dls = DataLoaders(tdl,tdl)
x = dls.train.one_batch()
x2 = first(tdl)
test_eq(x,x2)
x2 = dls.one_batch()
test_eq(x,x2)
#hide
#test assignment works
dls.train = dls.train.new(bs=4)
```
### Methods
```
show_doc(DataLoaders.__getitem__)
x2 = dls[0].one_batch()
test_eq(x,x2)
show_doc(DataLoaders.train, name="DataLoaders.train")
show_doc(DataLoaders.valid, name="DataLoaders.valid")
show_doc(DataLoaders.train_ds, name="DataLoaders.train_ds")
show_doc(DataLoaders.valid_ds, name="DataLoaders.valid_ds")
```
## TfmdLists -
```
#export
class FilteredBase:
    "Base class for lists with subsets"
    # Default DataLoader / DataLoaders types; overridable per subclass or per instance
    _dl_type,_dbunch_type = TfmdDL,DataLoaders
    def __init__(self, *args, dl_type=None, **kwargs):
        if dl_type is not None: self._dl_type = dl_type
        # Re-wrap `dataloaders` so its signature advertises the DL type's kwargs
        self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
        super().__init__(*args, **kwargs)
    @property
    def n_subsets(self): return len(self.splits)
    def _new(self, items, **kwargs): return super()._new(items, splits=self.splits, **kwargs)
    # Fix: `raise NotImplemented` raises "TypeError: exceptions must derive from
    # BaseException" — NotImplemented is a sentinel value, not an exception class.
    def subset(self): raise NotImplementedError
    def dataloaders(self, bs=64, val_bs=None, shuffle_train=True, n=None, path='.', dl_type=None, dl_kwargs=None,
                    device=None, **kwargs):
        "Build a `DataLoaders` with one `DataLoader` per subset; subset 0 is the (shuffled) training set"
        if device is None: device=default_device()
        if dl_kwargs is None: dl_kwargs = [{}] * self.n_subsets
        if dl_type is None: dl_type = self._dl_type
        # Drop the last incomplete batch only when shuffling (i.e. for training)
        drop_last = kwargs.pop('drop_last', shuffle_train)
        dl = dl_type(self.subset(0), bs=bs, shuffle=shuffle_train, drop_last=drop_last, n=n, device=device,
                     **merge(kwargs, dl_kwargs[0]))
        # Remaining subsets (validation/test) are derived from the training DL, unshuffled
        dls = [dl] + [dl.new(self.subset(i), bs=(bs if val_bs is None else val_bs), shuffle=False, drop_last=False,
                             n=None, **dl_kwargs[i]) for i in range(1, self.n_subsets)]
        return self._dbunch_type(*dls, path=path, device=device)
FilteredBase.train,FilteredBase.valid = add_props(lambda i,x: x.subset(i))
#export
class TfmdLists(FilteredBase, L, GetAttr):
    "A `Pipeline` of `tfms` applied to a collection of `items`"
    _default='tfms'  # GetAttr: delegate unknown attribute lookups to `self.tfms`
    def __init__(self, items, tfms, use_list=None, do_setup=True, split_idx=None, train_setup=True,
                 splits=None, types=None, verbose=False, dl_type=None):
        super().__init__(items, use_list=use_list)
        if dl_type is not None: self._dl_type = dl_type
        # No splits -> "everything is training, validation is empty"
        self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
        if isinstance(tfms,TfmdLists): tfms = tfms.tfms
        if isinstance(tfms,Pipeline): do_setup=False  # an existing Pipeline is already set up
        self.tfms = Pipeline(tfms, split_idx=split_idx)
        store_attr(self, 'types,split_idx')
        if do_setup:
            pv(f"Setting up {self.tfms}", verbose)
            self.setup(train_setup=train_setup)
    def _new(self, items, split_idx=None, **kwargs):
        # New list over `items` sharing the (already fitted) tfms; setup is skipped
        split_idx = ifnone(split_idx,self.split_idx)
        return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, split_idx=split_idx, **kwargs)
    def subset(self, i): return self._new(self._get(self.splits[i]), split_idx=i)
    def _after_item(self, o): return self.tfms(o)
    def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
    def __iter__(self): return (self[i] for i in range(len(self)))
    def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
    def decode(self, o, **kwargs): return self.tfms.decode(o, **kwargs)
    def __call__(self, o, **kwargs): return self.tfms.__call__(o, **kwargs)
    def overlapping_splits(self): return L(Counter(self.splits.concat()).values()).filter(gt(1))
    def new_empty(self): return self._new([])
    def setup(self, train_setup=True):
        "Set up `self.tfms` on self, then record the type produced after each tfm"
        self.tfms.setup(self, train_setup)
        if len(self) != 0:
            # Trace one (training) item through the pipeline to record intermediate types
            x = super().__getitem__(0) if self.splits is None else super().__getitem__(self.splits[0])[0]
            self.types = []
            for f in self.tfms.fs:
                self.types.append(getattr(f, 'input_types', type(x)))
                x = f(x)
            self.types.append(type(x))
            types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
            self.pretty_types = '\n'.join([f' - {t}' for t in types])
    def infer_idx(self, x):
        "Index of the first tfm in `self.tfms` that accepts an input of `type(x)`"
        idx = 0
        for t in self.types:
            if isinstance(x, t): break
            idx += 1
        types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
        pretty_types = '\n'.join([f' - {t}' for t in types])
        assert idx < len(self.types), f"Expected an input of type in \n{pretty_types}\n but got {type(x)}"
        return idx
    def infer(self, x):
        # Apply only the tail of the pipeline that is applicable to `x`
        return compose_tfms(x, tfms=self.tfms.fs[self.infer_idx(x):], split_idx=self.split_idx)
    def __getitem__(self, idx):
        res = super().__getitem__(idx)
        if self._after_item is None: return res
        # Single index -> transform the item; slice/mask -> map over the sublist
        return self._after_item(res) if is_indexer(idx) else res.map(self._after_item)
add_docs(TfmdLists,
setup="Transform setup with self",
decode="From `Pipeline",
show="From `Pipeline",
overlapping_splits="All splits that are in more than one split",
subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`",
infer="Apply `self.tfms` to `x` starting at the right tfm depending on the type of `x`",
new_empty="A new version of `self` but with no items")
#exports
def decode_at(o, idx):
    "Decoded item at `idx`"
    # Fetch the (encoded) item first, then run it back through `o`'s decoders
    item = o[idx]
    return o.decode(item)
#exports
def show_at(o, idx, **kwargs):
    "Show item at `idx`"
    # Fix: the original docstring line ended with a comma, which turned it into a
    # discarded tuple expression, leaving `show_at.__doc__` as None.
    return o.show(o[idx], **kwargs)
```
A `TfmdLists` combines a collection of object with a `Pipeline`. `tfms` can either be a `Pipeline` or a list of transforms, in which case, it will wrap them in a `Pipeline`. `use_list` is passed along to `L` with the `items` and `split_idx` are passed to each transform of the `Pipeline`. `do_setup` indicates if the `Pipeline.setup` method should be called during initialization.
```
class _IntFloatTfm(Transform):
    "Test tfm: encodes to `TitledInt`, decodes back to `TitledFloat`"
    def encodes(self, o): return TitledInt(o)
    def decodes(self, o): return TitledFloat(o)
int2f_tfm=_IntFloatTfm()
def _neg(o): return -o
neg_tfm = Transform(_neg, _neg)
items = L([1.,2.,3.]); tfms = [neg_tfm, int2f_tfm]
tl = TfmdLists(items, tfms=tfms)
test_eq_type(tl[0], TitledInt(-1))
test_eq_type(tl[1], TitledInt(-2))
test_eq_type(tl.decode(tl[2]), TitledFloat(3.))
test_stdout(lambda: show_at(tl, 2), '-3')
test_eq(tl.types, [float, float, TitledInt])
tl
# add splits to TfmdLists
splits = [[0,2],[1]]
tl = TfmdLists(items, tfms=tfms, splits=splits)
test_eq(tl.n_subsets, 2)
test_eq(tl.train, tl.subset(0))
test_eq(tl.valid, tl.subset(1))
test_eq(tl.train.items, items[splits[0]])
test_eq(tl.valid.items, items[splits[1]])
test_eq(tl.train.tfms.split_idx, 0)
test_eq(tl.valid.tfms.split_idx, 1)
test_eq(tl.train.new_empty().split_idx, 0)
test_eq(tl.valid.new_empty().split_idx, 1)
test_eq_type(tl.splits, L(splits))
assert not tl.overlapping_splits()
df = pd.DataFrame(dict(a=[1,2,3],b=[2,3,4]))
tl = TfmdLists(df, lambda o: o.a+1, splits=[[0],[1,2]])
test_eq(tl[1,2], [3,4])
tr = tl.subset(0)
test_eq(tr[:], [2])
val = tl.subset(1)
test_eq(val[:], [3,4])
class _B(Transform):
    "Test tfm with state: `setups` learns the mean of the items, which shifts values"
    def __init__(self): self.m = 0
    def encodes(self, o): return o+self.m
    def decodes(self, o): return o-self.m
    def setups(self, items):
        print(items)
        self.m = tensor(items).float().mean().item()
# test for setup, which updates `self.m`
tl = TfmdLists(items, _B())
test_eq(tl.m, 2)
```
Here's how we can use `TfmdLists.setup` to implement a simple category list, getting labels from a mock file list:
```
class _Cat(Transform):
    "Test categorize tfm: `setups` builds `vocab` and its inverse mapping `o2i`"
    order = 1  # run after labelling functions (default order 0)
    def encodes(self, o): return int(self.o2i[o])
    def decodes(self, o): return TitledStr(self.vocab[o])
    def setups(self, items): self.vocab,self.o2i = uniqueify(L(items), sort=True, bidir=True)
tcat = _Cat()
def _lbl(o): return TitledStr(o.split('_')[0])
# Check that tfms are sorted by `order` & `_lbl` is called first
fns = ['dog_0.jpg','cat_0.jpg','cat_2.jpg','cat_1.jpg','dog_1.jpg']
tl = TfmdLists(fns, [tcat,_lbl])
exp_voc = ['cat','dog']
test_eq(tcat.vocab, exp_voc)
test_eq(tl.tfms.vocab, exp_voc)
test_eq(tl.vocab, exp_voc)
test_eq(tl, (1,0,0,0,1))
test_eq([tl.decode(o) for o in tl], ('dog','cat','cat','cat','dog'))
#Check only the training set is taken into account for setup
tl = TfmdLists(fns, [tcat,_lbl], splits=[[0,4], [1,2,3]])
test_eq(tcat.vocab, ['dog'])
tfm = NegTfm(split_idx=1)
tds = TfmdLists(start, A())
tdl = TfmdDL(tds, after_batch=tfm, bs=4)
x = tdl.one_batch()
test_eq(x, torch.arange(4))
tds.split_idx = 1
x = tdl.one_batch()
test_eq(x, -torch.arange(4))
tds.split_idx = 0
x = tdl.one_batch()
test_eq(x, torch.arange(4))
tds = TfmdLists(start, A())
tdl = TfmdDL(tds, after_batch=NegTfm(), bs=4)
test_eq(tdl.dataset[0], start[0])
test_eq(len(tdl), (len(tds)-1)//4+1)
test_eq(tdl.bs, 4)
test_stdout(tdl.show_batch, '0\n1\n2\n3')
show_doc(TfmdLists.subset)
show_doc(TfmdLists.infer_idx)
show_doc(TfmdLists.infer)
def mult(x): return x*2
mult.order = 2
fns = ['dog_0.jpg','cat_0.jpg','cat_2.jpg','cat_1.jpg','dog_1.jpg']
tl = TfmdLists(fns, [_lbl,_Cat(),mult])
test_eq(tl.infer_idx('dog_45.jpg'), 0)
test_eq(tl.infer('dog_45.jpg'), 2)
test_eq(tl.infer_idx(4), 2)
test_eq(tl.infer(4), 8)
test_fail(lambda: tl.infer_idx(2.0))
test_fail(lambda: tl.infer(2.0))
#hide
#Test input_types works on a Transform
cat = _Cat()
cat.input_types = (str, float)
tl = TfmdLists(fns, [_lbl,cat,mult])
test_eq(tl.infer_idx(2.0), 1)
#hide
#Test type annotations work on a function
def mult(x:(int,float)): return x*2
mult.order = 2
tl = TfmdLists(fns, [_lbl,_Cat(),mult])
test_eq(tl.infer_idx(2.0), 2)
```
## Datasets -
```
#export
@docs
@delegates(TfmdLists)
class Datasets(FilteredBase):
    "A dataset that creates a tuple from each `tfms`, passed thru `item_tfms`"
    def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
        super().__init__(dl_type=dl_type)
        # One TfmdLists per transform pipeline; each produces one element of the item tuple
        self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
        # By default every tuple element but the last is an input (minimum 1)
        self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
    def __getitem__(self, it):
        res = tuple([tl[it] for tl in self.tls])
        # For a batch of indices, transpose to a list of per-item tuples
        return res if is_indexer(it) else list(zip(*res))
    def __getattr__(self,k): return gather_attrs(self, k, 'tls')
    def __dir__(self): return super().__dir__() + gather_attr_names(self, 'tls')
    def __len__(self): return len(self.tls[0])
    def __iter__(self): return (self[i] for i in range(len(self)))
    def __repr__(self): return coll_repr(self)
    def decode(self, o, full=True): return tuple(tl.decode(o_, full=full) for o_,tl in zip(o,tuplify(self.tls, match=o)))
    def subset(self, i): return type(self)(tls=L(tl.subset(i) for tl in self.tls), n_inp=self.n_inp)
    def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, **kwargs)
    def overlapping_splits(self): return self.tls[0].overlapping_splits()
    def new_empty(self): return type(self)(tls=[tl.new_empty() for tl in self.tls], n_inp=self.n_inp)
    # Splits/items live on the first TfmdLists; all tls are kept in sync
    @property
    def splits(self): return self.tls[0].splits
    @property
    def split_idx(self): return self.tls[0].tfms.split_idx
    @property
    def items(self): return self.tls[0].items
    @items.setter
    def items(self, v):
        for tl in self.tls: tl.items = v
    def show(self, o, ctx=None, **kwargs):
        # Each element of the tuple is shown by its own tls, threading `ctx` through
        for o_,tl in zip(o,self.tls): ctx = tl.show(o_, ctx=ctx, **kwargs)
        return ctx
    @contextmanager
    def set_split_idx(self, i):
        # Temporarily apply another split's transforms (e.g. valid tfms on train data)
        old_split_idx = self.split_idx
        for tl in self.tls: tl.tfms.split_idx = i
        try: yield self
        finally:
            for tl in self.tls: tl.tfms.split_idx = old_split_idx
    _docs=dict(
        decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
        show="Show item `o` in `ctx`",
        dataloaders="Get a `DataLoaders`",
        overlapping_splits="All splits that are in more than one split",
        subset="New `Datasets` that only includes subset `i`",
        new_empty="Create a new empty version of the `self`, keeping only the transforms",
        set_split_idx="Contextmanager to use the same `Datasets` with another `split_idx`"
    )
```
A `Datasets` creates a tuple from `items` (typically input,target) by applying to them each list of `Transform` (or `Pipeline`) in `tfms`. Note that if `tfms` contains only one list of `tfms`, the items given by `Datasets` will be tuples of one element.
`n_inp` is the number of elements in the tuples that should be considered part of the input and will default to 1 if `tfms` consists of one set of transforms, `len(tfms)-1` otherwise. In most cases, the number of elements in the tuples spit out by `Datasets` will be 2 (for input,target) but it can happen that there is 3 (Siamese networks or tabular data) in which case we need to be able to determine when the inputs end and the targets begin.
```
items = [1,2,3,4]
dsets = Datasets(items, [[neg_tfm,int2f_tfm], [add(1)]])
t = dsets[0]
test_eq(t, (-1,2))
test_eq(dsets[0,1,2], [(-1,2),(-2,3),(-3,4)])
test_eq(dsets.n_inp, 1)
dsets.decode(t)
class Norm(Transform):
    "Normalize/denormalize with mean `m` and std `s` learned from the setup items"
    def encodes(self, o): return (o-self.m)/self.s
    def decodes(self, o): return (o*self.s)+self.m
    def setups(self, items):
        its = tensor(items).float()
        self.m,self.s = its.mean(),its.std()
items = [1,2,3,4]
nrm = Norm()
dsets = Datasets(items, [[neg_tfm,int2f_tfm], [neg_tfm,nrm]])
x,y = zip(*dsets)
test_close(tensor(y).mean(), 0)
test_close(tensor(y).std(), 1)
test_eq(x, (-1,-2,-3,-4,))
test_eq(nrm.m, -2.5)
test_stdout(lambda:show_at(dsets, 1), '-2')
test_eq(dsets.m, nrm.m)
test_eq(dsets.norm.m, nrm.m)
test_eq(dsets.train.norm.m, nrm.m)
#hide
#Check filtering is properly applied
class B(Transform):
def encodes(self, x)->None: return int(x+1)
def decodes(self, x): return TitledInt(x-1)
add1 = B(split_idx=1)
dsets = Datasets(items, [neg_tfm, [neg_tfm,int2f_tfm,add1]], splits=[[3],[0,1,2]])
test_eq(dsets[1], [-2,-2])
test_eq(dsets.valid[1], [-2,-1])
test_eq(dsets.valid[[1,1]], [[-2,-1], [-2,-1]])
test_eq(dsets.train[0], [-4,-4])
test_fns = ['dog_0.jpg','cat_0.jpg','cat_2.jpg','cat_1.jpg','kid_1.jpg']
tcat = _Cat()
dsets = Datasets(test_fns, [[tcat,_lbl]], splits=[[0,1,2], [3,4]])
test_eq(tcat.vocab, ['cat','dog'])
test_eq(dsets.train, [(1,),(0,),(0,)])
test_eq(dsets.valid[0], (0,))
test_stdout(lambda: show_at(dsets.train, 0), "dog")
inp = [0,1,2,3,4]
dsets = Datasets(inp, tfms=[None])
test_eq(*dsets[2], 2) # Retrieve one item (subset 0 is the default)
test_eq(dsets[1,2], [(1,),(2,)]) # Retrieve two items by index
mask = [True,False,False,True,False]
test_eq(dsets[mask], [(0,),(3,)]) # Retrieve two items by mask
inp = pd.DataFrame(dict(a=[5,1,2,3,4]))
dsets = Datasets(inp, tfms=attrgetter('a')).subset(0)
test_eq(*dsets[2], 2) # Retrieve one item (subset 0 is the default)
test_eq(dsets[1,2], [(1,),(2,)]) # Retrieve two items by index
mask = [True,False,False,True,False]
test_eq(dsets[mask], [(5,),(3,)]) # Retrieve two items by mask
#test n_inp
inp = [0,1,2,3,4]
dsets = Datasets(inp, tfms=[None])
test_eq(dsets.n_inp, 1)
dsets = Datasets(inp, tfms=[[None],[None],[None]])
test_eq(dsets.n_inp, 2)
dsets = Datasets(inp, tfms=[[None],[None],[None]], n_inp=1)
test_eq(dsets.n_inp, 1)
# splits can be indices
dsets = Datasets(range(5), tfms=[None], splits=[tensor([0,2]), [1,3,4]])
test_eq(dsets.subset(0), [(0,),(2,)])
test_eq(dsets.train, [(0,),(2,)]) # Subset 0 is aliased to `train`
test_eq(dsets.subset(1), [(1,),(3,),(4,)])
test_eq(dsets.valid, [(1,),(3,),(4,)]) # Subset 1 is aliased to `valid`
test_eq(*dsets.valid[2], 4)
#assert '[(1,),(3,),(4,)]' in str(dsets) and '[(0,),(2,)]' in str(dsets)
dsets
# splits can be boolean masks (they don't have to cover all items, but must be disjoint)
splits = [[False,True,True,False,True], [True,False,False,False,False]]
dsets = Datasets(range(5), tfms=[None], splits=splits)
test_eq(dsets.train, [(1,),(2,),(4,)])
test_eq(dsets.valid, [(0,)])
# apply transforms to all items
tfm = [[lambda x: x*2,lambda x: x+1]]
splits = [[1,2],[0,3,4]]
dsets = Datasets(range(5), tfm, splits=splits)
test_eq(dsets.train,[(3,),(5,)])
test_eq(dsets.valid,[(1,),(7,),(9,)])
test_eq(dsets.train[False,True], [(5,)])
# only transform subset 1
class _Tfm(Transform):
    "Doubling tfm applied only to subset 1 (validation)"
    split_idx=1
    def encodes(self, x): return x*2
    def decodes(self, x): return TitledStr(x//2)
dsets = Datasets(range(5), [_Tfm()], splits=[[1,2],[0,3,4]])
test_eq(dsets.train,[(1,),(2,)])
test_eq(dsets.valid,[(0,),(6,),(8,)])
test_eq(dsets.train[False,True], [(2,)])
dsets
#A context manager to change the split_idx and apply the validation transform on the training set
ds = dsets.train
with ds.set_split_idx(1):
test_eq(ds,[(2,),(4,)])
test_eq(dsets.train,[(1,),(2,)])
#hide
#Test Datasets pickles
dsrc1 = pickle.loads(pickle.dumps(dsets))
test_eq(dsets.train, dsrc1.train)
test_eq(dsets.valid, dsrc1.valid)
dsets = Datasets(range(5), [_Tfm(),noop], splits=[[1,2],[0,3,4]])
test_eq(dsets.train,[(1,1),(2,2)])
test_eq(dsets.valid,[(0,0),(6,3),(8,4)])
start = torch.arange(0,50)
tds = Datasets(start, [A()])
tdl = TfmdDL(tds, after_item=NegTfm(), bs=4)
b = tdl.one_batch()
test_eq(tdl.decode_batch(b), ((0,),(1,),(2,),(3,)))
test_stdout(tdl.show_batch, "0\n1\n2\n3")
# only transform subset 1
class _Tfm(Transform):
split_idx=1
def encodes(self, x): return x*2
dsets = Datasets(range(8), [None], splits=[[1,2,5,7],[0,3,4,6]])
# only transform subset 1
class _Tfm(Transform):
split_idx=1
def encodes(self, x): return x*2
dsets = Datasets(range(8), [None], splits=[[1,2,5,7],[0,3,4,6]])
dls = dsets.dataloaders(bs=4, after_batch=_Tfm(), shuffle_train=False, device=torch.device('cpu'))
test_eq(dls.train, [(tensor([1,2,5, 7]),)])
test_eq(dls.valid, [(tensor([0,6,8,12]),)])
test_eq(dls.n_inp, 1)
```
### Methods
```
items = [1,2,3,4]
dsets = Datasets(items, [[neg_tfm,int2f_tfm]])
#hide_input
_dsrc = Datasets([1,2])
show_doc(_dsrc.dataloaders, name="Datasets.dataloaders")
show_doc(Datasets.decode)
test_eq(*dsets[0], -1)
test_eq(*dsets.decode((-1,)), 1)
show_doc(Datasets.show)
test_stdout(lambda:dsets.show(dsets[1]), '-2')
show_doc(Datasets.new_empty)
items = [1,2,3,4]
nrm = Norm()
dsets = Datasets(items, [[neg_tfm,int2f_tfm], [neg_tfm]])
empty = dsets.new_empty()
test_eq(empty.items, [])
#hide
#test it works for dataframes too
df = pd.DataFrame({'a':[1,2,3,4,5], 'b':[6,7,8,9,10]})
dsets = Datasets(df, [[attrgetter('a')], [attrgetter('b')]])
empty = dsets.new_empty()
```
## Add test set for inference
```
# only transform subset 1
class _Tfm1(Transform):
split_idx=0
def encodes(self, x): return x*3
dsets = Datasets(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]])
test_eq(dsets.train, [(3,),(6,),(15,),(21,)])
test_eq(dsets.valid, [(0,),(6,),(8,),(12,)])
#export
def test_set(dsets, test_items, rm_tfms=None, with_labels=False):
    "Create a test set from `test_items` using validation transforms of `dsets`"
    if isinstance(dsets, Datasets):
        # Keep the label tls only when requested; otherwise just the input pipelines
        tls = dsets.tls if with_labels else dsets.tls[:dsets.n_inp]
        test_tls = [tl._new(test_items, split_idx=1) for tl in tls]  # split_idx=1 -> validation tfms
        # rm_tfms = how many leading tfms to drop per pipeline; inferred from the
        # type of the first test item when not given explicitly
        if rm_tfms is None: rm_tfms = [tl.infer_idx(get_first(test_items)) for tl in test_tls]
        else: rm_tfms = tuplify(rm_tfms, match=test_tls)
        for i,j in enumerate(rm_tfms): test_tls[i].tfms.fs = test_tls[i].tfms.fs[j:]
        return Datasets(tls=test_tls)
    elif isinstance(dsets, TfmdLists):
        # Same logic for a single TfmdLists
        test_tl = dsets._new(test_items, split_idx=1)
        if rm_tfms is None: rm_tfms = dsets.infer_idx(get_first(test_items))
        test_tl.tfms.fs = test_tl.tfms.fs[rm_tfms:]
        return test_tl
    else: raise Exception(f"This method requires using the fastai library to assemble your data. Expected a `Datasets` or a `TfmdLists` but got {dsets.__class__.__name__}")
class _Tfm1(Transform):
split_idx=0
def encodes(self, x): return x*3
dsets = Datasets(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]])
test_eq(dsets.train, [(3,),(6,),(15,),(21,)])
test_eq(dsets.valid, [(0,),(6,),(8,),(12,)])
#Transforms of the validation set are applied
tst = test_set(dsets, [1,2,3])
test_eq(tst, [(2,),(4,),(6,)])
#hide
#Test with different types
tfm = _Tfm1()
tfm.split_idx,tfm.order = None,2
dsets = Datasets(['dog', 'cat', 'cat', 'dog'], [[_Cat(),tfm]])
#With strings
test_eq(test_set(dsets, ['dog', 'cat', 'cat']), [(3,), (0,), (0,)])
#With ints
test_eq(test_set(dsets, [1,2]), [(3,), (6,)])
#hide
#Test with various input lengths
dsets = Datasets(range(8), [[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]])
tst = test_set(dsets, [1,2,3])
test_eq(tst, [(2,2),(4,4),(6,6)])
dsets = Datasets(range(8), [[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]], n_inp=1)
tst = test_set(dsets, [1,2,3])
test_eq(tst, [(2,),(4,),(6,)])
#hide
#Test with rm_tfms
dsets = Datasets(range(8), [[_Tfm(),_Tfm()]], splits=[[1,2,5,7],[0,3,4,6]])
tst = test_set(dsets, [1,2,3])
test_eq(tst, [(4,),(8,),(12,)])
dsets = Datasets(range(8), [[_Tfm(),_Tfm()]], splits=[[1,2,5,7],[0,3,4,6]])
tst = test_set(dsets, [1,2,3], rm_tfms=1)
test_eq(tst, [(2,),(4,),(6,)])
dsets = Datasets(range(8), [[_Tfm(),_Tfm()], [_Tfm(),_Tfm()]], splits=[[1,2,5,7],[0,3,4,6]], n_inp=2)
tst = test_set(dsets, [1,2,3], rm_tfms=(1,0))
test_eq(tst, [(2,4),(4,8),(6,12)])
#export
@delegates(TfmdDL.__init__)
@patch
def test_dl(self:DataLoaders, test_items, rm_type_tfms=None, with_labels=False, **kwargs):
    "Create a test dataloader from `test_items` using validation transforms of `dls`"
    # Fall back to the raw items when the valid dataset is not a fastai Datasets/TfmdLists
    test_ds = test_set(self.valid_ds, test_items, rm_tfms=rm_type_tfms, with_labels=with_labels
                       ) if isinstance(self.valid_ds, (Datasets, TfmdLists)) else test_items
    return self.valid.new(test_ds, **kwargs)
dsets = Datasets(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]])
dls = dsets.dataloaders(bs=4, device=torch.device('cpu'))
dsets = Datasets(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]])
dls = dsets.dataloaders(bs=4, device=torch.device('cpu'))
tst_dl = dls.test_dl([2,3,4,5])
test_eq(tst_dl._n_inp, 1)
test_eq(list(tst_dl), [(tensor([ 4, 6, 8, 10]),)])
#Test you can change transforms
tst_dl = dls.test_dl([2,3,4,5], after_item=add1)
test_eq(list(tst_dl), [(tensor([ 5, 7, 9, 11]),)])
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
# Sentiment Analysis
## Updating a Model in SageMaker
_Deep Learning Nanodegree Program | Deployment_
---
In this notebook we will consider a situation in which a model that we constructed is no longer working as we intended. In particular, we will look at the XGBoost sentiment analysis model that we constructed earlier. In this case, however, we have some new data that our model doesn't seem to perform very well on. As a result, we will re-train our model and update an existing endpoint so that it uses our new model.
This notebook starts by re-creating the XGBoost sentiment analysis model that was created in earlier notebooks. This means that you will have already seen the cells up to the end of Step 4. The new content in this notebook begins at Step 5.
## Instructions
Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.
> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.
## Step 1: Downloading the data
The dataset we are going to use is very popular among researchers in Natural Language Processing, usually referred to as the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/). It consists of movie reviews from the website [imdb.com](http://www.imdb.com/), each labeled as either '**pos**itive', if the reviewer enjoyed the film, or '**neg**ative' otherwise.
> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.
We begin by using some Jupyter Notebook magic to download and extract the dataset.
```
%mkdir ../data
!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
```
## Step 2: Preparing the data
The data we have downloaded is split into various files, each of which contains a single review. It will be much easier going forward if we combine these individual files into two large files, one for training and one for testing.
```
import os
import glob
def read_imdb_data(data_dir='../data/aclImdb'):
    """Read IMDb reviews from `data_dir` into nested dicts.

    Returns `(data, labels)` where each is keyed by split ('train'/'test')
    then sentiment ('pos'/'neg'); labels are 1 for positive, 0 for negative.
    """
    data, labels = {}, {}
    for split in ['train', 'test']:
        data[split], labels[split] = {}, {}
        for sentiment in ['pos', 'neg']:
            reviews, tags = [], []
            pattern = os.path.join(data_dir, split, sentiment, '*.txt')
            for fname in glob.glob(pattern):
                with open(fname) as review:
                    reviews.append(review.read())
                # Here we represent a positive review by '1' and a negative review by '0'
                tags.append(1 if sentiment == 'pos' else 0)
            assert len(reviews) == len(tags), \
                   "{}/{} data size does not match labels size".format(split, sentiment)
            data[split][sentiment] = reviews
            labels[split][sentiment] = tags
    return data, labels
data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
len(data['train']['pos']), len(data['train']['neg']),
len(data['test']['pos']), len(data['test']['neg'])))
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
    """Prepare training and test sets from IMDb movie reviews.

    Merges pos/neg reviews into single train/test lists and shuffles each
    split, keeping every review paired with its label.
    """
    #Combine positive and negative reviews and labels
    data_train = data['train']['pos'] + data['train']['neg']
    data_test = data['test']['pos'] + data['test']['neg']
    labels_train = labels['train']['pos'] + labels['train']['neg']
    labels_test = labels['test']['pos'] + labels['test']['neg']
    #Shuffle reviews and corresponding labels within training and test sets
    # (sklearn.utils.shuffle permutes both lists with the same ordering)
    data_train, labels_train = shuffle(data_train, labels_train)
    data_test, labels_test = shuffle(data_test, labels_test)
    # Return a unified training data, test data, training labels, test labels
    return data_train, data_test, labels_train, labels_test
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
train_X[100]
```
## Step 3: Processing the data
Now that we have our training and testing datasets merged and ready to use, we need to start processing the raw data into something that will be useable by our machine learning algorithm. To begin with, we remove any html formatting that may appear in the reviews and perform some standard natural language processing in order to homogenize the data.
```
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from nltk.stem.porter import *
stemmer = PorterStemmer()
import re
from bs4 import BeautifulSoup
def review_to_words(review):
    """Convert a raw review string to a list of stemmed, stopword-free words."""
    text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case
    words = text.split() # Split string into words
    # Hoisted: build the stopword set once instead of scanning the list per word
    stops = set(stopwords.words("english"))
    words = [w for w in words if w not in stops] # Remove stopwords
    # Fix: reuse the module-level `stemmer` instead of constructing a new
    # PorterStemmer for every single word
    words = [stemmer.stem(w) for w in words] # stem
    return words
review_to_words(train_X[100])
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files
os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
                    cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
    """Convert each review to a word list; read from/write to cache if available.

    `data_train`/`data_test` are lists of raw review strings; the labels are
    passed through unchanged (but stored in the cache alongside the words).
    Set `cache_file=None` to disable caching. Returns
    `(words_train, words_test, labels_train, labels_test)`.
    """
    # If cache_file is not None, try to read from it first
    cache_data = None
    if cache_file is not None:
        try:
            with open(os.path.join(cache_dir, cache_file), "rb") as f:
                cache_data = pickle.load(f)
            print("Read preprocessed data from cache file:", cache_file)
        # Fix: was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit; only ignore "no usable cache" errors.
        except (OSError, EOFError, pickle.UnpicklingError, AttributeError):
            pass  # unable to read from cache, but that's okay
    # If cache is missing, then do the heavy lifting
    if cache_data is None:
        # Preprocess training and test data to obtain words for each review
        words_train = [review_to_words(review) for review in data_train]
        words_test = [review_to_words(review) for review in data_test]
        # Write to cache file for future runs
        if cache_file is not None:
            cache_data = dict(words_train=words_train, words_test=words_test,
                              labels_train=labels_train, labels_test=labels_test)
            with open(os.path.join(cache_dir, cache_file), "wb") as f:
                pickle.dump(cache_data, f)
            print("Wrote preprocessed data to cache file:", cache_file)
    else:
        # Unpack data loaded from cache file
        words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
                cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
    return words_train, words_test, labels_train, labels_test
# Preprocess data
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
```
### Extract Bag-of-Words features
For the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation.
```
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.externals import joblib
import joblib
# joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays
def extract_BoW_features(words_train, words_test, vocabulary_size=5000,
                         cache_dir=cache_dir, cache_file="bow_features.pkl"):
    """Extract Bag-of-Words for a given set of documents, already preprocessed into words.

    The vectorizer is fit on the training documents only, so the test set never
    influences the vocabulary. Returns dense feature arrays for both sets plus
    the fitted vocabulary; results are cached with joblib.
    """
    # If cache_file is not None, try to read from it first
    cache_data = None
    if cache_file is not None:
        try:
            with open(os.path.join(cache_dir, cache_file), "rb") as f:
                cache_data = joblib.load(f)
            print("Read features from cache file:", cache_file)
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to (OSError, EOFError).
        except:
            pass  # unable to read from cache, but that's okay
    # If cache is missing, then do the heavy lifting
    if cache_data is None:
        # Fit a vectorizer to training documents and use it to transform them
        # NOTE: Training documents have already been preprocessed and tokenized into words;
        #       pass in dummy functions to skip those steps, e.g. preprocessor=lambda x: x
        vectorizer = CountVectorizer(max_features=vocabulary_size,
                preprocessor=lambda x: x, tokenizer=lambda x: x)  # already preprocessed
        features_train = vectorizer.fit_transform(words_train).toarray()
        # Apply the same vectorizer to transform the test documents (ignore unknown words)
        features_test = vectorizer.transform(words_test).toarray()
        # NOTE: Remember to convert the features using .toarray() for a compact representation
        # Write to cache file for future runs (store vocabulary as well)
        if cache_file is not None:
            vocabulary = vectorizer.vocabulary_
            cache_data = dict(features_train=features_train, features_test=features_test,
                              vocabulary=vocabulary)
            with open(os.path.join(cache_dir, cache_file), "wb") as f:
                joblib.dump(cache_data, f)
            print("Wrote features to cache file:", cache_file)
    else:
        # Unpack data loaded from cache file
        features_train, features_test, vocabulary = (cache_data['features_train'],
                cache_data['features_test'], cache_data['vocabulary'])
    # Return both the extracted features as well as the vocabulary
    return features_train, features_test, vocabulary
# Extract Bag of Words features for both training and test datasets
train_X, test_X, vocabulary = extract_BoW_features(train_X, test_X)
len(train_X[100])
```
## Step 4: Classification using XGBoost
Now that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker.
### Writing the dataset
The XGBoost classifier that we will be using requires the dataset to be written to a file and stored using Amazon S3. To do this, we will start by splitting the training dataset into two parts, the data we will train the model with and a validation set. Then, we will write those datasets to a file and upload the files to S3. In addition, we will write the test set input to a file and upload the file to S3. This is so that we can use SageMaker's Batch Transform functionality to test our model once we've fit it.
```
import pandas as pd
# Earlier we shuffled the training dataset so to make things simple we can just assign
# the first 10 000 reviews to the validation set and use the remaining reviews for training.
val_X = pd.DataFrame(train_X[:10000])
train_X = pd.DataFrame(train_X[10000:])
val_y = pd.DataFrame(train_y[:10000])
train_y = pd.DataFrame(train_y[10000:])
```
The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample.
For more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__
```
# First we make sure that the local directory in which we'd like to store the training and validation csv files exists.
data_dir = '../data/sentiment_update'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)
pd.concat([val_y, val_X], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)
pd.concat([train_y, train_X], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
# To save a bit of memory we can set test_X, train_X, val_X, train_y and val_y to None.
test_X = train_X = val_X = train_y = val_y = None
```
### Uploading Training / Validation files to S3
Amazon's S3 service allows us to store files that can be accessed by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later.
For this, and most other tasks we will be doing using SageMaker, there are two methods we could use. The first is to use the low level functionality of SageMaker which requires knowing each of the objects involved in the SageMaker environment. The second is to use the high level functionality in which certain choices have been made on the user's behalf. The low level approach benefits from allowing the user a great deal of flexibility while the high level approach makes development much quicker. For our purposes we will opt to use the high level approach although using the low-level approach is certainly an option.
Recall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable. To see this for yourself, once you have uploaded the data files, go to the S3 console and look to see where the files have been uploaded.
For additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__
```
import sagemaker
session = sagemaker.Session() # Store the current SageMaker session
# S3 prefix (which folder will we use)
prefix = 'sentiment-update'
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)
train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
```
### Creating the XGBoost model
Now that the data has been uploaded it is time to create the XGBoost model. To begin with, we need to do some setup. At this point it is worth discussing what a model is in SageMaker. It is easiest to think of a model as comprising three different objects in the SageMaker ecosystem, which interact with one another.
- Model Artifacts
- Training Code (Container)
- Inference Code (Container)
The Model Artifacts are what you might think of as the actual model itself. For example, if you were building a neural network, the model artifacts would be the weights of the various layers. In our case, for an XGBoost model, the artifacts are the actual trees that are created during training.
The other two objects, the training code and the inference code, are then used to manipulate the model artifacts. More precisely, the training code uses the training data that is provided and creates the model artifacts, while the inference code uses the model artifacts to make predictions on new data.
The way that SageMaker runs the training and inference code is by making use of Docker containers. For now, think of a container as being a way of packaging code up so that dependencies aren't an issue.
```
from sagemaker import get_execution_role
# Our current execution role is required when creating the model as the training
# and inference code will need to access the model artifacts.
role = get_execution_role()
# We need to retrieve the location of the container which is provided by Amazon for using XGBoost.
# As a matter of convenience, the training and inference code both use the same container.
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(session.boto_region_name, 'xgboost')
# First we create a SageMaker estimator object for our model.
xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use
role, # What is our current IAM Role
train_instance_count=1, # How many compute instances
train_instance_type='ml.m4.xlarge', # What kind of compute instances
output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
sagemaker_session=session)
# And then set the algorithm specific parameters.
xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
objective='binary:logistic',
early_stopping_rounds=10,
num_round=500)
```
### Fit the XGBoost model
Now that our model has been set up we simply need to attach the training and validation datasets and then ask SageMaker to set up the computation.
```
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
```
### Testing the model
Now that we've fit our XGBoost model, it's time to see how well it performs. To do this we will use SageMaker's Batch Transform functionality. Batch Transform is a convenient way to perform inference on a large dataset in a way that is not realtime. That is, we don't necessarily need to use our model's results immediately and instead we can perform inference on a large number of samples. An example of this in industry might be performing an end of month report. This method of inference can also be useful to us as it means we can perform inference on our entire test set.
To perform a Batch Transformation we need to first create a transformer object from our trained estimator object.
```
xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
```
Next we actually perform the transform job. When doing so we need to make sure to specify the type of data we are sending so that it is serialized correctly in the background. In our case we are providing our model with csv data so we specify `text/csv`. Also, if the test data that we have provided is too large to process all at once then we need to specify how the data file should be split up. Since each line is a single entry in our data set we tell SageMaker that it can split the input on each line.
```
xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')
```
Currently the transform job is running but it is doing so in the background. Since we wish to wait until the transform job is done and we would like a bit of feedback we can run the `wait()` method.
```
xgb_transformer.wait()
```
Now the transform job has executed and the result, the estimated sentiment of each review, has been saved on S3. Since we would rather work on this file locally we can perform a bit of notebook magic to copy the file to the `data_dir`.
```
!aws s3 cp --recursive $xgb_transformer.output_path $data_dir
```
The last step is now to read in the output from our model, convert the output to something a little more usable, in this case we want the sentiment to be either `1` (positive) or `0` (negative), and then compare to the ground truth labels.
```
predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
from sklearn.metrics import accuracy_score
accuracy_score(test_y, predictions)
```
## Step 5: Looking at New Data
So now we have an XGBoost sentiment analysis model that we believe is working pretty well. As a result, we deployed it and we are using it in some sort of app.
However, as we allow users to use our app we periodically record submitted movie reviews so that we can perform some quality control on our deployed model. Once we've accumulated enough reviews we go through them by hand and evaluate whether they are positive or negative (there are many ways you might do this in practice aside from by hand). The reason for doing this is so that we can check to see how well our model is doing.
```
import new_data
new_X, new_Y = new_data.get_new_data()
```
**NOTE:** Part of the fun in this notebook is trying to figure out what exactly is happening with the new data, so try not to cheat by looking in the `new_data` module. Also, the `new_data` module assumes that the cache created earlier in Step 3 is still stored in `../cache/sentiment_analysis`.
### (TODO) Testing the current model
Now that we've loaded the new data, let's check to see how our current XGBoost model performs on it.
First, note that the data that has been loaded has already been pre-processed so that each entry in `new_X` is a list of words that have been processed using `nltk`. However, we have not yet constructed the bag of words encoding, which we will do now.
First, we use the vocabulary that we constructed earlier using the original training data to construct a `CountVectorizer` which we will use to transform our new data into its bag of words encoding.
**TODO:** Create the CountVectorizer object using the vocabulary created earlier and use it to transform the new data.
```
# TODO: Create the CountVectorizer using the previously constructed vocabulary
vectorizer = CountVectorizer(vocabulary = vocabulary, preprocessor=lambda x: x, tokenizer=lambda x: x)
# TODO: Transform our new data set and store the transformed data in the variable new_XV
new_XV = vectorizer.transform(new_X).toarray()
```
As a quick sanity check, we make sure that the length of each of our bag of words encoded reviews is correct. In particular, it must be the same size as the vocabulary which in our case is `5000`.
```
len(new_XV[100])
```
Now that we've performed the data processing that is required by our model we can save it locally and then upload it to S3 so that we can construct a batch transform job in order to see how well our model is working.
First, we save the data locally.
**TODO:** Save the new data (after it has been transformed using the original vocabulary) to the local notebook instance.
```
# TODO: Save the data contained in new_XV locally in the data_dir with the file name new_data.csv
pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False)
```
Next, we upload the data to S3.
**TODO:** Upload the csv file created above to S3.
```
# TODO: Upload the new_data.csv file contained in the data_dir folder to S3 and save the resulting
# URI as new_data_location
new_data_location = session.upload_data(os.path.join(data_dir, 'new_data.csv'), key_prefix=prefix)
```
Then, once the new data has been uploaded to S3, we create and run the batch transform job to get our model's predictions about the sentiment of the new movie reviews.
**TODO:** Using the `xgb_transformer` object that was created earlier (at the end of Step 4 to test the XGBoost model), transform the data located at `new_data_location`.
```
# TODO: Using xgb_transformer, transform the new_data_location data. You may wish to **wait** until
# the batch transform job has finished.
xgb_transformer.transform(new_data_location, content_type='text/csv', split_type='Line')
xgb_transformer.wait()
```
As usual, we copy the results of the batch transform job to our local instance.
```
!aws s3 cp --recursive $xgb_transformer.output_path $data_dir
```
Read in the results of the batch transform job.
```
predictions = pd.read_csv(os.path.join(data_dir, 'new_data.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
```
And check the accuracy of our current model.
```
accuracy_score(new_Y, predictions)
```
So it would appear that *something* has changed since our model is no longer (as) effective at determining the sentiment of a user provided review.
In a real life scenario you would check a number of different things to see what exactly is going on. In our case, we are only going to check one and that is whether some aspect of the underlying distribution has changed. In other words, we want to see if the words that appear in our new collection of reviews matches the words that appear in the original training set. Of course, we want to narrow our scope a little bit so we will only look at the `5000` most frequently appearing words in each data set, or in other words, the vocabulary generated by each data set.
Before doing that, however, let's take a look at some of the incorrectly classified reviews in the new data set.
To start, we will deploy the original XGBoost model. We will then use the deployed model to infer the sentiment of some of the new reviews. This will also serve as a nice excuse to deploy our model so that we can mimic a real life scenario where we have a model that has been deployed and is being used in production.
**TODO:** Deploy the XGBoost model.
```
# TODO: Deploy the model that was created earlier. Recall that the object name is 'xgb'.
xgb_predictor = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
### Diagnose the problem
Now that we have our deployed "production" model, we can send some of our new data to it and filter out some of the incorrectly classified reviews.
```
from sagemaker.predictor import csv_serializer
# We need to tell the endpoint what format the data we are sending is in so that SageMaker can perform the serialization.
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
```
It will be useful to look at a few different examples of incorrectly classified reviews so we will start by creating a *generator* which we will use to iterate through some of the new reviews and find ones that are incorrect.
**NOTE:** Understanding what Python generators are isn't really required for this module. The reason we use them here is so that we don't have to iterate through all of the new reviews, searching for incorrectly classified samples.
```
def get_sample(in_X, in_XV, in_Y):
    """Lazily yield (review, true_label) pairs that the deployed model gets wrong.

    For each raw review in ``in_X``, its bag-of-words encoding from ``in_XV``
    is sent to the deployed ``xgb_predictor`` endpoint; whenever the rounded
    prediction disagrees with the ground-truth label in ``in_Y``, the original
    review and its true label are yielded.  Being a generator, it only queries
    the endpoint as far as the caller iterates.
    """
    for position, review in enumerate(in_X):
        predicted = round(float(xgb_predictor.predict(in_XV[position])))
        true_label = in_Y[position]
        if predicted != true_label:
            yield review, true_label
gn = get_sample(new_X, new_XV, new_Y)
```
At this point, `gn` is the *generator* which generates samples from the new data set which are not classified correctly. To get the *next* sample we simply call the `next` method on our generator.
```
print(next(gn))
```
After looking at a few examples, maybe we decide to look at the most frequently appearing `5000` words in each data set, the original training data set and the new data set. The reason for looking at this might be that we expect the frequency of use of different words to have changed, maybe there is some new slang that has been introduced or some other artifact of popular culture that has changed the way that people write movie reviews.
To do this, we start by fitting a `CountVectorizer` to the new data.
```
new_vectorizer = CountVectorizer(max_features=5000,
preprocessor=lambda x: x, tokenizer=lambda x: x)
new_vectorizer.fit(new_X)
```
Now that we have this new `CountVectorizer` object, we can check to see if the corresponding vocabulary has changed between the two data sets.
```
original_vocabulary = set(vocabulary.keys())
new_vocabulary = set(new_vectorizer.vocabulary_.keys())
```
We can look at the words that were in the original vocabulary but not in the new vocabulary.
```
print(original_vocabulary - new_vocabulary)
```
And similarly, we can look at the words that are in the new vocabulary but which were not in the original vocabulary.
```
print(new_vocabulary - original_vocabulary)
```
These words themselves don't tell us much, however if one of these words occurred with a large frequency, that might tell us something. In particular, we wouldn't really expect any of the words above to appear with too much frequency.
**Question** What exactly is going on here? Not only what (if any) words appear with a larger than expected frequency but also, what does this mean? What has changed about the world that our original model no longer takes into account?
**NOTE:** This is meant to be a very open ended question. To investigate you may need more cells than the one provided below. Also, there isn't really a *correct* answer, this is meant to be an opportunity to explore the data.
### (TODO) Build a new model
Supposing that we believe something has changed about the underlying distribution of the words that our reviews are made up of, we need to create a new model. This way our new model will take into account whatever it is that has changed.
To begin with, we will use the new vocabulary to create a bag of words encoding of the new data. We will then use this data to train a new XGBoost model.
**NOTE:** Because we believe that the underlying distribution of words has changed it should follow that the original vocabulary that we used to construct a bag of words encoding of the reviews is no longer valid. This means that we need to be careful with our data. If we send a bag of words encoded review using the *original* vocabulary we should not expect any sort of meaningful results.
In particular, this means that if we had deployed our XGBoost model like we did in the Web App notebook then we would need to implement this vocabulary change in the Lambda function as well.
```
new_XV = new_vectorizer.transform(new_X).toarray()
```
And a quick check to make sure that the newly encoded reviews have the correct length, which should be the size of the new vocabulary which we created.
```
len(new_XV[0])
```
Now that we have our newly encoded, newly collected data, we can split it up into a training and validation set so that we can train a new XGBoost model. As usual, we first split up the data, then save it locally and then upload it to S3.
```
import pandas as pd
# Earlier we shuffled the training dataset so to make things simple we can just assign
# the first 10 000 reviews to the validation set and use the remaining reviews for training.
new_val_X = pd.DataFrame(new_XV[:10000])
new_train_X = pd.DataFrame(new_XV[10000:])
new_val_y = pd.DataFrame(new_Y[:10000])
new_train_y = pd.DataFrame(new_Y[10000:])
```
In order to save some memory we will effectively delete the `new_X` variable. Remember that this contained a list of reviews and each review was a list of words. Note that once this cell has been executed you will need to read the new data in again if you want to work with it.
```
new_X = None
```
Next we save the new training and validation sets locally. Note that we overwrite the training and validation sets used earlier. This is mostly because the amount of space that we have available on our notebook instance is limited. Of course, you can increase this if you'd like but to do so may increase the cost of running the notebook instance.
```
pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False)
pd.concat([new_val_y, new_val_X], axis=1).to_csv(os.path.join(data_dir, 'new_validation.csv'), header=False, index=False)
pd.concat([new_train_y, new_train_X], axis=1).to_csv(os.path.join(data_dir, 'new_train.csv'), header=False, index=False)
```
Now that we've saved our data to the local instance, we can safely delete the variables to save on memory.
```
new_val_y = new_val_X = new_train_y = new_train_X = new_XV = None
```
Lastly, we make sure to upload the new training and validation sets to S3.
**TODO:** Upload the new data as well as the new training and validation data sets to S3.
```
# TODO: Upload the new data and the new validation.csv and train.csv files in the data_dir directory to S3.
new_data_location = session.upload_data(os.path.join(data_dir, 'new_data.csv'), key_prefix=prefix)
new_val_location = session.upload_data(os.path.join(data_dir, 'new_validation.csv'), key_prefix=prefix)
new_train_location = session.upload_data(os.path.join(data_dir, 'new_train.csv'), key_prefix=prefix)
```
Once our new training data has been uploaded to S3, we can create a new XGBoost model that will take into account the changes that have occurred in our data set.
**TODO:** Create a new XGBoost estimator object.
```
# TODO: First, create a SageMaker estimator object for our model.
new_xgb = sagemaker.estimator.Estimator(container, # The image name of the training container
role, # The IAM role to use (our current role in this case)
train_instance_count=1, # The number of instances to use for training
train_instance_type='ml.m4.xlarge', # The type of instance to use for training
output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
# Where to save the output (the model artifacts)
sagemaker_session=session) # The current SageMaker session
# TODO: Then set the algorithm specific parameters. You may wish to use the same parameters that were
# used when training the original model.
new_xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
objective='binary:logistic',
early_stopping_rounds=10,
num_round=200)
```
Once the model has been created, we can train it with our new data.
**TODO:** Train the new XGBoost model.
```
# TODO: First, make sure that you create s3 input objects so that SageMaker knows where to
# find the training and validation data.
s3_new_input_train = sagemaker.s3_input(s3_data=new_train_location, content_type='csv')
s3_new_input_validation = sagemaker.s3_input(s3_data=new_val_location, content_type='csv')
# TODO: Using the new validation and training data, 'fit' your new model.
new_xgb.fit({'train': s3_new_input_train, 'validation': s3_new_input_validation})
```
### (TODO) Check the new model
So now we have a new XGBoost model that we believe more accurately represents the state of the world at this time, at least in how it relates to the sentiment analysis problem that we are working on. The next step is to double check that our model is performing reasonably.
To do this, we will first test our model on the new data.
**Note:** In practice this is a pretty bad idea. We already trained our model on the new data, so testing it shouldn't really tell us much. In fact, this is sort of a textbook example of leakage. We are only doing it here so that we have a numerical baseline.
**Question:** How might you address the leakage problem?
First, we create a new transformer based on our new XGBoost model.
**TODO:** Create a transformer object from the newly created XGBoost model.
```
# TODO: Create a transformer object from the new_xgb model
new_xgb_transformer = new_xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
```
Next we test our model on the new data.
**TODO:** Use the transformer object to transform the new data (stored in the `new_data_location` variable)
```
# TODO: Using new_xgb_transformer, transform the new_data_location data. You may wish to
# 'wait' for the transform job to finish.
new_xgb_transformer.transform(new_data_location, content_type='text/csv', split_type='Line')
new_xgb_transformer.wait()
```
Copy the results to our local instance.
```
!aws s3 cp --recursive $new_xgb_transformer.output_path $data_dir
```
And see how well the model did.
```
predictions = pd.read_csv(os.path.join(data_dir, 'new_data.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
accuracy_score(new_Y, predictions)
```
As expected, since we trained the model on this data, our model performs pretty well. So, we have reason to believe that our new XGBoost model is a "better" model.
However, before we start changing our deployed model, we should first make sure that our new model isn't too different. In other words, if our new model performed really poorly on the original test data then this might be an indication that something else has gone wrong.
To start with, since we got rid of the variable that stored the original test reviews, we will read them in again from the cache that we created in Step 3. Note that we need to make sure that we read in the original test data after it has been pre-processed with `nltk` but before it has been bag of words encoded. This is because we need to use the new vocabulary instead of the original one.
```
cache_data = None
with open(os.path.join(cache_dir, "preprocessed_data.pkl"), "rb") as f:
cache_data = pickle.load(f)
print("Read preprocessed data from cache file:", "preprocessed_data.pkl")
test_X = cache_data['words_test']
test_Y = cache_data['labels_test']
# Here we set cache_data to None so that it doesn't occupy memory
cache_data = None
```
Once we've loaded the original test reviews, we need to create a bag of words encoding of them using the new vocabulary that we created, based on the new data.
**TODO:** Transform the original test data using the new vocabulary.
```
# TODO: Use the new_vectorizer object that you created earlier to transform the test_X data.
test_X = new_vectorizer.transform(test_X).toarray()
```
Now that we have correctly encoded the original test data, we can write it to the local instance, upload it to S3 and test it.
```
pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
new_xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')
new_xgb_transformer.wait()
!aws s3 cp --recursive $new_xgb_transformer.output_path $data_dir
predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
accuracy_score(test_Y, predictions)
```
It would appear that our new XGBoost model is performing quite well on the old test data. This gives us some indication that our new model should be put into production and replace our original model.
## Step 6: (TODO) Updating the Model
So we have a new model that we'd like to use instead of one that is already deployed. Furthermore, we are assuming that the model that is already deployed is being used in some sort of application. As a result, what we want to do is update the existing endpoint so that it uses our new model.
Of course, to do this we need to create an endpoint configuration for our newly created model.
First, note that we can access the name of the model that we created above using the `model_name` property of the transformer. The reason for this is that in order for the transformer to create a batch transform job it needs to first create the model object inside of SageMaker. Since we've sort of already done this we should take advantage of it.
```
new_xgb_transformer.model_name
```
Next, we create an endpoint configuration using the low level approach of creating the dictionary object which describes the endpoint configuration we want.
**TODO:** Using the low level approach, create a new endpoint configuration. Don't forget that it needs a name and that the name needs to be unique. If you get stuck, try looking at the Boston Housing Low Level Deployment tutorial notebook.
```
# Build a unique endpoint-configuration name and register a configuration
# that serves the retrained model.
from time import gmtime, strftime
# TODO: Give our endpoint configuration a name. Remember, it needs to be unique.
# A timestamp suffix keeps the name unique across repeated runs.
new_xgb_endpoint_config_name = "sentiment-update-xgboost-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# TODO: Using the SageMaker Client, construct the endpoint configuration.
# A single production variant: one ml.m4.xlarge instance hosting the model
# that the batch transformer created above.
new_xgb_endpoint_config_info = session.sagemaker_client.create_endpoint_config(
                            EndpointConfigName = new_xgb_endpoint_config_name,
                            ProductionVariants = [{
                                "InstanceType": "ml.m4.xlarge",
                                "InitialVariantWeight": 1,
                                "InitialInstanceCount": 1,
                                "ModelName": new_xgb_transformer.model_name,
                                "VariantName": "XGB-Model"
                            }])
```
Once the endpoint configuration has been constructed, it is a straightforward matter to ask SageMaker to update the existing endpoint so that it uses the new endpoint configuration.
Of note here is that SageMaker does this in such a way that there is no downtime. Essentially, SageMaker deploys the new model and then updates the original endpoint so that it points to the newly deployed model. After that, the original model is shut down. This way, whatever app is using our endpoint won't notice that we've changed the model that is being used.
**TODO:** Use the SageMaker Client to update the endpoint that you deployed earlier.
```
# TODO: Update the xgb_predictor.endpoint so that it uses new_xgb_endpoint_config_name.
# update_endpoint swaps configurations without downtime: SageMaker deploys the
# new variant first, repoints the endpoint, then retires the old model.
session.sagemaker_client.update_endpoint(EndpointName=xgb_predictor.endpoint, EndpointConfigName=new_xgb_endpoint_config_name)
```
And, as is generally the case with SageMaker requests, this is being done in the background so if we want to wait for it to complete we need to call the appropriate method.
```
session.wait_for_endpoint(xgb_predictor.endpoint)
```
## Step 7: Delete the Endpoint
Of course, since we are done with the deployed endpoint we need to make sure to shut it down, otherwise we will continue to be charged for it.
```
xgb_predictor.delete_endpoint()
```
## Some Additional Questions
This notebook is a little different from the other notebooks in this module. In part, this is because it is meant to be a little bit closer to the type of problem you may face in a real world scenario. Of course, this problem is a very easy one with a prescribed solution, but there are many other interesting questions that we did not consider here and that you may wish to consider yourself.
For example,
- What other ways could the underlying distribution change?
- Is it a good idea to re-train the model using only the new data?
- What would change if the quantity of new data wasn't large? Say you only received 500 samples?
## Optional: Clean up
The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.
```
# NOTE: these shell magics delete files permanently — only run them once you
# are completely finished with the notebook.
# First we will remove all of the files contained in the data_dir directory
!rm $data_dir/*
# And then we delete the directory itself
!rmdir $data_dir
# Similarly we will remove the files in the cache_dir directory and the directory itself
!rm $cache_dir/*
!rmdir $cache_dir
```
| github_jupyter |
# Building your own algorithm container
With Amazon SageMaker, you can package your own algorithms that can then be trained and deployed in the SageMaker environment. This notebook will guide you through an example that shows you how to build a Docker container for SageMaker and use it for training and inference.
By packaging an algorithm in a container, you can bring almost any code to the Amazon SageMaker environment, regardless of programming language, environment, framework, or dependencies.
_**Note:**_ SageMaker now includes a [pre-built scikit container](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/scikit_learn_iris/Scikit-learn%20Estimator%20Example%20With%20Batch%20Transform.ipynb). We recommend the pre-built container be used for almost all cases requiring a scikit algorithm. However, this example remains relevant as an outline for bringing in other libraries to SageMaker as your own container.
1. [Building your own algorithm container](#Building-your-own-algorithm-container)
1. [When should I build my own algorithm container?](#When-should-I-build-my-own-algorithm-container%3F)
1. [Permissions](#Permissions)
1. [The example](#The-example)
1. [The presentation](#The-presentation)
1. [Part 1: Packaging and Uploading your Algorithm for use with Amazon SageMaker](#Part-1%3A-Packaging-and-Uploading-your-Algorithm-for-use-with-Amazon-SageMaker)
1. [An overview of Docker](#An-overview-of-Docker)
1. [How Amazon SageMaker runs your Docker container](#How-Amazon-SageMaker-runs-your-Docker-container)
1. [Running your container during training](#Running-your-container-during-training)
1. [The input](#The-input)
1. [The output](#The-output)
1. [Running your container during hosting](#Running-your-container-during-hosting)
1. [The parts of the sample container](#The-parts-of-the-sample-container)
1. [The Dockerfile](#The-Dockerfile)
1. [Building and registering the container](#Building-and-registering-the-container)
1. [Testing your algorithm on your local machine or on an Amazon SageMaker notebook instance](#Testing-your-algorithm-on-your-local-machine-or-on-an-Amazon-SageMaker-notebook-instance)
1. [Part 2: Using your Algorithm in Amazon SageMaker](#Part-2%3A-Using-your-Algorithm-in-Amazon-SageMaker)
1. [Set up the environment](#Set-up-the-environment)
1. [Create the session](#Create-the-session)
1. [Upload the data for training](#Upload-the-data-for-training)
1. [Create an estimator and fit the model](#Create-an-estimator-and-fit-the-model)
1. [Hosting your model](#Hosting-your-model)
1. [Deploy the model](#Deploy-the-model)
2. [Choose some data and use it for a prediction](#Choose-some-data-and-use-it-for-a-prediction)
3. [Optional cleanup](#Optional-cleanup)
1. [Run Batch Transform Job](#Run-Batch-Transform-Job)
1. [Create a Transform Job](#Create-a-Transform-Job)
2. [View Output](#View-Output)
_or_ I'm impatient, just [let me see the code](#The-Dockerfile)!
## When should I build my own algorithm container?
You may not need to create a container to bring your own code to Amazon SageMaker. When you are using a framework (such as Apache MXNet or TensorFlow) that has direct support in SageMaker, you can simply supply the Python code that implements your algorithm using the SDK entry points for that framework. This set of frameworks is continually expanding, so we recommend that you check the current list if your algorithm is written in a common machine learning environment.
Even if there is direct SDK support for your environment or framework, you may find it more effective to build your own container. If the code that implements your algorithm is quite complex on its own or you need special additions to the framework, building your own container may be the right choice.
If there isn't direct SDK support for your environment, don't worry. You'll see in this walk-through that building your own container is quite straightforward.
## Permissions
Running this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating new repositories in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role that you used to start your notebook instance. There's no need to restart your notebook instance when you do this, the new permissions will be available immediately.
## The example
Here, we'll show how to package a simple Python example which showcases the [decision tree][] algorithm from the widely used [scikit-learn][] machine learning package. The example is purposefully fairly trivial since the point is to show the surrounding structure that you'll want to add to your own code so you can train and host it in Amazon SageMaker.
The ideas shown here will work in any language or environment. You'll need to choose the right tools for your environment to serve HTTP requests for inference, but good HTTP environments are available in every language these days.
In this example, we use a single image to support training and hosting. This is easy because it means that we only need to manage one image and we can set it up to do everything. Sometimes you'll want separate images for training and hosting because they have different requirements. Just separate the parts discussed below into separate Dockerfiles and build two images. Choosing whether to have a single image or two images is really a matter of which is more convenient for you to develop and manage.
If you're only using Amazon SageMaker for training or hosting, but not both, there is no need to build the unused functionality into your container.
[scikit-learn]: http://scikit-learn.org/stable/
[decision tree]: http://scikit-learn.org/stable/modules/tree.html
## The presentation
This presentation is divided into two parts: _building_ the container and _using_ the container.
# Part 1: Packaging and Uploading your Algorithm for use with Amazon SageMaker
### An overview of Docker
If you're familiar with Docker already, you can skip ahead to the next section.
For many data scientists, Docker containers are a new concept, but they are not difficult, as you'll see here.
Docker provides a simple way to package arbitrary code into an _image_ that is totally self-contained. Once you have an image, you can use Docker to run a _container_ based on that image. Running a container is just like running a program on the machine except that the container creates a fully self-contained environment for the program to run. Containers are isolated from each other and from the host environment, so the way you set up your program is the way it runs, no matter where you run it.
Docker is more powerful than environment managers like conda or virtualenv because (a) it is completely language independent and (b) it comprises your whole operating environment, including startup commands, environment variable, etc.
In some ways, a Docker container is like a virtual machine, but it is much lighter weight. For example, a program running in a container can start in less than a second and many containers can run on the same physical machine or virtual machine instance.
Docker uses a simple file called a `Dockerfile` to specify how the image is assembled. We'll see an example of that below. You can build your Docker images based on Docker images built by yourself or others, which can simplify things quite a bit.
Docker has become very popular in the programming and devops communities for its flexibility and well-defined specification of the code to be run. It is the underpinning of many services built in the past few years, such as [Amazon ECS].
Amazon SageMaker uses Docker to allow users to train and deploy arbitrary algorithms.
In Amazon SageMaker, Docker containers are invoked in a certain way for training and a slightly different way for hosting. The following sections outline how to build containers for the SageMaker environment.
Some helpful links:
* [Docker home page](http://www.docker.com)
* [Getting started with Docker](https://docs.docker.com/get-started/)
* [Dockerfile reference](https://docs.docker.com/engine/reference/builder/)
* [`docker run` reference](https://docs.docker.com/engine/reference/run/)
[Amazon ECS]: https://aws.amazon.com/ecs/
### How Amazon SageMaker runs your Docker container
Because you can run the same image in training or hosting, Amazon SageMaker runs your container with the argument `train` or `serve`. How your container processes this argument depends on the container:
* In the example here, we don't define an `ENTRYPOINT` in the Dockerfile so Docker will run the command `train` at training time and `serve` at serving time. In this example, we define these as executable Python scripts, but they could be any program that we want to start in that environment.
* If you specify a program as an `ENTRYPOINT` in the Dockerfile, that program will be run at startup and its first argument will be `train` or `serve`. The program can then look at that argument and decide what to do.
* If you are building separate containers for training and hosting (or building only for one or the other), you can define a program as an `ENTRYPOINT` in the Dockerfile and ignore (or verify) the first argument passed in.
#### Running your container during training
When Amazon SageMaker runs training, your `train` script is run just like a regular Python program. A number of files are laid out for your use, under the `/opt/ml` directory:
/opt/ml
|-- input
| |-- config
| | |-- hyperparameters.json
| | `-- resourceConfig.json
| `-- data
| `-- <channel_name>
| `-- <input data>
|-- model
| `-- <model files>
`-- output
`-- failure
##### The input
* `/opt/ml/input/config` contains information to control how your program runs. `hyperparameters.json` is a JSON-formatted dictionary of hyperparameter names to values. These values will always be strings, so you may need to convert them. `resourceConfig.json` is a JSON-formatted file that describes the network layout used for distributed training. Since scikit-learn doesn't support distributed training, we'll ignore it here.
* `/opt/ml/input/data/<channel_name>/` (for File mode) contains the input data for that channel. The channels are created based on the call to CreateTrainingJob but it's generally important that channels match what the algorithm expects. The files for each channel will be copied from S3 to this directory, preserving the tree structure indicated by the S3 key structure.
* `/opt/ml/input/data/<channel_name>_<epoch_number>` (for Pipe mode) is the pipe for a given epoch. Epochs start at zero and go up by one each time you read them. There is no limit to the number of epochs that you can run, but you must close each pipe before reading the next epoch.
##### The output
* `/opt/ml/model/` is the directory where you write the model that your algorithm generates. Your model can be in any format that you want. It can be a single file or a whole directory tree. SageMaker will package any files in this directory into a compressed tar archive file. This file will be available at the S3 location returned in the `DescribeTrainingJob` result.
* `/opt/ml/output` is a directory where the algorithm can write a file `failure` that describes why the job failed. The contents of this file will be returned in the `FailureReason` field of the `DescribeTrainingJob` result. For jobs that succeed, there is no reason to write this file as it will be ignored.
#### Running your container during hosting
Hosting has a very different model than training because hosting is responding to inference requests that come in via HTTP. In this example, we use our recommended Python serving stack to provide robust and scalable serving of inference requests:

This stack is implemented in the sample code here and you can mostly just leave it alone.
Amazon SageMaker uses two URLs in the container:
* `/ping` will receive `GET` requests from the infrastructure. Your program returns 200 if the container is up and accepting requests.
* `/invocations` is the endpoint that receives client inference `POST` requests. The format of the request and the response is up to the algorithm. If the client supplied `ContentType` and `Accept` headers, these will be passed in as well.
The container will have the model files in the same place they were written during training:
/opt/ml
`-- model
`-- <model files>
### The parts of the sample container
In the `container` directory are all the components you need to package the sample algorithm for Amazon SageMaker:
.
|-- Dockerfile
|-- build_and_push.sh
`-- decision_trees
|-- nginx.conf
|-- predictor.py
|-- serve
|-- train
`-- wsgi.py
Let's discuss each of these in turn:
* __`Dockerfile`__ describes how to build your Docker container image. More details below.
* __`build_and_push.sh`__ is a script that uses the Dockerfile to build your container images and then pushes it to ECR. We'll invoke the commands directly later in this notebook, but you can just copy and run the script for your own algorithms.
* __`decision_trees`__ is the directory which contains the files that will be installed in the container.
* __`local_test`__ is a directory that shows how to test your new container on any computer that can run Docker, including an Amazon SageMaker notebook instance. Using this method, you can quickly iterate using small datasets to eliminate any structural bugs before you use the container with Amazon SageMaker. We'll walk through local testing later in this notebook.
In this simple application, we only install five files in the container. You may only need that many or, if you have many supporting routines, you may wish to install more. These five show the standard structure of our Python containers, although you are free to choose a different toolset and therefore could have a different layout. If you're writing in a different programming language, you'll certainly have a different layout depending on the frameworks and tools you choose.
The files that we'll put in the container are:
* __`nginx.conf`__ is the configuration file for the nginx front-end. Generally, you should be able to take this file as-is.
* __`predictor.py`__ is the program that actually implements the Flask web server and the decision tree predictions for this app. You'll want to customize the actual prediction parts to your application. Since this algorithm is simple, we do all the processing here in this file, but you may choose to have separate files for implementing your custom logic.
* __`serve`__ is the program started when the container is started for hosting. It simply launches the gunicorn server which runs multiple instances of the Flask app defined in `predictor.py`. You should be able to take this file as-is.
* __`train`__ is the program that is invoked when the container is run for training. You will modify this program to implement your training algorithm.
* __`wsgi.py`__ is a small wrapper used to invoke the Flask app. You should be able to take this file as-is.
In summary, the two files you will probably want to change for your application are `train` and `predictor.py`.
### The Dockerfile
The Dockerfile describes the image that we want to build. You can think of it as describing the complete operating system installation of the system that you want to run. A Docker container running is quite a bit lighter than a full operating system, however, because it takes advantage of Linux on the host machine for the basic operations.
For the Python science stack, we will start from a standard Ubuntu installation and run the normal tools to install the things needed by scikit-learn. Finally, we add the code that implements our specific algorithm to the container and set up the right environment to run under.
Along the way, we clean up extra space. This makes the container smaller and faster to start.
Let's look at the Dockerfile for the example:
```
!cat container/Dockerfile
```
### Building and registering the container
The following shell code shows how to build the container image using `docker build` and push the container image to ECR using `docker push`. This code is also available as the shell script `container/build_and_push.sh`, which you can run as `build_and_push.sh decision_trees_sample` to build the image `decision_trees_sample`.
This code looks for an ECR repository in the account you're using and the current default region (if you're using a SageMaker notebook instance, this will be the region where the notebook instance was created). If the repository doesn't exist, the script will create it.
```
%%sh
# Build the sample container image and push it to an ECR repository in the
# caller's account and default region, creating the repository if needed.
# The name of our algorithm
algorithm_name=sagemaker-decision-trees
cd container
# The train/serve entry points must be executable inside the image.
chmod +x decision_trees/train
chmod +x decision_trees/serve
# AWS account id of the caller — used to build the ECR repository URI.
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-west-2}
# Fully-qualified image name: <account>.dkr.ecr.<region>.amazonaws.com/<algo>:latest
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Get the login command from ECR and execute it directly
aws ecr get-login-password --region ${region}|docker login --username AWS --password-stdin ${fullname}
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
```
## Testing your algorithm on your local machine or on an Amazon SageMaker notebook instance
While you're first packaging an algorithm for use with Amazon SageMaker, you probably want to test it yourself to make sure it's working right. In the directory `container/local_test`, there is a framework for doing this. It includes three shell scripts for running and using the container and a directory structure that mimics the one outlined above.
The scripts are:
* `train_local.sh`: Run this with the name of the image and it will run training on the local tree. For example, you can run `$ ./train_local.sh sagemaker-decision-trees`. It will generate a model under the `/test_dir/model` directory. You'll want to modify the directory `test_dir/input/data/...` to be set up with the correct channels and data for your algorithm. Also, you'll want to modify the file `input/config/hyperparameters.json` to have the hyperparameter settings that you want to test (as strings).
* `serve_local.sh`: Run this with the name of the image once you've trained the model and it should serve the model. For example, you can run `$ ./serve_local.sh sagemaker-decision-trees`. It will run and wait for requests. Simply use the keyboard interrupt to stop it.
* `predict.sh`: Run this with the name of a payload file and (optionally) the HTTP content type you want. The content type will default to `text/csv`. For example, you can run `$ ./predict.sh payload.csv text/csv`.
The directories as shipped are set up to test the decision trees sample algorithm presented here.
# Part 2: Using your Algorithm in Amazon SageMaker
Once you have your container packaged, you can use it to train models and use the model for hosting or batch transforms. Let's do that with the algorithm we made above.
## Set up the environment
Here we specify a bucket to use and the role that will be used for working with SageMaker.
```
# S3 prefix
# All S3 objects for this example are stored under this key prefix.
prefix = "DEMO-scikit-byo-iris"
# Define IAM role
import boto3
import re
import os
import numpy as np
import pandas as pd
from sagemaker import get_execution_role
# The execution role grants SageMaker permission to access S3 and ECR on our behalf.
role = get_execution_role()
```
## Create the session
The session remembers our connection parameters to SageMaker. We'll use it to perform all of our SageMaker operations.
```
import sagemaker as sage
from time import gmtime, strftime
# The Session object remembers our connection parameters and is reused for
# every SageMaker operation below.
sess = sage.Session()
```
## Upload the data for training
When training large models with huge amounts of data, you'll typically use big data tools, like Amazon Athena, AWS Glue, or Amazon EMR, to create your data in S3. For the purposes of this example, we're using the classic [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), which we have included.
We can use the tools provided by the SageMaker Python SDK to upload the data to a default bucket.
```
# Upload the local "data" directory (the bundled Iris CSV) to the session's
# default S3 bucket under our key prefix; the S3 URI is returned.
WORK_DIRECTORY = "data"
data_location = sess.upload_data(WORK_DIRECTORY, key_prefix=prefix)
```
## Create an estimator and fit the model
In order to use SageMaker to fit our algorithm, we'll create an `Estimator` that defines how to use the container to train. This includes the configuration we need to invoke SageMaker training:
* The __container name__. This is constructed as in the shell commands above.
* The __role__. As defined above.
* The __instance count__ which is the number of machines to use for training.
* The __instance type__ which is the type of machine to use for training.
* The __output path__ determines where the model artifact will be written.
* The __session__ is the SageMaker session object that we defined above.
Then we use fit() on the estimator to train against the data that we uploaded above.
```
# Construct the ECR image URI for the container we pushed above, then train.
account = sess.boto_session.client("sts").get_caller_identity()["Account"]
region = sess.boto_session.region_name
image = "{}.dkr.ecr.{}.amazonaws.com/sagemaker-decision-trees:latest".format(account, region)
# Positional arguments after role are the instance count (1) and the
# training instance type.
tree = sage.estimator.Estimator(
image,
role,
1,
"ml.c4.2xlarge",
output_path="s3://{}/output".format(sess.default_bucket()),
sagemaker_session=sess,
)
# Launch a training job against the data we uploaded to S3.
tree.fit(data_location)
```
## Hosting your model
You can use a trained model to get real time predictions using HTTP endpoint. Follow these steps to walk you through the process.
### Deploy the model
Deploying the model to SageMaker hosting just requires a `deploy` call on the fitted model. This call takes an instance count, instance type, and optionally serializer and deserializer functions. These are used when the resulting predictor is created on the endpoint.
```
# NOTE(review): this import is the SageMaker Python SDK v1 API; in SDK v2 the
# serializer moved to sagemaker.serializers.CSVSerializer — confirm the SDK
# version pinned for this notebook.
from sagemaker.predictor import csv_serializer
# Deploy one ml.m4.xlarge hosting instance; the serializer converts the
# arrays we pass to predict() into CSV request bodies.
predictor = tree.deploy(1, "ml.m4.xlarge", serializer=csv_serializer)
```
### Choose some data and use it for a prediction
In order to do some predictions, we'll extract some of the data we used for training and do predictions against it. This is, of course, bad statistical practice, but a good way to see how the mechanism works.
```
# Load the Iris CSV (no header row) and peek at a few rows.
shape = pd.read_csv("data/iris.csv", header=None)
shape.sample(3)
# drop the label column in the training set
shape.drop(shape.columns[[0]], axis=1, inplace=True)
shape.sample(3)
import itertools
# a: starting offsets of the three 50-row class blocks (0, 50, 100);
# b: row offsets 40..49 within each block.
a = [50 * i for i in range(3)]
b = [40 + i for i in range(10)]
# Cartesian product picks rows 40-49 from each class block.
indices = [i + j for i, j in itertools.product(a, b)]
# [:-1] drops the final selected row — presumably just to trim the sample;
# TODO confirm this is intentional.
test_data = shape.iloc[indices[:-1]]
```
Prediction is as easy as calling predict with the predictor we got back from deploy and the data we want to do predictions with. The serializers take care of doing the data conversions for us.
```
print(predictor.predict(test_data.values).decode("utf-8"))
```
### Optional cleanup
When you're done with the endpoint, you'll want to clean it up.
```
sess.delete_endpoint(predictor.endpoint)
```
## Run Batch Transform Job
You can use a trained model to get inference on large data sets by using [Amazon SageMaker Batch Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html). A batch transform job takes your input data S3 location and outputs the predictions to the specified S3 output folder. Similar to hosting, you can extract inferences for training data to test batch transform.
### Create a Transform Job
We'll create an `Transformer` that defines how to use the container to get inference results on a data set. This includes the configuration we need to invoke SageMaker batch transform:
* The __instance count__ which is the number of machines to use to extract inferences
* The __instance type__ which is the type of machine to use to extract inferences
* The __output path__ determines where the inference results will be written
```
# Create a batch Transformer from the trained estimator. Results are
# assembled line-by-line as CSV into the S3 output folder below.
transform_output_folder = "batch-transform-output"
output_path = "s3://{}/{}".format(sess.default_bucket(), transform_output_folder)
transformer = tree.transformer(
instance_count=1,
instance_type="ml.m4.xlarge",
output_path=output_path,
assemble_with="Line",
accept="text/csv",
)
```
We use transform() on the transformer to get inference results against the data that we uploaded. You can use these options when invoking the transformer.
* The __data_location__ which is the location of input data
* The __content_type__ which is the content type set when making HTTP request to container to get prediction
* The __split_type__ which is the delimiter used for splitting input data
* The __input_filter__ which indicates the first column (ID) of the input will be dropped before making HTTP request to container
```
# Run the batch transform job on the uploaded data. input_filter="$[1:]"
# drops the first column (the label) of each CSV row before it is sent to
# the container; split_type="Line" sends one row per request record.
transformer.transform(
data_location, content_type="text/csv", split_type="Line", input_filter="$[1:]"
)
# Block until the transform job finishes.
transformer.wait()
```
For more information on the configuration options, see [CreateTransformJob API](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTransformJob.html)
### View Output
Let's read the results of the above transform job from the S3 output files and print them.
```
# Download the transform job's output file from S3 and print its contents.
s3_client = sess.boto_session.client("s3")
s3_client.download_file(
sess.default_bucket(), "{}/iris.csv.out".format(transform_output_folder), "/tmp/iris.csv.out"
)
with open("/tmp/iris.csv.out") as f:
results = f.readlines()
print("Transform results: \n{}".format("".join(results)))
```
| github_jupyter |
# Deep Neural Network
You see a lot of people around you who are interested in deep neural networks and you think that it might be interesting to start thinking about creating software that is as flexible as possible and allows novice users to test this kind of method.
You have no previous knowledge and while searching a bit on the internet, you come across this project https://github.com/HyTruongSon/Neural-Network-MNIST-CPP. You say to yourself that this is a good starting point and decide to spend a bit more time on it.
We recall here the key elements found in deep neural networks. We will not go into the mathematical details as this is not the purpose of this course.
A deep neural network is composed of an input, an output and several hidden layers.
A neuron is illustrated by the following figure

This figure comes from a CNRS course called fidle (https://gricad-gitlab.univ-grenoble-alpes.fr/talks/fidle).
We can observe that a neuron is made of weights, a bias and an activation function. The activation function can be a sigmoid, reLU, tanh, ...
A deep neural network is composed of several hidden layers with several neurons as illustrated in the following figure

This figure also comes from the CNRS course fidle.
In the following, we will use these notations:
- $w^l_{j,i}$ is the weight of the layer $l$ for the neuron $j$ and the input entry $i$.
- $z^l_j$ is the aggregation: $\sum_i x_{i}^l w_{j, i}^l + b_j^l$ where $x_{i}$ is the input.
- $\sigma$ is the activation function.
- $a^l_j$ is the output of the neuron $j$ for the layer $l$.
- $L$ is the index of the last layer.
- $C(a^L, y)$ is the cost function where $a^L$ is the predict value and $y$ is the expected result.
The algorithm has three steps:
- The forward propagation: for a given input, cross all the layers until the output and fill $z^l$ and $a^l$.
- Change the weights and biases to minimize the cost function using a descent gradient. This is called backward propagation.
- iterate until reaching the maximum number of iterations or a given tolerance.
The gradient descent can be written as
$$
w_{j, i}^l = w_{j, i}^l - \mu \frac{\partial C}{\partial w_{j, i}^l},
$$
where $\mu$ is the learning rate.
The equations of the backward propagation are
- $\delta^L_j = \frac{\partial C}{\partial a_j^L}\sigma'(z_j^L)$
- $\delta^l_j = \sum_i w^{l+1}_{i, j}\delta^{l+1}_i \sigma'(z_j^l)$
- $\frac{\partial C}{\partial b^l_j} = \delta_j^l$
- $\frac{\partial C}{\partial w^l_{j, i}} = a^{l-1}_i \delta_j^l$
In our case,
$$
C'(\hat{y}, y) = \hat{y} - y.
$$
We need two sets of data: one for training the neural network and one for testing the final weights and biases.
- Read the code https://github.com/HyTruongSon/Neural-Network-MNIST-CPP carefully and try to recognize each element of the algorithm.
- Think of a code organization and data structure that offer more flexibility and readability.
- Duplicate `step_0` into `step_1` and add all the `CMakeLists.txt` to create a library of `dnn` source files and the executable of the main function
- Duplicate `step_1` into `step_2` and implement the following functions
- `forward_propagation`
- `backward_propagation`
- `evaluate`
- How to proceed to have more flexibility in the choice of the activation function ?
**Note**: for the moment, you have only seen the C++ functions. We can see that it is difficult to have a flexible implementation with only functions. The use of C++ classes will improve the implementation considerably and will allow to add several activation and cost functions more easily.
| github_jupyter |
Heroes of Pymoli Data Analysis
```
# ---------------------------------------------------------------------------
# Heroes of Pymoli purchase-data analysis (notebook-style script).
# Loads raw purchase records and reports: player count, overall purchasing
# totals, gender demographics, purchases by gender and by age bin, top
# spenders, and the most popular / most profitable items.
# NOTE(review): bare expressions such as `Player_count` only display output
# inside a Jupyter notebook; as a plain script they are no-ops.
# ---------------------------------------------------------------------------
import pandas as pd
# Machine-specific absolute path to the raw purchase records (JSON).
Pymoli="/Users/rulaothman/Desktop/pandas-challenge/04-Numpy-Pandas/Instructions/HeroesOfPymoli/purchase_data.json"
# Format string used to render dollar amounts, e.g. '$3.05'.
currency= '${0:.2f}'
Pymoli_df=pd.read_json(Pymoli)
Pymoli_df.head()
# Player Count: number of distinct screen names (SN).
Player_count=Pymoli_df['SN'].value_counts().count()
Player_count
# Purchasing Analysis (totals): unique items, purchase count, mean and total spend.
unique = Pymoli_df['Item Name'].value_counts().count()
unique
count = Pymoli_df['Price'].count()
average = Pymoli_df['Price'].mean()
total = Pymoli_df['Price'].sum()
Player_data = [{'Number of Unique Items':unique,
'Number of Purchases':count,
'Average Purchase Price':average,
'Total Revenue': total}]
# NOTE(review): there is no 'Percentage' column in this frame, so this
# .style.format is a no-op — presumably copied from the gender cell below.
player_analysis = pd.DataFrame(Player_data).style.format({'Percentage': '{:.2%}'})
player_analysis
# Gender Demographics: share and count of purchases per gender.
Gender_df = Pymoli_df.groupby(['Gender'])
print(Gender_df)
count = Pymoli_df.groupby(['Gender']).count()
total = Pymoli_df['Gender'].count()
# Row counts per gender divided by the total row count (purchase share,
# not unique-player share — each purchase by the same player is counted).
percentage = count/total
gender_data = {'Percentage': percentage['SN'],
'Total Players': count['SN']}
gender_analysis = pd.DataFrame(gender_data).style.format({'Percentage': '{:.2%}'})
gender_analysis
# Purchase Analysis by gender: count, mean price, and total value per gender.
count = Pymoli_df.groupby(['Gender']).count()
average = Pymoli_df.groupby(['Gender']).mean()
total = Pymoli_df.groupby(['Gender']).sum()
genderpurchase_data= {'Purchase Count': count['Price'],
'Average Purchase Price': average['Price'],
'Total Purchase Value': total['Price']}
purchase_analysis = pd.DataFrame(genderpurchase_data).style.format({'Average Purchase Price': currency, 'Total Purchase Value': currency})
purchase_analysis
# NOTE(review): these min/max checks are displayed but never used to choose
# the bin edges below — verify ages above 45 exist; they would be dropped.
Pymoli_df['Age'].max()
Pymoli_df['Age'].min()
# 4-year bins (edges); NOTE(review): the top edge is 45, so any age > 45
# falls outside every bin despite the '40+' label.
bins=([0, 10, 15, 20, 25, 30, 35, 40, 45])
group_names= ["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
df= Pymoli_df.groupby(pd.cut(Pymoli_df["Age"], bins, labels = group_names))
count= Pymoli_df.groupby(pd.cut(Pymoli_df["Age"], bins, labels = group_names)).count()
average= Pymoli_df.groupby(pd.cut(Pymoli_df["Age"], bins, labels = group_names)).mean()
total= Pymoli_df.groupby((pd.cut(Pymoli_df["Age"], bins, labels = group_names))).sum()
agedemo_data= {'Purchase Count': count['Price'],
'Average Purchase Price': average['Price'],
'Total Purchase Value': total['Price']}
# NOTE(review): the index is already labelled with group_names ('<10', '40+',
# etc.), so most of these rename keys ('0-10', mapping '40+' to '>40') never
# match and the rename is effectively a no-op — confirm intended labels.
purchase_analysis = pd.DataFrame(agedemo_data).rename(index={'0-10':'<10','10-14':'10-14','15-19':'15-19','20-24':'20-24','25-29':'25-29','30-34':'30-34','35-39':'35-39','40+':'>40'}).style.format({'Average Purchase Price': currency, 'Total Purchase Value': currency})
purchase_analysis
# Top 5 Spenders: ranked by total purchase value per screen name.
count = Pymoli_df.groupby(['SN']).count()
average = Pymoli_df.groupby(['SN']).mean()
total = Pymoli_df.groupby(['SN']).sum()
topspender_data= {'Purchase Count': count['Price'],
'Average Purchase Price': average['Price'],
'Total Purchase Value': total['Price']}
purchase_analysis = pd.DataFrame(topspender_data).sort_values('Total Purchase Value', ascending=False).head(5).style.format({'Average Purchase Price': currency, 'Total Purchase Value': currency})
purchase_analysis
# Most Popular Item: top 5 items by number of purchases.
count = Pymoli_df.groupby(['Item ID','Item Name']).count()
average = Pymoli_df.groupby(['Item ID','Item Name']).mean()
total = Pymoli_df.groupby(['Item ID','Item Name']).sum()
topitem_data= {'Purchase Count': count['Price'],
'Average Purchase Price': average['Price'],
'Total Purchase Value': total['Price']}
purchase_analysis = pd.DataFrame(topitem_data).sort_values('Purchase Count', ascending=False).head(5).style.format({'Average Purchase Price': currency, 'Total Purchase Value': currency})
purchase_analysis
# Most Profitable Item: same per-item table, re-ranked by total revenue.
purchase_analysis = pd.DataFrame(topitem_data).sort_values('Total Purchase Value', ascending=False).head(5).style.format({'Average Purchase Price': currency, 'Total Purchase Value': currency})
purchase_analysis
```
| github_jupyter |
```
from __future__ import print_function, division
import json
import numpy as np
import pandas as pd
import librosa
import soundfile as sf
import torch
from torch.utils.data import Dataset
from keras.preprocessing.sequence import pad_sequences
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
class SpeechDataset(Dataset):
    """Speech dataset mapping (wav path, transcript path) rows to samples.

    Each sample is a dict with:
      - 'signal': log-magnitude spectrogram (torch.FloatTensor)
      - 'transcript': np.ndarray of label indices, wrapped in <sos>/<eos>

    Rows come from a two-column, headerless CSV: column 0 is the audio
    file path, column 1 is the transcript file path.
    """

    def __init__(self, csv_file, labels_file, audio_conf, transform=None, normalize=True):
        """
        Args:
            csv_file (string): Path to the csv file containing audio and
                transcript paths (two columns, no header).
            labels_file (string): Path to the json file containing the label
                dictionary (char -> index; must include '<sos>' and '<eos>').
            audio_conf (dict): Audio config with keys 'window', 'window_size',
                'window_stride', and 'sampling_rate'.
            transform (callable, optional): Optional transform applied to
                each sample dict.
            normalize (bool): If True, mean/std-normalize the log-spectrogram.
        """
        self.speech_frame = pd.read_csv(csv_file, header=None)
        with open(labels_file, 'r') as f:
            self.labels = json.loads(f.read())
        self.window = audio_conf['window']
        self.window_size = audio_conf['window_size']
        self.window_stride = audio_conf['window_stride']
        self.sampling_rate = audio_conf['sampling_rate']
        self.transform = transform
        self.normalize = normalize

    def __len__(self):
        return len(self.speech_frame)

    def __getitem__(self, idx):
        """Return the sample dict at `idx`, or the wav path (str) on failure."""
        wav_file = self.speech_frame.iloc[idx, 0]
        transcript_file = self.speech_frame.iloc[idx, 1]
        try:
            signal, _ = sf.read(wav_file)
            # NOTE(review): soundfile already returns floats in [-1, 1] for
            # most subtypes; this 1<<31 scaling assumes raw int32 PCM data —
            # confirm against the dataset's wav encoding.
            signal /= 1 << 31
            signal = self.spectrogram(signal)
            with open(transcript_file, 'r') as f:
                transcript = f.read().strip()
            # Encode the transcript as <sos> + per-char indices + <eos>;
            # characters missing from the label map are silently dropped.
            transcript_idx = [self.labels['<sos>']]
            for char in transcript:
                if char in self.labels:
                    transcript_idx.append(self.labels[char])
            transcript_idx.append(self.labels['<eos>'])
            sample = {'signal': signal, 'transcript': np.array(transcript_idx)}
            if self.transform:
                sample = self.transform(sample)
            return sample
        except Exception:
            # Best-effort contract kept from the original: return the
            # offending path so callers can log/skip bad files. Narrowed
            # from a bare `except:` so SystemExit/KeyboardInterrupt still
            # propagate instead of being swallowed.
            return wav_file

    def spectrogram(self, signal):
        """Return the (optionally normalized) log-magnitude STFT of `signal`
        as a torch.FloatTensor of shape (freq_bins, frames)."""
        n_fft = int(self.sampling_rate * self.window_size)
        win_length = n_fft
        hop_length = int(self.sampling_rate * self.window_stride)
        # Short-time Fourier transform
        D = librosa.stft(signal, n_fft=n_fft, hop_length=hop_length,
                         window=self.window, win_length=win_length)
        spect, phase = librosa.magphase(D)
        # S = log(S + 1): compresses dynamic range of the magnitudes.
        spect = np.log1p(spect)
        spect = torch.FloatTensor(spect)
        if self.normalize:
            mean = spect.mean()
            std = spect.std()
            spect.add_(-mean)
            spect.div_(std)
        return spect
class Padding(object):
    """Pad or truncate a sample's signal and transcript to fixed lengths.

    Args:
        signal_size (int): Desired output size of the signal.
        transcript_size (int): Desired output size of the transcript.
        labels_file (string): Path to the json file containing the label
            dictionary (must include a 'pad' entry used as fill value).
    """

    def __init__(self, signal_size, transcript_size, labels_file):
        assert isinstance(signal_size, int)
        assert isinstance(transcript_size, int)
        self.signal_size = signal_size
        self.transcript_size = transcript_size
        with open(labels_file, 'r') as handle:
            self.labels = json.load(handle)

    def __call__(self, sample):
        signal = sample['signal']
        transcript = sample['transcript']
        # Scale assuming int32-range input (mirrors the dataset loader).
        signal /= 1 << 31
        padded_signal = pad_sequences(
            signal,
            maxlen=self.signal_size, padding='post',
            truncating='post', value=0.0, dtype='float')
        padded_transcript = pad_sequences(
            transcript.reshape(1, -1),
            maxlen=self.transcript_size, padding='post',
            truncating='post', value=self.labels['pad'], dtype='int')
        return {'signal': padded_signal, 'transcript': padded_transcript}
class ToTensor(object):
    """Convert a sample's 'signal' and 'transcript' ndarrays to torch Tensors."""

    def __call__(self, sample):
        as_tensor = torch.from_numpy
        return {'signal': as_tensor(sample['signal']),
                'transcript': as_tensor(sample['transcript'])}
```
| github_jupyter |
# Amazon SageMaker Autopilot Data Exploration
This report provides insights about the dataset you provided as input to the AutoML job.
It was automatically generated by the AutoML training job: **automl-dm-1632956082**.
As part of the AutoML job, the input dataset was randomly split into two pieces, one for **training** and one for
**validation**. The training dataset was randomly sampled, and metrics were computed for each of the columns.
This notebook provides these metrics so that you can:
1. Understand how the job analyzed features to select the candidate pipelines.
2. Modify and improve the generated AutoML pipelines using knowledge that you have about the dataset.
We read **`7110`** rows from the training dataset.
The dataset has **`2`** columns and the column named **`sentiment`** is used as the target column.
This is identified as a **`MulticlassClassification`** problem.
Here are **3** examples of labels: `['-1', '1', '0']`.
<div class="alert alert-info"> 💡 <strong> Suggested Action Items</strong>
- Look for sections like this for recommended actions that you can take.
</div>
---
## Contents
1. [Dataset Sample](#Dataset-Sample)
1. [Column Analysis](#Column-Analysis)
---
## Dataset Sample
The following table is a random sample of **10** rows from the training dataset.
<div class="alert alert-info"> 💡 <strong> Suggested Action Items</strong>
- Verify the input headers correctly align with the columns of the dataset sample.
If they are incorrect, update the header names of your input dataset in Amazon Simple Storage Service (Amazon S3).
</div>
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sentiment</th>
<th>review_body</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>This skirt is stunning but i really wish it wa...</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
<td>This is a very cute shirt with great details. ...</td>
</tr>
<tr>
<th>2</th>
<td>1</td>
<td>I'm really looking forward to wearing this sko...</td>
</tr>
<tr>
<th>3</th>
<td>-1</td>
<td>Ag jeans have been a main stay for me. they ar...</td>
</tr>
<tr>
<th>4</th>
<td>1</td>
<td>The fabric is thin though. you better wash it...</td>
</tr>
<tr>
<th>5</th>
<td>-1</td>
<td>I'm a rather small person--5'2" about 100 lbs...</td>
</tr>
<tr>
<th>6</th>
<td>0</td>
<td>This top fits like shown in the pictures. howe...</td>
</tr>
<tr>
<th>7</th>
<td>1</td>
<td>Super cute and comfy perfect for fall and win...</td>
</tr>
<tr>
<th>8</th>
<td>0</td>
<td>This dress has been one that i just adored onl...</td>
</tr>
<tr>
<th>9</th>
<td>0</td>
<td>Loved the design and print of this blouse. ho...</td>
</tr>
</tbody>
</table>
</div>
## Column Analysis
The AutoML job analyzed the **`2`** input columns to infer each data type and select
the feature processing pipelines for each training algorithm.
For more details on the specific AutoML pipeline candidates, see [Amazon SageMaker Autopilot Candidate Definition Notebook.ipynb](./SageMakerAutopilotCandidateDefinitionNotebook.ipynb).
### Percent of Missing Values
Within the data sample, the following columns contained missing values, such as: `nan`, white spaces, or empty fields.
SageMaker Autopilot will attempt to fill in missing values using various techniques. For example,
missing values can be replaced with a new 'unknown' category for `Categorical` features
and missing `Numerical` values can be replaced with the **mean** or **median** of the column.
We found **0 of the 2** of the columns contained missing values.
<div class="alert alert-info"> 💡 <strong> Suggested Action Items</strong>
- Investigate the governance of the training dataset. Do you expect this many missing values?
Are you able to fill in the missing values with real data?
- Use domain knowledge to define an appropriate default value for the feature. Either:
- Replace all missing values with the new default value in your dataset in Amazon S3.
- Add a step to the feature pre-processing pipeline to fill missing values, for example with a
[sklearn.impute.SimpleImputer](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html).
</div>
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>% of Missing Values</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
### Count Statistics
For `String` features, it is important to count the number of unique values to determine whether to treat a feature as `Categorical` or `Text`
and then processes the feature according to its type.
For example, SageMaker Autopilot counts the number of unique entries and the number of unique words.
The following string column would have **3** total entries, **2** unique entries, and **3** unique words.
| | String Column |
|-------|-------------------|
| **0** | "red blue" |
| **1** | "red blue" |
| **2** | "red blue yellow" |
If the feature is `Categorical`, SageMaker Autopilot can look at the total number of unique entries and transform it using techniques such as one-hot encoding.
If the field contains a `Text` string, we look at the number of unique words, or the vocabulary size, in the string.
We can use the unique words to then compute text-based features, such as Term Frequency-Inverse Document Frequency (tf-idf).
**Note:** If the number of unique values is too high, we risk data transformations expanding the dataset to too many features.
In that case, SageMaker Autopilot will attempt to reduce the dimensionality of the post-processed data,
such as by capping the number of vocabulary words for tf-idf, applying Principal Component Analysis (PCA), or other dimensionality reduction techniques.
The table below shows **2 of the 2** columns ranked by the number of unique entries.
<div class="alert alert-info"> 💡 <strong> Suggested Action Items</strong>
- Verify the number of unique values of a feature is expected with respect to domain knowledge.
If it differs, one explanation could be multiple encodings of a value.
For example `US` and `U.S.` will count as two different words.
You could correct the error at the data source or pre-process your dataset in your S3 bucket.
- If the number of unique values seems too high for Categorical variables,
investigate if using domain knowledge to group the feature
to a new feature with a smaller set of possible values improves performance.
</div>
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Number of Unique Entries</th>
<th>Number of Unique Words (if Text)</th>
</tr>
</thead>
<tbody>
<tr>
<th>sentiment</th>
<td>3</td>
<td>n/a</td>
</tr>
<tr>
<th>review_body</th>
<td>7108</td>
<td>17986</td>
</tr>
</tbody>
</table>
</div>
### Descriptive Statistics
For each of the numerical input features, several descriptive statistics are computed from the data sample.
SageMaker Autopilot may treat numerical features as `Categorical` if the number of unique entries is sufficiently low.
For `Numerical` features, we may apply numerical transformations such as normalization, log and quantile transforms,
and binning to manage outlier values and difference in feature scales.
We found **1 of the 2** columns contained at least one numerical value.
The table below shows the **1** columns which have the largest percentage of numerical values.
<div class="alert alert-info"> 💡 <strong> Suggested Action Items</strong>
- Investigate the origin of the data field. Are some values non-finite (e.g. infinity, nan)?
Are they missing or is it an error in data input?
- Missing and extreme values may indicate a bug in the data collection process.
Verify the numerical descriptions align with expectations.
For example, use domain knowledge to check that the range of values for a feature meets with expectations.
</div>
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>% of Numerical Values</th>
<th>Mean</th>
<th>Median</th>
<th>Min</th>
<th>Max</th>
</tr>
</thead>
<tbody>
<tr>
<th>sentiment</th>
<td>100.0%</td>
<td>0.0</td>
<td>0.0</td>
<td>-1.0</td>
<td>1.0</td>
</tr>
</tbody>
</table>
</div>
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.