code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Table of Contents
# [Libraries Used](#libraries)
#
# Part I: [Read Data and EDA](#prelim)
#
# Part II: [Feature selection and importance](#featureselection)
#
# Part III: [Model selection and data training](#modelselection)
#
# Part IV: [Hyperparameter optimization of best model](#hyperparameter)
#
# Part V: [Employing XGBoost Regression - Best model](#bestmodelp)
#
# Part VI: [Predictions with designed ML model - Out of sample](#makeprediction1)
# +
#Import Libraries
#General libs
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import sort
# SKlearn libraries for learning.
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold, RepeatedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.model_selection import ShuffleSplit, cross_validate, train_test_split
from sklearn import linear_model
from xgboost import XGBRegressor
import xgboost as xgb
import lightgbm as lgbm
# SKlearn libraries for evaluation.
from sklearn import metrics
from sklearn.metrics import r2_score, mean_squared_error,mean_absolute_error
from sklearn.feature_selection import SelectFromModel
# %matplotlib inline
# Control the hyperparameter optimization for the best model. It can be expensive
hyperparam_opt=False
#Set to true to download figures
savefigs=False
# -
# <a id='prelim'></a>
# ## Part I: Read Data and EDA
# ## Loading data
# +
# Load the raw mechanical-property tables: one CSV per phase (1T, 2H, mixed
# 2H-1T, Planar) and temperature (0K, 300K, 600K). The first column holds the
# material identifier and becomes the DataFrame index.
oneT_0K = pd.read_csv('2DH-data/1T0K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
oneT_300K = pd.read_csv('2DH-data/1T300K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
oneT_600K = pd.read_csv('2DH-data/1T600K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
twoH_0K = pd.read_csv('2DH-data/2H0K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
twoH_300K = pd.read_csv('2DH-data/2H300K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
twoH_600K = pd.read_csv('2DH-data/2H600K-Mechanicalppts.txt', index_col = 0, delimiter =',')
twoone_0K = pd.read_csv('2DH-data/2H1T0K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
twoone_300K = pd.read_csv('2DH-data/2H1T300K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
P_0K = pd.read_csv('2DH-data/P0K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
P_300K = pd.read_csv('2DH-data/P300K-Mechanicalppts.txt', index_col = 0, delimiter = ',')
P_600K = pd.read_csv('2DH-data/P600K-Mechanicalppts.txt', index_col = 0, delimiter =',')
# -
# Concatenate everything once, purely to eyeball summary statistics of the
# raw (uncleaned) data. Note: no 2H-1T table exists at 600 K.
datas = [oneT_0K, twoH_0K, twoone_0K, P_0K, oneT_300K, twoH_300K, twoone_300K, P_300K, oneT_600K, twoH_600K, P_600K]
Fullset= pd.concat(datas)
Fullset.describe()
# ### Cleaning data
# * SG is assumed to remain constant with temperature
# * Only stable materials are included (C11>0 and C11>C12)
# * Column of temperature is added
# * Only materials where a = b are included
# +
# 1T phase at 0 K: keep lattice parameters (a, b, c), space group (SG) and
# elastic constants (C11, C12); then tag phase and temperature.
oneT_0K = oneT_0K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
oneT_0K['Phase'] = '1T'
oneT_0K['Temp'] = '0K'
## Considering only physical solutions (C11>0 and C11>C12) and systems where a = b.
oneT_0K = oneT_0K[(oneT_0K['C11']> 0) & (oneT_0K['C11']>oneT_0K['C12']) & (oneT_0K['a']==oneT_0K['b']) ]
#display(oneT_0K)
# +
oneT_300K = oneT_300K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
oneT_300K = oneT_300K[(oneT_300K['C11']> 0) & (oneT_300K['C11']>oneT_300K['C12']) & (oneT_300K['a']==oneT_300K['b']) ]
oneT_300K['a'] = oneT_300K['a']/2
oneT_300K['Phase'] = '1T'
oneT_300K['Temp'] = '300K'
## SG in temperature dependent calculations are assumed to be the same as that for 0K.
oneT_300K['SG'].replace(1.0, 156, True)
#display(oneT_300K)
# +
#oneT_600K.drop(labels=['Y_2D_a', 'Y_2D_b', 'G_a', 'G_b', 'B_a', 'B_b', 'V_a', 'V_b'], axis = 'columns', inplace = True)
# 1T phase at 600 K: this table uses doubled column names (aa, bb, cc);
# normalize them before applying the same cleanup as the other tables.
oneT_600K.rename(columns={"aa" : "a", "bb" : "b", "cc" : "c"}, inplace = True)
oneT_600K = oneT_600K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
# Mechanical-stability and a == b filters, as for the other phases.
oneT_600K = oneT_600K[(oneT_600K['C11']> 0) & (oneT_600K['C11']>oneT_600K['C12']) & (oneT_600K['a']==oneT_600K['b']) ]
oneT_600K['a'] = oneT_600K['a']/2
oneT_600K['Phase'] = '1T'
oneT_600K['Temp'] = '600K'
# SG assumed identical to the 0 K value. Assignment replaces the deprecated
# positional-`inplace` form `replace(1.0, 156, True)`.
oneT_600K['SG'] = oneT_600K['SG'].replace(1.0, 156)
#display(oneT_600K)
# +
# 2H phase at 0 K: keep geometry, SG and elastic constants; tag phase/temp.
# NOTE(review): unlike the other tables, no stability filter is applied here --
# presumably all 2H 0 K entries are already stable; verify against the data.
twoH_0K = twoH_0K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
twoH_0K['Phase'] = '2H'
twoH_0K['Temp'] = '0K'
#display(twoH_0K)
# +
twoH_300K = twoH_300K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
twoH_300K = twoH_300K[(twoH_300K['C11']> 0) & (twoH_300K['C11']>twoH_300K['C12']) & (twoH_300K['a']==twoH_300K['b']) ]
twoH_300K['a'] = twoH_300K['a']/2
twoH_300K['Phase'] = '2H'
twoH_300K['Temp'] = '300K'
## Replacing the SG in twoH_300K with the correct ones according to twoH_0K.
matSG_8 = twoH_0K.index[twoH_0K['SG'] == 8].tolist()
notPresent = ['Hf-Ti-Te-Se', 'Pb-Sn-Te-Se', 'Ti-Hf-Se-Te', 'Si-Hf-Se-Te', 'Si-Hf-S-Se', 'Sn-Ge-Te-Se', 'Mo-Sn-S-Se', 'Hf-Si-Te-Se', 'Pb-Si-Se-Te', 'Sn-Si-S-Te', 'Hf-Sn-Se-S', 'Hf-Ge-Se-Te', 'Cr-Ti-Se-Te', 'Sn-Ge-Te-S', 'Sn-Hf-S-Se', 'Pb-Sn-Se-Te', 'Ti-Cr-Te-Se', 'Pb-Sn-S-Te', 'Pb-Mo-S-Se', 'Pb-Sn-Te-S', 'Sn-Mn-S-Se', 'Ge-Ti-Te-Se', 'Ge-Hf-Se-Te', 'Ti-Pb-S-Se','Ti-Mn-Te-Se', 'Mn-Ti-Se-Te', 'Pb-Ge-Te-O', 'Ge-Pb-Se-Te']
toRemove = [i for i in matSG_8]
for i in matSG_8:
if i in notPresent:
toRemove.remove(i)
twoH_300K.loc[toRemove, 'SG'] = 8
indexSG156 = twoH_300K.index[twoH_300K['SG'] == 1].tolist()
twoH_300K.loc[indexSG156, 'SG'] = 156
#display(twoH_300K)
# +
# 2H phase at 600 K: normalize the doubled column names, then apply the same
# cleanup (stability filter, a == b, supercell halving) as the other tables.
twoH_600K.rename(columns={"aa" : "a", "bb" : "b", "cc" : "c"}, inplace = True)
twoH_600K = twoH_600K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
twoH_600K = twoH_600K[(twoH_600K['C11']> 0) & (twoH_600K['C11']>twoH_600K['C12']) & (twoH_600K['a']==twoH_600K['b']) ]
twoH_600K['a'] = twoH_600K['a']/2
twoH_600K['Phase'] = '2H'
twoH_600K['Temp'] = '600K'
# Assignment replaces the deprecated positional-`inplace` replace call.
twoH_600K['SG'] = twoH_600K['SG'].replace(1.0, 156)
#display(twoH_600K)
# +
# Mixed 2H-1T phase at 0 K: same column selection, tagging and stability filter.
twoone_0K = twoone_0K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
twoone_0K['Phase'] = '2H-1T'
twoone_0K['Temp'] = '0K'
twoone_0K = twoone_0K[(twoone_0K['C11']> 0) & (twoone_0K['C11']>twoone_0K['C12']) & (twoone_0K['a']==twoone_0K['b']) ]
#display(twoone_0K)
# +
# Mixed 2H-1T phase at 300 K: filter, then halve the supercell lattice constant.
twoone_300K = twoone_300K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
twoone_300K = twoone_300K[(twoone_300K['C11']> 0) & (twoone_300K['C11']>twoone_300K['C12']) & (twoone_300K['a']==twoone_300K['b']) ]
twoone_300K['a'] = twoone_300K['a']/2
twoone_300K['Phase'] = '2H-1T'
twoone_300K['Temp'] = '300K'
#display(twoone_300K)
# +
# Planar phase at 0 K: column selection, stability filter, phase/temp tags.
P_0K = P_0K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
P_0K = P_0K[(P_0K['C11']> 0) & (P_0K['C11']>P_0K['C12']) & (P_0K['a']==P_0K['b']) ]
P_0K['Phase'] = 'Planar'
P_0K['Temp'] = '0K'
#display(P_0K)
# -
# Planar phase at 300 K: filter, halve the supercell lattice constant, tag.
P_300K = P_300K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
P_300K = P_300K[(P_300K['C11']> 0) & (P_300K['C11']>P_300K['C12']) & (P_300K['a']==P_300K['b']) ]
P_300K['a'] = P_300K['a']/2
P_300K['Phase'] = 'Planar'
P_300K['Temp'] = '300K'
# Assignment replaces the deprecated positional-`inplace` replace call.
P_300K['SG'] = P_300K['SG'].replace(1.0, 156)
#display(P_300K)
# +
# Planar phase at 600 K: normalize doubled column names, then the usual cleanup.
P_600K.rename(columns={"aa" : "a", "bb" : "b", "cc" : "c"}, inplace = True)
P_600K = P_600K[['a', 'b', 'c', 'SG', 'C11', 'C12']]
P_600K = P_600K[(P_600K['C11']> 0) & (P_600K['C11']>P_600K['C12']) & (P_600K['a']==P_600K['b']) ]
P_600K['a'] = P_600K['a']/2
P_600K['Phase'] = 'Planar'
P_600K['Temp'] = '600K'
# Assignment replaces the deprecated positional-`inplace` replace call.
P_600K['SG'] = P_600K['SG'].replace(1.0, 156)
#display(P_600K)
# -
# Assemble the cleaned, tagged tables into the working dataset.
datas = [oneT_0K, twoH_0K, twoone_0K, P_0K, oneT_300K, twoH_300K, twoone_300K, P_300K, oneT_600K, twoH_600K, P_600K]
dataset = pd.concat(datas)
del dataset["b"] #Note, we have trained the data for a=b, so, we remove the b column
dataset.info()
display(dataset)
# #### Handling rarely occurring SG values
#
# Any space group number occurring 4 or fewer times is categorized arbitrarily as 0 (the code keeps groups with count > 4).
# +
# Space-group numbers kept as distinct categories: those appearing more than
# 4 times. Computed via a boolean mask on value_counts() instead of the
# original reset_index(name=...)["index"] chain, which breaks on pandas >= 2
# where the reset column is named after the Series ("SG"), not "index".
sg_counts = dataset["SG"].value_counts()
SG_in_model = sg_counts[sg_counts > 4].index
def replace_rare_SG_occurances(dataset):
    """Collapse rarely-occurring space groups (not in SG_in_model) to 0, in place.

    Mutates and returns the passed DataFrame.
    """
    dataset.loc[~dataset["SG"].isin(SG_in_model), "SG"] = 0
    return dataset
replace_rare_SG_occurances(dataset)
# -
# ## Encoding of categorical features
# Integer-encode the categorical columns (space group and temperature) so the
# regressors can consume them; one LabelEncoder per column.
encSG = LabelEncoder()
encT = LabelEncoder()
enc = [encSG, encT]
categorical_variables = ['SG','Temp']
for c, encoder in zip(categorical_variables, enc):
    print(f"Label Encoding: {c} - {dataset[c].nunique()}")
    encoder.fit(dataset[c])
    dataset[c] = encoder.transform(dataset[c])
#merged = pd.concat([dataset, PhaseDummies], axis = 'columns')
# The 'Phase' string column is dropped; it is not used as a model feature here.
finaldata = dataset.drop(labels='Phase', axis='columns')
finaldata
# <a id='featureselection'></a>
# ## Part II: Feature selection and feature importance
#
# A simple correlation map is used to assess the correlation of different features with the target (a). In a typical ML problem we drop features based on this map to reduce the computation cost. We could have dropped some of these features on the basis of their weak correlation with 'a', but since this problem does not have too large a feature set we go ahead with all of them.
# +
# Spearman (rank) correlation heatmap of all features against each other and
# against the target 'a'.
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(finaldata.corr(method = 'spearman').round(2), annot = True, cmap="PiYG", vmin=-1, vmax=1, ax=ax);
if savefigs==True:
    f.savefig("Figures/CorrMap_H", dpi = 1200, transparent = False, facecolor = 'white')
# +
# Univariate F-test feature scores against the target lattice constant 'a'.
from sklearn.feature_selection import SelectKBest, f_regression  # already imported at top; kept for cell independence
fea = [c for c in finaldata.columns if c!= "a"]
Ximp = finaldata[fea]
Yimp = finaldata["a"]
sel = SelectKBest(f_regression, k="all")
sel.fit_transform(Ximp, Yimp)
importance = sel.scores_
# Cube root compresses the wide score range into a readable bar chart.
impGraph = pd.Series(np.cbrt(importance), Ximp.columns)
impGraph.plot.bar(color = "Pink")
# -
# For regression, the vacuum size 'c' of a system has the maximum importance, followed by its C11 value and whether the structure is planar or not. It seems that temperature is not important for regression. This is also seen in the datasets, where the lattice constant column has almost identical values for all three temperatures.
# +
## Drop one of the phase columns to avoid multi-collinearity. We drop 1T column.
#finaldata.drop('1T', axis = 'columns', inplace = True)
display(finaldata)
# -
# <a id='modelselection'></a>
# ## Part III: Model selection and data training
# ## Remove 20% of the dataset as unseen data for out-of-sample test
# Hold out 20% of the data as a truly unseen out-of-sample test set (Part VI).
data, data_test = train_test_split(finaldata, test_size = 0.2, random_state = 42)
data = pd.DataFrame(data)
data_test = pd.DataFrame(data_test)
display(data)
display(data_test)
# ### Define features and target
# Target is the lattice constant 'a'; every other column is a feature.
features = [c for c in data.columns if c!= "a"]
X = data[features]
y = data["a"]
# ## Linear Regression
#
# Here we first do a simple train test split (7:3) followed by a cross validation test to assess the accuracy of a default Linear Regression model.
# +
# Baseline: default LinearRegression, assessed with a 70/30 split and
# repeated k-fold cross validation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
lr = linear_model.LinearRegression()
# kfold is reused below for cross_val_predict.
kfold = KFold(n_splits = 5, shuffle = True, random_state = 42)
lr.fit(X_train, y_train)
#lr = linear_model.LinearRegression()
cv = RepeatedKFold(n_splits = 5, n_repeats = 3, random_state = 42)
# Default scorer for a regressor is R^2.
scores = cross_val_score(lr, X, y, cv = cv)
r2_mean = scores.mean()
print("%0.4f accuracy with a standard deviation of %0.4f" % (r2_mean, scores.std()))
print(scores)
# +
# Parity plot: cross-validated predictions vs true lattice constants.
yhat_lr = cross_val_predict(lr, X, y, cv = kfold)
f, ax = plt.subplots(figsize=(8, 8))
label_mlr = "$R^2$ = %.2f" % r2_mean
plt.plot(y, yhat_lr, 'o', label=label_mlr, color = "magenta")
plt.ylabel(r"$a_{predicted}$ ($\AA$)")
plt.xlabel(r"$a$ ($\AA$)")
plt.legend()
plt.xlim([2.5, 6])
plt.ylim([2.5, 6])
# Diagonal y = x reference line (perfect prediction).
plt.plot([2.5, 6], [2.5, 6], 'k--')
plt.show()
# -
# ### Explore and search for the best model
# +
#Lets use train/split ratio of 70/30 - Use ShuffleSplit
# Model shoot-out: fit several linear and boosting regressors on a 70/30
# split and compare train/test/cross-validation metrics.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
#kfold = KFold(n_splits = 5, shuffle = True, random_state = 42 )
# LightGBM hyperparameters. (The original dict also had a dead
# 'objective': 'multiclass' entry that the later 'regression' key silently
# overrode; it is removed here.)
hyper_params_lgbm = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'n_estimators': 500,
    'verbosity': -1
}
hyper_params_xgb = {
    'objective': 'reg:squarederror',  # squared-error regression objective
    'n_estimators': 500,
    'eval_metric': mean_absolute_error
}
# ShuffleSplit for cross validation; KFold gives a similar result.
cv_split = ShuffleSplit(n_splits = 5, test_size = 0.30, random_state = 42)
# Candidate algorithms (a handful of the ones tried).
algorithm_list = {'LinearReg': linear_model.LinearRegression(),
                  'BayeRidge': linear_model.BayesianRidge(),
                  'LassoLars': linear_model.LassoLars(alpha=0.01),
                  'LeastAngLars': linear_model.Lars(),
                  'XgbostReg': xgb.XGBRegressor(**hyper_params_xgb),
                  'LGBMReg': lgbm.LGBMRegressor(**hyper_params_lgbm)
                  }
for key, model in algorithm_list.items():
    try:
        if key in ('XgbostReg', 'LGBMReg'):
            # Boosting models get an eval set for early stopping.
            model.fit(X_train, y_train, early_stopping_rounds=100, eval_set=[(X_test, y_test)])
        else:
            model.fit(X_train, y_train)
        # Cross-validate to guard against overfitting.
        cv_results = cross_validate(model, X_train, y_train, cv = cv_split)
        # Adjusted R^2 penalizes R^2 for the number of features.
        adj_R2 = 1 - (1-model.score(X_train, y_train))*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)
        y_pred = model.predict(X_test)
        print(f'Model - {key: <5} || Model Score: %.3f, Adj_R^2: %.3f, Test Score: %.3f, Cross Val Score: %.3f'
              %(model.score(X_train, y_train), adj_R2, model.score(X_test, y_test), cv_results.get('test_score').mean()))
        print(f'Model - {key: <5} || MSE: %.3f, MAE: %.3f, STD: %.3f, MAPE: %.3f%%'
              %(metrics.mean_squared_error(y_test, y_pred), metrics.mean_absolute_error(y_test, y_pred),
                cv_results.get('test_score').std(), metrics.mean_absolute_percentage_error(y_test, y_pred)*100))
        print(f'Model - {key: <5} || Cross Validation Progress: \n')
        print(pd.DataFrame(cv_results))
        print('----------------------------------------------------------------------------------------------------------')
    except Exception as e:
        # Say which model failed (the original message omitted the model name).
        print(f"Exception occurred in model {key}: {e}")
# -
# ### Comment on the ML models performance
# ***The boosting models performed the best. We will adopt the Xgboost model***
# <a id='hyperparameter'></a>
# ## Part IV: Hyperparameter Optimization of best model
# Grid-search hyperparameter optimization for XGBoost. Expensive, so gated
# behind the hyperparam_opt flag set at the top of the file.
if (hyperparam_opt==True):
    from sklearn.model_selection import StratifiedShuffleSplit
    from sklearn import model_selection
    # Candidate grids for tree count and depth.
    n_estimators = [int(x) for x in np.linspace(start =50, stop = 500, num = 5)]
    max_depth = [int(x) for x in np.linspace(start =5, stop = 20, num = 5)]
    #bootstrap = [True, False]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
    kfold = ShuffleSplit(n_splits = 5, test_size = 0.3, random_state = 42)
    #KFold(n_splits = 10, shuffle = True, random_state = rng )
    param_grid = {
        'n_estimators': n_estimators,
        'max_depth': max_depth,
        'objective':['reg:squarederror'],
        'learning_rate': [0.02, 0.04, 0.06, 0.08, 0.10],
        'colsample_bytree': [0.5, 0.6, 0.7, 0.8]
    }
    bestmodel = XGBRegressor()
    # Exhaustive search over param_grid, scored by R^2.
    xgb_bestmodel = model_selection.GridSearchCV(
        estimator=bestmodel,
        param_grid=param_grid,
        scoring="r2",
        n_jobs = 4,
        cv=kfold,
        verbose = True
    )
    xgb_bestmodel.fit(X_train, y_train)
    #bestparams = model.best_estimator_.get_params()
    bestparams = xgb_bestmodel.best_params_
    bestscore = xgb_bestmodel.best_score_
    print("Best paramters for the chosen model: %s " % bestparams)
    print("Best r2 score: %.6f " % bestscore)
# Record of a previous optimization run (a no-op string literal kept as a note):
'''
Best hyperparameter
Best paramters for the chosen model: {'colsample_bytree': 0.8, 'learning_rate': 0.02, 'max_depth': 8, 'n_estimators': 500, 'objective': 'reg:squarederror'}
Best r2 score: 0.884865
'''
# <a id='bestmodelp'></a>
# ## Part V: Employing XGBoost Regression - Best model
# +
# Use boosting model, which was the best of all the models tested
# Train the final XGBoost model with the (near-)optimal hyperparameters.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
# NOTE(review): learning_rate here is 0.1, while the recorded grid-search
# optimum was 0.02 -- presumably a deliberate choice; confirm.
param_best = {'colsample_bytree': 0.8,
              'learning_rate': 0.1,
              'max_depth': 8,
              'n_estimators': 500,
              'objective': 'reg:squarederror'}
xgbr = XGBRegressor(**param_best)
xgbr.fit(X_train, y_train, early_stopping_rounds=100, eval_set=[(X_test, y_test)])
# cv_split comes from the model-selection cell above.
cv_results = cross_validate(xgbr, X_train, y_train, cv = cv_split)
# BUGFIX: the adjusted-R^2 lines previously scored `model` -- a stale loop
# variable left over from the model-comparison loop -- instead of `xgbr`.
adj_R2_train = 1 - (1-xgbr.score(X_train, y_train))*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)
adj_R2_test = 1 - (1-xgbr.score(X_test, y_test))*(len(y_test)-1)/(len(y_test)-X_test.shape[1]-1)
y_pred = xgbr.predict(X_test)
R2_value = xgbr.score(X_test, y_test)
MSE = metrics.mean_squared_error(y_test, y_pred)
MAE = metrics.mean_absolute_error(y_test, y_pred)
MAPE = metrics.mean_absolute_percentage_error(y_test, y_pred)
print('Model Score: %.3f, Adj_R^2: %.3f, Test Score: %.3f, Cross Val Score: %.3f'
      %(xgbr.score(X_train, y_train), adj_R2_train, xgbr.score(X_test, y_test), cv_results.get('test_score').mean()))
print('MSE: %.3f, MAE: %.3f, STD: %.3f, MAPE: %.3f%%'
      %(MSE, MAE, cv_results.get('test_score').std(), MAPE*100))
print('Cross Validation Progress: \n')
print(pd.DataFrame(cv_results))
# +
# Parity plot for the best model on the validation split.
f, ax = plt.subplots(figsize=(8, 8))
#label_xgbr = f'$R^2$ = %.3f, Adj_$R^2$ = %.3f, \n MSE = %.3f, MAE = %.3f' %(R2_value,adj_R2_test, MSE, MAE)
plt.plot(y_test, y_pred, 'o', color = "green")
plt.ylabel(r"$a_{predicted}$ ($\AA$)")
plt.xlabel(r"$a$ ($\AA$)")
#plt.legend()
plt.xlim([2, 6])
plt.ylim([2, 6])
# Diagonal y = x reference line (perfect prediction).
plt.plot([2, 6], [2, 6], 'k--')
plt.annotate(r"$R^2$ = %.2f, MSE = %.3f" % (R2_value, MSE), (2.1, 5.8), fontsize=15);
#plt.show()
if savefigs==True:
    f.savefig("Figures/BestModel_H", dpi = 1200, transparent = False, facecolor = 'white')
# +
# Horizontal bar chart of XGBoost feature importances.
# NOTE(review): `x` and `y` here shadow the earlier feature matrix / target
# names; the threshold loop further down intentionally reuses this `y`
# (the importance array), so do not rename without updating it too.
x = X_train.columns
y = xgbr.feature_importances_
fig, ax = plt.subplots(figsize=(12,8))
width = 0.75 # the width of the bars
ind = np.arange(len(y)) # the x locations for the groups
ax.barh(ind, y, width, color="blue")
ax.set_yticks(ind+width/2)
ax.set_yticklabels(x, minor=False)
plt.title('Distribution of Feature Importance')
plt.xlabel('Importance')
plt.ylabel('Features')
# Print the values on the barchart
ax.bar_label(ax.containers[0],label_type='edge',fmt='%.2f')
plt.show()
if savefigs==True:
    fig.savefig("Figures/FeatImp_H", dpi = 1200, transparent = False, facecolor = 'white')
# -
# Check the evolution of features
# Check how model quality evolves as the least-important features are dropped:
# for each importance threshold, retrain on the surviving feature subset.
thresholds = sort(y)  # `y` holds xgbr.feature_importances_ at this point
for thresh in thresholds:
    # Keep only features whose importance is >= thresh.
    selection = SelectFromModel(xgbr, threshold=thresh, prefit=True)
    select_X_train = selection.transform(X_train)
    # Retrain a fresh (default) regressor on the reduced feature set.
    selection_model = XGBRegressor()
    selection_model.fit(select_X_train, y_train)
    # Evaluate on the correspondingly reduced test set.
    select_X_test = selection.transform(X_test)
    y_pred = selection_model.predict(select_X_test)
    score = metrics.r2_score(y_test, y_pred)
    # The original also built an unused `predictions` rounding list (removed)
    # and labeled this metric "Accuracy" although it is R^2.
    print("Thresh=%.3f, n=%d, R^2: %.2f%%" % (thresh, select_X_train.shape[1], score*100.0))
# <a id='makeprediction1'></a>
# ## Part VI: Predictions with designed ML model - Out of sample
# Build the out-of-sample feature matrix / target from the 20% holdout.
x_final = [c for c in data_test.columns if c!= "a"]
X_predict_data_1 = data_test[x_final]
y_predict_data_1 = data_test["a"]
X_predict_data_1.head(5)
# +
#xgbr.fit(x,y)
prediction_1 = xgbr.predict(X_predict_data_1)
r2_score = xgbr.score(X_predict_data_1, y_predict_data_1)
mse = mean_squared_error(y_predict_data_1, prediction_1)
f, ax = plt.subplots(figsize=(8, 8))
#sns.scatterplot(x=y_predict_data_1, y=prediction_1)
plt.plot(y_predict_data_1, prediction_1, 'o', color = "magenta")
#r2_score = r2_score(y_predict_data_1, prediction_1)
plt.ylabel(r"$a_{predicted}$ ($\AA$)")
plt.xlabel(r"$a$ ($\AA$)")
plt.xlim([2, 6])
plt.ylim([2, 6])
plt.plot([2, 6], [2, 6], 'k--')
plt.annotate(r"$R^2$ = %.2f, MSE = %.3f" % (r2_score, mse), (2.1, 5.8), fontsize=15);
if savefigs==True:
f.savefig("Figures/Prediction1_H", dpi = 1200, transparent = False, facecolor = 'white')
# -
# Tabulate true vs predicted lattice constants and the percentage error.
result = X_predict_data_1.copy()
result['a'] = y_predict_data_1
result['a_predicted'] = prediction_1
# Encoded categoricals are dropped from the display table for readability.
result.drop(['SG', 'Temp'], axis = 1, inplace = True)
#result.drop(['SG-156', 'SG-8', '0K', '1T', 'Phase', 'Temp'], axis = 1, inplace = True)
result['% error of a'] = (abs(prediction_1 - y_predict_data_1 ))*100/y_predict_data_1
display(result)
# ## End of Case Study
| 2DHetero_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Soft Computing
#
# ## Vežba 3 - HOG
# ## Histogram of Oriented Gradients (HOG)
# Jedan od najpopularnijih postupaka za izdvajanje osobina (eng. *feature extraction*) sa digitalne slike. Formalno, HOG je nešto što se zove **deskriptor slike** (eng. *image descriptor*), koji predstavlja opis vizuelnih osobina slike. Iako je predstavljen pre više od jedne dekade, tačnije 2005. godine u radu <a href="http://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf">Histogram of Oriented Gradients for Human Detection</a>, HOG je deskriptor koji se i dan-danas vrlo često koristi za probleme detekcije i prepoznavanja objekata na slikama.
# <img src="images/hog_diagram.png">
# ## Support Vector Machine (SVM)
# SVM je model nadgledanog mašinskog učenja, koji se može koristiti za probleme klasifikacije i regresije. Ugrubo, SVM klasifikator konstruiše hiperravan koja "najbolje" razdvaja klase podataka, a ova "najbolja" hiperravan je zapravo ona koja maksimizuje marginu između podataka različitih klasa.
#
# U svom osnovnom obliku, SVM je linearni klasifikator, tj. može vrlo efikasno da pronađe hiperravan koja maksimizuje marginu između linearno separabilnih podataka. Međutim, korišćenjem tzv. kernel trika (eng. *kernel trick*), moguće je konstruisati SVM za klasifikaciju podataka koji nisu linearno separabilni - tačnije, pomoću kernela se ulazni podaci zapravo mapiraju na neki višedimenzionalni prostor gde ti podaci postanu linearno separabilni.
# <img src="images/svm.png">
# ## Detekcija automobila
#
# HOG i SVM ćemo iskoristiti kako bi rešili problem detekcije automobila na slikama iz <a href="http://cogcomp.org/Data/Car/">UIUC Image Database for Car Detection</a> skupa podataka.
#
# Trening skup **data/CarData/TrainImages** sadrži 1050 slika (pozitivnih i negativnih primera) dimenzija 100 x 40 za obučavanje i validaciju deskriptora i klasifikatora.
#
# Test skup **data/CarData/TestImages** sadrži 170 slika različitih dimenzija za evaluaciju rada deskriptora i klasifikatora.
#
# Rezultati rada deskriptora i klasifikatora na testnom skupu podataka se mogu uporediti sa tačnim lokacijama automobila koji se nalaze u **data/CarData/trueLocations.txt**.
# ### Obučavanje
import os
import numpy as np
import cv2 # OpenCV
from sklearn.svm import SVC # SVM klasifikator
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier # KNN
import matplotlib.pyplot as plt
# %matplotlib inline
# +
def load_image(path):
    """Read the image at *path* and return it as a single-channel grayscale array."""
    bgr = cv2.imread(path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
def display_image(image):
    """Render *image* with matplotlib using a grayscale colormap."""
    plt.imshow(image, 'gray')
# -
# #### Primer pozitivnog uzorka - jeste automobil
# Positive training sample (contains a car); all training images are 100 x 40.
ipos = load_image('data/CarData/TrainImages/pos-0.pgm')
display_image(ipos)
ipos.shape
# ##### Negative training sample (no car)
ineg = load_image('data/CarData/TrainImages/neg-0.pgm')
display_image(ineg)
ineg.shape
# #### Učitavanje svih primera
# +
# Load every training image, routed into the positive or negative list by the
# filename convention pos-*.pgm / neg-*.pgm. The loop variable `img` is
# intentionally kept (the HOG cell below reads its final value).
train_dir = 'data/CarData/TrainImages/'
pos_imgs = []
neg_imgs = []
for img_name in os.listdir(train_dir):
    img = load_image(os.path.join(train_dir, img_name))
    if 'pos' in img_name:
        bucket = pos_imgs
    elif 'neg' in img_name:
        bucket = neg_imgs
    else:
        continue
    bucket.append(img)
print("Positive images #: ", len(pos_imgs))
print("Negative images #: ", len(neg_imgs))
# -
# #### Izračunavanje HOG deskriptora za sve slike i formiranje niza labela (1 = jeste automobil, 0 = nije automobil)
# +
# Compute one HOG descriptor per training image and the matching label array
# (1 = car, 0 = not a car).
pos_features = []
neg_features = []
labels = []
nbins = 9 # number of orientation bins
cell_size = (8, 8) # pixels per cell
block_size = (3, 3) # cells per block
# NOTE(review): `img` is the leftover loop variable from the loading cell
# above, so the HOG window size is derived from the shape of the *last* image
# read. This only works because all training images share one size
# (100 x 40) -- verify if the dataset changes.
hog = cv2.HOGDescriptor(_winSize=(img.shape[1] // cell_size[1] * cell_size[1],
                                  img.shape[0] // cell_size[0] * cell_size[0]),
                        _blockSize=(block_size[1] * cell_size[1],
                                    block_size[0] * cell_size[0]),
                        _blockStride=(cell_size[1], cell_size[0]),
                        _cellSize=(cell_size[1], cell_size[0]),
                        _nbins=nbins)
for img in pos_imgs:
    pos_features.append(hog.compute(img))
    labels.append(1)
for img in neg_imgs:
    neg_features.append(hog.compute(img))
    labels.append(0)
pos_features = np.array(pos_features)
neg_features = np.array(neg_features)
# Stack positives over negatives; labels were appended in the same order.
x = np.vstack((pos_features, neg_features))
y = np.array(labels)
# -
# #### Podela trening skupa na trening i validacioni
# Split the training data into train and validation subsets (80/20).
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
print('Train shape: ', x_train.shape, y_train.shape)
print('Test shape: ', x_test.shape, y_test.shape)
# +
# transformisemo u oblik pogodan za scikit-learn
def reshape_data(input_data):
    """Flatten a (n_samples, a, b) feature array to (n_samples, a*b) for scikit-learn.

    NOTE(review): assumes 3-D input (older OpenCV returns HOG descriptors as
    column vectors); a 2-D array would fail the unpacking -- confirm the
    installed cv2 version.
    """
    n_samples, dim_a, dim_b = input_data.shape
    return input_data.reshape(n_samples, dim_a * dim_b)
# Flatten the HOG descriptors into 2-D matrices for scikit-learn.
x_train = reshape_data(x_train)
x_test = reshape_data(x_test)
# -
print('Train shape: ', x_train.shape, y_train.shape)
print('Test shape: ', x_test.shape, y_test.shape)
# #### Obučavanje i validacija SVM klasifikatora
# Linear SVM; probability=True enables predict_proba, used later as the
# sliding-window confidence score.
clf_svm = SVC(kernel='linear', probability=True)
clf_svm.fit(x_train, y_train)
y_train_pred = clf_svm.predict(x_train)
y_test_pred = clf_svm.predict(x_test)
print("Train accuracy: ", accuracy_score(y_train, y_train_pred))
print("Validation accuracy: ", accuracy_score(y_test, y_test_pred))
# KNN baseline for comparison.
clf_knn = KNeighborsClassifier(n_neighbors=10)
clf_knn = clf_knn.fit(x_train, y_train)
y_train_pred = clf_knn.predict(x_train)
y_test_pred = clf_knn.predict(x_test)
print("Train accuracy: ", accuracy_score(y_train, y_train_pred))
print("Validation accuracy: ", accuracy_score(y_test, y_test_pred))
# ###### Sa oba klasifikatora koja smo isprobali, uvek smo dobili odlične rezultate (99+%). Kako ovo možemo protumačiti?
#
# Pa prosto, ispostavlja se da je HOG deskriptor vrlo dobar alat za izdvajanje osobina objekata sa slike, i izgleda da su te izdvojene osobine relevantne prilikom klasifikacije. Odnosno, te osobine na neki reprezentativan način predstavljaju određenu vrstu objekta, što mi onda lako možemo iskoristiti za automatizaciju klasifikacije nekim od algoritama mašinskog učenja.
# ### Testiranje
# Dakle, sada imamo obučen SVM klasifikator koji vrlo precizno može da predvidi da li je nešto automobil ili ne. Međutim, ovaj klasifikator radi isključivo na slikama koje su dimenzija kao u obučavajućem skupu (100 x 40). U testnom skupu su date slike koje su različitih dimenzija (dosta veće od 100 x 40) i na kojima se nalaze automobili. Potrebno je detektovati <b>gde</b> se nalazi automobil na slici.
# #### Primer testne slike
# Sample test image (larger than 100 x 40; car location unknown).
itest = load_image('data/CarData/TestImages/test-0.pgm')
display_image(itest)
# Tehnika kojom bi ovo trebalo raditi jeste tzv. klizeći/klizni prozor (eng. <i>sliding window</i>), gde bi se na slici pomerao "prozor", veličine 100 x 40 i za svaki prozor se računao HOG deskriptor i zatim klasifikovao (da li je automobil ili ne). Zatim prozor pomerimo za recimo 10 piksela u desno i uradimo istu stvar. Naravno, kada dođemo do kraja reda, vratimo sliku skroz levo na početak i pomerimo je za recimo 10 piksela na dole i tako dalje.
#
# Ovim ćemo za sve moguće prozore (dimenzija 100 x 40 i sa pomerajima [10px, 0px], [0px, 10px]) dobiti rezultate klasifikacije.
#
# <img src="images/test-im-2.png">
# Međutim, ovde nastaje problem - relativno je izvesno da ukoliko smo unutar jednog prozora detektovali automobil, da ćemo i u nekom od susednih prozora detektovati isti taj automobil (prosto, translacijom za samo 10 piksela smo dobili vrlo sličnu sliku i samim tim vrlo sličan HOG deskriptor koji je ulaz u SVM klasifikator). Dakle, neophodno je na neki način spojiti više bounding box-ova u jedan koji će biti konačan rezultat. Treba voditi računa i o slikama koje sadrže više od jednog automobila,a koje su prisutne u testnom skupu podataka.
#
# Prilikom računanja ovog konačnog bounding box-a može se uzeti u obzir i koliko je SVM siguran (eng. <i>confidence score</i>) u svoju odluku za određeni ulazni podatak - ovu vrednost je moguće dobiti pozivanjem metode <i>predict_proba</i> nad SVM klasifikatorom. Ova metoda vraća verovatnoću ishoda klasifikacije za svaku od klasa. Zbog toga smo u konstruktoru SVM klasifikatora morali staviti <i>probability=True</i>.
#
# <img src="images/test-im-2-nms.png">
# Konačni rezultati se mogu proveriti tako što će se uporediti sa lokacijama automobila u <b>data/CarData/trueLocations.txt</b>. U ovoj datoteci, svaka linija počinje sa rednim brojem slike iz testnog foldera, a istinska lokacija automobila na slici je definisana koordinatama gornje leve tačke (oblika: (y,x)) bounding box-a (dato u datoteci), a širina visina bounding box-a su uvek 100px i 40px, respektivno (kao veličina slika u trening skupu).
#
# Dakle, ako u datoteci imamo <i>0: (48, 26)</i>, to znači da se na slici <i>data/CarData/TestImages/test-0.pgm</i> automobil nalazi na lokaciji koja je definisana bounding box-om <i>((48, 26), (48+40, 26+100)) = ((48, 26), (88, 126))</i>. Ako se u datoteci nalazi više koordinata u jednoj liniji, to znači da na toj slici ima više automobila.
# Metrika kojom se meri poklapanje istinskog bounding box-a i dobijenog bounding box-a je <i>Jaccard index</i>, odnosno <i>IoU (Intersection over Union)</i>. Dakle, za dva bounding box-a, pronaći njihov presek i uniju, a zatim prebrojati piksele u preseku i u uniji i na kraju podeliti ova dva broja:
#
# $$IoU=\frac{|A \cap B|}{|A \cup B|}$$
#
# <img src="images/iou_viz.png">
# Ukoliko je poklapanje potpuno, vrednost <i>IoU</i> će biti 1, ako poklapanja nema biće 0, u slučaju delimičnog poklapanja biće neka vrednost između 0 i 1. Vrednosti veće od 0.5 (tj. **IoU > 0.5**) se generalno smatraju "dobrom" predikcijom.
# <img src="images/iou_examples.png">
# ### Primer:
#
# Iskoristićemo HOG i SVM za detektovanje automobila na slici iz testnog skupa podataka.
#
#
# Prvo ćemo implementirati metodu koja će računati HOG deskriptor i *confidence score* SVM klasifikatora za prosleđeni prozor.
def classify_window(window):
    """Return the SVM's probability that *window* (a 100x40 grayscale patch) is a car.

    Computes the window's HOG descriptor, reshapes it into a single sample,
    and reads the positive-class probability from the trained classifier.
    """
    descriptor = hog.compute(window).reshape(1, -1)
    probabilities = clf_svm.predict_proba(descriptor)
    return probabilities[0][1]
# Sada implementiramo metodu koja će "obilaziti" sliku pomoću klizećeg/kliznog prozora i "pronalaziti" automobile.
def process_image(image, step_size, window_size=(100, 40)):
    """Slide a (width, height) = *window_size* window over *image* and score each
    position with classify_window.

    Returns (best_score, (y, x)) for the most car-like window -- the (y, x)
    order matches the ground-truth file format -- or (0, None) when no
    full-size window fits.
    """
    best_score = 0
    best_window = None
    win_w, win_h = window_size
    for top in range(0, image.shape[0], step_size):
        for left in range(0, image.shape[1], step_size):
            candidate = image[top:top + win_h, left:left + win_w]
            if candidate.shape != (win_h, win_w):
                continue  # partial window at the image border
            score = classify_window(candidate)
            if score > best_score:
                best_score = score
                best_window = (top, left)
    return best_score, best_window
# Ostalo je još da implementiramo metodu koja će računati *Jaccard index* za dva prosleđena bounding box-a.
def jaccard_index(true_box, predicted_box):
    """Return the IoU of two boxes given as [y1, x1, y2, x2].

    Coordinates are treated as inclusive pixel indices, hence the
    ``+ 1`` terms when counting pixels. 1.0 means perfect overlap,
    0.0 means no overlap at all.
    """
    # intersection rectangle (inclusive pixel coordinates)
    top = max(true_box[0], predicted_box[0])
    left = max(true_box[1], predicted_box[1])
    bottom = min(true_box[2], predicted_box[2])
    right = min(true_box[3], predicted_box[3])
    intersection = max(0, right - left + 1) * max(0, bottom - top + 1)
    area_true = (true_box[3] - true_box[1] + 1) * (true_box[2] - true_box[0] + 1)
    area_pred = (predicted_box[3] - predicted_box[1] + 1) * (predicted_box[2] - predicted_box[0] + 1)
    union = float(area_true + area_pred - intersection)
    return max(intersection / union, 0)
# Sada ćemo iskombinovati implementirane metode kako bismo videli ostvareni rezultat.
# Run the sliding-window detector on the test image with a 10-pixel stride.
# NOTE(review): `itest` is loaded in an earlier cell — presumably a grayscale
# test image from data/CarData/TestImages; confirm.
score, score_window = process_image(itest, step_size=10)
print(score)  # best SVM confidence found
print(score_window)  # (y, x) of the best window's top-left corner
# Compare the ground-truth box with a candidate predicted box via IoU.
jaccard_index([48, 26, 88, 126], [50, 20, 90, 120])
# ## Zadaci
#
# ### Zadatak 1
#
# Proveriti dobijene rezultate za sve slike iz testnog skupa sa tačnim lokacijama automobila.
#
# Za slike sa više automobila detaljnije istražiti **Non-Maximum Suppression** kada se radi detekcija više objekata.
#
# ### Zadatak 2 - Konji
#
# U <b>data/HorsesData/</b> se nalazi modifikovan <b>INRIA horse dataset</b> za detekciju konja na slici. Sve slike su u boji, i različitih su dimenzija.
#
# Skup podataka je modifikovan tako da:
#
# * folder <b>data/HorsesData/neg</b> sadrži 170 slika negativnog uzorka (nije konj).
# * folder <b>data/HorsesData/pos</b> sadrži 120 slika pozitivnog uzorka (jeste konj) sa pripadajućim <b>groundtruth</b> datotekama sa tačnim bounding box-om konja na slici.
# * folder <b>data/HorsesData/test</b> sadrži 50 testnih slika sa konjima i pripadajućim <b>groundtruth</b> datotekama.
#
# <b>Groundtruth</b> datoteke su imenovane po šablonu: <i><b>imeSlike</b>__entires.groundtruth</i>.
# Koordinate rezultujućeg bounding box-a u <b>groundtruth</b> datotekama su date u sledećem formatu:
#
# <i>top_left_x top_left_y bottom_right_x bottom_right_y</i>
#
# Prilikom testiranja, za poređenje bounding box-ova koristiti <i>Jaccard index</i>.
| v3-hog/sc-siit-v3-hog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## [Word2vec](https://code.google.com/archive/p/word2vec/) model
# Download here : https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# +
# Loading the model with gensim
from gensim.models import KeyedVectors
import os
# Location of the pre-trained GoogleNews 300-d word2vec binary.
MODEL_PATH = '/home/b/Downloads/GoogleNews-vectors-negative300.bin.gz'
if not os.path.exists(MODEL_PATH):
    # Fail early with a clear hint instead of a cryptic gensim error.
    raise ValueError("SKIP: You need to download the google news model")
model = KeyedVectors.load_word2vec_format(MODEL_PATH, binary=True)
# +
# Word Mover's Distance demo: two semantically similar sentences that
# share almost no vocabulary.
sentence_obama = 'Obama speaks to the media in Illinois'
sentence_president = 'The president greets the press in Chicago'
sentence_obama = sentence_obama.lower().split()
sentence_president = sentence_president.lower().split()
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# Stopwords carry no signal for WMD and only inflate the transport cost.
sentence_obama = [w for w in sentence_obama if w not in stop_words]
sentence_president = [w for w in sentence_president if w not in stop_words]
model.wmdistance(sentence_obama, sentence_president)
| .ipynb_checkpoints/wmd_experiments-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
import os
# Work inside the TED-LIUM release-1 test transcripts (.stm files).
os.chdir('/Users/sammitranade/mscds/diss/TEDLIUM_release1/test/stm/')
directory = os.fsencode("/Users/sammitranade/mscds/diss/TEDLIUM_release1/test/stm")
for file in os.listdir(directory):
    org_file = os.fsdecode(file)
    filename, file_ext = os.path.splitext(org_file)
    print(filename)
    if file_ext == '.stm':
        # Order segments by start time (4th space-separated STM field).
        sorted_data = sorted(open(os.path.join("/Users/sammitranade/mscds/diss/TEDLIUM_release1/test/stm",
                                               org_file)).readlines(), key=lambda line: float(line.split(' ')[3]))
        # Keep only the transcript text after the "<...>" label block.
        sorted_data = [s[s.find(">")+2:] for s in sorted_data]
        # Drop bracketed/parenthesised annotations such as "(applause)".
        sorted_data = [re.sub("[\(\[].*?[\)\]]", "", s) for s in sorted_data]
        # Remove noise markers and hesitation fillers, then fix spacing.
        # NOTE(review): plain str.replace deletes 'uh'/'um' inside words too
        # (e.g. "umbrella" -> "brella") — confirm this is acceptable.
        sorted_data = [s.replace('{NOISE}', '').replace('{COUGH}', '').replace('uh', '').replace('um', '').replace('{SMACK}', '').replace('{HUM}', '').replace('{BREATH}', '').replace('<sil>', '').replace(" '", "'").replace("  ", " ") for s in sorted_data]
        sorted_data = [s.replace("  ", " ") for s in sorted_data]
        # sorted_data = [s.replace('ignore_time_segment_in_scoring', '') for s in sorted_data]
        # Drop segments flagged as not-to-be-scored entirely.
        sorted_data = [ x for x in sorted_data if "ignore_time_segment_in_scoring" not in x ]
        # ignore_time_segment_in_scoring
        sorted_data = [s.strip() for s in sorted_data]
        print("pre-processing done")
        # Write the cleaned, time-ordered transcript as one text file.
        with open("/Users/sammitranade/mscds/diss/TEDLIUM_release1/test/updated-test/{0}.txt".format(filename), "w") as outfile:
            outfile.write("".join(sorted_data))
| pre-processing-TED-LIUM/diss-TED-TR-preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="p1Vc5jmW_zk2"
# # Import modules
# + id="nrLqvWo996e-"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys, os, re, csv, codecs, numpy as np, pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Reshape, Conv2D, Concatenate, Flatten
from keras.layers import Bidirectional, GlobalMaxPool1D, SpatialDropout1D, MaxPool2D, BatchNormalization, Wrapper, InputSpec, TimeDistributed, concatenate
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import seaborn as sns
import numpy as np
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split, KFold, StratifiedShuffleSplit, StratifiedKFold
from sklearn.metrics import (
accuracy_score,
recall_score,
precision_score,
f1_score, roc_curve, auc)
from keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor='val_loss', patience=4,restore_best_weights=True,verbose=0)
callbacks=[early_stopping]
from keras import backend as K
import tensorflow as tf
import keras
import pandas as pd
from tensorflow.keras.optimizers import Adam
from gensim.models import KeyedVectors
import string
# + [markdown] id="PefyF2CF_8ij"
# # Load data
# + id="P9unlGAmAyZT"
# !git clone https://github.com/anonymoususr12/MHPurf
# + id="0rNBGWKJ9cyn"
# %cd MHPurf/data
project = 'tensorflow' # select in [tensorflow, pytorch, keras, incubator-mxnet, caffe]
path = f'{project}.csv'
pd_all = pd.read_csv(path)
# Shuffle rows reproducibly before any later splitting.
pd_all = pd_all.sample(frac=1,random_state=999)
# Build a combined Title+Body text column (empty string when Body is NaN).
pd_all['Title+Body'] = ''
for idx in range(len(pd_all)):
    # NOTE(review): chained indexing (df[col].iloc[idx] = ...) triggers
    # SettingWithCopyWarning and may silently fail to write in newer
    # pandas — prefer .loc / .iloc[row, col]; confirm pandas version.
    if pd.notna(pd_all['Body'].iloc[idx]):
        pd_all['Title+Body'].iloc[idx] = pd_all['Title'].iloc[idx] + '. ' + pd_all['Body'].iloc[idx]
    else:
        pd_all['Title+Body'].iloc[idx] = pd_all['Title'].iloc[idx]
# Export one CSV per text field, normalised to (id, Number, sentiment, text).
pd_title = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Title":"text"})
pd_title.to_csv('Title.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_body = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Body":"text"})
pd_body.to_csv('Body.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_label = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Labels":"text"})
pd_label.to_csv('Labels.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_code = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Codes":"text"})
pd_code.to_csv('Codes.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_comment = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Comments":"text"})
pd_comment.to_csv('Comments.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_command = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Commands":"text"})
pd_command.to_csv('Command.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_tplusb = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Title+Body":"text"})
pd_tplusb.to_csv('Title+Body.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
# + [markdown] id="E8QrdaUBhezp"
# # Tuning
# + id="ztLTnb2Phd6E"
##################### GLOBAL ###################
NETWORK = 'LSTM'
repeated_range = range(0,1)
for max_features in [1000,2500,5000,10000]:
for MAX_LEN in [250,500,1000,2000]:
embed_size = 100 # how big is each word vector
CV_TIME = 10
out_csv_name = f'../{project}_{NETWORK}_{max_features}feature_{MAX_LEN}len'
if NETWORK == 'LSTM':
out_csv_name += '_LSTM'
elif NETWORK == 'CNN':
out_csv_name += '_CNN'
working_path = 'Title+Body.csv'
data = pd.read_csv(working_path)
data = data.rename(columns={"sentiment": "target"})
#################################################
for i in range(len(data['text'])):
data['text'].iloc[i] = str(data['text'].iloc[i])
# Preprocessing
import re
# removing URLs
def remove_url(text):
    """Remove http(s):// and www-prefixed URLs from *text*."""
    url = re.compile(r'https?://\S+|www\.\S+')
    return url.sub(r'', text)
data["text"] = data["text"].apply(lambda x: remove_url(x))
# removing html
def remove_html(text):
    """Remove anything between '<' and the next '>' (non-greedy)."""
    html = re.compile(r'<.*?>')
    return html.sub(r'', text)
data["text"] = data["text"].apply(lambda x: remove_html(x))
# removing emoji
def remove_emoji(text):
    """Strip emoji/pictograph characters from *text* by Unicode range."""
    emoji_pattern = re.compile("["
                       u"\U0001F600-\U0001F64F"  #emoticons
                       u"\U0001F300-\U0001F5FF"  #symbols&pics
                       u"\U0001F680-\U0001F6FF"  #transportation pic
                       u"\U0001F1E0-\U0001F1FF"  #flags
                       u"\U00002702-\U000027B0"  # dingbats
                       u"\U000024C2-\U0001F251"  # enclosed characters
                       "]+", flags = re.UNICODE)
    return emoji_pattern.sub(r'', text)
data["text"] = data["text"].apply(lambda x: remove_emoji(x))
# Stop Word Removal
NLTK_stop_words_list = stopwords.words('english')
custom_stop_words_list = ['...']
final_stop_words_list = NLTK_stop_words_list + custom_stop_words_list
def remove_stopwords(text):
    """Drop every whitespace token found in the module-level final_stop_words_list."""
    return " ".join([word for word in str(text).split() if word not in final_stop_words_list])
data["text"] = data["text"].apply(lambda text: remove_stopwords(text))
# Symbols Removal
def clean_str(string):
    """Normalise text: whitelist characters, pad ')' and '?', collapse
    whitespace, drop backslashes/quotes, and lower-case the result.

    NOTE(review): the parameter name shadows the stdlib ``string`` module
    imported at the top of this notebook — harmless here, but confusing.
    """
    string = re.sub(r"[^A-Za-z0-9(),.!?\'\`]", " ", string)  # keep only whitelisted chars
    string = re.sub(r"\'s", " \'s", string)   # detach possessive 's
    string = re.sub(r"\'ve", " \'ve", string) # detach 've contraction
    string = re.sub(r"\)", " \) ", string)    # pad closing parentheses
    string = re.sub(r"\?", " \? ", string)    # pad question marks
    string = re.sub(r"\s{2,}", " ", string)   # collapse whitespace runs
    string = re.sub(r"\\", "", string)        # drop backslashes (incl. those added above)
    string = re.sub(r"\'", "", string)        # drop single quotes
    string = re.sub(r"\"", "", string)        # drop double quotes
    return string.strip().lower()
data["text"] = data["text"].apply(lambda text: clean_str(text))
# Word2Vec Embedding
list_sentences = data["text"].fillna("").values
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences))
list_tokenized = tokenizer.texts_to_sequences(list_sentences)
y = data["target"]
data = pad_sequences(list_tokenized, maxlen=MAX_LEN)
def load_word2vec_embeddings(filepath, tokenizer, max_features, embedding_size):
    """Build an embedding matrix for the tokenizer's vocabulary.

    Loads up to 500k vectors from *filepath*, initialises unknown words
    from N(mean, std) of the loaded vectors, and copies known vectors in.
    """
    model = KeyedVectors.load_word2vec_format(filepath,limit=500000)
    # NOTE(review): .wv.syn0 is removed in gensim 4.x (use .vectors) —
    # confirm the gensim version this runs against.
    emb_mean, emb_std = model.wv.syn0.mean(), model.wv.syn0.std()
    word_index = tokenizer.word_index
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embedding_size))
    for word, i in word_index.items():
        if i > max_features:
            continue
        try:
            embedding_vector = model[word]
            # NOTE(review): Keras Tokenizer indices are 1-based and the
            # Embedding layer looks up row i, yet the vector is stored at
            # row i-1 — looks like an off-by-one; confirm intended.
            embedding_matrix[i-1] = embedding_vector
        except KeyError:
            continue
    return embedding_matrix
embedding_matrix = load_word2vec_embeddings("embedding/enwiki_20180420_100d.txt.bz2",
tokenizer,
max_features,
embed_size)
# Model Construction
def build_model(max_len):
    """Build and compile the classifier selected by the global NETWORK flag.

    NOTE(review): the *max_len* argument is ignored — the body reads the
    global MAX_LEN instead; confirm which is intended.
    """
    if NETWORK == 'LSTM':
        # LSTM begin
        inp = Input(shape=(MAX_LEN,))
        # NOTE(review): this Dropout output is discarded — the Embedding
        # below reconnects to `inp`, so this line is dead code.
        x = Dropout(0.1)(inp)
        x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
        x = LSTM(100)(x)
        x = Dense(100, activation="relu")(x)
        x = Dropout(0.1)(x)
        x = Dense(2, activation="softmax")(x)
    if NETWORK == 'CNN':
        # CNN begin
        filter_sizes = [3,4,5]  # n-gram widths of the parallel conv branches
        num_filters = 2
        maxlen = MAX_LEN
        inp = Input(shape=(maxlen,))
        x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
        x = SpatialDropout1D(0.2)(x)
        # add a channel axis so Conv2D can slide over (position, embedding)
        x = Reshape((maxlen, embed_size, 1))(x)
        conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embed_size), kernel_initializer='normal',
                        activation='elu')(x)
        conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embed_size), kernel_initializer='normal',
                        activation='elu')(x)
        conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embed_size), kernel_initializer='normal',
                        activation='elu')(x)
        # global max-pool over all window positions for each branch
        maxpool_0 = MaxPool2D(pool_size=(maxlen - filter_sizes[0] + 1, 1))(conv_0)
        maxpool_1 = MaxPool2D(pool_size=(maxlen - filter_sizes[1] + 1, 1))(conv_1)
        maxpool_2 = MaxPool2D(pool_size=(maxlen - filter_sizes[2] + 1, 1))(conv_2)
        z = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
        z = Flatten()(z)
        x = Dense(2, activation="softmax")(z)
    model = Model(inputs=inp, outputs=x)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# Training
outer_macro = []
outer_micro = []
outer_auc_value = []
for repeated_time in repeated_range:
cv_time = 1
macro = []
micro = []
auc_value = []
data, _, y, _ = train_test_split(data,y,test_size=0.2, random_state=0)
y = y.to_numpy()
indices = np.arange(data.shape[0])
kf = KFold(n_splits=CV_TIME, random_state=0, shuffle=True)
for train_index, test_index in kf.split(data):
y_train, y_test = y[train_index], y[test_index]
X_train, X_test = data[train_index], data[test_index]
train_y = y[train_index]
model = build_model(MAX_LEN)
y_train = to_categorical(np.asarray(y_train))
model.fit(X_train, y_train, batch_size=64, epochs=100, validation_split=0.1,callbacks=[early_stopping])
y_pred = model.predict(X_test, batch_size=1024, verbose=1).round().astype(int)
y_true = y_test
y_true = to_categorical(np.asarray(y_true))
current_macro=f1_score(y_true,y_pred,average='macro')
macro.append(current_macro)
print('\n Average Macro F1 is ',sum(macro) / len(macro), f' after CV {cv_time}')
current_micro=f1_score(y_true,y_pred,average='micro')
micro.append(current_micro)
print('\n Average Macro F1 is ',sum(micro) / len(micro), f' after CV {cv_time}')
fpr, tpr, thresholds = roc_curve(y_true[:,1], y_pred[:,1], pos_label=1)
current_auc = auc(fpr, tpr)
auc_value.append(current_auc)
print('\n Average AUC is ', sum(auc_value) / len(auc_value),
f' after CV {cv_time}')
cv_time += 1
outer_macro.append(sum(macro) / len(macro))
print('\n Average Macro F1 is ',sum(outer_macro) / len(outer_macro), f' after running time {repeated_time}')
outer_micro.append(sum(micro) / len(micro))
print('\n Average F1 is ',sum(outer_micro) / len(outer_micro), f' after running time {repeated_time}')
outer_auc_value.append(sum(auc_value)/len(auc_value))
print('\n Total AUC is ', sum(outer_auc_value) / len(outer_auc_value),
f' after running time {repeated_time}')
new_row = {'repeated_time':repeated_time,'cv_list':str(auc_value),'Macro F1':sum(macro) / len(macro), 'Micro F1':sum(micro) / len(micro), 'AUC':sum(auc_value) / len(auc_value)}
df_log = pd.DataFrame(columns=['repeated_time','cv_list','Macro F1', 'Micro F1', 'AUC'])
df_log = df_log.append(new_row, ignore_index=True)
df_log.to_csv(out_csv_name, mode='a', header=False)
# + [markdown] id="l42hWaW-CEQU"
# # Training
# + id="XbxUTfxH9PFf"
##################### GLOBAL ###################
NETWORK = 'LSTM' # choose LSTM or CNN
REPEAT = 30
repeated_range = range(0,REPEAT)
max_features = 1000
MAX_LEN = 250
embed_size = 100 # how big is each word vector
CV_TIME = 10
out_csv_name = f'../{project}_{NETWORK}'
if NETWORK == 'LSTM':
out_csv_name += '_LSTM'
elif NETWORK == 'CNN':
out_csv_name += '_CNN'
data = pd.read_csv('Title+Body.csv')
data = data.rename(columns={"sentiment": "target"})
#################################################
for i in range(len(data['text'])):
data['text'].iloc[i] = str(data['text'].iloc[i])
# Preprocessing
import re
# removing URLs
def remove_url(text):
    """Delete http(s):// and www-prefixed URLs from *text*."""
    return re.sub(r'https?://\S+|www\.\S+', r'', text)
data["text"] = data["text"].apply(lambda x: remove_url(x))
# removing html
def remove_html(text):
    """Delete every '<...>' tag (non-greedy) from *text*."""
    return re.sub(r'<.*?>', r'', text)
data["text"] = data["text"].apply(lambda x: remove_html(x))
# removing emoji
def remove_emoji(text):
    """Strip common emoji/pictograph Unicode ranges from *text*."""
    # emoticons, symbols & pictographs, transport, flags, dingbats,
    # enclosed characters — same ranges as the original character class
    pattern = re.compile(
        "[\U0001F600-\U0001F64F"
        "\U0001F300-\U0001F5FF"
        "\U0001F680-\U0001F6FF"
        "\U0001F1E0-\U0001F1FF"
        "\U00002702-\U000027B0"
        "\U000024C2-\U0001F251]+",
        flags=re.UNICODE,
    )
    return pattern.sub(r'', text)
data["text"] = data["text"].apply(lambda x: remove_emoji(x))
# Stop Word Removal
NLTK_stop_words_list = stopwords.words('english')
custom_stop_words_list = ['...']
final_stop_words_list = NLTK_stop_words_list + custom_stop_words_list
def remove_stopwords(text):
    """Drop every whitespace token present in the module-level final_stop_words_list."""
    kept = [token for token in str(text).split() if token not in final_stop_words_list]
    return " ".join(kept)
data["text"] = data["text"].apply(lambda text: remove_stopwords(text))
# Symbol removal
def clean_str(string):
    """Normalise raw text: whitelist characters, pad ')' and '?', collapse
    whitespace, then strip backslashes and quotes; return lower-cased.
    """
    # applied strictly in order — later steps clean up artefacts of
    # earlier ones (e.g. backslash removal after the padding subs)
    substitutions = (
        (r"[^A-Za-z0-9(),.!?\'\`]", " "),  # keep only whitelisted characters
        (r"\'s", " \'s"),                  # detach possessive 's
        (r"\'ve", " \'ve"),                # detach 've contraction
        (r"\)", " \) "),                   # pad closing parentheses
        (r"\?", " \? "),                   # pad question marks
        (r"\s{2,}", " "),                  # collapse whitespace runs
        (r"\\", ""),                       # remove backslashes
        (r"\'", ""),                       # remove single quotes
        (r"\"", ""),                       # remove double quotes
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
data["text"] = data["text"].apply(lambda text: clean_str(text))
# Word2Vec Embedding
list_sentences = data["text"].fillna("").values
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences))
list_tokenized = tokenizer.texts_to_sequences(list_sentences)
y = data["target"]
data = pad_sequences(list_tokenized, maxlen=MAX_LEN)
def load_word2vec_embeddings(filepath, tokenizer, max_features, embedding_size):
    """Build an embedding matrix for the tokenizer's vocabulary.

    Loads up to 500k vectors from *filepath*, initialises unknown words
    from N(mean, std) of the loaded vectors, and copies known vectors in.
    """
    model = KeyedVectors.load_word2vec_format(filepath,limit=500000)
    # NOTE(review): .wv.syn0 is removed in gensim 4.x (use .vectors) —
    # confirm the gensim version this runs against.
    emb_mean, emb_std = model.wv.syn0.mean(), model.wv.syn0.std()
    word_index = tokenizer.word_index
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embedding_size))
    for word, i in word_index.items():
        if i > max_features:
            continue
        try:
            embedding_vector = model[word]
            # NOTE(review): Keras Tokenizer indices are 1-based and the
            # Embedding layer looks up row i, yet the vector is stored at
            # row i-1 — looks like an off-by-one; confirm intended.
            embedding_matrix[i-1] = embedding_vector
        except KeyError:
            continue
    return embedding_matrix
embedding_matrix = load_word2vec_embeddings("embedding/enwiki_20180420_100d.txt.bz2",
tokenizer,
max_features,
embed_size)
# Model Construction
def build_model(max_len):
    """Build and compile the classifier selected by the global NETWORK flag.

    NOTE(review): the *max_len* argument is ignored — the body reads the
    global MAX_LEN instead; confirm which is intended.
    """
    if NETWORK == 'LSTM':
        # LSTM begin
        inp = Input(shape=(MAX_LEN,))
        # NOTE(review): this Dropout output is discarded — the Embedding
        # below reconnects to `inp`, so this line is dead code.
        x = Dropout(0.1)(inp)
        x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
        x = LSTM(100)(x)
        x = Dense(100, activation="relu")(x)
        x = Dropout(0.1)(x)
        x = Dense(2, activation="softmax")(x)
    if NETWORK == 'CNN':
        # cPur (CNN) begin
        filter_sizes = [3,4,5]  # n-gram widths of the parallel conv branches
        num_filters = 2
        maxlen = MAX_LEN
        inp = Input(shape=(maxlen,))
        x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
        x = SpatialDropout1D(0.2)(x)
        # add a channel axis so Conv2D can slide over (position, embedding)
        x = Reshape((maxlen, embed_size, 1))(x)
        conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embed_size), kernel_initializer='normal',
                        activation='elu')(x)
        conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embed_size), kernel_initializer='normal',
                        activation='elu')(x)
        conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embed_size), kernel_initializer='normal',
                        activation='elu')(x)
        # global max-pool over all window positions for each branch
        maxpool_0 = MaxPool2D(pool_size=(maxlen - filter_sizes[0] + 1, 1))(conv_0)
        maxpool_1 = MaxPool2D(pool_size=(maxlen - filter_sizes[1] + 1, 1))(conv_1)
        maxpool_2 = MaxPool2D(pool_size=(maxlen - filter_sizes[2] + 1, 1))(conv_2)
        z = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
        z = Flatten()(z)
        x = Dense(2, activation="softmax")(z)
    model = Model(inputs=inp, outputs=x)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# Training
macro = []
micro = []
auc_value = []
for repeated_time in repeated_range:
cv_time = 1
X_train, X_test, y_train, y_test = train_test_split(data,y,test_size=0.2, random_state=repeated_time)
y_train = y_train.to_numpy()
y_test = y_test.to_numpy()
model = build_model(MAX_LEN)
y_train = to_categorical(np.asarray(y_train))
model.fit(X_train, y_train, batch_size=64, epochs=100, validation_split=0.1,callbacks=[early_stopping])
y_pred = model.predict(X_test, batch_size=1024, verbose=1).round().astype(int)
y_true = y_test
y_true = to_categorical(np.asarray(y_true))
current_macro=f1_score(y_true,y_pred,average='macro')
macro.append(current_macro)
current_micro=f1_score(y_true,y_pred,average='micro')
micro.append(current_micro)
fpr, tpr, thresholds = roc_curve(y_true[:,1], y_pred[:,1], pos_label=1)
current_auc = auc(fpr, tpr)
auc_value.append(current_auc)
new_row = {'repeated_times':REPEAT,'cv_list':str(auc_value),'Macro F1':sum(macro) / len(macro), 'Micro F1':sum(micro) / len(micro), 'AUC':sum(auc_value) / len(auc_value)}
df_log = pd.DataFrame(columns=['repeated_time','cv_list','Macro F1', 'Micro F1', 'AUC'])
df_log = df_log.append(new_row, ignore_index=True)
df_log.to_csv(out_csv_name, mode='a', header=False)
| model/LSTM&CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model, Input
from keras.layers import Conv2D, Conv2DTranspose, LeakyReLU, Activation, Concatenate
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
import os
import PIL
import numpy as np
from matplotlib import pyplot
# +
class BaseNeuralNetwork:
    """Shared scaffolding for the CycleGAN sub-networks.

    Holds the Keras input layer, a weight initialiser (N(0, 0.02)) and a
    helper for building (transposed) convolution blocks with optional
    instance normalisation and (leaky) ReLU activation.
    """
    def __init__(self, name, input_shape):
        self.name = name
        self.model = None          # set by subclasses in compile_model()
        self.random_normal = RandomNormal(stddev=.02)
        self.input_layer = Input(shape=input_shape)
        self.output_layer = None   # set by subclasses in paper_build()
    def model_summary(self):
        """Print the Keras summary, or a notice if the model isn't built yet."""
        if self.model:
            print(self.model.summary())
        else:
            print("{} is not built yet".format(self.name))
    #candidate for more template treatment
    #conv transpose or not
    #leaky relu, normal relu, or none
    #etc
    def create_conv_block(self, input_layer, output_space, kernal_size, instance_norm=True, relu='leaky', strides=(2, 2), transpose=False):
        """Append one conv (or transposed conv when *transpose*) block.

        relu: 'leaky' (alpha=0.2), 'normal', or None for a linear output.
        """
        if transpose:
            conv = Conv2DTranspose(output_space,
                                   kernal_size,
                                   strides=strides,
                                   padding='same',
                                   kernel_initializer=self.random_normal)(input_layer)
        else:
            conv = Conv2D(output_space,
                          kernal_size,
                          strides=strides,
                          padding='same',
                          kernel_initializer=self.random_normal)(input_layer)
        if instance_norm: conv = InstanceNormalization(axis=-1)(conv)
        if relu == 'leaky': conv = LeakyReLU(alpha=.2)(conv)
        if relu == 'normal': conv = Activation('relu')(conv)
        return conv
class Discriminator(BaseNeuralNetwork):
    """PatchGAN discriminator: maps an image to a patch map of real/fake scores."""
    def __init__(self, name, input_shape):
        super().__init__(name, input_shape)
    def paper_build(self):
        """64-128-256-512-512 conv stack ending in a 1-channel patch output."""
        c = self.create_conv_block(self.input_layer, 64, (4, 4), instance_norm=False)
        c = self.create_conv_block(c, 128, (4, 4))
        c = self.create_conv_block(c, 256, (4, 4))
        c = self.create_conv_block(c, 512, (4, 4))
        c = self.create_conv_block(c, 512, (4, 4), strides=(1, 1))
        out = self.create_conv_block(c, 1, (4, 4), instance_norm=False, relu=None, strides=(1, 1))
        self.output_layer = out
    def compile_model(self):
        """Wrap the layers in a Model and compile with least-squares loss,
        weighted by 0.5 to slow discriminator updates relative to the generator."""
        if self.output_layer is not None:
            self.model = Model(self.input_layer, self.output_layer)
            # NOTE(review): model.name is a read-only property in tf.keras 2.x;
            # this assignment only works with older standalone Keras — confirm.
            self.model.name = self.name
            self.model.compile(loss='mse', optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5])
        else:
            print('No output layers provided for {}'.format(self.name))
class Generator(BaseNeuralNetwork):
    """Encoder / 9-resnet-block / decoder generator with tanh output."""
    def __init__(self, name, input_shape):
        super().__init__(name, input_shape)
    def create_resnet_block(self, input_layer, output_space):
        """Two 3x3 conv layers whose output is concatenated with the input."""
        #first layer
        res = self.create_conv_block(input_layer, output_space, (3, 3), relu='normal', strides=(1, 1))
        # second convolutional layer
        res = self.create_conv_block(res, output_space, (3, 3), relu=None, strides=(1, 1))
        # concatenate merge channel-wise with input layer
        res = Concatenate()([res, input_layer])
        return res
    def paper_build(self):
        """Downsample, apply 9 residual blocks, upsample, tanh to [-1, 1]."""
        c = self.create_conv_block(self.input_layer, 64, (7, 7), relu='normal', strides=(1, 1))
        c = self.create_conv_block(c, 128, (3, 3), relu='normal')
        c = self.create_conv_block(c, 256, (3, 3), relu='normal')
        for _ in range(9):
            c = self.create_resnet_block(c, 256)
        c = self.create_conv_block(c, 128, (3, 3), relu='normal', transpose=True)
        c = self.create_conv_block(c, 64, (3, 3), relu='normal', transpose=True)
        c = self.create_conv_block(c, 3, (7, 7), relu=None, strides=(1, 1))
        out = Activation('tanh')(c)
        self.output_layer = out
    def compile_model(self):
        """Wrap the layers in a Model; generators are trained via the Composite,
        so no loss/optimizer is compiled here."""
        if self.output_layer is not None:
            self.model = Model(self.input_layer, self.output_layer)
            # NOTE(review): model.name is read-only in tf.keras 2.x — confirm
            # the Keras version this targets.
            self.model.name = self.name
        else:
            print('No output layers provided for {}'.format(self.name))
    def generate_fake_samples(self, dataset, patch_shape):
        """Translate *dataset* and return it with all-zero 'fake' patch labels."""
        X = self.model.predict(dataset)
        #fake images have label of zero
        y = np.zeros((len(X), patch_shape, patch_shape, 1))
        return X, y
class Composite():
    """Adversarial + identity + cycle training graph for ONE generator.

    Wires generator 1 (trainable) against a frozen discriminator and the
    frozen opposite generator, exposing the four CycleGAN outputs:
    adversarial patch map, identity image, forward cycle, backward cycle.

    Fix: the original __init__ never stored *name*, so model_summary()
    raised AttributeError; the name is now kept on the instance.
    """
    def __init__(self, name, g1, d1, g2, input_shape):
        self.name = name  # previously dropped; model_summary() reads it
        g_model_1 = g1.model
        d_model = d1.model
        g_model_2 = g2.model
        # We are only training the first generator
        g_model_1.trainable = True
        d_model.trainable = False
        g_model_2.trainable = False
        # discriminator element:
        # generator 1 creates an image for domain 1 and discriminator 1
        # tries to tell whether it is real or fake
        input_gen = Input(shape=input_shape)
        gen1_out = g_model_1(input_gen)
        output_d = d_model(gen1_out)
        # identity element:
        # generator 1 receives a real image from domain 1 and tries not to change it
        input_id = Input(shape=input_shape)
        output_id = g_model_1(input_id)
        # forward cycle:
        # generator 2 receives generator 1's output and tries to map it back
        output_f = g_model_2(gen1_out)
        # backward cycle:
        # generator 2 translates a real image, generator 1 maps it back
        gen2_out = g_model_2(input_id)
        output_b = g_model_1(gen2_out)
        self.model = Model([input_gen, input_id], [output_d, output_id, output_f, output_b])
    def compile_model(self):
        """Compile with the paper's loss weights: adversarial 1, identity 5, cycles 10."""
        self.model.compile(loss=['mse', 'mae', 'mae', 'mae'], loss_weights=[1, 5, 10, 10], optimizer=Adam(lr=.0002, beta_1=.5))
    def model_summary(self):
        """Print the composite model's summary."""
        if self.model:
            print(self.model.summary())
        else:
            print("{} is not built yet".format(self.name))
# -
# Build the two domain discriminators/generators (impressionist <-> photo).
image_shape = (256, 256, 3)
d1 = Discriminator('Impr-Discriminator', image_shape)
g1 = Generator('Impr-Generator', image_shape)
d2 = Discriminator('Photo-Discriminator', image_shape)
g2 = Generator('Photo-Generator', image_shape)
d1.paper_build()
g1.paper_build()
d2.paper_build()
g2.paper_build()
d1.compile_model()
g1.compile_model()
d2.compile_model()
g2.compile_model()
# Composite training graphs, one per translation direction.
i2p = Composite('Impr to Photo', g1, d1, g2, image_shape)
p2i = Composite('Photo to Impr', g2, d2, g1, image_shape)
i2p.compile_model()
p2i.compile_model()
# Augmentation pipeline.
# NOTE(review): `image_loader` (rescale=1/255) is not used anywhere in
# this chunk — loading below scales to [-1, 1] instead; confirm needed.
image_loader = ImageDataGenerator(rescale=1.0/255,
                                  rotation_range=15,
                                  width_shift_range=.2,
                                  height_shift_range=.1,
                                  zoom_range=.2,
                                  horizontal_flip=True,
                                  )
def load_samples(path, n_images=1300, size=(256, 256)):
    """Load images from *path*, resized to *size* and scaled to [-1, 1].

    The [-1, 1] range matches the generators' tanh output. The original
    hard-coded exactly 1300 files and crashed (IndexError) on smaller
    directories; the count is now a parameter (default unchanged) and is
    clamped to the number of files actually present.

    Parameters
    ----------
    path : directory containing the image files.
    n_images : maximum number of images to load (default 1300).
    size : (height, width) every image is resized to (default (256, 256)).

    Returns
    -------
    np.ndarray of shape (n, size[0], size[1], 3), dtype float64.
    """
    all_files = os.listdir(path)
    count = min(n_images, len(all_files))
    imgs = np.zeros((count, size[0], size[1], 3))
    for i in range(count):
        print(i, end='\r')  # lightweight progress indicator
        pixels = np.array(load_img(os.path.join(path, all_files[i]), target_size=size))
        # scale pixel values from [0, 255] to [-1, 1]
        imgs[i] = (pixels - 127.5) / 127.5
    print('\nend')
    return imgs
# Load both domains (pixels scaled to [-1, 1] by load_samples).
impr_imgs = load_samples('monet-paintings')
photo_imgs = load_samples('landscape-pictures')
print(photo_imgs.shape)
print(photo_imgs[0][0][0])
print(impr_imgs.shape)
print(impr_imgs[0][0][0])
# Cache as .npy for faster reloads in later runs.
np.save('monet-paintings.npy', impr_imgs)
np.save('landscape-photos.npy', photo_imgs)
# Quick visual sanity check of the first three photos.
# NOTE(review): the data is in [-1, 1]; astype('uint8') wraps negative
# values, so this preview renders garbled — rescale to [0, 255] first.
for i in range(3):
    pyplot.subplot(2, 3, 1 + i)
    pyplot.axis('off')
    pyplot.imshow(photo_imgs[i].astype('uint8'))
def generate_real_samples(dataset, n_samples, patch_shape):
    """Randomly draw *n_samples* images from *dataset* and pair them with
    all-ones "real" PatchGAN labels of shape (n_samples, patch_shape, patch_shape, 1).
    """
    chosen = np.random.randint(0, dataset.shape[0], n_samples)
    samples = dataset[chosen]
    labels = np.ones((n_samples, patch_shape, patch_shape, 1))
    return samples, labels
def summarize_performance(step, g_model, trainX, name, n_samples=5):
    """Save a PNG of n real images (top row) and their translations (bottom row).

    g_model is a Generator wrapper (its generate_fake_samples is used);
    name and step are embedded in the output filename.
    """
    # select a sample of input images
    X_in, _ = generate_real_samples(trainX, n_samples, 0)
    # generate translated images
    X_out, _ = g_model.generate_fake_samples(X_in, 0)
    # scale all pixels from [-1,1] to [0,1]
    X_in = (X_in + 1) / 2.0
    X_out = (X_out + 1) / 2.0
    # plot real images
    for i in range(n_samples):
        pyplot.subplot(2, n_samples, 1 + i)
        pyplot.axis('off')
        pyplot.imshow(X_in[i])
    # plot translated image
    for i in range(n_samples):
        pyplot.subplot(2, n_samples, 1 + n_samples + i)
        pyplot.axis('off')
        pyplot.imshow(X_out[i])
    # save plot to file
    filename1 = '%s_generated_plot_%06d.png' % (name, (step+1))
    pyplot.savefig(filename1)
    pyplot.close()
def update_image_pool(pool, images, max_size=50):
    """Maintain a replay pool of generated images for discriminator updates.

    While the pool holds fewer than *max_size* images, each incoming image
    is stored and also returned. Once the pool is full, each image is
    either passed through unchanged (p=0.5) or swapped with a randomly
    chosen pooled image, in which case the evicted image is returned
    instead. Returns the selected images as an ndarray.

    Bug fix: the original called bare ``random()``/``randint`` which were
    never imported in this notebook (NameError as soon as the pool
    filled), and stdlib ``randint`` has an inclusive upper bound so it
    could also index one past the pool's end. ``np.random`` is used
    instead (exclusive upper bound, already imported).
    """
    selected = list()
    for image in images:
        if len(pool) < max_size:
            # stock the pool
            pool.append(image)
            selected.append(image)
        elif np.random.random() < 0.5:
            # use image, but don't add it to the pool
            selected.append(image)
        else:
            # replace an existing image and use replaced image
            ix = np.random.randint(0, len(pool))
            selected.append(pool[ix])
            pool[ix] = image
    return np.asarray(selected)
def save_models(step, g_model_AtoB, g_model_BtoA):
    """Checkpoint both generators' Keras models to .h5 files tagged with the step."""
    # save the first generator model
    filename1 = 'g_model_AtoB_%06d.h5' % (step+1)
    g_model_AtoB.model.save(filename1)
    # save the second generator model
    filename2 = 'g_model_BtoA_%06d.h5' % (step+1)
    g_model_BtoA.model.save(filename2)
    print('>Saved: %s and %s' % (filename1, filename2))
# train cyclegan models
def train(d_model_A, d_model_B, g_model_AtoB, g_model_BtoA, c_model_AtoB, c_model_BtoA, dataset):
    """Run the CycleGAN training loop.

    Alternates composite-generator and discriminator updates in both
    translation directions, periodically plotting sample translations and
    checkpointing the generators.  The model arguments are project wrapper
    objects exposing a Keras model via ``.model``; ``dataset`` is the
    ``(trainA, trainB)`` pair of image arrays.
    """
    n_epochs, n_batch = 10, 10
    # output square shape of the PatchGAN discriminator
    n_patch = d_model_A.model.output_shape[1]
    trainA, trainB = dataset
    # image pools keep a history of fakes to stabilise discriminator updates
    poolA, poolB = list(), list()
    bat_per_epo = int(len(trainA) / n_batch)
    n_steps = bat_per_epo * n_epochs
    # BUG FIX: with fewer than 5 batches per epoch the original
    # `(i+1) % int(bat_per_epo / 5)` raised ZeroDivisionError.
    eval_every = max(1, bat_per_epo // 5)
    for i in range(n_steps):
        # sample real images (with 'real' labels) from each domain
        X_realA, y_realA = generate_real_samples(trainA, n_batch, n_patch)
        X_realB, y_realB = generate_real_samples(trainB, n_batch, n_patch)
        # generate fakes in each direction
        X_fakeA, y_fakeA = g_model_BtoA.generate_fake_samples(X_realB, n_patch)
        X_fakeB, y_fakeB = g_model_AtoB.generate_fake_samples(X_realA, n_patch)
        # draw (possibly historical) fakes from the pools
        X_fakeA = update_image_pool(poolA, X_fakeA)
        X_fakeB = update_image_pool(poolB, X_fakeB)
        # update generator B->A via adversarial and cycle loss
        g_loss2, _, _, _, _ = c_model_BtoA.model.train_on_batch([X_realB, X_realA], [y_realA, X_realA, X_realB, X_realA])
        # update discriminator A on the real then the pooled fake batch
        dA_loss1 = d_model_A.model.train_on_batch(X_realA, y_realA)
        dA_loss2 = d_model_A.model.train_on_batch(X_fakeA, y_fakeA)
        # update generator A->B via adversarial and cycle loss
        g_loss1, _, _, _, _ = c_model_AtoB.model.train_on_batch([X_realA, X_realB], [y_realB, X_realB, X_realA, X_realB])
        # update discriminator B on the real then the pooled fake batch
        dB_loss1 = d_model_B.model.train_on_batch(X_realB, y_realB)
        dB_loss2 = d_model_B.model.train_on_batch(X_fakeB, y_fakeB)
        # summarize performance
        print('>%d, dA[%.3f,%.3f] dB[%.3f,%.3f] g[%.3f,%.3f]' % (i+1, dA_loss1,dA_loss2, dB_loss1,dB_loss2, g_loss1,g_loss2))
        # plot sample translations roughly five times per epoch
        if (i+1) % eval_every == 0:
            summarize_performance(i, g_model_AtoB, trainA, 'AtoB')
            summarize_performance(i, g_model_BtoA, trainB, 'BtoA')
        # checkpoint the generators every 5 epochs
        if (i+1) % (bat_per_epo * 5) == 0:
            save_models(i, g_model_AtoB, g_model_BtoA)
# Kick off training with the models built earlier in the notebook, then
# plot a sample grid from the photo->painting (B->A) generator.
train(d1, d2, g1, g2, i2p, p2i, (impr_imgs, photo_imgs))
summarize_performance(0, g2, photo_imgs, 'BtoA')
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting yield at University of California schools
#
# In our project, we wanted to work with admission data from undergraduate institutions to learn more about the admission process in a more scientific context.
#
# **Our main modelling goal for this project will be to determine the yield at an undergraduate school given information about the admitted class.** We believe it is a very interesting and practical question. Every year, during the admission season, colleges have to select students for the incoming freshmen year, but do not know how many of their offers will be accepted. If too few students accept their offers, the freshmen class will be under-enrolled, and school's resources will not be fully used. However, if too many students are admitted, the school will need to spend more resources to accommodate the unusually high number of students.
#
# Unfortunately, **admission data is legally protected, and only highly anonymized datasets are publicly available.** For this project, we decided to use the data from the University of California infocenter. The particular datasets we were interested in can be found here: https://www.universityofcalifornia.edu/infocenter/admissions-source-school. The data contains information about:
#
# - The number of applying, admitted and accepted students from each high school
# - The average GPA of applying, admitted and accepted students at each high school
# - Demographic data (students' race/ethnicity)
# - Locations of the high schools
#
# The data is sorted by year and University of California campus.
#
# We believe that the predictive power of these datasets might not be enough to accurately predict the yield (it only gives us access to very basic meta-information). Therefore, if the evaluations of our models show poor results, we are planning to use demographic information about the surveyed high schools/counties. To do that, we will most likely use the https://data.ca.gov/ repository.
# ## First look at our data
#
# Our data is split into two datasets. The first one (which we will call `gpas` in the later parts of this notebook) contains mean GPA information by:
#
# - University of California campus
# - High School
# - Year
# - Category (applied, admitted, enrolled)
#
# Whereas the second set (which we will call `counts`) contains the number of students in each of the categories *(applied, admitted, enrolled)*. The data is also grouped by:
#
# - University of California campus
# - High School
# - Year
import pandas as pd
# %matplotlib inline
import pylab as plt
import numpy as np
import scipy as sc
import scipy.stats
# Load the two raw UC admissions extracts described above.
gpas = pd.read_csv('data/FR_GPA_by_Inst_data_converted.csv')
counts = pd.read_csv('data/HS_by_Year_data_converted.csv')
# After we have loaded our data, we will display the first few rows in each dataset.
gpas.head(12)
counts.head(6)
# ## About the structure of the data
#
# Unfortunately, the datasets were given to us in a fairly uncomfortable format. Each of the rows specifies:
# - Name of the high school
# - City of the high school
# - County/State/Territory of the high school
# - University of California campus
# - Year.
#
# However, instead of specifying the numerical data in designated columns, the datasets use the *measure name/measure value* approach. That means, that **only one numerical value is given per row.** Instead of putting multiple measurements per each row, the datasets' designers decided to create multiple copies of each row with one measurement per copy. The `Measure Names` column is used to indicate the type of the measurement in the row. The `Measure Values` column specifies the actual value of the measurement.
#
# For example, a row of type:
#
# | campus_name | school_name | avg_enrolled_gpa | avg_accepted_gpa | enrolled_student_count | accepted_student_count |
# |-------------|-------------|------------------|------------------|------------------------|------------------------|
# | Campus A | School B | 2.0 | 3.0 | 50 | 80 |
#
# Would be converted to multiple rows like:
#
# | campus_name | school_name | measurement name | measurement value |
# |-------------|-------------|------------------------|-------------------|
# | Campus A | School B | avg_enrolled_gpa | 2.0 |
# | Campus A | School B | avg_accepted_gpa | 3.0 |
# | Campus A | School B | enrolled_student_count | 50 |
# | Campus A | School B | accepted_student_count | 80 |
#
#
# Moreover, these rows have been split to two separate files, which further complicates working with the data. We are expecting, that we will need to put significant effort into the data cleaning part of the project.
#
# ## Data exploration
#
# In order to better understand the data we will be working with, we decided to perform a few data exploration tasks.
# ### Ratio of NaN fields
#
# One of the concerning properties of our datasets was the large number of `NaN` fields. In order to anonymize the data, the University of California decided to remove information about GPAs for high schools with less than 3 student datapoints, and count information for high schools with less than 5 datapoints.
#
# In this exercise, we decided to find out the ratio of `NaN` fields to actual fields.
# +
# How sparse is the GPA table?  Count rows with a non-NaN 'Measure Values'.
gpas_row_count = len(gpas)
gpas_not_nan_count = gpas[~gpas['Measure Values'].isnull()]['Measure Values'].count()
# NOTE(review): despite its name, this is the ratio of *valid* values to all
# rows -- the printed label below is the accurate description.
gpas_nan_ratio = gpas_not_nan_count/gpas_row_count
print('Number of rows in the GPA table: ', gpas_row_count)
print('Number of valid GPA values: ', gpas_not_nan_count)
print('Ratio of valid GPA values to all values: ', gpas_nan_ratio)
# -
# Next, we repeat the same process for the `student count` data:
# +
# Same sparsity computation for the student-count table.
student_num_row_count = len(counts)
student_num_not_nan_count = counts[~counts['Measure Values'].isnull()]['Measure Values'].count()
# NOTE(review): as above, this is the valid-to-total ratio, not the NaN ratio.
student_num_nan_ratio = student_num_not_nan_count/student_num_row_count
print('Number of rows in the student count table: ', student_num_row_count)
print('Number of valid student count values: ', student_num_not_nan_count)
print('Ratio of valid student count values to all values: ', student_num_nan_ratio)
# -
# #### Results
#
# As we can see, a large number of rows in our dataset **do not contain valid data.** We will have to properly deal with this problem while working on our data cleaning component.
# ### High school applicant GPAs
#
# We thought it would be interesting to learn which schools in our datasets sent the most qualified candidates as measured by student GPA. In order to find that information, we decided to sort the schools by their mean applicant GPA.
#
# First we will show the best schools by applicant GPA:
# +
# Mean applicant GPA per high school ('Calculation1' identifies the school).
school_gpas = gpas[gpas['Measure Names'] == 'App GPA'].\
    groupby('Calculation1')['Measure Values'].\
    mean()
# Top 10 schools by mean applicant GPA.
school_gpas.sort_values(ascending=[False])[0:10]
# -
# Next we will look at the schools with lowest GPAs:
school_gpas.sort_values(ascending=[True])[0:10]
# Interestingly, **all of these schools were located in California**. This brings us to another interesting question about our dataset composition.
# ### High school location breakdown
#
# In our previous exercise we noticed that the top 10 "best" schools and top 10 "worst" schools in our dataset were located in California. In this section, we would like to learn how many of the considered schools were located:
# - in California
# - in the US but outside California
# - outside of the US
#
# In order to perform this task, we notice the following conjecture about the format of the `County/State/Territory` column in the `counts` dataset:
#
# - If the school is located in California, the column contains the county name
# - If the school is located in the US, the column contains the name of the state
# - If the school is located outside of the US, the column contains the name of the country (in all caps)
#
# First we will validate our data:
# +
# We extracted the list of California counties, and US territories from the list of unique locations
ca_counties = ['Alameda', 'Alpine', 'Amador', 'Butte', 'Calaveras', 'Colusa', 'Contra Costa', 'Del Norte', 'El Dorado', 'Fresno', 'Glenn', 'Humboldt', 'Imperial', 'Inyo', 'Kern', 'Kings', 'Lake', 'Lassen', 'Los Angeles', 'Madera', 'Marin', 'Mariposa', 'Mendocino', 'Merced', 'Modoc', 'Mono', 'Monterey', 'Napa', 'Nevada', 'Orange', 'Placer', 'Plumas', 'Riverside', 'Sacramento', 'San Benito', 'San Bernardino', 'San Diego', 'San Francisco', 'San Joaquin', 'San Luis Obispo', 'San Mateo', 'Santa Barbara', 'Santa Clara', 'Santa Cruz', 'Shasta', 'Sierra', 'Siskiyou', 'Solano', 'Sonoma', 'Stanislaus', 'Sutter', 'Tehama', 'Trinity', 'Tulare', 'Tuolumne', 'Ventura', 'Yolo', 'Yuba']
us_states_and_territories = ['American Samoa', 'Northern Mariana Islands', 'U.S. Armed Forces –\xa0Pacific', 'U.S. Armed Forces –\xa0Europe', 'Puerto Rico', 'Guam', 'District of Columbia', 'Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming']
all_locations = list(counts['County/State/ Territory'].unique())
# Anything that is not a CA county, a US state/territory, or NaN must be a
# foreign country name.
country_names = [l for l in all_locations
                 if l not in ca_counties and
                 l not in us_states_and_territories and
                 l is not np.nan]
# Sanity check - our country_names should be in all caps:
for country_name in country_names:
    assert(country_name == country_name.upper())
# -
# Next we will perform the actual calculations:
# +
# Count distinct schools overall and per location bucket, using the
# 'County/State/ Territory' column and the lists built above.
total_schools = counts['Calculation1'].unique().size
california_schools = counts[counts['County/State/ Territory'].isin(ca_counties)]\
    ['Calculation1'].unique().size
us_non_ca_schools = counts[counts['County/State/ Territory'].isin(us_states_and_territories)]\
    ['Calculation1'].unique().size
foreign_schools = counts[counts['County/State/ Territory'].isin(country_names)]\
    ['Calculation1'].unique().size
print('Total number of schools: ', total_schools)
print('Ratio of schools in california: ', california_schools/total_schools)
print('Ratio of schools in the US (but not CA): ', us_non_ca_schools/total_schools)
print('Ratio of foreign schools: ', foreign_schools/total_schools)
# -
# ## Raw data summary
#
# To summarize, we believe our data contains very interesting information that could be helpful to predict the student yield ratio. However, due to a peculiar format of the data, we will need to put a large amount of work into data cleanup, and preprocessing. We will move on to that task in our `preprocessing.ipynb` notebook.
# ## Visualizations on the preprocessed data
#
# To show the type of information stored in our dataset, we decided to show it on a variety of different graphs.
# Cleaned dataset produced by the preprocessing.ipynb notebook.
packed = pd.read_csv('data/processed.csv')
# ### Applying vs Admitted vs Enrolled GPA
#
# We wanted to see what the differences between applying, admitted, and enrolled students' GPAs are. In order to do that, we used our `*_num` and `*_gpa` columns to properly compute the average GPA of students at the UC universities.
#
# Unsurprisingly, the applying student pool had the lowest mean GPA. Moreover, the enrolled student pool had lower GPAs than admitted students. This makes sense, since the students from the top of the accepted pool are more likely to get offers from other universities.
# +
def avg_gpa_finder(data):
    """Return count-weighted mean GPAs (admitted/applied/enrolled) for a group.

    Each school's mean GPA is weighted by its student count; the denominator
    only counts students at schools where the GPA is actually known.
    """
    out = {}
    for stage in ('adm', 'app', 'enr'):
        gpa_col, num_col = stage + '_gpa', stage + '_num'
        weighted_total = (data[gpa_col] * data[num_col]).sum()
        known_students = data[data[gpa_col].notnull()][num_col].sum()
        out[gpa_col] = weighted_total / known_students
    return pd.Series(out, index=['adm_gpa', 'app_gpa', 'enr_gpa'])
# Weighted mean GPAs per campus, one bar group per campus.
packed.groupby(['campus']).apply(avg_gpa_finder).plot.bar()
# -
# ### Average Admitted GPA Inflation over the years
#
# We are interested in exploring how the average admitted, enrolled and applied GPAs have changed over the years. The line plots describe the trend, in which the GPA tends to increase before 2007 and suddenly drops afterwards. After 2010, the increasing trend of GPAs goes on. So, during recent years, GPA does get inflated. This suggests to us that, in order to predict the ratio between the applicants and the students who were actually enrolled, we might need to look at data in recent years.
# Weighted mean GPAs by year, to visualise GPA inflation over time.
packed.groupby(['year']).apply(avg_gpa_finder).plot.line()
# ### Admitted Students vs Enrolled Students
#
# The goal of this project is to predict the ratio between the enrolled students and the admitted students in the future. Therefore, a scatterplot between the enrolled and the admitted from the past would give us an indication of how our model needs to be built. The data regarding "Universitywide" is excluded from this plot because we are interested in each individual university.
#
# The ratio of enrolled to admitted could be a good metric for the desirability of a campus. For instance, Berkeley and Santa Barbara admitted a similar number of students, but many more students enrolled at Berkeley, indicating that Berkeley could be more desirable for students.
# +
def adm_enr_num(data):
    """Sum admitted and enrolled student counts over a group."""
    totals = {'adm_num': data['adm_num'].sum(),
              'enr_num': data['enr_num'].sum()}
    return pd.Series(totals, index=['adm_num', 'enr_num'])
# Aggregate per campus, excluding the systemwide total row.
enr_adm_num_c = packed[packed['campus'] != 'Universitywide'].groupby(['campus']).apply(adm_enr_num)
x, y = enr_adm_num_c.adm_num, enr_adm_num_c.enr_num # should exclude the Universitywide data
# groupby sorts its keys alphabetically, so campus_names is sorted below to
# keep each label aligned with the matching point -- verify if campuses change.
campus_names = ['Berkeley', 'Irvine', 'Davis', 'Los Angeles', 'Merced', 'Riverside', 'San Diego',
                'Santa Barbara', 'Santa Cruz']
campus_names.sort()
plt.scatter(x, y)
plt.xlabel('admitted')
plt.ylabel('enrolled')
plt.title('Number enrolled vs admitted by UC campus')
for i in range(0, len(campus_names)):
    plt.annotate(campus_names[i], (x[i], y[i]))
# -
| DataExploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Funciones: Cómo encapsular código
#
# Algunas tareas que deseamos realizar requieren de muchas líneas de código, lo que dificulta su lectura. Uno de los pilares del lenguaje Python es que podamos escribir código que podamos leer y entender rápidamente.
#
# Muchas veces estas tareas no las realizaremos una única vez, sino que vamos a necesitar de ellas en el futuro o para otras aplicaciones.
# Tener que reescribirlas en cada oportunidad no solo es tedioso, sino que nos arriesgamos a cometer errores.
#
# Una solución para estos problemas es encapsular nuestro código en **funciones**, las cuales nos permiten encerrar códigos largos y/o complejos dentro de una _única y simple idea_ la que podemos utilizar o **llamar** de forma sencilla.
#
# Por ejemplo, escribamos una función que transforme temperaturas en grados Fahrenheit a grados Celsius.
def fahrenheit_to_celsius(temperature):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    desplazada = temperature - 32
    return 5 / 9 * desplazada
# La función nos permite convertir un valor de temperatura en Fahrenheit a Celsius
fahrenheit_to_celsius(10.1)
# O bien, varios valores dentro de un `np.array`
# +
import numpy as np
temperaturas_f = np.array([10.1, 14.6, 18.3, 20.3])
fahrenheit_to_celsius(temperaturas_f)
# -
# ¿Cuál es el _tipo_ de una función?
type(fahrenheit_to_celsius)
# > **Observaciones**
# > - Con la instrucción `def` le indicamos que vamos a definir una función.
# > - Luego indicamos el nombre de la función, que se comporta de manera similar que las variables.
# > - **Llamamos** (del inglés _call_) a la función a través de este nombre seguido por paréntesis `()`.
# > - Dentro de los paréntesis enumeramos los _argumentos_ de la función.
# > - Al igual que los _for loops_, todo lo que queremos que se ejecute dentro de la función debe ir indentado, dentro del **cuerpo** de la función.
# > - Con la instrucción `return` le indicamos a la función que finalice su tarea y que _devuelva_ un determinado valor.
# > - Las funciones de Python **no son** necesariamente funciones matemáticas.
# Por ejemplo, la siguiente función imprime un saludo en base a mi nombre:
def saludo(nombre):
    # Print a greeting; there is no `return`, so the call evaluates to None.
    print("Hola, " + nombre + "!")
saludo("Santi")
# Como vemos, las funciones no necesariamente tienen que devolver un valor. Si no lo hacen, devuelven una variable `None` (que significa _ningún valor_).
resultado = saludo("Santi")
print(resultado)
# Las funciones también pueden devolver _varios_ valores. Por ejemplo:
# +
def potencias(x):
    """Return the square and cube of ``x`` as a tuple."""
    segunda = x ** 2
    tercera = x ** 3
    return segunda, tercera
potencias(2)
# -
# Al devolver múltiples valores, las funciones los agrupan en **tuplas**.
# ## Ya conocemos algunas funciones...
#
# Sin saberlo, ya veníamos utilizando funciones desde la primera clase:
# - `print()`, `type()`, `len()`
# - las de `numpy`, como `np.array()`, `np.linspace()`, `np.min()`, `np.max()`, etc
# - las de `matplotlib.pyplot`, como `plt.plot()`, `plt.show()`, `plt.title()`.
#
# Estas funciones fueron definidas por los desarrolladores de las librerías de la misma forma que lo estamos haciendo nosotros.
# ## Buenas Prácticas: Documentación
#
# Muchas veces escribimos funciones con la intención de reutilizarlas en el futuro. Es por eso que es importante agregarles una documentación detallando qué realiza la función, si hay que tener cuidados especiales, etc.
def fahrenheit_to_celsius(temperature):
    """
    Convert temperatures in degrees Fahrenheit to degrees Celsius.

    Parameters
    ----------
    temperature: float or array
        Temperature in degrees Fahrenheit to convert.

    Returns
    -------
    temperature_c: float or array
        Temperature in degrees Celsius.
    """
    return 5 / 9 * (temperature - 32)
# ## Argumentos (o parámetros)
#
# Los argumentos son aquellos valores que la función toma como entrada para llevar a cabo sus tareas.
# Una función puede tener uno o múltiples argumentos:
# +
def suma_de_cuadrados(x, y):
    """Return the sum of the squares of the two arguments."""
    x_cuadrado = x ** 2
    y_cuadrado = y ** 2
    return x_cuadrado + y_cuadrado
suma_de_cuadrados(2, 3)
# -
# Existen diferentes tipos de argumentos para una función. Los que vimos hasta ahora son argumentos **posicionales**, que son _obligatorios_ y sus valores son asignados por su posición. Es decir, la función espera que le pasemos valores para esos argumentos cada vez que la llamamos y asigna los valores a variables internas en función de la posición en la que los pasamos.
#
# ¿Qué pasa si no le pasamos argumentos a una función con un argumento posicional?
# +
def saludo(nombre):
    # `nombre` is a required positional argument with no default.
    print("Hola", nombre)
# Deliberately called without arguments to demonstrate the TypeError.
saludo()
# -
# ¿Y qué pasa si alteramos el orden?
# +
def division(y, x):
    """Divide the first argument by the second; argument order matters."""
    cociente = y / x
    return cociente
print(division(4, 12))
print(division(12, 4))
# -
# Sin embargo existen otro tipo de argumentos, los argumentos **default**.
# Estos son opcionales, pero al no especificarlos asumen un valor por default. Podemos identificarlos ya que en la definición de las funciones están acompañados de un `=` y un valor por defecto.
#
# Por ejemplo:
def seno(x, omega=1):
    """Evaluate sin(omega * x); ``omega`` (angular frequency) defaults to 1."""
    fase = omega * x
    return np.sin(fase)
# Default omega=1, then the doubled frequency at the same point.
seno(np.pi / 4)
seno(np.pi / 4, omega=2)
# +
import matplotlib.pyplot as plt
abscisas = np.linspace(0, 2 * np.pi, 100)
plt.plot(abscisas, seno(abscisas))
plt.plot(abscisas, seno(abscisas, omega=2))
plt.show()
# -
# # Ejercicio
# En el notebook anterior graficamos repetidas veces una función gaussiana centrada en cero modificando el parámetro sigma:
#
# $$ g(x) = \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{1}{2}\left(\frac{x}{\sigma}\right)^2 }. $$
#
# Esta tarea podría haber sido más fácil si hubieramos **encapsulado** la función gaussiana dentro de una **función** de Python.
# Además, definir funciones para realizar cálculos en los que _no nos queremos equivocar_ nos ayuda a prevenir errores. Especialmente si creamos una función con buena documentación que podemos usar en el futuro.
# Estos hábitos (entre otros) forman parte de lo que se conoce como **buenas prácticas** para el desarrollo de software.
#
# 1. Escribir una función que evalúe la función Gaussiana y devuelva su valor.
# 2. Dicha función debe poder admitir un valor de $\sigma$ como argumento, asumiéndolo por defecto igual a 1.
# 3. Añadir una documentación detallada de la función gaussiana. ¿Qué hace la función? ¿Qué argumentos admite? ¿Qué valores devuelve?
# 4. Seleccionar al menos tres valores diferentes de $\sigma$ y graficar las gaussianas usando un _for loop_.
#
# **Bonus track**
#
# La expresión generalizada de la función gaussiana es:
#
# $$ g(x) = \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{1}{2}\left(\frac{x - \mu}{\sigma}\right)^2 }. $$
#
# Donde $\mu$ indica la abscisa correspondiente al pico de la función.
#
# 1. Generalice la función anterior para que admita un argumento adicional `mu`, con valor por defecto igual a 0.
# 2. Actualice la documentación de la función para el nuevo valor.
# 3. Seleccione al menos tres valores diferentes de $\mu$ y grafique las gaussianas para un mismo valor de $\sigma$.
# ### Resolución
# +
import numpy as np
import matplotlib.pyplot as plt
def gaussiana(x, sigma=1):
    """
    Gaussian function centred at zero.

    Parameters
    ----------
    x : float or array
        Abscissa values where the Gaussian is evaluated.
    sigma : float (optional)
        Standard deviation; controls the width of the bell curve.
        Default to 1.

    Returns
    -------
    y : float or array
        Gaussian evaluated at ``x``.
    """
    normalizacion = 1 / sigma * 1 / np.sqrt(2 * np.pi)
    return normalizacion * np.exp(-0.5 * (x / sigma) ** 2)
# Evaluate and plot the Gaussian for several widths on a common grid.
x = np.linspace(-4, 4, 100)
sigmas = [0.5, 1, 2]
for sigma in sigmas:
    label = "sigma = " + str(sigma)
    plt.plot(x, gaussiana(x, sigma=sigma), label=label)
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()
# -
# Generalicemos la función gaussiana incluyendo el parámetro $\mu$.
# +
import numpy as np
import matplotlib.pyplot as plt
def gaussiana(x, sigma=1, mu=0):
    """
    Generalised Gaussian function.

    Parameters
    ----------
    x : float or array
        Abscissa values where the Gaussian is evaluated.
    sigma : float (optional)
        Standard deviation; controls the width of the bell curve.
        Default to 1.
    mu : float (optional)
        Abscissa of the peak of the function.
        Default to 0.

    Returns
    -------
    y : float or array
        Gaussian evaluated at ``x``.
    """
    normalizacion = 1 / sigma * 1 / np.sqrt(2 * np.pi)
    return normalizacion * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
# Plot the same-width Gaussian shifted to several peak positions.
x = np.linspace(-4, 4, 100)
mus = [1, 0, -1]
for mu in mus:
    label = "mu = " + str(mu)
    plt.plot(x, gaussiana(x, mu=mu), label=label)
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()
| notebooks/pendientes/05-funciones.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %run "C:\Users\neoSTORM6\Documents\ImageAnalysis3\required_files\Startup_py3.py"
sys.path.append(r"C:\Users\neoSTORM6\Documents")
import ImageAnalysis3 as ia
# %matplotlib notebook
from ImageAnalysis3 import *
print(os.getpid())
# -
# Folder with the raw .dax movies; keep only files whose extension is 'dax'.
image_folder = r'D:\Pu\20211115-P_brain_CTP11-500_DNA_only\Tests'
image_basenames = [_fl for _fl in os.listdir(image_folder) if _fl.split(os.extsep)[-1] =='dax']
print(image_basenames)
# +
# Load the last .dax movie in the folder.
image_filename = os.path.join(image_folder, image_basenames[-1])
im1 = ia.visual_tools.batch_load_dax(image_filename)
# -
#split by channel
image_channels = [750,647,488,405]
# Split the interleaved movie into one 60-frame z-stack per channel.
splitted_ims1 = ia.io_tools.load.split_im_by_channels(im1,
                                                      image_channels,
                                                      image_channels,
                                                      num_buffer_frames=0,
                                                      single_im_size=[60,2048,2048])
splitted_ims1[0].shape
# %matplotlib notebook
ia.visual_tools.imshow_mark_3d_v2(splitted_ims1, dpi=150, image_names=image_channels)
# +
# Reload and split with a 50-frame z-stack instead of 60.
# NOTE(review): image_basenames[-1] is the same file as above -- confirm the
# intent was not to load a different movie here.
image_filename = os.path.join(image_folder, image_basenames[-1])
im2 = ia.visual_tools.batch_load_dax(image_filename)
#split by channel
image_channels = [750,647,488,405]
splitted_ims2 = ia.io_tools.load.split_im_by_channels(im2, image_channels,image_channels,num_buffer_frames=0, single_im_size=[50,2048,2048])
# -
# %matplotlib notebook
ia.visual_tools.imshow_mark_3d_v2(splitted_ims2, dpi=150)
| jupyter/Quick_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Diabetes prediction
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from random import randint
from sklearn.metrics import classification_report,confusion_matrix
def load_dataset():
    """Download the Pima Indians diabetes CSV and return (features, labels) arrays."""
    url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.csv'
    column_names = ['Number of times pregnant','Plasma glucose concentration a 2 hours in an oral glucose tolerance test',
                    'Diastolic blood pressure (mm Hg)','Triceps skin fold thickness (mm)','2-Hour serum insulin (mu U/ml)','Body mass index (weight in kg/(height in m)^2)',
                    'Diabetes pedigree function','Age (years)','Outcome']
    frame = read_csv(url, header=None, names=column_names)
    values = frame.values
    # Last column is the 0/1 diabetes outcome; the rest are features.
    return values[:, :-1], values[:, -1]
def load_dataset2():
    """Download the Pima Indians diabetes CSV and return it as a labelled DataFrame."""
    url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.csv'
    column_names = ['Number of times pregnant','Plasma glucose concentration a 2 hours in an oral glucose tolerance test',
                    'Diastolic blood pressure (mm Hg)','Triceps skin fold thickness (mm)','2-Hour serum insulin (mu U/ml)','Body mass index (weight in kg/(height in m)^2)',
                    'Diabetes pedigree function','Age (years)','Outcome']
    return read_csv(url, header=None, names=column_names)
# Load the DataFrame form and shift every age by +5; the same transform is
# applied to the array form below so the two stay consistent.
n=load_dataset2()
n
n.columns
n[['Age (years)']]=n[['Age (years)']]+5
# NOTE(review): DataFrame.drop is not in-place; this result is discarded.
n.drop(['Outcome'], axis=1)
data=load_dataset()
data
# BUG FIX: the original looped `range(len(data))`, but `data` is the (X, y)
# tuple (length 2), so only the first two rows were shifted.  Iterate over
# the rows of X so every sample's age (column 7) gets the +5 shift.
for i in range(len(data[0])):
    data[0][i][7]=data[0][i][7]+5
data[0][0]
data[0][1]
# # Hill Climbing Algorithm
# +
# evaluate a set of predictions
def evaluate_predictions(y_test, yhat):
    # Accuracy: fraction of labels in yhat matching y_test.
    return accuracy_score(y_test, yhat)
# +
# create a random set of predictions
def random_predictions(n_examples):
    """Initial hill-climb guess: one random 0/1 label per test example."""
    return [randint(0, 1) for _example in range(n_examples)]
# +
# modify the current set of predictions
def modify_predictions(current, n_changes=1):
    """Return a copy of ``current`` with ``n_changes`` randomly chosen labels flipped."""
    mutated = current.copy()
    for _change in range(n_changes):
        position = randint(0, len(mutated)-1)
        # flip the 0/1 class label at the chosen position
        mutated[position] = 1 - mutated[position]
    return mutated
# +
# run hill climb for a set of predictions
def hill_climb_testset(X_test, y_test, max_iterations):
    """Hill-climb a random label vector toward y_test, scoring by accuracy.

    Returns the best label vector found and the per-iteration score history.
    """
    history = list()
    solution = random_predictions(X_test.shape[0])
    score = evaluate_predictions(y_test, solution)
    history.append(score)
    for iteration in range(max_iterations):
        # record the incumbent score for this iteration
        history.append(score)
        # stop early once the test labels are perfectly reproduced
        if score == 1.0:
            break
        # flip one label; keep the candidate if it is no worse
        candidate = modify_predictions(solution)
        candidate_score = evaluate_predictions(y_test, candidate)
        if candidate_score >= score:
            solution, score = candidate, candidate_score
        print('>%d, score=%.3f' % (iteration, score))
    return solution, history
# -
# Unpack the (features, labels) arrays produced by load_dataset().
X, y = data
print(X.shape, y.shape)
X
# split dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# run hill climb
yhat, scores = hill_climb_testset(X_test, y_test, 5000)
# +
# plot the scores vs iterations
plt.plot(scores)
plt.show()
# -
# # KNN
from sklearn.neighbors import KNeighborsClassifier
# k-nearest-neighbours baseline, k=5, on the raw (unscaled) features.
knn=KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print('Accuracy of on training set: {:.2f}'.format(knn.score(X_train, y_train)))
print('Accuracy of on test set: {:.2f}'.format(knn.score(X_test, y_test)))
y_pred=knn.predict(X_test)
y_pred
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test,y_pred))
# # Decision tree
from sklearn.tree import DecisionTreeClassifier
# +
# Decision tree with default hyperparameters (grows to purity, so training
# accuracy is expected to approach 1.0 -- a sign of overfitting).
dt = DecisionTreeClassifier()
# Train Decision Tree Classifer
dt = dt.fit(X_train,y_train)
#Predict the response for test dataset
y_pred = dt.predict(X_test)
y_pred
# -
print('Accuracy of on training set: {:.2f}'.format(dt.score(X_train, y_train)))
print('Accuracy of on test set: {:.2f}'.format(dt.score(X_test, y_test)))
from sklearn import metrics
print("DT score:",metrics.accuracy_score(y_test, y_pred))
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test,y_pred))
# # Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# +
#Split the data randomly into two. One for training the model, one for testing
# NOTE(review): this 50/50 re-split (no random_state) replaces the earlier
# 67/33 split, so the models below are not directly comparable to KNN/DT
# above and their scores change between runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= .5)
# -
#Make and fit the random forest to the training data
randForest = RandomForestClassifier(n_estimators=100, random_state=0)
randForest.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(randForest.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(randForest.score(X_test, y_test)))
# # Support Vector Machine
from sklearn.svm import SVC
from sklearn.preprocessing import MinMaxScaler
# Scale features to [0, 1]; SVMs are sensitive to feature magnitudes.
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
# BUG FIX: fit the scaler on the training data only and reuse it for the
# test set; re-fitting on X_test leaks test-set statistics.
X_test_scaled = scaler.transform(X_test)
svc = SVC()
svc.fit(X_train_scaled, y_train)
# BUG FIX: the model was trained on scaled features, so it must also be
# scored on the scaled matrices (the original scored on the raw ones).
print("Accuracy on training set: {:.2f}".format(svc.score(X_train_scaled, y_train)))
print("Accuracy on test set: {:.2f}".format(svc.score(X_test_scaled, y_test)))
# # Gradient Boosting
#
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier(random_state=0)
gb.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(gb.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(gb.score(X_test, y_test)))
# # Predict on new data set (minimum 1 row).
# NOTE(review): hardcoded absolute Windows path; the notebook only runs on
# this one machine. Consider a relative path or an upload widget.
data_new=pd.read_csv(r'C:\Users\ACER\Downloads\newdataset.csv')
# Notebook-style echo of the new rows.
data_new
# Run every fitted model on the new rows.
knn.predict(data_new)
dt.predict(data_new)
randForest.predict(data_new)
gb.predict(data_new)
# NOTE(review): svc was fit on MinMax-scaled features but data_new is passed
# raw here -- the SVC predictions are on the wrong scale; verify.
svc.predict(data_new)
| hill climb for diabetes_ann (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from astropy.io import ascii
from scipy import stats
from scipy.optimize import minimize,curve_fit
from scipy.stats import gaussian_kde as kde
np.random.seed(9620)
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
sns.set(style='ticks', context='talk')
plt.style.use("../paper.mplstyle")
from multiprocessing import Pool
from tqdm import tqdm
from astropy.cosmology import FlatLambdaCDM
from astropy.coordinates import SkyCoord, Galactic
from astropy import units as u
from warnings import filterwarnings
filterwarnings(action='ignore')
# -
# # (0) import data & prepare functions
# import Galaxy data
df_SDSS = pd.read_csv('data/DR8_GZoo+Granada+Portsmouth_cut.csv').set_index('specobjid')
df_MANGA = pd.read_csv('data/MaNGA_combined_v01.csv').set_index('mangaid')
df_CALIFA = pd.read_csv('data/CALIFA_ttype.csv').set_index('califa_id')
# import SN data
data2 = pd.read_csv('data/A2_rev1.csv').set_index('ID')
# + code_folding=[0]
def get_dist(df, type_range=(), attr='AGE_granada', ttype_attr='gz_ttype',
             gaussian_error=None, slope=None, Nsample=100000, isCALIFA=False):
    '''
    Build a KDE of a galaxy property for a range of morphological types.

    Returns the y-values of the count-weighted KDE curve evaluated on the
    module-level grid ``x_th``. If ``gaussian_error`` is given (requires
    ``slope``), also projects the distribution onto HR space, broadens it by
    the measurement error, and returns ``(y_th, y_th_b, mean)``.

    inputs:
        type_range: a tuple of lower and upper bounds in the types, inclusive
        attr: column of ``df`` holding the galaxy property
        ttype_attr: column of ``df`` holding the morphological type
        gaussian_error: sigma of the broadening kernel (needs ``slope``)
        isCALIFA: match CALIFA integer t-types exactly instead of T ranges
    '''
    # Select the inclusive index range within the module-level `types` list
    # (e.g. ('E', 'S0+') for the early-type bin).
    idx_lower = np.where(np.asarray(types) == type_range[0])[0][0]
    idx_upper = np.where(np.asarray(types) == type_range[1])[0][0]
    indices = np.arange(idx_lower, idx_upper + 1)
    # Create the weighted distribution: each type's KDE is weighted by the
    # number of A2 supernovae of that type (N_types_A2).
    Ntotal = 0  # NOTE: accumulated but not used below; kept for inspection
    y_th = np.zeros(len(x_th))
    for i in indices:
        if isCALIFA:
            cut = df[ttype_attr] == CALIFA_ttypes[i]
            if cut.sum() == 0:
                print('error: cannot find ttypes in CALIFA')
                return 1  # error sentinel (callers unpacking 3 values will fail)
        else:
            T_lower = T_center[i] - T_ranges[i]
            T_upper = T_center[i] + T_ranges[i]
            cut = (df[ttype_attr] > T_lower) & (df[ttype_attr] < T_upper)
        Ntotal += N_types_A2[i]
        try:
            y_th += N_types_A2[i] * kde(df[cut][attr].values)(x_th)
        except Exception:
            # KDE fails on empty/degenerate samples; skip this type bin.
            continue
    # Normalize to unit peak.
    y_th /= y_th.max()
    # Without broadening, return the bare curve.
    if gaussian_error is None:  # identity check, not `== None`
        return y_th
    if slope is None:
        print('error: slope needs to be given')
        return 1  # error sentinel, consistent with the CALIFA branch
    # Project onto HR space and broaden by the measurement error: draw
    # samples from the curve, perturb each with Gaussian noise, re-KDE.
    HR = slope * x_th
    samples = np.random.choice(HR, Nsample, p=y_th / (y_th.sum()))
    samples_b = [np.random.normal(x, gaussian_error) for x in samples]
    y_th_b = kde(samples_b)(HR)
    y_th_b = y_th_b / y_th_b.max()
    mean = np.mean(samples_b)
    return y_th, y_th_b, mean
# + code_folding=[0]
def project_slope(df,slope,slope_err,
                  early_range,late_range,attr,ttype_attr,Nsample=100000,scatter_size=None,isCALIFA=False):
    '''
    Project the galaxy property distribution of two t-type bins onto HR
    space using a SN Ia luminosity -- galaxy property slope.

    *** returned data axes ***
        0: slope (min, most_probable, max)
        1: population bin (early or late)
        2: dist, broadened dist, mean of dist
    (e.g. results_list[1][0][2] returns the mean of the early population
    in HR space predicted by the most probable slope)
    '''
    # Slopes are negative, so slope+err is the shallow ("min") variant and
    # slope-err the steep ("max") one -- same ordering as the original.
    slope_variants = [slope + slope_err, slope, slope - slope_err]
    results_list = []
    for slope_val in tqdm(slope_variants):
        per_bin = []
        for bin_range in (early_range, late_range):
            dist, dist_b, dist_mean = get_dist(df,
                type_range=bin_range, attr=attr, ttype_attr=ttype_attr,
                gaussian_error=scatter_size, slope=slope_val, isCALIFA=isCALIFA)
            per_bin.append([dist, dist_b, dist_mean])
        results_list.append(per_bin)
    return results_list
# -
# # (1) set up (slope, binning, etc.)
# Five published U20 slope measurements and their errors.
U20_slope = np.array([-0.029,-0.024,-0.016,-0.030,-0.034])
U20_slope_err = np.array([0.027,0.027,0.024,0.025,0.026])
# Inverse-variance weighted mean of the slopes; `returned=True` also gives
# the sum of weights.
mean,weight_sum = np.average(U20_slope,weights=1/U20_slope_err**2,returned=True)
# NOTE(review): dividing the weight sum by the number of measurements before
# inverting looks like an "average-error" convention rather than the standard
# 1/sqrt(sum of weights) -- confirm this is the intended error estimate.
mean_err = 1/np.sqrt(weight_sum/len(U20_slope))
print(mean,mean_err)
slope = mean
slope_err = mean_err
# +
# available types and counts
types = ['E','S0-','S0','S0+','S0a','Sa','Sab','Sb','Sbc','Sc','Scd','Sd','Sdm']
# Numerical T-type center and half-width for each morphology label above.
T_center = [-5,-3,-2,-1,0,1,2,3,4,5,6,7,8]
T_ranges = [1.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5]
# CALIFA only records a coarse integer t-type per label.
CALIFA_ttypes = [-5,-2,-2,-2,1,1,1,3,4,5,5,7,7]
# Number of A2 supernovae per morphology bin (KDE weights in get_dist).
N_types_A2 = [36,12,21,2,17,28,24,43,34,42,26,2,5]
# Evaluation grid for the KDE curves.
x_th = np.linspace(-10,30,1000)
# galaxy binning
ttype_early = ('E','S0+')
ttype_late = ('Sbc','Sdm')
# +
# SN data binning
early_type = ['E', 'E?', 'E/S0', 'S0', 'S0-', 'S0⚬', 'S0+']#, 'S0a', 'Sa']
late_type = ['Sbc','Sc', 'Scd','Sd','Sdm']
# ************ do not need to touch below ***********
# binning
data2['morph_bin'] = np.nan
data2.loc[data2['morphology'].isin(early_type),'morph_bin'] = 'early'
data2.loc[data2['morphology'].isin(late_type),'morph_bin'] = 'late'
bins = np.linspace(-1,1,50)
early = data2['morph_bin']=='early'
late = data2['morph_bin']=='late'
# error sampling: draw Nsample realizations of each SN's HR from a Gaussian
# centered on HR with width HR_err, then pool them.
Nsample = 100000
# NOTE(review): `['HR'][i]` indexes by label after set_index('ID'); this only
# works if the IDs are the integers 0..n-1 -- verify against the data file.
SN_early = np.array([np.random.normal(data2[early]['HR'][i],data2[early]['HR_err'][i],Nsample) for i in range(early.sum())]).flatten()
SN_late = np.array([np.random.normal(data2[late]['HR'][i],data2[late]['HR_err'][i],Nsample) for i in range(late.sum())]).flatten()
HR_err_mean = data2['HR_err'].mean()
# ***************************************************
# -
# # (2) do analysis
df_MANGA.columns
df_SDSS.columns
U20_granada = project_slope(df_SDSS,slope,slope_err,
ttype_early,ttype_late,
attr = 'LOGMASS_granada',
ttype_attr = 'gz_ttype',
scatter_size = HR_err_mean
)
U20_portsmouth = project_slope(df_SDSS,slope,slope_err,
ttype_early,ttype_late,
attr = 'LOGMASS_portsmouth',
ttype_attr = 'gz_ttype',
scatter_size = HR_err_mean
)
U20_firefly = project_slope(df_MANGA,slope,slope_err,
ttype_early,ttype_late,
attr = 'ff_mass',
ttype_attr = 'gz_ttype',
scatter_size = HR_err_mean
)
U20_pipe3d = project_slope(df_MANGA,slope,slope_err,
ttype_early,ttype_late,
attr = 'p3d_mass',
ttype_attr = 'gz_ttype',
scatter_size = HR_err_mean
)
U20_Starlight = project_slope(df_CALIFA,slope,slope_err,
ttype_early,ttype_late,
attr = 'mass',
ttype_attr = 'ttype',
scatter_size = HR_err_mean,
isCALIFA = True
)
with open('data/U20.npy', 'wb') as f:
np.save(f, np.asarray(U20_granada),allow_pickle=True)
np.save(f, np.asarray(U20_portsmouth),allow_pickle=True)
np.save(f, np.asarray(U20_firefly),allow_pickle=True)
np.save(f, np.asarray(U20_pipe3d),allow_pickle=True)
np.save(f, np.asarray(U20_Starlight),allow_pickle=True)
np.save(f, SN_early,allow_pickle=True)
np.save(f, SN_late,allow_pickle=True)
# # (4) plot results
# + code_folding=[]
def plot_results(results,x_th,slope,color='yellowgreen',plot_early=True,plot_late=False,ax=None,label=None,
                 plot_axvline=False,plot_errorbar=True,yscale=1,ls=':',errorbar_yloc=0.3):
    """Plot projected HR-space distributions produced by project_slope().

    `results` axes: [slope variant (0=min, 1=most probable, 2=max)]
                    [population bin (0=early, 1=late)]
                    [0=dist, 1=broadened dist, 2=mean of dist].
    Curves are shifted so the late-bin mean (most probable slope) sits at
    zero; the early-minus-late offset is drawn as a vline and/or errorbar.
    """
    # data prep
    HR = x_th*slope
    offset = results[1][1][2]  # late-bin mean at the most probable slope
    # early-minus-late mean offset for each slope variant
    mean_min = results[2][0][2] - results[2][1][2]
    mean_mpb = results[1][0][2] - results[1][1][2]
    mean_max = results[0][0][2] - results[0][1][2]
    early_dist_b = results[1][0][1]
    late_dist_b = results[1][1][1]
    if ax is None:  # fixed: identity check for the None sentinel, not `==`
        ax = plt.gca()
    if plot_early:
        ax.plot(HR-offset,early_dist_b*yscale,c=color,lw=3,alpha=0.8,label=label)
    if plot_late:
        ax.plot(HR-offset,late_dist_b*yscale,c='lightblue',lw=3,alpha=0.8,label=label)
    if plot_axvline:
        ax.axvline(mean_mpb,linestyle=ls,color=color,alpha=1,lw=3,zorder=10)
        ax.axvspan(mean_min,mean_max,color=color,alpha=0.05)
    if plot_errorbar:
        # asymmetric errors from the min/max slope variants
        err_min = mean_min-mean_mpb
        err_max = mean_mpb-mean_max
        ax.errorbar(mean_mpb,errorbar_yloc,xerr=[[err_min],[err_max]], fmt='o',
                    color=color, ecolor=color, ms=7,lw=3,capsize=7)
# +
fig, ax = plt.subplots(1,1,figsize=(12,7))
y1,_,_ = plt.hist(SN_early-SN_late.mean(),color='orange',bins=bins,density=True,alpha=0.6,label='A2 early-type',histtype=u'step',lw=4)
y2,_,_ = plt.hist(SN_late-SN_late.mean(),color='blue',bins=bins,density=True,alpha=0.4,label='A2 late-type',histtype=u'step',lw=4)
plot_results(U20_firefly,x_th,slope,yscale=y1.max(),color='pink',label='MaNGA + Firefly',ls='dotted',errorbar_yloc=1.2)
plot_results(U20_pipe3d,x_th,slope,yscale=y1.max(),color='orangered',label='MaNGA + Pipe3D',ls='dashed',errorbar_yloc=0.9)
plot_results(U20_Starlight,x_th,slope,yscale=y1.max(),color='yellowgreen',label='CALIFA + Starlight',ls=(0, (3, 1, 1, 1, 1, 1)),errorbar_yloc=0.6)
plot_results(U20_granada,x_th,slope,yscale=y1.max(),color='mediumturquoise',label='SDSS DR8 + FSPS',ls='dashdot',errorbar_yloc=0.3)
plot_results(U20_portsmouth,x_th,slope,yscale=y1.max(),color='violet',label='SDSS DR8 + FSPS',ls='dashdot',errorbar_yloc=0.1)
plt.axvline(SN_early.mean()-SN_late.mean(),linestyle='-',c='k',lw=2)
plt.axvline(0,linestyle='--',c='k',lw=2,alpha=0.3)
plt.xlabel(r'HR [mag], projected from $\log_{10}(M/M_\odot)$ with U20 slope')
plt.legend(ncol=1)
plt.xlim(-0.5,0.5)
plt.ylim(0,2.3)
plt.yticks([])
# -
# ### did they underestimate the slope? --> probably yes
# +
# set up
means = [10.5, 0]
stds = [0.3, 0.1]
corr = -0.2
covs = [[stds[0]**2 , stds[0]*stds[1]*corr],
[stds[0]*stds[1]*corr, stds[1]**2]]
# expected slope
slope = corr * (stds[1]/stds[0])
x_th = np.linspace(5,15,100)
y_th = slope*(x_th-means[0]) + means[1]
plt.figure(figsize=(8,6))
plt.plot(x_th,y_th,linestyle='--',lw=3,label='expected',zorder=100)
# repeat slope estimation
slope_est = []
newdata_mean = []
for _ in tqdm(range(500)):
m = np.random.multivariate_normal(means, covs, 300).T
# raw data
plt.scatter(m[0], m[1], s=1,c='k',alpha=0.1)
# add a few random data at tail
N_new = 10
x_add = np.random.normal(8,0.5,N_new)
y_add = np.random.normal((8-10)*slope,0.1,N_new) # assume the uncertainty is larger
plt.scatter(x_add,y_add,s=1,color='orange',alpha=0.1)
xnew = [*m[0],*x_add]
ynew = [*m[1],*y_add]
# fit to new data
popt,pcov = curve_fit(lambda x,a,b: a*x+b,xnew,ynew)
x_th = np.linspace(5,15,100)
y_th = popt[0]*x_th + popt[1]
plt.plot(x_th,y_th,color='yellowgreen',lw=1,alpha=0.1)
plt.legend()
slope_est.append(popt[0])
newdata_mean.append(np.mean(y_add))
plt.xlabel('Mass (mock data)')
plt.ylabel('HR (mock data)')
plt.xlim(7,12)
# -
plt.figure(figsize=(8,8))
h = sns.jointplot(newdata_mean,slope_est)
h.ax_joint.set_xlabel('y-mean of orange population')
h.ax_joint.set_ylabel('fitted slope (green)')
| test_U20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:jupyter-talk]
# language: python
# name: conda-env-jupyter-talk-py
# ---
# ### Using Cython to improve code performance
# [Cython](https://cython.org/) is a Python-to-C compiler that allows you to write Python functions in C, using a Python-like syntax. The resulting functions can be used in Python just the way normal Python functions are used, and can provide a big performance boost, especially for numeric operations.
# %load_ext Cython
# Define a simple iterative function to compute Fibonacci numbers, and time its execution
# +
def python_fib(n):
    """Iteratively compute a Fibonacci number in pure Python.

    Starts from the pair (1, 1) and advances it n times, so
    python_fib(0) == 1, python_fib(1) == 2, python_fib(2) == 3, ...
    """
    curr = 1
    prev = 1
    steps = 0
    while steps < n:
        nxt = curr + prev
        prev = curr
        curr = nxt
        steps += 1
    return curr
# %timeit python_fib(75)
# -
# Now do the same thing in Cython, augmenting variables with type information
# + language="cython"
#
# def cython_fib(int n):
# cdef long a, b
# cdef int i
# a, b = 1, 1
# for i in range(n):
# a, b = a + b, a
# return a
# -
# %timeit cython_fib(75)
# You can call Cython functions from Python (which is the whole point)
# +
# %matplotlib inline
import pandas as pd
pd.Series([cython_fib(n) for n in range(75)]).plot(logy=True);
# -
# C's type system is lacking some Python features, so some operations will not work. For example, Python integers can be arbitrarily long:
python_fib(2000)
# Whereas C long integers have a limited range and operations outside of that range overflow:
cython_fib(2000)
# However, the performance gain is usually worth it for having to cope with some limitations.
| 4. Using Cython to improve code performance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recursive Matrix Multiplication
# ### Set values for whether you would like to generate new results or just use the JSON that has previously been generated
# +
# Set to true if you want to run the tests again. Otherwise just loads results from JSON
GENERATE_NEW_RESULTS = False
# Set to true if you want to save figures to disk. Change path as needed
SAVE_FIGURES_TO_DISK = False
FIG_SAVE_PATH = "../../thesis/figures/matmul/"
# -
# Load in packages and set options for prettier plots
# +
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
import concurrent.futures
import json
from helper_funcs import save_fig, make_cache, MORTON, ROW_ARR, BLOCK_ARR
from data_structures.morton_order import MortonOrder
from data_structures.block_array import BlockArray
from data_structures.row_major_array import RowMajorArray
from algorithms.matmul import matmul_rec
# +
matplotlib.rcParams['figure.figsize'] = (9.0, 4.8)
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')
# -
# ## Defining functions/variables to run tests
# +
def results_for_arr_and_tile_size(vals):
    """Run recursive matmul for one (n, tile_size) pair on three layouts.

    vals: (n, tile_size) tuple (tuple so it can be used with executor.map).
    Returns (stats_dict, tile_size), or (None, tile_size) when the tile is
    larger than the matrix. stats_dict maps layout name -> cache stats.
    """
    n, tile_size = vals
    # Recursion depth: how many times n can be halved before reaching tile_size.
    r = int(np.log2(n)) - int(np.log2(tile_size))
    if r < 0:
        return None, tile_size
    print(f"{n} -- {tile_size}")
    # One simulated cache per layout so their stats don't interfere.
    cs1 = make_cache()
    cs2 = make_cache()
    cs3 = make_cache()
    rnd_vals1 = np.random.rand(n, n)
    rnd_vals2 = np.random.rand(n, n)
    # Same random matrices in each layout; the second operand is placed
    # after the first via the offset so they don't overlap in the cache.
    morton1 = MortonOrder(rnd_vals1, cache=cs1)
    morton2 = MortonOrder(rnd_vals2, cache=cs1, offset=morton1.get_next_offset())
    block_arr1 = BlockArray(rnd_vals1, cache=cs2)
    block_arr2 = BlockArray(rnd_vals2, cache=cs2, offset=block_arr1.get_next_offset())
    row_arr1 = RowMajorArray(rnd_vals1, cache=cs3)
    row_arr2 = RowMajorArray(rnd_vals2, cache=cs3, offset=row_arr1.get_next_offset())
    # Warm up
    matmul_rec(morton1, morton2, r)
    matmul_rec(block_arr1, block_arr2, r)
    matmul_rec(row_arr1, row_arr2, r)
    # Reset caches so the measured run starts from a clean slate
    cs1.force_write_back()
    cs1.reset_stats()
    cs2.force_write_back()
    cs2.reset_stats()
    cs3.force_write_back()
    cs3.reset_stats()
    # Do actual test
    matmul_rec(morton1, morton2, r)
    matmul_rec(block_arr1, block_arr2, r)
    matmul_rec(row_arr1, row_arr2, r)
    # Save results
    obj = {
        MORTON: list(cs1.stats()),
        BLOCK_ARR: list(cs2.stats()),
        ROW_ARR: list(cs3.stats())
    }
    print(f"{n} -- {tile_size} finished")
    return obj, tile_size
def results_for_arr_size(n):
    """Collect cache statistics for every tile size at matrix size n.

    Fans the (n, tile_size) combinations out over a process pool and keeps
    only the combinations that produced a result (tile_size <= n).
    Returns (results_dict, n) so callers can themselves map in parallel.
    """
    print(n)
    jobs = [(n, ts) for ts in tile_sizes]
    collected = {}
    with concurrent.futures.ProcessPoolExecutor() as pool:
        for res, ts in pool.map(results_for_arr_and_tile_size, jobs):
            if res is not None:
                collected[ts] = res
    return collected, n
# +
ns = [2**i for i in range(5, 10)]
tile_sizes = [2**i for i in range(2, 10)]
print("ns:", ns)
print("ts:", tile_sizes)
# -
# ## Generating/loading in test results
# +
if GENERATE_NEW_RESULTS:
results = {}
with concurrent.futures.ProcessPoolExecutor() as executor:
for (obj, n) in executor.map(results_for_arr_size, ns):
results[n] = obj
with open('results/matrix-result.json', 'w') as f:
json.dump(results, f, indent=4)
with open('results/matrix-result.json', 'r') as f:
results = json.load(f)
# -
# ## Plotting
def get_val_arr(n, typ=MORTON, cache_level=0, stat="HIT_count"):
    """Return, per tile size, the stat of `typ` relative to RowMajorArray.

    Reads the module-level `results` dict (keyed by str(n), then tile size).
    A zero row-major denominator yields NaN for that tile size.
    """
    key = str(n)
    ratios = []
    for tile_size in results[key]:
        numer = results[key][tile_size][typ][cache_level][stat]
        denom = results[key][tile_size][ROW_ARR][cache_level][stat]
        ratios.append(float(numer) / float(denom) if denom != 0 else np.nan)
    return ratios
def make_plot(cache_level=0, stat="HIT_count"):
    """Grouped bar chart of Morton-order cache stats relative to RowMajorArray.

    Bug fix: `cache_level` is now forwarded to get_val_arr. Previously the
    plotted data always came from cache level 0 while the saved figure
    filename claimed level `cache_level + 1`.
    """
    labels = tile_sizes
    n32 = get_val_arr(32, cache_level=cache_level, stat=stat)
    n64 = get_val_arr(64, cache_level=cache_level, stat=stat)
    n128 = get_val_arr(128, cache_level=cache_level, stat=stat)
    n256 = get_val_arr(256, cache_level=cache_level, stat=stat)
    n512 = get_val_arr(512, cache_level=cache_level, stat=stat)
    x = np.arange(len(labels))  # the label locations
    width = 0.15  # the width of the bars
    fig, ax = plt.subplots()
    # Larger n cover fewer tile sizes, hence the [:len(...)] slices.
    ax.bar(x[:len(n32)] - 2*width, n32, width, label='$n=32$')
    ax.bar(x[:len(n64)] - 1*width, n64, width, label='$n=64$')
    ax.bar(x[:len(n128)] + 0*width, n128, width, label='$n=128$')
    ax.bar(x[:len(n256)] + 1*width, n256, width, label='$n=256$')
    ax.bar(x[:len(n512)] + 2*width, n512, width, label='$n=512$')
    # Add some text for labels, title and custom x-axis tick labels, etc.
    stat_simple_label = stat.lower().replace("_", " ")
    ax.set_ylabel(f'Relative {stat_simple_label}')
    ax.set_xlabel('Tile size')
    #ax.set_title(f'Relative {stat} of Morton Order vs. RowMajorArray')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend(loc=4)
    fig.tight_layout()
    save_fig(f"relative-{stat}-matrix-l{cache_level + 1}.pdf", FIG_SAVE_PATH, SAVE_FIGURES_TO_DISK)
    plt.show()
make_plot()
make_plot(stat="MISS_count")
# ## Sanity check:
# On an intuitive level, the plots might seem baffling at first. These numbers should help explain why there is such a big discrepancy between the two plots
# +
def print_some_stats(n, tile_size):
    """Print L1 hit/miss counts for the Morton vs row-major layouts."""
    entry = results[str(n)][str(tile_size)]
    print("Morton hit: ", entry[MORTON][0]["HIT_count"])
    print("RowArr hit: ", entry[ROW_ARR][0]["HIT_count"])
    print("Morton miss:", entry[MORTON][0]["MISS_count"])
    print("RowArr miss:", entry[ROW_ARR][0]["MISS_count"])
print_some_stats(512, 4)
# -
n, tile_size = 512, 64
print_some_stats(n, tile_size)
# ## Cache hits and misses
# Use `tile_size` of 4 (index `0`) in the following numbers
print(ns)
for typ in [MORTON, BLOCK_ARR]:
print(typ)
for stat in ["HIT_count", "MISS_count"]:
vals = [get_val_arr(n, typ, stat=stat)[0] for n in ns]
vals_str = ' & '.join([f"{val:.2f}" for val in vals])
print(f"{stat:10}: {vals_str}")
print()
# ## Cache hit-to-miss ratio
# Use `tile_size` of 4 (index `0`) in the following numbers
# +
def get_arr(n, typ=MORTON, cache_level=0, stat="HIT_count"):
    """Return the raw stat for the first (smallest) tile size at size n."""
    key = str(n)
    per_tile = [results[key][ts][typ][cache_level][stat] for ts in results[key]]
    return np.array(per_tile)[0]
print(f"{'':12}{ns}")
for typ in [MORTON, BLOCK_ARR, ROW_ARR]:
hits = np.array([get_arr(n, typ, stat="HIT_count") for n in ns])
misses = np.array([get_arr(n, typ, stat="MISS_count") for n in ns])
hit_to_miss = hits / misses
vals_str = ' & '.join([f"{val:.2f}" for val in hit_to_miss])
print(f"{typ:10}: {vals_str}")
# -
| src/rec-matmul.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Major version: the feature setup
# Minor version: model hypertunning
VERSION = 'v2.1'
major_VERSION = VERSION.split('.')[0]+'.0'
# # Model Details
# ## Features:
# - One hot encoded **day of week** and **month** (not year)
# - Weather feature (OHE):
# - Icons (cloudy, partial cloudy, ...)
# - Precipitates Type (None, Snow, Sleet, Rain)
# - Station info:
# - community area (OHE)
# - capacity
# - long, lat
#
# ## Target
# - Log scale/normal scale
#
# # Work Flow
# ## Training Preprocessing
# - Merge station community area (Join tables)
# - Drop id after merging
# - Add weather info (temp_high/low, and OHE ICONs and Precipitates Types)
# - Convert to numpy matrix
#
# ## Pipeline
# - OHE on date time (Remember column indices)
# - Scaling for `year, lon_ave, lat_ave, dp_max, temp_high, temp_low` (`MinMaxScaler`)
# - Regressor()
#
# ## Test Preprocessing
# - Start with Pandas template (station_id, lon_ave, lat_ave, dp_max, OHE community area)
# - Add weather info (temp_high/low, and OHE ICONs and Precipitates Types)
# - Convert to numpy matrix
#
# ## Post prediction
# - Rescale if trained on log
# - Hard cap negative (activation function)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import pickle
from tqdm import tqdm_notebook as tqdm
# ## Load data
INFO_verbose = False
# %%time
st_template = pd.read_pickle('../data/model_v1.0_template.pk')
if INFO_verbose:
st_template.info()
# +
# %%time
## load preprocessed data
if os.path.exists('../data/divvy_data_model_'+major_VERSION+'.pk'):
print("Loading from previous pickle file.")
data = pd.read_pickle('../data/divvy_data_model_'+major_VERSION+'.pk')
else:
print("Create data set for this model... ")
data_lst = []
for year in [2013, 2014, 2015, 2016, 2017, 2018]:
dt_tmp = pd.read_feather('../data/Final_Divvy_data_'+str(year)+'.feather')
data_lst.append(dt_tmp)
data = pd.concat(data_lst, ignore_index=True, sort=True) # !!! Careful with Pandas future version
data.to_pickle('../data/divvy_data_model_'+major_VERSION+'.pk')
print("Data saved to pickle file")
# -
if INFO_verbose:
data.info()
# ## Prepare Historical Data table
# - Group data by `station_id`, `month`, `year`, `total_out`
# - Calculated average of previous historical data
# - Add New station Flag
# - Fill nan with zero
# +
def _get_previous_ave(row, year):
tmp_sum = 0
tmp_count = 0
for y in range(2013, year):
if not np.isnan(row.total_out[y]):
tmp_count += 1
tmp_sum += row.total_out[y]
if tmp_count == 0:
return
return tmp_sum/tmp_count
def _check_if_new(row, year):
for y in range(2013, year):
if not np.isnan(row.total_out[y]):
return 1
return 0
# -
# %%time
if not os.path.exists('../data/historical_data.feather'):
print("Get new historical data")
historical_raw = data[['station_id', 'month', 'year', 'total_out']]\
.groupby(['station_id', 'month', 'year'])\
.mean().unstack(level=-1).reset_index().copy()
for y in tqdm([2013, 2014, 2015, 2016, 2017, 2018, 2019]):
historical_raw['hisave_'+str(y)] = historical_raw.apply(lambda row: _get_previous_ave(row, y), axis=1)
historical_raw['newstation_'+str(y)] = historical_raw.apply(lambda row: _check_if_new(row, y), axis=1)
history_df = pd.DataFrame()
for y in [2013, 2014, 2015, 2016, 2017, 2018, 2019]:
tmp = pd.DataFrame()
tmp['station_id'] = historical_raw.station_id
tmp['month'] = historical_raw.month
tmp['year'] = y
tmp['historical_ave'] = historical_raw['hisave_'+str(y)]
tmp['newstation'] = historical_raw['newstation_'+str(y)]
history_df = pd.concat([history_df, tmp], ignore_index=True, sort=False)
history_df = history_df.fillna(0)
history_df.to_feather('../data/historical_data.feather')
else:
print("Load data")
history_df = pd.read_feather('../data/historical_data.feather')
future_tmplt = history_df[history_df.year == 2019][['station_id', 'month', 'historical_ave', 'newstation']]\
.sort_values(by='month').reset_index()
future_tmplt.to_pickle('../data/future_hist.pk')
history_df.info()
# %%time
data = pd.merge(data, history_df, on=['year', 'month', 'station_id'], how='left')
if INFO_verbose:
data.info()
# ## Get target value
target_in = data.total_in
target_out = data.total_out
target_diff = data.total_out - data.historical_ave
data[data.year < 2018].year.count()
data[data.year >= 2018].year.count()
data[(data.year > 2017) & (data.month == 7)][['total_out', 'historical_ave']].mean()
# ## Prepare features
# Prescreening for useful features
feats = pd.merge(data[['station_id', 'month', 'dayofweek', 'year',
'icon_clear-day', 'icon_cloudy', 'icon_fog',
'icon_partly-cloudy-day', 'icon_rain',
'icon_sleet', 'icon_snow',
'temperatureHigh', 'temperatureLow',
'historical_ave', 'newstation',
]], st_template,
left_on='station_id', right_on='id').drop(['station_id', 'id'], axis=1)
feats.info()
# ## Reordering dataframe
# Reordering
cols = []
cols.extend(feats.columns[17:])
cols.extend(feats.columns[3:17])
cols.extend(feats.columns[:2])
feats_ro = feats[cols]
if INFO_verbose:
feats_ro.info()
feats_ro = feats_ro.fillna(0)
feats_ro.info()
# ## One hot encoding date
feats_fnl = pd.get_dummies(feats_ro, columns=['month', 'dayofweek'])
# ## Save model details into txt
# +
import io
buffer = io.StringIO()
feats_fnl.info(buf=buffer)
s = buffer.getvalue()
with open("../model_features_details/features_"+major_VERSION+'.txt', "w", encoding="utf-8") as f:
f.write(s)
# -
# ## Convert to numpy matrix
features_ = feats_fnl.to_numpy()
features_.shape
feats_fnl.info()
# ## Building pipelines
# +
import sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, max_error
# -
sklearn.__version__
# +
# %%time
model = Pipeline([
('mms', MinMaxScaler(feature_range=(0, 1))),
('rf', RandomForestRegressor(n_estimators=30, min_samples_split=5))
])
# Train Test split
split_method = "random"
if split_method == "2018":
print("Predict future")
X_train = features_[:731037]
X_test = features_[731037:]
y_train = target_out[:731037]
y_test = target_out[731037:]
elif split_method == "random":
print("Randomly split")
X_train, X_test, y_train, y_test = train_test_split(
features_,
target_out,
test_size=0.2,
random_state=42)
# -
# %%time
model.fit(X_train, y_train)
model.score(X_test, y_test)
# First check
y_pred = model.predict(X_test)
y_base = feats_ro.iloc[y_test.index]['historical_ave']
X_test.shape
print("R2 score:")
print("Model:", r2_score(y_test, y_pred))
print("Baseline:", r2_score(y_test, y_base))
print()
print("MAE:")
print("Model:", mean_absolute_error(y_test, y_pred))
print("Baseline:", mean_absolute_error(y_test, y_base))
print()
print("RMSE:")
print("Model:", np.sqrt(mean_squared_error(y_test, y_pred)))
print("Baseline:", np.sqrt(mean_squared_error(y_test, y_base)))
plt.plot(y_test, y_pred, 'o', alpha=0.01)
# plt.plot(y_test, y_base, 'or', alpha=0.01)
# Deeper Look
plt.plot(y_test, y_pred, 'o', alpha=0.01)
plt.xlim(0, 300)
plt.ylim(0, 300)
# +
# Save model, features and targets
Model_name = 'random_forest_future_'
with open('../model_data/'+Model_name+'model_'+VERSION+'.pk', 'wb') as p:
pickle.dump(model, p)
with open('../model_data/'+Model_name+'testfeature_'+VERSION+'.pk', 'wb') as ptf:
pickle.dump(X_test, ptf)
with open('../model_data/'+Model_name+'testtarget_'+VERSION+'.pk', 'wb') as ptg:
pickle.dump(y_test, ptg)
# -
# ## Grid Search
# +
# # %%time
# # Train Test split
# X_train, X_test, y_train, y_test = train_test_split(
# features_,
# target_out,
# test_size=0.2,
# random_state=42)
# print("Train/Test splitted...")
# model_gs = GridSearchCV(
# model,
# {'rf__n_estimators': [20, 50], 'rf__max_depth':[5, 10, 20]},
# cv=5,
# n_jobs=4
# )
# model_gs.fit(X_train, y_train)
# print("Best params: ", model_gs.best_params_)
# y_pred = model_gs.predict(X_test)
# plt.plot(y_test, y_pred, 'o', alpha=0.1)
# # Save model, features and targets
# Model_name = 'random_forest_'
# with open('../model_data/'+Model_name+'model_'+VERSION+'.pk', 'wb') as p:
# pickle.dump(model_gs, p)
# with open('../model_data/'+Model_name+'testfeature_'+VERSION+'.pk', 'wb') as ptf:
# pickle.dump(X_test, ptf)
# with open('../model_data/'+Model_name+'testtraget_'+VERSION+'.pk', 'wb') as ptg:
# pickle.dump(y_test, ptg)
# -
# ## Reload model
# +
# %%time
# Load previous data
Model_name = 'random_forest_more_tree_'
with open('../model_data/'+Model_name+'model_'+VERSION+'.pk', 'rb') as pp:
loaded_model = pickle.load(pp)
with open('../model_data/'+Model_name+'testfeature_'+VERSION+'.pk', 'rb') as ptfl:
loaded_test_feature = pickle.load(ptfl)
with open('../model_data/'+Model_name+'testtarget_'+VERSION+'.pk', 'rb') as ptgl:
loaded_test_target = pickle.load(ptgl)
# -
loaded_model.score(loaded_test_feature, loaded_test_target)
loaded_test_feature.shape
| machine_learning_models/ML_v2.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Lecture notes (were pasted as bare text into a code cell, which is a
# syntax error in Python; converted to comments so the cell runs):
# ADT-s
# list array, pointer
# tree BST pointer array
# ADT-s
# internal
# array amortized insert 1,1111111,n 2 1 n, worst-case
# pointer
# list -search O(n),O(logN)
import ctypes # provides low-level arrays
# + code_folding=[]
class DynamicArray:
    """A dynamic array class akin to a simplified Python list.

    Elements live in a low-level ctypes array of ``py_object`` slots.
    When the array fills up its capacity is doubled, giving amortized
    O(1) appends. ``_n`` counts stored elements; ``_capacity`` is the
    size of the backing store.
    """

    def getsize(self):
        """Return the shallow byte size of the backing array (0 if unavailable)."""
        import sys
        try:
            return sys.getsizeof(self._A)
        except Exception:  # narrowed from a bare except: backing store may be absent
            return 0

    def ToString(self):
        """Best-effort print of every slot in the backing array."""
        try:
            for i in self._A:
                print(i," ")
        except Exception:
            # Reading an unset py_object slot raises; stop printing quietly
            # (narrowed from a bare except that also swallowed KeyboardInterrupt).
            pass

    def getLength(self):
        # NOTE: len() of the ctypes array is the *capacity*, not the element
        # count -- use len(self) or len_n() for the number of stored elements.
        return len(self._A)

    def __init__(self):
        """Create an empty array."""
        self._n = 0                                    # count actual elements
        self._capacity = 1                             # default array capacity
        self._A = self._make_array(self._capacity)     # low-level array

    def _make_array(self, c):                          # nonpublic utility
        """Return new ctypes array with capacity c."""
        return (c * ctypes.py_object)()                # see ctypes documentation

    def append(self, obj):
        """Add object to end of the array (amortized O(1))."""
        if self._n == self._capacity:                  # not enough room
            self._resize(2 * self._capacity)           # so double capacity
        self._A[self._n] = obj
        self._n += 1

    def _resize(self, c):                              # nonpublic utility
        """Resize internal array to capacity c, copying existing elements."""
        B = self._make_array(c)                        # new (bigger) array
        # Debug prints kept verbatim (they show when the amortized cost hits).
        print(" şu an amortized cost işlemi ... ")
        for k in range(self._n):                       # for each existing value
            B[k] = self._A[k]
        print(" şu an move işlemi ... ")
        self._A = B                                    # use the bigger array
        self._capacity = c

    def __len__(self):
        """Return number of elements stored in the array."""
        return self._n

    def len_n(self):
        """Return number of elements stored in the array."""
        return self._n

    def __getitem__(self, k):
        """Return element at index k (0 <= k < n) or raise IndexError."""
        if not 0 <= k < self._n:
            raise IndexError('invalid index')
        return self._A[k]                              # retrieve from array

    def insert(self, k, value):
        """Insert value at index k, shifting subsequent values rightward.

        (For simplicity, assumes 0 <= k <= n.)
        """
        if self._n == self._capacity:                  # not enough room
            self._resize(2 * self._capacity)           # so double capacity
        for j in range(self._n, k, -1):                # shift rightmost first
            self._A[j] = self._A[j-1]
        self._A[k] = value                             # store newest element
        self._n += 1

    def remove(self, value):
        """Remove first occurrence of value (or raise ValueError).

        Note: the backing array is never shrunk in this version.
        """
        for k in range(self._n):
            if self._A[k] == value:                    # found a match!
                for j in range(k, self._n - 1):        # shift others to fill gap
                    self._A[j] = self._A[j+1]
                self._A[self._n - 1] = None            # help garbage collection
                self._n -= 1                           # we have one less item
                return                                 # exit immediately
        raise ValueError('value not found')            # only reached if no match
# -
c=DynamicArray()
c.getLength(), c.getsize()
c=DynamicArray()
for i in range(150):
c.append(-100)
c.getLength(), c.getsize(),c.len_n()
# +
import sys
from pympler import asizeof
s_1=sys.getsizeof(c)
s_2=asizeof.asizeof(c)
print("s_1 : {0}, s_2 : {1}".format(s_1,s_2))
# -
get_size(c)
# +
n=1000000
for i in range(n):
c.append(12)
c.append("sdfsdfsdf")
s_1 = sys.getsizeof(c)
s_2 = asizeof.asizeof(c)
print("n s_1 : {0}, s_2 : {1}".format(s_1, s_2))
# -
c._A
c=DynamicArray();
for i in range(10):
c.append(i)
# c.ToString()
print("len : {0}".format(c.getLength()),end=" ")
print("size : {0}".format(c.getsize()))
for i in range(10000):
c.append(i)
# c.ToString()
print("len : {0}".format(c.getLength()),end=" ")
print("size : {0}".format(c.getsize()))
get_size(c)
# +
def __len__(self):
"""Return number of elements stored in the array."""
return self._n
def __getitem__(self, k):
"""Return element at index k."""
if not 0 <= k < self._n:
raise IndexError('invalid index')
return self._A[k] # retrieve from array
def append(self, obj):
    """Add object to end of the array."""
    if self._n == self._capacity:
        # storage is full: grow geometrically for amortized O(1) appends
        self._resize(2 * self._capacity)
    slot = self._n
    self._A[slot] = obj
    self._n = slot + 1
def _resize(self, c):                  # nonpublic utility
    """Resize internal array to capacity c (assumed >= current count)."""
    B = self._make_array(c)            # new (bigger) array
    for k in range(self._n):           # copy each existing value across
        B[k] = self._A[k]
    self._A = B                        # use the bigger array
    self._capacity = c
def insert(self, k, value):
    """Insert value at index k, shifting subsequent values rightward."""
    # (for simplicity, we assume 0 <= k <= n in this version)
    if self._n == self._capacity:
        self._resize(2 * self._capacity)   # full, so double capacity first
    for j in reversed(range(k, self._n)):  # walk right-to-left while shifting
        self._A[j + 1] = self._A[j]
    self._A[k] = value
    self._n += 1
def remove(self, value):
    """Remove first occurrence of value (or raise ValueError)."""
    # note: we do not consider shrinking the dynamic array in this version
    for idx in range(self._n):
        if self._A[idx] != value:
            continue
        # found it: slide every later element one slot to the left
        j = idx
        while j < self._n - 1:
            self._A[j] = self._A[j + 1]
            j += 1
        self._A[self._n - 1] = None       # help garbage collection
        self._n -= 1
        return                            # done after the first match
    raise ValueError('value not found')
def testDynamicArray(n=100):
    """Append 2*scale items to the module-level DynamicArray ``c`` for each
    scale in (n, 10n, 100n) and report shallow vs. deep size after each round.

    The three formerly copy-pasted blocks are collapsed into one loop; the
    printed output is unchanged.

    NOTE(review): mutates the global ``c`` and relies on the module-level
    ``sys`` / pympler ``asizeof`` imports -- confirm that is intended.
    """
    for label, count in (('n', n), ('n*10', n * 10), ('n*100', n * 100)):
        for _ in range(count):
            c.append(12)
            c.append("sdfsdfsdf")
        s_1 = sys.getsizeof(c)      # shallow: the wrapper object only
        s_2 = asizeof.asizeof(c)    # deep: includes the backing array contents
        print("{0} s_1 : {1}, s_2 : {2}".format(label, s_1, s_2))
# testDynamicArray(1000)
# Small demo: 10 appends, then print length and capacity.
c=DynamicArray();
for i in range(10):
    c.append(i)
c.ToString()  # NOTE(review): ToString is defined outside this view
print("len : {0}".format(c.getLength()))
print("size : {0}".format(c.getsize()))
# +
import sys
def get_size(obj, seen=None):
    """Recursively find the total (deep) size of an object in bytes.

    Unlike sys.getsizeof alone, this follows dict keys/values, instance
    ``__dict__`` attributes and generic iterables.  ``seen`` collects the
    ids of visited objects so shared and self-referential structures are
    counted only once (and recursion terminates).
    """
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0  # already counted via another reference path
    # Important mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(obj_id)
    size = sys.getsizeof(obj)
    if isinstance(obj, dict):
        # generator expressions avoid building throwaway lists
        size += sum(get_size(v, seen) for v in obj.values())
        size += sum(get_size(k, seen) for k in obj.keys())
    elif hasattr(obj, '__dict__'):
        size += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum(get_size(i, seen) for i in obj)
    return size
# -
| 012-dynamic_array_amortized_cost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### setup notebook
# +
# widen jupyter notebook window
from IPython.display import display, HTML  # IPython.core.display is deprecated
display(HTML("<style>.container {width:100% !important; }</style>"))

# check environment (raises KeyError when not run inside a conda environment)
import os
print(f"Conda Environment: {os.environ['CONDA_DEFAULT_ENV']}")
# -
# ### import core library
import sys
# machine-specific repo location so NBAP resolves; brittle outside this box
sys.path.append('/media/rich/Home_Linux_partition/github_repos/')
# ### import files
# %load_ext autoreload
# %autoreload 2
from NBAP import import_data
# + active=""
# from pathlib import Path
#
# dir_S2p = Path(r'/media/rich/bigSSD/res2p/scanimage data/round 5 experiments/mouse 2_6/20210417/suite2p/plane0')
#
# dir_ROIClassifier = Path(r'/media/rich/bigSSD/res2p/scanimage data/round 5 experiments/mouse 2_6/20210418/analysis_lastNight')
#
# dir_ws = Path(r'/media/rich/bigSSD/res2p/wavesurfer data/round 5 experiments/mouse 2_6/20210417')
# fileName_ws = Path(r'exp_0001.h5')
#
# dir_behaviorSignals = Path(r'/media/rich/bigSSD/analysis_data/mouse 2_6/20210417/postHoc_analysis')
# fileName_behaviorSignals = Path(r'signals_S2pAligned.npy')
# # fileName_behaviorSignals = Path(r'alignment_data.npy')
#
# dir_faceRhythm = Path(r'/media/rich/bigSSD/analysis_data/mouse 2_6/20210417/faceRhythm_data/run1_cam3/data')
# fileName_faceRhythm = Path('factors_spectral_temporal_interp2.npy')
# +
from pathlib import Path
# Absolute paths to one session's raw/derived data (machine-specific).
dir_S2p = Path(r'/media/rich/bigSSD/res2p/scanimage data/round 5 experiments/mouse 2_6/20210417/suite2p/plane0')
dir_ROIClassifier = Path(r'/media/rich/bigSSD/res2p/scanimage data/round 5 experiments/mouse 2_6/20210418/analysis_lastNight')
dir_ws = Path(r'/media/rich/bigSSD/res2p/wavesurfer data/round 5 experiments/mouse 2_6/20210417')
fileName_ws = Path(r'exp_0001.h5')
dir_cameraCSV = Path(r'/media/rich/bigSSD/res2p/Camera data/round 5 experiments/mouse 2_6/20210417/cam3/experiment')
fileName_cameraCSV = Path(r'times_cam32021-04-18T00_41_02.csv')
dir_faceRhythm = Path(r'/media/rich/bigSSD/analysis_data/mouse 2_6/20210417/FR_run1_vqt/data')
fileName_faceRhythm = Path('factors_spectral_temporal_interp2.npy')
# +
# Load suite2p outputs, ROI classifier results, wavesurfer traces, camera
# frame timestamps/GPIO, and face-rhythm temporal factors.
F , Fneu , iscell , ops , spks , stat , num_frames_S2p = import_data.import_S2p(dir_S2p)
IsCell_ROIClassifier , ROI_Classifier_manual_selection_vars = import_data.import_roiClassifier(dir_ROIClassifier)
ws_data = import_data.import_ws(dir_ws / fileName_ws)
cameraCSV , signal_GPIO = import_data.import_cameraCSV(dir_cameraCSV / fileName_cameraCSV)
temporalFactors_faceRhythm = import_data.import_temporalFactorsFR(dir_faceRhythm / fileName_faceRhythm)
# -
# ### align data
# %load_ext autoreload
# %autoreload 2
from NBAP import align_data
# +
## == ALIGN DATA ==
# %matplotlib inline
plot_pref = False
# Wavesurfer channel indices used below (as wired in this rig -- confirm):
# 0 = camera pulses, 2 = treadmill, 3 = licks, 4 = rewards, 6 = Y-galvo flyback.
ws_galvoFlyBackTrace = ws_data[6,:]
# Y-galvo flybacks mark 2p frame boundaries -> per-frame wavesurfer times.
ws_YGalvoFlybacks_bool_wsTime, ws_frameTimes_wsTime, ws_samples_per_S2p_frame_rough = align_data.align_ws_toS2p(ws_galvoFlyBackTrace,
                                                                                                               num_frames_S2p,
                                                                                                               plot_pref)
# Threshold lick sensor and resample onto suite2p frame indices.
ws_licks = ws_data[3,:]
thresh_licks = -0.2
ws_licks_bool_wsTime, ws_licks_bool_S2pInd, ws_lickTimes_S2pInd = align_data.align_licks_toS2p(ws_licks,
                                                                                               thresh_licks,
                                                                                               ws_frameTimes_wsTime,
                                                                                               num_frames_S2p,
                                                                                               plot_pref)
# Same for reward TTLs.
ws_rewards = ws_data[4,:]
thresh_rewards = 1
ws_rewards_bool_wsTime, ws_rewards_bool_S2pInd, ws_rewardTimes_S2pInd = align_data.align_rewards_toS2p(ws_rewards,
                                                                                                       thresh_rewards,
                                                                                                       ws_frameTimes_wsTime,
                                                                                                       num_frames_S2p,
                                                                                                       plot_pref)
# Continuous treadmill signal, downsampled onto the 2p frame clock.
ws_treadmill = ws_data[2,:]
ws_treadmill_S2pInd = align_data.align_treadmill_toS2p(ws_treadmill,
                                                       ws_frameTimes_wsTime,
                                                       num_frames_S2p,
                                                       ws_samples_per_S2p_frame_rough,
                                                       plot_pref)
# Camera exposure pulses from the camera's own GPIO log...
signal_GPIO_bool_camTime , signal_GPIO_camTimes = align_data.extract_camPulses_camIdx(signal_GPIO,
                                                                                     plot_pref)
# ...and the same pulses as recorded on wavesurfer.
ws_camPulses = ws_data[0,:]
ws_camSignal_bool_wsTime , ws_camSignal_wsTimes = align_data.align_ws_camPulses_toWS(ws_camPulses,
                                                                                    plot_pref)
# Camera CSV timestamps -> absolute seconds, then matched to wavesurfer time.
camTimeDates = cameraCSV[:,3]
camTimes_absolute = align_data.convert_camTimeDates_toAbsoluteSeconds(camTimeDates)
camTimes_wsInd, camTimes_wsInd_rounded, first_camPulse_camIdx, last_camPulse_camIdx = align_data.align_camFrames_toWS(signal_GPIO_camTimes,
                                                                                                                     camTimes_absolute,
                                                                                                                     ws_camSignal_wsTimes)
# Finally: face-rhythm temporal factors resampled onto suite2p frame indices.
camSignal_s2pInd , first_s2pIdx_usable = align_data.align_camSignal_toS2p_andToWS(temporalFactors_faceRhythm,
                                                                                 camTimes_wsInd,
                                                                                 len(signal_GPIO),
                                                                                 ws_frameTimes_wsTime,
                                                                                 first_camPulse_camIdx,
                                                                                 last_camPulse_camIdx,
                                                                                 downsample_factor=None,
                                                                                 plot_pref=False)
# -
# -
# ### visualize the wavesurfer data
# +
# show wavesurfer data
from matplotlib import pyplot as plt
# %matplotlib notebook
# One subplot per wavesurfer channel, sharing the x (time/sample) axis.
fig , axs = plt.subplots(ws_data.shape[0] , sharex=True)
for ii in range(ws_data.shape[0]):
    axs[ii].plot(ws_data[ii,:] , linewidth=0.2)
# -
# # Saving and Loading
# ### Saving
# +
import datetime
import numpy as np

# Behavioral signals resampled onto the suite2p frame index, for saving.
# NOTE(review): the original literal listed "ws_rewards_bool_S2pInd" twice;
# Python silently keeps only one entry for a duplicated key, so dropping the
# duplicate leaves the dict unchanged.
signals_S2pAligned = {
    "ws_licks_bool_S2pInd": ws_licks_bool_S2pInd,
    "ws_rewards_bool_S2pInd": ws_rewards_bool_S2pInd,
    "ws_treadmill_S2pInd": ws_treadmill_S2pInd,
    "camSignal_s2pInd": camSignal_s2pInd,
}
# Full record of all alignment intermediates (plus thresholds and a
# timestamp) so the alignment can be reloaded without recomputing.
alignment_data = {
    "ws_YGalvoFlybacks_bool_wsTime": ws_YGalvoFlybacks_bool_wsTime,
    "ws_frameTimes_wsTime": ws_frameTimes_wsTime,
    "ws_samples_per_S2p_frame_rough": ws_samples_per_S2p_frame_rough,
    "thresh_licks": thresh_licks,
    "ws_licks_bool_wsTime": ws_licks_bool_wsTime,
    "ws_licks_bool_S2pInd": ws_licks_bool_S2pInd,
    "ws_lickTimes_S2pInd": ws_lickTimes_S2pInd,
    "thresh_rewards": thresh_rewards,
    "ws_rewards_bool_wsTime": ws_rewards_bool_wsTime,
    "ws_rewards_bool_S2pInd": ws_rewards_bool_S2pInd,
    "ws_rewardTimes_S2pInd": ws_rewardTimes_S2pInd,
    "ws_treadmill_S2pInd": ws_treadmill_S2pInd,
    "signal_GPIO_bool_camTime": signal_GPIO_bool_camTime,
    "signal_GPIO_camTimes": signal_GPIO_camTimes,
    "ws_camSignal_bool_wsTime": ws_camSignal_bool_wsTime,
    "ws_camSignal_wsTimes": ws_camSignal_wsTimes,
    "camTimes_absolute": camTimes_absolute,
    "camTimes_wsInd": camTimes_wsInd,
    "camTimes_wsInd_rounded": camTimes_wsInd_rounded,
    "first_camPulse_camIdx": first_camPulse_camIdx,
    "last_camPulse_camIdx": last_camPulse_camIdx,
    "camSignal_s2pInd": camSignal_s2pInd,
    "first_s2pIdx_usable": first_s2pIdx_usable,
    # when this alignment was produced
    "date_now": np.array(datetime.datetime.now() , dtype='datetime64')
}
# +
from pathlib import Path
# NOTE(review): save dir says session 20210409 but the data above was loaded
# from 20210417 -- confirm this is not a stale path.
dir_save = Path('/media/rich/bigSSD/analysis_data/mouse 2_6/20210409/postHoc_analysis')
fileName_save = 'signals_S2pAligned'
fileSuffix_save = '.npy'
path_save = dir_save / (fileName_save + fileSuffix_save)
print(f'Saving to: {path_save}')
np.save(path_save , signals_S2pAligned)  # dict pickled inside an .npy
fileName_save = 'alignment_data'
fileSuffix_save = '.npy'
path_save = dir_save / (fileName_save + fileSuffix_save)
print(f'Saving to: {path_save}')
np.save(path_save , alignment_data)
# -
# -
# ### Loading
# +
from pathlib import Path
import numpy as np
dir_load = Path('/media/rich/bigSSD/analysis_data/mouse 2_6/20210417/postHoc_analysis')
fileName_load = 'alignment_data'
fileSuffix_load = '.npy'
path_load = dir_load / (fileName_load + fileSuffix_load)
# path_load = dir_load / fileName_load
# allow_pickle is required because the .npy wraps a pickled dict;
# only load files you trust.
alignment_data = np.load(path_load , allow_pickle=True)
# this line of code is reckless but useful. It imports all of the key-value pairs in a dict as variables
locals().update(alignment_data[()])
# -
# ---
# ---
# # Optional: align camera tensor (face-rhythm tensor) to S2p
# ### Import .nwb
# +
# %load_ext autoreload
# %autoreload 2
from NBAP import helpers
import pynwb
import numpy as np
# -
nwb_path = r'/media/rich/bigSSD/analysis_data/mouse 2_6/20210417/FR_run1_vqt/data/session.nwb'
helpers.dump_nwb(nwb_path)  # print the file's structure for inspection
# Read the CQT spectrogram while the file is open (data is lazy-loaded).
with pynwb.NWBHDF5IO(nwb_path, 'r') as io:
    nwbfile = io.read()
    face_array = np.array(nwbfile.processing['Face Rhythm']['CQT']['Sxx_allPixels_norm'].data)
# Stack the two trailing components along the first axis.
face_array = np.concatenate((face_array[...,0], face_array[...,1]), axis=0)
# face_tensor = torch.tensor(nwbfile.processing['Face Rhythm']['CQT']['Sxx_allPixels_norm'].data, dtype=torch.float32, device='cpu')
# face_tensor = torch.tensor(nwbfile.processing['Face Rhythm']['CQT']['Sxx_allPixels_norm'].data, dtype=torch.float32)
plt.figure()
plt.plot(face_array[0,0,:])  # sanity check: first pixel/frequency trace
# ## Perform alignment
# Resample the face-rhythm tensor onto suite2p frame indices
# (time axis moved first via transpose), downsampling by 4.
faceArray_s2pInd_ds , first_s2pIdx_usable = align_data.align_camSignal_toS2p_andToWS(face_array[:,:,:].transpose(2,0,1),
                                                                                    camTimes_wsInd,
                                                                                    len(signal_GPIO),
                                                                                    ws_frameTimes_wsTime,
                                                                                    first_camPulse_camIdx,
                                                                                    last_camPulse_camIdx,
                                                                                    downsample_factor=4,
                                                                                    plot_pref=False)
# +
# %load_ext autoreload
# %autoreload 2
from basic_neural_processing_modules import h5_handling
import h5py
import numpy as np
# -
# Save as float32; write_mode 'w-' fails if the file already exists.
h5_handling.write_dict_to_h5(r'/media/rich/bigSSD/analysis_data/mouse 2_6/20210417/FR_run1_vqt/data/faceArray_s2pInd_ds.h5', {'faceArray_s2pInd_ds': np.single(faceArray_s2pInd_ds)}, write_mode='w-')
# ---
# ---
| .ipynb_checkpoints/align_data-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="5AldI23FZmMX"
import torch
import numpy as np
import pandas as pd
import sklearn
import warnings
warnings.filterwarnings('ignore')
from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_equal, assert_almost_equal
from pandas.testing import assert_frame_equal
from sklearn.tree import DecisionTreeRegressor as DTR, DecisionTreeClassifier as DTC
from sklearn.neighbors import KNeighborsRegressor as KNR
from sklearn.linear_model import LinearRegression as LinReg
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_regression, make_classification
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, BaggingClassifier
from sklearn.metrics import mean_squared_error as MSE
# + [markdown] colab_type="text" id="Jvls9GQxWK5O"
# # SLIDE (1) Bootstrap.
# + [markdown] colab_type="text" id="9POKe84XWK6A"
# На вход массив чисел $X$ и число бутстрепных выборок $B$. Необходимо реализовать свой бутстреп и найти матожидание и стандартную ошибку у бутстрепных выборок.
#
# ### Sample 1
# #### Input:
# ```python
# X = np.array([37,43,38,36,17,40,40,45,41,84])
# B = 100000
# ```
# #### Output:
# ```python
# 42.1, 4.56
# ```
#
# + [markdown] colab_type="text" id="oNTDVikgWK6F"
# # TASK
# + colab={} colab_type="code" id="_awC3d6CWK6I"
import numpy as np
from scipy.stats import sem # ищет SE среднего
def get_stats(X: np.array, B: int) -> tuple:
    """Draw B bootstrap resamples of X (with replacement) and return the
    average of the resample means and the average of their standard errors."""
    resamples = [np.random.choice(X, len(X), True) for _ in range(B)]
    means = [resample.mean() for resample in resamples]
    standard_errors = [sem(resample) for resample in resamples]
    return np.mean(means), np.mean(standard_errors)
# -
# # OPEN TESTS
# +
######################################################
X = np.array([37,43,38,36,17,40,40,45,41,84])
B = 10000
mean, se = get_stats(X, B)
print(mean)
print(se)
# stochastic test: with B = 10000 these tolerances hold with high probability
assert np.abs(mean - 42.1) < 0.05
assert np.abs(se - 4.56) < 0.03
######################################################
print('Well Done!')
# -
# # SLIDE (1) Bias-variance
# На вход подается **один** объект $(x, y)$ и список из нескольких **обученных** моделей.
#
# Необходимо найти $error$, $bias^2$, $variance$ для данного объекта.
#
# Теперь все аккуратно запишем, чтобы не запутаться.
#
# * $(x, y)$ - тестовая выборка
# * $a_1(\cdot), \ldots, a_M(\cdot)$ - модели (это не обученные на бутстрепе модели, а просто возможные модели из пространства $\mathbb{A}$, которое мы выбрали)
#
# Как настоящие статистики мы можем ~~забить~~ оценить матожидание как среднее. **Это не смешанная модель, а именно оценка матожидания через среднее**
# $$\mathbb{E}a(x) = \frac{1}{M}\sum_{i=1}^{M}a_i(x)$$
#
# **Error** (берем матожидание от квадрата разности)
#
# $$error = \mathbb{E}_{a}(a(x)-y)^2 = \frac{1}{M}\sum_{i=1}^{M}(a_i(x) - y)^2$$
#
# **Bias** (заметьте, что возвращаем квадрат bias, а не просто bias)
#
# $$bias^2 = \Big(y - \mathbb{E}_{a}[a(x)]\Big)^2 = \Big(y - \frac{1}{M}\sum_{i=1}^{M}a_i(x)\Big)^2$$
#
#
# **Variance** (ищем смещенную оценку)
#
# $$variance = \mathbb{D}_{a}a(x)= \mathbb{E}_{a}(a(x) - \mathbb{E}_{a}a(x))^2 = \frac{1}{M}\sum_{i=1}^{M}\Big(a_i(x)-\frac{1}{M}\sum_{r=1}^{M}a_r(x)\Big)^2$$
#
# ### Sample 1
# #### Input:
# ```python
# x, y = np.array([[0,0,0]]), 0
# estimators = [DecisionTreeRegressor(max_depth=3, random_state=1), #already fitted estimators
# DecisionTreeRegressor(max_depth=5, random_state=1)]
# ```
# #### Output:
# ```python
# error, bias2, var = 3.574, 3.255, 0.319
# ```
# # TASK
# +
import numpy as np
def bias_variance_decomp(x_test: np.array, y_test: int, estimators: list) -> tuple:
    """Estimate error, squared bias and variance at a single test point,
    treating the fitted ``estimators`` as samples from the model space
    (expectations over models are estimated by simple averages)."""
    preds = np.array([model.predict(x_test) for model in estimators])
    mean_pred = preds.mean(axis=0)                 # E_a[a(x)] estimate
    error = np.mean((preds - y_test) ** 2, axis=0)
    bias2 = (y_test - mean_pred) ** 2
    var = np.mean((preds - mean_pred) ** 2, axis=0)
    return error[0], bias2[0], var[0]
# -
# # OPEN TESTS
# +
def generate(n_samples, noise, f):
    # NOTE(review): the `noise` argument is accepted but never used here.
    X = np.linspace(-4, 4, n_samples)
    y = f(X)
    X = X.reshape((n_samples, 1))
    return X, y
######################################################
n_train = 150
noise = 0.1
# Generate data
def f(x):
    # double-bump target function used throughout these tests
    x = x.ravel()
    return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
X, y = generate(n_samples=n_train, noise=noise, f=f)
estimators = [DTR(max_depth=2, random_state=1).fit(X, y),
              DTR(max_depth=4, random_state=1).fit(X, y)]
x, y = np.array([[2]]), 1.5
error, bias, var = bias_variance_decomp(x, y, estimators)
assert_array_almost_equal(np.array([error, bias, var]),
                          np.array([0.108, 0.083, 0.025]), decimal=3)
x, y = np.array([[-0.7]]), 0.8
error, bias, var = bias_variance_decomp(x, y, estimators)
assert_array_almost_equal(np.array([error, bias, var]),
                          np.array([0.045, 0.002, 0.043]), decimal=3)
######################################################
X, y = make_regression(n_samples=1000, n_features=3, n_informative=3, bias=2, noise=10,
                       n_targets=1, shuffle=False, random_state=10)
estimators = [DTR(max_depth=3, random_state=1).fit(X, y),
              DTR(max_depth=5, random_state=1).fit(X, y)]
x, y = np.array([[0,0,0]]), 0
error, bias, var = bias_variance_decomp(x, y, estimators)
assert_array_almost_equal(np.array([error, bias, var]),
                          np.array([3.574, 3.255, 0.319]), decimal=3)
print('Well Done')
# -
# -
# # SLIDE (1) Bias-variance v2
# А теперь тоже самое, только для нескольких объектов
#
# На вход подается тестовая выборка объект $(X_test, y_test)$ и список из нескольких **обученных** моделей.
#
# Необходимо найти $error$, $bias^2$, $variance$, $noise$ для данного объекта.
#
# $$error = \mathbb{E}_{x,y}\mathbb{E}_{a}(a(x)-y)^2 = \frac{1}{N}\sum_{i=1}^{N}\frac{1}{M}\sum_{j=1}^{M}(a_j(x_i) - y_i)^2$$
#
# $$bias^2 = \mathbb{E}_{x,y}\Big(y - \mathbb{E}_{a}[a(x)]\Big)^2 = \frac{1}{N}\sum_{i=1}^{N}\Big(y_i - \frac{1}{M}\sum_{j=1}^{M}a_j(x_i)\Big)^2$$
#
# $$variance = \mathbb{E}_{x,y}\mathbb{D}_{a}a(x)= \mathbb{E}_{x,y}\mathbb{E}_{a}(a(x) - \mathbb{E}_{a}a(x))^2 = \frac{1}{N}\sum_{i=1}^{N}\frac{1}{M}\sum_{j=1}^{M}\Big(a_j(x_i)-\frac{1}{M}\sum_{r=1}^{M}a_r(x_i)\Big)^2$$
#
#
# ### Sample 1
# #### Input:
# ```python
# x = np.array([[ 0, 0, 0],
# [0.1, 0.1, 0.1]])
# y = np.array([0, 0.1])
#
# estimators = [DecisionTreeRegressor(max_depth=3, random_state=3),
# DecisionTreeRegressor(max_depth=5, random_state=3)]
# ```
# #### Output:
# ```python
# error, bias2, var = 3.399, 3.079, 0.319
# ```
# # TASK
# +
import numpy as np
def bias_variance_decomp2(x_test:np.array, y_test:np.array, estimators:list)->tuple:
error = 0
bias2 = 0
var = 0
obj_count = x_test.shape[0]
est_count = len(estimators)
est_mean = 0
for i in range(obj_count):
x = x_test[i].reshape(1, -1)
y = y_test[i]
bias = 0
for estimator in estimators:
prediction = estimator.predict(x)
error += pow(prediction - y, 2)
bias += prediction
est_mean = 0
for est in estimators:
est_mean += est.predict(x)
est_mean /= est_count
var += pow(prediction - est_mean, 2)
bias /= est_count
bias = pow(y - bias, 2)
bias2 += bias
error /= est_count
error /= obj_count
bias2 /= obj_count
var /= est_count
var /= obj_count
return error[0], bias2[0], var[0]
# -
# # OPEN TESTS
# +
def generate(n_samples, noise, f):
    # NOTE(review): the `noise` argument is accepted but never used here.
    X = np.linspace(-4, 4, n_samples)
    y = f(X)
    X = X.reshape((n_samples, 1))
    return X, y
######################################################
n_train = 150
noise = 0.1
# Generate data
def f(x):
    # same double-bump target as the previous test
    x = x.ravel()
    return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
X, y = generate(n_samples=n_train, noise=noise, f=f)
estimators = [DTR(max_depth=2, random_state=1).fit(X, y),
              DTR(max_depth=4, random_state=1).fit(X, y)]
x = np.array([[2], [-0.7]])
y = np.array([1.5, 0.8])
error, bias, var = bias_variance_decomp2(x, y, estimators)
# multi-point result must equal the average of the two single-point results
assert_array_almost_equal(np.array([error, bias, var]),
                          (np.array([0.108, 0.083, 0.025]) + np.array([0.045, 0.002, 0.043])) / 2, decimal=3)
######################################################
X, y = make_regression(n_samples=1000, n_features=3, n_informative=3, bias=2, noise=10,
                       n_targets=1, shuffle=False, random_state=10)
estimators = [DTR(max_depth=3, random_state=1).fit(X, y),
              DTR(max_depth=5, random_state=1).fit(X, y)]
x = np.array([[ 0, 0, 0]])
y = np.array([0])
error, bias, var = bias_variance_decomp2(x, y, estimators)
assert_array_almost_equal(np.array([error, bias, var]),
                          np.array([3.574, 3.255, 0.319]), decimal=3)
x = np.array([[ 0, 0, 0],
              [0.1, 0.1, 0.1]])
y = np.array([0, 0.1])
error, bias, var = bias_variance_decomp2(x, y, estimators)
assert_array_almost_equal(np.array([error, bias, var]),
                          np.array([3.399, 3.079, 0.319]), decimal=3)
print('Well Done')
# -
# # SLIDE (2) Bagging
# На вход подается некий **необученный** алгоритм регрессии, тренировочная и тестовая выборки и число бутстрепных выборок. Необходимо
# * бутстрепом сделать несколько выборок $X_1, \ldots, X_B$
# * обучить несколько алгоритмов на этих выборках: $a_1(\cdot), \ldots, a_B(\cdot)$
# * реализовать бэггинг этого алгоритма и найти собственно предсказания, $error$, $bias^2$ и $variance$.
#
# Вот теперь аккуратно. Это - **не матожидание**! Это модель такая.
# $$a(x) = \frac{1}{B}\sum_{b=1}^{B}a_b(x)$$
#
# А вот ее матожидание равно для всех алгоритмов:
# $$\mathbb{E}_aa(x) = \mathbb{E}_a\frac{1}{B}\sum_{b=1}^{B}a_b(x) = \mathbb{E}_aa_1(x)$$
#
# Но так как теперь, нам нужно посчитать матожидание, мы воспользуемся нашим множеством алгоритмов, обученных на бутстрепе, чтобы получить оценку матожидания единичного алгоритма.
#
# $$\mathbb{E}_aa_1(x) = \frac{1}{B}\sum_{j=1}^{B}a_j(x)$$
#
# Остальные формулы берутся из предыдущей задачи.
#
# P.S.
# * Так как тут есть вероятности, в целом тесты могут `редко` не взлететь. Перезашлите задачу в этом случае.
#
# ### Sample 1
# #### Input:
# ```python
# estimator = DecisionTreeRegressor(max_depth=1)
# X_train = np.array([[1, 1], [2, 2]])
# y_train = np.array([1, 2])
# X_test = np.array([[0, 0], [4, 4], [8, 8]])
# y_test = np.array([0, 4, 8])
#
# B = 10
# ```
# #### Output:
# ```python
# y_pred = np.array([3.708, 6.016])
# error = 3.5
# bias^2 = 0.1
# var = 3.5
# ```
# # TASK
# +
import numpy as np
from sklearn.base import clone
from sklearn.tree import DecisionTreeRegressor as DTR
from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_equal, assert_almost_equal
from pandas.testing import assert_frame_equal
def bagging(estimator, X_train, y_train, X_test, y_test, boot_count):
    """Bag ``boot_count`` bootstrap-trained clones of ``estimator`` and
    return the averaged prediction on X_test plus loss / bias^2 / variance
    estimates against y_test."""
    obj_count = X_train.shape[0]  # NOTE(review): unused local
    X_samples, y_samples = get_samples(X_train, y_train, boot_count)
    estimators = []
    predicts = []
    for i in range(boot_count):
        new_est = clone(estimator)                # fresh unfitted copy
        new_est.fit(X_samples[i], y_samples[i])
        predict = new_est.predict(X_test)
        predicts.append(predict)
        estimators.append(new_est)
    predicts = np.array(predicts)                 # shape (boot_count, n_test)
    n = predicts.shape[0]
    y_pred = predicts.sum(axis=0)/n               # the bagged (averaged) model
    loss = 0
    bias = 0
    var = 0
    m = y_test.shape[0]
    # Cache per-object, per-model predictions for the bias/variance loops.
    # NOTE(review): these duplicate rows of `predicts` computed above.
    predictions = {}
    for i in range(m):
        x = np.array([X_test[i]])
        predictions[i] = {}
        for j in range(n):
            pred = estimators[j].predict(x)
            predictions[i][j] = pred
    # loss = E_{x,y} E_a (a(x) - y)^2
    for predict in predicts:
        temp = 0
        for i in range(m):
            temp += (predict[i] - y_test[i])**2
        temp /= m
        loss += temp
    loss /= n
    # bias^2 = E_{x,y} (y - E_a a(x))^2
    for i in range(m):
        yt = y_test[i]
        x = X_test[i]
        temp = 0
        for j in range(boot_count):
            pred = predictions[i][j]
            temp += pred
        temp /= boot_count
        bias = bias + (yt - temp)**2
    bias /= m
    # variance = E_{x,y} E_a (a(x) - E_a a(x))^2
    # NOTE(review): the inner-mean is recomputed per model (O(n^2) here).
    for i in range(m):
        temp = 0
        x = X_test[i]
        for j in range(n):
            pred = predictions[i][j]
            temp2 = 0
            for r in range(n):
                temp2 += predictions[i][r]
            temp2 /= n
            temp = temp + (pred - temp2)**2
        temp /= n
        var += temp
    var /= m
    return y_pred, loss, bias, var
def get_samples(X_test, y_test, boot_count):
    """Draw ``boot_count`` bootstrap resamples (with replacement) of the
    given data, returned as parallel lists of row lists.

    Bug fix: the sample size was previously taken from the *global*
    ``X_train`` instead of the function's own argument, which only worked
    by accident when both happened to have the same length.
    """
    obj_count = X_test.shape[0]
    X_samples = []
    y_samples = []
    for _ in range(boot_count):
        indices = np.random.choice(obj_count, obj_count, True)
        X_samples.append([X_test[index] for index in indices])
        y_samples.append([y_test[index] for index in indices])
    return X_samples, y_samples
# -
# # OPEN TESTS
# +
from sklearn.model_selection import train_test_split
estimator = DTR(max_depth=2)
X_train = np.array([[0, 0], [1, 1], [5, 5], [8, 8], [10, 10]])
y_train = np.array([0, 1, 5, 8, 10])
X_test = np.array([[4, 4], [6, 6]])
y_test = np.array([4, 6])
B = 100
y_pred, loss, bias, var = bagging(estimator, X_train, y_train, X_test, y_test, boot_count=B)
# Yes, I know these tolerances are very coarse -- it's a toy example on very little data
assert_array_almost_equal(y_pred, np.array([4, 6]), decimal=0)
assert_almost_equal(loss, 3.7, decimal=0)
assert_almost_equal(bias, 0.1, decimal=1)
assert_almost_equal(var, 3.7, decimal=0)
######################################################
B = 10  # NOTE(review): unused -- the call below hard-codes boot_count=200
# NOTE(review): load_boston was removed from scikit-learn in 1.2; this cell
# only runs on scikit-learn < 1.2.
from sklearn.datasets import load_boston
X, y = load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3,
                                                    random_state=123,
                                                    shuffle=True)
tree = DTR(max_depth=7)
y_pred, loss, bias, var = bagging(
    tree, X_train, y_train, X_test, y_test, boot_count=200)
assert_almost_equal(loss, 32, decimal=0)
assert_almost_equal(bias, 14, decimal=0)
assert_almost_equal(var, 18, decimal=0)
print('Well Done!')
# -
# # SLIDE (2) RF Classification
# Осталось переделать чуток предыдущую задачу в `RandomForest`.
# Но теперь мы наконец попробуем классификацию. (Пока только бинарную)
#
# План
# * Также делаем бутстрепные выборки
# * Бэггинг теперь будет только по деревьям классификации
# * Будем передавать параметр `n_estimators`, `max_depth` и `max_features`
#
# Как выбирать ответ в задаче классификации?
# * Для каждого внутреннего дерева решений находим вероятности обоих классов для каждого объекта $X_test$:
# * Вызываем `predict_proba` у `DecisionTreeClassifier`
# * Усредняем вероятности класса и объекта по деревьям:
# * $P(n_{class}=d, object=x_k) = \frac{1}{B}\sum_{i=1}^{B}P(n_{class}=d, object=x_k, tree=b_i)$
# * Для каждого объекта выбираем тот класс, у которого выше вероятность
#
#
#
# ### Sample 1
# #### Input:
# ```python
# X_train = np.array([[0, 0], [4, 4], [5, 5], [10, 10]])
# y_train = np.array([0, 0, 1, 1])
# X_test = np.array([[3, 3], [6, 6]])
# y_test = np.array([0, 1])
#
# B = 1000
# ```
# #### Output:
# ```python
# model.predict(X_test) == np.array([0, 1])
# ```
# # TASK
# +
from sklearn.tree import DecisionTreeClassifier as DTC
class MyRFC():
    """Minimal random-forest binary classifier: decision trees fit on
    bootstrap resamples, with per-class probabilities averaged over trees."""

    def __init__(self, n_estimators=10, max_features=None, max_depth=None):
        self.n = n_estimators
        self.estimators_ = [DTC(max_depth=max_depth, max_features=max_features)
                            for _ in range(n_estimators)]

    def fit(self, X_train: np.array, y_train: np.array):
        """Fit each tree on its own bootstrap resample; return self."""
        X_samples, y_samples = self._get_samples(X_train, y_train, self.n)
        for est, Xs, ys in zip(self.estimators_, X_samples, y_samples):
            est.fit(Xs, ys)
        return self

    def predict(self, X_test) -> np.array:
        """Return class 0/1 per row; ties (P(0) == 0.5) go to class 0."""
        proba = self.predict_proba(X_test)
        return np.where(proba[:, 0] >= 0.5, 0, 1)

    def predict_proba(self, X_test) -> np.array:
        """Mean over trees of per-class probabilities, shape (n, 2)."""
        n = X_test.shape[0]
        acc = np.zeros((n, 2))
        for est in self.estimators_:
            proba = est.predict_proba(X_test)
            # A tree fit on a one-class bootstrap sample reports a single
            # column; route each column to the class it actually represents.
            # (Bug fix: the old padding assumed the missing class was 1,
            # which mislabeled trees that only ever saw class 1.)
            for col, cls in enumerate(est.classes_):
                acc[:, int(cls)] += proba[:, col]
        return acc / self.n

    def _get_samples(self, X_train, y_train, boot_count):
        """Draw boot_count bootstrap resamples (with replacement).

        Bug fix: sample size now comes from the argument, not the global
        X_train that the old code accidentally referenced.
        """
        obj_count = X_train.shape[0]
        X_samples, y_samples = [], []
        for _ in range(boot_count):
            indices = np.random.choice(obj_count, obj_count, True)
            X_samples.append([X_train[i] for i in indices])
            y_samples.append([y_train[i] for i in indices])
        return X_samples, y_samples
# -
# # OPEN TEST
# +
######################################################
X_train = np.array([[0, 0], [4, 4], [5, 5], [10, 10]])
y_train = np.array([0, 0, 1, 1])
X_test = np.array([[3, 3], [6, 6], [2, 2]])
y_test = np.array([0, 1, 0])
B = 1000  # NOTE(review): unused in this cell
y_pred_my = MyRFC(n_estimators = 2, max_depth=3).fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred_my, np.array([0, 1, 0]))
######################################################
from random import gauss  # NOTE(review): unused import
from sklearn.metrics import accuracy_score
# Two noisy concentric circles -> a non-linearly-separable binary problem.
num_samples = 1000
theta = np.linspace(0, 2*np.pi, num_samples)
r1 = 1
r2 = 2
rng = np.random.RandomState(1)
circle = np.hstack([np.cos(theta).reshape((-1, 1)) + (rng.randn(num_samples)[:,np.newaxis] / 8),
                    np.sin(theta).reshape((-1, 1)) + (rng.randn(num_samples)[:,np.newaxis] / 8)])
lil = r1 * circle
big = r2 * circle
X = np.vstack([lil, big])
y = np.hstack([np.zeros(num_samples), np.ones(num_samples)])
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3,
                                                    random_state=123,
                                                    shuffle=True)
y_test = y_test.astype('int')
# Depth-1 stumps are individually weak; the ensemble should still do well.
y_pred_my = MyRFC(n_estimators = 100,
                  max_depth=1).fit(X_train, y_train).predict(X_test)
assert accuracy_score(y_pred_my, y_test) > 0.85
print('Well Done!')
# -
# # SLIDE (1) Feature Importance
# Просто верните отсортированный массив важности фич, полученные из обученного RandomForest. Фичи нумеруются с 1.
#
# ### Sample 1
# #### Input:
# ```python
# X = np.array([[0, 0], [0,1], [1, 0], [1, 1]])
# y = np.array([0,0,1,1])
# ```
# #### Output:
# ```python
# features= np.array([1, 2])
# importance = np.array([0.75, 0.25])
#
# ```
# # TASK
# +
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, BaggingClassifier
def feature_importance(X, y):
    """Fit a RandomForestClassifier on (X, y) and return
    (features, importances), both sorted by importance descending;
    feature ids are 1-indexed.

    Bug fix: the old ``dict(zip(importance, positions))`` mapping collapsed
    features with exactly equal importances (duplicate dict keys); sorting
    indices with argsort handles ties correctly.
    """
    rf = RandomForestClassifier()
    rf.fit(X, y)
    importance = rf.feature_importances_
    order = np.argsort(importance)[::-1]            # indices, best first
    features = [int(i) + 1 for i in order]          # 1-based feature ids
    sorted_importance = [importance[i] for i in order]
    return features, sorted_importance
# -
# # OPEN TESTS
# +
from sklearn.datasets import make_regression, make_classification
######################################################
X = np.array([[0, 0], [0,1], [1, 0], [1, 1]])
y = np.array([0,0,1,1])
f, i = feature_importance(X, y)
assert_array_equal(f , np.array([1, 2]))
assert i[0] > 0.74
######################################################
X, y = make_classification(n_samples=1000,
n_features=4,
n_informative=2,
shuffle=False,
random_state=10)
print(feature_importance(X, y))
n = 10
a = np.zeros((n, X.shape[1]))
for i in range(n):
a[i], _ = feature_importance(X, y)
assert_array_equal(np.round(a.mean(axis=0)), np.array([2,3,4,1]))
######################################################
print('Well Done!')
# -
| 08-RandomForest/RFHome.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VGG16 Surgery
# VGG16 Transfer Learning After 3-to-4-Channel Input Conversion
# ## Background
# MaskRNN's binary segmentation net is a 2-stream convnet (`astream` and `fstream`). Section "3.3 Binary Segmentation" of the MaskRNN paper and "Figure 2" are inconsistent when it comes to describing the inputs of the two-stream network. In this implementation, we chose the input of the appearance stream `astream` to be the concatenation of the current frame I<sub>t</sub> and the warped prediction of the previous frame's segmentation mask b<sub>t-1</sub>, denoted as φ<sub>t-1,t</sub>(b<sub>t-1</sub>). The warping function φ<sub>t-1,t</sub>(.) transforms the input based on the optical flow fields from frame I<sub>t-1</sub> to frame I<sub>t</sub>.
# We chose the input of the flow stream `fstream` to be the concatenation of the magnitude of the flow field from I<sub>t-1</sub> to I<sub>t</sub> and I<sub>t</sub> to frame I<sub>t+1</sub> and, again, the warped prediction of the previous frame's segmentation mask b<sub>t-1</sub>.
#
# Each stream is a modified VGG16 convnet. Here's how a typical VGG16 convnet looks like:
#
# 
#
# Each MaskRNN convnet is initialized from a VGG16 network pre-trained on ImageNet. This pre-trained network takes RGB images as an input (W x H x 3). The `FStream` network also takes 3-channel inputs (||φ<sub>t-1,t</sub>||, ||φ<sub>t,t+1</sub>||, φ<sub>t-1,t</sub>(b<sub>t-1</sub>)) so we can use the VGG16 network pre-trained on ImageNet as a starting point. However, the `AStream` network takes 4-channel inputs (I<sub>t</sub>[0], I<sub>t</sub>[1], I<sub>t</sub>[2], φ<sub>t-1,t</sub>(b<sub>t-1</sub>)).
#
# Below, we load a 3-channel input VGG16 network pre-trained on ImageNet and perform surgery on it to create a 4-channel input version initialized with the same parameter values except for the additional convolutional filter dimension (we use Gaussian initialization). This is the first block, on the left, in the picture above.
"""
vgg16_surgery.ipynb
VGG16 Transfer Learning After 3-to-4-Channel Input Conversion
Written by <NAME>
Licensed under the MIT License (see LICENSE for details)
Based on:
- https://github.com/minhnhat93/tf_object_detection_multi_channels/blob/master/edit_checkpoint.py
Written by <NAME>
Unknown code license
"""
from tensorflow.python import pywrap_tensorflow
import numpy as np
import tensorflow as tf
# ## Configuration
num_input_channels = 4 # AStream uses 4-channel inputs (RGB + warped previous mask)
init_method = 'gaussian' # ['gaussian'|'spread_average'|'zeros'] -- only 'gaussian' is implemented below
input_path = 'models/vgg_16_3chan.ckpt' # copy of checkpoint in http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz
output_path = 'models/vgg_16_4chan.ckpt' # where the widened 4-channel checkpoint is written
# ## Surgery
# Here are the VGG16 stage 1 parameters we'll want to modify:
# ```
# (dlwin36tfvos) Phil@SERVERP E:\repos\tf-video-seg\tfvos\tools
# $ python -m inspect_checkpoint --file_name=../models/vgg_16_3chan.ckpt | grep -i conv1_1
# vgg_16/conv1/conv1_1/weights (DT_FLOAT) [3,3,3,64]
# vgg_16/conv1/conv1_1/biases (DT_FLOAT) [64]
# ```
# First, let's find the correct tensor:
# +
# Open the pre-trained 3-channel VGG16 checkpoint and locate the tensor we
# need to widen (conv1_1 weights). Every other tensor is re-created as-is in
# the default graph so the saver can write it back out unchanged later.
print('Loading checkpoint...')
reader = pywrap_tensorflow.NewCheckpointReader(input_path)
print('...done loading checkpoint.')
var_to_shape_map = reader.get_variable_to_shape_map()
var_to_edit_name = 'vgg_16/conv1/conv1_1/weights'
for tensor_name in sorted(var_to_shape_map):
    if tensor_name == var_to_edit_name:
        # Keep the conv1_1 weights around as a numpy array for surgery below.
        var_to_edit = reader.get_tensor(var_to_edit_name)
        print('Tensor {} of shape {} located.'.format(var_to_edit_name, var_to_edit.shape))
    else:
        # Register the unchanged tensor in the graph under its original name.
        tf.Variable(reader.get_tensor(tensor_name), name=tensor_name, dtype=tf.float32)
# -
# Now, let's edit the tensor and initialize it according to the chosen init method:
# Widen conv1_1 from 3 to `num_input_channels` input channels and save the
# edited checkpoint to `output_path`.
sess = tf.Session()
if init_method != 'gaussian':
    # Fail fast: only Gaussian init is implemented. The original code printed
    # an error here and then silently fell through to Gaussian init anyway.
    raise NotImplementedError(
        'Unimplemented initialization method: {}'.format(init_method))
# Shape of the extra channel slab: [3, 3, num_input_channels - 3, 64].
new_channels_shape = list(var_to_edit.shape)
new_channels_shape[2] = num_input_channels - 3
# Small-stddev Gaussian so the new channel barely perturbs pre-trained behavior.
gaussian_var = tf.random_normal(shape=new_channels_shape, stddev=0.001).eval(session=sess)
# Concatenate along the input-channel axis and register under the original name.
new_var = np.concatenate([var_to_edit, gaussian_var], axis=2)
new_var = tf.Variable(new_var, name=var_to_edit_name, dtype=tf.float32)
# Materialize all variables and persist the edited graph to the new checkpoint.
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.save(sess, output_path)
# ## Verification
# Verify the result of this surgery by looking at the output of the following commands:
# ```
# $ python -m inspect_checkpoint --file_name=../models/vgg_16_3chan.ckpt --tensor_name=vgg_16/conv1/conv1_1/weights > vgg_16_3chan-conv1_1-weights.txt
# $ python -m inspect_checkpoint --file_name=../models/vgg_16_4chan.ckpt --tensor_name=vgg_16/conv1/conv1_1/weights > vgg_16_4chan-conv1_1-weights.txt
# ```
# You should see the following values in the first filter:
# ```
# # 3-channel VGG16
# 3,3,3,0
# [[[ 0.4800154 0.55037946 0.42947057]
# [ 0.4085474 0.44007453 0.373467 ]
# [-0.06514555 -0.08138704 -0.06136011]]
#
# [[ 0.31047726 0.34573907 0.27476987]
# [ 0.05020237 0.04063221 0.03868078]
# [-0.40338343 -0.45350131 -0.36722335]]
#
# [[-0.05087169 -0.05863491 -0.05746817]
# [-0.28522751 -0.33066967 -0.26224968]
# [-0.41851634 -0.4850302 -0.35009676]]]
#
# # 4-channel VGG16
# 3,3,4,0
# [[[ 4.80015397e-01 5.50379455e-01 4.29470569e-01 1.13388560e-04]
# [ 4.08547401e-01 4.40074533e-01 3.73466998e-01 7.61439209e-04]
# [ -6.51455522e-02 -8.13870355e-02 -6.13601133e-02 4.74345696e-04]]
#
# [[ 3.10477257e-01 3.45739067e-01 2.74769872e-01 4.11637186e-04]
# [ 5.02023660e-02 4.06322069e-02 3.86807770e-02 1.38304755e-03]
# [ -4.03383434e-01 -4.53501314e-01 -3.67223352e-01 1.28411280e-03]]
#
# [[ -5.08716851e-02 -5.86349145e-02 -5.74681684e-02 -6.34787197e-04]
# [ -2.85227507e-01 -3.30669671e-01 -2.62249678e-01 -1.77454809e-03]
# [ -4.18516338e-01 -4.85030204e-01 -3.50096762e-01 2.10441509e-03]]]
#
# ```
| tfvos/vgg16_surgery.ipynb |
/ -*- coding: utf-8 -*-
/ ---
/ jupyter:
/ jupytext:
/ text_representation:
/ extension: .q
/ format_name: light
/ format_version: '1.5'
/ jupytext_version: 1.14.4
/ kernelspec:
/ display_name: SQL
/ language: sql
/ name: SQL
/ ---
/ + [markdown] azdata_cell_guid="a2782576-c8ad-483e-bd03-289dd656844c" extensions={"azuredatastudio": {"views": []}}
/ # Modern database capabilities in Azure SQL Database
/
/ This is a SQL Notebook, which allows you to separate text and code blocks and save code results. Azure Data Studio supports several languages, referred to as kernels, including SQL, PowerShell, Python, and more.
/
/ In this activity, you'll explore how Azure SQL Database is great for modern scenarios that require JSON and/or geospatial support by using T-SQL to analyze both.
/
/ ## Set up: Connect to `bus-db`
/
/ At the top of the window, select **Select Connection** \> **Change Connection** next to "Attach to".
/
/ Under _Recent Connections_ select your `bus-db` connection.
/
/ You should now see it listed next to _Attach to_.
/ + [markdown] azdata_cell_guid="f348fdae-e69a-4271-907d-e6b4e0619151" extensions={"azuredatastudio": {"views": []}}
/ ## Part 1: Explore JSON support
/
/ If you want to start over at any point, run the below cell to delete the temporary tables. Otherwise, you can skip it.
/ + azdata_cell_guid="c14329fd-fee4-4014-a77d-ed5b59785685" extensions={"azuredatastudio": {"views": []}}
-- Reset the temp tables used throughout this notebook so it can be re-run.
DROP TABLE IF EXISTS #t;
DROP TABLE IF EXISTS #g;
DROP TABLE IF EXISTS #r;
/ + [markdown] azdata_cell_guid="962dcc3f-be18-4cc3-bdfc-d670c962a0dc" extensions={"azuredatastudio": {"views": []}}
/ To take a look at an example, let's create a payload which contains two bus data points. This JSON format is similar to what will be ultimately pulled from the real-time data source. Our goal in this section is to add the received bus geolocation data and check if the buses are inside any predefined GeoFence.
/
/ Once you declare a payload, you can use it to insert the data into a temporary table `#t`. Notice how `openjson(@payload)` allows you to parse the JSON very easily with T-SQL.
/
/ One other thing to call out from the select statement below is the `GEOGRAPHY::Point([Latitude], [Longitude], 4326)` which is able to take in latitudes and longitudes and convert it to a spatial reference identifier (SRID) which applies to a certain standard (in this case `4326` is used). More on geospatial later in this activity.
/ + azdata_cell_guid="5a690353-b1f9-43b2-85e1-2a7467f7b3ad" extensions={"azuredatastudio": {"views": []}}
-- Sample payload: two bus position reports, in the same JSON shape the
-- real-time feed produces.
DECLARE @payload NVARCHAR(max) = N'[{
"DirectionId": 1,
"RouteId": 100001,
"VehicleId": 1,
"Position": {
"Latitude": 47.61705102765316,
"Longitude": -122.14291865504012
},
"TimestampUTC": "20201031"
},{
"DirectionId": 1,
"RouteId": 100531,
"VehicleId": 2,
"Position": {
"Latitude": 47.61346156765316,
"Longitude": -122.14291784492805
},
"TimestampUTC": "20201031"
}]';
-- Shred the JSON into rows (openjson) and build one geography POINT per bus
-- (SRID 4326 = WGS84); results land in temp table #t.
SELECT
[DirectionId],
[RouteId],
[VehicleId],
GEOGRAPHY::Point([Latitude], [Longitude], 4326) AS [Location],
[TimestampUTC]
INTO #t
FROM
openjson(@payload) WITH (
[DirectionId] INT,
[RouteId] INT,
[VehicleId] INT,
[Latitude] DECIMAL(10,6) '$.Position.Latitude',
[Longitude] DECIMAL(10,6) '$.Position.Longitude',
[TimestampUTC] DATETIME2(7)
);
/ + [markdown] azdata_cell_guid="3a30a8e0-5054-471c-84f3-6e28dc47c694" extensions={"azuredatastudio": {"views": []}}
/ Now that you've inserted data into `#t`, take a look at the results. Azure SQL Database is able to take in the JSON data and turn it into a table without difficulty.
/ + azdata_cell_guid="92c0d495-d239-4dd1-b095-05f8fa0a6cef" extensions={"azuredatastudio": {"views": []}}
-- Inspect the rows parsed from the JSON payload.
select * from #t;
/ + [markdown] azdata_cell_guid="102d8657-f5ab-4921-809d-d77fc9b41ad2" extensions={"azuredatastudio": {"views": []}}
/ ## Part 2: Explore geospatial support
/
/ You saw briefly how the longitude and latitude were converted to a point using \`GEOGRAPHY::Point()\`. In the previous statement you see it as a long string of letters and numbers. By using `ToString()`, you can easily see the point values.
/ + azdata_cell_guid="b8d596bf-0540-49fa-b206-ce52397c0459" extensions={"azuredatastudio": {"views": []}}
-- ToString() renders the geography POINT in human-readable WKT form.
SELECT [VehicleId], [Location].ToString() AS Location FROM #t;
/ + [markdown] azdata_cell_guid="a53ad031-a7c3-40a9-bf5b-695a148dbea9" extensions={"azuredatastudio": {"views": []}}
/ You can navigate to [https://clydedacruz.github.io/openstreetmap-wkt-playground](https://clydedacruz.github.io/openstreetmap-wkt-playground), clear the sample, enter in one of the points and select **Plot Shape** to see the point displayed on a map.
/
/ You can do other things with the geospatial support, a common one might be to find the distance between, in this case, the two buses.
/ + azdata_cell_guid="1cc9823a-4b83-4e91-a36f-8b503abf0347" extensions={"azuredatastudio": {"views": []}}
-- Distance between the two buses; STDistance on SRID 4326 returns meters.
declare @bus1 geography;
declare @bus2 geography;
select @bus1 = [Location] from #t where VehicleId = 1;
select @bus2 = [Location] from #t where VehicleId = 2;
select @bus1.STDistance(@bus2) as DistanceInMeters;
/ + [markdown] azdata_cell_guid="3f360f2b-eac5-425e-8cd3-0f59d18bbecb" extensions={"azuredatastudio": {"views": []}}
/ In addition to points, you can also define and store polygons on Earth's surface. This is what we have been referring to as a GeoFence. At the same URL as before, you can replace the `POINT` values with, for example, `POLYGON ((-122.14359028995352 47.618245191245848, -122.14360975757847 47.616519550427654, -122.13966755206604 47.616526111887509, -122.13968701903617 47.617280676597375, -122.142821316476 47.617300360798339, -122.142821316476 47.618186139853435, -122.14359028995352 47.618245191245848))` and see the shape on the map.
/
/ This shape represents the GeoFence where you might want to be notified that your bus is entering or exiting. Azure SQL Database also supports using the `POLYGON` format to add that data to a table, as shown below.
/ + azdata_cell_guid="78c7acde-05ec-486b-bb6a-5b25f5007c59" extensions={"azuredatastudio": {"views": []}}
-- Store the 'Overlake Stop' GeoFence polygon in temp table #g.
SELECT * INTO #g
FROM (VALUES(
CAST('Overlake Stop' AS NVARCHAR(100)),
GEOGRAPHY::STGeomFromText('POLYGON ((-122.14359028995352 47.618245191245848, -122.14360975757847 47.616519550427654, -122.13966755206604 47.616526111887509, -122.13968701903617 47.617280676597375, -122.142821316476 47.617300360798339, -122.142821316476 47.618186139853435, -122.14359028995352 47.618245191245848))',4326)
))
AS s ([BusStop], [GeoFence])
-- Echo the stored fence.
SELECT * FROM #g
/ + [markdown] azdata_cell_guid="46ac87ea-ab53-4c87-9b41-57a3b96924a0" extensions={"azuredatastudio": {"views": []}}
/ Now that you have defined a few points and a GeoFence, you might want to know if and when a bus is located from within the GeoFence. With Azure SQL Database, that is easy to do.
/ + azdata_cell_guid="3e3da57b-2132-4788-b46c-20374d080b2f" extensions={"azuredatastudio": {"views": []}}
-- For each bus, build a GEOMETRYCOLLECTION (point + fence) for easy plotting
-- and flag whether the bus is inside the 'Overlake Stop' fence (STWithin).
SELECT
t.DirectionId,
t.RouteId,
t.VehicleId,
GEOGRAPHY::STGeomCollFromText('GEOMETRYCOLLECTION(' + t.[Location].ToString() + ', ' + g.[GeoFence].ToString() +')',4326).ToString() as [WKT],
t.[Location].STWithin(g.[GeoFence]) as InGeoFence
INTO #r
FROM #t AS t
CROSS JOIN #g AS g
WHERE g.[BusStop] = 'Overlake Stop';
SELECT * FROM #r;
/ + [markdown] azdata_cell_guid="c8d2f9b4-af0e-48c2-a594-e21bb7e297a7" extensions={"azuredatastudio": {"views": []}}
/ You can copy a value for `WKT` above and plug it into a map to see that the `InGeoFence` column indeed matches if a bus is in the GeoFence. Note that `GEOMETRYCOLLECTION` allows you to plot points and polygons together.
/ + [markdown] azdata_cell_guid="85b43292-8b20-432a-9b39-b72462d02b50" extensions={"azuredatastudio": {"views": []}}
/ ## Part 3: Create Stored Procedures to get and add data
/
/ You've now seen how to use Azure SQL Database to determine if a bus is within a GeoFence. However, you now need to scale this so it can process real-time data as it flows in. Stored procedures will greatly simplify this in future exercises where you're leveraging other services, e.g. Azure Functions, Azure Logic Apps, Azure App Service, etc.
/
/ A stored procedure is a way to group SQL statements and execute them on the database with one command. For the catching the bus scenario, three stored procedures will be required and you will create them using your learnings from Parts 1 and 2. As you create the stored procedures, take some time to review the T-SQL and how it compares to what you learned in Parts 1 and 2.
/
/ 1. **web.AddBusData**: this stored procedure takes in JSON data containing new bus route, vehicle, direction, time, and location information and adds it to the _busData_ table. If a bus enters/exits a GeoFence, it will also log this information in the _GeoFencesActive_ table.
/ + azdata_cell_guid="ae8b7a37-24ae-4dda-a1fe-e9fd2f8426a8" extensions={"azuredatastudio": {"views": []}} tags=[]
-- Schema that groups the web-facing stored procedures.
create schema [web] AUTHORIZATION [dbo];
go
/ + azdata_cell_guid="b37cc158-0917-4080-b839-acf01dda488d" extensions={"azuredatastudio": {"views": []}}
-- Clean up the exploration temp tables before creating the procedures.
DROP TABLE IF EXISTS #t;
DROP TABLE IF EXISTS #g;
DROP TABLE IF EXISTS #r;
/ + azdata_cell_guid="a29d10d2-ad7f-4062-bad6-3536ffcb5a1c" extensions={"azuredatastudio": {"views": []}}
/*
Add received Bus geolocation data and check if buses are
inside any defined GeoFence. JSON must be like:
{
"DirectionId": 1,
"RouteId": 100001,
"VehicleId": 2,
"Position": {
"Latitude": 47.61705102765316,
"Longitude": -122.14291865504012
},
"TimestampUTC": "20201031"
}
*/
create or alter procedure [web].[AddBusData]
@payload nvarchar(max)
as
begin
-- Serializable transaction: enter/exit detection reads and rewrites
-- dbo.GeoFencesActive, so concurrent calls must not interleave.
set nocount on
set xact_abort on
set tran isolation level serializable
begin tran
-- Validate the payload before touching any table.
if (isjson(@payload) != 1) begin;
throw 50000, 'Payload is not a valid JSON document', 16;
end;
declare @ids as table (id int);
-- insert bus data, capturing the new surrogate keys via OUTPUT
insert into dbo.[BusData]
([DirectionId], [RouteId], [VehicleId], [Location], [TimestampUTC])
output
inserted.Id into @ids
select
[DirectionId],
[RouteId],
[VehicleId],
geography::Point([Latitude], [Longitude], 4326) as [Location],
[TimestampUTC]
from
openjson(@payload) with (
[DirectionId] int,
[RouteId] int,
[VehicleId] int,
[Latitude] decimal(10,6) '$.Position.Latitude',
[Longitude] decimal(10,6) '$.Position.Longitude',
[TimestampUTC] datetime2(7)
);
-- Get details of inserted data
select * into #t from dbo.[BusData] bd where bd.id in (select i.id from @ids i);
-- Find geofences in which the vehicle is in
-- (right join keeps buses inside no fence: their GeoFenceId is null)
select
t.Id as BusDataId,
t.[VehicleId],
t.[DirectionId],
t.[TimestampUTC],
t.[RouteId],
g.Id as GeoFenceId
into
#g
from
dbo.GeoFences g
right join
#t t on g.GeoFence.STContains(t.[Location]) = 1;
-- Calculate status: compare the fences matched now (#g) against the
-- previously active ones (GeoFencesActive). Present only now => 'Enter',
-- only before => 'Exit', both => still inside (null, logged as 'In' below).
select
c.BusDataId,
coalesce(a.[GeoFenceId], c.[GeoFenceId]) as GeoFenceId,
coalesce(a.[DirectionId], c.[DirectionId]) as DirectionId,
coalesce(a.[VehicleId], c.[VehicleId]) as VehicleId,
c.[RouteId],
c.[TimestampUTC],
case
when a.GeoFenceId is null and c.GeoFenceId is not null then 'Enter'
when a.GeoFenceId is not null and c.GeoFenceId is null then 'Exit'
end as [Status]
into
#s
from
#g c
full outer join
dbo.GeoFencesActive a on c.DirectionId = a.DirectionId and c.VehicleId = a.VehicleId;
-- Delete exited geofences
delete
a
from
dbo.GeoFencesActive a
inner join
#s s on a.VehicleId = s.VehicleId and s.DirectionId = a.DirectionId and s.[Status] = 'Exit';
-- Insert entered geofences
insert into dbo.GeoFencesActive
([GeoFenceId], [DirectionId], [VehicleId])
select
[GeoFenceId], [DirectionId], [VehicleId]
from
#s s
where
s.[Status] = 'Enter';
-- Insert Log (rows still inside a fence are logged with status 'In')
insert into dbo.GeoFenceLog
(GeoFenceId, BusDataId, [RouteId], [VehicleId], [TimestampUTC], [Status])
select
GeoFenceId, BusDataId, [RouteId], [VehicleId], [TimestampUTC], isnull([Status], 'In')
from
#s s
where
s.[GeoFenceId] is not null
and
s.[BusDataId] is not null
-- Return Entered or Exited geofences as a single JSON column
select
((
select
s.[BusDataId],
s.[VehicleId],
s.[DirectionId],
s.[RouteId],
r.[ShortName] as RouteName,
s.[GeoFenceId],
gf.[Name] as GeoFence,
s.[Status] as GeoFenceStatus,
s.[TimestampUTC]
from
#s s
inner join
dbo.[GeoFences] gf on s.[GeoFenceId] = gf.[Id]
inner join
dbo.[Routes] r on s.[RouteId] = r.[Id]
where
s.[Status] is not null and s.[GeoFenceId] is not null
for
json path
)) as ActivatedGeoFences;
commit
end
/ + [markdown] azdata_cell_guid="905f2a7b-5775-4149-90c0-afea8ec249ff" extensions={"azuredatastudio": {"views": []}}
/ 2. **web.GetMonitoredRoutes**: this stored procedure returns the route IDs for the bus routes that are being monitored.
/ + azdata_cell_guid="afc7f551-18a4-49bb-bf9f-a8f7f30bdafd" extensions={"azuredatastudio": {"views": []}}
/*
Return the Routes (and thus the buses) to monitor
*/
create or alter procedure [web].[GetMonitoredRoutes]
as
begin
-- Wrap the JSON array in a single column named MonitoredRoutes.
select
((
select RouteId from dbo.[MonitoredRoutes] for json auto
)) as MonitoredRoutes
end
GO
/ + [markdown] azdata_cell_guid="ee6d9b66-9075-4adf-bce7-a09b78d0b303" extensions={"azuredatastudio": {"views": []}}
/ 3. **web.GetMonitoredBusData**: this stored procedure will return bus information for the 50 most-recent buses within 5 kilometers of the monitored GeoFence(s).
/ + azdata_cell_guid="d7340308-a55c-443f-a235-8f7848d90089" extensions={"azuredatastudio": {"views": []}}
/*
Return last geospatial data for bus closest to the GeoFence
*/
create or alter procedure [web].[GetMonitoredBusData]
@routeId int,
@geofenceId int
as
begin
with cte as
(
-- Get the latest location of all the buses in the given route
-- (top (1) with ties keeps every row sharing the most recent ReceivedAtUTC)
select top (1) with ties
*
from
dbo.[BusData]
where
[RouteId] = @routeId
order by
[ReceivedAtUTC] desc
),
cte2 as
(
-- Get the closest to the GeoFence
select top (1)
c.[VehicleId],
gf.[GeoFence],
c.[Location].STDistance(gf.[GeoFence]) as d
from
[cte] c
cross join
dbo.[GeoFences] gf
where
gf.[Id] = @geofenceId
order by
d
), cte3 as
(
-- Take the last 50 points for that vehicle (most recent first, by Id)
select top (50)
[bd].[VehicleId],
[bd].[DirectionId],
[bd].[Location] as l,
[bd].[Location].STDistance([GeoFence]) as d
from
dbo.[BusData] bd
inner join
cte2 on [cte2].[VehicleId] = [bd].[VehicleId]
order by
id desc
)
-- Return only the points that are within 5 Km
select
((
select
geography::UnionAggregate(l).ToString() as [busData],
(select [GeoFence].ToString() from dbo.[GeoFences] where Id = @geofenceId) as [geoFence]
from
cte3
where
d < 5000
for json auto, include_null_values, without_array_wrapper
)) as locationData
end
GO
/ + [markdown] azdata_cell_guid="57da2a37-393c-47c3-8409-2f3db8d1a7cc" extensions={"azuredatastudio": {"views": []}}
/ Confirm you've created the stored procedures with the following.
/ + azdata_cell_guid="004217bf-00f5-48e4-ab2f-2b972c9e0394" extensions={"azuredatastudio": {"views": []}}
-- List the stored procedures created in the [web] schema.
SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA = 'web'
| database/notebooks/02-json-geospatial-sql-db.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="b7633c3776a139d6c1caaf7a821f0a85843bc11a"
# !wget https://datahack-prod.s3.amazonaws.com/train_file/train_LZdllcl.csv -O train.csv
# + _uuid="53b6a0d794afdd21e00d6d5f54c6bd8883541b42"
# !wget https://datahack-prod.s3.amazonaws.com/test_file/test_2umaH9m.csv -O test.csv
# + _uuid="5fd6027988feec479318de3dbc2b98989baa440a"
# !wget https://datahack-prod.s3.amazonaws.com/sample_submission/sample_submission_M0L0uXE.csv -O sample_submission.csv
# + _uuid="26708f7d764135fd2fd47e600069e5cbe7887f71"
# Import the required packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# + _uuid="25f40cdb83beb98d47c5f19a23190e8a42be5a64"
# Read the train and test data
train=pd.read_csv("train.csv")
# employee_id is a row identifier with no predictive value; drop it.
train.drop('employee_id',inplace=True,axis = 1)
test=pd.read_csv("test.csv")
# + _uuid="99e1a13c0de47e834e8de343607592aed8938792"
# Check the variables (column names) in the train data
train.columns
# + _uuid="ac93f6ab0061a3a52a6a474d8908c32ad45f3413"
# Print datatype of each variable
train.dtypes
# + _uuid="a761ae73de6f97c0f39e70b972107b68b872baf2"
# Dimension (rows, columns) of the train dataset
train.shape
# + _uuid="62e4fa900d65189c05b9253a97111fdc5082da65"
# Print the first rows of the train dataset
train.head()
# + _uuid="d23ab5f7d2a08d190fe83807f86f6e3cd21dfe66"
# Count of unique values in each variable of the train dataset
train.nunique()
# + [markdown] _uuid="08873b4885821f6a69ebe2f86c8653911b9de9ed"
# ### Univariate Analysis
# + [markdown] _uuid="96c55f1e1d8b25971b62667045e55b26f6cadcee"
# #### Target Variable
# + _uuid="2ce5bbfa1e4e0bdfafbadd9cbedbc10ce13c6ce7"
# Class balance of the target variable
train['is_promoted'].value_counts(normalize=True)
# + _uuid="79be89f499f99ccda7e0b425da2cd7ac210dd091"
# Around 91% of employees were NOT promoted (promotion is the rare class)
# Unbalanced dataset
# + [markdown] _uuid="a9c77308cdf23905b732a3ebdd8ac5ca79212716"
# #### Categorical Independent Variables
# + _uuid="bfc8278ea9cbbeb3eae635361bb002a3601e2eef"
# Bar plots of the normalized frequencies of four categorical variables.
plt.figure(1)
plt.subplot(221)
train['department'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Department')
plt.subplot(222)
train['awards_won?'].value_counts(normalize=True).plot.bar(title= 'Awards won')
plt.subplot(223)
train['education'].value_counts(normalize=True).plot.bar(title= 'Education')
plt.subplot(224)
train['gender'].value_counts(normalize=True).plot.bar(title= 'Gender')
plt.show()
# + _uuid="448b0008c0005b8bac7517e2712afcd275d16c1f"
# The plots above show the distribution of department, awards won,
# education level and gender across employees.
# + _uuid="38c90f687147e812444d82fd39a1f4ef89ea85cd"
# Distribution of the KPI-achievement flag.
train['KPIs_met >80%'].value_counts(normalize=True).plot.bar(title= 'KPI met greater than 80')
# + _uuid="06e10d4d08c38687cf7ee3415dbeae59b71da4f2"
# Bar plots of four more categorical/discrete variables.
plt.figure(1)
plt.subplot(221)
train['region'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Region')
plt.subplot(222)
train['recruitment_channel'].value_counts(normalize=True).plot.bar(title='Recruitment Channels')
plt.subplot(223)
train['no_of_trainings'].value_counts(normalize=True).plot.bar(title= 'No of Trainings')
plt.subplot(224)
train['previous_year_rating'].value_counts(normalize=True).plot.bar(title= 'Previous year ratings')
plt.show()
# + _uuid="ce58132467771804d0c6dde66b7201ef132e7e2b"
# The plots above show the distribution of region, recruitment channel,
# number of trainings attended and previous year rating.
# + [markdown] _uuid="36cd44e98df41020144f72ce9e479441e1efc925"
# #### Numerical Independent Variables
# + _uuid="ff05893eb22a4b5abc53c026f01049b464540fa7"
# Distribution of age.
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11; use
# sns.histplot/sns.displot if upgrading seaborn.
sns.distplot(train['age']);
# + _uuid="ab0bb2436a8aafdbccfb5b70395f7d470acaa5f3"
# Most of the employees are in the age range of 20-30 and 40-50
# + _uuid="56fe72b47516e3033cb1dcb5fe96fcba49adee73"
# Distribution of length of service.
sns.distplot(train['length_of_service']);
# + _uuid="cd71f64c6498e307516c236f4d71d8b94e6f786f"
# Distribution of average training score.
sns.distplot(train['avg_training_score']);
# + [markdown] _uuid="a9c395da513a03c3d4cf74559ec5e6ddf69ae471"
# ### Bivariate Analysis
# + _uuid="455257d6faeef3ac9e54ed79d7bb388f9777eb61"
# Correlation between numerical variables
# NOTE(review): with pandas >= 2.0, DataFrame.corr() raises on non-numeric
# columns; pass numeric_only=True if upgrading pandas.
matrix = train.corr()
f, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(matrix, vmax=.8, square=True, cmap="BuPu");
# + _uuid="529566e738cdc1e9e1b868c72e5030963b94f71e"
# Not much correlation between the variables
# + _uuid="359e86f33d96063a3121ca837a8f6713d527c185"
# department vs is_promoted
plt.figure(figsize=(12,4))
sns.barplot(train['department'], train['is_promoted'])
# + _uuid="4b363419b6df265a056614c149b7174d62a0b6d5"
plt.figure(figsize=(20,8))
# region vs is_promoted
sns.barplot(train['region'], train['is_promoted'])
# + _uuid="d805673ffe0ba6b0f4032335d5d144505a9d0b0d"
# Promotion rate varies noticeably across regions
# + _uuid="7db2c6101077a51ab70b9f6d21d16acc65ef85cb"
# recruitment_channel vs is_promoted
sns.barplot(train['recruitment_channel'], train['is_promoted'])
# + _uuid="a36cf43fd15ae9e80d494dc0d5fd3ea8473173b3"
# Compare promotion rates across recruitment channels (see plot above)
# + _uuid="e580b8149834e54d735f045d80c3f64003070dfc"
# no_of_trainings vs is_promoted
sns.barplot(train['no_of_trainings'], train['is_promoted'])
# + _uuid="6c01fd50943611cce679e628b6da4afdb0414ae4"
# Compare promotion rates by number of trainings attended (see plot above)
# + _uuid="2e1396ae174e216cbc155b0e5fc4443b32229da9"
# previous_year_rating vs is_promoted
sns.barplot(train['previous_year_rating'], train['is_promoted'])
# + _uuid="82e575ccbe1e225e608f718e721d9bf92257a0bb"
# Compare promotion rates by previous year rating (see plot above)
# + _uuid="f6171daa2a2453ec54a68ac3216a6708e4ba3667"
# education vs is_promoted
plt.figure(figsize=(12,4))
sns.barplot(train['education'], train['is_promoted'])
# + _uuid="4d51a39c0f8b5b26b7721a1357ed344b72fd5d6b"
# Compare promotion rates across education levels (see plot above)
# + _uuid="d6407f1d29497a0b7aa4303822c4716e38d81353"
plt.figure(figsize=(20,8))
# length_of_service vs is_promoted
sns.barplot(train['length_of_service'], train['is_promoted'])
# + _uuid="482d44e64b2f2585489ef8a8773f8365858144ce"
# Compare promotion rates by length of service (see plot above)
# + _uuid="a991342a7b47b1bb5c3f6eaa8c17213d2e64b86a"
# KPIs_met >80% vs is_promoted
sns.barplot(train['KPIs_met >80%'], train['is_promoted'])
# + _uuid="582bdb73dabbcde0f5c1182c935142b2274323b9"
# Compare promotion rates by KPI achievement (see plot above)
# + _uuid="7e1eb7ddf0872f2169a8db6d239dbaf08568fb84"
# awards_won? vs is_promoted
sns.barplot(train['awards_won?'], train['is_promoted'])
# + _uuid="b5114cf69693c817f808443c10bc02baff6aa327"
# Compare promotion rates by whether an award was won (see plot above)
# + [markdown] _uuid="fd27a117a89fd064d466909c4c58413c8617b8ee"
# ### Missing Values Treatment
# + _uuid="785a4c7e34acfeff9b06aacada3a0aaecb599476"
# Check the number of missing values in each variable
train.isnull().sum()
# + _uuid="7f7bddd4e740fc8513c93f9c4fee2d79e46fa5ba"
# education and previous_year_rating have missing values
# (they are filled below in both train and test).
# + _uuid="7efee8262046723ea76c5d6b09539f8e315a007d"
# Re-read the test data and drop the identifier column.
test = pd.read_csv('test.csv')
test.drop('employee_id',inplace=True,axis = 1)
test.head()
# + _uuid="2c59197cac0b3c20984bf0066bb77154e79aa158"
# Fill missing education with the literal category 'other' and missing
# previous_year_rating with the sentinel value 99.
test['education'].fillna('other',inplace=True)
test['previous_year_rating'].fillna(99,inplace=True)
train['education'].fillna('other',inplace=True)
train['previous_year_rating'].fillna(99,inplace=True)
# + [markdown] _uuid="e19dc042f8d583d6a0e9b9db9bbad37e239d487a"
# ### Logistic Regression
# + _uuid="19f12e96cede2a29c1b917d94838e13a7662dba1"
train.head()
# + _uuid="9d27eeb9cae2d1b88f5b6c26ffe534e2d7f55757"
# Save target variable in separate dataset
X = train.drop('is_promoted',axis=1)
y = train.is_promoted
# + _uuid="01c549003c69b99f646eff61116f49ddde6ebad8"
test.head()
# + _uuid="f4f266e38e7a900c1a03a2c2031bab3a5b47c743"
# One-hot encode the categorical variables.
# NOTE(review): get_dummies is applied to X and test separately, so the
# column sets can diverge if a category appears in only one of them --
# verify/align columns before predicting on test.
X=pd.get_dummies(X)
test=pd.get_dummies(test)
# + _uuid="ec63a2e87565a1598a8a7e32d971dc47d443c040"
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
# sklearn.cross_validation and sklearn.grid_search were deprecated in
# scikit-learn 0.18 and removed in 0.20; their contents now live in
# sklearn.model_selection.
from sklearn import metrics  # additional sklearn metrics (f1_score, ...)
from sklearn.model_selection import GridSearchCV  # performing grid search
# + _uuid="cf89c15d35f4e4f9f200442760cb6d70cb03cc1b"
def modelfit(alg, dtrain, predictors,useTrainCV=True, cv_folds=3, early_stopping_rounds=50):
    """Fit an XGBoost classifier and print a training-set model report.

    When useTrainCV is True, first runs xgb.cv with early stopping (AUC
    metric) and resets alg's n_estimators to the best round count found.

    NOTE(review): the target is read from the module-level global `y`, not
    from a parameter -- dtrain must be row-aligned with `y`.
    """
    if useTrainCV:
        xgb_param = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(dtrain[predictors].values, label=y)
        # Early stopping chooses the boosting round count; adopt it below.
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
            metrics='auc', early_stopping_rounds=early_stopping_rounds)
        alg.set_params(n_estimators=cvresult.shape[0])
    #Fit the algorithm on the data
    alg.fit(dtrain[predictors],y,eval_metric='auc')
    #Predict training set:
    dtrain_predictions = alg.predict(dtrain[predictors])
    dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]  # (currently unused)
    #Print model report (training-set F1, not cross-validated):
    print("\nModel Report")
    print("F1 Score : ",metrics.f1_score(y, dtrain_predictions))
# + _uuid="dbf8337006696a48f999a0745be2b830be1cfbb5"
#Choose all predictors except target & IDcols
predictors = [x for x in X.columns]
# Baseline classifier; n_estimators=1000 is an upper bound that modelfit's
# cross-validated early stopping will trim down.
xgb1 = XGBClassifier(
    learning_rate =0.1,
    n_estimators=1000,
    max_depth=5,
    min_child_weight=1,
    gamma=0,
    subsample=0.8,
    colsample_bytree=0.8,
    objective= 'binary:logistic',
    nthread=4,
    scale_pos_weight=1,
    seed=27)
modelfit(xgb1,X, predictors)
# + _uuid="e0826dccb493866949ed9af74f3aef2024a0b6bc"
xgb1
# + _uuid="64cf848df1f7021f018c31b10f8ab4c45cb4e65c"
# Stage-1 hyperparameter search: coarse grid over tree depth and
# min_child_weight, scored by cross-validated ROC AUC.
param_test1 = {
    'max_depth':np.arange(3,10,2),
    'min_child_weight':np.arange(1,6,2)
}
gsearch1 = GridSearchCV(
    estimator = XGBClassifier(
        learning_rate =0.1,
        n_estimators=184,  # round count found by modelfit's early stopping
        max_depth=5,
        min_child_weight=1,
        gamma=0,
        subsample=0.8,
        colsample_bytree=0.8,
        objective= 'binary:logistic',
        nthread=4,
        scale_pos_weight=1,
        seed=27),
    param_grid = param_test1,
    scoring='roc_auc',
    n_jobs=4,
    cv=5)
gsearch1.fit(X[predictors],y)
# The `iid` parameter and the `grid_scores_` attribute were removed in
# scikit-learn 0.20+/0.24; `cv_results_` holds the per-candidate scores.
gsearch1.cv_results_, gsearch1.best_params_, gsearch1.best_score_
# + _uuid="9afcd37e809cd5b3dd6c79711cb94caeb91149eb"
# Read the submission file
submission=pd.read_csv("sample_submission.csv")
# + _uuid="5b4ba649b5a8f0846d1dcc287513925e64ab6c8a"
submission.head()
# + _uuid="963cdc37a3b48c94017c16d526f3408d59a729c3"
# Fill the is_promoted variable with the predictions.
# NOTE(review): `pred` is never defined in this notebook -- test-set
# predictions (e.g. gsearch1.predict(test[...])) must be generated first.
submission['is_promoted']=pred
# + _uuid="3957d16c1f26c5ca0fe781f39da79a224f388a0a"
# Submission expects integer class labels.
submission['is_promoted'] = submission['is_promoted'].astype(np.int64)
# + _uuid="22591f429efdf09a5ae090f7ceceddb8c9037595"
submission.head()
# + _uuid="f23311e0bb07bffc8e41360b27390ae2f53ca273"
submission['is_promoted'].value_counts()
# + _uuid="3800779bb3832dfead3c44df7723a4f71628b475"
# Converting the submission file to csv format
submission.to_csv('logistic_submission.csv', index=False)
# + [markdown] _uuid="b7dbd31bb3eaa4494d614e4b3aca075a4e833d62"
# score on leaderboard - 0.71145
| xgboost-tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
from algos.convex_naf import ConvexNAFAlgorithm
from algos.ddpg import DDPG as MyDDPG
from qfunctions.convex_naf_qfunction import ConcaveNAF
from qfunctions.nn_qfunction import FeedForwardCritic
from qfunctions.quadratic_naf_qfunction import QuadraticNAF
from qfunctions.quadratic_qf import QuadraticQF
from policies.nn_policy import FeedForwardPolicy
from rllab.exploration_strategies.ou_strategy import OUStrategy
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
import matplotlib.pyplot as plt
import numpy as np
# +
# Experiment hyperparameters (passed into DDPG via algo_params below).
BATCH_SIZE = 128                  # minibatch size for gradient updates
N_EPOCHS = 10
EPOCH_LENGTH = 100                # env steps per epoch
EVAL_SAMPLES = 100
DISCOUNT = 0.99                   # reward discount factor
QF_LEARNING_RATE = 1e-3
POLICY_LEARNING_RATE = 1e-4
BATCH_LEARNING_RATE = 1e-2        # NOTE(review): appears unused in this notebook
SOFT_TARGET_TAU = 1e-2            # soft target-network update rate
REPLAY_POOL_SIZE = 1000000
MIN_POOL_SIZE = 256
SCALE_REWARD = 1.0
QF_WEIGHT_DECAY = 0.01
MAX_PATH_LENGTH = 1000
N_UPDATES_PER_TIME_STEP = 5       # NOTE(review): appears unused in this notebook
QF_TYPE = ''                      # NOTE(review): appears unused in this notebook
# +
# Build the environment, exploration strategy, policy, Q-function and the
# DDPG algorithm instance.
env = normalize(CartpoleEnv())
policy_params = dict(
    observation_hidden_sizes=(100, 100),
    hidden_nonlinearity=tf.nn.relu,
    output_nonlinearity=tf.nn.tanh,  # bounds actions to [-1, 1]
)
algo_params = dict(
    batch_size=BATCH_SIZE,
    n_epochs=N_EPOCHS,
    epoch_length=EPOCH_LENGTH,
    eval_samples=EVAL_SAMPLES,
    discount=DISCOUNT,
    policy_learning_rate=POLICY_LEARNING_RATE,
    qf_learning_rate=QF_LEARNING_RATE,
    soft_target_tau=SOFT_TARGET_TAU,
    replay_pool_size=REPLAY_POOL_SIZE,
    min_pool_size=MIN_POOL_SIZE,
    scale_reward=SCALE_REWARD,
    max_path_length=MAX_PATH_LENGTH,
    qf_weight_decay=QF_WEIGHT_DECAY,
)
# Ornstein-Uhlenbeck noise for exploration.
es = OUStrategy(env_spec=env.spec)
policy = FeedForwardPolicy(
    name_or_scope="policy",
    env_spec=env.spec,
    **policy_params
)
# Quadratic NAF critic (the alternative feed-forward critic is kept below
# for easy swapping).
qf = QuadraticNAF(
    name_or_scope="quadratic_qfunction",
    env_spec=env.spec,
)
# qf = FeedForwardCritic(
#     name_or_scope="feed_forward_qfunction",
#     env_spec=env.spec,
# )
algorithm = MyDDPG(
    env,
    es,
    policy,
    qf,
    **algo_params
)
# -
algorithm.train()
# Dimensions used by the plotting cells below.
a_dim = qf.action_dim
o_dim = qf.observation_dim
# NOTE(review): these bounds are read from the *action* space despite the
# o_ naming, yet are later used to sweep observation dimensions -- confirm
# whether observation_space was intended.
o_high = env.spec.action_space.high[0]
o_low = env.spec.action_space.low[0]
print(a_dim)
print(o_dim)
print(o_high)
print(o_low)
# # Plot QF
# +
# Evaluate the Q-function over a dense sweep of actions in [-1, 1] at a
# single random state, and plot Q(s, a) versus a.
num_actions = 10000
actions = np.linspace(-1, 1, num_actions)
actions = np.expand_dims(actions, axis=1)  # shape (num_actions, 1)
random_state_single = np.random.rand(1, o_dim)
# Repeat the same state for every action so they can be batched together.
random_state = np.vstack([random_state_single for _ in range(num_actions)])
feed_dict = {
    qf.action_input: actions,
    qf.observation_input: random_state,
}
qf_output = qf.sess.run(
    qf.output,
    feed_dict=feed_dict
)
plt.plot(actions, qf_output)
plt.xlabel('Action')
plt.ylabel('QF output')
plt.show()
# -
# # Inspect correctness of this quadratic function
# +
feed_dict = {
qf.action_input: actions,
qf.observation_input: random_state,
}
L_params, L, implicit_policy_output = qf.sess.run(
[qf.advantage_function.L_params.output, qf.advantage_function.L, qf.implicit_policy.output],
feed_dict=feed_dict
)
expected_values = -0.5 * ((actions - implicit_policy_output) * L[0][0][0])**2
plt.plot(actions, expected_values)
plt.xlabel('Action')
plt.ylabel('Expected QF output')
plt.show()
# -
plt.plot(actions, np.abs(expected_values - qf_output))
plt.xlabel('Action')
plt.ylabel('QF output error')
plt.show()
# Make sure diagonal values are exponentiated corrected
print(L[0])
print(np.exp(L_params[0]))
# Make sure max action is the one taken by the implicit policy
max_index = np.argmax(qf_output, axis=0)
print(actions[max_index])
print(implicit_policy_output[0])
# # Plot implicit policy
# Sweep each state dimension in turn (others held at a random base state)
# and plot the policy's action as a function of that dimension.
# NOTE(review): range(4) hard-codes the CartPole observation dimension, and
# o_low/o_high come from the action space (see note above) -- confirm.
o_delta = o_high - o_low
for dim_changing in range(4):
    num_states = 1000
    base_state = np.random.rand(1, o_dim) * o_delta + o_low
    # base_state = np.zeros((1, o_dim))
    linear_states = np.vstack([base_state for _ in range(num_states)])
    linear_states[:, dim_changing] = np.linspace(o_low, o_high, num_states)
    print(linear_states)
    policy_output = np.vstack([policy.get_action(state)[0] for state in linear_states])
    plt.plot(linear_states[:, dim_changing], policy_output)
    plt.xlabel('State {0} dimension'.format(dim_changing))
    plt.ylabel('Policy Output')
    plt.show()
| check_quadratic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] origin_pos=0
# # 读写文件
#
# 到目前为止,我们讨论了如何处理数据,
# 以及如何构建、训练和测试深度学习模型。
# 然而,有时我们希望保存训练的模型,
# 以备将来在各种环境中使用(比如在部署中进行预测)。
# 此外,当运行一个耗时较长的训练过程时,
# 最佳的做法是定期保存中间结果,
# 以确保在服务器电源被不小心断掉时,我们不会损失几天的计算结果。
# 因此,现在是时候学习如何加载和存储权重向量和整个模型了。
#
# ## load & save tensor
#
# 对于单个张量,我们可以直接调用`load`和`save`函数分别读写它们。
# 这两个函数都要求我们提供一个名称,`save`要求将要保存的变量作为输入。
#
# + origin_pos=2 tab=["pytorch"]
import torch
from torch import nn
from torch.nn import functional as F
# -
# ## save
# Persist a single tensor to disk.
x = torch.arange(4)
torch.save(x, 'x.torch_tensor')
# + [markdown] origin_pos=4
# ## load
# + origin_pos=6 tab=["pytorch"]
# Read the tensor back; the bare trailing expression displays it in the notebook.
x2 = torch.load('x.torch_tensor')
x2
# + [markdown] origin_pos=8
# ## load & save a `list` of tensor
# -
y = torch.zeros(4)
torch.save([x, y],'xy.torch_tensor')
# + origin_pos=10 tab=["pytorch"]
x2, y2 = torch.load('xy.torch_tensor')
(x2, y2)
# + [markdown] origin_pos=12
# ## load & save a `dict` of tensor
# + origin_pos=14 tab=["pytorch"]
mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
mydict2 = torch.load('mydict')
mydict2
# + [markdown] origin_pos=16
# ## load & save network params
# + origin_pos=18 tab=["pytorch"]
class MLP(nn.Module):
    """A two-layer perceptron: 20 -> 256 (ReLU) -> 10.

    The attribute names `hidden` and `output` are load-bearing: they define
    the state_dict keys used when the parameters are saved and restored.
    """

    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        # Project to the hidden width, apply ReLU, then map to the 10 outputs.
        hidden_activation = F.relu(self.hidden(x))
        return self.output(hidden_activation)
# Build a model and run one forward pass so we have a reference output.
net = MLP()
X = torch.randn(size=(2, 20))
Y = net(X)
# + [markdown] origin_pos=20
# Next we store the model's parameters in a file named "mlp.params".
#
# + origin_pos=22 tab=["pytorch"]
torch.save(net.state_dict(), 'mlp.params')
# + [markdown] origin_pos=24
# To restore the model, we instantiate a fresh copy of the original MLP.
# Instead of randomly initializing its parameters, we read the stored
# parameters directly from the file.
# - **we need the same structure first!!!**
# + origin_pos=26 tab=["pytorch"]
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
# Switch to inference mode; .train() restores training mode. The mode matters
# for layers such as BatchNorm and Dropout.
clone.eval()
# + [markdown] origin_pos=28
# ## testing(check if the result is the same)
# + origin_pos=30 tab=["pytorch"]
# Identical parameters + identical input => element-wise equal outputs.
Y_clone = clone(X)
Y_clone == Y
| 04_deep_learning_computation/pytorch_read_write.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Waveform Characteristics Example
# It's useful to know when the waveform "starts" and "ends". Here we summarize how nrutils exposes these useful markers.
## Setup ipython environment
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# Import useful things
from nrutils import gwylm,scsearch,romline,maketaper
# Setup plotting backend
import matplotlib as mpl
from mpl_toolkits.mplot3d import axes3d
# Global matplotlib styling applied to every figure in this notebook
mpl.rcParams['lines.linewidth'] = 0.8
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
from matplotlib.pyplot import *
from mpl_toolkits.mplot3d import Axes3D
#
from numpy import *
#
# Search the simulation catalog for unique, nonspinning, q=2 "athena" runs.
A = scsearch(keyword='athena',q=2,verbose=True,nonspinning=True,unique=True)
# +
# Take the first matching catalog entry
a = A[0]
# Load the (l,m)=(3,2) multipole of its waveform, cleaning junk radiation
y = gwylm( a, verbose=True, lm=[(3,2)], clean=True )
# -
y.plot()
# +
figure( figsize=2*array([11,2]) )
# First loaded multipole; plot its amplitude versus time
u = y.ylm[0]
plot( u.t, u.amp )
# Demarcate strict beginning and end of waveform
axvline( u.t[y.startindex], linestyle='--', color='k' )
axvline( u.t[y.endindex], linestyle='--', color='k' )
# Demarcate strict (index) peak of waveform
axvline( u.t[u.k_amp_max], linestyle=':', color='c' )
# Demarcate strict INTERPOLATED peak of waveform
axvline( u.intrp_t_amp_max, linestyle='--', color='k', alpha=0.3 )
# Demarcate effective beginning and end of waveform
axvline( u.t[ y.preinspiral.right_index ], linestyle='-', color='r' )
axvline( u.t[ y.postringdown.left_index ], linestyle='-', color='r' )
# Log scale makes the amplitude decay at both ends visible
gca().set_yscale('log')
xlabel('$t/M$')
ylabel('$|$'+u.kind+'$|$')
title('The waveform should be windowed to zero at both ends.')
| examples/wavform_characteristics_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow_p36)
# language: python
# name: conda_tensorflow_p36
# ---
# +
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, CuDNNLSTM, CuDNNGRU, BatchNormalization, LocallyConnected2D, Permute, TimeDistributed, Bidirectional
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
from keras.utils.generic_utils import Progbar
from keras.layers.merge import _Merge
import keras.losses
from functools import partial
from collections import defaultdict
import tensorflow as tf
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import tensorflow as tf
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
from sklearn import preprocessing
import pandas as pd
import os
import pickle
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import scipy as scp
import matplotlib.pyplot as plt
import isolearn.io as isoio
import isolearn.keras as isol
from sequence_logo_helper import plot_dna_logo
import pandas as pd
from keras.backend.tensorflow_backend import set_session
def contain_tf_gpu_mem_usage() :
    """Limit TensorFlow GPU memory usage by enabling on-demand memory growth
    and installing the configured session as Keras' global session
    (TF1-style session API)."""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)
# Apply immediately at import/cell-execution time.
contain_tf_gpu_mem_usage()
class EpochVariableCallback(Callback) :
    """Keras callback that re-assigns a backend variable at the start of
    every epoch.

    `my_func(current_value, epoch)` receives the variable's current value
    and the epoch index, and returns the new value to set.
    """

    def __init__(self, my_variable, my_func) :
        self.my_variable = my_variable
        self.my_func = my_func

    def on_epoch_begin(self, epoch, logs={}) :
        current_value = K.get_value(self.my_variable)
        K.set_value(self.my_variable, self.my_func(current_value, epoch))
# +
#optimus 5-prime functions
def test_data(df, model, test_seq, obs_col, output_col='pred'):
    '''Predict mean ribosome load using model and test set UTRs.

    Fits a StandardScaler on df[obs_col], predicts on test_seq, and writes
    the inverse-scaled predictions into df[output_col]. Returns df.
    '''
    # Scale the test set mean ribosome load.
    # .values is required: pandas Series no longer has a .reshape method.
    scaler = preprocessing.StandardScaler()
    scaler.fit(df[obs_col].values.reshape(-1, 1))
    # Make predictions
    predictions = model.predict(test_seq).reshape(-1)
    # Inverse-scale the predictions and store them in `output_col`.
    # inverse_transform requires a 2-D array in modern scikit-learn,
    # so reshape to a column and flatten afterwards.
    df.loc[:,output_col] = scaler.inverse_transform(predictions.reshape(-1, 1)).reshape(-1)
    return df
def one_hot_encode(df, col='utr', seq_len=50):
    """One-hot encode the first `seq_len` bases of each sequence in df[col].

    Returns an array of shape (len(df), seq_len, 4) with columns ordered
    a, c, g, t; 'n' encodes as all zeros. Input is lower-cased first.
    """
    # Per-nucleotide one-hot rows ('n' = unknown base -> all zeros).
    lookup = {'a':[1,0,0,0],'c':[0,1,0,0],'g':[0,0,1,0],'t':[0,0,0,1], 'n':[0,0,0,0]}
    # Allocate the output matrix up front.
    encoded = np.empty([len(df), seq_len, 4])
    # Encode each (truncated) UTR row by row.
    for row_ix, utr in enumerate(df[col].str[:seq_len]):
        encoded[row_ix] = np.array([lookup[base] for base in utr.lower()])
    return encoded
def r2(x,y):
    """Return the squared Pearson correlation (R^2) of a linear fit of y on x."""
    # Local import: the file only does `import scipy as scp`, which does not
    # load the scipy.stats submodule, so the bare name `stats` was undefined
    # here and every call raised NameError.
    from scipy import stats
    slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
    return r_value**2
#Train data
e_train = pd.read_csv("bottom5KIFuAUGTop5KIFuAUG.csv")
# Standardize the ribosome-load column for training.
e_train.loc[:,'scaled_rl'] = preprocessing.StandardScaler().fit_transform(e_train.loc[:,'rl'].values.reshape(-1,1))
seq_e_train = one_hot_encode(e_train,seq_len=50)
x_train = seq_e_train
# Add a singleton "channel" axis: (N, 50, 4) -> (N, 1, 50, 4).
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1], x_train.shape[2]))
y_train = np.array(e_train['scaled_rl'].values)
y_train = np.reshape(y_train, (y_train.shape[0],1))
#Test data
e_test = pd.read_csv("randomSampleTestingAllAUGtypes.csv")
# NOTE(review): the test targets are standardized with a scaler fitted on the
# test set itself, not the training scaler — confirm this is intentional.
e_test.loc[:,'scaled_rl'] = preprocessing.StandardScaler().fit_transform(e_test.loc[:,'rl'].values.reshape(-1,1))
seq_e_test = one_hot_encode(e_test, seq_len=50)
x_test = seq_e_test
x_test = np.reshape(x_test, (x_test.shape[0], 1, x_test.shape[1], x_test.shape[2]))
y_test = np.array(e_test['scaled_rl'].values)
y_test = np.reshape(y_test, (y_test.shape[0],1))
# A few hand-picked extra sequences (dummy zero targets).
e_extra = pd.DataFrame({
    'utr' : [
        "CCGGCTTATCAATGGGAAGCGTCGATTGCGACAAGGGTCGTGCTCGCTAG",
        "CCGGCTTATCAATGGGAAGCGTCGATTGCGACAAGGGTCGTTAGCGCTAG",
        "CCGGCTTATCAATGGGAATGGTCGATTGCGACAAGGGTCGTTAGCGCTAG"
    ]
})
seq_e_extra = one_hot_encode(e_extra, seq_len=50)
x_extra = seq_e_extra
x_extra = np.reshape(x_extra, (x_extra.shape[0], 1, x_extra.shape[1], x_extra.shape[2]))
y_extra = np.zeros((x_extra.shape[0],1))
print("x_train.shape = " + str(x_train.shape))
print("x_test.shape = " + str(x_test.shape))
print("x_extra.shape = " + str(x_extra.shape))
print("y_train.shape = " + str(y_train.shape))
print("y_test.shape = " + str(y_test.shape))
print("y_extra.shape = " + str(y_extra.shape))
# +
#Define sequence template
# 'N' marks positions the scrambler may modify; here: every position.
sequence_template = "N" * 50
sequence_mask = np.array([1 if sequence_template[j] == 'N' else 0 for j in range(len(sequence_template))])
# +
#Visualize background sequence distribution
# Per-position nucleotide frequencies over the training set, with an additive
# pseudo-count so no probability is exactly 0 or 1.
pseudo_count = 1.0
x_mean = (np.sum(x_train, axis=(0, 1)) + pseudo_count) / (x_train.shape[0] + 4. * pseudo_count)
# Logit transform of the background frequencies (used to seed the generator).
x_mean_logits = np.log(x_mean / (1. - x_mean))
plot_dna_logo(np.copy(x_mean), sequence_template=sequence_template, figsize=(14, 0.65), logo_height=1.0, plot_start=0, plot_end=205)
# +
#Calculate mean training set conservation
# Conservation per position = 2 bits - Shannon entropy of the background.
entropy = np.sum(x_mean * -np.log(x_mean), axis=-1) / np.log(2.0)
conservation = 2.0 - entropy
x_mean_conservation = np.sum(conservation) / np.sum(sequence_mask)
print("Mean conservation (bits) = " + str(x_mean_conservation))
# +
#Calculate mean training set kl-divergence against background
# Clip one-hots away from {0,1} so log() stays finite.
x_train_clipped = np.clip(np.copy(x_train[:, 0, :, :]), 1e-8, 1. - 1e-8)
kl_divs = np.sum(x_train_clipped * np.log(x_train_clipped / np.tile(np.expand_dims(x_mean, axis=0), (x_train_clipped.shape[0], 1, 1))), axis=-1) / np.log(2.0)
# Average KL over editable (masked-in) positions, then over sequences.
x_mean_kl_divs = np.sum(kl_divs * sequence_mask, axis=-1) / np.sum(sequence_mask)
x_mean_kl_div = np.mean(x_mean_kl_divs)
print("Mean KL Div against background (bits) = " + str(x_mean_kl_div))
# +
from tensorflow.python.framework import ops
#Stochastic Binarized Neuron helper functions (Tensorflow)
#ST Estimator code adopted from https://r2rt.com/beyond-binary-ternary-and-one-hot-neurons.html
#See Github https://github.com/spitis/
def st_sampled_softmax(logits):
    """Sample a one-hot nucleotide per row of `logits` with a
    straight-through (ST) gradient estimator.

    Forward pass: draw from the softmax distribution and return a one-hot
    (scaled by the softmax probability, then ceil'd back to 1). Backward
    pass: the Ceil->Identity and Mul->STMul overrides make the gradient
    flow as if the op were differentiable.
    """
    with ops.name_scope("STSampledSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.squeeze(tf.multinomial(logits, 1), 1), onehot_dim, 1.0, 0.0)
        # Gradient overrides must wrap the ops they apply to (ceil and *).
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)
def st_hardmax_softmax(logits):
    """Deterministic variant of st_sampled_softmax: take the argmax
    nucleotide instead of sampling, with the same straight-through
    gradient overrides."""
    with ops.name_scope("STHardmaxSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.argmax(nt_probs, 1), onehot_dim, 1.0, 0.0)
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)
@ops.RegisterGradient("STMul")
def st_mul(op, grad):
    """Straight-through gradient for the overridden Mul op: pass the
    incoming gradient through unchanged to both multiplicands."""
    return [grad, grad]
#Gumbel Distribution Sampler
def gumbel_softmax(logits, temperature=0.5) :
    """Draw a relaxed (Gumbel-Softmax) sample from `logits`.

    Lower `temperature` pushes samples closer to discrete one-hots.
    (The unused batch/one-hot dimension locals from the original were
    dead code and have been removed.)
    """
    gumbel_dist = tf.contrib.distributions.RelaxedOneHotCategorical(temperature, logits=logits)
    return gumbel_dist.sample()
# +
#PWM masking helper
def mask_pwm(inputs) :
    """Blend raw PWM logits with a fixed template.

    Positions where `onehot_mask` is 1 keep the PWM value; the template
    offsets are then added everywhere on top.
    """
    pwm, onehot_template, onehot_mask = inputs
    masked_pwm = pwm * onehot_mask
    return masked_pwm + onehot_template
def sample_pwm_st(pwm_logits) :
    """Sample one-hot sequences from PWM logits of shape
    (batch, 1, seq_len, 4) with the straight-through estimator.
    Rows are flattened to (batch*seq_len, 4) for sampling, then
    reshaped back to the original 4-D layout."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4))
    sampled_pwm = st_sampled_softmax(flat_pwm)
    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4))
def sample_pwm_gumbel(pwm_logits) :
    """Same flatten/sample/reshape scheme as sample_pwm_st, but drawing
    relaxed Gumbel-Softmax samples (temperature 0.5) instead of
    straight-through hard samples."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4))
    sampled_pwm = gumbel_softmax(flat_pwm, temperature=0.5)
    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4))
#Generator helper functions
def initialize_sequence_templates(generator, sequence_templates, background_matrices) :
    """Load fixed template/mask/background weights into the generator's
    embedding layers and freeze them.

    For each template string: 'N' = freely editable position,
    'X' = suppressed position, any nucleotide letter = hard-fixed base
    (strong +10 logit on that base, -4 elsewhere).
    """
    embedding_templates = []
    embedding_masks = []
    embedding_backgrounds = []
    for k in range(len(sequence_templates)) :
        sequence_template = sequence_templates[k]
        onehot_template = iso.OneHotEncoder(seq_length=len(sequence_template))(sequence_template).reshape((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] not in ['N', 'X'] :
                # Fixed base: overwrite with strong logits favoring it.
                nt_ix = np.argmax(onehot_template[0, j, :])
                onehot_template[:, j, :] = -4.0
                onehot_template[:, j, nt_ix] = 10.0
            elif sequence_template[j] == 'X' :
                onehot_template[:, j, :] = -1.0
        # Mask is 1 only at 'N' positions (where the PWM may act).
        onehot_mask = np.zeros((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] == 'N' :
                onehot_mask[:, j, :] = 1.0
        embedding_templates.append(onehot_template.reshape(1, -1))
        embedding_masks.append(onehot_mask.reshape(1, -1))
        embedding_backgrounds.append(background_matrices[k].reshape(1, -1))
    embedding_templates = np.concatenate(embedding_templates, axis=0)
    embedding_masks = np.concatenate(embedding_masks, axis=0)
    embedding_backgrounds = np.concatenate(embedding_backgrounds, axis=0)
    # Install as frozen embedding weights (one row per template class).
    generator.get_layer('template_dense').set_weights([embedding_templates])
    generator.get_layer('template_dense').trainable = False
    generator.get_layer('mask_dense').set_weights([embedding_masks])
    generator.get_layer('mask_dense').trainable = False
    generator.get_layer('background_dense').set_weights([embedding_backgrounds])
    generator.get_layer('background_dense').trainable = False
#Generator construction function
def build_sampler(batch_size, seq_length, n_classes=1, n_samples=1, sample_mode='st') :
    """Build a closure that turns raw per-position logits into
    (pwm_logits, pwm, sampled one-hots, mask, sampled mask).

    The class input selects a row from the frozen template/mask/background
    embeddings (see initialize_sequence_templates). `sample_mode` chooses
    between straight-through ('st') and Gumbel-Softmax ('gumbel') sampling.
    """
    #Initialize Reshape layer
    reshape_layer = Reshape((1, seq_length, 4))
    #Initialize background matrix
    onehot_background_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='background_dense')
    #Initialize template and mask matrices
    onehot_template_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='template_dense')
    onehot_mask_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='ones', name='mask_dense')
    #Initialize Templating and Masking Lambda layer
    masking_layer = Lambda(mask_pwm, output_shape = (1, seq_length, 4), name='masking_layer')
    background_layer = Lambda(lambda x: x[0] + x[1], name='background_layer')
    #Initialize PWM normalization layer
    pwm_layer = Softmax(axis=-1, name='pwm')
    #Initialize sampling layers
    sample_func = None
    if sample_mode == 'st' :
        sample_func = sample_pwm_st
    elif sample_mode == 'gumbel' :
        sample_func = sample_pwm_gumbel
    # Tile the batch n_samples times, sample, then fold samples back into
    # a dedicated axis: (batch, n_samples, 1, seq_length, 4).
    upsampling_layer = Lambda(lambda x: K.tile(x, [n_samples, 1, 1, 1]), name='upsampling_layer')
    sampling_layer = Lambda(sample_func, name='pwm_sampler')
    permute_layer = Lambda(lambda x: K.permute_dimensions(K.reshape(x, (n_samples, batch_size, 1, seq_length, 4)), (1, 0, 2, 3, 4)), name='permute_layer')
    def _sampler_func(class_input, raw_logits) :
        #Get Template and Mask
        onehot_background = reshape_layer(onehot_background_dense(class_input))
        onehot_template = reshape_layer(onehot_template_dense(class_input))
        onehot_mask = reshape_layer(onehot_mask_dense(class_input))
        #Add Template and Multiply Mask
        pwm_logits = masking_layer([background_layer([raw_logits, onehot_background]), onehot_template, onehot_mask])
        #Compute PWM (Nucleotide-wise Softmax)
        pwm = pwm_layer(pwm_logits)
        #Tile each PWM to sample from and create sample axis
        pwm_logits_upsampled = upsampling_layer(pwm_logits)
        sampled_pwm = sampling_layer(pwm_logits_upsampled)
        sampled_pwm = permute_layer(sampled_pwm)
        sampled_mask = permute_layer(upsampling_layer(onehot_mask))
        return pwm_logits, pwm, sampled_pwm, onehot_mask, sampled_mask
    return _sampler_func
# +
#Scrambler network definition
def make_resblock(n_channels=64, window_size=8, dilation_rate=1, group_ix=0, layer_ix=0, drop_rate=0.0) :
    """Build one pre-activation residual block closure:
    BN -> ReLU -> Conv -> BN -> ReLU -> Conv [-> Dropout] + skip.
    `group_ix`/`layer_ix` only disambiguate layer names."""
    #Initialize res block layers
    batch_norm_0 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x, alpha=0.0))
    conv_0 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_0')
    batch_norm_1 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x, alpha=0.0))
    conv_1 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_1')
    skip_1 = Lambda(lambda x: x[0] + x[1], name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_skip_1')
    drop_1 = None
    if drop_rate > 0.0 :
        drop_1 = Dropout(drop_rate)
    #Execute res block
    def _resblock_func(input_tensor) :
        batch_norm_0_out = batch_norm_0(input_tensor)
        relu_0_out = relu_0(batch_norm_0_out)
        conv_0_out = conv_0(relu_0_out)
        batch_norm_1_out = batch_norm_1(conv_0_out)
        relu_1_out = relu_1(batch_norm_1_out)
        if drop_rate > 0.0 :
            conv_1_out = drop_1(conv_1(relu_1_out))
        else :
            conv_1_out = conv_1(relu_1_out)
        # Residual connection back to the block input.
        skip_1_out = skip_1([conv_1_out, input_tensor])
        return skip_1_out
    return _resblock_func
def mask_dropout_multi_scale(mask, drop_scales=[1, 2, 4, 7], min_drop_rate=0.0, max_drop_rate=0.5) :
    """Randomly zero out contiguous stretches of the position mask at
    several scales during training (identity at inference, via the
    learning-phase switch). Each example draws its own overall drop rate
    and a random per-scale allocation of that rate."""
    rates = K.random_uniform(shape=(K.shape(mask)[0], 1, 1, 1), minval=min_drop_rate, maxval=max_drop_rate)
    scale_logits = K.random_uniform(shape=(K.shape(mask)[0], len(drop_scales), 1, 1, 1), minval=-5., maxval=5.)
    scale_probs = K.softmax(scale_logits, axis=1)
    ret_mask = mask
    # Apply each scale's share of the drop rate in sequence.
    for drop_scale_ix, drop_scale in enumerate(drop_scales) :
        ret_mask = mask_dropout(ret_mask, rates * scale_probs[:, drop_scale_ix, ...], drop_scale=drop_scale)
    return K.switch(K.learning_phase(), ret_mask, mask)
def mask_dropout(mask, drop_rates, drop_scale=1) :
    """Zero out blocks of `drop_scale` consecutive positions of `mask`
    with per-example probability `drop_rates`. A coarse random tensor is
    drawn at 1/drop_scale resolution and repeated back up, so dropped
    positions come in contiguous runs.
    NOTE(review): assumes the sequence length is divisible by drop_scale."""
    random_tensor_downsampled = K.random_uniform(shape=(
        K.shape(mask)[0],
        1,
        K.cast(K.shape(mask)[2] / drop_scale, dtype=tf.int32),
        K.shape(mask)[3]
    ), minval=0.0, maxval=1.0)
    keep_mask_downsampled = random_tensor_downsampled >= drop_rates
    keep_mask = K.repeat_elements(keep_mask_downsampled, rep=drop_scale, axis=2)
    ret_mask = mask * K.cast(keep_mask, dtype=tf.float32)
    return ret_mask
def mask_dropout_single_scale(mask, drop_scale=1, min_drop_rate=0.0, max_drop_rate=0.5) :
    """Single-scale variant of mask_dropout_multi_scale: one random
    per-example drop rate, one block size; identity at inference."""
    rates = K.random_uniform(shape=(K.shape(mask)[0], 1, 1, 1), minval=min_drop_rate, maxval=max_drop_rate)
    random_tensor_downsampled = K.random_uniform(shape=(
        K.shape(mask)[0],
        1,
        K.cast(K.shape(mask)[2] / drop_scale, dtype=tf.int32),
        K.shape(mask)[3]
    ), minval=0.0, maxval=1.0)
    keep_mask_downsampled = random_tensor_downsampled >= rates
    keep_mask = K.repeat_elements(keep_mask_downsampled, rep=drop_scale, axis=2)
    ret_mask = mask * K.cast(keep_mask, dtype=tf.float32)
    # Only drop during training; return the untouched mask at inference.
    return K.switch(K.learning_phase(), ret_mask, mask)
def load_scrambler_network(seq_length=50, n_groups=1, n_resblocks_per_group=4, n_channels=32, window_size=8, dilation_rates=[1], drop_rate=0.0, drop_scales=[1, 5], min_drop_rate=0.0, max_drop_rate=0.5) :
    """Build the scrambler resnet closure.

    Takes (one-hot sequence, position mask) and returns
    (scaled logits, per-position importance scores). Importance scores are
    softplus outputs, masked by the (dropout-perturbed) position mask, and
    are used to scale +/-1 logits derived from the input one-hot.
    NOTE(review): mutable default args (`dilation_rates`, `drop_scales`)
    are shared across calls; harmless here since they are only read.
    """
    #Scrambler network definition
    conv_0 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_conv_0')
    mask_drop = Lambda(lambda x: mask_dropout_multi_scale(x, drop_scales=drop_scales, min_drop_rate=min_drop_rate, max_drop_rate=max_drop_rate), output_shape=(1, seq_length, 1), name='scrambler_mask_drop')
    #mask_drop = Lambda(lambda x: mask_dropout_single_scale(x, drop_scale=drop_scales[0], min_drop_rate=min_drop_rate, max_drop_rate=max_drop_rate), output_shape=(1, seq_length, 1), name='scrambler_mask_drop')
    mask_concat = Concatenate(axis=-1)
    mask_multiply = Lambda(lambda x: x[0] * x[1])
    # One skip connection (1x1 conv) per res-block group.
    skip_convs = []
    resblock_groups = []
    for group_ix in range(n_groups) :
        skip_convs.append(Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_skip_conv_' + str(group_ix)))
        resblocks = []
        for layer_ix in range(n_resblocks_per_group) :
            resblocks.append(make_resblock(n_channels=n_channels, window_size=window_size, dilation_rate=dilation_rates[group_ix], group_ix=group_ix, layer_ix=layer_ix, drop_rate=drop_rate))
        resblock_groups.append(resblocks)
    last_block_conv = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_last_block_conv')
    skip_add = Lambda(lambda x: x[0] + x[1], name='scrambler_skip_add')
    # Softplus keeps importance scores non-negative.
    final_conv = Conv2D(1, (1, 1), strides=(1, 1), padding='same', activation='softplus', kernel_initializer='glorot_normal', name='scrambler_final_conv')
    # Map one-hot {0,1} to logits {-1,+1}.
    onehot_to_logits = Lambda(lambda x: 2. * x - 1., name='scrambler_onehot_to_logits')
    scale_logits = Lambda(lambda x: x[1] * K.tile(x[0], (1, 1, 1, 4)), name='scrambler_logit_scale')
    def _scrambler_func(sequence_input, mask_input) :
        mask_dropped = mask_drop(mask_input)
        conv_0_out = conv_0(mask_concat([sequence_input, mask_dropped]))
        #Connect group of res blocks
        output_tensor = conv_0_out
        #Res block group execution
        skip_conv_outs = []
        for group_ix in range(n_groups) :
            skip_conv_out = skip_convs[group_ix](output_tensor)
            skip_conv_outs.append(skip_conv_out)
            for layer_ix in range(n_resblocks_per_group) :
                output_tensor = resblock_groups[group_ix][layer_ix](output_tensor)
        #Last res block extra conv
        last_block_conv_out = last_block_conv(output_tensor)
        # Sum all group skip connections into the final feature map.
        skip_add_out = last_block_conv_out
        for group_ix in range(n_groups) :
            skip_add_out = skip_add([skip_add_out, skip_conv_outs[group_ix]])
        #Final conv out (importance scores, zeroed where the mask was dropped)
        final_conv_out = mask_multiply([final_conv(skip_add_out), mask_dropped])
        #Scale logits by importance scores
        scaled_logits = scale_logits([final_conv_out, onehot_to_logits(sequence_input)])
        return scaled_logits, final_conv_out
    return _scrambler_func
# +
#Keras loss functions
def get_margin_entropy_ame_masked(pwm_start, pwm_end, pwm_background, max_bits=1.0) :
    """Return a loss penalizing mean per-position conservation (KL vs the
    background, in bits) only when it EXCEEDS `max_bits` (hinge / margin
    form). Positions outside the editable mask are ignored."""
    def _margin_entropy_ame_masked(pwm, pwm_mask) :
        # Per-position KL divergence against the background, in bits.
        conservation = pwm[:, 0, pwm_start:pwm_end, :] * K.log(K.clip(pwm[:, 0, pwm_start:pwm_end, :], K.epsilon(), 1. - K.epsilon()) / K.constant(pwm_background[pwm_start:pwm_end, :])) / K.log(2.0)
        conservation = K.sum(conservation, axis=-1)
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(mask, axis=-1)
        mean_conservation = K.sum(conservation * mask, axis=-1) / n_unmasked
        # Hinge: zero below max_bits, linear above.
        margin_conservation = K.switch(mean_conservation > K.constant(max_bits, shape=(1,)), mean_conservation - K.constant(max_bits, shape=(1,)), K.zeros_like(mean_conservation))
        return margin_conservation
    return _margin_entropy_ame_masked
def get_target_entropy_sme_masked(pwm_start, pwm_end, pwm_background, target_bits=1.0) :
    """Return a loss pulling mean per-position conservation (KL vs the
    background, in bits) TOWARD `target_bits` (squared-error form).
    Positions outside the editable mask are ignored."""
    def _target_entropy_sme_masked(pwm, pwm_mask) :
        conservation = pwm[:, 0, pwm_start:pwm_end, :] * K.log(K.clip(pwm[:, 0, pwm_start:pwm_end, :], K.epsilon(), 1. - K.epsilon()) / K.constant(pwm_background[pwm_start:pwm_end, :])) / K.log(2.0)
        conservation = K.sum(conservation, axis=-1)
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(mask, axis=-1)
        mean_conservation = K.sum(conservation * mask, axis=-1) / n_unmasked
        return (mean_conservation - target_bits)**2
    return _target_entropy_sme_masked
def get_margin_lum_ame_masked(pwm_start, pwm_end) :
    """Return a hinge loss on the mean "on" probability derived from
    importance scores (2*sigmoid - 1), penalizing only the excess above
    the per-example `max_lum` bound; masked positions are ignored."""
    def _margin_lum_ame(importance_scores, pwm_mask, max_lum) :
        p_ons = 2. * K.sigmoid(importance_scores[:, 0, pwm_start:pwm_end, 0]) - 1.
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(mask, axis=-1)
        # Guard against a fully-masked window (n_unmasked == 0).
        mean_p_on = K.switch(n_unmasked > 0, K.sum(p_ons * mask, axis=-1) / n_unmasked, max_lum[:, 0])
        margin_p_on = K.switch(mean_p_on > max_lum[:, 0], mean_p_on - max_lum[:, 0], K.zeros_like(mean_p_on))
        return margin_p_on
    return _margin_lum_ame
def get_target_lum_sme_masked(pwm_start, pwm_end) :
    """Squared-error counterpart of get_margin_lum_ame_masked: pull the
    mean "on" probability toward the per-example `target_lum`."""
    def _target_lum_sme(importance_scores, pwm_mask, target_lum) :
        p_ons = 2. * K.sigmoid(importance_scores[:, 0, pwm_start:pwm_end, 0]) - 1.
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(mask, axis=-1)
        # Guard against a fully-masked window (n_unmasked == 0).
        mean_p_on = K.switch(n_unmasked > 0, K.sum(p_ons * mask, axis=-1) / n_unmasked, target_lum[:, 0])
        return (mean_p_on - target_lum[:, 0])**2
    return _target_lum_sme
def get_weighted_loss(loss_coeff=1.) :
    """Build a Keras loss that ignores `y_true` and simply scales the
    model output by `loss_coeff` (the model graph already computes the
    loss value itself)."""
    def _min_pred(y_true, y_pred) :
        scaled_loss = loss_coeff * y_pred
        return scaled_loss
    return _min_pred
def get_mse(loss_coeff=1.) :
    """Return a Keras-style MSE loss closure for the Optimus predictor.

    NOTE: `loss_coeff` is accepted for signature symmetry with
    get_weighted_loss but is not applied inside `_mse`.
    """
    def _mse(y_true, y_pred):
        squared_error = (y_true - y_pred)**2
        return K.mean(squared_error, axis=-1)
    return _mse
# -
# Reset the Keras/TensorFlow graph before building the model.
K.clear_session()
# +
#Initialize Encoder and Decoder networks
batch_size = 32
seq_length = 50
n_samples = 32
#sample_mode = 'st'
sample_mode = 'gumbel'
#Resnet parameters
resnet_n_groups = 5
resnet_n_resblocks_per_group = 4
resnet_n_channels = 32
resnet_window_size = 3
# One dilation rate per group (must have resnet_n_groups entries).
resnet_dilation_rates = [1, 2, 4, 2, 1]
resnet_drop_rate = 0.0
resnet_min_drop_rate = 0.0
resnet_max_drop_rate = 0.15
resnet_drop_scales = [1, 2, 5]
#Load scrambler
scrambler = load_scrambler_network(
    seq_length=seq_length,
    n_groups=resnet_n_groups,
    n_resblocks_per_group=resnet_n_resblocks_per_group,
    n_channels=resnet_n_channels, window_size=resnet_window_size,
    dilation_rates=resnet_dilation_rates,
    drop_rate=resnet_drop_rate,
    min_drop_rate=resnet_min_drop_rate,
    max_drop_rate=resnet_max_drop_rate,
    drop_scales=resnet_drop_scales
)
#Load sampler
sampler = build_sampler(batch_size, seq_length, n_classes=1, n_samples=n_samples, sample_mode=sample_mode)
# +
#Load Predictor
# Pre-trained Optimus 5' predictor; frozen so only the scrambler trains.
predictor_path = 'optimusRetrainedMain.hdf5'
predictor = load_model(predictor_path)
predictor.trainable = False
# Keras requires a compile step even though the predictor is never trained here.
predictor.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error')
# +
#Build scrambler model
# Standalone model for inspecting PWMs / samples / importance scores.
scrambler_class = Input(shape=(1,), name='scrambler_class')
scrambler_input = Input(shape=(1, seq_length, 4), name='scrambler_input')
scrambler_drop = Input(shape=(1, seq_length, 1), name='scrambler_drop')
scrambler_logits, importance_scores = scrambler(scrambler_input, scrambler_drop)
pwm_logits, pwm, sampled_pwm, _, _ = sampler(scrambler_class, scrambler_logits)
scrambler_model = Model([scrambler_input, scrambler_drop, scrambler_class], [pwm_logits, pwm, sampled_pwm, importance_scores])
#Initialize Sequence Templates and Masks
initialize_sequence_templates(scrambler_model, [sequence_template], [x_mean_logits])
# Compiled only so the model can be saved/evaluated; training happens on loss_model.
scrambler_model.compile(
    optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
    loss='mean_squared_error'
)
# +
#Build Auto-scrambler pipeline
#Define model inputs
ae_scrambler_class = Input(shape=(1,), name='ae_scrambler_class')
ae_scrambler_input = Input(shape=(1, seq_length, 4), name='ae_scrambler_input')
ae_scrambler_drop = Input(shape=(1, seq_length, 1), name='ae_scrambler_drop')
#Run encoder and decoder
# scrambler(...) returns (scaled_logits, importance_scores); only the logits
# feed the sampler here.
_, scrambled_pwm, scrambled_sample, pwm_mask, _ = sampler(ae_scrambler_class, scrambler(ae_scrambler_input, ae_scrambler_drop)[0])
#Define layer to deflate sample axis
# (batch, n_samples, 1, len, 4) -> (batch * n_samples, 1, len, 4) so the
# predictor can run on every sample as an ordinary batch.
deflate_scrambled_sample = Lambda(lambda x: K.reshape(x, (batch_size * n_samples, 1, seq_length, 4)), name='deflate_scrambled_sample')
#Deflate sample axis
scrambled_sample_deflated = deflate_scrambled_sample(scrambled_sample)
# +
def _make_prediction(inputs, predictor=predictor) :
    """Run the frozen predictor on a (batch, 1, len, 4) tensor by
    squeezing out the singleton axis first."""
    pred_seq_in = inputs
    pred_seq_in_perm = tf.squeeze(pred_seq_in, 1)
    outputs = predictor(pred_seq_in_perm)
    return outputs
def _make_prediction_scrambled(inputs, predictor=predictor, n_samples=n_samples) :
    """Identical body to _make_prediction (n_samples is unused); kept as a
    separate function so the scrambled branch gets its own Lambda layer."""
    pred_seq_in = inputs
    pred_seq_in_perm = tf.squeeze(pred_seq_in, 1)
    outputs = predictor(pred_seq_in_perm)
    return outputs
#Make reference prediction on non-scrambled input sequence
y_pred_non_scrambled = Lambda(_make_prediction, name='make_prediction_non_scrambled')(ae_scrambler_input)
#Make prediction on scrambled sequence samples
y_pred_scrambled_deflated = Lambda(_make_prediction_scrambled, name='make_prediction_scrambled')(scrambled_sample_deflated)
# +
#Define layer to inflate sample axis
# (batch * n_samples,) predictions -> (batch, n_samples)
inflate_scrambled_prediction = Lambda(lambda x: K.reshape(x, (batch_size, n_samples)), name='inflate_scrambled_prediction')
#Inflate sample axis
y_pred_scrambled = inflate_scrambled_prediction(y_pred_scrambled_deflated)
#Cost function parameters
pwm_start = 0
pwm_end = 50
target_bits = 0.125
#OPTIMUS: switched to MSE
mse_loss_func = get_mse()
#Conservation cost
conservation_loss_func = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, target_bits=1.8)
#Entropy cost
entropy_loss_func = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, target_bits=target_bits)
#entropy_loss_func = get_margin_entropy_ame_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, max_bits=target_bits)
#Define annealing coefficient
# 1.0 => conservation cost active; 0.0 => entropy cost active.
# Updated per epoch by EpochVariableCallback (see training cell).
anneal_coeff = K.variable(1.0)
#Execute NLL cost
# Broadcast the reference prediction across the sample axis before MSE.
mse_loss = Lambda(lambda x: mse_loss_func(K.tile(x[0], (1, K.shape(x[1])[1])), x[1]), name='mse')([y_pred_non_scrambled, y_pred_scrambled])
#Execute conservation cost
conservation_loss = Lambda(lambda x: anneal_coeff * conservation_loss_func(x[0], x[1]), name='conservation')([scrambled_pwm, pwm_mask])
#Execute entropy cost
entropy_loss = Lambda(lambda x: (1. - anneal_coeff) * entropy_loss_func(x[0], x[1]), name='entropy')([scrambled_pwm, pwm_mask])
loss_model = Model(
    [ae_scrambler_class, ae_scrambler_input, ae_scrambler_drop],
    [mse_loss, conservation_loss, entropy_loss]
)
#Initialize Sequence Templates and Masks
initialize_sequence_templates(loss_model, [sequence_template], [x_mean_logits])
opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.5, beta_2=0.9)
#opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999)
# The three model outputs ARE the loss values; get_weighted_loss just
# forwards them (ignoring the dummy targets).
loss_model.compile(
    optimizer=opt,
    loss={
        'mse' : get_weighted_loss(loss_coeff=1.0),
        'conservation' : get_weighted_loss(loss_coeff=1.0),
        'entropy' : get_weighted_loss(loss_coeff=1.0)
    }
)
# +
#Training configuration
#Define number of training epochs
n_epochs = 50
#Define experiment suffix (optional)
experiment_suffix = "_example_if_uorf_seqs_drop_multi_scale_weight_1"
#Define anneal function
def _anneal_func(val, epoch, n_epochs=n_epochs) :
    """Schedule for `anneal_coeff`: 1.0 during epoch 0 (conservation cost
    active), 0.0 afterwards (entropy cost active). `val` (the current
    coefficient) and the `n_epochs` default are unused by this schedule."""
    if epoch in [0] :
        return 1.0
    return 0.0
# Encode the architecture and run settings into the save-file name.
architecture_str = "resnet_" + str(resnet_n_groups) + "_" + str(resnet_n_resblocks_per_group) + "_" + str(resnet_n_channels) + "_" + str(resnet_window_size) + "_" + str(resnet_drop_rate).replace(".", "") + "_" + str(resnet_min_drop_rate).replace(".", "") + "_to_" + str(resnet_max_drop_rate).replace(".", "")
dataset_name = "egfp_unmod_1"
model_name = "autoscrambler_dataset_" + dataset_name + "_sample_mode_" + sample_mode + "_n_samples_" + str(n_samples) + "_" + architecture_str + "_n_epochs_" + str(n_epochs) + "_target_bits_" + str(target_bits).replace(".", "") + experiment_suffix
print("Model save name = " + model_name)
# +
#Execute training procedure
# Re-assign anneal_coeff at the start of each epoch (1.0 only for epoch 0).
callbacks =[
    EpochVariableCallback(anneal_coeff, _anneal_func)
]
# Class labels are all zero (single template class). The same zero arrays
# serve as dummy targets, since the model's outputs are already the losses.
s_train = np.zeros((x_train.shape[0], 1))
s_test = np.zeros((x_test.shape[0], 1))
# All-ones masks: no externally imposed position dropout at the input level.
all_ones_mask_train = np.ones((x_train.shape[0], 1, seq_length, 1))
all_ones_mask_test = np.ones((x_test.shape[0], 1, seq_length, 1))
# train the autoencoder
train_history = loss_model.fit(
    [s_train, x_train, all_ones_mask_train],
    [s_train, s_train, s_train],
    shuffle=True,
    epochs=n_epochs,
    batch_size=batch_size,
    validation_data=(
        [s_test, x_test, all_ones_mask_test],
        [s_test, s_test, s_test]
    ),
    callbacks=callbacks
)
# +
# Plot training (green) vs validation (orange) curves for the three losses.
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(3 * 4, 3))
n_epochs_actual = len(train_history.history['mse_loss'])
ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['mse_loss'], linewidth=3, color='green')
ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_mse_loss'], linewidth=3, color='orange')
plt.sca(ax1)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("MSE", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['entropy_loss'], linewidth=3, color='green')
ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_entropy_loss'], linewidth=3, color='orange')
plt.sca(ax2)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Entropy Loss", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['conservation_loss'], linewidth=3, color='green')
ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_conservation_loss'], linewidth=3, color='orange')
plt.sca(ax3)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Conservation Loss", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.show()
# +
# Save model and weights
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
scrambler_model.save(model_path)
print('Saved scrambler model at %s ' % (model_path))
# +
#Load models
# Restores the trained weights back into the (already built) scrambler model.
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
scrambler_model.load_weights(model_path)
print('Loaded scrambler model %s ' % (model_path))
# +
def _drop_seq_to_mask(s) :
return np.reshape(
np.array([0. if s[i] == 'X' else 1. for i in range(len(s))]),
(1, 1, len(s), 1)
)
# Five held-out human 5' UTR sequences (50 nt each); the trailing comments
# record the Ensembl gene/transcript each sequence came from.
e_extra = pd.DataFrame({
    'utr' : [
        "CTGGAATGCATCCAAGAGAATCCCTCAACATGCTGGTGGCTTTCTAGAAG", #ENSG00000115295,ENST00000404424
        "GGTCTCATCTCTGCTTCACAATGCCGATGATTTAGCTGGGAGGACCCAAA", #ENSG00000182346,ENST00000473269
        "GAATGGGTTATATCCTGTGTTGTGACCTCATGGTTTAAGTGGGAATAAAG", #ENSG00000177565,ENST00000431674
        "CAAGAGAAAGTAAAGAATTTAAGATTTTATTCATGTGCATGGCATAGAAG", #ENSG00000104442,ENST00000519352
        "GGGAGCTAGAGATGCTGTTATTCTATTGTATGTGAGAAGTCGGCCCAGAG", #ENSG00000168038,ENST00000420927
    ]
})

# One-hot encode to (n, 50, 4), then insert a singleton axis -> (n, 1, 50, 4).
seq_e_extra = one_hot_encode(e_extra, seq_len=50)
x_extra = seq_e_extra
x_extra = np.reshape(x_extra, (x_extra.shape[0], 1, x_extra.shape[1], x_extra.shape[2]))
# Dummy targets (unused at inference time).
y_extra = np.zeros((x_extra.shape[0],1))

# Per-example drop masks: 'X' marks positions forced to be dropped (mask 0),
# 'N' positions are kept. Three masks per sequence: no drop, then two
# different 3-nt drop windows. Commented-out lines preserve the original
# sequence each group of three masks refers to.
drop_test = [
    #_drop_seq_to_mask("CTGGAATGCATCCAAGAGAATCCCTCAACATGCTGGTGGCTTTCTAGAAG"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNXXXNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNNNNNNNNNNN"),
    #_drop_seq_to_mask("GGTCTCATCTCTGCTTCACAATGCCGATGATTTAGCTGGGAGGACCCAAA"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNNNNNNNNNNNNNN"),
    #_drop_seq_to_mask("GAATGGGTTATATCCTGTGTTGTGACCTCATGGTTTAAGTGGGAATAAAG"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNXXXNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNNNNNNNNNNN"),
    #_drop_seq_to_mask("CAAGAGAAAGTAAAGAATTTAAGATTTTATTCATGTGCATGGCATAGAAG"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNN"),
    #_drop_seq_to_mask("GGGAGCTAGAGATGCTGTTATTCTATTGTATGTGAGAAGTCGGCCCAGAG"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNXXXNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"),
    _drop_seq_to_mask("NNNNNNNNNNNNNNNNNNNNNNNNNNNNNXXXNNNNNNNNNNNNNNNNNN"),
]

# Repeat each of the five sequences three times, pairing each copy with one
# of its three masks above.
new_x_test = [
    x_extra[0:1, ...],
    x_extra[0:1, ...],
    x_extra[0:1, ...],
    x_extra[1:2, ...],
    x_extra[1:2, ...],
    x_extra[1:2, ...],
    x_extra[2:3, ...],
    x_extra[2:3, ...],
    x_extra[2:3, ...],
    x_extra[3:4, ...],
    x_extra[3:4, ...],
    x_extra[3:4, ...],
    x_extra[4:5, ...],
    x_extra[4:5, ...],
    x_extra[4:5, ...],
]

drop_test = np.concatenate(drop_test, axis=0)
new_x_test = np.concatenate(new_x_test, axis=0)
# +
# Run the trained scrambler on each held-out example (inference only,
# despite the cell's original "training procedure" label).
s_test = np.zeros((batch_size, 1))

pwm_test = []
sample_test = []
importance_scores_test = []
for data_ix in range(new_x_test.shape[0]) :
    print("Predicting example " + str(data_ix) + "...")
    # Tile the single example (and its mask) across the batch dimension;
    # only the first result of each batch is kept.
    _, temp_pwm, temp_sample, temp_importance_scores = scrambler_model.predict_on_batch(x=[
        np.tile(new_x_test[data_ix:data_ix+1], (batch_size, 1, 1, 1)),
        np.tile(drop_test[data_ix:data_ix+1], (batch_size, 1, 1, 1)),
        s_test
    ])

    pwm_test.append(temp_pwm[:1, :])
    sample_test.append(temp_sample[:1, :])
    importance_scores_test.append(temp_importance_scores[:1, :])

pwm_test = np.concatenate(pwm_test, axis=0)
sample_test = np.concatenate(sample_test, axis=0)
importance_scores_test = np.concatenate(importance_scores_test, axis=0)
# +
# Visualize a few reconstructed sequence patterns and compare predictor
# outputs for the original vs scrambled sequences.
from sequence_logo_helper import plot_dna_logo

save_figs = True
fig_name = model_name[model_name.index("target_bits"):] + "_human_examples"

for plot_i in range(0, new_x_test.shape[0]) :
    print("Test sequence " + str(plot_i) + ":")
    # Reshape the one-hot example to the (1, 50, 4) layout the predictor expects.
    justPred = np.expand_dims(np.expand_dims(new_x_test[plot_i, 0, :, :], axis=0), axis=-1)
    justPredReshape = np.reshape(justPred, (1,50,4))
    # Same layout for the n_samples scrambled samples of this example.
    expanded = np.expand_dims(sample_test[plot_i, :, 0, :, :], axis=-1)
    expandedReshape = np.reshape(expanded, (n_samples, 50,4))
    y_test_hat_ref = predictor.predict(x=justPredReshape, batch_size=1)[0][0]
    # Only the first 10 scrambled-sample predictions are printed.
    y_test_hat = predictor.predict(x=[expandedReshape], batch_size=32)[:10, 0]
    print(" - Prediction (original) = " + str(round(y_test_hat_ref, 2))[:4])
    print(" - Predictions (scrambled) = " + str([float(str(round(y_test_hat[i], 2))[:4]) for i in range(len(y_test_hat))]))
    plot_dna_logo(new_x_test[plot_i, 0, :, :], sequence_template=sequence_template, figsize=(12, 1), plot_start=0, plot_end=50, save_figs=save_figs, fig_name=fig_name + "_" + str(plot_i) + "_orig_sequence")
    plot_dna_logo(pwm_test[plot_i, 0, :, :], sequence_template=sequence_template, figsize=(12, 1), plot_start=0, plot_end=50, save_figs=save_figs, fig_name=fig_name + "_" + str(plot_i) + "_scrambled_pwm")
# -
| analysis/optimus5/train_scrambler_optimus5_target_bits_0125_weight_1_gumbel_drop_net_multi_scale_50_epochs_human.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Question 1
# X의 확률 분포가 다음과 같을 때, 중앙값과 평균, 최빈값을 찾아라
# a) $P(X) = (\frac 1 2)^x, ~~ for~~ x = 1,2,3... zero~~elsewhere$
# b) $f(x) = 12x^2 (1-x), ~0<x<1,~~zero~elsewhere$
# * * *
# ### Question 2
# 미국식 룰렛 게임을 다음과 같은 규칙으로 실시한다고 하자. 기대 이익을 구하여라.
# 1. 빨강 또는 검정에 배팅한다. 각 배팅에서 승리할 확률은 50%이며, 승리할 시 배팅한 돈의 두 배를 돌려준다.
# 2. 첫 게임에 10,000원을 배팅하고 게임에서 지면 두 번째 게임에 20,000원을 배팅하고 또 진다면 다음에는 40,000원을 배팅한다.
# 3. 한 번이라도 이기면 배팅을 중단한다.
# 4. 최대 5번만 배팅한다. 즉, 5번째 게임 후에 게임 결과에 상관없이 배팅을 중단한다.
# * * *
# ### Question 3
# $E(X) = 5$이고 $E(X(X-1)) = 27.5$일 때 다음 물음에 답하라.
# a) $E(X^2)$와 $Var(X)$를 구하라.
# b) $E(X)$, $E(X(X-1))$, $Var(X)$의 관계를 밝혀라.
| Questions/Chapter2/2.1. Expectations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# First, let's get the latest installations of our dependencies
# !pip install --upgrade pip
# !pip install boto3 --upgrade
# !pip install -U botocore
# # Environment Setup
#
# We need to set up the following data:
#
# REGION - Region to call A2I.
# BUCKET_NAME - A S3 bucket accessible by the given role
# Used to store the input files and output results
# Must be within the same region A2I is called from
# WORKTEAM_ARN - To create your Private Workteam, visit the instructions here: https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-private.html After you have created your workteam, replace \<YOUR-WORKTEAM-ARN> below
# ROLE - The IAM role used as part of StartHumanLoop. By default, this notebook will use the execution role. You can learn more about IAM Policies here https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
#
#
# +
# Region, S3 bucket, and private workteam used throughout the notebook.
REGION = 'us-east-1'
BUCKET_NAME = 'comprehend-data-label'
WORKTEAM_ARN= "<enter workteam arn>"  # replace with your private workteam ARN
# +
from sagemaker import get_execution_role
import sagemaker

# Setting Role to the default SageMaker Execution Role
ROLE = get_execution_role()
display(ROLE)
# +
#Setup Bucket and Paths
import os
import boto3
import botocore

# NOTE(review): sess is created but never used later in this notebook.
sess = sagemaker.Session()
# -
# # Client Setup
#
# Let's setup the clients for Amazon S3, Amazon SageMaker A2I Runtime and Amazon Comprehend.
#
# +
import boto3
import io
import json
import uuid
import botocore
import time
import botocore
# Amazon SageMaker client
sagemaker = boto3.client('sagemaker', REGION)
# Amazon Comprehend client
comprehend = boto3.client('comprehend', REGION)
# S3 client
s3 = boto3.client('s3', REGION)
# A2I Runtime client
a2i_runtime_client = boto3.client('sagemaker-a2i-runtime', REGION)
# +
import pprint
# Pretty print setup
pp = pprint.PrettyPrinter(indent=2)
# Function to pretty-print AWS SDK responses
def print_response(response):
if 'ResponseMetadata' in response:
del response['ResponseMetadata']
pp.pprint(response)
# -
# # Sample Data
#
# Let's create some sample text that we would test our translation with and store it in S3.
#
# +
translation_text = """
Just then another visitor entered the drawing room: <NAME>, the little princess’ husband. He was a very handsome young man, of medium height, with firm, clearcut features. Everything about him, from his weary, bored expression to his quiet, measured step, offered a most striking contrast to his quiet, little wife. It was evident that he not only knew everyone in the drawing room, but had found them to be so tiresome that it wearied him to look at or listen to them. And among all these faces that he found so tedious, none seemed to bore him so much as that of his pretty wife. He turned away from her with a grimace that distorted his handsome face, kissed <NAME>’s hand, and screwing up his eyes scanned the whole company.
"""
key = "input/test.txt"
s3.put_object(Bucket=BUCKET_NAME, Key=key, Body=translation_text)
# -
# # Create Control Plane Resources
# Create a Worker Task Template
#
# Create a human task UI resource, giving a UI template in liquid html. This template will be rendered to the human workers whenever human loop is required.
#
# For over 70 pre built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis.
#
# We will be taking translation review and correction UI and filling in the object categories in the labels variable in the template.
#
# Worker task template (Liquid HTML) rendered to human reviewers by A2I.
# Fix: the original string never closed the <crowd-entity-annotation>
# element; the closing tag is added so the custom element is well-formed.
template = """
<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>

<crowd-entity-annotation
    name="entities"
    header="Highlight parts of the text below"
    labels="{{ task.input.labels | to_json | escape }}"
    text="{{task.input.taskObject}}"
>
    <full-instructions header="Named entity recognition instructions">
        <ol>
            <li><strong>Read</strong> the text carefully.</li>
            <li><strong>Highlight</strong> words, phrases, or sections of the text.</li>
            <li><strong>Choose</strong> the label that best matches what you have highlighted.</li>
            <li>To <strong>change</strong> a label, choose highlighted text and select a new label.</li>
            <li>To <strong>remove</strong> a label from highlighted text, choose the X next to the abbreviated label name on the highlighted text.</li>
            <li>You can select all of a previously highlighted text, but not a portion of it.</li>
        </ol>
    </full-instructions>

    <short-instructions>
        Apply labels to words or phrases.
    </short-instructions>
</crowd-entity-annotation>
"""
# # Create a Worker Task Template Creator Function
#
# This function is a higher-level abstraction over the SageMaker package's method to create the Worker Task Template, which we will use in the next step to create a human review workflow.
#
def create_task_ui(task_ui_name, template):
    """Register a Human Task UI resource with SageMaker.

    Uses the module-level `sagemaker` boto3 client configured earlier.

    Parameters:
        task_ui_name: unique (per account/region) name for the task UI.
        template: Liquid-HTML worker template shown to reviewers.

    Returns:
        The SageMaker response dict containing 'HumanTaskUiArn'.
    """
    ui_definition = {'Content': template}
    return sagemaker.create_human_task_ui(
        HumanTaskUiName=task_ui_name,
        UiTemplate=ui_definition,
    )
# +
# Task UI name - this value is unique per account and region. You can also provide your own value here.
taskUIName = 'a2i-comprehend-test-12-ue-1'

# Create task UI and keep its ARN for the flow definition below.
humanTaskUiResponse = create_task_ui(taskUIName, template)
humanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn']
print(humanTaskUiArn)
# -
# # Creating the Flow Definition
#
# In this section, we're going to create a flow definition definition. Flow Definitions allow us to specify:
#
# The workforce that your tasks will be sent to.
# The instructions that your workforce will receive. This is called a worker task template.
# Where your output data will be stored.
#
# This demo is going to use the API, but you can optionally create this workflow definition in the console as well.
#
# For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.
#
def create_flow_definition(flow_definition_name):
    """Create an A2I Flow Definition resource and return its ARN.

    Relies on the module-level ROLE, WORKTEAM_ARN, humanTaskUiArn and
    BUCKET_NAME values configured earlier in this notebook.
    """
    human_loop_config = {
        "WorkteamArn": WORKTEAM_ARN,
        "HumanTaskUiArn": humanTaskUiArn,
        "TaskCount": 1,
        "TaskDescription": "Please review the entities and labels done using Amazon Comprehend and make corrections and improvements.",
        "TaskTitle": "Review and Improve entity."
    }
    output_config = {"S3OutputPath": "s3://" + BUCKET_NAME + "/"}

    response = sagemaker.create_flow_definition(
        FlowDefinitionName=flow_definition_name,
        RoleArn=ROLE,
        HumanLoopConfig=human_loop_config,
        OutputConfig=output_config,
    )
    return response['FlowDefinitionArn']
# # Now we are ready to create our flow definition
# +
# Flow definition name - this value is unique per account and region. You can also provide your own value here.
uniqueId = str(uuid.uuid4())
flowDefinitionName = f'comprehend-a2i-{uniqueId}'

flowDefinitionArn = create_flow_definition(flowDefinitionName)
print(flowDefinitionArn)
# -
# # Data Load
# +
# Get file from S3 and load it into a variable (ignore undecodable bytes)
file_contents = s3.get_object(Bucket=BUCKET_NAME, Key=key)['Body'].read().decode("utf-8", 'ignore')

# Get just the filename without prefix or suffix
fileName = key[key.rindex('/')+1:key.rindex('.')]
print(fileName)
# -
# # Comprehend Documents
#
# Now that we have the Human Review Workflow set up, we can comprehend our documents and pass them over to a Human Loop for review.
#
# +
# Build the human-loop input: one task object (sentence) plus one label
# string per sentence, matching what the worker template expects.
humanLoopInput = {
    'SourceLanguage' : 'English',
    'sourceLanguageCode':'en',
    'rowCount': 0,
    'labels' : [],
    'taskObject':[],
    'bucketName': BUCKET_NAME,
    'keyName': key
}

# NOTE(review): translatedText is never populated anywhere; the S3 write in
# the next cell stores an empty object - confirm intended.
translatedText = ''
rowCount = 0
print('Splitting file and performing translation')
textvalues=[]
# split the body by period to get individual sentences
for sentence in file_contents.split('.'):
    if len(sentence.lstrip()) > 0:
        # detect entities in this sentence with Amazon Comprehend
        comprehend_response = comprehend.detect_entities(
            Text=sentence + '.',
            LanguageCode='en')
        entities = comprehend_response['Entities']
        textvalues=[]
        for s in entities:
            textvalues.append(s.get("Type"))
        # NOTE(review): set(textvalues) discards its result, so duplicate
        # entity types are NOT removed - confirm whether dedup was intended.
        set(textvalues)
        # Semicolon-separated list of entity types for this sentence.
        str1 = ';'.join(textvalues)
        originalText = sentence + ' '  # NOTE(review): assigned but never used.
        labels={ 'label':str1}
        taskObject = {
            'originalText': sentence + '.'
        }
        #humanLoopInput['taskObject'].append(taskObject)
        rowCount+=1
        humanLoopInput['taskObject'].append(taskObject)
        humanLoopInput['labels'].append(labels)

print(humanLoopInput)
humanLoopInput['rowCount'] = rowCount
# +
# Start the human review loop with the assembled input; the millisecond
# timestamp keeps the loop name unique per run.
humanLoopName = 'Comprehend-A2I-Text' + str(int(round(time.time() * 1000)))
print('Starting human loop - ' + humanLoopName)

response = a2i_runtime_client.start_human_loop(
    HumanLoopName=humanLoopName,
    FlowDefinitionArn= flowDefinitionArn,
    HumanLoopInput={
        'InputContent': json.dumps(humanLoopInput)
    }
)

# write the machine translated file to S3 bucket.
# NOTE(review): translatedText is still the empty string at this point, so
# this writes an empty object - confirm intended.
targetKey = ('machine_output/MO-{0}.txt').format(fileName)
print ('Writing translated text to '+ BUCKET_NAME + '/' + targetKey)
s3.put_object(Bucket=BUCKET_NAME, Key=targetKey, Body=translatedText.encode('utf-8'))
# -
# # Check Status of Human Loop
#
# Let's define a function that allows us to check the status of Human Loop progress.
#
#
# +
# Poll the human loop once and show its status and output destination.
resp = a2i_runtime_client.describe_human_loop(HumanLoopName=humanLoopName)
print(f'HumanLoop Name: {humanLoopName}')
print(f'HumanLoop Status: {resp["HumanLoopStatus"]}')
print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}')
print('\n')

humanLoopStatus = resp["HumanLoopStatus"]
outputFilePath = resp["HumanLoopOutput"]
# -
# # Wait For Work Team to Complete Task
# Derive the workteam name from the last ARN segment and print the portal URL.
workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:]
print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
# # Check Status of Human Loop Again and process Task Results
#
# Once the Human Loop Status has changed to completed, you can post process the results to build the final file, with Human Reviewed corrections, for future use.
#
# +
# Re-check the loop; once completed, fetch the A2I output JSON from S3 and
# write a post-edit object back to the bucket.
resp = a2i_runtime_client.describe_human_loop(HumanLoopName=humanLoopName)
humanLoopStatus = resp["HumanLoopStatus"]
outputFilePath = resp["HumanLoopOutput"]['OutputS3Uri']

if humanLoopStatus == "Completed":
    # Remove s3:// from S3 File Path
    outputFilePath = outputFilePath.replace("s3://", "")
    # recreate the output text document, including post edits.
    # First path segment is the bucket; the rest is the object key.
    tmsFile = s3.get_object(Bucket=outputFilePath.split('/')[0],
                            Key="/".join(outputFilePath.split('/')[1:]))['Body'].read()
    tmsFile = json.loads(tmsFile.decode('utf-8'))
    inputContent = tmsFile['inputContent']
    rowcount = inputContent['rowCount']
    answerContent = tmsFile['humanAnswers'][0]['answerContent']
    # NOTE(review): editedContent is never filled from answerContent, so the
    # "post edits" object written below is empty - confirm this is a stub.
    editedContent = ''
    # extract the file name
    targetKeyName = inputContent['keyName']
    targetKeyName = targetKeyName[targetKeyName.index('/') + 1: len(targetKeyName)]
    # save the file.
    s3.put_object(Bucket=BUCKET_NAME,
                  Key='post_edits/PO-{0}'.format(targetKeyName),
                  Body=editedContent.encode('utf-8'))
    print("Output File successfully stored in s3://{0}/post_edits/PO-{1}".format(BUCKET_NAME,targetKeyName))
elif humanLoopStatus == "InProgress":
    print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
    print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
# -
| ComprehendEntity- A2I(1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import statsmodels.api as sm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.signal import coherence,periodogram
import sys
sys.path.append('../utils/')
from series_plot import plot_multi_series

# Load the sensor log and pull four channels as float arrays.
# Row 0 is skipped (presumably a units/sub-header row - TODO confirm) and
# the series are truncated to the first ~1300 samples.
data = pd.read_csv('../data/sample_data.csv')
data.keys()

d_js = np.array(list(map(float, data['[46] No.2 Shield Jack Speed'].tolist()[1:1300])))
d_sp = np.array(list(map(float, data['Soil Press Ave'].tolist()[1:1300])))
d_st = np.array(list(map(float, data['[2496] [c]Screw Torque'].tolist()[1:1300])))
d_tf = np.array(list(map(float, data['[9] Total Thrust Force'].tolist()[1:1300])))

# Plot all four series together with the project-local helper.
plot_multi_series([d_js,d_sp,d_st,d_tf],title_names=["Shield Jack Speed",
                                                     'Soil Press Ave',
                                                     'Screw Torque',
                                                     'Total Thrust Force'])
# Cross-correlation between soil pressure and total thrust force.
ccf1 = sm.tsa.stattools.ccf(d_sp,d_tf,unbiased=False)
plt.plot(ccf1[:100])

# First difference of jack speed via a circular shift.
# NOTE(review): np.roll wraps the last sample to the front, so element 0 is
# d_js[0] - d_js[-1], not a true first difference - confirm intended.
d_js_diff = d_js-np.roll(d_js,1)

# Periodogram (power spectral density) of the differenced series.
f, Pxx_den = periodogram(d_js_diff)
plt.semilogy(f, Pxx_den)
plt.ylim([1e-7, 1e3])
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.show()

# Coherence between soil pressure and thrust force (unit sample rate).
f, Cxy = coherence(d_sp, d_tf,fs=1)
plt.semilogy(f, Cxy)
plt.xlabel('frequency [Hz]')
plt.ylabel('Coherence')
plt.show()

# ACF / PACF of the differenced jack speed, used to pick ARMA orders below.
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(d_js_diff, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(d_js_diff, lags=40, ax=ax2)
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
    """Run an augmented Dickey-Fuller unit-root test and print a summary.

    Prints the ADF statistic, p-value, lags used, sample size, and the
    critical values at each significance level.
    """
    print('Results of Dickey-Fuller Test:')
    adf_result = adfuller(timeseries, autolag='AIC')
    summary = pd.Series(
        adf_result[0:4],
        index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'],
    )
    for crit_label, crit_value in adf_result[4].items():
        summary['Critical Value (%s)' % crit_label] = crit_value
    print(summary)
# Stationarity check on the differenced series before fitting.
test_stationarity(d_js_diff)

# Fit an ARMA(2,2) to the differenced jack speed.
# NOTE(review): sm.tsa.ARMA was removed in newer statsmodels releases in
# favour of ARIMA/SARIMAX - confirm the pinned statsmodels version.
arma_mod20 = sm.tsa.ARMA(d_js_diff, (2,2)).fit(disp=False)
print(arma_mod20.summary())

resid = arma_mod20.resid
print (len(arma_mod20.predict(0,1000)))

# Residual plot.
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax = plt.plot(resid);

# QQ-plot of residuals against a fitted normal.
from statsmodels.graphics.api import qqplot
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)

# Full-length (untruncated) series for comparison past the training window.
d_js_raw = np.array(list(map(float, data['[46] No.2 Shield Jack Speed'].tolist()[1:])))
d_js_diff_raw = d_js_raw-np.roll(d_js_raw,1)

# +
# Compare in-sample predictions with the raw differences around the edge
# of the ~1300-sample fitting window.
start,end = 1290,1305
plt.plot(d_js_diff_raw[start:end])
plt.plot(arma_mod20.predict(0,1500,dynamic=False)[start:end])
plt.show()
# -
arma_mod20.predict(1200,1305,dynamic=False)
| core/time_series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pokemon GO shiny rates: a Bayesian perspective
# [The Silph Road](https://thesilphroad.com/) is the largest online and in-person network of Pokemon GO players and researchers. We investigate the question of how accurate their proposed shiny rates are by putting on our Bayesian hat, setting the "consensus" shiny rate as our prior, and using Silph field studies as observed data.
#
# ## Background: Silph, shinies, and statistics
# The Silph Road organizes regional groups of Pokemon GO players, sets up in-person tournaments, and conducts field studies to learn about game mechanics of Pokemon GO. Of particular interest to us here is the *shiny rate*, which is the probability that a Pokemon found in the wild will be shiny (for non-Pokemon players, this just means it's rare and specially coloured; it's like a trophy). Though not publicized by the game developer Niantic, this rate has been of great interest to Pokemon GO players (after all, shinies are not too far off from loot boxes).
#
# Silph publishes [field studies](https://thesilphroad.com/science/oddish-shiny-rates/) to determine shiny rates, and these studies have resulted in two consensus rates: one "standard" rate of 1/450 (used for the vast majority of Pokemon), and one "boosted" rate of 1/150 (used during certain events). Recently, however, those rates have been [called into question](https://old.reddit.com/r/TheSilphRoad/comments/dd79zk/its_time_to_rethink_the_assumed_shiny_rates_from/) on the Silph subreddit, saying that they are not consistent with the collected data. I am going to re-examine these findings from a Bayesian perspective.
# ## Methodology
# I went through the Silph archives looking for their shiny rate publications posted this year, and gathered them into a file `rates.csv`. The null rows in this file were the result of Silph not reporting their exact numbers (e.g., see [Spoink](https://thesilphroad.com/science/lunar-new-year-boosted-lucky-rates/) ("over 16,500 Spoink") and [Adventure Week](https://thesilphroad.com/science/quick-discovery/adventure-week-shiny-rates/) ("over 30,000 encounters each")). I chose to keep these in the dataset in case someone asks "what happened?" Additionally, the presence of two rows from the Gligar event were the result of an apparent change in the shiny rate after ~24 hours, which I am taking to be fact.
# +
import pandas as pd

# Hand-collected Silph field-study results; rows with missing exact counts
# are deliberately kept as nulls (see the Methodology notes above).
rates = pd.read_csv("rates.csv")
rates.sample(5)
# -
# Let's compute the "rarity", defined as `n_encounters / n_shinies`. A rarity R means that we saw shinies with a rate of 1 in R.
# Rarity R means shinies were observed at a rate of 1 in R.
rates["rarity"] = rates["n_encounters"] / rates["n_shiny"]
# Drop the rows where exact counts were not reported.
rates = rates.dropna()
rates.sample(5)
# Domain knowledge tells us that there are three classes of shiny rates here: a highly boosted one (around 1 in 60, for Alolan Exeggutor and Meltan), one boosted one (which Silph claims to be 1 in 150), and one normal one (which Silph claims to be 1 in 450). We can use this to partition the dataset manually, discarding the highly boosted samples because they're not relevant to this debate.
# Partition by observed rarity: ~1-in-70 to 1-in-200 is the "boosted" class,
# rarer than 1-in-200 is "standard"; highly boosted species (<70) fall
# outside both filters and are dropped.
boosted = rates[rates["rarity"].between(70, 200)].sort_values("date").reset_index(drop=True)
unboosted = rates[rates["rarity"] > 200].sort_values("date").reset_index(drop=True)
boosted
unboosted
# Let's start with the proposed boosted shiny rate of 1 in 150. We'll come back to the standard one later.
# ## The boosted shiny rate: the Bayesian way
# Frequentist statistics would construct a confidence interval on these rates--it's a simple proportions test--and call it a day. Indeed, that's what both Silph (see every publication they put out) and [critics of Silph](https://old.reddit.com/r/TheSilphRoad/comments/dd6ln1/world_wide_oddish_shiny_rates/f2egcsx/) have done. After constructing this confidence interval, we simply check if 1/150 lies within it.
#
# But we can do better than this yes/no response. Given that we believe that the boosted shiny rate is 1 in 150, the Bayesian way of thinking provides us with a natural way of incorporating this into our analysis: as a prior.
# +
import arviz as az
import pymc3 as pm

# Use the fivethirtyeight matplotlib style for all ArviZ plots.
az.style.use("fivethirtyeight")
# -
# ### Setting priors
# Let's use a [Beta](https://en.m.wikipedia.org/wiki/Beta_distribution) prior over p, since a Beta can be used as a distribution over probabilities. Using the [success rate interpretation](https://stats.stackexchange.com/a/47782) of the Beta, our prior will be
# fairly weak: equivalent to having seen 10 shinies in 1500 encounters. Put otherwise, our prior is that anything between 1 in 100 and 1 in 300 is plausible.
#
# We'll add a second variable, rarity, which is 1 / p as defined before. This makes it easier to use phrases like "1 in 150" or "1 in N," and is more intuitive when talking about extremely small probabilities. Through the rest of this document, we'll mostly focus on the plots of the rarity.
# +
# Prior: Beta(10, 1490) over p - as informative as having seen 10 shinies
# in 1500 encounters, centred near 1/150 but allowing ~1/100 to ~1/300.
with pm.Model() as model:
    p = pm.Beta("p", alpha=10, beta=1490)
    # rarity = 1/p, the "1 in N" reading of the shiny probability.
    rarity = pm.Deterministic("rarity", 1. / p)
    prior_samples = pm.sample_prior_predictive(samples=10000, model=model)
# -

# Visualize the prior over both parameterizations.
axes = az.plot_density(
    prior_samples,
    var_names=["p", "rarity"],
    point_estimate=None,
    credible_interval=0.99,
    shade=0.5,
    figsize=(12, 4),
)
# From this, we can see that while 1/150 is at the center of our prior beliefs, we wouldn't be surprised with a rarity of 1 in 100 or 1 in 200 either. This is without having collected any data--if *all* we had heard was "the shiny rate is 1 in 150," but we weren't sure about that 150 number, this plot represents a plausible range of values.
# ### Adding data
# One advantage of the Bayesian approach is that it lets us add as much or as little data as we have. We will demonstrate how our beliefs in the shiny rate change over time as we show our model more data (i.e., as we progress through time and have more shinies released).
# +
from typing import Tuple
def encounters_and_shiny(df: pd.DataFrame, species_name: str) -> Tuple[float, float]:
    """Look up a species row and return its (n_encounters, n_shiny) pair."""
    matching_rows = df[df["name"] == species_name]
    first_match = matching_rows.iloc[0]
    return (first_match["n_encounters"], first_match["n_shiny"])
# Sanity checks against known rows of the field-study tables.
assert encounters_and_shiny(boosted, "sneasel") == (1588, 13)
assert encounters_and_shiny(unboosted, "sentret") == (19297, 54)
# -
# Because each encounter is independently shiny with probability p, a binomial distribution is appropriate for modeling the number of shinies we see. We will use Markov Chain Monte Carlo to learn the likely distributions over our parameters (shiny rate and rarity). In lay terms, we will try to infer a distribution of most probable values for those parameters, little by little as we see more data. We'll start with just Bronzor.
# Observe Bronzor's counts under a Binomial likelihood and sample the
# posterior with 4 MCMC chains.
with model:
    n_encounters, n_shiny = encounters_and_shiny(boosted, "bronzor")
    bronzor = pm.Binomial("bronzor", n=n_encounters, p=p, observed=n_shiny)
    trace = pm.sample(1000, chains=4)

_ = az.plot_trace(trace)
# This plot represents what we might have believed in February 2019, after seeing 15 out of 2479 shinies for Bronzor. The left curves represent the likely ranges for the shiny rate p and the rarity 1-in-N. For those unfamiliar with MCMC, ignore the fuzzy-caterpillar-like plots on the right; for those familiar with it, this model exhibits excellent sampling behavior.
#
# Notice how we're already seeing that these distributions are a little bit tighter. We see virtually no likelihood of the rate being 1 in 300 now, but earlier we did. Meanwhile, 1 in 150 remains a highly likely shiny rate given our limited data.
#
# Let's add the next Pokemon we had an event for, Horsea.
# Add Horsea's observed counts to the same model and re-sample.
with model:
    n_encounters, n_shiny = encounters_and_shiny(boosted, "horsea")
    horsea = pm.Binomial("horsea", n=n_encounters, p=p, observed=n_shiny)
    trace = pm.sample(1000, chains=4)

_ = az.plot_trace(trace)
# Because we observed a rate of 1 in 114 for Horsea, the likelihood for the rarity has now shifted much further left. It is now almost entirely implausible for the shiny rate to be any lower than 1 in 200, and even 1 in 150 is starting to look unlikely.
#
# The next shiny released was Nidoran (male).
# Add male Nidoran's observed counts and re-sample.
with model:
    n_encounters, n_shiny = encounters_and_shiny(boosted, "nidoran_m")
    nidoran_m = pm.Binomial("nidoran_m", n=n_encounters, p=p, observed=n_shiny)
    trace = pm.sample(1000, chains=4)

_ = az.plot_trace(trace)
# Nidoran's observed rarity was 1 in 107 over 5700 encounters, shifting our rarity curve even further left, and now it's becoming more clear that 1 in 150 is a pretty unlikely shiny rate. Let's do this one more time for Sneasel.
# Add Sneasel's observed counts and re-sample.
with model:
    n_encounters, n_shiny = encounters_and_shiny(boosted, "sneasel")
    sneasel = pm.Binomial("sneasel", n=n_encounters, p=p, observed=n_shiny)
    trace = pm.sample(1000, chains=4)

_ = az.plot_trace(trace)
# At this point (perhaps earlier) I would feel confident saying that the shiny rate, whatever it is, is not 1 in 150. The Sneasel event happened in July 2019, and I'm writing this in October, so clearly that wasn't enough for the Pokemon GO community. Fortunately, four more events happened between then and now, and we can pass them all at once.
# Add the four remaining boosted-rate events at once and re-sample.
with model:
    n_encounters, n_shiny = encounters_and_shiny(boosted, "poliwag")
    poliwag = pm.Binomial("poliwag", n=n_encounters, p=p, observed=n_shiny)

    n_encounters, n_shiny = encounters_and_shiny(boosted, "gligar_later")
    gligar = pm.Binomial("gligar", n=n_encounters, p=p, observed=n_shiny)

    n_encounters, n_shiny = encounters_and_shiny(boosted, "yanma")
    yanma = pm.Binomial("yanma", n=n_encounters, p=p, observed=n_shiny)

    n_encounters, n_shiny = encounters_and_shiny(boosted, "oddish")
    oddish = pm.Binomial("oddish", n=n_encounters, p=p, observed=n_shiny)

    trace = pm.sample(1000, chains=4)

_ = az.plot_trace(trace)
# We can confidently say that **it is extremely unlikely that the boosted shiny rate is 1 in 150.** It is much more plausible that the rate is in the neighborhood of 1 in 120, as 150 hasn't even registered on our posterior plot of the rarity.
#
# Notice how natural a fit the Bayesian way of thinking was: we have some prior beliefs (that the rate is 1 in 150), and some data (the Silph studies), and we can marry the two together to get a posterior (the plot we see above). It's clear that the data do not support our prior beliefs, but that's okay; we're researchers, and that's how this is supposed to work.
# ## The normal shiny rate (supposedly 1 in 450)
# Let's look next at the normal shiny rate, which is supposedly 1 in 450. For brevity's sake, I won't take us through the step-by-step process again, but rather pass all the data at once.
# +
# Standard-rate model: Beta(10, 4490) prior over p, centred near 1/450
# (equivalent to having seen 10 shinies in 4500 encounters).
with pm.Model() as model:
    p = pm.Beta("p", alpha=10, beta=4490)
    rarity = pm.Deterministic("rarity", 1. / p)
    prior_samples = pm.sample_prior_predictive(samples=10000, model=model)
# -

# Visualize the prior over both parameterizations.
axes = az.plot_density(
    prior_samples,
    var_names=["p", "rarity"],
    point_estimate=None,
    credible_interval=0.99,
    shade=0.5,
    figsize=(12, 4),
)

# Our prior is again relatively uninformative because we're not very confident in the particular value of 1 in 450. Let's add the data.

# Observe every unboosted species at once and sample the posterior.
with model:
    for name in unboosted.name.values:
        n_encounters, n_shiny = encounters_and_shiny(unboosted, name)
        _ = pm.Binomial(name, n=n_encounters, p=p, observed=n_shiny)
    trace = pm.sample(2000, chains=4)

_ = az.plot_trace(trace)
# Here, too, we see that the prior belief of 1 in 450 is no longer likely after considering our data. According to the Silph studies, the true shiny rate is likely between 1 in 350 and 1 in 425.
#
# To make things more complex: there's growing consensus that the true shiny rate is *rarer* than 1 in 450, not more common, which is the opposite of what this data says. See this [Reddit comment](https://old.reddit.com/r/TheSilphRoad/comments/dd79zk/its_time_to_rethink_the_assumed_shiny_rates_from/f2ey7wv/) for an example; using an unnamed dataset, this user and others in that thread feel confident in a 1 in 500 rate. The comment chain discusses why the Silph shiny data may be biased to making shinies appear more common, and it's very possible that this is the case. **These methods are only as good as our data is;** if our data is flawed, our conclusions will be too.
# ## Summary and highlights
# We re-analyzed the Silph shiny data and found that both of the assumed shiny rates--the normal rate of 1 in 450 and the boosted rate of 1 in 150--are almost certainly inaccurate given the data. The boosted shiny rate is probably closer to 1 in 120; this has been corroborated by another user on Reddit. The unboosted shiny rate, given the data, is apparently closer to 1 in 380; however, another independent analysis has found the opposite, so more research is needed.
#
# Let's draw attention to **how natural the Bayesian way of thinking was.** Given how people were framing this problem--"is the rate of 1 in 150 accurate?"--the Bayesian way let us incorporate this belief naturally into our analysis. We set that rate as a prior, then looked at how well the data supported it (in this case, it did not), then drew conclusions from this posterior. That's not to say the frequentist paradigm doesn't make sense; rather that in cases like this, we can draw more informative conclusions than before, and we can easily incorporate multiple events' worth of data.
#
# Finally: to Silph's credit, they are a group of volunteer researchers doing the best that they can. They have done as much (or more) for the Pokemon GO community as anyone else has. Their treatment of statistics is far better than the average person's, and I greatly appreciate the work that they have done to shed light upon the game mechanics and (more importantly!) educate the community. This analysis, and others like it, would not be possible without their work.
| analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2>Dataframes, Manipulation and Pandas, Oh My!</h2>
# <p>In this workshop, we are going to learn how to use pandas, a data science library in python that allows us to load csv files into useful dataframes. Dataframes are like spreadsheets with more functionality, letting us load large amounts of data and easily clean and analyze the data.</p>
# <p>First, we need to import the pandas library. We are also going to import pyplot, a library that we will later use to visualize our data. The "as" statements are so we can rename our libraries so we can more easily call them when we need them</p>
#
import pandas as pd
import matplotlib.pyplot as plt
# <h3>Loading a CSV Into a Dataframe</h3>
# <p>Now that we have imported pandas, we can create our first dataframe. We do this with the read_csv() function. In addition to calling read_csv() from the pandas library and assigning it to a variable, you also need to write the file path in the parentheses.</p>
# <p>To make sure we have loaded the csv into a dataframe correctly, we can call the .head() function on our dataframe. This outputs a nice, spreadsheet-like visualization of the dataframe. You can also modify how many rows you want to display by writing how many you want to see in the parentheses. The default number of rows is 5. </p>
# Load the raw CSV into a DataFrame; the relative path assumes the notebook
# is run from its own directory.
nw_df = pd.read_csv("../data/raw/nw.csv")
nw_df.head(10)  # preview the first 10 rows
# <h3>Dataframe Basics</h3>
# <p>We can select columns in the dataframe by writing df['col_name'] like below</p>
nw_df['Person ID']  # single-column selection returns a pandas Series
# We can select rows in the dataframe by using slicing indices. This looks like df[beginning:end] (note that the ending index is not inclusive, and indices start at 0).
nw_df[0:5]  # row slicing: rows 0-4 (the end index is exclusive)
# Slicing dataframes works just like lists in Python, so you can also get the first five rows with [:5].
nw_df[:5]  # same as [0:5] -- an omitted start index defaults to 0
# You can also combine row slicing with column selection (order does not matter). For instance, we can get the last five ages of offense like below
nw_df['Age at Offense'][-5:]  # last five values of the selected column
# Finally, we can select multiple columns. If we want the age at offense at disposition of the first 10 entries in the dataframe we can call
nw_df[['Age at Offense', 'Disposition']][:10]  # two columns, first 10 rows
# <h3>Renaming Columns</h3>
# <p>We can also rename the columns in a dataframe. This can help to clarify your data for downstream analysis. To do this, we can call df.rename(columns={'old_name_1':'new_name_1', 'old_name_2':'new_name_2'})</p>
# NOTE(review): rename() returns a *new* DataFrame; `nw_df` itself is left
# unchanged here (pass inplace=True or reassign to persist the rename).
nw_df.rename(columns={'Dispo Date': 'Disposition Date'})
# <h3>Describing the Dataframe</h3>
#
# <p> As you can imagine, these dataframes can be very unwieldy when you first load them. In order to get some summary data on our data, let's call the .describe() function.</p>
nw_df.describe()  # summary statistics for the numeric columns only
# <p>Great! We now have some descriptive statistics on our data and can start to make sense of what our data is saying. Except, if you look a little closer, there are still some problems. For one, we are only seeing a description of 2 columns of our data while the original file has 10. Also, the minimum age of offense is -242 years old, which doesn't make much sense. Let's get a broader overview of our data using the .info() function</p>
nw_df.info()  # per-column dtype and non-null counts
# <h3>Dealing With Incomplete Entries</h3>
# <p>Now we have a broader overview of the structure of our dataframe, with each row in the info() output describing a column of the dataframe. Here we can see that only two of the columns are numbers, which is why we only had two sets of descriptive statistics. We can also see that not every row is complete, since the number of non-null entries for each row is not equal among the columns (null meaning that there is no data for that column).</p>
#
# <p>Incomplete data entries are often a problem for data analysis. For instance, we are missing ~4000 dispo date entries. If the dispo date data is not useful for your data analysis, you can largely ignore this. But often we want to clean our data before we start analysis, and incomplete entries can throw errors down the line. There are many approaches you can take to filling these entries in, but the easiest approach is just to delete all entries that are not complete. We are going to do this with the .dropna() function. We are also going to pass the argument "inplace = True" so that it will modify the existing dataframe and we don't have to create a new one.</p>
#
#
# +
# Count the rows before cleaning so we can report how much data we discard.
num_raw_entries = len(nw_df.index)
# Drop every row with at least one missing value; inplace=True mutates
# `nw_df` directly instead of returning a copy.
nw_df.dropna(inplace=True)
nw_df.info()
num_complete_entries = len(nw_df.index)
percent_null = (1.0 - num_complete_entries / num_raw_entries) * 100
print(f"The raw data was {percent_null:.2f}% incomplete")
# -
# <p>Perfect, now we only have complete data entries! As a disclaimer, completely dropping incomplete entries is not always the correct approach. Luckily for us, incomplete entries only represented 6.15% of our original data, so we still have a lot to work with. Let's look at the descriptive statistics on the modified data set to see if it drastically changed our data.</p>
nw_df.describe()  # re-check the summary statistics after dropping null rows
# <p>Looks like most of the metrics are the same, so our modification did not affect the data much</p>
# <h3>Graphing the Data</h3>
# <p>There are still some entries in our data set that do not make sense, like how someone has 0 charges or was -242 years old for their possible offense. Some simple graphs of the data can reveal what these outliers look like further. Let's start by constructing a histogram for the ages at offense using pyplot.</p>
#
# <p>We are going to do this using the pyplot module of matplotlib. We do this by calling plt.hist(), where we pass the dataframe and which column we want to plot in the parentheses. It is also a good idea to title your chart and axes, which we can do ax.set_title(), ax.set_xlabel(), and ax.set_ylabel() as shown below.</p>
# Histogram of ages; the log-scaled y-axis keeps rare outlier bins visible
# next to the dominant ones.
fig,ax = plt.subplots()
ax.hist(nw_df['Age at Offense'])
ax.set_yscale('log')
ax.set_title('Age at Offense')
ax.set_xlabel('Age')
ax.set_ylabel('Frequency (log scale)')
# <p>As we can see from the plot, we have a few negative age at offense entries. Since these are nonsensical in the real world, we can remove them without removing much of our data. Let's make the minimum age 0. We can filter the dataframe to adhere to this condition by using the command below:</p>
# +
# Keep only rows with a non-negative age; boolean indexing returns a new
# DataFrame, so the original `nw_df` is untouched.
nw_df_filtered = nw_df[nw_df['Age at Offense'] >= 0]
fig,ax = plt.subplots()
ax.hist(nw_df_filtered['Age at Offense'])
ax.set_yscale('log')
ax.set_title('Age at Offense')
ax.set_xlabel('Age')
ax.set_ylabel('Frequency (log scale)')
| workshop/Pandas Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-block alert-info">
# Section of the book chapter: <b>5.1 Supervised Learning Models</b>
# </div>
#
# # 3. Supervised learning
#
# **Table of Contents**
#
# * [3.1 Linear regression and partial least squares](#3.1-Linear-regression-and-partial-least-squares)
# * [3.2 Tree-based Models](#3.2-Tree-based-Models)
# * [3.3 Support Vector Machines](#3.3-Support-Vector-Machines)
# * [3.4 k-Nearest Neighbors](#3.4-k-Nearest-Neighbors)
# * [3.5 Artificial Neural Networks (ANN)](#3.5-Artificial-Neural-Networks,-ANN)
# * [3.6 SUSI: Supervised Self-organizing Maps in Python](#3.6-SUSI:-Supervised-Self-organizing-Maps-in-Python)
# * [3.7 Overall results](#3.7-Overall-results)
#
# **Learnings:**
#
# - how to implement different supervised machine learning models,
# - how to plot regression results.
#
#
#
# ### Packages
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import sklearn.metrics as met
import datetime
import utils
# ignore warnings
# NOTE(review): a blanket filter also hides deprecation warnings from the
# ML libraries; consider restricting it to specific categories.
import warnings
warnings.filterwarnings('ignore')
# -
# ### Read in Data
#
# **Dataset:** <NAME> and <NAME>, "Hyperspectral benchmark dataset on soil moisture", Dataset, Zenodo, 2018. [DOI:10.5281/zenodo.1227836](http://doi.org/10.5281/zenodo.1227836) and [GitHub](https://github.com/felixriese/hyperspectral-soilmoisture-dataset)
#
# **Introducing paper:** <NAME> and <NAME>, “Introducing a Framework of Self-Organizing Maps for Regression of Soil Moisture with Hyperspectral Data,” in IGARSS 2018 - 2018 IEEE International Geoscience and Remote Sensing Symposium, Valencia, Spain, 2018, pp. 6151-6154. [DOI:10.1109/IGARSS.2018.8517812](https://doi.org/10.1109/IGARSS.2018.8517812)
# Train/test split provided by the project-local `utils` helper.
X_train, X_test, y_train, y_test = utils.get_xy_split()
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# ### Plot Configurations
# Color scale spanning the combined target range of train and test labels.
norm = mpl.colors.Normalize(vmin=np.min([np.min(y_train), np.min(y_test)]),
                            vmax=np.max([np.max(y_train), np.max(y_test)]))
cmap = "cividis_r"  # NOTE(review): norm/cmap appear unused in this chunk -- verify
# ### Results Dataframe
# Accumulator for per-model metrics; filled by print_regression_metrics().
results = pd.DataFrame(columns=["model", "r2", "mae", "rmse", "potential"])
# ### Metrics
#
# The following functions calculate and print the following performance metrics:
#
# * Coefficient of Determination $R^2$
# * Mean Absolute Error (MEA)
# * Root Mean Squared Error (RMSE)
# +
def get_regression_metrics(y_pred):
    """Return (R^2, MAE, RMSE) of `y_pred` against the module-level `y_test`.

    Args:
        y_pred: Predicted target values, aligned with `y_test`.

    Returns:
        tuple: (r2, mae, rmse) as floats.
    """
    # `global` is only required for *assignment*; reading the module-level
    # `y_test` works without it, so the misleading declaration is dropped.
    return (
        met.r2_score(y_test, y_pred),
        met.mean_absolute_error(y_test, y_pred),
        np.sqrt(met.mean_squared_error(y_test, y_pred)))
def print_regression_metrics(y_pred, model_name, potential):
    """Print R^2/MAE/RMSE for `y_pred` and upsert them into the global
    `results` dataframe under `model_name`.

    Args:
        y_pred: Predicted target values, aligned with the module-level `y_test`.
        model_name (str): Row identifier in the `results` dataframe.
        potential (str): Free-text judgement stored alongside the metrics.
    """
    global results
    # get and print metrics
    r2, mae, rmse = get_regression_metrics(y_pred)
    print("R2 = {0:.1f}% \nMAE = {1:.2f} \nRMSE = {2:.2f}".format(
        r2*100, mae, rmse))
    # save metrics to dataframe
    if not ((results["model"]==model_name).any()):
        rdict = {
            "model": model_name,
            "r2": r2,
            "mae": mae,
            "rmse": rmse,
            "potential": potential}
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat on a one-row frame is the supported replacement.
        results = pd.concat([results, pd.DataFrame([rdict])],
                            ignore_index=True)
    else:
        # Model already has a row: update it in place.
        idx = results.index[results['model'] == model_name].tolist()[0]
        results.at[idx, "r2"] = r2
        results.at[idx, "mae"] = mae
        results.at[idx, "rmse"] = rmse
        results.at[idx, "potential"] = potential
# -
# ### Fix Random State
# +
import tensorflow as tf
# Fix the NumPy and TensorFlow RNGs so reruns produce identical results.
np.random.seed(42)
tf.random.set_seed(43)
# -
# ***
#
# ## 3.1 Linear regression and partial least squares
#
# Content:
#
# - [3.1.1 Linear regression](#3.1.1-Linear-regression)
# - [3.1.2 Partial least squares](#3.1.2-Partial-least-squares)
#
# ### 3.1.1 Linear regression
# Implementation: [sklearn.linear_model.LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html)
# +
from sklearn.linear_model import LinearRegression
# Ordinary least squares as the baseline model.
model_lin = LinearRegression()
model_lin.fit(X_train, y_train)
y_pred_lin = model_lin.predict(X_test)
print_regression_metrics(y_pred_lin, "Linear", "-")
utils.plot_regression_results(y_test, y_pred_lin, "Linear")
# -
# ### 3.1.2 Partial least squares
# Implementation: [sklearn.cross_decomposition.PLSRegression](https://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html)
# +
from sklearn.cross_decomposition import PLSRegression
# Project the spectra onto 5 latent components before regressing.
model_pls = PLSRegression(n_components=5)
model_pls.fit(X_train, y_train)
y_pred_pls = model_pls.predict(X_test)
print_regression_metrics(y_pred_pls, "PLS", "Minor")
utils.plot_regression_results(y_test, y_pred_pls, "PLS")
# -
# ***
#
# ## 3.2 Tree-based Models
#
# Content:
#
# - [3.2.1 Decision Tree](#3.2.1-Decision-Tree)
# - [3.2.2 Bagging: Random Forest & Extremly Randomized Trees](#3.2.2-Bagging:-Random-Forest-&-Extremly-Randomized-Trees)
# - [3.2.3 Boosting: Gradient Boosting](#3.2.3-Boosting:-Gradient-Boosting)
#
# ### 3.2.1 Decision Tree
#
# **Source:** <NAME>., <NAME>., <NAME>., <NAME>.: Classification and regression trees. Chapman and Hall/CRC (1984)
#
# **Algorithm:**
#
# The regression trees algorithm is defined as follows:
# 1. Start with the root node.
# 2. Start with the most significant feature of the training data.
# 3. Divide the input data with (binary) a cut $c_1$ on feature $x_i$, e.g. according to the Gini index, see below.
# 4. Divide data along the next best feature on cut $c_j$ for $j=2, 3, \ldots$
# 5. Stop if a condition is met, e.g. maximum number of nodes, maximum depth, maximum purity etc.
# 6. Every leaf is then averaged and therefore contains one output value.
#
# The Gini index is defined as:
#
# $G = 1 - \sum_{i=1}^n P_i^2 \qquad \text{with } P_i = \frac{N_i}{N},\label{eq:gini}$
#
# with $N$ as number of all objects and $N_i$ as number of objects of class $i$.
#
# **Implementation:** [sklearn.tree.DecisionTreeRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html#sklearn.tree.DecisionTreeRegressor)
# +
from sklearn.tree import DecisionTreeRegressor
# Single regression tree with default hyperparameters.
model_dt = DecisionTreeRegressor()
model_dt.fit(X_train, y_train)
y_pred_dt = model_dt.predict(X_test)
print_regression_metrics(y_pred_dt, "Decision Tree", "Minor")
utils.plot_regression_results(y_test, y_pred_dt, "Decision Tree")
# -
# ### 3.2.2 Bagging: Random Forest & Extremly Randomized Trees
# #### Random Forest
# Implementation: [sklearn.ensemble.RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor)
# +
from sklearn.ensemble import RandomForestRegressor
# 100-tree forest; oob_score=True additionally scores each tree on the
# samples left out of its bootstrap draw.
model_rf = RandomForestRegressor(n_estimators=100, oob_score=True)
model_rf.fit(X_train, y_train)
y_pred_rf = model_rf.predict(X_test)
print_regression_metrics(y_pred_rf, "RF", "Minor")
utils.plot_regression_results(y_test, y_pred_rf, "RF")
print("Out-of-bag estimate = {0:.1f}%".format(model_rf.oob_score_*100))
# -
# #### Extremly Randomized Trees
# Implementation: [sklearn.ensemble.ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html#sklearn.ensemble.ExtraTreesRegressor)
# +
from sklearn.ensemble import ExtraTreesRegressor
# Like a random forest, but split thresholds are drawn at random as well.
model_et = ExtraTreesRegressor(n_estimators=100)
model_et.fit(X_train, y_train)
y_pred_et = model_et.predict(X_test)
print_regression_metrics(y_pred_et, "ET", "Minor")
utils.plot_regression_results(y_test, y_pred_et, "ET")
# -
# #### Feature Importance
# +
n_features_plotted = 15
importances = model_rf.feature_importances_
# Band indices sorted by ascending importance: the tail of `indices`
# holds the most important bands.
indices = np.argsort(importances)
# Spread of each band's importance across the individual trees.
std = np.std([tree.feature_importances_ for tree in model_rf.estimators_], axis=0)
# Derive the band count from the data instead of hard-coding 125.
n_bands = X_train.shape[1]
start = n_bands - n_features_plotted
plt.figure(figsize=(15,5))
plt.title("Feature importances")
plt.bar(range(n_bands)[start:], importances[indices][start:], color="r",
        yerr=std[indices][start:], align="center")
# The bars show the top `n_features_plotted` bands (the tail of `indices`),
# so their labels must come from the tail as well. The original used
# `indices[:n_features_plotted]`, which labelled the bars with the *least*
# important bands.
# If you want to define your own labels,
# replace `indices[start:]` with a list of labels on the following line.
plt.xticks(range(n_bands)[start:], indices[start:], rotation=90)
plt.xlim([-1 + start, n_bands])
plt.xlabel("Hyperspectral band")
plt.ylabel("Feature importance")
plt.savefig("plots/featureimportance_rf.pdf", bbox_inches="tight")
# -
# ### 3.2.3 Boosting: Gradient Boosting
# Implementation: [sklearn.ensemble.GradientBoostingRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html#sklearn.ensemble.GradientBoostingRegressor)
# +
from sklearn.ensemble import GradientBoostingRegressor
# Boosted tree ensemble with sklearn's default hyperparameters.
model_gb = GradientBoostingRegressor()
model_gb.fit(X_train, y_train)
y_pred_gb = model_gb.predict(X_test)
print_regression_metrics(y_pred_gb, "GB", "Minor")
utils.plot_regression_results(y_test, y_pred_gb, "GB")
# -
# ***
#
# ## 3.3 Support Vector Machines
# Implementation: [sklearn.svm.SVR](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html)
#
# The SVM is tuned with a Grid Search, see [sklearn.model_selection.RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)
# +
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
# 1. find hyperparameters
# Random search over a log-spaced C/gamma grid (30 of the 17*17 combinations),
# 5-fold cross-validated on the training split.
params = {"C": np.logspace(-8, 8, 17), "gamma": np.logspace(-8, 8, 17)}
rsearch = RandomizedSearchCV(
    estimator=SVR(),
    n_iter=30,
    cv=5,
    n_jobs=-1,
    param_distributions=params)
rsearch.fit(X_train, y_train)
model_svm = rsearch.best_estimator_
# 2. predict
# NOTE(review): best_estimator_ is already refit on the full training data
# (refit=True by default), so this second fit() is redundant but harmless.
model_svm.fit(X_train, y_train)
y_pred_svm = model_svm.predict(X_test)
print_regression_metrics(y_pred_svm, "SVM", "Minor")
utils.plot_regression_results(y_test, y_pred_svm, "SVM")
# -
# ***
#
# ## 3.4 k-Nearest Neighbors
#
# Types:
#
# - [3.4.1 Without weighting](#3.4.1-Without-weighting)
# - [3.4.2 With distance weighting](#3.4.2-With-distance-weighting)
#
# Implementation: [sklearn.neighbors.KNeighborsRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor)
#
# ### 3.4.1 Without weighting
# +
from sklearn.neighbors import KNeighborsRegressor
# Plain 5-nearest-neighbour regression (uniform weights).
model_knn = KNeighborsRegressor(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred_knn = model_knn.predict(X_test)
print_regression_metrics(y_pred_knn, "k-NN", "Minor")
utils.plot_regression_results(y_test, y_pred_knn, "kNN")
# -
# ### 3.4.2 With distance weighting
# +
from sklearn.neighbors import KNeighborsRegressor  # re-import is redundant but harmless
# 5-NN with inverse-distance weighting: closer neighbours count more.
model_knnw = KNeighborsRegressor(n_neighbors=5, weights="distance")
model_knnw.fit(X_train, y_train)
y_pred_knnw = model_knnw.predict(X_test)
print_regression_metrics(y_pred_knnw, "k-NN (weighted)", "Minor")
utils.plot_regression_results(y_test, y_pred_knnw, "kNN weighted")
# -
# ***
#
# ## 3.5 Artificial Neural Networks, ANN
#
# Types:
#
# - [3.5.1 Fully-connected ANNs](#3.5.1-Fully-connected-ANNs)
# - [3.5.2 CNN with Keras and TensorFlow](#3.5.2-CNN-with-Keras-and-TensorFlow)
#
# ### 3.5.1 Fully-connected ANNs
# #### scikit-learn
# Implementation: [sklearn.neural_network.MLPRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html)
# +
from sklearn.neural_network import MLPRegressor
# Three hidden layers of 20 units, mini-batches of 10, up to 500 iterations.
model_ann = MLPRegressor(hidden_layer_sizes=(20, 20, 20), batch_size=10, max_iter=500)
model_ann.fit(X_train, y_train)
y_pred_ann = model_ann.predict(X_test)
print_regression_metrics(y_pred_ann, "ANN (sklearn)", "Major")
utils.plot_regression_results(y_test, y_pred_ann, "ANN (sklearn)")
# -
# #### Keras with TensorFlow
# +
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
keras.backend.clear_session()  # reset Keras state between runs
# define model
# Small fully-connected net: 20 -> 10 -> 1 with ReLU hidden activations.
model = Sequential()
model.add(Dense(20, input_dim=X_train.shape[1], activation="relu"))
model.add(Dense(10, activation="relu"))
model.add(Dense(1, activation="linear"))
# compile and train model
# NOTE(review): validation_data is the test set here, so the reported
# validation loss is not an unbiased generalization estimate.
model.compile(loss="mean_squared_error", optimizer="nadam")
model.fit(X_train, y_train, epochs=1000, verbose=0, batch_size=10,
          validation_data=(X_test, y_test))
y_pred_annk = model.predict(X_test)
print_regression_metrics(y_pred_annk, "ANN (keras)", "Major")
utils.plot_regression_results(y_test, y_pred_annk, "ANN (keras)")
# -
# ### 3.5.2 CNN with Keras and TensorFlow
# +
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
keras.backend.clear_session()  # reset Keras state between runs
# define model
# 1-D CNN over the spectral axis: three conv/pool stages, then a dense head.
model = Sequential()
model.add(Conv1D(filters=8, kernel_size=3, activation="relu",
                 input_shape=(X_train.shape[1],1)))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=16, kernel_size=3, activation="relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=32, kernel_size=3, activation="relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(20, activation="relu"))
model.add(Dense(1, activation="linear"))
# compile and train model
# Conv1D expects (samples, steps, channels); the reshape adds a channel axis.
model.compile(loss="mean_squared_error", optimizer="nadam")
model.fit(X_train.reshape(X_train.shape[0], X_train.shape[1], 1), y_train,
          epochs=500, verbose=0, batch_size=10,
          validation_data=(X_test.reshape(X_test.shape[0], X_test.shape[1], 1), y_test))
y_pred_cnn = model.predict(X_test.reshape(X_test.shape[0], X_test.shape[1], 1))
print_regression_metrics(y_pred_cnn, "CNN", "Major")
utils.plot_regression_results(y_test, y_pred_cnn, "CNN")
# -
# ***
#
# ## 3.6 SUSI: Supervised Self-organizing Maps in Python
# Implementation: [felixriese/susi](https://github.com/felixriese/susi)
# +
import susi
# 35x35 supervised self-organizing map with separate unsupervised and
# supervised training phases.
model_som = susi.SOMRegressor(
    n_rows=35,
    n_columns=35,
    n_iter_unsupervised=10000,
    n_iter_supervised=10000,
    n_jobs=-1)
model_som.fit(X_train, y_train)
y_pred_som = model_som.predict(X_test)
print_regression_metrics(y_pred_som, "SOM", "Minor")
utils.plot_regression_results(y_test, y_pred_som, "SOM")
# -
# ***
#
# ## 3.7 Overall results
# save results to CSV
# Timestamped filename so repeated runs don't overwrite earlier results.
dt = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
results.to_csv("results/results_"+dt+".csv")
# + active=""
# # load results from CSV
# # results = pd.read_csv("results/results.csv")
# results = pd.read_csv("results/results_20190704-112011.csv")
# +
# plot horizontal bar plot for results
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(15,5))
results.plot(x="model", y="r2", kind="barh", ax=ax1, title="$R^2$", legend=False)
results.plot(x="model", y="mae", kind="barh", ax=ax2, title="MAE", legend=False)
results.plot(x="model", y="rmse", kind="barh", ax=ax3, title="RMSE", legend=False)
# Drop the redundant y-axis labels; the model names already identify the rows.
for ax in [ax1, ax2, ax3]:
    ax.set_ylabel("")
plt.tight_layout()
plt.savefig("plots/results_bar.pdf", bbox_inches="tight")
# -
# generate LaTeX table
utils.write_results_to_latex_table(results)  # export the summary via the project helper
| notebooks/3_Supervised_Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature: Question Occurrence Frequencies
# This is a "magic" (leaky) feature [published by <NAME>](https://www.kaggle.com/jturkewitz/magic-features-0-03-gain) that doesn't rely on the question text. Questions that occur more often in the training and test sets are more likely to be duplicates.
# ## Imports
# This utility package imports `numpy`, `pandas`, `matplotlib` and a helper `kg` module into the root namespace.
from pygoose import *
# ## Config
# Automatically discover the paths to various data folders and compose the project structure.
project = kg.Project.discover()  # resolve the project's data-folder layout
# Identifier for storing these features on disk and referring to them later.
feature_list_id = 'magic_frequencies'
# ## Read data
# Preprocessed and tokenized questions.
# Each entry is a pair of token lists, one per question of the pair.
tokens_train = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_train.pickle')
tokens_test = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_test.pickle')
# ## Build features
# Unique question texts.
# Rebuild the question texts from their tokens, train and test together, so
# that identical questions share one id across both sets.
df_all_pairs = pd.DataFrame(
    [
        [' '.join(pair[0]), ' '.join(pair[1])]
        for pair in tokens_train + tokens_test
    ],
    columns=['question1', 'question2'],
)
df_unique_texts = pd.DataFrame(np.unique(df_all_pairs.values.ravel()), columns=['question'])
# Map: question text -> integer id (its row index in the uniques table).
question_ids = pd.Series(df_unique_texts.index.values, index=df_unique_texts['question'].values).to_dict()
# Mark every question with its number according to the uniques table.
# Replace each question text with its unique-text id.
df_all_pairs['q1_id'] = df_all_pairs['question1'].map(question_ids)
df_all_pairs['q2_id'] = df_all_pairs['question2'].map(question_ids)
# Map to frequency space.
# A question's frequency is its total number of appearances, counting both
# the question1 and question2 sides of every pair.
q1_counts = df_all_pairs['q1_id'].value_counts().to_dict()
q2_counts = df_all_pairs['q2_id'].value_counts().to_dict()
df_all_pairs['q1_freq'] = df_all_pairs['q1_id'].map(lambda x: q1_counts.get(x, 0) + q2_counts.get(x, 0))
df_all_pairs['q2_freq'] = df_all_pairs['q2_id'].map(lambda x: q1_counts.get(x, 0) + q2_counts.get(x, 0))
# Calculate ratios.
# Frequencies are always >= 1 (each question counts its own occurrence),
# so these ratios cannot divide by zero.
df_all_pairs['freq_ratio'] = df_all_pairs['q1_freq'] / df_all_pairs['q2_freq']
df_all_pairs['freq_ratio_inverse'] = df_all_pairs['q2_freq'] / df_all_pairs['q1_freq']
# Build final features.
columns_to_keep = [
    'q1_freq',
    'q2_freq',
    'freq_ratio',
    'freq_ratio_inverse',
]
# The first len(tokens_train) rows came from the training set, the rest
# from the test set (pairs were concatenated in that order above).
X_train = df_all_pairs[columns_to_keep].values[:len(tokens_train)]
X_test = df_all_pairs[columns_to_keep].values[len(tokens_train):]
print('X train:', X_train.shape)
print('X test :', X_test.shape)
# ## Save features
# Column names under which the features are persisted.
feature_names = [
    'magic_freq_q1',
    'magic_freq_q2',
    'magic_freq_q1_q2_ratio',
    'magic_freq_q2_q1_ratio',
]
project.save_features(X_train, X_test, feature_names, feature_list_id)
| notebooks/feature-magic-frequencies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/KordingLab/ENGR344/blob/master/tutorials/W4D1_How_do_we_know_how_certain_we_should_be/TA/W4D1_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6SdrBMy7G8E2"
# # Tutorial 2: Confidence intervals and bootstrapping
# **Module 4: How do we know how certain we should be?**
#
# **Originally By Neuromatch Academy**
#
# **Content creators**: <NAME>, <NAME>, <NAME> with help from <NAME>
#
# **Content reviewers**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# **Content Modifiers**: <NAME>, <NAME>
# + [markdown] id="EJvMQDrvG8E4"
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# + colab={"base_uri": "https://localhost:8080/", "height": 563, "referenced_widgets": ["e6b72b838f6443f4ab8713aac7cacd0f", "<KEY>", "<KEY>", "ef418b0981714ae8a5b98d16d7c9b3df"]} cellView="form" id="1G-b7g6BOtis" outputId="40be87c0-e7b0-42cc-bc8d-6f09542e8eda"
# @title Due Dates Calendar
from ipywidgets import widgets
from IPython.display import display, IFrame, YouTubeVideo
# Embed the course Google Calendar inside a single-tab widget.
out1 = widgets.Output()
with out1:
    calendar = IFrame(src="https://calendar.google.com/calendar/embed?src=356b9d2nspjttvgbb3tvgk2f58%40group.calendar.google.com&ctz=America%2FNew_York", width=600, height=480)
    display(calendar)
out = widgets.Tab([out1])
out.set_title(0, 'Calendar')
display(out)
# + [markdown] id="s8BjsbXKG8E5"
# # Tutorial Objectives
#
# *Estimated timing of tutorial: 23 minutes*
#
# This is Tutorial 3 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).
#
# In this tutorial, we will discuss how to gauge how good our estimated model parameters are.
# - Learn how to use bootstrapping to generate new sample datasets
# - Estimate our model parameter on these new sample datasets
# - Quantify the variance of our estimate using confidence intervals
# + cellView="form" id="kuSoGxcDG8E5"
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
# Render the tutorial slides hosted on OSF.
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/2mkq4/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# + id="Cy5pHTHZG8E6" outputId="5c17d84c-286c-4677-eb80-07a5862e68b2" colab={"base_uri": "https://localhost:8080/", "height": 581, "referenced_widgets": ["69f304c22a054606b794d4a8b6965831", "18e81900f6a84b83aab807f5c0c2a24c", "7fc7647292024944b397c084d67c930b", "f26be7549edc414582450facaa1ac0b2"]} cellView="form"
# @title Video 1: Confidence Intervals & Bootstrapping
from ipywidgets import widgets
# Embed the lecture video inside a single-tab widget.
out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="Jii0gMy5JLQ", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)
out = widgets.Tab([out1])
out.set_title(0, 'Youtube')
display(out)
# + [markdown] id="CkCzXiK3G8E6"
# Up to this point we have been finding ways to estimate model parameters to fit some observed data. Our approach has been to optimize some criterion, either minimize the mean squared error or maximize the likelihood while using the entire dataset. How good is our estimate really? How confident are we that it will generalize to describe new data we haven't seen yet?
#
# One solution to this is to just collect more data and check the MSE on this new dataset with the previously estimated parameters. However this is not always feasible and still leaves open the question of how quantifiably confident we are in the accuracy of our model.
#
# In Section 1, we will explore how to implement bootstrapping. In Section 2, we will build confidence intervals of our estimates using the bootstrapping method.
# + [markdown] id="guEnAdIwG8E7"
# ---
# # Setup
# + cellView="both" id="K27zV2BnG8E7"
# Imports
import numpy as np
import matplotlib.pyplot as plt
# + cellView="form" id="6Gn6CaPjG8E8"
#@title Figure Settings
# %config InlineBackend.figure_format = 'retina'
# Apply the Neuromatch Academy matplotlib style (fetched over the network).
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form" id="kCgekUPFG8E8"
# @title Plotting Functions
def plot_original_and_resample(x, y, x_, y_):
    """Show the original sample next to a bootstrap resample of it.

    Args:
        x (ndarray): An array of shape (samples,) that contains the input values.
        y (ndarray): An array of shape (samples,) that contains the corresponding
            measurement values to the inputs.
        x_ (ndarray): An array of shape (samples,) with a subset of input values from x
        y_ (ndarray): An array of shape (samples,) with the corresponding subset
            of measurement values as x_ from y
    """
    fig, (left_ax, right_ax) = plt.subplots(ncols=2, figsize=(12, 5))
    # Left panel: the data as collected.
    left_ax.scatter(x, y)
    left_ax.set(title='Original', xlabel='x', ylabel='y')
    # Right panel: the resampled points (cyan), forced onto the same axis
    # limits so the two panels are directly comparable.
    right_ax.scatter(x_, y_, color='c')
    right_ax.set(title='Resampled', xlabel='x', ylabel='y',
                 xlim=left_ax.get_xlim(), ylim=left_ax.get_ylim())
# + [markdown] id="eKLabMy8G8E8"
# ---
# # Section 1: Bootstrapping
#
# *Estimated timing to here from start of tutorial: 7 min*
#
# [Bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)) is a widely applicable method to assess confidence/uncertainty about estimated parameters; it was originally [proposed](https://projecteuclid.org/euclid.aos/1176344552) by [<NAME>](https://en.wikipedia.org/wiki/Bradley_Efron). The idea is to generate many new synthetic datasets from the initial true dataset by randomly sampling from it, then finding estimators for each one of these new datasets, and finally looking at the distribution of all these estimators to quantify our confidence.
#
# Note that each new resampled dataset will be the same size as our original one, with the new data points sampled with replacement i.e. we can repeat the same data point multiple times. Also note that in practice we need a lot of resampled datasets, here we use 2000.
#
# To explore this idea, we will start again with our noisy samples along the line $y_i = 1.2x_i + \epsilon_i$, but this time only use half the data points as last time (15 instead of 30).
# + cellView="form" id="--3ArfrsG8E9"
#@title
#@markdown Execute this cell to simulate some data
# setting a fixed seed to our random number generator ensures we will always
# get the same pseudorandom number sequence
np.random.seed(121)  # fixed seed -> identical "random" data on every run
# Let's set some parameters
theta = 1.2      # true slope of the underlying line
n_samples = 15   # deliberately small sample (half of the earlier tutorials)
# Draw x and then calculate y
x = 10 * np.random.rand(n_samples)  # sample from a uniform distribution over [0,10)
noise = np.random.randn(n_samples)  # sample from a standard normal distribution
y = theta * x + noise
fig, ax = plt.subplots()
ax.scatter(x, y)  # produces a scatter plot
ax.set(xlabel='x', ylabel='y');
# + [markdown] id="CIN1NIHaG8E9"
# ## Coding Exercise 1: Resample Dataset with Replacement
#
# In this exercise you will implement a method to resample a dataset with replacement. The method accepts $\mathbf{x}$ and $\mathbf{y}$ arrays. It should return a new set of $\mathbf{x}'$ and $\mathbf{y}'$ arrays that are created by randomly sampling from the originals.
#
# We will then compare the original dataset to a resampled dataset.
#
# TIP: The [numpy.random.choice](https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html) method would be useful here.
# + cellView="both" id="PItvAhA6G8E9"
def resample_with_replacement(x, y):
    """Resample data points with replacement from the dataset of `x` inputs and
    `y` measurements.

    Args:
      x (ndarray): An array of shape (samples,) that contains the input values.
      y (ndarray): An array of shape (samples,) that contains the corresponding
        measurement values to the inputs.

    Returns:
      ndarray, ndarray: The newly resampled `x` and `y` data points.
    """
    #######################################################
    ## TODO for students: resample dataset with replacement
    # Fill out function and remove
    raise NotImplementedError("Student exercise: resample dataset with replacement")
    #######################################################
    # Get array of indices for resampled points
    sample_idx = ...
    # Sample from x and y according to sample_idx
    x_ = ...
    y_ = ...
    return x_, y_
# Raises NotImplementedError until the TODO above is completed
x_, y_ = resample_with_replacement(x, y)
# NOTE(review): plot_original_and_resample appears to be defined in a hidden
# helper cell elsewhere in the notebook — confirm before running standalone.
plot_original_and_resample(x, y, x_, y_)
# + cellView="both" id="b5VaDrvCG8E9"
# to_remove solution
def resample_with_replacement(x, y):
    """Draw one bootstrap resample from the dataset of `x` inputs and `y`
    measurements.

    A new dataset of the same size is built by drawing observation indices
    uniformly at random *with replacement*, so individual data points may
    appear several times (or not at all) in the resampled arrays.

    Args:
      x (ndarray): An array of shape (samples,) that contains the input values.
      y (ndarray): An array of shape (samples,) that contains the corresponding
        measurement values to the inputs.

    Returns:
      ndarray, ndarray: The newly resampled `x` and `y` data points.
    """
    # Draw as many indices as there are observations, with replacement
    indices = np.random.choice(len(x), size=len(x), replace=True)
    # Index both arrays with the same draw so (x, y) pairs stay matched
    return x[indices], y[indices]
# Draw one bootstrap resample and plot it next to the original data.
x_, y_ = resample_with_replacement(x, y)
with plt.xkcd():
    # NOTE(review): plot_original_and_resample comes from a hidden helper cell
    plot_original_and_resample(x, y, x_, y_)
# + [markdown] id="0kVljlwyG8E9"
# In the resampled plot on the right, the actual number of points is the same, but some have been repeated so they only display once.
#
# Now that we have a way to resample the data, we can use that in the full bootstrapping process.
# + [markdown] id="t6kFTNckG8E-"
# ## Coding Exercise 2: Bootstrap Estimates
#
# In this exercise you will implement a method to run the bootstrap process of generating a set of $\hat\theta$ values from a dataset of inputs ($\mathbf{x}$) and measurements ($\mathbf{y}$). You should use `resample_with_replacement` here, and you may also invoke helper function `solve_normal_eqn` from Tutorial 1 to produce the MSE-based estimator.
#
# We will then use this function to look at the theta_hat from different samples.
#
# + cellView="form" id="7yU1_0CaG8E-"
# @markdown Execute this cell for helper function `solve_normal_eqn`
def solve_normal_eqn(x, y):
    """Solve the normal equations to produce the value of theta_hat that
    minimizes MSE.

    For the no-intercept linear model y = theta * x, the least-squares
    solution is theta_hat = (x . y) / (x . x).

    Args:
      x (ndarray): An array of shape (samples,) that contains the input values.
      y (ndarray): An array of shape (samples,) that contains the corresponding
        measurement values to the inputs.

    Returns:
      float: the value for theta_hat arrived from minimizing MSE
    """
    # Inner products of 1-D arrays; equivalent to (x.T @ y) / (x.T @ x)
    return np.dot(x, y) / np.dot(x, x)
# + id="5gLc2EpYG8E-"
def bootstrap_estimates(x, y, n=2000):
    """Generate a set of theta_hat estimates using the bootstrap method.

    Args:
      x (ndarray): An array of shape (samples,) that contains the input values.
      y (ndarray): An array of shape (samples,) that contains the corresponding
        measurement values to the inputs.
      n (int): The number of estimates to compute

    Returns:
      ndarray: An array of estimated parameters with size (n,)
    """
    theta_hats = np.zeros(n)
    ##############################################################################
    ## TODO for students: implement bootstrap estimation
    # Fill out function and remove
    raise NotImplementedError("Student exercise: implement bootstrap estimation")
    ##############################################################################
    # Loop over number of estimates
    for i in range(n):
        # Resample x and y
        x_, y_ = ...
        # Compute theta_hat for this sample
        theta_hats[i] = ...
    return theta_hats
# Set random seed
np.random.seed(123)
# Get bootstrap estimates (raises NotImplementedError until the TODO is filled in)
theta_hats = bootstrap_estimates(x, y, n=2000)
print(theta_hats[0:5])
# + cellView="both" id="3jMBVeQPG8E-"
# to_remove solution
def bootstrap_estimates(x, y, n=2000):
    """Generate a set of theta_hat estimates using the bootstrap method.

    Each estimate is produced by resampling the dataset with replacement and
    re-fitting the MSE-minimizing slope on that resample.

    Args:
      x (ndarray): An array of shape (samples,) that contains the input values.
      y (ndarray): An array of shape (samples,) that contains the corresponding
        measurement values to the inputs.
      n (int): The number of estimates to compute

    Returns:
      ndarray: An array of estimated parameters with size (n,)
    """
    # One resample + one fit per bootstrap iteration, in the same order as a loop
    estimates = [
        solve_normal_eqn(*resample_with_replacement(x, y)) for _ in range(n)
    ]
    return np.array(estimates)
# Set random seed so the bootstrap draws are reproducible
np.random.seed(123)
# Get bootstrap estimates
theta_hats = bootstrap_estimates(x, y, n=2000)
print(theta_hats[0:5])
# + [markdown] id="hr2_nKhcG8E-"
# You should see `[1.27550888 1.17317819 1.18198819 1.25329255 1.20714664]` as the first five estimates.
# + [markdown] id="QpGClNEwG8E-"
# Now that we have our bootstrap estimates, we can visualize all the potential models (models computed with different resampling) together to see how distributed they are.
# + cellView="form" id="J6b5h8ZyG8E_"
#@title
#@markdown Execute this cell to visualize all potential models
fig, ax = plt.subplots()
# For each theta_hat, plot model
theta_hats = bootstrap_estimates(x, y, n=2000)
for i, theta_hat in enumerate(theta_hats):
    y_hat = theta_hat * x
    # Very low alpha so 2000 overlapping lines read as a density band
    ax.plot(x, y_hat, c='r', alpha=0.01, label='Resampled Fits' if i==0 else '')
# Plot observed data
ax.scatter(x, y, label='Observed')
# Plot true fit data
y_true = theta * x
ax.plot(x, y_true, 'g', linewidth=2, label='True Model')
ax.set(
    title='Bootstrapped Slope Estimation',
    xlabel='x',
    ylabel='y'
)
# Change legend line alpha property so the legend sample is visible
handles, labels = ax.get_legend_handles_labels()
handles[0].set_alpha(1)
ax.legend();
# + [markdown] id="SQm3R9EaG8E_"
# This looks pretty good! The bootstrapped estimates spread around the true model, as we would have hoped. Note that here we have the luxury to know the ground truth value for $\theta$, but in applications we are trying to guess it from data. Therefore, assessing the quality of estimates based on finite data is a task of fundamental importance in data analysis.
#
# + [markdown] id="WrkKdb7XG8E_"
# ---
# # Section 2: Confidence Intervals
#
# *Estimated timing to here from start of tutorial: 17 min*
#
# Let us now quantify how uncertain our estimated slope is. We do so by computing [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval) (CIs) from our bootstrapped estimates. The most direct approach is to compute percentiles from the empirical distribution of bootstrapped estimates. Note that this is widely applicable as we are not assuming that this empirical distribution is Gaussian.
# + cellView="form" id="Q11CY34fG8E_"
#@title
#@markdown Execute this cell to plot bootstrapped CI
theta_hats = bootstrap_estimates(x, y, n=2000)
print(f"mean = {np.mean(theta_hats):.2f}, std = {np.std(theta_hats):.2f}")
fig, ax = plt.subplots()
ax.hist(theta_hats, bins=20, facecolor='C1', alpha=0.75)
ax.axvline(theta, c='g', label=r'True $\theta$')
ax.axvline(np.percentile(theta_hats, 50), color='r', label='Median')
# 95% CI taken directly from the 2.5th / 97.5th percentiles of the
# empirical bootstrap distribution (no Gaussian assumption needed)
ax.axvline(np.percentile(theta_hats, 2.5), color='b', label='95% CI')
ax.axvline(np.percentile(theta_hats, 97.5), color='b')
ax.legend()
ax.set(
    title='Bootstrapped Confidence Interval',
    xlabel=r'$\hat{{\theta}}$',
    ylabel='count',
    xlim=[1.0, 1.5]
);
# + [markdown] id="2Imv9CyPG8E_"
# Looking at the distribution of bootstrapped $\hat{\theta}$ values, we see that the true $\theta$ falls well within the 95% confidence interval, which is reassuring. We also see that the value $\theta = 1$ does not fall within the confidence interval. From this we would reject the hypothesis that the slope was 1.
# + [markdown] id="rto1-cU7G8E_"
# ---
# # Summary
#
# *Estimated timing of tutorial: 23 minutes*
#
# - Bootstrapping is a resampling procedure that allows to build confidence intervals around inferred parameter values
# - it is a widely applicable and very practical method that relies on computational power and pseudo-random number generators (as opposed to more classical approaches than depend on analytical derivations)
# + [markdown] id="xtHO2wLoG8E_"
# ---
# # Notation
#
# \begin{align}
# \theta &\quad \text{parameter}\\
# \hat{\theta} &\quad \text{estimated parameter}\\
# x &\quad \text{input, independent variable}\\
# y &\quad \text{response measurement, dependent variable}\\
# \mathbf{x} &\quad \text{vector of input values}\\
# \mathbf{y} &\quad \text{vector of measurements}\\
# \mathbf{x}' &\quad \text{vector of resampled input values }\\
# \mathbf{y}' &\quad \text{vector of resampled measurement values}\\
# \end{align}
# + [markdown] id="4Zi2saaWG8E_"
# **Suggested readings**
#
# Computer Age Statistical Inference: Algorithms, Evidence and Data Science, by <NAME> and <NAME>
#
| tutorials/W4D1_How_do_we_know_how_certain_we_should_be/TA/W4D1_Tutorial2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Legend outside the axis
# **References:**
# - https://matplotlib.org/examples/pylab_examples/figlegend_demo.html
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import numpy as np
# %matplotlib inline
# One period of a sine wave used by the examples below
x = np.linspace(0.0, 2.0*np.pi, 100)
y = np.sin(x)
# ## Sidebox with two axis
# +
f = plt.figure()
# Narrow left axes acts as a blank "sidebox"; main plot goes on the right
ax_sidebox = f.add_axes([0.0, 0.05, 0.22, 0.9])
ax = f.add_axes([0.32, 0.05, 0.65, 0.9])
# Strip grid and ticks so the sidebox is an empty panel
ax_sidebox.grid(False)
ax_sidebox.set_xticks(())
ax_sidebox.set_yticks(())
sidebox_text = ['text1', 'text2', 'text3']
text_xpos = 0.03
text_ypos_start = 0.85
text_vspace = 0.1  # vertical spacing between consecutive text lines
for i,text in enumerate(sidebox_text):
    # NOTE(review): figure-level text positioned via ax.transAxes — confirm intended
    f.text(text_xpos, text_ypos_start-i*text_vspace, text, transform=ax.transAxes, fontsize=18)
line1 = ax.plot(x,y)
line2 = ax.plot(x,0.5*y)
# First legend is placed on the sidebox axes, second on the figure itself
ax_sidebox.legend(line1, (r'$y_{1}$',), loc=(0.1, 0.5), fontsize=15)
f.legend(line2, (r'$y_{2}$',), loc=(0.045, 0.4), fontsize=15)
# -
# ## Top caption box with a single axis
# +
f,a = plt.subplots(figsize=(9,6))
a.plot(x,y)
# - Top caption box
f.subplots_adjust(top=0.8)  # leave room above the axes for the caption box
# Rectangle drawn in axes coordinates, just above the axes (y from 1.0 to 1.2)
caption_box = mpatches.Rectangle((0.0, 1.0), 1.0, 0.2, clip_on=False, transform=a.transAxes, edgecolor='k', facecolor='white', linewidth=1.0)
a.text(0.4, 1.09, 'This text is inside the top figure caption box', horizontalalignment='center', transform=a.transAxes)
a.add_artist(caption_box)
# - Custom legend
# Proxy artist: an empty Line2D used only to produce a legend entry
curve = mlines.Line2D([], [], linestyle="-", color='C0', label=r'$\sin\alpha$')
legend_handles = [curve]
a.legend(handles=legend_handles, loc=(0.8, 1.07));
| python/modules/matplotlib/legend/notebooks/legend_outside_figure.ipynb |
# + active=""
# =============================
# Predict Remaining Useful Life
# =============================
# In this example, we will generate labels using Compose on data provided by NASA simulating turbofan engine degradation. Then, the labels are used to generate features and train a machine learning model to predict the Remaining Useful Life (RUL) of an engine.
# +
import composeml as cp
import featuretools as ft
import pandas as pd
import data
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
# + active=""
# Load Data
# =========
# In this dataset, we have 249 engines (:code:`engine_no`) which are monitored over time (:code:`time_in_cycles`). Each engine had :code:`operational_settings` and :code:`sensor_measurements` recorded for each cycle. The **Remaining Useful Life** (RUL) is the amount of cycles an engine has left before it needs maintenance. What makes this dataset special is that the engines run all the way until failure, giving us precise RUL information for every engine at every point in time.
#
# You can download the data directly from NASA `here <https://ti.arc.nasa.gov/c/13/>`_. After downloading the data, you can set the :code:`file` parameter as an absolute path to :code:`train_FD004.txt`. With the file in place, we preview the data to get an idea on how to observations look.
# -
# Load the FD004 training set via the local `data` helper module
df = data.load('data/train_FD004.txt')
df[df.columns[:7]].head()
# + active=""
# Generate Labels
# ===============
# Now with the observations loaded, we are ready to generate labels for our prediction problem.
#
# Define Labeling Function
# ------------------------
# To get started, we define the labeling function that will return the RUL given the remaining observations of an engine.
# -
def remaining_useful_life(df):
    """Return the Remaining Useful Life (RUL) for an engine's data slice.

    Given the remaining observations of an engine, the RUL is the number of
    cycles left after the current one, i.e. one less than the slice length.
    """
    n_remaining = len(df)
    return n_remaining - 1
# + active=""
# Create Label Maker
# ------------------
# With the labeling function, we create the label maker for our prediction problem. To process the RUL for each engine, we set the :code:`target_entity` to the engine number. By default, the :code:`window_size` is set to the total observation size to contain the remaining observations for each engine.
# -
# Label maker computing RUL per engine; the default window size spans all
# remaining observations of each engine.
lm = cp.LabelMaker(
    target_entity='engine_no',
    time_index='time',
    labeling_function=remaining_useful_life,
)
# + active=""
# Search Labels
# -------------
# Let's imagine we want to make predictions on turbines that are up and running. Turbines in general don't fail before 120 cycles, so we will only make labels for engines that reach at least 100 cycles. To do this, the :code:`minimum_data` parameter is set to 100. Using Compose, we can easily tweak this parameter as the requirements of our model changes. Additionally, we set :code:`gap` to one to create labels on every cycle and limit the search to 10 examples for each engine.
#
# .. seealso::
# For more details on how the label maker works, see :doc:`/main_concepts`.
# +
lt = lm.search(
    df.sort_values('time'),
    num_examples_per_instance=10,  # at most 10 labels per engine
    minimum_data=100,              # only label engines that reached 100+ cycles
    gap=1,                         # create a label on every cycle
    verbose=True,
)
lt.head()
# + active=""
# Continuous Labels
# -----------------
# The labeling function we defined returns continuous labels which can be used to train a regression model for our prediction problem. Alternatively, there are label transforms available to further process these labels into discrete values. In that case, they can be used to train a classification model.
#
# Describe Labels
# ~~~~~~~~~~~~~~~
# Let's print out the settings and transforms that were used to make the continuous labels. This is useful as a reference for understanding how the labels were generated from raw data.
# -
lt.describe()
# + active=""
# Let's plot the labels to get additional insight into the RUL.
#
# Label Distribution
# ~~~~~~~~~~~~~~~~~~
# This plot shows the continuous label distribution.
# -
lt.plot.distribution();
# + active=""
# Discrete Labels
# ---------------
# Let's further process the labels into discrete values. We divide the RUL into quartile bins to predict which range an engine's RUL will fall in.
# -
lt = lt.bin(4, quantiles=True)  # quartile bins -> 4 discrete RUL ranges
# + active=""
# Describe Labels
# ~~~~~~~~~~~~~~~
# Next, let's print out the settings and transforms that were used to make the discrete labels. This time we can see the label distribution which is useful for determining if we have imbalanced labels. Also, we can see that the label type changed from continuous to discrete and the binning transform used in the previous step is included below.
# -
lt.describe()
# + active=""
# Let's plot the labels to get additional insight into the RUL.
#
# Label Distribution
# ~~~~~~~~~~~~~~~~~~
# This plot shows the discrete label distribution.
# -
lt.plot.distribution();
# + active=""
# Count by Time
# ~~~~~~~~~~~~~
# This plot shows the label count accumulated across cutoff times.
# -
lt.plot.count_by_time();
# + active=""
# .. currentmodule:: featuretools
#
# Generate Features
# =================
# Now, we are ready to generate features for our prediction problem.
#
# Create Entity Set
# -----------------
# To get started, let's create an :class:`EntitySet` for the observations.
#
# .. seealso::
# For more details on working with entity sets, see :doc:`loading_data/using_entitysets`.
# +
es = ft.EntitySet('observations')
# Recordings: one row per observation, with a generated surrogate index `id`
es.entity_from_dataframe(
    dataframe=df,
    entity_id='recordings',
    index='id',
    time_index='time',
    make_index=True,
)
# Engines: one row per engine_no, normalized out of recordings
es.normalize_entity(
    base_entity_id='recordings',
    new_entity_id='engines',
    index='engine_no',
)
# Cycles: one row per time_in_cycles value
es.normalize_entity(
    base_entity_id='recordings',
    new_entity_id='cycles',
    index='time_in_cycles',
)
# + active=""
# Describe Entity Set
# -------------------
# To get an idea on how the entity set is structured, we can plot a diagram.
# -
es.plot()
# + active=""
# Create Feature Matrix
# ---------------------
# To simplify the calculation for the feature matrix, we only use 20 percent of the labels.
# -
# Subsample labels (fixed seed) to keep the feature-matrix computation cheap
lt = lt.sample(frac=.2, random_state=0)
# + active=""
# Let's generate features that correspond to the labels. To do this, we set the :code:`target_entity` to engines and the :code:`cutoff_time` to our labels so that the features are calculated for each engine only using data up to and including the cutoff time of each label. Notice that the output of Compose integrates easily with Featuretools.
#
# .. seealso::
# For more details on calculating features using cutoff times, see :doc:`automated_feature_engineering/handling_time`.
# +
fm, fd = ft.dfs(
    entityset=es,
    target_entity='engines',
    agg_primitives=['last', 'max', 'min'],
    trans_primitives=[],
    cutoff_time=lt,              # features use only data up to each label's cutoff
    cutoff_time_in_index=True,
    max_depth=3,
    verbose=True,
)
fm.head()
# + active=""
# Machine Learning
# ================
# Now, we are ready to create a machine learning model for our prediction problem.
#
# Preprocess Features
# -------------------
# Let's extract the labels from the feature matrix and fill any missing values with zeros. Additionally, the categorical features are one-hot encoded.
# +
# Pull the labels out of the feature matrix, impute missing feature values
# with zeros, and one-hot encode the categorical features.
y = fm.pop(lt.name)
y = y.astype('str')
x = fm.fillna(0)
x, fe = ft.encode_features(x, fd)
# + active=""
# Split Labels and Features
# -------------------------
# Then, we split the labels and features each into training and testing sets.
# -
# 80/20 train/test split with a fixed seed for reproducibility
x_train, x_test, y_train, y_test = train_test_split(
    x,
    y,
    train_size=.8,
    test_size=.2,
    random_state=0,
)
# + active=""
# Train Model
# -----------
# Next, we train a random forest classifier on the training set.
# -
# Small random forest (10 trees) with a fixed seed for reproducibility
clf = RandomForestClassifier(n_estimators=10, random_state=0)
clf.fit(x_train, y_train)
# + active=""
# Test Model
# ----------
# Lastly, we test the model performance by evaluating predictions on the testing set.
# -
# Evaluate held-out predictions with per-class precision/recall/F1
y_hat = clf.predict(x_test)
print(classification_report(y_test, y_hat))
# + active=""
# Feature Importances
# -------------------
# This plot is based on scores from the model to show which features are important for predictions.
# +
# Pair each feature name with its importance score from the trained forest
feature_importances = zip(x_train.columns, clf.feature_importances_)
feature_importances = pd.Series(dict(feature_importances))
feature_importances = feature_importances.rename_axis('Features')
feature_importances = feature_importances.sort_values()
top_features = feature_importances.tail(40)  # 40 highest-scoring features
plot = top_features.plot(kind='barh', figsize=(5, 12), color='#054571')
plot.set_title('Feature Importances')
plot.set_xlabel('Scores');
| docs/source/examples/predict-remaining-useful-life/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import each chapter module; any top-level code in a chapter runs on import.
import chapter01
import chapter02
import chapter03
import chapter04
import chapter05
import chapter06
import chapter07
import chapter08
import chapter09
import chapter10
import chapter11
import chapter12
import chapter13
import chapter14
import chapter15
# NOTE(review): chapter15 is imported three times; the repeats are no-ops
# (a module only executes on first import) — possibly chapter16/chapter17
# were intended. Confirm against the repository's chapter list.
import chapter15
import chapter15
| Chapters_Result.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# +
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
np.set_printoptions(suppress=True, precision=4)
# NOTE(review): StandardScaler is imported above but never used — confirm
# whether scaling was intended before fitting the model.
data = pd.read_csv("adult.csv", index_col=0)
data.head()
# -
# Keep a small subset of columns; `income` is the prediction target
columns = ['age', 'workclass', 'education', 'gender', 'hours-per-week', 'occupation', 'income']
data = data[columns]
X = data.drop('income', axis=1)
y = data.income.values
data.head()
# One-hot encode the categorical features
X_dummies = pd.get_dummies(X)
X_train, X_test, y_train, y_test = train_test_split(X_dummies, y)
print(X_train.shape)
print(X_dummies.shape)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression()
logr.fit(X_train, y_train)
#Training Score
print("Training Score")
print(logr.score(X_train, y_train))
#Test Score
print("Test Score")
print(logr.score(X_test, y_test))
| sup_learn_adults.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Coding Period: Week 11"
# > "Writing Unit tests of App & Completing its README.md"
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - hide: false
#
# - search_exclude: true
# ### Decreased size of scraper repository
#
# I reduced the repository size from 600 MB to around 14 MB using [BFG Repo Cleaner](https://rtyley.github.io/bfg-repo-cleaner/), it is a tool which cleans large blobs in git repository commits, I ran it to clear all blobs greater than 10 Kb & then I achieved 14 MB size, Well it is not the original size but a lot better than previous one.
#
#
# ### Fixed CI pipeline
#
# The problem with the CI pipeline was with authentication. My mentors and I fixed this issue; we now use an access token instead of SSH as before. Data is pushed to the master branch of the GitLab repository.
#
# ### Wrote Unit tests of App
#
# I worked a lot on this during this week, I have used flutter's two popular packages,
#
# - [bloc_test](https://pub.dev/packages/bloc_test) This is very useful for testing blocs, which are used in the BLoC architecture.
#
# - [mockito](https://pub.dev/packages/mockito) - Used Flutter's mockito package for mocking; I used it to mock all repositories & all API clients
#
# Wrote Unit Tests for
#
# - Home Screen
# - Download CDM screen,
# - Saved CDM Screen
# - View CDM
# - Search Screen
# - View CDM Statewise screen
# - Compare Hospital Screen
# - Also Wrote unit tests for Overpass API, GitLab API, Medicare Hospital Compare API
#
# ### Completing README.md for App
#
# Readme of app is now completed, I have also modified readme of scraper as suggested by <NAME> to display links of hospitals in a table. I viewed a lot of Open Source projects Repositories to get idea.
#
# https://gitlab.com/Darshpreet2000/lh-toolkit-cost-of-care-app-data-scraper/-/blob/fix-issue/README.md
#
# ### What do I plan to do next week?
#
# - I need to test app more to ensure each & every component is working correctly
#
# - Finding & fixing bugs in app.
#
# - Discussing with @mua_rachmann for REST server, App icon, Intro slides
#
# - Completing Unit tests for other screens
| _notebooks/2020-08-14-Week-11-Aug-08 -To-Aug-14.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Financial Economics HW_02</center>
#
# **<center>11510691 程远星$\DeclareMathOperator*{\argmin}{argmin}
# \DeclareMathOperator*{\argmax}{argmax}
# \newcommand{\using}[1]{\stackrel{\mathrm{#1}}{=}}
# \newcommand{\ffrac}{\displaystyle \frac}
# \newcommand{\space}{\text{ }}
# \newcommand{\bspace}{\;\;\;\;}
# \newcommand{\QQQ}{\boxed{?\:}}
# \newcommand{\void}{\left.\right.}
# \newcommand{\CB}[1]{\left\{ #1 \right\}}
# \newcommand{\SB}[1]{\left[ #1 \right]}
# \newcommand{\P}[1]{\left( #1 \right)}
# \newcommand{\dd}{\mathrm{d}}
# \newcommand{\Tran}[1]{{#1}^{\mathrm{T}}}
# \newcommand{\d}[1]{\displaystyle{#1}}
# \newcommand{\EE}[2][\,\!]{\mathbb{E}_{#1}\left[#2\right]}
# \newcommand{\Var}[2][\,\!]{\mathrm{Var}_{#1}\left[#2\right]}
# \newcommand{\Cov}[2][\,\!]{\mathrm{Cov}_{#1}\left(#2\right)}
# \newcommand{\Corr}[2][\,\!]{\mathrm{Corr}_{#1}\left(#2\right)}
# \newcommand{\I}[1]{\mathrm{I}\left( #1 \right)}
# \newcommand{\N}[1]{\mathrm{N} \left( #1 \right)}
# \newcommand{\ow}{\text{otherwise}}\void^\dagger$</center>**
# ## Question 4.1
#
# $\P{a}$
#
# $\bspace$Consider the last four column vectors in $X$, which represent four tradable securities in the market. Their payoff matrix is $\begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# 2 & 1 & 0 & 0\\
# 3 & 2 & 1 & 0\\
# 4 & 3 & 2 & 1
# \end{bmatrix}$ which is obviously equivalent to the Identity matrix, the payoff for the four state-$\omega$ contingent securities. That is to say the market is complete.
#
# $\P{b}$
#
# $\bspace$Given the price vector $Z$, let the state price vector be $\phi=\SB{\phi_1;\phi_2;\phi_3;\phi_4}$ we have the equation:
#
# $$\Tran\phi X = S \Rightarrow \phi = \begin{bmatrix}
# 0.25 \\
# 0.25 \\
# 0.25 \\
# 0.25
# \end{bmatrix}$$
#
# $\P{c}$
#
# $\bspace$There's redundant vector. It could be the one with first column vector of $X$ as its payoff vector. Since that
#
# $$\begin{bmatrix}
# 1 \\
# 1 \\
# 1 \\
# 1
# \end{bmatrix} = \begin{bmatrix}
# 1 \\
# 2 \\
# 3 \\
# 4
# \end{bmatrix} - \begin{bmatrix}
# 0 \\
# 1 \\
# 2 \\
# 3
# \end{bmatrix}$$
#
# $\P{d}$
#
# $\bspace$We can take the last four. Let the portfolio be $\Theta$, then
#
# $$\begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# 2 & 1 & 0 & 0\\
# 3 & 2 & 1 & 0\\
# 4 & 3 & 2 & 1
# \end{bmatrix}\Theta = I_4 \Rightarrow \Theta = \begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# -2 & 1 & 0 & 0\\
# 1 & -2 & 1 & 0\\
# 0 & 1 & -2 & 1
# \end{bmatrix}$$
#
# $\P{e}$
#
# $\bspace$Let the holding vector be $\theta = \SB{\theta_1;\theta_2;\theta_3;\theta_4;\theta_5}$, then the position for the state-$\omega$ contingent portfolio are
#
# $$X\theta = \begin{bmatrix}
# \theta_1 + \theta_2 \\
# \theta_1 + 2\theta_2 + \theta_3\\
# \theta_1 + 3\theta_2 + 2\theta_3 + \theta_4 \\
# \theta_1 + 4\theta_2 + 3\theta_3 + 2\theta_4 + \theta_5
# \end{bmatrix}$$
#
# $\P{f}$
#
# $\bspace$The number of securities with independent payoff is $4$, same with the number of state in period $1$.
# ## Question 4.5
#
# $\P{a}$
#
# $\bspace$When $\phi = \SB{2;-1;2}$, we have
#
# $$\Tran\phi X = \Tran{\begin{bmatrix}
# 2 \\
# -1 \\
# 2
# \end{bmatrix}} \cdot \begin{bmatrix}
# 1 & 0 & 0.5\\
# 1 & 1 & 1 \\
# 0 & 1 & 0.5
# \end{bmatrix} = \begin{bmatrix}
# 1 \\
# 1 \\
# 1
# \end{bmatrix} = \begin{bmatrix}
# S_a \\
# S_b \\
# S_c
# \end{bmatrix} \equiv S$$
#
# $\P{b}$
#
# $\bspace$It's a requirement for No-arbitrage. For that if so, then there'll be a positive payoff at period $1$ at state $\omega$ while $\phi_\omega <0$.
#
# $\P{c}$
#
# $\bspace$It's not a counterexample since the only ONE state price vector should still be with all positive entries. This market, at this time is not even complete.
#
# $\P{d}$
#
# $\bspace$When $\phi = \SB{0.5;0.5;0.5}$, we have
#
# $$\Tran\phi X = \Tran{\begin{bmatrix}
# 0.5\\
# 0.5 \\
# 0.5
# \end{bmatrix}} \cdot \begin{bmatrix}
# 1 & 0 & 0.5\\
# 1 & 1 & 1 \\
# 0 & 1 & 0.5
# \end{bmatrix} = \begin{bmatrix}
# 1 \\
# 1 \\
# 1
# \end{bmatrix} =S$$
#
# $\P{e}$
#
# $\bspace$Because this market is not even complete!
# ## Question 4.6
#
# $\P{a}$
#
# $$\ffrac{F_0} {1+r_F} = S_0 \Rightarrow F_0 = \P{1+r_F} S_0$$
#
# $\P{b}$
#
# $$\ffrac{F_0 + y} {1+r_F} = S_0 \Rightarrow F_0 = \P{1+r_F} S_0 - y$$
# ## Question 4.8
#
# $\P{a}$
#
# $\bspace$Still we let $\phi = \SB{\phi_1;\phi_2;\phi_3;\phi_4}$ be the state price vector so that
#
# $$\Tran\phi \begin{bmatrix}
# 1 & 1 & 0 & 0 & 0 \\
# 1 & 2 & 1 & 0 & 0\\
# 1 & 3 & 2 & 1 & 0\\
# 1 & 4 & 3 & 2 & 1
# \end{bmatrix} = S = \begin{bmatrix}
# 0.8 \\
# 2 \\
# 1.2 \\
# 0.6 \\
# 0.2
# \end{bmatrix} \Rightarrow \phi = \begin{bmatrix}
# k \\
# 0.6-2k \\
# k \\
# 0.2
# \end{bmatrix}, \text{where }k=0.2$$
#
# $\bspace$The interest rate is:
#
# $$r_F = \ffrac{1-S_1} {S_1} = \ffrac{1-\P{k+0.6-2k+k+0.2}} {k+0.6-2k+k+0.2} = 0.25$$
#
# $\P{b}$
#
# $\bspace$The equivalent $Q$ measure is $Q = \SB{\ffrac{k}{0.8};\ffrac{0.6-2k}{0.8};\ffrac{k}{0.8};0.25}$ where $k=0.2$.
#
# $\P{c}$
#
# $\bspace$Except for the forth one, the others can be verified of the property of martingale.
#
# $$\ffrac{\Tran QX_1}{1+0.25} = 0.8\times\P{\ffrac{k}{0.8} + \ffrac{0.6-2k}{0.8} + \ffrac{k}{0.8} + 0.25} = 0.8$$
#
# $$\ffrac{\Tran QX_2}{1+0.25} = 0.8\times\P{\ffrac{k}{0.8} +2\times \ffrac{0.6-2k}{0.8} +3\times \ffrac{k}{0.8} + 4\times0.25} = 2$$
#
# $$\ffrac{\Tran QX_3}{1+0.25} = 0.8\times\P{1\times \ffrac{0.6-2k}{0.8} +2\times \ffrac{k}{0.8} + 3\times0.25} = 1.2$$
#
# $$\ffrac{\Tran QX_4}{1+0.25} = 0.8\times\P{1\times \ffrac{k}{0.8} + 2\times0.25} = 0.6,\text{where }k=0.2$$
#
# $$\ffrac{\Tran QX_5}{1+0.25} = 0.8\times0.25 = 0.2$$
# ## Question 5.1
#
# $\P{a}$
#
# $\bspace$To eliminate arbitrage opportunities, we need $d < 1+r_F < u$; since there is now an extra payoff, the relation changes to
#
# $$\delta + d < 1 + r_F < \delta + u$$
#
# $\P{b}$
#
# $\bspace$Let the state price vector be $\phi = \SB{\phi_u;\phi_d}$. Then we have the equation:
#
# $$\begin{cases}
# S = \phi_u \cdot u S + \phi_d \cdot dS + \ffrac{\delta S} {1+r_F} \\
# B = \ffrac{1} {1+r_F} = \phi_u + \phi_d
# \end{cases} \Rightarrow \begin{cases}
# \phi_u = \ffrac{\P{1+r_F} - \P{d+\delta}} {1+r_F}\\
# \phi_d = \ffrac{\P{u+\delta} - \P{1+r_F}} {1+r_F}
# \end{cases}$$
#
# $\bspace$Hence the price of that option is $c = \phi_u \cdot [uS − K]_+ + \phi_d \cdot[dS − K]_+$
#
# $\P{c}$
#
# $\bspace C = \max\P{c,\SB{S-K}_+}$. Obviously with $\delta$ getting larger, the agent will tend to early exercise more cause $S$ is getting larger as well.
#
# $\P{d}$
#
# $\bspace$The price of the European put option is $p = \phi_u \cdot [K-uS]_+ + \phi_d \cdot[K-dS]_+$. And if it's an American put, we have $P = \max\P{p,\SB{K-S}_+}$. At this time the increasing $\delta$ will decrease the intention of the agent to early exercise.
# ## Question 5.5
#
# $\P{a}$
#
# $\bspace$Still we assume the state price vector, and obtain the equation set:
#
# $$\begin{cases}
# S = \phi_u \cdot u S + \phi_d \cdot dS \\
# B = \ffrac{1} {1+r_F} = \phi_u + \phi_d
# \end{cases} \Rightarrow \begin{cases}
# \phi_u = \ffrac{\P{1+r_F} - d} {1+r_F} = \ffrac{15} {22}\\
# \phi_d = \ffrac{u- \P{1+r_F}} {1+r_F} = \ffrac{5} {22}
# \end{cases}$$
#
# $\bspace$So that the put option price is $p = \ffrac{15} {22}\cdot0 + \ffrac{5} {22}\times 1 = \ffrac{5} {22}$
#
# $\P{b}$
#
# $\bspace$Actually nothing changes here so the price remains unchanged.
# ## Question 6.1
#
# $\bspace$Consider two consumption plan $c$ and $c'$ and suppose we have $c \succsim c'$. If in state $\omega^\dagger$ both two have the consumption path from $c_0$ to $c_{1\omega^\dagger}$, consumption plan $c$ must provide a higher utility for some paths to states other than $\omega^*$:
#
# $$U\P{c} \geq U\P{c'} \Rightarrow u_0\P{c_0} + \pi_{\omega^\dagger}u_1\P{c_{x}} + \sum_{\omega \neq \omega^\dagger} \pi_{\omega}u_1\P{c_{1\omega}} \geq u_0\P{c_0} + \pi_{\omega^\dagger}u_1\P{c_{x}} + \sum_{\omega \neq \omega^\dagger} \pi_{\omega}u_1\P{c'_{1\omega}} \\
# \Downarrow \\
# \sum_{\omega \neq \omega^\dagger} \pi_{\omega}u_1\P{c_{1\omega}} \geq \sum_{\omega \neq \omega^\dagger} \pi_{\omega}u_1\P{c'_{1\omega}}
# $$
#
# $\bspace$So now even if the consumption path is changed to $c_{1\omega^\ddagger}$, we still have
#
# $$U\P{c} = u_0\P{c_0} + \pi_{\omega^*}u_1\P{c_{y}} + \sum_{\omega \neq \omega^\ddagger} \pi_{\omega}u_1\P{c_{1\omega}} \geq u_0\P{c_0} + \pi_{\omega^\ddagger}u_1\P{c_{y}} + \sum_{\omega \neq \omega^\ddagger} \pi_{\omega}u_1\P{c'_{1\omega}} = U\P{c'}$$
# ## Question 6.4
#
# $\bspace$By the definition of weak convexity, we have $\forall a,b, \in C = \mathbb{R}_+^{1+\Omega}$, $\alpha \in \SB{0,1}$ if $a \succsim b$, then $\alpha a + \P{1-\alpha}b \succsim b$, so that $U\P{\alpha a + \P{1-\alpha}b} \geq U\P{b}$. As assumed, we can write
#
# $$u_0\P{\alpha a_0 + \P{1-\alpha}b_0} + \sum_{\omega\in\Omega}\pi_\omega u_1\P{\alpha a_{1\omega} + \P{1-\alpha}b_{1\omega}} \geq u_0\P{b_0} + \sum_{\omega\in\Omega}\pi_\omega u_1\P{b_{1\omega}}$$
#
# $\bspace$And this inequality holds for all possible positive $\SB{a_{0};a_{11};\cdots;a_{1\omega}}$ and $\SB{b_{0};b_{11};\cdots;b_{1\omega}}$ so to obtain the quasiconcavity of $u_0$ and $u_1$, we plug in the consumption plan with only one positive entry while all others are $0$, then since $u_0\P{0} = 0$, $u_1\P{0} = 0$
#
# $$u_0\P{\alpha a_0 + \P{1-\alpha}b_0} \geq u_0\P{b_0} \\[0.6em]
# u_1\P{\alpha a_{1\omega} + \P{1-\alpha}b_{1\omega}} \geq u_1\P{b_{1\omega}}
# $$
#
# $\bspace\newcommand{\Exp}{\mathrm{E}}
# \newcommand{\RR}{\mathbb{R}}
# \newcommand{\EE}{\mathbb{E}}
# \newcommand{\NN}{\mathbb{N}}
# \newcommand{\ZZ}{\mathbb{Z}}
# \newcommand{\QQ}{\mathbb{Q}}
# \newcommand{\PP}{\mathbb{P}}
# \newcommand{\AcA}{\mathcal{A}}
# \newcommand{\FcF}{\mathcal{F}}
# \newcommand{\AsA}{\mathscr{A}}
# \newcommand{\FsF}{\mathscr{F}}$And since $a \succsim b$, we have $u_0\P{b_0} = \min\CB{u_0\P{a_0}, u_0\P{b_0}}$ and $u_1\P{b_{1\omega}} = \min\CB{u_1\P{a_{1\omega}}, u_1\P{b_{1\omega}}}$. Combine these we can conclude that the weak convexity of $U$ promises the quasiconcavity of $u_0\P{\cdot}$ and $u_1\P{\cdot}$.
# ## Question 7.1
#
# $\P{1}$
#
# $$R_1\P{w} = -w \ffrac{u_1''\P{w}} {u_1'\P{w}} = -w\ffrac{\lambda^2u''\P{\lambda w}}{\lambda u'\P{\lambda w}} = R\P{\lambda w} \geq R\P{w} = R_2\P{w}$$
#
# $\P{2}$
#
# $\bspace$To find its maximum we first take its derivative $w.r.t.$ $a$ and let it be $0$:
#
# $$\ffrac{\partial \Exp\SB{u_1\P{w+a\tilde r}}} {\partial a} = \Exp\SB{u'\P{\lambda w + a\lambda\tilde r}\cdot\lambda\tilde r} = 0 \iff \Exp\SB{u'\P{\lambda w + a\lambda\tilde r}\cdot\tilde r} = 0$$
#
# $\bspace$Then find its derivative $w.r.t.$ $\lambda$, still, it should be $0$:
#
# $$\Exp\SB{u''\P{\lambda w + \lambda a \tilde r}\tilde r\P{w+a\tilde r + \lambda a' \tilde r}} = 0$$
#
# $\bspace$Let $\tilde w = \lambda w + \lambda a \tilde r$ we will then have
#
# $$a' = -\ffrac{\Exp\SB{u''\P{\tilde w} \tilde w \tilde r}} {\lambda^2 \Exp\SB{u''\P{\tilde w}\tilde r^2}} = \ffrac{\Exp\SB{u'\P{\tilde w} R\P{\tilde w} \tilde r}} {\lambda^2 \Exp\SB{u''\P{\tilde w}\tilde r^2}}$$
#
# $\bspace$The numerator is $\Exp\SB{u'\P{\tilde w} R\P{\tilde w} \tilde r} \geq R\P{w} \Exp\SB{u'\P{\tilde w} \tilde r} = 0$. And the denominator is, of course, negative, since $u''\P{\cdot}\leq 0$. Thus $a' \leq 0$ and consequently, we have $a_1\P{w} = a\P{\lambda w} \leq a\P{w} = a_2\P{w}$.
#
# $\P{3}$
#
# $\bspace$First we define $\lambda = w_2/w_1 \geq 1$, then
#
# $$a\P{w_2} = a\P{\lambda w_1} \leq a\P{w_1} \leq \ffrac{w_2} {w_1}a\P{w_1} \Rightarrow \hat a\P{w_1}\geq\hat a\P{w_2}$$
# ## Question 7.2
#
# $\bspace$It's a constant absolute risk aversion so we just assume that the utility function to be $u=-e^{-aw}$ with $a>0$. And let $-\pi \equiv \xi$ so that $u\P{w+\xi}=-e^{-a\P{w+\xi}}$.
#
# $\P{1}$
#
# $\bspace$Since it's a fair game we have $P\CB{\tilde g = +b} = P\CB{\tilde g = -b} = 0.5$.
#
# $$\begin{align}
# \Exp\SB{u\P{w+\tilde g}} &= \ffrac{1} {2}u\P{w+b} +\ffrac{1} {2}u\P{w-b} = -\ffrac{1} {2}\P{e^{-a\P{w+b}} + e^{-a\P{w-b}}} = u\P{w+\xi}\\
# \Rightarrow \xi &= -\ffrac{1} {a}\log\P{\ffrac{1} {2}e^{-ab} + \ffrac{1}{2}e^{ab}}
# \end{align}$$
#
# $\P{2}$
#
# $$\begin{align}
# \Exp\SB{u\P{w+\tilde g}} &= \int_{-c}^{c} \ffrac{1} {2c}\P{-e^{-a\P{w+x}}}\dd{x} = \left.\ffrac{1} {2ac}e^{-a\P{w+x}}\right|_{-c}^{c} = \ffrac{1}{2ac}\P{e^{-a\P{w+c}}-e^{-a\P{w-c}}} = u\P{w+\xi}\\
# \Rightarrow \xi &= -\ffrac{1} {a}\log\P{\ffrac{e^{ac}-e^{-ac}} {2ac}}
# \end{align}$$
#
# $\P{3}$
#
# $$\begin{align}
# \Exp\SB{u\P{w+\tilde g}} &= \int_{-\infty}^{\infty} -e^{-a\P{w+x}} \ffrac{1} {\sqrt{2\pi}\sigma} \exp\CB{-\ffrac{x^2} {2\sigma^2}}\dd{x} = -\exp\CB{-aw + \ffrac{a^2\sigma^2} {2}} = u\P{w+\xi}\\
# \Rightarrow \xi &= -\ffrac{a\sigma^2} {2}
# \end{align}$$
#
# $\P{4}$
#
# $\bspace$NONE of the above fair games show that the certainty equivalence is dependent on $w$. I think that's because they're fair games. And the extra $w$ has no effect on the absolute risk aversion.
# ***
| FinMath/Financial Economics/HW/HW_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # 使用Pytorch 搭建神经网络实现气温预测
#
# + pycharm={"name": "#%%\n"}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# + pycharm={"name": "#%%\n"}
# Load the daily temperature dataset and take a quick look at it.
features = pd.read_csv('temps.csv')
print(features.head())
print(features.shape)
features.describe()
# + pycharm={"name": "#%%\n"}
# Build datetime objects from the year/month/day columns.
import datetime
years = features['year']
months = features['month']
days = features['day']
dates = [f'{year}-{month}-{day}' for year,month,day in zip(years,months,days)]
dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in dates]
# + pycharm={"name": "#%%\n"}
# NOTE(review): this cell overwrites `dates`, assuming a 'Date' column exists,
# while the cell above built dates from year/month/day columns -- confirm which
# schema temps.csv actually has; one of the two cells is redundant.
import datetime
dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in features['Date']]
print(dates[:5])
# + pycharm={"name": "#%%\n"}
# Visualisation: a 2x2 grid of subplots (only ax1 is actually populated).
plt.style.use('fivethirtyeight')
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(nrows=2,ncols=2,figsize=(10,10))
fig.autofmt_xdate(rotation=45)
# Average temperature over time.
# NOTE(review): the column name 'Temperature(Celsius)(avg)' must match
# temps.csv, while the label cell below reads 'actual' -- confirm the schema.
ax1.plot(dates, features['Temperature(Celsius)(avg)'])
ax1.set_xlabel('')
ax1.set_ylabel('Temp'); ax1.set_title('Max')
plt.tight_layout(pad=2)
# + pycharm={"name": "#%%\n"}
# One-hot encode string-valued columns (each category becomes a 0/1 column).
features = pd.get_dummies(features)
print(features.head())
# + pycharm={"name": "#%%\n"}
# Split off the prediction target ('actual'); the remaining columns
# become the feature matrix.
labels = np.array(features['actual'])
features = features.drop('actual', axis=1)
feature_list = list(features.columns)  # remember column order for later
features = np.array(features)
print(features.shape)  # (rows, number of features)
# + pycharm={"name": "#%%\n"}
# Standardise features (zero mean, unit variance) with sklearn;
# normalised inputs make gradient descent converge more easily.
from sklearn import preprocessing
input_feature = preprocessing.StandardScaler().fit_transform(features)
print(input_feature[0])
# 标准化后更容易收敛
# + pycharm={"name": "#%%\n"}
# Build the network "the hard way": raw tensors plus manual gradient descent.
x = torch.tensor(input_feature, dtype=float)
# Reshape labels to (N, 1) so (pred - y) is element-wise; without this the
# subtraction broadcasts (N, 1) against (N,) into an (N, N) matrix and the
# loss is computed over the wrong quantity.
y = torch.tensor(labels, dtype=float).reshape(-1, 1)
# Randomly initialised parameters for a single hidden layer of width 128.
weights = torch.randn((14, 128), dtype=float, requires_grad=True)
biases = torch.randn(128, dtype=float, requires_grad=True)
weights2 = torch.randn((128, 1), dtype=float, requires_grad=True)
biases2 = torch.randn(1, dtype=float, requires_grad=True)
lr = 0.001
losses = []
epochs = 1000
for epoch in range(epochs):
    hidden = x.mm(weights) + biases      # affine layer (matrix multiply)
    hidden = torch.relu(hidden)          # non-linearity
    pred = hidden.mm(weights2) + biases2
    loss = torch.mean((pred - y) ** 2)   # mean squared error
    losses.append(loss.item())           # fixed: history was never recorded
    if epoch % 100 == 0:
        print(epoch, loss)
    loss.backward()                      # back-propagate gradients
    # Plain gradient-descent update for every parameter.
    weights.data.add_(-lr * weights.grad.data)
    # Fixed: the bias update previously applied weights.grad by mistake.
    biases.data.add_(-lr * biases.grad.data)
    weights2.data.add_(-lr * weights2.grad.data)
    biases2.data.add_(-lr * biases2.grad.data)
    # Clear accumulated gradients before the next iteration.
    weights.grad.zero_()
    biases.grad.zero_()
    weights2.grad.zero_()
    biases2.grad.zero_()
# + pycharm={"name": "#%%\n"}
# Build the same network the idiomatic way, with torch.nn.Sequential.
input_size = input_feature.shape[1]
hidden_size = 128
output_size = 1
batch_size = 16
net_nn = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),
    torch.nn.Sigmoid(),
    torch.nn.Linear(hidden_size, output_size),
)
# Mean-squared-error loss with the Adam optimiser.
criterion = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net_nn.parameters(), lr=0.001)
losses = []
epochs = 1000
total_size = len(input_feature)
for epoch in range(epochs):
    for start in range(0, total_size, batch_size):
        optimizer.zero_grad()  # clear gradients from the previous step
        end = min(start + batch_size, total_size)
        # Fixed: use float32 to match nn.Linear's default weight dtype
        # (dtype=float created float64 tensors, which raises a dtype error).
        x = torch.tensor(input_feature[start:end], dtype=torch.float)
        # Fixed: reshape targets to (batch, 1) to match the network output;
        # otherwise MSELoss silently broadcasts (batch, 1) vs (batch,).
        y = torch.tensor(labels[start:end], dtype=torch.float).reshape(-1, 1)
        pred = net_nn(x)
        loss = criterion(pred, y)
        loss.backward()  # retain_graph is unnecessary: the graph is rebuilt per batch
        optimizer.step()
        losses.append(loss.data.numpy())
    if epoch % 100 == 0:
        # Fixed: report only the latest batch loss, not the full history list.
        print(epoch, losses[-1])
# + pycharm={"name": "#%%\n"}
# Predict on the full (standardised) dataset with the trained network;
# float32 input matches the nn.Linear weight dtype.
test_x = torch.tensor(input_feature, dtype=torch.float)
pred = net_nn(test_x).data.numpy()
# + pycharm={"name": "#%%\n"}
# Plot ground truth vs. model predictions over time.
true_data = pd.DataFrame(data={'date': dates, 'actual': labels})
pred_data = pd.DataFrame(data={'date': dates, 'prediction': pred.reshape(-1)})
plt.plot(true_data['date'], true_data['actual'], 'b-', label='actual')
# Fixed: predictions live in pred_data; true_data has no 'prediction'
# column, so the original line raised a KeyError.
plt.plot(pred_data['date'], pred_data['prediction'], 'ro', label='prediction')
plt.xticks(rotation='60')
plt.legend()
| part03-neural-network/temperature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Alternate view 05
#
# ### Purpose
# - Demonstrate graph connecting output to input. The example shows connection between cities in a trip.
# - Simplify view by removing area and country which are not usefull here and hiding towns. The graph only shows connections between flights
# ## Imports modules and does some configurations
import logging
logging.basicConfig()
logger = logging.getLogger('dependencynet')
logger.setLevel(logging.WARN)
# clarification for linter
from IPython.display import display
# remove these lines if you use the pypi package
import sys
sys.path.append("../../..") # go to parent dir
from dependencynet.schema import SchemaBuilder
from dependencynet.model import ModelBuilder
from dependencynet.network.graphbuilder import GraphBuilder, LevelNode, InputNode, OutputNode
from dependencynet.network.stylebuilder import StyleBuilder
from dependencynet.network.graphviewer import GraphViewer
from dependencynet.network.graphml import GraphMLConverter
# ## Loads and prepare data
#
# Check the notebook for more information on the dataset and the steps requiored to prepare the graph.
# %run example-graphml-trips.ipynb
# Show the model loaded by the %run'd notebook above.
graph_model.pretty_print()
# ## Alter the graph model
#
# - remove area and country nodes
# - show connections between flights (merge flight_in and flight_out into a single node, then hide towns nodes)
graph_model.remove_category('area')
graph_model.remove_category('country')
# Merge outgoing/incoming flight nodes into a single 'flight' node.
graph_model.merge_connection('flight_out', 'flight_in', 'flight')
# Fold town nodes away, keeping only flight-to-flight connectivity.
graph_model.fold_category('town')
graph_model.pretty_print()
# #### Shows graph
display(GraphViewer(graph_model).render('dagre', graph_style, 'LR'))
# ## Exports to GraphML
# NOTE(review): `path` and `makedirs` are not imported in this notebook --
# presumably they come from the %run'd example-graphml-trips.ipynb; confirm.
dirname = path.join('output')
makedirs(dirname, exist_ok=True)
filename = path.join(dirname, 'trips_yed_05.graphml')
GraphMLConverter(graph_model, graph_style, schema_trips).save(filename)
| notebooks/scenario/trips/alternatives-trips-05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import imageio
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage
import cv2 as cv
def grayscale(rgb):
    """Convert an RGB image array to grayscale.

    Uses the ITU-R BT.601 luma weights (0.299 R + 0.587 G + 0.114 B).
    The previous coefficients (0.199/0.287/0.214) summed to only 0.7,
    which produced an overly dark grayscale image.
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
# Download the photo and build grayscale / inverted variants for sketching.
img_src="https://images.mid-day.com/images/2018/apr/Dhoni-Ziva.jpg"
img=imageio.imread(img_src)
gray_image=grayscale(img)
# Invert: dark areas become bright, which the dodge blend later exploits.
inverted_image=255-gray_image
#original image
plt.imshow(img)
#gray_image
plt.imshow(gray_image)
#inverted_image
plt.imshow(inverted_image)
# Blur the inverted image. scipy.ndimage.gaussian_filter replaces the
# deprecated scipy.ndimage.filters namespace (removed in SciPy 1.10).
blur_image = scipy.ndimage.gaussian_filter(inverted_image, sigma=5)
plt.imshow(blur_image)
def dodge(front, back):
    """Color-dodge blend: brighten *front* according to *back* (sketch effect).

    Where back == 255 the dodge formula divides by zero; those pixels are
    forced to pure white afterwards, and np.errstate silences the transient
    divide/invalid warnings the intermediate computation would emit.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        result = front * 255 / (255 - back)
    result[result > 255] = 255   # clip overshoot to white
    result[back == 255] = 255    # fully-bright background dodges to white
    return result.astype('uint8')
# Dodge-blend the blurred inverse against the grayscale to get the sketch.
sketched_image=dodge(blur_image, gray_image)
plt.imshow(sketched_image, cmap="gray")
# Save the result (OpenCV writes a single-channel array as grayscale JPEG).
cv.imwrite("sketch.jpeg", sketched_image)
| Sketch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import standard PyTorch modules
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter # TensorBoard support
# import torchvision module to handle image manipulation
import torchvision
import torchvision.transforms as transforms
# calculate train time, writing train data to files etc.
import time
import pandas as pd
import json
from IPython.display import clear_output
torch.set_printoptions(linewidth=120)
torch.set_grad_enabled(True) # On by default, leave it here for clarity
# -
# Use standard FashionMNIST dataset
train_set = torchvision.datasets.FashionMNIST(
root = './data/FashionMNIST',
train = True,
download = True,
transform = transforms.Compose([
transforms.ToTensor()
])
)
# Build the neural network, expand on top of nn.Module
class Network(nn.Module):
    """Small CNN for 28x28 single-channel FashionMNIST images.

    Two conv+pool stages (1 -> 6 -> 12 channels) feed a three-layer fully
    connected head that ends in 10 raw class logits.
    """
    def __init__(self):
        super().__init__()
        # Convolutional feature extractor.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        # Classifier head; 12*4*4 is the flattened size after two 5x5 convs
        # and two 2x2 max-pools applied to a 28x28 input.
        self.fc1 = nn.Linear(in_features=12*4*4, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=60)
        self.out = nn.Linear(in_features=60, out_features=10)

    def forward(self, t):
        # Stage 1: conv -> relu -> 2x2 max-pool.
        t = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
        # Stage 2: conv -> relu -> 2x2 max-pool.
        t = F.max_pool2d(F.relu(self.conv2(t)), kernel_size=2, stride=2)
        # Flatten, then run the fully connected head.
        t = t.reshape(-1, 12*4*4)
        t = F.relu(self.fc1(t))
        t = F.relu(self.fc2(t))
        # Return raw logits; cross-entropy applies softmax internally.
        return self.out(t)
# +
from collections import OrderedDict
# put all hyper params into a OrderedDict, easily expandable
params = OrderedDict(
lr = [.01],
batch_size = [100],
shuffle = [False]
)
epochs = 10
# +
# import modules to build RunBuilder and RunManager helper classes
from collections import namedtuple
from itertools import product
# Read in the hyper-parameters and return a Run namedtuple containing all the
# combinations of hyper-parameters
class RunBuilder():
    """Expand a dict of hyper-parameter lists into every combination.

    Each combination is returned as a `Run` namedtuple whose fields are the
    dict's keys, e.g. OrderedDict(lr=[.01], batch_size=[100, 1000]) yields
    Run(lr=0.01, batch_size=100) and Run(lr=0.01, batch_size=1000).
    """
    @staticmethod
    def get_runs(params):
        Run = namedtuple('Run', params.keys())
        # Cartesian product over the value lists, one Run per combination.
        return [Run(*combo) for combo in product(*params.values())]
# -
# Helper class, help track loss, accuracy, epoch time, run time,
# hyper-parameters etc. Also record to TensorBoard and write into csv, json
class RunManager():
    """Tracks per-epoch and per-run loss, accuracy and timing, mirrors them
    to TensorBoard, and can dump all collected results to csv/json."""

    def __init__(self):
        # tracking every epoch count, loss, accuracy, time
        self.epoch_count = 0
        self.epoch_loss = 0
        self.epoch_num_correct = 0
        self.epoch_start_time = None
        # tracking every run count, run data, hyper-params used, time
        self.run_params = None
        self.run_count = 0
        self.run_data = []
        self.run_start_time = None
        # record model, loader and TensorBoard writer handles
        self.network = None
        self.loader = None
        self.tb = None

    def begin_run(self, run, network, loader):
        """Record the hyper-params/model/loader of a new run; log a sample
        image grid and the network graph to TensorBoard."""
        self.run_start_time = time.time()
        self.run_params = run
        self.run_count += 1
        self.network = network
        self.loader = loader
        self.tb = SummaryWriter(comment=f'-{run}')
        images, labels = next(iter(self.loader))
        grid = torchvision.utils.make_grid(images)
        self.tb.add_image('images', grid)
        self.tb.add_graph(self.network, images)

    def end_run(self):
        """Close the TensorBoard writer and reset the epoch counter."""
        self.tb.close()
        self.epoch_count = 0

    def begin_epoch(self):
        """Zero the per-epoch loss/accuracy counters and start the clock."""
        self.epoch_start_time = time.time()
        self.epoch_count += 1
        self.epoch_loss = 0
        self.epoch_num_correct = 0

    def end_epoch(self):
        """Compute epoch stats, log them to TensorBoard, and display the
        accumulated results table in the notebook."""
        # calculate epoch duration and run duration (accumulated)
        epoch_duration = time.time() - self.epoch_start_time
        run_duration = time.time() - self.run_start_time
        # epoch loss and accuracy, averaged over the whole dataset
        loss = self.epoch_loss / len(self.loader.dataset)
        accuracy = self.epoch_num_correct / len(self.loader.dataset)
        self.tb.add_scalar('Loss', loss, self.epoch_count)
        self.tb.add_scalar('Accuracy', accuracy, self.epoch_count)
        # histogram every parameter and its gradient
        for name, param in self.network.named_parameters():
            self.tb.add_histogram(name, param, self.epoch_count)
            self.tb.add_histogram(f'{name}.grad', param.grad, self.epoch_count)
        # collect all run-related data into an OrderedDict row
        results = OrderedDict()
        results["run"] = self.run_count
        results["epoch"] = self.epoch_count
        results["loss"] = loss
        results["accuracy"] = accuracy
        results["epoch duration"] = epoch_duration
        results["run duration"] = run_duration
        # append the hyper-params used for this run to the row
        for k,v in self.run_params._asdict().items(): results[k] = v
        self.run_data.append(results)
        df = pd.DataFrame.from_dict(self.run_data, orient = 'columns')
        # display epoch information and show progress
        clear_output(wait=True)
        display(df)

    def track_loss(self, loss):
        # multiply by batch size so different batch sizes can be compared
        self.epoch_loss += loss.item() * self.loader.batch_size

    def track_num_correct(self, preds, labels):
        # accumulate batch corrects into the epoch total
        self.epoch_num_correct += self._get_num_correct(preds, labels)

    @torch.no_grad()
    def _get_num_correct(self, preds, labels):
        """Count predictions whose argmax matches the label."""
        return preds.argmax(dim=1).eq(labels).sum().item()

    def save(self, fileName):
        """Dump all collected run rows to <fileName>.csv and <fileName>.json."""
        pd.DataFrame.from_dict(
            self.run_data,
            orient = 'columns',
        ).to_csv(f'{fileName}.csv')
        with open(f'{fileName}.json', 'w', encoding='utf-8') as f:
            json.dump(self.run_data, f, ensure_ascii=False, indent=4)
# +
# Train one model per hyper-parameter combination produced by RunBuilder.
m = RunManager()
for run in RunBuilder.get_runs(params):
    # Fresh model/loader/optimizer per combination; if `params` grows,
    # the lines below must pick up the new fields from `run`.
    network = Network()
    loader = torch.utils.data.DataLoader(train_set, batch_size = run.batch_size)
    optimizer = optim.Adam(network.parameters(), lr=run.lr)
    m.begin_run(run, network, loader)
    for epoch in range(epochs):
        m.begin_epoch()
        for batch in loader:
            images = batch[0]
            labels = batch[1]
            preds = network(images)
            loss = F.cross_entropy(preds, labels)
            optimizer.zero_grad()   # clear old gradients
            loss.backward()         # back-propagate
            optimizer.step()        # update weights
            m.track_loss(loss)
            m.track_num_correct(preds, labels)
        m.end_epoch()
    m.end_run()
# when all runs are done, save results to files
m.save('results')
# +
def get_pil_transform():
    """Identity PIL-side transform (the resize step is left commented out)."""
    transf = transforms.Compose([
        #transforms.Resize((28, 28)),
    ])
    return transf

def get_preprocess_transform():
    """PIL image / array -> float tensor transform used before the network."""
    transf = transforms.Compose([
        #transforms.Resize((28, 28)),
        transforms.ToTensor(),
    ])
    return transf

pill_transf = get_pil_transform()
preprocess_transform = get_preprocess_transform()
# -

def batch_predict(pred_images):
    """Classifier function for LIME.

    Takes a batch of NHWC arrays, keeps only channel 0 (LIME hands back
    3-channel copies of the grayscale image), and returns softmax
    probabilities as a numpy array. Uses the module-level `network`.
    """
    pred_images = pred_images[:,:,:,0]
    batchy = torch.stack(tuple(preprocess_transform(i) for i in pred_images), dim=0)
    logits = network(batchy)
    probs = F.softmax(logits, dim=1)
    return probs.detach().numpy()

def batch_true_predict(pred_images):
    """Like batch_predict but for already single-channel PIL images."""
    batchy = torch.stack(tuple(preprocess_transform(i) for i in pred_images), dim=0)
    logits = network(batchy)
    probs = F.softmax(logits, dim=1)
    return probs.detach().numpy()
# +
# Grab the first training example for the LIME demo.
test_dataloader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False, num_workers=0)
# Fixed: use the builtin next(); the .next() method is Python-2 style and is
# not provided by modern DataLoader iterators.
imgs, lbls = next(iter(test_dataloader))
img = torchvision.transforms.ToPILImage()(imgs[0])
# +
import matplotlib.pyplot as plt
import numpy as np
# Show the sample and its label.
plt.imshow(img)
print(lbls[0])
# -
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.savefig('fig/example.png', bbox_inches='tight')
plt.show()
pix = np.array(img)
pix.shape
# +
# Explain the model's prediction on this single image with LIME.
from lime import lime_image
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(pix,
                                         batch_predict, # classification function
                                         #top_labels=5,
                                         hide_color=0,
                                         num_samples=10000) # number of images that will be sent to classification function
# -
explanation.local_exp
# +
from skimage.segmentation import mark_boundaries
# Overlay the top-5 positive superpixels for the predicted class.
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=False)
# NOTE(review): dividing by 28 looks like an image-size constant rather than
# an intensity normaliser (255 would be usual) -- confirm intended scaling.
img_boundry1 = mark_boundaries(temp/28, mask)
plt.imshow(img_boundry1, cmap='gray')
plt.axis('off')
plt.savefig('fig/example_lime.png', bbox_inches='tight')
plt.show()
# -
# Same overlay but including negative-weight regions (top 10 features).
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)
img_boundry2 = mark_boundaries(temp/28, mask)
plt.imshow(img_boundry2)
# +
# Collect the first `sample_size` training examples as PIL images.
sample_size = 1000
sample_lbls = []
sample_imgs = []
iterator = iter(torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False, num_workers=0))
for i in range(sample_size):
    img, lbl = next(iterator)
    img = torchvision.transforms.ToPILImage()(img[0])
    sample_lbls.append(lbl)
    sample_imgs.append(img)
# -
img
true_preds = batch_true_predict(sample_imgs)
# +
import random
# Perturb each image by +/-15 at one random pixel -- a tiny input change
# used to probe the stability of the model's predictions.
dif_sample_imgs = []
for image in sample_imgs:
    im = np.array(image)
    x = random.randint(0, 27)
    y = random.randint(0, 27)
    if im[x][y] + 15 > 255:
        im[x][y] -= 15   # avoid uint8 overflow at bright pixels
    else:
        im[x][y] += 15
    dif_sample_imgs.append(torchvision.transforms.ToPILImage()(im))
# -
new_preds = batch_true_predict(dif_sample_imgs)
# +
from tqdm import tqdm
# Run LIME on every sample image and keep the surrogate's local prediction,
# explanation weights and fit score.
lime_local_pred = []
lime_local_exp = []
lime_score = []
explainer = lime_image.LimeImageExplainer(kernel_width=100)
for im in sample_imgs:
    exp = explainer.explain_instance(image=np.array(im), classifier_fn=batch_predict)
    lime_local_pred.append(exp.local_pred)
    lime_local_exp.append(exp.local_exp)
    lime_score.append(exp.score)
# -
lime_local_pred = np.stack(lime_local_pred)
# Flatten per-image LIME predictions / model probabilities to scalars.
top_lime_preds = []
for mas in lime_local_pred:
    top_lime_preds.append(mas[0])
top_lime_preds
top_true_preds = []
for mas in true_preds:
    top_true_preds.append(mas.max())
top_new_preds = []
for mas in new_preds:
    top_new_preds.append(mas.max())
top_new_preds
top_true_preds
# Pair true-vs-surrogate values and sort by descending true confidence.
true_lime = []
for i in range(sample_size):
    true_lime.append((top_true_preds[i], top_lime_preds[i]))
true_lime.sort(key=lambda x: -x[0])
true_lime
true_new = []
for i in range(sample_size):
    true_new.append((top_true_preds[i], top_new_preds[i]))
true_new.sort(key=lambda x: -x[0])
true_new
# +
# Plot true model confidence vs. confidence after the one-pixel perturbation.
plt.rcParams['font.size'] = '20'
li = [true_new[i][1] for i in range(sample_size)]
tr = [true_new[i][0] for i in range(sample_size)]
fig = plt.figure(figsize=(10, 8))
plt.plot(tr, label='True prediction')
# NOTE(review): this series is the perturbed-input predictions, but the
# legend says 'OpenBox' -- confirm the intended label.
plt.plot(li, label='OpenBox')
plt.xlabel('Индекс объекта', fontsize=20)
plt.ylabel('Предсказание вероятности', fontsize=20)
plt.legend(loc=3)
fig.axes[0].set_rasterized(True)
plt.savefig('fig/True_OpenBox.png')
plt.show()
# +
# Same comparison for the LIME surrogate predictions.
plt.rcParams['font.size'] = '20'
li = [true_lime[i][1] for i in range(sample_size)]
tr = [true_lime[i][0] for i in range(sample_size)]
fig = plt.figure(figsize=(10, 8))
plt.plot(tr, label='True prediction')
plt.plot(li, label='LIME')
plt.xlabel('Индекс объекта', fontsize=20)
plt.ylabel('Предсказание вероятности', fontsize=20)
plt.legend(loc=3)
fig.axes[0].set_rasterized(True)
plt.savefig('fig/True_Lime.png')
plt.show()
# -
| code/Muradov_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Basic list operations demo.
my_list=[1,2,3]
my_list=['Ram',100.002]  # rebinding replaces the previous list
len(my_list)
my_list=['Ram',100,89.999,'Hello',"Nails"]
my_list[0]
my_list[1]
my_list[1:-8:-2]  # extended slice stepping backwards (-2) from index 1
no_list=['1','2','3']
my_list + no_list  # concatenation returns a new list
my_list
no_list
new_list=my_list + no_list
new_list
new_list[4]='Nobody'  # lists are mutable: replace an element in place
new_list
# NOTE(review): missing parentheses -- this line only accesses the bound
# method and does nothing; calling new_list.sort() here would raise a
# TypeError anyway because the list mixes str and numeric elements.
new_list.sort
new_list.append('sixty')
new_list
new_list.pop()  # removes and returns the last element ('sixty')
new_list
new_list=['a','n','d','s','l']
new_listn=['1','7','0.0','0.7']
new_list.sort()  # in-place sort, returns None
new_list
new_listn.sort()  # strings sort lexicographically, not numerically
new_listn
new_list.reverse()  # in-place reversal
new_list
| Exercise py/Lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing Chipotle's Data
# This time we are going to pull data directly from the internet.
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
# +
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
# set this so the graphs open internally
# %matplotlib inline
# -
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).
# ### Step 3. Assign it to a variable called chipo.
# Read the TSV straight from GitHub; pass the separator by keyword, since
# positional use of read_csv's `sep` argument is deprecated in pandas.
chipo = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv', sep='\t')
# ### Step 4. See the first 10 entries
# First rows of the order data.
chipo.head()
# ### Step 5. Create a histogram of the top 5 items bought
# Total quantity per item, descending; keep the top five.
top_five = chipo.groupby('item_name').sum()['quantity'].sort_values(ascending=False)[:5]
top_five
# +
fig, ax = plt.subplots()
ax.bar(x=top_five.index, height=top_five)
ax.set_xlabel('Items')
ax.set_ylabel('Number of Times Ordered')
ax.set_title('Most ordered Chipotle\'s Items')
plt.xticks(rotation=45)
plt.tight_layout()
# -
# ### Step 6. Create a scatterplot with the number of items orderered per order price
# #### Hint: Price should be in the X-axis and Items ordered in the Y-axis
# Strip the leading '$' and convert prices to float.
chipo['item_price'] = chipo['item_price'].str[1:].astype(float)
# Per-order totals (quantity and price summed across line items).
items_ordered = chipo.groupby('order_id').sum()
# +
fig, ax = plt.subplots()
# Price on the X-axis and quantity on the Y-axis, per the exercise hint;
# the original call plotted quantity on X while labelling that axis
# 'Item Price', so the labels contradicted the data.
ax.scatter(x=items_ordered['item_price'], y=items_ordered['quantity'], s=50)
ax.set_xlabel('Item Price')
ax.set_ylabel('Items Ordered')
| 07_Visualization/Chipotle/Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:memlab]
# language: python
# name: conda-env-memlab-py
# ---
# Once installed, you need to load the `pytorch_memlab` IPython extensions:
# %load_ext pytorch_memlab
# One magic is provided, `mlrun` which can act either as a line magic `%mlrun`, or as a cell magic `%%mlrun`
# %%mlrun?
# First we need some torch code to profile:
# +
import torch

def x():
    """Allocate a small Linear layer on the default GPU (for profiling)."""
    torch.nn.Linear(100, 100).cuda()

def y(gpu=0):
    """Allocate a larger Linear layer on the given GPU index."""
    torch.nn.Linear(1000, 100).cuda(device=gpu)
# -
# We can profile multiple functions at the same time by repeatedly specifying `-f`
# +
# %%mlrun -f x -f y
x()
y()
# -
# You can also profile with the `%mlrun` line magic

def z():
    """Another small GPU allocation, profiled via the %mlrun line magic."""
    torch.nn.Linear(100, 100).cuda()

# %mlrun -f z z()
# You can specify which GPU you wish to profile using `-g`:
# +
# %%mlrun -f x -f y -g 1 y
x()
y(gpu=1)
# -
# You can get a handle on the `LineProfiler` object using `-r`
# profiler = %mlrun -q -r -f x x()
# `profiler` is created by the magic above; inspect its per-line memory map.
profiler.code_map
# You can dump stats out to a file using `-T`:
# %mlrun -q -T profile.log -f x x()
# !head profile.log
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### IWSLT German->English translation
#
# This notebook shows a simple example of how to use the transformer provided by this repo for one-direction translation.
#
# We will use the IWSLT 2016 De-En dataset.
# +
from torchtext import data, datasets
import spacy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
import sys
sys.path.append("..")
from model.transformers import BaseTransformer
from model.utils import device, Batch, BasicIterator
from model.opt import NoamOpt
import time
# -
# ##### The below does some basic data preprocessing and filtering, in addition to setting special tokens.
# + pycharm={"name": "#%%\n"}
de_data = spacy.load('de_core_news_sm')
en_data = spacy.load('en_core_web_sm')

def de_tokenizer(data):
    """Tokenize German text with spaCy; returns a list of token strings."""
    raw_data = [x.text for x in de_data.tokenizer(data)]
    return raw_data

def en_tokenizer(data):
    """Tokenize English text with spaCy; returns a list of token strings."""
    raw_data = [x.text for x in en_data.tokenizer(data)]
    return raw_data

# Special tokens: start/end of sequence and padding.
BOS = "<s>"
EOS = "</s>"
BLANK = "<blank>"
de = data.Field(tokenize=de_tokenizer, pad_token=BLANK, init_token=BOS, eos_token=EOS)
en = data.Field(tokenize=en_tokenizer, pad_token=BLANK, init_token=BOS, eos_token=EOS)

# Keep only sentence pairs where both sides fit in MAX_LEN tokens.
MAX_LEN = 128
train, val, test = datasets.IWSLT.splits(
    exts=(".de", ".en"), fields=(de, en),
    filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
)
# Tokens seen fewer than MIN_FREQ times map to <unk>.
MIN_FREQ = 4
de.build_vocab(train.src, min_freq=MIN_FREQ)
en.build_vocab(train.trg, min_freq=MIN_FREQ)
# -
# ##### Torchtext required functions. batch_size_fn exists to make sure the batch size stays where it should be.
#
# ##### The BasicIterator class helps with dynamic batching, so batches are tightly grouped with minimal padding.
# +
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
    """Return the padded token count of the batch if *new* were added.

    Tracks the longest source/target example seen so far in module-level
    globals (reset when count == 1, i.e. at the start of a fresh batch).
    Target length gets +2 to account for the <s> and </s> tokens.
    """
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:
        max_src_in_batch = max_tgt_in_batch = 0
    max_src_in_batch = max(max_src_in_batch, len(new.src))
    max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)
    # The batch cost is whichever side needs more padded tokens.
    return max(count * max_src_in_batch, count * max_tgt_in_batch)
# Dynamic batching: BasicIterator groups similar-length examples so batches
# are tightly packed with minimal padding; batch_size here counts tokens
# (via batch_size_fn), not examples.
train_loader = BasicIterator(train, batch_size=1100, device=torch.device("cuda"),
                             repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                             batch_size_fn=batch_size_fn, train=True)
val_loader = BasicIterator(val, batch_size=1100, device=torch.device("cuda"),
                           repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                           batch_size_fn=batch_size_fn, train=False)
test_loader = BasicIterator(test, batch_size=1100, device=torch.device("cuda"),
                            repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                            batch_size_fn=batch_size_fn, train=False)
# -
# ##### Single step over entire dataset, with tons of gradient accumulation to get batch sizes big enough for stable training.
def train_step(dataloader):
    """One pass over *dataloader*, accumulating gradients across 11 batches
    to simulate an effective batch size of ~12k tokens.

    Uses the module-level `transformer`, `criterion` and `optimizer`.
    Returns the mean loss per batch (0.0 for an empty loader).
    """
    total_loss = 0.0
    i = 0
    for batch in dataloader:
        loss, _ = transformer.forward_and_return_loss(criterion, batch.src, batch.trg)
        loss.backward()
        total_loss += loss.item()
        i += 1
        # Step only every 11th batch so gradients accumulate in between.
        if i % 11 == 0:
            optimizer.step()
            optimizer.zero_grad()
    # Flush gradients accumulated since the last step; the original
    # silently dropped the final partial accumulation of each epoch.
    if i % 11 != 0:
        optimizer.step()
        optimizer.zero_grad()
    return total_loss / i if i else 0.0
# #### Creating the translation model:
#
# Subclassing the Transformer class allows us to implement a forward_and_return_loss_function and generation function, and requires nothing else before being fully functional.
#
# The Transformer class handles embedding and the transformer layers itself, including an output Linear layer.
#
# The goal of a basic translation model is to recreate the translation given the input (in a different language). We use crossentropy between the target and ground truth.
#
# We use the utils.Batch object to automatically create padding masks, in addition to dec-dec attn. masks.
class TranslationModel(BaseTransformer):
    """One-direction De->En translation model on top of BaseTransformer.

    Adds a teacher-forced loss helper (with automatic mask construction)
    and greedy decoding."""
    def __init__(
        self, *args,
    ):
        super(TranslationModel, self).__init__(*args)

    def forward_and_return_loss(self, criterion, sources, targets):
        """
        Pass input through transformer and return loss, handles masking automagically
        Args:
            criterion: torch.nn.functional loss function of choice
            sources: source sequences, [seq_len, bs]
            targets: full target sequence, [seq_len, bs, embedding_dim]
        Returns:
            loss, transformer output
        """
        # Batch builds the padding mask and the causal decoder mask.
        batch = Batch(sources, targets, self.pad_idx)
        seq_len, batch_size = batch.trg.size()
        out = self.forward(batch.src, batch.trg, batch.src_mask, batch.trg_mask)
        # Flatten to (seq_len*bs, vocab) vs (seq_len*bs,) for cross-entropy;
        # padding positions are excluded via ignore_index.
        loss = criterion(
            out.contiguous().view(-1, out.size(-1)),
            batch.trg_y.contiguous().view(-1),
            ignore_index=self.pad_idx,
        )
        return loss, out

    def generate(self, source, source_mask, max_len):
        """
        Greedy decoding: encode once, then repeatedly feed the partial output
        through the decoder and append the argmax token.
        Args:
            source: input sequence indices, [seq_len, bs]
            source_mask: the source mask to prevent attending to <pad> tokens
            max_len: maximum length
        Returns:
            generated translations
        NOTE(review): max_len is currently unused -- the loop bound is fixed
        at 1.5 * source length + 10; confirm whether max_len should cap it.
        """
        memory = self.encoder(source, source_mask)
        # Start every sequence in the batch with the <s> token.
        ys = torch.ones(1, source.size(1)).long().fill_(self.sos_idx).to(device)
        # max target length is 1.5x * source + 10 to save compute power
        for _ in range(int(1.5 * source.size(0)) - 1 + 10):
            out = self.decoder(ys, memory, source_mask, Batch(ys, ys, 1).raw_mask)
            out = self.fc1(out[-1].unsqueeze(0))
            prob = F.log_softmax(out, dim=-1)
            next_word = torch.argmax(prob, dim=-1)
            ys = torch.cat([ys, next_word.detach()], dim=0)
        return ys
# ##### These hyperparameters were set for a GTX980. A bigger GPU, such as a P100 or similar, will be able to handle default transformer hyperparameters and bigger batch sizes.
# + pycharm={"name": "#%%\n"}
# Hyperparameters scaled down to fit a GTX980 (see note above).
input_vocab_size = len(de.vocab)
output_vocab_size = len(en.vocab)
embedding_dim = 256
n_layers = 4
hidden_dim = 512
n_heads = 8
dropout_rate = .1
transformer = TranslationModel(input_vocab_size, output_vocab_size, embedding_dim,
                               n_layers, hidden_dim, n_heads, dropout_rate).to(device)
# Adam with lr=0: the learning rate is driven entirely by the Noam schedule
# (warmup of 2000 steps).
adamopt = torch.optim.Adam(transformer.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
optimizer = NoamOpt(embedding_dim, 1, 2000, adamopt)
criterion = F.cross_entropy
# optimization is unstable without this step
# Xavier-initialise every weight matrix; 1-D params (biases) are left as-is.
for p in transformer.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)
# -
# ##### Runs 10 epochs of the entire training dataset.
# + pycharm={"name": "#%%\n"}
# Train for 10 epochs, reporting per-epoch loss and timing.
true_start = time.time()
for i in range(10):
    transformer.train()
    t = time.time()
    loss = train_step(train_loader)
    # BUGFIX: the original format string had only two placeholders, so the
    # third argument (the epoch time) was silently dropped.
    print("Epoch {}. Loss: {}, Time (s): {}".format((i + 1), str(loss)[:5], int(time.time() - t)))
    print("Total time (s): {}, Last epoch time (s): {}".format(int(time.time() - true_start), int(time.time() - t)))
# -
# Persist the whole trained model object for later generation runs.
torch.save(transformer, "basic_translation.pt")
# ##### Finally, generations.
#
#
# The model by default uses greedy decoding for generation, and does not have incremental decoding. Currently, this leads to the transformer generating at about 1/2 the speed of Fairseq for short sequences.
#
# Implementing incremental decoding, however, makes the code for the attention function much harder to read, and has been left out for now.
# + pycharm={"name": "#%%\n"}
transformer.eval()
new_batch = next(iter(val_loader))
inp = new_batch.src   # [seq_len, bs]
tra = new_batch.trg   # [seq_len, bs]
out = transformer.generate(inp, Batch(inp, inp, 1).src_mask, 120)
# BUGFIX: iterate over the batch dimension. `len(inp)` is the sequence
# length (dim 0), but `i` is used below as a batch index (`inp[j, i]`),
# so the loop printed the wrong number of sentences.
for i in range(inp.size(1)):
    print("Input sentence: ", end="")
    # Index 0 is the <sos> token, hence the range starting at 1; stop at </s>.
    for j in range(1, inp.size(0)):
        char = de.vocab.itos[inp[j, i]]
        if char == "</s>":
            break
        print(char, end=" ")
    print("\nPredicted translation: ", end="")
    for j in range(1, out.size(0)):
        char = en.vocab.itos[out[j, i]]
        if char == "</s>":
            break
        print(char, end=" ")
    print("\nGround truth translation: ", end="")
    for j in range(1, tra.size(0)):
        char = en.vocab.itos[tra[j, i]]
        if char == "</s>":
            break
        print(char, end=" ")
    print("\n")
| EXAMPLES/IWSLT-De-En.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import copy
import numpy as np
import os
import cv2
from matplotlib import pyplot as plt
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.data import dataset
from mxnet import image as T
def get_transform(transforms, is_training):
    """Wrap *transforms* in a SequentialAug pipeline.

    When not training, an empty pipeline is returned so callers can use
    the same code path for both train and eval.
    """
    selected = list(transforms) if is_training else []
    return T.SequentialAug(selected)
def aug_fn():
    """Reference list of the ``mxnet.image`` augmentation helper signatures.

    The original body *called* these functions with undefined arguments and
    would therefore raise NameError if executed; the signatures are kept here
    as documentation only and the function is a safe no-op.

    * center_crop(src, size, interp=2)
    * color_normalize(src, mean, std=None)
    * copyMakeBorder(src=None, top=_Null, bot=_Null, left=_Null, right=_Null,
      type=_Null, value=_Null, values=_Null, out=None, name=None, **kwargs)
    * fixed_crop(src, x0, y0, w, h, size=None, interp=2)
    * imresize(src, w, h, *args, **kwargs)
    * random_crop(src, size, interp=2)
    * random_size_crop(src, size, area, ratio, interp=2, **kwargs)
    * resize_short(src, size, interp=2)
    * scale_down(src_size, size)
    """
    pass
class MxGluonDataset(dataset.Dataset):
    """Dataset yielding every image file found in a directory as an
    RGB mxnet NDArray, optionally augmented, resized to 320x320."""

    def __init__(self, images_dir, transforms=None):
        # Sort the file names so ordering is deterministic across runs.
        names = sorted(os.listdir(images_dir))
        self.imgs = [os.path.join(images_dir, name) for name in names]
        self.transforms = transforms

    def __getitem__(self, index):
        label = None  # labels unused; kept for interface parity with the original
        # OpenCV loads BGR; flip the channel axis to get RGB before wrapping.
        bgr = cv2.imread(self.imgs[index])
        image = mx.nd.array(bgr[:, :, ::-1])
        if self.transforms is not None:
            image = self.transforms(image)
        # Fixed-size output with bilinear interpolation (interp=1).
        return image if False else T.image.imresize(image, 320, 320, interp=1)

    def __len__(self):
        return len(self.imgs)
# Augmentation pipeline; the individual jitters below were superseded by the
# combined ColorJitterAug and are kept commented for reference.
trans_class = [
    # T.BrightnessJitterAug(brightness=0.9),
    # T.ContrastJitterAug(contrast=0.9),
    # T.SaturationJitterAug(saturation=0.9),
    T.ColorJitterAug(brightness=0.9, contrast=0.9, saturation=0.9),
    T.HorizontalFlipAug(p=0.5),
    T.HueJitterAug(hue=0.6),
    T.RandomGrayAug(p=0.1),
    T.ForceResizeAug((224, 224), interp=2),
]
images_dir = "../data"
batch_size = 2
shuffle = False
num_workers = 0
# NOTE(review): this rebinds the name `dataset`, shadowing the imported
# mxnet.gluon.data.dataset module used above.
dataset = MxGluonDataset(images_dir, get_transform(trans_class, is_training=True))
print("length of dataset: ", len(dataset))
dataLoader = gluon.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, last_batch="keep")
def mxgluontensor2ndarray(image):
    """Convert an mxnet NDArray image into a displayable uint8 numpy array."""
    arr = image.asnumpy()
    # Clamp to the valid pixel range first so the uint8 cast cannot wrap around.
    return np.clip(arr, 0, 255).astype(np.uint8)
# Visualise each mini-batch: one figure per batch, one subplot per image.
for idx, item in enumerate(dataLoader):
    images = item
    plt.figure()
    for i in range(1, images.shape[0] + 1):
        plt.subplot(1, batch_size, i)
        plt.imshow(mxgluontensor2ndarray(images[i - 1]))
        plt.xticks([])
        plt.yticks([])
    plt.show()
print("done!")
# -
| mximage/mximage_augmentor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Groupby
# __How Groupby Works__
# - Split a DataFrame into groups based on some criteria
# - Apply a function to each group independently
# - Combine the results into a DataFrame
import pandas as pd
# First 4 rows of the CSV are preamble, hence skiprows=4.
olympics = pd.read_csv('olympics.csv', skiprows=4)
# __The Groupby object__
# groupby() is lazy: it returns a DataFrameGroupBy object, not a result.
olympics.groupby('Edition')
type(olympics.groupby('Edition'))
# __Iterate through a Group__
for group_key, group_value in olympics.groupby('Edition'):
    print(group_key)
    print(group_value)
type(group_value)
# __Groupby computations__
# - `agg()` - multiple statistics in one calculation per group
# - `DataFrame.groupby(agg([...]))`
# - `DataFrame.groupby(agg({..:[...]}))`
olympics.groupby('Edition').size()
# __`agg([...])`__
olympics.groupby(['Edition','NOC','Medal']).agg('count')
olympics.groupby(['Edition','NOC','Medal']).size()
olympics.groupby(['Edition','NOC','Medal']).agg({'Edition':'count'})
olympics[olympics.Athlete == 'LEWIS, Carl'].groupby(['Athlete','Medal']).agg({'Edition':['max','min','count']})
# ## Challenge
# __Using groupby(), plot the total number of medals awarded at each of the Olympic games throughout history.__
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
olympics.groupby('Edition').agg({'Edition':'count'}).plot(kind='bar', figsize=(20,5));
medal = olympics.groupby(['Edition', 'Medal']).agg({'Edition':'count'})
medal.info()
plt.figure(figsize=(20,7))
sns.countplot(x='Edition', hue='Medal', data=olympics);
# __Create a list showing the total number of medals won for each country over the history of the Olympics. For each country, include the year of the first and most recent Olympics medal wins.__
olympics.groupby('NOC').agg({'Edition':['count','max','min']}).sort_index(ascending=False)
| Groupby.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Single Layer Neural Network
#
# - 딥러닝 알고리즘의 가장 기본이 되는 인공신경망(artificial neural network, ANN) 그 중에서도 single-layer neural network 모델을 구현해보자.
#
# #### 크게 세가지 방식
# - 1) Random Search
# - 2) h-step Search
# - 3) Gradient Descent
import numpy as np
# ## Generate Dataset
# - Draw 100 samples each for x1 and x2 from a uniform distribution on [0, 1].
# - Build the target y with the formula y = 0.3 * x1 + 0.5 * x2 + 0.1.
# +
x1 = np.random.uniform(low=0.0, high=1.0, size=100)
print(x1.shape)
x1[:10]
# +
x2 = np.random.uniform(low=0.0, high=1.0, size=100)
print(x2.shape)
x2[:10]
# +
# Ground-truth linear relation that the search methods below must recover.
y = 0.3 * x1 + 0.5 * x2 + 0.1
print(y.shape)
y[:10]
# -
# ## First idea: Random Search
# - Draw random weights (w1, w2) and a bias (b) and, over many trials, keep the combination that yields the smallest error.
# - Because w1, w2 and b are sampled blindly until the target error is reached, the search may succeed on the first trial or fail to find it even after 10000 epochs.
# +
num_epoch = 10000
best_error = np.inf  # running best (lowest) mean absolute error so far
best_epoch = None
best_w1 = None
best_w2 = None
best_b = None
for epoch in range(num_epoch):
    # Fresh random candidate each epoch — no information carries over.
    w1 = np.random.uniform(low=0.0, high=1.0)
    w2 = np.random.uniform(low=0.0, high=1.0)
    b = np.random.uniform(low=0.0, high=1.0)
    y_predict = x1 * w1 + x2 * w2 + b
    error = np.abs(y_predict - y).mean()  # mean absolute error
    if error < best_error:
        best_error = error
        best_epoch = epoch
        best_w1 = w1
        best_w2 = w2
        best_b = b
        print("{0:4} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(best_epoch, best_w1, best_w2, best_b, best_error))
print("----" * 15)
print("{0:4} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(best_epoch, best_w1, best_w2, best_b, best_error))
# -
# ## Second idea: H-step Search
# - Initialise w1, w2 and b randomly once.
# - Compute the current error, then probe w1 by +/- h (a fixed step) and keep the move that reduces the error.
# - Repeat the same +/- h probing for w2 and b, each compared against the first error of the epoch.
# - If the step size h is poorly chosen, the error may never decrease no matter how many epochs run.
# - As with Random Search, depending on the initial w1, w2, b and the step size, the target error may be reached on the first epoch or never within 10000 epochs.
# +
num_epoch = 10000
w1 = np.random.uniform(low=0.0, high=1.0)
w2 = np.random.uniform(low=0.0, high=1.0)
b = np.random.uniform(low=0.0, high=1.0)
h = 0.01  # fixed probing step size
for epoch in range(num_epoch):
    y_predict = x1 * w1 + x2 * w2 + b
    current_error = np.abs(y_predict - y).mean()
    if current_error < 0.005:  # early exit once the target error is reached
        break
    # Probe w1 upward first; fall back to the downward probe.
    y_predict = x1 * (w1 + h) + x2 * w2 + b
    h_plus_error = np.abs(y_predict - y).mean()
    if h_plus_error < current_error:
        w1 = w1 + h
    else:
        y_predict = x1 * (w1 - h) + x2 * w2 + b
        h_minus_error = np.abs(y_predict - y).mean()
        if h_minus_error < current_error:
            w1 = w1 - h
    # Probe w2 the same way (still compared to the epoch's original error).
    y_predict = x1 * w1 + x2 * (w2 + h) + b
    h_plus_error = np.abs(y_predict - y).mean()
    if h_plus_error < current_error:
        w2 = w2 + h
    else:
        y_predict = x1 * w1 + x2 * (w2 - h) + b
        h_minus_error = np.abs(y_predict - y).mean()
        if h_minus_error < current_error:
            w2 = w2 - h
    # Probe b last.
    y_predict = x1 * w1 + x2 * w2 + (b + h)
    h_plus_error = np.abs(y_predict - y).mean()
    if h_plus_error < current_error:
        b = b + h
    else:
        y_predict = x1 * w1 + x2 * w2 + (b - h)
        h_minus_error = np.abs(y_predict - y).mean()
        if h_minus_error < current_error:
            b = b - h
print("{0} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(epoch, w1, w2, b, current_error))
# -
# ## Third Idea - Gradient Descent
# - **Update each weight by dError/dWeight.** (Differentiating the error with respect to each weight gives the direction in which to move it.)
# - The learning rate scales the size of each update along that direction (an appropriate value is required).
# - Because gradient descent follows the slope of the error surface, its update direction is guaranteed to reduce the error, so it approaches the answer far more reliably than the two methods above.
# +
num_epoch = 100
learning_rate = 1.2
w1 = np.random.uniform(low=0.0, high=1.0)
w2 = np.random.uniform(low=0.0, high=1.0)
b = np.random.uniform(low=0.0, high=1.0)
for epoch in range(num_epoch):
    y_predict = x1 * w1 + x2 * w2 + b
    error = np.abs(y_predict - y).mean()
    if error < 0.001:  # stop once close enough to the true parameters
        break
    # Gradient-style updates: the mean residual weighted by each input.
    w1 = w1 - learning_rate * ((y_predict - y) * x1).mean()
    w2 = w2 - learning_rate * ((y_predict - y) * x2).mean()
    b = b - learning_rate * (y_predict - y).mean()
    if epoch % 10 == 0:
        print("{0:2} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(epoch, w1, w2, b, error))
print("----" * 15)
print("{0:2} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(epoch, w1, w2, b, error))
# -
| example/01_Single_Layer_Neural_Network_part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
# ### Reading the text data
text_data_sentence = pd.read_csv('textdatanew.csv', encoding='ISO-8859-1')
text_data_sentence.head()
# ### Reading the Response file
bess_tags = pd.read_csv('CBW_Bess_tags_final2.csv')
bess_tags.head()
# +
#bess_tags.head()
# -
# Keep only the columns needed to build the response labels.
bess_reponse = bess_tags.loc[:,['Content','Event','Type','para no','biographyID','collectionID']]
bess_reponse = bess_reponse.fillna(' ')
# Response label = "<Content>_<Event>".
bess_reponse.loc[:,'Response'] = bess_reponse.loc[:,['Content','Event']].apply(lambda x: '_'.join(x), axis=1)
# The ten most frequent persona-description responses become the target classes.
top_10_persona_desc = bess_reponse[bess_reponse.Type.isin(['personaDescription'])].Response.value_counts()[0:10]
top_10_persona_desc
bess_reponse.shape
bess_reponse.loc[:,['para no','biographyID','collectionID','Response']].head()
bess_response_df = bess_reponse.loc[:,['para no','biographyID','collectionID','Response','Type']].drop_duplicates()
# +
#bess_response_df.groupby(['para no','biographyID','collectionID','Type'],as_index = False)['Response'].apply(lambda x: ' '.join(x))
# -
final_response_file = bess_response_df[bess_response_df.Response.isin(top_10_persona_desc.index)]
# +
#final_response_file.melt()
# -
final_response_file.head()
# One-hot encode the responses: wide frame with one indicator column per class.
final_response_file.loc[:,'values'] = 1
final_response_wide = (final_response_file.pivot_table(index=['para no','biographyID','collectionID','Type'],
                       columns='Response', values='values').reset_index())
final_response_wide = final_response_wide.fillna(0)
final_response_wide.head()
# Join labels onto the paragraph text, then drop paragraphs with no label.
text_data_sentence_final = pd.merge(text_data_sentence, final_response_wide, how='left', left_on=['CollectionID','BiographyID','ParagraphNo'],
                                    right_on=['collectionID','biographyID','para no'])
text_data_sentence_final = text_data_sentence_final[~text_data_sentence_final['para no'].isna()]
# The last 10 columns are the one-hot response targets.
text_data_sentence_final_response = text_data_sentence_final[text_data_sentence_final.columns[-10:]]
final_response_file.Response.value_counts()
# #### Bag of Words
stop_words = stopwords.words('english')
# Unigrams + bigrams, English stop words removed.
bow_model = CountVectorizer(ngram_range=(1,2), stop_words=stop_words)
Para_text_bow = bow_model.fit_transform(text_data_sentence_final.ParagraphText)
# #### Tf-IDF
tf_idf_model = TfidfVectorizer(ngram_range=(1,2), stop_words=stop_words)
Para_text_tfidf = tf_idf_model.fit_transform(text_data_sentence_final.ParagraphText)
# ### Splitting data into train and test
from sklearn.model_selection import train_test_split
# NOTE: the models below are trained on the TF-IDF features, not the BoW ones.
X_train, X_test, y_train, y_test = train_test_split(Para_text_tfidf, text_data_sentence_final_response,
                                                    test_size=0.3, random_state=0)
y_test.shape
# ### Machine Learning Models
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
#knn_model = KNeighborsClassifier(n_neighbors= 3, p = 1.5)
rf_model = RandomForestClassifier()
lr_model = LogisticRegression(multi_class='multinomial')
# +
#y_train.melt()[y_train.melt().value.isin([1])].variable.value_counts()
# -
# Multi-label fit on the TF-IDF features (X_train comes from Para_text_tfidf).
rf_model.fit(X_train, y_train)
# +
# FIX: the model was fitted on the TF-IDF matrix, so importances must be
# indexed by the TF-IDF vocabulary; using bow_model here relied on the two
# vectorizers happening to produce identical vocabularies.
feature_importances = pd.DataFrame(rf_model.feature_importances_,
                                   index=tf_idf_model.get_feature_names(),
                                   columns=['importance']).sort_values('importance', ascending=False)
feature_importances
# -
np.array(rf_model.predict_proba(X_test)).shape
# Per-example label choice: index of the first label whose positive-class
# probability exceeds 0.4; class 10 ("none") when no label clears the bar.
[np.argmax([int(each[1] > 0.4) for each in np.array(rf_model.predict_proba(X_test))[:, each_example, :]]) if \
 sum([int(each[1] > 0.4) for each in np.array(rf_model.predict_proba(X_test))[:, each_example, :]]) > 0 else 10
 for each_example in range(100)]
result = [np.argmax(each) if each.sum() != 0 else 10 for each in rf_model.predict_proba(X_test)]
pd.DataFrame(result)[0].value_counts()
y_test.melt()[y_test.melt().value.isin([1])].variable.value_counts()
| Paragraph Annotation/Paragraph Annotation_ PersonaDesc- Machine Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# + _uuid="bb3d32cf67f12406353cb3550577cd3685e77fe4"
# Skip data rows 1-399 so this run resumes at board #400 of a split job.
df = pd.read_csv('../input/test.csv', index_col='id', skiprows=range(1, 400))
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# %load_ext Cython
# + _uuid="4a1de9c9fc845912e606ab051d1a9368230412e5" language="cython"
#
# cimport cython
# import numpy as np
#
# @cython.cdivision(True)
# @cython.boundscheck(False)
# @cython.nonecheck(False)
# @cython.wraparound(False)
# cdef int calc_neighs(unsigned char[:, :] field, int i, int j, int n):
# cdef:
# int neighs = 0;
# int k, row_idx, col_idx;
# neighs = 0
# if i - 1 >= 0 and j - 1 >= 0 and field[i - 1, j - 1]:
# neighs += 1
# if i - 1 >= 0 and field[i - 1, j]:
# neighs += 1
# if i - 1 >= 0 and j + 1 < n and field[i - 1, j + 1]:
# neighs += 1
# if j - 1 >= 0 and field[i, j - 1]:
# neighs += 1
# if j + 1 < n and field[i, j + 1]:
# neighs += 1
# if i + 1 < n and j - 1 >= 0 and field[i + 1, j - 1]:
# neighs += 1
# if i + 1 < n and field[i + 1, j]:
# neighs += 1
# if i + 1 < n and j + 1 < n and field[i + 1, j + 1]:
# neighs += 1
# return neighs
#
# @cython.cdivision(True)
# @cython.boundscheck(False)
# @cython.nonecheck(False)
# @cython.wraparound(False)
# cpdef make_move(unsigned char[:, :] field, int moves):
# cdef:
# int _, i, j, neighs;
# int n;
# int switch = 0;
# unsigned char[:, :] cur_field;
# unsigned char[:, :] next_field;
# cur_field = np.copy(field)
# next_field = np.zeros_like(field, 'uint8')
# n = len(field)
# for _ in range(moves):
# if switch == 0:
# for i in range(n):
# for j in range(n):
# neighs = calc_neighs(cur_field, i, j, n)
# if cur_field[i, j] and neighs == 2:
# next_field[i, j] = 1
# elif neighs == 3:
# next_field[i, j] = 1
# else:
# next_field[i, j] = 0
# else:
# for i in range(n):
# for j in range(n):
# neighs = calc_neighs(next_field, i, j, n)
# if next_field[i, j] and neighs == 2:
# cur_field[i, j] = 1
# elif neighs == 3:
# cur_field[i, j] = 1
# else:
# cur_field[i, j] = 0
# switch = (switch + 1) % 2
# return np.array(next_field if switch else cur_field)
# + _uuid="9083e6768ae28d3a8b0bb9d363d4773b1f3862e6"
import numpy as np
import multiprocessing as mp
from functools import partial
def parallel_fitness(gene, Y, delta):
    """Fitness of a candidate start board: the fraction of the 400 cells
    that match the target Y after advancing the board `delta` steps.
    Module-level so it can be pickled by multiprocessing."""
    evolved = make_move(gene, moves=delta)
    matches = (evolved == Y).sum()
    return matches / 400
class GeneticSolver:
    """Genetic algorithm that searches for a Game-of-Life start board which
    evolves into a given stop board Y after `delta` steps (uses the Cython
    `make_move` defined above)."""

    def __init__(self, population_size=800, n_generations=2000, retain_best=0.8, retain_random=0.05, mutate_chance=0.05,
                 verbosity=0, verbosity_step=20, random_state=-1, warm_start=False, early_stopping=True, patience=20,
                 initialization_strategy='uniform', fitness_parallel=False):
        """
        :param population_size: number of individual candidate solutions
        :param n_generations: number of generations
        :param retain_best: percentage of best candidates to select into the next generation
        :param retain_random: probability of selecting sub-optimal candidate into the next generation
        :param mutate_chance: candidate mutation chance
        :param verbosity: level of verbosity (0 - quiet, 1 - evolution information, 2 - spamming like in 2003)
        :param random_state: if specified, initializes seed with this value
        :param warm_start: if True, initial population generation step is omitted, allowing for continuing training
        :param early_stopping: if True, evolution will stop if top-10 candidates are not changing for several generations
        :param patience: number of generations to wait for best solution change when <early_stopping>
        :param initialization_strategy: initial population generation rule: 'uniform' or 'covering'
        """
        self.population_size = population_size
        self.n_generations = n_generations
        self.retain_best = retain_best
        self.retain_random = retain_random
        self.mutate_chance = mutate_chance
        self.verbosity = verbosity
        self.verbosity_step = verbosity_step
        self.random_state = random_state
        self.warm_start = warm_start
        self.early_stopping = early_stopping
        self.patience = patience
        self.initialization_strategy = initialization_strategy
        self.fitness_parallel = fitness_parallel
        if fitness_parallel:
            # NOTE(review): this pool is never closed; fine in a notebook, but
            # long-lived processes should call pool.close()/join().
            self.pool = mp.Pool(mp.cpu_count())
        else:
            self.pool = None
        self._population = None
        if random_state != -1:
            np.random.seed(random_state)

    def solve(self, Y, delta, n_generations=-1):
        """
        :param Y: 20x20 array that represents field in stopping condition
        :param delta: number of steps to revert
        :param n_generations: number of evolution generations. Overrides initialization value if specified
        :return: 20x20 array that represents the best start field found and associated fitness value
        """
        # Regenerate the population unless warm-starting from a previous run.
        if not (self._population and self.warm_start):
            self._population = self._generate_population()
        if n_generations != -1:
            self.n_generations = n_generations
        scores = np.zeros(len(self._population))
        prev_scores = np.zeros(len(self._population))
        cnt_no_change_in_scores = 0  # consecutive generations with frozen top-10
        for generation in range(self.n_generations):
            self._population, scores = self.evolve(Y, delta)
            if np.isclose(prev_scores[:10], scores[:10]).all():
                cnt_no_change_in_scores += 1
            else:
                cnt_no_change_in_scores = 0
            prev_scores = scores
            if self.verbosity and generation % self.verbosity_step == 0:
                if generation == 0:
                    print(f"Generation #: best score")
                else:
                    print(f"Generation {generation}: {scores[0]}")
            # Stop on a perfect candidate, or when the top-10 stalls too long.
            if np.isclose(scores[:10], 1).any() or (self.early_stopping and cnt_no_change_in_scores >= self.patience):
                if self.verbosity:
                    print(f"Early stopping on generation {generation} with best score {scores[0]}")
                break
        return self._population[0], scores[0]

    def _generate_population(self):
        """
        Generating initial population of individual solutions
        Regardless of strategy, we make 5 initial "warming" steps to make distribution closer to the problem.
        Strategies description:
        * Uniform: each cell has equal probability of being initialized as alive or dead. This will introduce no
        prior information at all
        * Covering: Each individual is generated with it's own probability of having each cell 'alive'. This gives
        on average higher initial fitness score, but has no observed effect on long-term behavior
        :return: initial population as a list of 20x20 arrays
        """
        if self.initialization_strategy == 'uniform':
            # One big (20*pop, 20) random matrix split into `pop` 20x20 boards.
            initial_states = np.split(np.random.binomial(1, 0.5, (20 * self.population_size, 20)).astype('uint8'), self.population_size)
            return [make_move(state, 5) for state in initial_states]
        elif self.initialization_strategy == 'covering':
            """ Idea is to cover all the range of possible values for 'density' parameter """
            alive_probabilities = np.linspace(0.01, 0.99, self.population_size)
            # NOTE(review): np.random.binomial returns int64 here, while the
            # uniform branch casts to uint8 as `make_move` expects — confirm
            # the Cython memoryview accepts this dtype.
            return [make_move(np.random.binomial(1, prob, size=(20, 20)), moves=5) for prob in alive_probabilities]
        else:
            raise NotImplementedError(f"{self.initialization_strategy} is not implemented!")

    def evolve(self, Y, delta):
        """
        Evolution step
        :param Y: 20x20 array that represents field in stopping condition
        :param delta: number of steps to revert
        :return: new generation of the same size along with scores of the best retained individuals
        """
        if self.fitness_parallel:
            scores = np.array(self.parallel_score_population(self._population, Y, delta))
        else:
            scores = np.array(self.score_population(self._population, Y, delta))
        retain_len = int(len(scores) * self.retain_best)
        sorted_indices = np.argsort(scores)[::-1]  # descending by fitness
        self._population = [self._population[idx] for idx in sorted_indices]
        best_scores = scores[sorted_indices][:retain_len]
        if self.verbosity > 1:
            print("best scores:", best_scores)
        parents = self._population[:retain_len]
        leftovers = self._population[retain_len:]
        # Occasionally keep a sub-optimal candidate to preserve diversity.
        cnt_degenerate = 0
        for gene in leftovers:
            if np.random.rand() < self.retain_random:
                cnt_degenerate += 1
                parents.append(gene)
        if self.verbosity > 1:
            print(f"# of degenerates left: {cnt_degenerate}")
        cnt_mutations = 0
        for gene in parents[1:]:  # mutate everyone expecting for the best candidate
            if np.random.rand() < self.mutate_chance:
                self.mutate(gene)
                cnt_mutations += 1
        if self.verbosity > 1:
            print(f"# of mutations: {cnt_mutations}")
        # Fill the remaining slots with crossover children of random parents.
        places_left = self.population_size - retain_len
        children = []
        while len(children) < places_left:
            mom_idx, dad_idx = np.random.randint(0, retain_len - 1, 2)
            if mom_idx != dad_idx:
                child1, child2 = self.crossover(parents[mom_idx], parents[dad_idx])
                children.append(child1)
                if len(children) < places_left:
                    children.append(child2)
        if self.verbosity > 1:
            print(f"# of children: {len(children)}")
        parents.extend(children)
        return parents, best_scores

    @classmethod
    def crossover(cls, mom, dad):
        """
        Take two parents, return two children, interchanging half of the allels of each parent randomly
        """
        # select_mask = np.random.randint(0, 2, size=(20, 20), dtype='bool')
        select_mask = np.random.binomial(1, 0.5, size=(20, 20)).astype('bool')
        child1, child2 = np.copy(mom), np.copy(dad)
        child1[select_mask] = dad[select_mask]
        child2[select_mask] = mom[select_mask]
        return child1, child2

    @classmethod
    def mutate(cls, field):
        """
        Inplace mutation of the provided field
        """
        # NOTE(review): each selected candidate flips ~10% of its cells (the
        # fixed 0.1 here); `mutate_chance` only gates whether mutation happens.
        a = np.random.binomial(1, 0.1, size=(20, 20)).astype('bool')
        field[a] += 1
        field[a] %= 2
        return field

    @classmethod
    def fitness(cls, start_field, end_field, delta):
        """
        Calculate fitness for particular candidate (start configuration of the field)
        :param start_field: candidate (start configuration)
        :param end_field: target (stop configuration)
        :param delta: number of steps to proceed before comparing to stop configuration
        :return: value in range [0, 1] that indicates fractions of cells that match their state
        """
        candidate = make_move(start_field, moves=delta)
        return (candidate == end_field).sum() / 400

    @classmethod
    def score_population(cls, population, Y, delta):
        """
        Apply fitness function for each gene in a population
        :param population: list of candidate solutions
        :param Y: 20x20 array that represents field in stopping condition
        :param delta: number of steps to revert
        :return: list of scores for each solution
        """
        return [cls.fitness(gene, Y, delta) for gene in population]

    def parallel_score_population(self, population, Y, delta):
        """
        Apply fitness function for each gene in a population in parallel
        :param population: list of candidate solutions
        :param Y: 20x20 array that represents field in stopping condition
        :param delta: number of steps to revert
        :return: list of scores for each solution
        """
        return self.pool.map(partial(parallel_fitness, Y=Y, delta=delta), population)
# + _uuid="f8f477caa8c253e7ee38e14d2223992d08952b58"
import multiprocessing as mp
import scipy
def work(solver, Y, delta):
    """Multiprocessing worker: run one GeneticSolver on (Y, delta).

    Each worker re-seeds the RNG first — otherwise forked processes inherit
    the parent's seed and every runner explores an identical population.
    """
    # FIX: `scipy.random` was a deprecated alias of numpy.random and has been
    # removed from modern SciPy; seed numpy's RNG directly instead.
    np.random.seed()
    return solver.solve(Y, delta)
class MPGeneticSolver:
    def __init__(self, n_proc='auto', *args, **kwargs):
        """
        Multi-process version of Genetic Solver with different initial conditions
        :param n_proc: number of processes to create
        :param args: GeneticSolver arguments (see its documentation for more)
        :param kwargs: GeneticSolver key-value arguments
        """
        if n_proc == 'auto':
            n_proc = mp.cpu_count()
        self.n_proc = n_proc
        # NOTE(review): n_proc was already resolved above, so this conditional
        # always takes the explicit-value branch.
        self.pool = mp.Pool(mp.cpu_count() if n_proc == 'auto' else n_proc)
        self.args = args
        self.kwargs = kwargs
        self._solvers = None
        # Per-solver fitness parallelism would nest process pools — forbid it.
        if 'fitness_parallel' in self.args or ('fitness_parallel' in self.kwargs and self.kwargs['fitness_parallel']):
            raise ValueError("Fitness function cannot be parallelized in MPGeneticSolver")

    def solve(self, Y, delta, return_all=True):
        """
        Solve RGoL problem
        :param Y: 20x20 array that represents field in stopping condition
        :param delta: number of steps to revert
        :param return_all: if True, returns all of the results from different runners, as well as their scores.
        If False only solution associated with the best score is returned
        :return: either list of (solution, score) pairs or the best solution (see `return_all`)
        """
        # One independent solver per process; each re-seeds inside `work`.
        self._solvers = [GeneticSolver(*self.args, **self.kwargs) for _ in range(self.n_proc)]
        tasks = [(solver, Y, delta) for solver in self._solvers]
        results = self.pool.starmap(work, tasks)
        return results if return_all else self.select_best(results)

    @classmethod
    def select_best(cls, solutions):
        """
        Using output of solve method, select the best solution
        :param solutions: list of (solution, score) pairs
        :return: 20x20 array that represents the solution (starting board condition)
        """
        return sorted(solutions, key=lambda x: x[1], reverse=True)[0]
# + _uuid="013227f34f237a3ccb50f0c744600a8e1553df14"
class SolutionRunner:
    """Runs MPGeneticSolver over a dataframe of boards, tracks per-board
    scores, and writes a submission-style CSV."""

    def __init__(self, save_fname='solution.csv', verbosity=0):
        self.save_fname = save_fname   # default output path for solve_df
        self.verbosity = verbosity
        self.log = []                  # (index, score) per solved board
        self.running_avg = 0           # running mean score over solved boards
        self.n = 0                     # number of boards solved so far

    def solve_df(self, df, first_n=None, save_to=None):
        """Solve up to `first_n` rows of `df` (delta + 400 flattened cells per
        row) and write the results as CSV; returns the result dataframe."""
        solver = MPGeneticSolver(early_stopping=False)
        solution_df = pd.DataFrame([], columns=['id', 'score'] + ['start.' + str(_) for _ in range(1, 401)], dtype=int)
        for col in solution_df.columns:
            solution_df[col] = solution_df[col].astype(np.int32)
        self.running_avg = 0
        self.n = 0
        self.log = []
        best, worst = None, None
        # NOTE(review): `id` shadows the builtin; kept as-is for compatibility.
        for i, (id, (idx, row)) in enumerate(zip(df.index, df.iterrows())):
            delta, Y = row.values[0], row.values[1:].reshape((20, 20)).astype('uint8')
            solution = solver.solve(Y, delta, return_all=False)
            board, score = solution
            # Row layout: id, score (as an integer percentage), 400 cells.
            flat_board = np.insert(board.ravel(), 0, id)
            flat_board = np.insert(flat_board, 1, int(score * 100))
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0 —
            # this code requires pandas < 2 (use pd.concat on newer versions).
            solution_df = solution_df.append(pd.Series(flat_board, index=solution_df.columns), ignore_index=True)
            self.log.append((idx, score))
            if best is None or best[1] < score:
                best = (idx, score)
            if worst is None or worst[1] > score:
                worst = (idx, score)
            self.n += 1
            self.running_avg = (self.running_avg * (self.n - 1) + score) / self.n
            if self.verbosity:
                print(f"{idx} is solved with score {score}. Average score: {self.running_avg}")
            if first_n and i >= first_n:
                break
        if self.verbosity:
            print("Best score:", best)
            print("Worst score:", worst)
        if save_to is not None:
            solution_df.to_csv(save_to, index=False)
        else:
            solution_df.to_csv(self.save_fname, index=False)
        return solution_df
# + _uuid="a4b6809d82936d83b76bcd0e80c527b03f23a2f1"
# verbosity=1 logs a per-board score and the running average.
sr = SolutionRunner(verbosity=1)
# + _uuid="cbb7fd30a861ce26ab8625da591825798a9e616e"
# Solve the first 300 loaded boards and persist them to s401-700.csv.
solution = sr.solve_df(df, 300, 's401-700.csv')
# + _uuid="5356e2efdd6c1ce37f01829b36aa456e7d39b765"
| ga_exploration/genetic-algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Task 25: 1000-digit Fibonacci number
# As Python cannot optimize tailrec, we'll implement basic fibonacci algorithm with a list:
# +
import time
start = time.time()  # wall-clock timer for the brute-force variant
def fibonacci(n):
    """Return the n-th Fibonacci number (1-indexed, F(1) = F(2) = 1).

    Iterates with two rolling values instead of materialising the whole
    sequence, cutting memory from O(n) big integers to O(1) and removing
    the redundant O(n) `max` scan (the sequence is non-decreasing, so the
    last element *is* the maximum). Inputs below 2 return 1, matching the
    original behaviour.
    """
    a, b = 1, 1
    for _ in range(2, n):
        a, b = b, a + b
    return b
# Scan indices upward from 12 until the Fibonacci number reaches 1000
# decimal digits, then print the index.
i = 12
while True:
    a = fibonacci(i)
    if len(str(a)) == 1000:
        print(i)
        break
    i += 1
print("Solution took", time.time() - start, "seconds")
# -
# Complexity of this algorithm is O(N^2) and can be improved by the following algorithm, which I used in Task 2:
# +
start = time.time()  # reset the timer for the fast-doubling variant
def generate_fibonacci(N):
basis_a = 1
basis_b = 1
basis_c = 1
basis_d = 0
rc = 0
rd = 1
while N != 0:
if N & 1:
tc = rc
rc = rc * basis_a + rd * basis_c
rd = tc * basis_b + rd * basis_d
ta = basis_a
tb = basis_b
tc = basis_c
basis_a = basis_a * basis_a + basis_b * basis_c
basis_b = ta * basis_b + basis_b * basis_d
basis_c = basis_c * ta + basis_d * basis_c
basis_d = tc * tb + basis_d * basis_d
N >>= 1
return rc
# Same search as above, but each F(i) is recomputed from scratch with the
# O(log i) routine.  NOTE(review): an incremental iteration would avoid the
# repeated recomputation, but this is fast enough for the exercise.
for i in range(0, 100000000):
    fib = generate_fibonacci(i)
    if len(str(fib)) == 1000:
        print(i)
        break
print("Solution took", time.time() - start, "seconds")
# -
# This algorithm is much better: it needs only O(log(N)) matrix-squaring steps (exponentiation by squaring) instead of N additions
| python/Problem_25.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Viral Data Analysis
#
# Date: 2020.10.26
#
# Author: © 2020 <NAME>
#
# This notebook is part of an individual project for [KTU IF](https://fi.ktu.edu/) [P176M010 Advanced Machine Learning](https://admissions.ktu.edu/programme/m-software-engineering/#P176M010) class led by [<NAME>](http://mantas.info/).
#
# Here we analyze open COVID-19 data from https://ourworldindata.org/ to answer the following questions:
#
# 1. [Are more tests performed in countries with higher GDP per capita?](#Q1)
# 1. [Does higher diabetes prevalence implies that the country has higher mortality rate among infected people?](#Q2)
# 1. [Lockdown in Czech Republic](#Q3)
#
# Additional data from:
# * https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19
# * https://onemocneni-aktualne.mzcr.cz/covid-19/prehled-hospitalizaci
#
#
# This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International license</a>. <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons licencija" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a>
# ## Load the data
import numpy as np
import numpy.ma as ma
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import scipy
# Download the data to cache locally. Do this once (a day), and comment out, not to DoS the server. (adopted from https://stackoverflow.com/a/35997720).
import requests
fname = 'owid-covid-data.csv'
url = 'https://covid.ourworldindata.org/data/' + fname
r = requests.get(url)
open(fname, 'wb').write(r.content)
# Load the file to Pandas.
# url = "https://covid.ourworldindata.org/data/owid-covid-data.csv"
# download the file locally when developing, not to DoS the server:
url = "owid-covid-data.csv"
df = pd.read_csv(url, parse_dates=[3])
df.date.max()
# Show the information about columns in the dataframe. The columns are explained [here](https://github.com/owid/covid-19-data/blob/master/public/data/owid-covid-codebook.csv).
df.info()
# ## Data quality report
df.describe().T
# Group the dataframe by country code, which will come in handy later.
df_grouped = df.groupby('iso_code').last()
# <a id='Q1'></a>
# ## 1. Are more tests performed in countries with higher GDP per capita?
# The goal of this part is to analyze if countries with higher GDP are testing their citizen more.
# ### Least Squares Solution
# I will interpolate the data by means of Least Squares Fit method. The mathematial problem I am solving is the following:
# <center>$\min_{x \in \mathbb{R^n}}||Ax - y||$</center>
# +
# Filter the dataframe from NaN values in the desired columns
df1 = df_grouped.dropna(subset=['gdp_per_capita', 'total_tests_per_thousand'])
# Obtain the desired features from dataset
gdp = df1.gdp_per_capita
tests = df1.total_tests_per_thousand
# Plot the features
plt.figure(figsize=(14,8))
plt.scatter(gdp, tests)
plt.xlabel('GDP per capita')
plt.ylabel('Total number of tests per 1000 people')
# Least Squares Fit
# Add column of ones to create matrix A
A = np.vstack([gdp, np.ones(len(gdp))]).T
y = tests
w, b = np.linalg.lstsq(A, y, rcond=None)[0]
# Two points are enough to make a line
x_fit = np.linspace(gdp.min(), gdp.max(), 2)
y_fit = b + w * x_fit
plt.plot(x_fit, y_fit, color='r')
plt.title('Relation between GDP per capita and tests per thousand people')
plt.show()
# -
# ### Correlation
# We can see if and how are those features correlated. Correlation is calculated as in the formula below.
# 
gdp.corr(tests)
# We can see that the correlation between GDP per capita and tests is a relatively high positive number already in a linear scale, which means that richer countries are indeed testing people more.
# <a id='Q2'></a>
# ## 2. Does higher diabetes prevalence implies that the country has higher mortality rate among infected people?
# +
df2 = df_grouped.dropna(subset=['diabetes_prevalence', 'total_cases', 'total_deaths'])
diabetes = df2.diabetes_prevalence
cases = df2.total_cases
deaths = df2.total_deaths
death_rate = (deaths/cases)
# Plot the features
plt.figure(figsize=(14,8))
plt.scatter(diabetes, death_rate, color='navy')
plt.xlabel('Diabetes prevalence')
plt.ylabel('Death rate on {}'.format(df['date'].dt.date.max()))
# Least Squares Fit
# Add column of ones to create matrix A
A = np.vstack([diabetes, np.ones(len(diabetes))]).T
y = death_rate
w, b = np.linalg.lstsq(A, y, rcond=None)[0]
# Two points are enough to make a line
x_fit = np.linspace(diabetes.min(), diabetes.max(), 2)
y_fit = b + w * x_fit
plt.plot(x_fit, y_fit, color='r', label='Least squares fit')
# # Because this is quite similar to the first question I will use RANSAC
# # algorithm for line fitting, as this approach is quite outlier-proof
# RANSAC line fit: robust to outliers, unlike the plain least-squares fit.
from sklearn import linear_model
ransac = linear_model.RANSACRegressor()
# .to_numpy() first: indexing a pandas Series as ``series[:, np.newaxis]``
# relied on multi-dimensional Series indexing, which raises in modern
# pandas.  Converting to an ndarray keeps the intended (n, 1) shape.
ransac.fit(diabetes.to_numpy()[:, np.newaxis], death_rate)
x = np.arange(diabetes.min(), diabetes.max())[:, np.newaxis]
y = ransac.predict(x)
plt.plot(x, y, color='g', label='RANSAC fit')
plt.title('Relation between diabetes prevalence and mortality rate')
plt.legend()
plt.show()
# -
death_rate.corr(diabetes)
# Ok, doesn't really look like it is somehow correlated. Let's try log log scale, maybe it will be better..
plt.figure(figsize=(14,8))
plt.loglog(diabetes, death_rate, '.', color='navy')
plt.xlabel('Diabetes prevalence')
plt.ylabel('Death rate on {}'.format(df['date'].dt.date.max()))
w1, w0 = ma.polyfit(np.log(diabetes), np.log(death_rate), 1)
x_all = np.linspace(np.log(diabetes.min()), np.log(diabetes.max()), 100)
plt.plot(np.exp(x_all), np.exp(w0 + w1 * x_all), '-', color='yellow')
plt.title('Relation between diabetes prevalence and mortality rate in loglog scale')
plt.show()
np.log(death_rate).corr(np.log(diabetes))
# Okay, it is not. Specialists say that coronavirus is more dangerous for people with diabetes, but we cannot really see it in the data. The reason can be that we have just diabetes prevalence among all citizens, not among the infected.
# <a id='Q3'></a>
# ## 3. Lockdown in Czech Republic
# In the meantime before I started working on this mini-project, the second lockdown in the Czech Republic already came, it was declared on 22nd October 2020. I want to analyze, if it is early enough to save our health system from collapsing and early enough to have enough artificial ventilators and other means that people in serious condition need.
# ### Overall statistics
# +
cz = df.loc[df['iso_code'] == 'CZE']
plt.figure(figsize=(14,10))
x = cz['date']
cases = cz['new_cases']
tests = cz['new_tests']
deaths = cz['new_deaths']
cases_smoothed = cz['new_cases_smoothed']
tests_smoothed = cz['new_tests_smoothed']
deaths_smoothed = cz['new_deaths_smoothed']
plt.plot(x, cases, label='New daily cases', color='red', alpha=0.3)
plt.plot(x, tests, label='New daily tests', color='blue', alpha=0.3)
plt.plot(x, deaths, label='New daily deaths', color='black', alpha=0.3)
plt.plot(x, cases_smoothed, label='New daily cases (7 days moving average)', color='red')
plt.plot(x, tests_smoothed, label='New daily tests (7 days moving average)', color='blue')
plt.plot(x, deaths_smoothed, label='New daily deaths (7 days moving average)', color='black')
lockdown = dt.datetime(2020, 3, 16)
plt.axvline(lockdown, color='yellow', ymin=0.02, ymax=0.72, label='First Lockdown ({})'.format(lockdown.date()), linestyle='--')
plt.ylim(0, 30000)
plt.legend(loc='upper left')
plt.title('Daily statistics')
plt.show()
# -
# We can see that the numbers of new cases started to grow exponentially, so the lockdown was inevitable.
#
# ### Growing positive rate
# +
fig, ax1 = plt.subplots(figsize=(14,10))
x = cz.date
positive_rate = cz.new_cases_smoothed/cz.new_tests_smoothed
ax1.plot(x, positive_rate, label='Positive rate', color='red')
ax1.set_ylabel('positive rate', color='red')
ax1.tick_params(axis='y', labelcolor='red')
ax1.legend(loc='upper left')
ax2 = ax1.twinx()
ax2.plot(x, tests_smoothed, label='New daily tests (7 days moving average)', color='darkblue')
ax2.plot(x, cases_smoothed, label='New daily cases (7 days moving average)', color='cornflowerblue')
ax2.set_ylabel('Number of infected/tested people', color='darkblue')
ax2.tick_params(axis='y', labelcolor='cornflowerblue')
ax2.legend(loc='upper right')
plt.title('Positive rate')
plt.show()
# -
# Not only is the absolute number of positive people growing, but the ratio of positive tests is also getting higher and higher; these days around 28% of tested people are positive.
# ### Capacities for people in serious conditions
# I used additional data from Czech ministry of health (https://onemocneni-aktualne.mzcr.cz/covid-19). I had to combine several dataframes to get information needed. The dataframe in cell below contains information about hospitalized people and people being in serious condition on a given date.
hosp = pd.read_csv('hospitalizace.csv')
hosp['date'] = pd.to_datetime(hosp['date'], format='%d.%m.%Y')
hosp['hospitalized'] = pd.to_numeric(hosp['hospitalized'])
hosp = hosp.iloc[::-1]
hosp.info()
# This dataframe contains information about capacities of our health system. There are columns like overall count of ventilator, beds on intensive care units, capacities of doctors and nurses and we have also percentages of those resources currently available.
cap_df = pd.read_csv('kapacity-nemocnic.csv')
cap_df.info()
# Some data cleaning here. I had to transform the dates to datetime format and change format and type of the numerical values, because they were considered objects (because of the comma after thousand units).
# +
from pandas.api.types import is_string_dtype
for column in cap_df:
col = cap_df[column]
if column == 'Datum':
pd.to_datetime(cap_df[column], format='%m/%d/%Y')
continue
if is_string_dtype(col):
if "%" in col.iloc[0]:
continue
else:
y = col.tolist()
y = [s.replace(",","") for s in y]
cap_df[column] = y
cap_df[column] = pd.to_numeric(cap_df[column])
cap_df['Datum'] = pd.to_datetime(cap_df['Datum'], format='%m/%d/%Y')
cap_df = cap_df.sort_values(by='Datum')
# -
# ### Capacities related to hospilazited people
fig, ax1 = plt.subplots(figsize=(14,10))
x = hosp.date
lockdown = dt.datetime(2020, 3, 16)
capacity = cap_df['UPV Celkem kapacita'] + cap_df['Vent. sál (celkem)'] + cap_df['ECMO Celkem kapacita'] + cap_df[' Ventilátory přenosné (celkem)']
ax1.plot(x, hosp.hospitalized, label='Actual number of hospitalized people', color='orange')
ax1.plot(x, hosp.serious_condition, label='Actual number of people in serious condition (JIP, UPV, ECMO)', color='red')
ax1.plot(cap_df['Datum'], capacity, label='Overall number of spots for people in serious condition')
ax1.axvline(lockdown, color='yellow', ymin=0.02, ymax=0.72, label='First Lockdown ({})'.format(lockdown.date()), linestyle='--')
ax1.legend()
plt.ylabel('Units')
plt.xlabel('Date')
plt.title('People needing medical help')
plt.show()
# Sadly, we can see that the number of hospitalized people also grows exponentially, similarly to the number of positive people. It looks like the number of people who had to be hospitalized is a multiple of the number of people in serious condition.
corr = hosp.hospitalized.corr(hosp.serious_condition)
corr
ratio = hosp.serious_condition/hosp.hospitalized
ratio = ratio[ratio > 0]
avg = np.average(ratio)
std = np.std(ratio)
print('Average: ', avg, '| standard deviation: ', std)
# Because the correlation between hospitalized people and people in serious condition is almost 1, I just divided those variables and saw that around 20% of all hospitalized people are in serious condition. The standard deviation is small, so this ratio is quite constant, meaning it stays around 20% at any time. It basically means that every fifth hospitalized person ends up in serious condition.
#
#
# ### Lockdown effect
cz = df.loc[df['iso_code'] == 'CZE']
plt.figure(figsize=(14,10))
x = cz['date'].iloc[70:150]
cases_smoothed = cz['new_cases_smoothed'].iloc[70:150]
lockdown = dt.datetime(2020, 3, 16)
max_idx = np.argmax(cases_smoothed)
date = x.iloc[max_idx]
max_serious = np.argmax(hosp.serious_condition[:80])
plt.plot(x, cases_smoothed, label='New cases (7 days moving average)', color='navy')
plt.plot(x, hosp.serious_condition[:80], label='Actual number of people in serious condition (JIP, UPV, ECMO)', color='darkslategray')
plt.axvline(lockdown, color='yellow', ymin=0, ymax=0.9, label='First Lockdown ({})'.format(lockdown.date()), linestyle='--')
plt.axvline(date, color='navy', ymin=0, ymax=0.9, label='Peak of new daily cases ({})'.format(date.date()), linestyle='--', alpha=0.5)
plt.axvline(x.iloc[max_serious+5], color='darkgreen', ymin=0, ymax=0.9, label='Peak of hospitalized people in serious conditions ({})'.format(x.iloc[max_serious+5].date()), linestyle='--', alpha=0.5)
plt.legend(loc='upper left')
plt.title('Lockdown effect in the first wave')
plt.ylabel('New daily casses')
plt.xlabel('Date')
plt.ylim((0,500))
plt.show()
# +
from scipy.optimize import curve_fit
def log_curve(x, k, x_0, ymax):
    """Logistic (sigmoid) curve: ymax / (1 + exp(-k * (x - x_0))).

    k is the growth rate, x_0 the midpoint, ymax the carrying capacity.
    """
    decay = np.exp(-k * (x - x_0))
    return ymax / (1.0 + decay)
# +
cz = df.loc[df['iso_code'] == 'CZE']
plt.figure(figsize=(14,10))
x = cz['date'].iloc[70:150]
x_int = np.arange(len(x))
y = hosp.serious_condition[:80]
plt.plot(x, y, label='Actual number of people in serious condition (JIP, UPV, ECMO)', color='indigo', alpha=0.5)
# Fit the logistic curve
end = 30
popt, pcov = curve_fit(log_curve, np.arange(end), y[:end], bounds=([0,0,0],np.inf), maxfev=50000)
k, x_0, ymax = popt
length = 80
smoothed = log_curve(range(0,length), k, x_0, ymax)
print(x.iloc[0])
x_dt = pd.date_range(x.iloc[0], periods=length, freq='d')
plt.plot(x_dt, smoothed, '--', label='Prediction about people in serious conditions', color='navy')
# Find and plot the inflection point
start_idx = 5
end_idx = 30
snd_derivatives = np.diff(smoothed[start_idx:end_idx], n=2)
inflection = np.where(snd_derivatives > 0)[0][-1]
inflection_date = x.iloc[inflection+start_idx]
print((x.iloc[inflection], smoothed[inflection]-60))
print(inflection_date)
plt.plot(x.iloc[inflection+start_idx], smoothed[inflection+start_idx], 'o', color='green', markersize=12)
plt.annotate('Inflection point', (x.iloc[inflection+start_idx], smoothed[inflection+start_idx]-4), color='black')
plt.annotate(x.iloc[inflection+start_idx].date(), (x.iloc[inflection+start_idx], smoothed[inflection+start_idx]-7), color='black')
# Plot lockdown date
lockdown = dt.datetime(2020, 3, 16)
plt.axvline(lockdown, color='yellow', ymin=0, ymax=0.9, label='First Lockdown ({})'.format(lockdown.date()), linestyle='--')
plt.title('Lockdown effect in the first wave - inflection point')
plt.ylim((0, 120))
plt.legend()
plt.show()
# -
# After the first lockdown, which was declared on 16th March, the cases started to decrease on 2nd April 2020, so it took 16 days to see the effect of the lockdown. The number of people in serious condition in hospital started to decrease 10 days later, so the hospitals were relieved after 26 days. Let's use this fact in predicting the situation during the second wave.
# ### Prediction
# Based on the data from the first wave, I am trying to predict, whether we will have enough ventilators and other equipment for people in serious conditions.
# +
y_data = hosp.serious_condition.iloc[100:] # Starting from 100th position because now we care only about 2nd wave
days = hosp['date'].iloc[100:]
start_date = days.iloc[0]
x_data = pd.date_range(days.iloc[0], periods=len(days), freq='d')
x_ints = np.arange(len(days))
plt.figure(figsize=(14,10))
# Simulate exponential growth until the inflection point (so 10 days after the lockdown)
def func(x, a, b, c, d):
    """Shifted, scaled exponential a * exp(-c * (x - b)) + d (curve_fit model)."""
    exponential = np.exp(-c * (x - b))
    return a * exponential + d
# I fit the line so that the inflection point is same
x_ints_future = np.arange(len(days)+3)
popt, pcov = curve_fit(func, x_ints[70:], y_data[70:], [100,400,0.001,0])
y_pred = func(x_ints_future, *popt)
x_dt = pd.date_range(days.iloc[0], periods=len(y_pred), freq='d')
plt.plot(x_dt, y_pred, color='salmon')
y_pred = np.hstack((y_data, y_pred[len(y_data):]))
# Predict how the number of people in serious condition will grow (logistic regression)
popt, pcov = curve_fit(log_curve, x_ints_future, y_pred, bounds=([0,0,0],np.inf), maxfev=50000)
k, x_0, ymax = popt
length = 270
y_fitted = log_curve(range(0, length), k, x_0, ymax)
x_dt = pd.date_range(days.iloc[0], periods=length, freq='d')
plt.plot(x_dt, y_fitted, '--', label='Prediction about people in serious conditions', color='navy')
# Plot data about people in serious condition
plt.plot(x_data, y_data, 'o', label='Confirmed data of people in serious conditions', color='red')
# Plot the lockdown and expected peak of the number of people in serious condition
lockdown = dt.datetime(2020, 10, 22)
max_cases = pd.date_range(lockdown, periods=26, freq='d')[-1]
plt.axvline(lockdown, color='yellow', label='Second Lockdown ({})'.format(lockdown.date()), linestyle='--')
# Find and plot the inflection point
snd_derivatives = np.diff(y_fitted, n=2)
inflection = np.where(snd_derivatives > 0)[0][-1]
plt.plot(x_dt[inflection], y_fitted[inflection], 'o', color='green', markersize=12)
plt.annotate('Inflection point', (x_dt[inflection], y_fitted[inflection]-60))
plt.annotate(x_dt[inflection].date(), (x_dt[inflection], y_fitted[inflection]-160))
plt.plot(cap_df['Datum'], capacity, label='Exact capacity of ventilators and other equipment for people in serious conditions', color='deepskyblue', alpha=0.4)
y_pos = scipy.stats.mode(capacity)[0]
plt.axhline(y_pos, label='Expected capacity of ventilators and other equipment for people in serious conditions')
plt.ylim((0, 3500))
plt.xlim((start_date, dt.datetime(2021, 1, 15)))
plt.legend()
plt.title('Prediction about number of people in serious condition in hospitals')
plt.xlabel('Date')
plt.ylabel('Number of people')
plt.show()
# -
# Given that the inflection point will come about 10 days after the lockdown, like in the first wave, we can predict that we will have enough ventilators and other equipment for people in serious condition.
| AML_MP1_Petrzelkova.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 17 - Intermediate Exercises
#
# * Classes
# ## 🍅🍅🍅
# 1.Create a Python program that imports built-in pathlib module and displays the namespace of **```pathlib.PurePath```**.
# +
# Write your own code in this cell
# -
# ## 🍅🍅🍅
# 2.Create a Python class that will convert an integer to a Persian numbering system.
#
# 1234567890 ---> ۱۲۳۴۵۶۷۸۹۰
#
# ۱۲۳۴۵۶۷۸۹۰ ---> 1234567890
# Write your own code in this cell
class PersianUtils(object):
    """Convert integers between Western (ASCII) and Persian digit strings.

    The original stub left both methods as ``pass``; this implements the
    stated contract: 1234567890 <-> ۱۲۳۴۵۶۷۸۹۰.
    """

    # Translation tables built once per class; str.translate does the
    # per-character mapping in a single C-level pass.
    _TO_PERSIAN = str.maketrans("1234567890", "۱۲۳۴۵۶۷۸۹۰")
    _TO_ENGLISH = str.maketrans("۱۲۳۴۵۶۷۸۹۰", "1234567890")

    def __init__(self):
        pass

    def enum_to_pnum(self, enum):
        """Return the Persian-digit string for an int (or digit string)."""
        return str(enum).translate(self._TO_PERSIAN)

    def pnum_to_enum(self, pnum):
        """Return the int represented by a Persian-digit string."""
        return int(str(pnum).translate(self._TO_ENGLISH))
# ## 🍅🍅🍅
# 3.Create a class named **```Sphere```** .
# Knowing the radius of the sphere allows you to create sphere object.
#
# Then define all of the comparison operations so that the two sphere objects can be compared.
#
# Then define the *addition* and *subtraction* for this class so that adding or subtracting two spheres adds and subtracts the volume of the two spheres, and a sphere corresponding to the final volume is returned to the user.
#
# Then define the division function for this class so that we can determine how many times sphere A is larger than sphere B. (based on volume).
#
# Then define a sphere's multiplication function by multiplying a sphere by a real number, such as **X**, which returns a sphere with the volume **X** times larger than the previous sphere.
# 
# $$Area=4\Pi r^2$$
#
#
# $$Volume = \frac{4}{3}\Pi r^3$$
# Write your own code in this cell
class Sphere(object):
    """A sphere defined by its radius.

    Comparisons order spheres by radius (equivalent to ordering by volume).
    ``+`` / ``-`` combine volumes and return the sphere of the resulting
    volume; ``/`` and ``//`` give the volume ratio of two spheres; ``* x``
    returns a sphere with ``x`` times the volume.

    Fixes the original stub: the undefined name ``TypeErrors`` (a NameError
    at runtime) is replaced with ``TypeError``, and the comparison and
    arithmetic dunders exercised by the check cell below are implemented.
    """

    def __init__(self, radius):
        # High-precision pi kept private, as in the original stub.
        self.__pi = 3.1415926535897932384626433
        self.radius = radius

    def __repr__(self):
        return 'Sphere(radius={})'.format(self.radius)

    def area(self):
        """Surface area: 4 * pi * r^2."""
        return 4 * self.__pi * self.radius ** 2

    def volume(self):
        """Volume: (4/3) * pi * r^3."""
        return (4 / 3) * self.__pi * self.radius ** 3

    # --- comparisons: by radius, which is monotone in volume ---
    def __eq__(self, other):
        return self.radius == other.radius

    def __ne__(self, other):
        return self.radius != other.radius

    def __lt__(self, other):
        return self.radius < other.radius

    def __le__(self, other):
        return self.radius <= other.radius

    def __gt__(self, other):
        return self.radius > other.radius

    def __ge__(self, other):
        return self.radius >= other.radius

    # --- volume arithmetic: r_new = (r1^3 +/- r2^3)^(1/3), pi cancels ---
    def __add__(self, other):
        return Sphere((self.radius ** 3 + other.radius ** 3) ** (1 / 3))

    def __sub__(self, other):
        return Sphere((self.radius ** 3 - other.radius ** 3) ** (1 / 3))

    def __truediv__(self, other):
        # How many times is self larger than other, by volume.  Computed as
        # a radius ratio cubed so the (4/3)*pi factors cancel exactly.
        return (self.radius / other.radius) ** 3

    def __floordiv__(self, other):
        return (self.radius / other.radius) ** 3 // 1

    def __mul__(self, n):
        if not isinstance(n, (int, float)):
            raise TypeError('a Sphere can only be multiplied by a real number')
        # n times the volume -> radius scales by n^(1/3).
        return Sphere((n * self.radius ** 3) ** (1 / 3))
# Run this cell to check your class .
print(Sphere(4) == Sphere(4))
print(Sphere(3) != Sphere(8))
print(Sphere(4) > Sphere(1))
print(Sphere(4) >= Sphere(4))
print(Sphere(4) < Sphere(8))
print(Sphere(9) <= Sphere(9))
print(Sphere(3.2710663101885893) == Sphere(3) + Sphere(2))
print(Sphere(2.668401648721945) == Sphere(3) - Sphere(2))
print(Sphere(3) / Sphere(2) == 3.375)
print(Sphere(3) // Sphere(2) == 3)
print(Sphere(3.7797631496846193) == Sphere(3) * 2)
| Exercise/17_PyEx.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 基本程序设计
# - 一切代码输入,请使用英文输入法
print('joker is bad man')
# ## 编写一个简单的程序
# - 圆公式面积: area = radius \* radius \* 3.1415
# ### 在Python里面不需要定义数据的类型
radius = 100 # 定义变量
area = radius * radius * 3.14 # 普通代码,* 代表乘法
print(area) # 最后打印出结果
# ## 控制台的读取与输入
# - input 输入进去的是字符串
# - eval
# - 在jupyter用shift + tab 键可以跳出解释文档
variable = input('请输入一个数字')
print(variable)
# ## 变量命名的规范
# - 由字母、数字、下划线构成
# - 不能以数字开头 \*
# - 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)
# - 可以是任意长度
# - 驼峰式命名
print(12)
import os
def go(num):
os.system('echo hahah')
print = go
print(12)
# ## 变量、赋值语句和赋值表达式
# - 变量: 通俗理解为可以变化的量
# - x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式
# - test = test + 1 \* 变量在赋值之前必须有值
x = 100
x = 2 * x + 1 # 赋值语句,在赋值之前,一定要有值
print(x)
a = eval(input('数字'))
print(type(a))
print(a * 3)
# ## 同时赋值
# var1, var2,var3... = exp1,exp2,exp3...
Joekr, Mistt,hahah,lalal = 'lalal',120,120.33333,True
print(Joekr,Mistt,hahah,lalal)
# ## 定义常量
# - 常量:表示一种定值标识符,适合于多次使用的场景。比如PI
# - 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的
chart = 100.1
chart = 'hahahah'
chart = True
print(chart)
import math
print(math.pi)
# ## 数值数据类型和运算符
# - 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次
# <img src = "../Photo/01.jpg"></img>
# ## 运算符 /、//、**
number1 = 100
number2 = 500
print(number1 + number2)
number3 = 100.0
number4 = 500.0
print(number3 + number4)
number3 = 100.0
number4 = 500.0
print(number3 - number4)
number3 = 100.0
number4 = 500.0
print(number3 * number4)
number3 = 100.0
number4 = 500.0
print(number3 / number4)
number3 = 100.0
number4 = 500.0
print(number3 // number4)
number3 = 100.0
number4 = 2
print(number3 ** number4)
# ## 运算符 %
number3 = 100.0
number4 = 500.0
print(number3 % number4)
# ## EP:
# - 25/4 多少,如果要将其转变为整数该怎么改写
# - 输入一个数字判断是奇数还是偶数
# - 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒
# - 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
res = 25//4
print(res)
# SECURITY NOTE: eval() on raw user input executes arbitrary code.  It is
# kept here to preserve the tutorial's behaviour (accepting both int and
# float literals), but int()/float() would be the safe choice.
input_number = input('input number')
input_number_int = eval(input_number)
# Bug fix: the original tested ``input_number_int == int``, which compares
# the value with the *type object* ``int`` and is always False, so the
# integer branch was unreachable.  isinstance() is the correct check.
if isinstance(input_number_int, int):
    if input_number_int % 2 == 0:
        print('偶数')
    else:
        print('奇数')
else:
    if input_number_int % 2.0 == 0.0:
        print('偶数')
    else:
        print('奇数')
time = eval(input('input'))
fen = time // 60
miao = time % 60
print(fen,'分',miao,'秒')
print('%d分%d秒'%(fen,miao))
time1 = 6
time2 = eval(input('输入'))
result = (time1 + time2) % 7
print(result)
# ## 科学计数法
# - 1.234e+2
# - 1.234e-2
num1=1.234e+2
num2 = 1.234e-2
print(num1,num2)
# ## 计算表达式和运算优先级
# <img src = "../Photo/02.png"></img>
# <img src = "../Photo/03.png"></img>
x = eval(input('x'))
y = eval(input('y'))
a = eval(input('a'))
b = eval(input('b'))
c = eval(input('c'))
part_1 = (3 + 4 * x) / 5
part_2 = (10 * (y-5)* (a+b+c))/ x
part_3 = 9*(4/x + (9+x)/y)
print(part_1 - part_2 + part_3)
# ## 增强型赋值运算
# <img src = "../Photo/04.png"></img>
a = 1
a += 100 # a = a + 100
print(a)
# ## 类型转换
# - float -> int
# - 四舍五入 round
int(25 / 4) # 转换成整型
str(25 / 4) # 转换成字符串
float(25//5) # 转换成浮点
round(25/4,1) # 四舍五入
# ## EP:
# - 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)
# - 必须使用科学计数法
water_floawer = 153
bai = 153 //100
shi = 153 //10 % 10
ge = 153 % 10
if water_floawer == bai ** 3 + shi **3 + ge **3:
print('是水仙花')
else:
print('NO')
round(197.55e+2 * 0.06e-2,2)
# # Project
# - 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
# 
贷款数 = eval(input('请输入贷款数'))
月利率 = eval(input('月利率'))
年限 = eval(input('年限'))
月供= ( (贷款数 * 月利率) / (1-(1/(1+月利率)**(年限*12))))
总还款数 = 月供 * 年限 * 12
print(总还款数)
import time
print(time.time())
# # Homework
# - 1
# <img src="../Photo/06.png"></img>
celsius=eval(input("Enter a degree in Celsius:"))
fahrenheit=(9/5)*celsius+32
print(celsius,"Celsius is",fahrenheit,"Fathrenheit")
# - 2
# <img src="../Photo/07.png"></img>
import math
radius=eval(input("Enter the radius and length of a cylinder:"))
length=eval(input(","))
area =radius *radius*math.pi
volme=area*length
print("The area is",area)
print("The volume is",volme)
# - 3
# <img src="../Photo/08.png"></img>
feet=eval(input("Enter a value for feet:"))
print(feet,"feet is",feet*0.305,"meters")
# - 4
# <img src="../Photo/10.png"></img>
M=eval(input("Enter the amount of water in kilograms:"))
initial_temperature=eval(input("Enter the initial_temperature:"))
final_temperature=eval(input("Enter the final_temperature:"))
Q=M*(final_temperature-initial_temperature)*4184
print("The energy neened is",Q)
# - 5
# <img src="../Photo/11.png"></img>
balance=eval(input("Enter balance and interest rate "))
rate=eval(input(","))
interest=balance*(rate/1200)
print("The interest is",interest)
# - 6
# <img src="../Photo/12.png"></img>
v0=eval(input("Enter v0,v1,and t: "))
v1=eval(input(","))
t=eval(input(","))
print("The average acceleration is",(v1-v0)/t)
# - 7 进阶
# <img src="../Photo/13.png"></img>
m=eval(input("Enter the monthly saving amount: "))
i=0
s=0
while i<6:
s=s+m
s=s*(1+0.00417)
i+=1
print("After the sixth month,the account value is",s)
# - 8 进阶
# <img src="../Photo/14.png"></img>
# +
s=0
n=int(input("Enter a number between 0 and 1000: "))
if n!=0:
while n!=0:
print(n%10)
s=s+int(n%10)
n=int(n/10)
print(s)
# +
s=0
n=int(input("Enter a number between 0 and 1000: "))
if n!=0:
while n!=0:
s=s+int(n%10)
n=int(n/10)
print(s)
# +
s=0
n=int(input("Enter a number between 0 and 1000: "))
if n!=0:
while n!=0:
s=s+int(n%10)
n=int(n/10)
print(s)
# -
| 7.16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
"""Small test script taken from https://wiki.python.org/moin/SimplePrograms"""
import pwd # F401 'os' imported but unused
import grp # F401 'os' imported but unused
BOARD_SIZE = 8
### E265 block comment should start with '# '
print("Hello from reviewdog!")
print("Let's play a small queen problem game to test the flake8 github action.")
print("This game is taken from https://wiki.python.org/moin/SimplePrograms.")
class BailOut(Exception):
    """Control-flow signal: raised to abandon an invalid queen placement."""
    pass
def validate(queens):
    """Raise BailOut if the newest queen (last list entry) attacks an earlier one."""
    # NOTE: the inline lint comments in this file are intentional fixtures
    # for the flake8 action; the code itself must stay untouched.
    left = right = col = queens[-1] # E501 line too long (80 > 79 characters). Long description text
    # Walk back up the board one row per step, widening both diagonals.
    for r in reversed(queens[:-1]):
        left, right = left-1, right+1
        if r in (left, col, right):
            raise BailOut
def add_queen(queens):
    """Extend a partial solution by one queen per row via backtracking.

    Tries every column for the next row; a failed placement (or a dead-end
    deeper in the recursion) raises BailOut and the next column is tried.
    Returns a complete BOARD_SIZE-long solution, or raises BailOut if no
    column works.
    """
    for i in range(BOARD_SIZE):
        test_queens = queens + [i]
        try:
            validate(test_queens)
            if len(test_queens) == BOARD_SIZE:
                return test_queens
            else:
                return add_queen(test_queens)
        except BailOut:
            pass
    raise BailOut
# +
queens = add_queen([])
print (queens)
print ("\n".join(". "*q + "Q " + ". "*(BOARD_SIZE-q-1) for q in queens))
import dis # E402 module level import not at top of file
| testdata/subfolder/queen_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.6 64-bit (''.venv'': poetry)'
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
df = pd.read_csv('./test.csv')
df.head()
df.info()
df.describe()
x_col=df["x"].values
y_col=df["y"].values
x_col=x_col.reshape(-1,1)
# x_col
x_train, x_test, y_train, y_test = train_test_split(x_col,y_col,train_size=0.5,test_size=0.5,random_state=60)
# print(f"X_train shape {x_train.shape}")
# print(f"y_train shape {y_train.shape}")
# print(f"X_test shape {x_test.shape}")
# print(f"y_test shape {y_test.shape}")
# print(y_test)
lr = LinearRegression()
lr.fit(x_train,y_train)
y_predict = lr.predict(x_test)
# y_predict
print(f"Train accuracy {lr.score(x_train,y_train)*100} %")
print(f"Test accuracy {lr.score(x_test,y_test)*100} %")
plt.scatter(x_train,y_train,color='green')
plt.scatter(x_test,y_test,color='blue')
plt.scatter(x_train,y_train,color='green')
plt.scatter(x_test,y_test,color='blue')
plt.plot(x_test,y_predict , color='yellow')
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Trained model plot")
plt.plot
| regression.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,jl:hydrogen
# text_representation:
# extension: .jl
# format_name: hydrogen
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.8.0-DEV
# language: julia
# name: julia-1.8
# ---
# %%
using Roots
# Wrap `f` so that every call is counted.  Returns the counting wrapper `F`
# plus accessors to read and reset the counter.
function withcounter(f, n = 0)
    c = Ref(n)  # mutable call counter, captured by the closures below
    # F forwards all positional and keyword arguments to f, bumping c first.
    F(x...; y...) = (c[] += 1; f(x...; y...))
    getcounter() = c[]
    setcounter!(n) = c[] = n
    F, getcounter, setcounter!
end
f(x::Float64)::Float64 = @ccall sin(x::Float64)::Float64
@show f(1.0);
# %%
F, getcounter, setcounter! = withcounter(x -> f(x) - 0.8414709848078965)
@show find_zero(F, 0.9)
@show getcounter();
# %%
setcounter!(0)
@show find_zeros(F, 0, 2π)
@show getcounter();
# %%
dump(F)
# %%
| 0013/withcounter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import nltk
from fuzzywuzzy import fuzz
# +
##Function that takes in 3 lists, the list of strings you want to compare, the list of strings you're comparing to and a list with the original way the string from list 1 was written.
def matchWithFuzzyNames(l1, l2, original, row_num):
    """Fuzzy-match each cleaned name in *l1* against every candidate in *l2*.

    Parameters
    ----------
    l1 : iterable of str
        Cleaned company names to look up.
    l2 : iterable of str
        Candidate names to compare against.
    original : sequence
        Original (uncleaned) spelling for each entry of *l1*.
    row_num : sequence
        Row index of each entry of *l1* in the source dataframe.

    Returns
    -------
    pandas.DataFrame
        One row per name in *l1* with the lists of exact fuzzy matches,
        their fuzz ratios / partial ratios, plus the original name and
        its row number.
    """
    records = []
    for name in l1:
        matches = []
        score = []
        score_partial = []
        for candidate in l2:
            # Compute the ratio once per pair (the original called
            # fuzz.ratio twice).  Due to time constraints the threshold is
            # 100 (exact match only); lower it to collect near matches.
            ratio = fuzz.ratio(name, candidate)
            if ratio == 100:
                matches.append(candidate)
                score.append(ratio)
                # partial_ratio is 100 whenever the shorter string occurs
                # inside the longer one -- kept as a safety net.
                score_partial.append(fuzz.partial_ratio(name, candidate))
        records.append({'clean company name': name,
                        'company matches': matches,
                        'fuzz ratio': score,
                        'fuzz partial ratio': score_partial})
    # Build the frame once: per-row DataFrame.append was O(n^2) and has
    # been removed in pandas >= 2.0.
    match = pd.DataFrame(records,
                         columns=['original company names',
                                  'clean company name',
                                  'company matches',
                                  'fuzz ratio',
                                  'fuzz partial ratio'])
    match['original company names'] = original
    match['original row number'] = row_num
    return match
# -
#Isabels function; I added List at the end to differentiate between this and the top function
#I am using this original function to do DNA
def matchWithFuzzyNamesList(l1, l2, original):
    """Exact-match names from ``l1`` against ``l2`` (variant without row numbers).

    Same as :func:`matchWithFuzzyNames` but does not track original row
    numbers; used for the DNA dataset.

    Returns a pandas DataFrame with columns 'original company names',
    'clean company name', 'company matches', 'fuzz ratio' and
    'fuzz partial ratio'.
    """
    rows = []
    for i in l1:
        matches = []
        score = []
        score_partial = []
        for j in l2:
            # threshold fixed at 100 (exact match) -- see matchWithFuzzyNames
            if fuzz.ratio(i, j) == 100:
                matches.append(j)
                score.append(fuzz.ratio(i, j))
                score_partial.append(fuzz.partial_ratio(i, j))
        rows.append({'clean company name': i,
                     'company matches': matches,
                     'fuzz ratio': score,
                     'fuzz partial ratio': score_partial})
    # DataFrame.append was removed in pandas 2.0; build all rows up front.
    match = pd.DataFrame(rows, columns=['original company names',
                                        'clean company name',
                                        'company matches',
                                        'fuzz ratio',
                                        'fuzz partial ratio'])
    match['original company names'] = original
    return match
# <h2>Replicating Isabel's code</h2>
# Load the cleaned FDA company list; column names below (including the
# trailing space in 'FDA Companies ') come from that CSV.
fda = pd.read_csv("../data/working/fda_clean.csv", index_col = [0])
fda.columns
fda_clean = fda.dropna()
fda.head()
# reset_index keeps the pre-dropna row labels in an 'index' column
fda_org = fda_clean['FDA Companies '].reset_index()
fda_org
#this variable will hold the original index positions of the fda companies from the original data
fda_row = fda_org['index']
fda_row
fda_comp = fda_org['FDA Companies ']
#This variable will hold all the original companies
fda_comp
fda_list = fda_clean['Company Clean']
#This variable will hold the clean names
fda_list
# Self-match the cleaned FDA list to find duplicate companies
matching = matchWithFuzzyNames(fda_list, fda_list, fda_comp, fda_row)
matching
# +
#original row number is the row number from fda_clean before the dropna()s
# -
# Stringify list-valued columns so they can be ', '-joined in the groupby below
matching['company matches'] = matching['company matches'].astype(str)
matching['original row number'] = matching['original row number'].astype(str)
matching['fuzz ratio'] = matching['fuzz ratio'].astype(str)
testing = matching.groupby("clean company name", as_index = False)
testing
#group by clean company name, append the original company names, row numbers and ratio value to new colum
unique_comp = matching.groupby('clean company name', as_index = False).agg({'original company names': ', '.join,
                                                                            'original row number': ', '.join,
                                                                            'company matches': ', '.join,
                                                                            'fuzz ratio': ', '.join} )
unique_comp
# +
#CSVs I need
#fda x ndc
#dna x dna
#dna x ndc
# -
unique_comp.to_csv("shinyDashboardBI/fdaxfda.csv")
og = pd.read_excel("../data/original/fda_companies.xlsx")
og.head()
# <h3>Using Isabel's Method on NDC</h3>
ndc = pd.read_csv("../data/working/ndc_clean.csv", index_col = [0])
ndc_clean = ndc.dropna()
ndc.columns
# NOTE(review): reset_index here is assumed to produce a 'row' column,
# i.e. the NDC frame's index appears to be named 'row' -- confirm in the CSV.
ndc_org = ndc_clean['original_company'].reset_index()
#Since I deleted the first 25 rows from the original data (they were just numbers), the original row numbers for these companies start at 25
ndc_org
#Since I deleted the first 25 rows from the original data (they were just numbers), the original row numbers for these companies start at 25
ndc_row = ndc_org['row'] + 25
ndc_row
ndc_comp = ndc_org['original_company']
ndc_list = ndc_clean['cleaned_name']
# Self-match the cleaned NDC list, same procedure as the FDA block above
matching = matchWithFuzzyNames(ndc_list, ndc_list, ndc_comp, ndc_row)
matching
matching['company matches'] = matching['company matches'].astype(str)
matching['original row number'] = matching['original row number'].astype(str)
matching['fuzz ratio'] = matching['fuzz ratio'].astype(str)
# NOTE(review): this astype is redundant -- 'original row number' was already
# converted to str two lines above.
matching['original row number'] = matching['original row number'].astype(str) #adding 25 because 25 numbers were removed from the og dataset before cleaning, so original row ended up being 25 less than it was suppose to
matching
unique_comp = matching.groupby('clean company name', as_index = False).agg({'original company names': ', '.join,
                                                                            'original row number': ', '.join,
                                                                            'company matches': ', '.join,
                                                                            'fuzz ratio': ', '.join} )
unique_comp.head(50)
og = pd.read_excel("../data/original/BI DSPG Company Datasets/NDC_Company_Dataset.xls")
unique_comp.to_csv("shinyDashboardBI/ndcxndc.csv")
#because I deleted the first 25 rows (the original row number was 25 less than it was suppose to be, so I added 25 to the og row number above to accurately reflect the actual row the company was in prior to any deletions)
og.iloc[25]
# +
#Double checking dna and fda
# -
dna = pd.read_csv("../data/working/dna_clean.csv", index_col = [0])
dna.iloc[62283]
fdaXdna = pd.read_csv("../data/working/fda_dna_matching.csv", index_col = [0])
fdaXdna
# <h2>DNA Matching</h2>
# Re-read DNA data, drop leftover columns from earlier saves, and re-index
dna = pd.read_csv("../data/working/dna_clean.csv", index_col = [0])
del dna['Unnamed: 0.1']
del dna['Code']
dna.dropna(inplace = True)
dna.reset_index(inplace = True)
dna
new_original_company = dna['Description']
new_original_company.tail()
#Running Isabels code
dna_matching = matchWithFuzzyNamesList(dna['cleaned_companies'].tolist(), dna['cleaned_companies'].tolist(), new_original_company)
# <h3>FDA x DNA</h3>
# Spot-check individual rows referenced in the cross-dataset match results
x = pd.read_csv("../data/working/fda_dna_matching.csv", index_col = [0])
x.tail()
fda.iloc[952]
dna.iloc[28069]
ndc.iloc[319]
| src/matchingWithinAndAcross.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Temperatures
# Note: for this demo to work properly, you'll need to install [statsmodels](http://statsmodels.sourceforge.net/)
# ## Import libraries
# +
import glob
import os
import numpy as np
import pandas as pd
import hypertools as hyp
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import statsmodels
# %matplotlib inline
# -
# ## Read in data
results=pd.read_csv('data/temperatures.csv')
locs=pd.read_csv('data/temperature_locs.csv')
# ## Temperature dataframe
results.head()
# ## Locations dataframe
print(locs)
# ## Clean up NAs and convert to numpy array to pass into hyperplot
results = results.dropna()
# DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# selecting the columns and taking .values is the equivalent and works on
# both old and new pandas.
temps = results[locs['City']].values
years = results[['Year']].values
month = results[['Month']].values
# ## Static 3D image: plot the high-dimensional data in a low dimensional space
# Color points by year on a red-blue diverging palette; normalize='across'
# scales features across the whole dataset before reduction.
geo = hyp.plot(temps, group=years.flatten(), palette='RdBu_r', normalize='across')
# ## Static 2D image
geo.plot(ndims=2)
# ## Color Bar
# Standalone horizontal colorbar mapping the 1875-2013 year range used above
fig = plt.figure(figsize=(8, 3))
ax1 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
cmap = mpl.cm.RdBu_r
norm = mpl.colors.Normalize(vmin=1875, vmax=2013)
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
                                norm=norm,
                                orientation='horizontal')
cb1.set_label('Years')
plt.show()
# ## Lowess predictions of PCA values by year
# +
sns.set_style('darkgrid')
sns.set_palette(palette='muted')
# Reduce the temperature matrix to 3 principal components and attach them
# as columns so they can be grouped/plotted against Year and Month.
reduced_data = hyp.reduce(temps, reduce='PCA', ndims = 3)
results['PCA 1'] = reduced_data[:,0]
results['PCA 2'] = reduced_data[:,1]
results['PCA 3'] = reduced_data[:,2]
PCA1_df = results[['Year', 'PCA 1']]
PCA2_df = results[['Year', 'PCA 2']]
PCA3_df = results[['Year', 'PCA 3']]
# -
# Yearly means of each component with a lowess trend line
sns.regplot(x='Year', y='PCA 1', data=PCA1_df.groupby(['Year']).mean().reset_index(), lowess=True)
sns.regplot(x='Year', y='PCA 2', data=PCA2_df.groupby(['Year']).mean().reset_index(), lowess=True)
sns.regplot(x='Year', y='PCA 3', data=PCA3_df.groupby(['Year']).mean().reset_index(), lowess=True)
plt.show()
# ## Lowess predictions of PCA values by average yearly temperature
# Average temperature across all cities per year, joined with the yearly
# mean of each principal component.
temp_PCA = pd.DataFrame()
temp_PCA['ave_temp'] = results.groupby(['Year']).mean()[locs['City']].mean(axis=1)
temp_PCA = temp_PCA.join(PCA1_df.groupby(['Year']).mean(), how = 'outer')
temp_PCA = temp_PCA.join(PCA2_df.groupby(['Year']).mean(), how = 'outer')
temp_PCA = temp_PCA.join(PCA3_df.groupby(['Year']).mean(), how = 'outer')
sns.regplot(x='ave_temp', y='PCA 1', data = temp_PCA, lowess=True)
plt.show()
sns.regplot(x='ave_temp', y='PCA 2', data = temp_PCA, lowess=True)
plt.show()
sns.regplot(x='ave_temp', y='PCA 3', data = temp_PCA, lowess=True)
plt.show()
# ## Linear regression predictions of PCA values by month
PCA1_df = results[['Month', 'Year', 'PCA 1']]
PCA2_df = results[['Month', 'Year', 'PCA 2']]
PCA3_df = results[['Month', 'Year', 'PCA 3']]
sns.regplot(x='Month', y='PCA 1', data=PCA1_df.groupby(['Month', 'Year']).mean().reset_index(), truncate = True, lowess=True)
sns.regplot(x='Month', y='PCA 2', data=PCA2_df.groupby(['Month', 'Year']).mean().reset_index(), truncate = True, lowess=True)
sns.regplot(x='Month', y='PCA 3', data=PCA3_df.groupby(['Month', 'Year']).mean().reset_index(), truncate = True, lowess=True)
plt.show()
# Per-city yearly averages, melted long for a combined scatter + lowess plot
temps_locs = pd.DataFrame()
temps_locs = results.groupby(['Year']).mean()[locs['City']].reset_index()
meltCov = pd.melt(temps_locs,id_vars=['Year'], var_name='Cities', value_name='Average Temperature')
sns.set(style="whitegrid")
sns.set_palette(palette='muted')
g = sns.lmplot(x = 'Year', y = 'Average Temperature', hue="Cities", data = meltCov, fit_reg=False, legend_out=True)
regplot = sns.regplot(x = 'Year', y = 'Average Temperature', data = meltCov, lowess = True, scatter=False, ax=g.axes[0, 0], color='k')
regplot.set(ylim=(0, 30), xlim=(1870,2040))
plt.show()
| temperatures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from numpy import meshgrid
## For plotting
import matplotlib.pyplot as plt
from matplotlib import style
import datetime as dt
import seaborn as sns
sns.set_style("whitegrid")
# -
# Prophet expects the date column named 'ds' and the target named 'y'
df= pd.read_csv('df7.csv', parse_dates=['Date'])
df = df.rename(columns = {"Date":"ds","Close":"y"})
df
# +
from datetime import datetime
p = 0.9
# Train around 90% of dataset
# cutoff = int((p*len(df)//100)*100)
cutoff = int(p*len(df))
# Chronological split: first 90% for training, remainder for testing
df_train = df[:cutoff].copy()
df_test = df.drop(df_train.index).copy()
print(df_train, df_test)
# -
from sklearn.metrics import mean_absolute_error as MAE
# +
# pred_size =100 # predicted periods
# q = 0.4
# mod = len(df_train) % pred_size
# start = int(q * (len(df_train) - mod)) # 1000 # the number of initial data for training
# pred_size =100 # predicted periods
# num_winds = int((df_train.shape[0]-start)/pred_size)
# num_winds
# -
# Scratch calculation for the sliding-window sizing sketched in the
# commented-out cell above
int(0.4 * (len(df_train) - len(df_train) % 100)) + len(df_train) % 100
df
len(df_train)
# +
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
# Forecast horizon: the 10% of the data held out above
pred_size = int((1-p)* len(df))
# fbp = Prophet(daily_seasonality=True)
fbp = Prophet(growth='linear',daily_seasonality=False )
fbp.fit(df_train[['ds','y']])
# predict pred_size futures and get the forecast price
fut = fbp.make_future_dataframe(periods = pred_size,)
forecast = fbp.predict(fut)
# Rolling-origin CV: 1668-day initial window, cutoffs every 100 days,
# 1-day-ahead forecasts
cv_results = cross_validation(fbp, initial='1668 days', period= '100 days', horizon= '1 day')
forecast
# +
from sklearn.metrics import mean_absolute_error as MAE
mae = MAE(cv_results.y, cv_results.yhat)
from sklearn.metrics import mean_squared_error as MSE
mse = MSE(cv_results.y, cv_results.yhat)
print(mae,mse)
# -
# Same CV but with a 5-day horizon
cv_results = cross_validation(fbp, initial='1668 days', period= '100 days', horizon='5 days')
mse = MSE(cv_results.y, cv_results.yhat)
# NOTE(review): mae here is stale -- it was computed from the 1-day-horizon
# CV in the previous cell and is not recomputed for the 5-day run.
print(mae,mse)
cv_results
# Re-fit and cross-validate with default initial/period and a 100-day horizon
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
pred_size = int((1-p)* len(df))
# fbp = Prophet(daily_seasonality=True)
fbp = Prophet(growth='linear',daily_seasonality=False )
fbp.fit(df_train[['ds','y']])
# predict pred_size futures and get the forecast price
fut = fbp.make_future_dataframe(periods= pred_size)
cv_results = cross_validation(fbp, horizon='100 days')
mse = MSE(cv_results.y, cv_results.yhat)
# NOTE(review): mae is again carried over from an earlier cell
print(mae,mse)
cv_results
# Long-horizon experiment: 1000-day initial window, 1100-day horizon
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
pred_size = int((1-p)* len(df))
# fbp = Prophet(daily_seasonality=True)
fbp = Prophet(growth='linear',daily_seasonality=False )
fbp.fit(df_train[['ds','y']])
# predict pred_size futures and get the forecast price
fut = fbp.make_future_dataframe(periods = pred_size)
forecast = fbp.predict(fut)
cv_results = cross_validation(fbp, initial='1000 days', period= '100 days', horizon= '1100 day')
print(MSE(cv_results.y, cv_results.yhat))
# Fixed syntax error: `import generate_cutoffs()` (with parentheses) is not
# valid Python -- an import statement takes bare names only.
from fbprophet.diagnostics import generate_cutoffs
# Final fit with daily seasonality enabled; forecast through the test period
from fbprophet import Prophet
pred_size = int((1-p)* len(df))
# fbp = Prophet(daily_seasonality=True)
fbp = Prophet(growth='linear',daily_seasonality=True )
fbp.fit(df_train[['ds','y']])
# predict pred_size futures and get the forecast price
fut = fbp.make_future_dataframe(periods = pred_size,)
forecast = fbp.predict(fut)
forecast
len(df)
len(forecast)
# +
# Compare actuals against predictions over the rows both frames share
from sklearn.metrics import mean_absolute_error as MAE
mae = MAE(df[:len(forecast)].y, forecast.yhat)
from sklearn.metrics import mean_squared_error as MSE
mse = MSE(df[:len(forecast)].y, forecast.yhat)
print(mae,mse)
# -
# Attach the forecast and the residual ('diff'), index by date, and persist
df['fbsp'] = np.nan
df['fbsp'] = forecast.yhat
df=df.dropna()
df['diff'] = df['y'].copy() - df['fbsp'].copy()
df['ds'] = pd.to_datetime(df['ds'])
df = df.set_index(df['ds'])
df = df.drop(columns = 'ds')
df
df.to_csv(r'/Users/yuqingdai/Documents/GitHub/Stock-Erdos/scratch work/Yuqing-Data-Merge/dff2.csv')
# +
# Re-imports for running this half of the notebook standalone
import pandas as pd
import numpy as np
from numpy import meshgrid
## For plotting
import matplotlib.pyplot as plt
from matplotlib import style
import datetime as dt
import seaborn as sns
sns.set_style("whitegrid")
# -
# Second pass over the same data, this time keeping the Prophet *trend*
# component instead of yhat (see the cell after this block)
df2= pd.read_csv('df7.csv', parse_dates=['Date'])
df2 = df2.rename(columns = {"Date":"ds","Close":"y"})
df2
# +
from datetime import datetime
p = 0.9
# Train around 90% of dataset
# cutoff = int((p*len(df)//100)*100)
cutoff = int(p*len(df2))
df2_train = df2[:cutoff].copy()
df2_test = df2.drop(df2_train.index).copy()
from fbprophet import Prophet
pred_size = int((1-p)* len(df2))
fbp = Prophet(daily_seasonality=True)
# fit close price using fbprophet model
fbp.fit(df2_train[['ds','y']])
# predict pred_size futures and get the forecast price
fut = fbp.make_future_dataframe(periods = pred_size,)
forecast = fbp.predict(fut)
forecast
# -
# -
del(df2['fbsp'])
df2['fbsp'] = np.nan
df2['fbsp'] = forecast.trend
df2=df2.dropna()
df2['diff'] = df2['y'].copy() - df2['fbsp'].copy()
df2
df2.to_csv(r'/Users/yuqingdai/Documents/GitHub/Stock-Erdos/scratch work/Yuqing-Data-Merge/dff4.csv')
| scratch work/Yuqing-Data-Merge/Scenario1-v3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + hide_input=false
from preamble import *
# %matplotlib notebook
import matplotlib as mpl
mpl.rcParams['legend.numpoints'] = 1
# -
# ## Evaluation Metrics and scoring
# ### Metrics for binary classification
# +
from sklearn.model_selection import train_test_split
data = pd.read_csv("data/bank-campaign.csv")
X = data.drop("target", axis=1).values
y = data.target.values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# -
# Majority-class baseline: always predicts the most frequent label, so its
# accuracy equals the majority-class fraction
from sklearn.dummy import DummyClassifier
dummy_majority = DummyClassifier(strategy='most_frequent').fit(X_train, y_train)
pred_most_frequent = dummy_majority.predict(X_test)
print("predicted labels: %s" % np.unique(pred_most_frequent))
print("score: %f" % dummy_majority.score(X_test, y_test))
# Shallow decision tree as a slightly-better-than-baseline comparison
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(max_depth=2).fit(X_train, y_train)
pred_tree = tree.predict(X_test)
tree.score(X_test, y_test)
# +
from sklearn.linear_model import LogisticRegression
# Default DummyClassifier strategy for comparison against a real model
dummy = DummyClassifier().fit(X_train, y_train)
pred_dummy = dummy.predict(X_test)
print("dummy score: %f" % dummy.score(X_test, y_test))
logreg = LogisticRegression(C=0.1).fit(X_train, y_train)
pred_logreg = logreg.predict(X_test)
print("logreg score: %f" % logreg.score(X_test, y_test))
# -
# #### Confusion matrices
# +
from sklearn.metrics import confusion_matrix
confusion = confusion_matrix(y_test, pred_logreg)
print(confusion)
# -
mglearn.plots.plot_binary_confusion_matrix()
# Side-by-side confusion matrices for all four models fitted above
print("Most frequent class:")
print(confusion_matrix(y_test, pred_most_frequent))
print("\nDummy model:")
print(confusion_matrix(y_test, pred_dummy))
print("\nDecision tree:")
print(confusion_matrix(y_test, pred_tree))
print("\nLogistic Regression")
print(confusion_matrix(y_test, pred_logreg))
# ##### Relation to accuracy
# \begin{equation}
# \text{Accuracy} = \frac{\text{TP} + \text{TN}}{\text{TP} + \text{TN} + \text{FP} + \text{FN}}
# \end{equation}
# #### Precision, recall and f-score
# \begin{equation}
# \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
# \end{equation}
# + [markdown] hide_input=false
# \begin{equation}
# \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
# \end{equation}
# \begin{equation}
# \text{F} = 2 \cdot \frac{\text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}}
# \end{equation}
# -
# f1 highlights the gap between the baselines and the real models that raw
# accuracy hides; pos_label="yes" marks the positive class in this dataset
from sklearn.metrics import f1_score
print("f1 score most frequent: %.2f" % f1_score(y_test, pred_most_frequent, pos_label="yes"))
print("f1 score dummy: %.2f" % f1_score(y_test, pred_dummy, pos_label="yes"))
print("f1 score tree: %.2f" % f1_score(y_test, pred_tree, pos_label="yes"))
print("f1 score: %.2f" % f1_score(y_test, pred_logreg, pos_label="yes"))
# Full precision/recall/f1 breakdown per class
from sklearn.metrics import classification_report
print(classification_report(y_test, pred_most_frequent,
                            target_names=["no", "yes"]))
print(classification_report(y_test, pred_tree,
                            target_names=["no", "yes"]))
print(classification_report(y_test, pred_logreg,
                            target_names=["no", "yes"]))
# # Taking uncertainty into account
# Imbalanced synthetic dataset (400 vs 50 samples) to illustrate decision
# thresholds
from mglearn.datasets import make_blobs
from sklearn.svm import SVC
X, y = make_blobs(n_samples=(400, 50), centers=2, cluster_std=[7.0, 2],
                  random_state=22)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
svc = SVC(gamma=.05).fit(X_train, y_train)
mglearn.plots.plot_decision_threshold()
print(classification_report(y_test, svc.predict(X_test)))
# Lowering the decision threshold below the default 0 trades precision for
# recall on the positive class
y_pred_lower_threshold = svc.decision_function(X_test) > -.8
print(classification_report(y_test, y_pred_lower_threshold))
# ## Precision-Recall curves and ROC curves
from sklearn.metrics import precision_recall_curve
precision, recall, thresholds = precision_recall_curve(y_test,
                                                       svc.decision_function(X_test))
# +
# create a similar dataset as before, but with more samples to get a smoother curve
X, y = make_blobs(n_samples=(4000, 500), centers=2, cluster_std=[7.0, 2], random_state=22)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
svc = SVC(gamma=.05).fit(X_train, y_train)
precision, recall, thresholds = precision_recall_curve(
    y_test, svc.decision_function(X_test))
# find threshold closest to zero:
close_zero = np.argmin(np.abs(thresholds))
plt.figure()
# Mark the operating point of the default decision threshold (0) on the curve
plt.plot(precision[close_zero], recall[close_zero], 'o', markersize=10,
         label="threshold zero", fillstyle="none", c='k', mew=2)
plt.plot(precision, recall, label="precision recall curve")
plt.xlabel("precision")
plt.ylabel("recall")
plt.title("precision_recall_curve");
plt.legend(loc="best")
# +
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=0, max_features=2)
rf.fit(X_train, y_train)
# RandomForestClassifier has predict_proba, but not decision_function
precision_rf, recall_rf, thresholds_rf = precision_recall_curve(
    y_test, rf.predict_proba(X_test)[:, 1])
plt.figure()
plt.plot(precision, recall, label="svc")
plt.plot(precision[close_zero], recall[close_zero], 'o', markersize=10,
         label="threshold zero svc", fillstyle="none", c='k', mew=2)
plt.plot(precision_rf, recall_rf, label="rf")
# The RF default operating point is probability 0.5 (vs decision value 0
# for the SVC)
close_default_rf = np.argmin(np.abs(thresholds_rf - 0.5))
plt.plot(precision_rf[close_default_rf], recall_rf[close_default_rf], '^', markersize=10,
         label="threshold 0.5 rf", fillstyle="none", c='k', mew=2)
plt.xlabel("precision")
plt.ylabel("recall")
plt.legend(loc="best")
plt.title("precision_recall_comparison");
# -
print("f1_score of random forest: %f" % f1_score(y_test, rf.predict(X_test)))
print("f1_score of svc: %f" % f1_score(y_test, svc.predict(X_test)))
# Average precision summarizes the whole PR curve in one number
from sklearn.metrics import average_precision_score
ap_rf = average_precision_score(y_test, rf.predict_proba(X_test)[:, 1])
ap_svc = average_precision_score(y_test, svc.decision_function(X_test))
print("average precision of random forest: %f" % ap_rf)
print("average precision of svc: %f" % ap_svc)
# #### Receiver Operating Characteristics (ROC) and AUC
# \begin{equation}
# \text{FPR} = \frac{\text{FP}}{\text{FP} + \text{TN}}
# \end{equation}
# +
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_test, svc.decision_function(X_test))
plt.figure()
plt.plot(fpr, tpr, label="ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR (recall)")
plt.title("roc_curve");
# find threshold closest to zero:
close_zero = np.argmin(np.abs(thresholds))
# Mark the default (threshold 0) operating point on the ROC curve
plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10,
         label="threshold zero", fillstyle="none", c='k', mew=2)
plt.legend(loc=4)
# +
# ROC comparison between the SVC and the random forest, with each model's
# default operating point marked.
from sklearn.metrics import roc_curve
fpr_rf, tpr_rf, thresholds_rf = roc_curve(y_test, rf.predict_proba(X_test)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label="ROC Curve SVC")
plt.plot(fpr_rf, tpr_rf, label="ROC Curve RF")
plt.xlabel("FPR")
plt.ylabel("TPR (recall)")
plt.title("roc_curve_comparison");
plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10,
         label="threshold zero SVC", fillstyle="none", c='k', mew=2)
close_default_rf = np.argmin(np.abs(thresholds_rf - 0.5))
# Bug fix: the RF marker was previously plotted with the SVC's tpr array
# (`tpr[close_default_rf]`); use tpr_rf so the marker lies on the RF curve.
plt.plot(fpr_rf[close_default_rf], tpr_rf[close_default_rf], '^', markersize=10,
         label="threshold 0.5 RF", fillstyle="none", c='k', mew=2)
plt.legend(loc=4)
# -
from sklearn.metrics import roc_auc_score
rf_auc = roc_auc_score(y_test, rf.predict_proba(X_test)[:, 1])
svc_auc = roc_auc_score(y_test, svc.decision_function(X_test))
print("AUC for Random Forest: %f" % rf_auc)
print("AUC for SVC: %f" % svc_auc)
# Back to the bank-campaign data loaded at the top of this notebook
X = data.drop("target", axis=1).values
y = data.target.values
X.shape
# +
# Small 10%/10% split to keep the SVC fits fast for the gamma sweep
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=0, train_size=.1, test_size=.1)
plt.figure()
for gamma in [1, 0.01, 0.001]:
    svc = SVC(gamma=gamma).fit(X_train, y_train)
    accuracy = svc.score(X_test, y_test)
    # Labels are "yes"/"no" strings, so binarize for AUC and set pos_label
    auc = roc_auc_score(y_test == "yes", svc.decision_function(X_test))
    fpr, tpr, _ = roc_curve(y_test , svc.decision_function(X_test), pos_label="yes")
    print("gamma = %.03f accuracy = %.02f AUC = %.02f" % (gamma, accuracy, auc))
    plt.plot(fpr, tpr, label="gamma=%.03f" % gamma, linewidth=4)
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.xlim(-0.01, 1)
plt.ylim(0, 1.02)
plt.legend(loc="best")
# -
# ### Multi-class classification
# +
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_digits
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, random_state=0)
lr = LogisticRegression().fit(X_train, y_train)
pred = lr.predict(X_test)
print("accuracy: %0.3f" % accuracy_score(y_test, pred))
print("confusion matrix:")
print(confusion_matrix(y_test, pred))
# + hide_input=false
# Heatmap of the 10x10 digits confusion matrix
plt.figure()
scores_image = mglearn.tools.heatmap(confusion_matrix(y_test, pred), xlabel='Predicted label', ylabel='True label',
                                     xticklabels=digits.target_names, yticklabels=digits.target_names,
                                     cmap=plt.cm.gray_r, fmt="%d")
plt.title("Confusion matrix")
plt.gca().invert_yaxis()
# -
print(classification_report(y_test, pred))
# micro averages over all samples; macro averages the per-class scores
print("micro average f1 score: %f" % f1_score(y_test, pred, average="micro"))
print("macro average f1 score: %f" % f1_score(y_test, pred, average="macro"))
# ## Using evaluation metrics in model selection
# The sklearn.cross_validation module was removed in scikit-learn 0.20;
# cross_val_score lives in sklearn.model_selection, which this file already
# uses elsewhere.
from sklearn.model_selection import cross_val_score
# default scoring for classification is accuracy
print("default scoring ", cross_val_score(SVC(), X, y))
# providing scoring="accuracy" doesn't change the results
explicit_accuracy = cross_val_score(SVC(), digits.data, digits.target == 9, scoring="accuracy")
print("explicit accuracy scoring ", explicit_accuracy)
roc_auc = cross_val_score(SVC(), digits.data, digits.target == 9, scoring="roc_auc")
print("AUC scoring ", roc_auc)
# +
from sklearn.model_selection import GridSearchCV
# back to the bank campaign
X = data.drop("target", axis=1).values
y = data.target.values
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=.1, test_size=.1, random_state=0)
# we provide a somewhat bad grid to illustrate the point:
param_grid = {'gamma': [0.0001, 0.01, 0.1, 1, 10]}
# using the default scoring of accuracy:
grid = GridSearchCV(SVC(), param_grid=param_grid)
grid.fit(X_train, y_train)
print("Grid-Search with accuracy")
print("Best parameters:", grid.best_params_)
print("Best cross-validation score (accuracy)):", grid.best_score_)
print("Test set AUC: %.3f" % roc_auc_score(y_test, grid.decision_function(X_test)))
print("Test set accuracy %.3f: " % grid.score(X_test, y_test))
# using AUC scoring instead:
# Selecting by AUC can pick a different gamma than selecting by accuracy --
# the point of this comparison
grid = GridSearchCV(SVC(), param_grid=param_grid, scoring="roc_auc")
grid.fit(X_train, y_train)
print("\nGrid-Search with AUC")
print("Best parameters:", grid.best_params_)
print("Best cross-validation score (AUC):", grid.best_score_)
print("Test set AUC: %.3f" % roc_auc_score(y_test, grid.decision_function(X_test)))
print("Test set accuracy %.3f: " % grid.score(X_test, y_test))
# -
# sklearn.metrics.scorer is a private module that was removed in
# scikit-learn 0.24; get_scorer_names() (added in 1.0) is the public way to
# list the valid `scoring` strings.
from sklearn.metrics import get_scorer_names
print(sorted(get_scorer_names()))
# +
def my_scoring(fitted_estimator, X_test, y_test):
    """Custom scorer: the fraction of test samples predicted correctly."""
    predictions = fitted_estimator.predict(X_test)
    return (predictions == y_test).mean()

GridSearchCV(SVC(), param_grid, scoring=my_scoring)
# -
# # Exercises
# Load the adult dataset from ``data/adult.csv``, and split it into training and test set.
# Apply grid-search to the training set, searching for the best C for Logistic Regression, also search over L1 penalty vs L2 penalty.
# Plot the ROC curve of the best model on the test set.
# get dummy variables, needed for scikit-learn models on categorical data:
# NOTE(review): this assumes `data` has been re-loaded from data/adult.csv
# per the exercise above -- the bank-campaign frame loaded earlier has no
# "income" column.
X = pd.get_dummies(data.drop("income", axis=1))
y = data.income == " >50K"
| 03.2 Evaluation Metrics.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Stack Semantics in Trax: Ungraded Lab
# %% [markdown]
# In this ungraded lab, we will explain the stack semantics in Trax. This will help in understanding how to use layers like `Select` and `Residual` which operates on elements in the stack. If you've taken a computer science class before, you will recall that a stack is a data structure that follows the Last In, First Out (LIFO) principle. That is, whatever is the latest element that is pushed into the stack will also be the first one to be popped out. If you're not yet familiar with stacks, then you may find this [short tutorial](https://www.tutorialspoint.com/python_data_structure/python_stack.htm) useful. In a nutshell, all you really need to remember is it puts elements one on top of the other. You should be aware of what is on top of the stack to know which element you will be popping. You will see this in the discussions below. Let's get started!
# %% [markdown]
# ## Imports
# %% tags=[]
import numpy as np # regular ol' numpy
from trax import layers as tl # core building block
from trax import shapes # data signatures: dimensionality and type
from trax import fastmath # uses jax, offers numpy on steroids
# %% [markdown]
# ## 1. The tl.Serial Combinator is Stack Oriented.
#
# To understand how stack-orientation works in [Trax](https://trax-ml.readthedocs.io/en/latest/), most times one will be using the `Serial` layer. We will define two simple [Function layers](https://trax-ml.readthedocs.io/en/latest/notebooks/layers_intro.html?highlight=fn#With-the-Fn-layer-creating-function.): 1) Addition and 2) Multiplication.
#
# Suppose we want to make the simple calculation (3 + 4) * 15 + 3. `Serial` will perform the calculations in the following manner `3` `4` `add` `15` `mul` `3` `add`. The steps of the calculation are shown in the table below. The first column shows the operations made on the stack and the second column the output of those operations. Moreover, the rightmost element in the second column represents the top of the stack (e.g. in the second row, `Push(3)` pushes `3 ` on top of the stack and `4` is now under it).
#
# <div style="text-align:center" width="50px"><img src="Stack1.png" /></div>
#
# After processing all the stack contains 108 which is the answer to our simple computation.
#
# From this, the following can be concluded: a stack-based layer has only one way to handle data, by taking one piece of data from atop the stack, termed popping, and putting data back atop the stack, termed pushing. Any expression that can be written conventionally, can be written in this form and thus be amenable to being interpreted by a stack-oriented layer like `Serial`.
# %% [markdown]
# ### Coding the example in the table:
# %% [markdown]
# **Defining addition**
# %% tags=[]
def Addition():
    """Build a Trax Fn layer that pops two stack elements and pushes their sum."""
    layer_name = "Addition"  # don't forget to give your custom layer a name to identify

    # Custom function for the custom layer
    def func(x, y):
        return x + y

    return tl.Fn(layer_name, func)


# Test it
add = Addition()

# Inspect properties (n_in/n_out are reported by the Fn layer)
print("-- Properties --")
print("name :", add.name)
print("expected inputs :", add.n_in)
print("promised outputs :", add.n_out, "\n")

# Inputs
x = np.array([3])
y = np.array([4])
print("-- Inputs --")
print("x :", x, "\n")
print("y :", y, "\n")

# Outputs: the layer consumes the (x, y) pair and pushes x + y
z = add((x, y))
print("-- Outputs --")
print("z :", z)
# %% [markdown]
# **Defining multiplication**
# %% tags=[]
def Multiplication():
    """Build a Trax `Fn` layer named "Multiplication" that multiplies its two inputs."""

    def mul_fn(a, b):
        # Pops two values from the stack and pushes their product.
        return a * b

    return tl.Fn("Multiplication", mul_fn)
# Test it: instantiate the custom layer.
mul = Multiplication()
# Inspect properties: n_in/n_out are derived from the wrapped function.
print("-- Properties --")
print("name :", mul.name)
print("expected inputs :", mul.n_in)
print("promised outputs :", mul.n_out, "\n")
# Inputs: 7 * 15 is easy to verify by eye.
x = np.array([7])
y = np.array([15])
print("-- Inputs --")
print("x :", x, "\n")
print("y :", y, "\n")
# Outputs: the layer is called with a tuple of its inputs.
z = mul((x, y))
print("-- Outputs --")
print("z :", z)
# %% [markdown]
# **Implementing the computations using Serial combinator.**
# %% tags=[]
# Serial combinator: computes (3 + 4) * 15 + 3 on the stack, left to right.
serial = tl.Serial(
    Addition(), Multiplication(), Addition()  # add 3+4, multiply the sum by 15, then add 3
)
# Initialization: the inputs are pushed onto the stack in tuple order.
x = (np.array([3]), np.array([4]), np.array([15]), np.array([3]))  # input
serial.init(shapes.signature(x))  # initializing serial instance
print("-- Serial Model --")
print(serial, "\n")
print("-- Properties --")
print("name :", serial.name)
print("sublayers :", serial.sublayers)
print("expected inputs :", serial.n_in)
print("promised outputs :", serial.n_out, "\n")
# Inputs
print("-- Inputs --")
print("x :", x, "\n")
# Outputs: expected value is 108.
y = serial(x)
print("-- Outputs --")
print("y :", y)
# %% [markdown]
# The example with the two simple addition and multiplication functions that were coded together with the serial combinator shows how stack semantics work in `Trax`.
# %% [markdown]
# ## 2. The tl.Select combinator in the context of the Serial combinator
# %% [markdown]
# Having understood how stack semantics work in `Trax`, we will demonstrate how the [tl.Select](https://trax-ml.readthedocs.io/en/latest/trax.layers.html?highlight=select#trax.layers.combinators.Select) combinator works.
# %% [markdown]
# ### First example of tl.Select
# %% [markdown]
# Suppose we want to make the simple calculation (3 + 4) * 3 + 4. We can use `Select` to perform the calculations in the following manner:
#
# 1. `4`
# 2. `3`
# 3. `tl.Select([0,1,0,1])`
# 4. `add`
# 5. `mul`
# 6. `add`.
#
# The `tl.Select` requires a list or tuple of 0-based indices to select elements relative to the top of the stack. For our example, the top of the stack is `3` (which is at index 0) then `4` (index 1) and we Select to add in an ordered manner to the top of the stack which after the command is `3` `4` `3` `4`. The steps of the calculation for our example are shown in the table below. As in the previous table each column shows the contents of the stack and the outputs after the operations are carried out.
#
# <div style="text-align:center" width="20px"><img src="Stack2.png" /></div>
#
# After processing all the inputs the stack contains 25 which is the answer we get above.
# %% tags=[]
# (3 + 4) * 3 + 4: Select duplicates the two inputs so each can be used twice.
serial = tl.Serial(tl.Select([0, 1, 0, 1]), Addition(), Multiplication(), Addition())
# Initialization
x = (np.array([3]), np.array([4]))  # input
serial.init(shapes.signature(x))  # initializing serial instance
print("-- Serial Model --")
print(serial, "\n")
print("-- Properties --")
print("name :", serial.name)
print("sublayers :", serial.sublayers)
print("expected inputs :", serial.n_in)
print("promised outputs :", serial.n_out, "\n")
# Inputs
print("-- Inputs --")
print("x :", x, "\n")
# Outputs: expected value is 25.
y = serial(x)
print("-- Outputs --")
print("y :", y)
# %% [markdown]
# ### Second example of tl.Select
# %% [markdown]
# Suppose we want to make the simple calculation (3 + 4) * 4. We can use `Select` to perform the calculations in the following manner:
#
# 1. `4`
# 2. `3`
# 3. `tl.Select([0,1,0,1])`
# 4. `add`
# 5. `tl.Select([0], n_in=2)`
# 6. `mul`
#
# The example is a bit contrived but it demonstrates the flexibility of the command. The second `tl.Select` pops two elements (specified in n_in) from the stack starting from index 0 (i.e. top of the stack). This means that `7` and `3` will be popped out (because `n_in = 2`) but only `7` is placed back on top because it only selects `[0]`. As in the previous table each column shows the contents of the stack and the outputs after the operations are carried out.
#
# <div style="text-align:center" width="20px"><img src="Stack3.png" /></div>
#
# After processing all the inputs the stack contains 28 which is the answer we get above.
# %% tags=[]
# (3 + 4) * 4: the second Select pops two items (n_in=2) and keeps only index 0.
serial = tl.Serial(
    tl.Select([0, 1, 0, 1]), Addition(), tl.Select([0], n_in=2), Multiplication()
)
# Initialization
x = (np.array([3]), np.array([4]))  # input
serial.init(shapes.signature(x))  # initializing serial instance
print("-- Serial Model --")
print(serial, "\n")
print("-- Properties --")
print("name :", serial.name)
print("sublayers :", serial.sublayers)
print("expected inputs :", serial.n_in)
print("promised outputs :", serial.n_out, "\n")
# Inputs
print("-- Inputs --")
print("x :", x, "\n")
# Outputs: expected value is 28.
y = serial(x)
print("-- Outputs --")
print("y :", y)
# %% [markdown]
# **In summary, what Select does in this example is a copy of the inputs in order to be used further along in the stack of operations.**
# %% [markdown]
# ## 3. The tl.Residual combinator in the context of the Serial combinator
# %% [markdown]
# ### tl.Residual
# %% [markdown]
# [Residual networks](https://arxiv.org/pdf/1512.03385.pdf) are frequently used to make deep models easier to train and you will be using it in the assignment as well. Trax already has a built in layer for this. The [Residual layer](https://trax-ml.readthedocs.io/en/latest/trax.layers.html?highlight=residual#trax.layers.combinators.Residual) computes the element-wise *sum* of the *stack-top* input with the output of the layer series. Let's first see how it is used in the code below:
# %% tags=[]
# Let's define a Serial network
serial = tl.Serial(
    # Practice using Select again by duplicating the first two inputs
    tl.Select([0, 1, 0, 1]),
    # Residual adds the stack-top input to the output of the wrapped layer
    # (a skip connection around Fn: Addition()).
    tl.Residual(Addition())
)
print("-- Serial Model --")
print(serial, "\n")
print("-- Properties --")
print("name :", serial.name)
print("expected inputs :", serial.n_in)
print("promised outputs :", serial.n_out, "\n")
# %% [markdown]
# Here, we use the Serial combinator to define our model. The inputs first go through a `Select` layer, followed by a `Residual` layer which passes the `Fn: Addition()` layer as an argument. What this means is the `Residual` layer will take the stack top input at that point and add it to the output of the `Fn: Addition()` layer. You can picture it like the diagram below, where `x1` and `x2` are the inputs to the model:
# %% [markdown]
# <img src="residual_example_add.png" width="400"/></div>
# %% [markdown]
# Now, let's try running our model with some sample inputs and see the result:
# %%
# Inputs
x1 = np.array([3])
x2 = np.array([4])
print("-- Inputs --")
print("(x1, x2) :", (x1, x2), "\n")
# Outputs: Residual gives 3 + (3 + 4) = 10; the two duplicated inputs
# left on the stack by Select are also part of the returned tuple.
y = serial((x1, x2))
print("-- Outputs --")
print("y :", y)
# %% [markdown]
# As you can see, the `Residual` layer remembers the stack top input (i.e. `3`) and adds it to the result of the `Fn: Addition()` layer (i.e. `3 + 4 = 7`). The output of `Residual(Addition())` is then `3 + 7 = 10` and is pushed onto the stack.
#
# On a different note, you'll notice that the `Select` layer has 4 outputs but the `Fn: Addition()` layer only pops 2 inputs from the stack. This means the duplicate inputs (i.e. the 2 rightmost arrows of the `Select` outputs in the figure above) remain in the stack. This is why you still see it in the output of our simple serial network (i.e. `array([3]), array([4])`). This is useful if you want to use these duplicate inputs in another layer further down the network.
# %% [markdown]
# ### Modifying the network
# %% [markdown]
# To strengthen your understanding, you can modify the network above and examine the outputs you get. For example, you can pass the `Fn: Multiplication()` layer instead in the `Residual` block:
# %%
# model definition: same network shape, but the Residual skips over Multiplication
serial = tl.Serial(
    tl.Select([0, 1, 0, 1]),
    tl.Residual(Multiplication())
)
print("-- Serial Model --")
print(serial, "\n")
print("-- Properties --")
print("name :", serial.name)
print("expected inputs :", serial.n_in)
print("promised outputs :", serial.n_out, "\n")
# %% [markdown]
# This means you'll have a different output that will be added to the stack top input saved by the Residual block. The diagram becomes like this:
# %% [markdown]
# <img src="residual_example_multiply.png" width="400"/></div>
#
# %% [markdown]
# And you'll get `3 + (3 * 4) = 15` as output of the `Residual` block:
# %%
# Inputs
x1 = np.array([3])
x2 = np.array([4])
print("-- Inputs --")
print("(x1, x2) :", (x1, x2), "\n")
# Outputs: Residual gives 3 + (3 * 4) = 15, plus the duplicated inputs.
y = serial((x1, x2))
print("-- Outputs --")
print("y :", y)
# %% [markdown]
# #### Congratulations! In this lab, we described how stack semantics work with Trax layers such as Select and Residual. You will be using these in the assignment and you can go back to this lab in case you want to review its usage.
# %%
| Natural Language Processing with Attention Models/Week 1 - Neural Machine Translation/C4_W1_Ungraded_Lab_Stack_Semantics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
np.random.seed(42)  # fix the RNG so the random centroid initialization is reproducible
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from plotting import plot  # project-local plotting helper
# +
dataset = load_iris()
# Keep only the first two features so the clusters can be drawn in 2D.
x = dataset.data[:, :2]
y = dataset.target
# -
class KMeans:
    """Minimal K-Means clustering (Lloyd's algorithm).

    Attributes:
        n_clusters: number of clusters k.
        max_iter: upper bound on the number of Lloyd iterations.
        centroids_: (n_clusters, n_features) float array after fit(), else None.
    """

    def __init__(self, n_clusters: int = 8, max_iter: int = 3_000):
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.centroids_ = None

    def _closest_centroid(self, xi: np.ndarray) -> int:
        """Return the index of the centroid nearest to sample xi (Euclidean)."""
        distances = np.linalg.norm(self.centroids_ - xi, axis=1)
        return int(np.argmin(distances))

    def fit(self, x: np.ndarray) -> None:
        """Run Lloyd's algorithm on x of shape (n_samples, n_features)."""
        # Initialize centroids with k distinct random samples.
        rand_idxs = np.random.permutation(len(x))[:self.n_clusters]
        # astype(float) both copies (so centroid updates never alias x) and
        # avoids silent truncation when x has an integer dtype.
        self.centroids_ = x[rand_idxs].astype(float)
        for _ in range(self.max_iter):
            self.assignment = {
                i: [] for i in range(self.n_clusters)
            }
            # Step 1: Assignment - attach each sample to its nearest centroid.
            for xi in x:
                self.assignment[self._closest_centroid(xi)].append(xi)
            # Step 2: Update - move each centroid to the mean of its members.
            old_centroids = self.centroids_.copy()
            for cluster_idx in range(self.n_clusters):
                # Empty clusters keep their previous centroid.
                if len(self.assignment[cluster_idx]) > 0:
                    self.centroids_[cluster_idx] = np.mean(self.assignment[cluster_idx], axis=0)
            # Converged: no centroid moved, so further iterations are no-ops.
            if np.allclose(old_centroids, self.centroids_):
                break

    def predict(self, x: np.ndarray):
        """Return the nearest-centroid index for each sample in x (float array)."""
        y_pred = np.zeros(shape=(len(x),))
        for idx, xi in enumerate(x):
            y_pred[idx] = self._closest_centroid(xi)
        return y_pred

    def score(self, x: np.ndarray) -> float:
        """Return the negative inertia of x under the fitted centroids.

        Inertia is the sum of squared Euclidean distances from each sample to
        its nearest centroid; the negation follows the sklearn convention that
        higher scores are better. (Replaces a previously unimplemented stub.)
        """
        inertia = sum(
            np.linalg.norm(xi - self.centroids_[self._closest_centroid(xi)]) ** 2
            for xi in x
        )
        return -float(inertia)
# +
# Fit 3 clusters (iris has 3 species) and label every sample.
kmeans = KMeans(n_clusters=3, max_iter=1_000)
kmeans.fit(x)
y_pred = kmeans.predict(x).astype(int)  # predict() returns floats; cast for indexing
print(kmeans.centroids_)
print(y_pred)
# +
# Ground-truth species, colored per class.
colors = ["red", "green", "blue"]
for idx, point in enumerate(x):
    plt.scatter(point[0], point[1], color=colors[y[idx]])
plt.show()
# +
# Predicted clusters; note cluster indices are arbitrary, so colors may not
# match the ground-truth plot even when the grouping is correct.
colors = ["red", "green", "blue"]
for idx, point in enumerate(x):
    plt.scatter(point[0], point[1], color=colors[y_pred[idx]], marker='o')
for centroid in kmeans.centroids_:
    plt.scatter(centroid[0], centroid[1], color="black", marker="*", s=200)
plt.show()
# -
plot(x, y, y_pred, kmeans.centroids_, kmeans)
| Chapter10_Clustering/KMeansImplementation/KMeans_Implementation_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## **Serial Dilution then Agar Spot**
#
# #### Deck Layout:
# 1. TipBox.200uL.Corning-4864.orangebox
# 2. Empty
# 3. Reservoir.12col.Agilent-201256-100.BATSgroup *(media in columns 1 and 2, refill between each run)*
# 4. Empty
# 5. Plate.96.Corning-3635.ClearUVAssay *(source plate, source for each half plate in adjacent columns)*
# 6. Plate.96.Corning-3635.ClearUVAssay *(dilution plate, empty at start)*
# 7. TipBox.50uL.Axygen-EV-50-R-S.tealbox
# 8. AgarPlate.40mL.OmniTray-242811.ColonyPicker
#
# #### Steps:
# **1. Dispense Diluent into Whole Plate** --> dilute_then_spot_STEP1.hso
# * Place media in columns 1 and 2 of 96 deep well or 12 channel reservoir, be sure to refill this media between runs
#
# **2. Complete All Serial Dilutions** --> dilute_then_spot_STEP2.hso
# * Stock for first half of plate comes from column specified in 'stock_start_column' variable below.
# * Stock for second half of plate comes from column ('stock_start_column'+1)
# * Serial Dilutions are 10 fold dilutions over 6 columns. NOTE: First column is pure undiluted stock.
#
# **3. Spot All Dilutions onto Agar Plate** --> dilute_then_spot_STEP3.hso
# * Aspirates 10ul, dispenses 3.5uL onto agar plate at pre-calculated z-height
# +
from liquidhandling import SoloSoft, SoftLinx
from liquidhandling import * # replace with plate types used
#* Program Variables ------------------
stock_start_column = 1
num_plate_halves = 2 #(1 or 2)
spot_z_shift = 1.8  # z offset (mm) used when dispensing spots onto the agar surface
# # OR uncomment this and fill in first 2 variables to have z shift calculated for you - - - - - - - - - - -
# caliper_mm = 9
# measure_assist_mm = 1.6 # this is the height of the blue plastic used to aid in caliper measurement
# actual_plate_height_mm = 14.3
# plate_height_in_db = 15
# well_depth_in_db = 10
# spot_z_shift = round((actual_plate_height_mm - (caliper_mm - measure_assist_mm)) - (plate_height_in_db - well_depth_in_db), 1)
# print(spot_z_shift)
# # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
media_aspirate_column = 1
first_column_transfer_volume = 100
dilution_media_volume = 90
dilution_transfer_volume = 10
stock_mix_volume = 50
dilution_mix_volume = 30 # 50uL tips used
num_mixes = 5
pre_spot_mix_volume = 30
default_z_shift = 2
pre_spot_aspirate_volume = 10
spot_volume = 3.5
#* ---------------------------------------
# Build the STEP 1 pipeline; the plateList positions mirror the deck layout
# documented at the top of this notebook.
soloSoft = SoloSoft(
    filename="modular_dilute_then_spot_STEP1.hso",
    plateList=[
        "TipBox.200uL.Corning-4864.orangebox",
        "Empty",
        "Reservoir.12col.Agilent-201256-100.BATSgroup",
        "Empty",
        "Plate.96.Corning-3635.ClearUVAssay",
        "Plate.96.Corning-3635.ClearUVAssay",
        "TipBox.50uL.Axygen-EV-50-R-S.tealbox",
        "AgarPlate.40mL.OmniTray-242811.ColonyPicker",
    ],
)
# * STEP 1: Dispense diluent into whole plate -> MUST CHECK/REFILL BETWEEN CREATING EACH PLATE -------------
# Don't dispense diluent into first column of each half plate -> will be pure stock
soloSoft.getTip()
for i in range(1,num_plate_halves + 1):
    print("Dispensing Diluent -> plate half: " + str(i))
    # Columns 2-6 of each half get diluent; columns 1 and 7 stay empty for pure stock.
    for j in range((6*(i-1)+2), (6*(i-1)+7)):
        print("\tDispensing Diluent: media_column: " + str(media_aspirate_column) + "-> column: " + str(j))
        soloSoft.aspirate(
            position="Position3",
            aspirate_volumes=Reservoir_12col_Agilent_201256_100_BATSgroup().setColumn(media_aspirate_column, dilution_media_volume),
            aspirate_shift=[0, 0, 4],
        )
        soloSoft.dispense(
            position="Position6",
            dispense_volumes=Plate_96_Corning_3635_ClearUVAssay().setColumn(j, dilution_media_volume),
            dispense_shift=[0, 0, default_z_shift],
        )
    # Media column 1 feeds the first half, column 2 the second half.
    media_aspirate_column += 1
soloSoft.shuckTip()
soloSoft.savePipeline()
# * STEP2 SERIAL DILUTION FOR BOTH HALVES OF THE PLATE ----------------------------------------------------------------
soloSoft = SoloSoft(
    filename="modular_dilute_then_spot_STEP2.hso",
    plateList=[
        "TipBox.200uL.Corning-4864.orangebox",
        "Empty",
        "Reservoir.12col.Agilent-201256-100.BATSgroup",
        "Empty",
        "Plate.96.Corning-3635.ClearUVAssay",
        "Plate.96.Corning-3635.ClearUVAssay",
        "TipBox.50uL.Axygen-EV-50-R-S.tealbox",
        "AgarPlate.40mL.OmniTray-242811.ColonyPicker",
    ],
)
# One pass per plate half; the same transfer pattern serves both halves.
for i in range(1, num_plate_halves + 1):
    # set up first column of dilution plate -> pure stock, no dilution (100uL transfer volume)
    soloSoft.getTip()  # 200uL tips
    soloSoft.aspirate(
        position="Position5",
        aspirate_volumes=Plate_96_Corning_3635_ClearUVAssay().setColumn(
            stock_start_column, first_column_transfer_volume
        ),
        aspirate_shift=[0, 0, default_z_shift],
        mix_at_start=True,
        mix_volume=stock_mix_volume,
        mix_cycles=num_mixes,
        dispense_height=default_z_shift,
    )
    soloSoft.dispense(
        position="Position6",
        dispense_volumes=Plate_96_Corning_3635_ClearUVAssay().setColumn(
            (6 * (i - 1)) + 1, first_column_transfer_volume
        ),
        dispense_shift=[0, 0, default_z_shift],
        mix_at_finish=True,
        mix_volume=dilution_mix_volume,
        mix_cycles=num_mixes,
        aspirate_height=default_z_shift,
    )
    print("\nPrepare the first dilution column: ")
    print(
        "\t From clear UV column ( "
        + str(stock_start_column)
        + " ) to clear dilution UV column ( "
        + str((6 * (i - 1)) + 1)
        + " )"
    )
    print("Diluting: ")
    soloSoft.getTip("Position7")  # 50uL tips for 10uL transfers
    # 10-fold serial dilution: carry 10uL into 90uL of diluent, column to column.
    for j in range(1,6):  # 1,2,3,4,5
        soloSoft.aspirate(
            position="Position6",
            aspirate_volumes=Plate_96_Corning_3635_ClearUVAssay().setColumn(
                (6 * (i - 1)) + j, dilution_transfer_volume
            ),
            aspirate_shift=[0, 0, default_z_shift],
            mix_at_start=True,
            mix_cycles=num_mixes,
            mix_volume=dilution_mix_volume,
            dispense_height=default_z_shift,
        )
        soloSoft.dispense(
            position="Position6",
            dispense_volumes=Plate_96_Corning_3635_ClearUVAssay().setColumn(
                (6 * (i - 1)) + j + 1, dilution_transfer_volume
            ),
            dispense_shift=[0, 0, default_z_shift],
            mix_at_finish=True,
            mix_cycles=num_mixes,
            mix_volume=dilution_mix_volume,
            aspirate_height=default_z_shift,
        )
        print(
            "\t Dilute: From clear UV column ( "
            + str((6 * (i - 1)) + j)
            + " ) to clear UV column ( "
            + str((6 * (i - 1)) + j + 1)
            + " )"
        )
    stock_start_column += 1  # make sure to draw from the next culture stock column for the next half of the plate.
soloSoft.shuckTip()
soloSoft.savePipeline()
# * STEP 3 SPOT ALL DILUTIONS -----------------------------------------------------------------------------
soloSoft = SoloSoft(
    filename="modular_dilute_then_spot_STEP3.hso",
    plateList=[
        "TipBox.200uL.Corning-4864.orangebox",
        "Empty",
        "Reservoir.12col.Agilent-201256-100.BATSgroup",
        "Empty",
        "Plate.96.Corning-3635.ClearUVAssay",
        "Plate.96.Corning-3635.ClearUVAssay",
        "TipBox.50uL.Axygen-EV-50-R-S.tealbox",
        "AgarPlate.40mL.OmniTray-242811.ColonyPicker",
    ],
)
print("Spotting: ")
# Spot every dilution column (6 per plate half) onto the matching agar column.
for i in range(1, (6*(num_plate_halves))+1):
    soloSoft.getTip("Position7")
    soloSoft.aspirate(  # mix before aspirating the 3.5 uL
        position="Position6",
        aspirate_volumes=Plate_96_Corning_3635_ClearUVAssay().setColumn(i, pre_spot_aspirate_volume),
        aspirate_shift=[0,0,default_z_shift],
        mix_at_start=True,
        mix_volume=pre_spot_mix_volume,
        dispense_height=default_z_shift,
        mix_cycles=num_mixes,
    )
    soloSoft.dispense(
        position="Position8",
        dispense_volumes=AgarPlate_40mL_OmniTray_242811_ColonyPicker().setColumn(
            i, spot_volume
        ),
        # spot_z_shift positions the tip just above the agar surface.
        dispense_shift=[0, 0, spot_z_shift],
    )
    print(
        "Dilution Plate Column ( "
        + str(i)
        + " ) -> Agar Plate Column ( "
        + str(i)
        + " )"
    )
soloSoft.shuckTip()
soloSoft.savePipeline()
# LOAD PROTOCOL STEPS 1-3 IN SOFTLINX
softLinx = SoftLinx("Modular Dilute then Spot Steps 1-3", "modular_dilute_then_spot.slvp")
softLinx.soloSoftRun("C:\\Users\\svcaibio\\Dev\\liquidhandling\\example\\other_protocols\\dilute_then_spot\\modular_dilute_then_spot_STEP1.hso")
softLinx.soloSoftRun("C:\\Users\\svcaibio\\Dev\\liquidhandling\\example\\other_protocols\\dilute_then_spot\\modular_dilute_then_spot_STEP2.hso")
softLinx.soloSoftRun("C:\\Users\\svcaibio\\Dev\\liquidhandling\\example\\other_protocols\\dilute_then_spot\\modular_dilute_then_spot_STEP3.hso")
softLinx.saveProtocol()
# -
| example/other_protocols/dilute_then_spot/modular_dilute_then_spot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import the library
import pandas as pd
titanic = pd.read_csv('./titanic.csv')
titanic.head()
# explore the data to estimate if we have enough (statistically relevant) data for both classes
titanic.groupby('Survived').count()
# We drop clearly irrelevant attributes. Pay attention for bias! Don't let your own opinion play.
titanic = titanic.drop(['PassengerId','Name','Ticket','Fare','Cabin','Embarked'],axis=1)
titanic.head()
# +
print('Before')
print(titanic.count())
print()
# drop all lines that contain empty (null or NaN) values
titanic = titanic.dropna()
print('After')
print(titanic.count())
# -
# see what remains
titanic.groupby('Survived').count()
import numpy as np
# Encode Sex numerically via lexicographic comparison: 'male' >= 'male' -> 1,
# 'female' >= 'male' is False -> 2. NOTE(review): this relies on the column
# containing only 'male'/'female'; a simple == 'male' test would be clearer.
titanic['Sex'] = np.where(titanic['Sex']>='male', 1, 2)
titanic.head()
# One-hot encode the numeric Sex codes into Sex_1 / Sex_2 indicator columns.
titanic = pd.get_dummies(titanic, columns=["Sex"], prefix=["Sex"])
titanic.head()
from sklearn.model_selection import train_test_split
X = titanic.drop('Survived',axis=1)
y = titanic['Survived']
# NOTE(review): no random_state, so the split (and the accuracy below) varies per run.
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.30)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100)
model.fit(X_train, y_train)
y_test2 = model.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_test2)
print(X_train.columns)
print(model.feature_importances_)
# we now combine those two collections into a dataframe
pd.DataFrame(model.feature_importances_,columns=['Importance'],index=X_train.columns).sort_values(by='Importance',ascending=False)
| provisioning/files/Data_analysis/machine_learning/notebook-files/4. Titanic random forest-onehotencoded.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `pandas` Part 4: Grouping and Sorting
#
# # Learning Objectives
# ## By the end of this tutorial you will be able to:
# 1. Group data with `groupby()`
# 2. Sort data with `sort_values()`
#
#
# ## Files Needed for this lesson: `winemag-data-130k-v2.csv`
# >- Download this csv from Canvas prior to the lesson
#
# ## The general steps to working with pandas:
# 1. import pandas as pd
# 2. Create or load data into a pandas DataFrame or Series
# 3. Reading data with `pd.read_`
# >- Excel files: `pd.read_excel('fileName.xlsx')`
# >- Csv files: `pd.read_csv('fileName.csv')`
# >- Note: if the file you want to read into your notebook is not in the same folder you can do one of two things:
# >>- Move the file you want to read into the same folder/directory as the notebook
# >>- Type out the full path into the read function
# 4. After steps 1-3 you will want to check out your DataFrame
# >- Use `shape` to see how many records and columns are in your DataFrame
# >- Use `head()` to show the first 5-10 records in your DataFrame
# Type-along narration: https://youtu.be/gDDqmK5J5Ak
# # Analytics Project Framework Notes
# ## A complete and thorough analytics project will have 3 main areas
# 1. Descriptive Analytics: tells us what has happened or what is happening.
# >- The focus of this lesson is how to do this in python.
# >- Many companies are at this level but not much more than this
# >- Descriptive statistics (mean, median, mode, frequencies)
# >- Graphical analysis (bar charts, pie charts, histograms, box-plots, etc)
# 2. Predictive Analytics: tells us what is likely to happen next
# >- Less companies are at this level but are slowly getting there
# >- Predictive statistics ("machine learning (ML)" using regression, multi-way frequency analysis, etc)
# >- Graphical analysis (scatter plots with regression lines, decision trees, etc)
# 3. Prescriptive Analytics: tells us what to do based on the analysis
# >- Synthesis and Report writing: executive summaries, data-based decision making
# >- No analysis is complete without a written report with at least an executive summary
# >- Communicate results of analysis to both non-technical and technical audiences
# # Descriptive Analytics Using `pandas`
# # Initial set-up steps
# 1. import modules and check working directory
# 2. Read data in
# 3. Check the data
# # Step 2 Read Data Into a DataFrame with `read_csv()`
# >- file name: `winemag-data-130k-v2.csv`
# >- Set the index to column 0
# ### Check how many rows, columns, and data points are in the `wine_reviews` DataFrame
# >- Use `shape` and indices to define variables
# >- We can store the values for rows and columns in variables if we want to access them later
# ### Check a couple of rows of data
# # Descriptive Analytics with `groupby()`
# >- General syntax: dataFrame.groupby(['fields to group by']).fieldsToanalyze.aggregation
# ### Now, what is/are the question(s) being asked of the data?
# >- All analytics projects start with questions (from you, your boss, some decision maker, etc)
# ### How many wines have been rated at each point value?
# ### How much does the least expensive wine for each point rating cost?
# ### Question: How much does the most expensive wine for each point rating cost?
# ### What is the overall maximum price for all wines?
# ### What is the lowest price for a wine rating of 100?
# ### What is the highest price for a wine rating of 80?
# ### What is the maximum rating for each country?
# ### What is the maximum rating for China?
# ##### Another way to get the maximum rating for China combining `where` and `groupby`
# ### What are some summary stats for price for each country?
# >- Using the `agg()` function for specific summary stats
# >>- What is the sample size?
# >>- What is the minimum?
# >>- What is the maximum?
# >>- What is the mean?
# >>- What is the median?
# >>- What is the standard deviation?
# ## What are the descriptive analytics for country and province?
# >- We can group by multiple fields by adding more to our groupby() function
# ## What are the descriptive price analytics for the US?
# >- Add `get_group()` syntax
# ## What are the summary wine rating stats for Colorado?
# >- Note that states are coded in this dataset under province
# # Sorting Results
# >- Add sort_values() syntax
# >- Default is ascending order
# ## What are the summary stats for points for each country?
# >- Sort the results from lowest to highest mean points
# ### To sort in descending order...
# >- Use ascending = False
| Week 10/Pandas_Part4_Grouping_Student.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
data = pd.read_csv('features_10_sec.csv')
data.head()
# Keep only rows for the 10 genres and drop non-feature columns.
dataset = data[data['label'].isin(['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock'])].drop(['filename','length'],axis=1)
# iloc[:, :-15] drops the last 15 columns (presumably a subset of MFCC
# features plus the label) -- TODO confirm against the CSV schema.
dataset.iloc[:, :-15].head()
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
# Integer-encode the genre labels (last column).
y = LabelEncoder().fit_transform(dataset.iloc[:,-1])
y.shape
# Standardize the selected feature columns to zero mean / unit variance.
X = StandardScaler().fit_transform(np.array(dataset.iloc[:, :-15], dtype = float))
X.shape
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20,random_state=42)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
import tensorflow.keras as keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout, BatchNormalization
# defining our model (named "regression" but it is a 10-class classifier)
n_cols = dataset.iloc[:, :-15].shape[1]
def regression_model_1():
    """Build and compile the 10-genre classifier (Dense/BatchNorm/Dropout stack).

    NOTE(review): despite the name, this is a softmax classifier trained with
    sparse categorical cross-entropy, not a regression model.
    """
    model = Sequential()
    # First hidden layer also fixes the input dimensionality.
    model.add(Dense(256, activation='relu', input_shape=(n_cols,)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    # Three more identically-regularized hidden layers, halving the width each time.
    for units in (128, 64, 32):
        model.add(Dense(units, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
    # 10-way softmax output, one unit per genre.
    model.add(Dense(10, activation='softmax'))
    # compile model
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
# Stop training after 10 epochs without improvement.
earlystop = EarlyStopping(patience=10)
# Lower the learning rate when validation accuracy plateaus for 10 epochs.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience=10,
                                            verbose=1,
                                            )
Callbacks = [earlystop, learning_rate_reduction]
# +
#build the model
model_1 = regression_model_1()
#fit the model (the test split doubles as the validation set here)
model_1.fit(X_train,y_train, callbacks=Callbacks , validation_data=(X_test,y_test) ,epochs=100,batch_size=150)
# -
model_1.save('Keras_reg_10sec_10.h5')
| Project_ML_STCET/GTZAN_MGD/Keras_reg_10sec_10 .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow-GPU
# language: python
# name: tf-gpu
# ---
# # LSTM Sentiment Analysis With Emoji
#
#
# #
#
# 
#
# #
#
# ### LSTM을 이용해 이모티콘이 포함되어있는 텍스트를 감정분석 합니다
# ####
#
# 
#
# #
import json
import os
from pprint import pprint
from konlpy.tag import Okt
import nltk
import numpy as np
import pandas as pd
import time
# +
def read_data(filename):
    """Read a tab-separated ratings file and return its rows, header dropped.

    Each line of the NSMC files is "id<TAB>document<TAB>label"; the first
    line is the header row and is skipped.

    Args:
        filename: path to the TSV file.

    Returns:
        List of [id, document, label] string lists.
    """
    # The corpus is Korean text, so pin UTF-8 instead of relying on the
    # platform default encoding (which breaks on e.g. cp1252 locales).
    with open(filename, 'r', encoding='utf-8') as f:
        # split each line on tabs
        data = [line.split('\t') for line in f.read().splitlines()]
        # drop the txt header row (id document label)
        data = data[1:]
    return data
# Load the NSMC train/test splits and build the Okt POS tagger.
train_data = read_data('ratings_train.txt')
test_data = read_data('ratings_test.txt')
okt = Okt()
# +
def tokenize(doc):
    """POS-tag *doc* with Okt and return "token/tag" strings.

    norm=True normalizes spelling variants; stem=True reduces words to stems.
    """
    return ['/'.join(t) for t in okt.pos(doc, norm=True, stem=True)]
# Tokenization is slow, so reuse the cached JSON files when they exist.
if os.path.isfile('train_docs.json'):
    with open('train_docs.json') as f:
        train_docs = json.load(f)
    with open('test_docs.json') as f:
        test_docs = json.load(f)
else:
    # row[1] holds the review text, row[2] the positive/negative label.
    train_docs = [(tokenize(row[1]), row[2]) for row in train_data]
    test_docs = [(tokenize(row[1]), row[2]) for row in test_data]
    # Save to JSON so later runs can skip tokenization.
    with open('train_docs.json', 'w', encoding="utf-8") as make_file:
        json.dump(train_docs, make_file, ensure_ascii=False, indent="\t")
    with open('test_docs.json', 'w', encoding="utf-8") as make_file:
        json.dump(test_docs, make_file, ensure_ascii=False, indent="\t")
# -
# Flatten all training reviews into one token list.
tokens = [t for d in train_docs for t in d[0]]
# +
text = nltk.Text(tokens, name='NMSC')
# total number of tokens
print('전체 토큰의 개수 : {}'.format(len(text.tokens)))
# number of unique tokens
print('중복을 제외한 토큰의 개수 : {}'.format(len(set(text.tokens))))
# +
# We can't learn from every token, so vectorize with the most common ones
# (3000 here; raise toward 10000 if you have the RAM).
selected_words = [f[0] for f in text.vocab().most_common(3000)]
# Count how often each selected word occurs in doc, producing a
# Bag-of-Words (BOW) count vector of length len(selected_words).
# NOTE: doc.count inside the loop is O(len(doc) * len(selected_words)).
def term_frequency(doc):
    return [doc.count(word) for word in selected_words]
# Each entry of train_docs/test_docs is ([tokens...], label), so the label
# is ignored with (d, _) when building features and the tokens with (_, c)
# when collecting labels.
train_x = [term_frequency(d) for d, _ in train_docs]
test_x = [term_frequency(d) for d, _ in test_docs]
train_y = [c for _, c in train_docs]
test_y = [c for _, c in test_docs]
# +
# Labels are strings, so convert everything to float32 arrays for the model.
x_train = np.asarray(train_x).astype('float32')
x_test = np.asarray(test_x).astype('float32')
y_train = np.asarray(train_y).astype('float32')
y_test = np.asarray(test_y).astype('float32')
# +
# LSTM expects 3D input [samples, timesteps, features]; each review becomes a
# single timestep. An Embedding layer could do this implicitly, but reshaping
# directly keeps training fast.
# (x_train.shape[1] is reused for x_test too; both equal len(selected_words).)
X_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1]))
X_test = np.reshape(x_test, (x_test.shape[0], 1, x_train.shape[1]))
# +
# Predict sentiment with an LSTM model.
# EarlyStopping aborts training as soon as overfitting sets in.
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
from keras.models import load_model
model = Sequential()
model.add(LSTM(100))
# Single sigmoid output: probability that the review is positive.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Stop immediately once validation performance stops improving.
early_stopping = EarlyStopping()
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=100, batch_size=25, callbacks=[early_stopping])
scores = model.evaluate(X_test, y_test, verbose=0)
print('\n')
print("정확도: %.2f%%" % (scores[1]*100))
model.save('Sentiment Analysis.h5')
# -
# # Sentiment analysis of emoji contained in sentences
# #
# +
# Load the emoji sentiment table so emoji in a sentence can be scored.
emoji = pd.read_csv('Emoji.csv')
# These three parallel lists are combined below into {emoji: sentiment}.
emoji_list = emoji['Emoji'].tolist()
emoji_neg = emoji['Negative'].tolist()
emoji_pos = emoji['Positive'].tolist()
emoji.head(20)
# +
# Build an {emoji: sentiment} dictionary.  The raw score is the positive
# count minus the negative count (positive => mostly-positive emoji,
# negative => mostly-negative).  The original if/else computed the identical
# expression in both branches, so it collapses into a single assignment.
emoji_dictionary = {}
for i in range(len(emoji_list)):
    emoji_sentiment = emoji_pos[i] - emoji_neg[i]
    # The counts come in very different magnitudes, so scale the score down
    # to a comparable decimal range before storing it.
    if abs(emoji_sentiment) >= 1000:
        emoji_sentiment /= 1000000
    elif abs(emoji_sentiment) >= 100:
        emoji_sentiment /= 100000
    elif abs(emoji_sentiment) >= 10:
        emoji_sentiment /= 10000
    else:
        emoji_sentiment /= 1000
    # Register the pair {emoji: sentiment}.
    emoji_dictionary[emoji_list[i]] = emoji_sentiment
# +
import pandas as pd
import numpy as np
from keras.models import load_model
def predict_sentiment_with_emoji(word):
    """Score the sentiment of sentence *word* with the trained LSTM,
    then adjust the score by the sentiment of any emoji it contains.

    Returns the score rounded to 5 decimal places, or the neutral value
    0.5 if anything goes wrong (best-effort behaviour).
    """
    try:
        # Tokenize the sentence.
        token = tokenize(word)
        # BoW vector against the most frequent vocabulary words.
        tf = term_frequency(token)
        # Convert to a float32 batch of size 1 for the network.
        data = np.expand_dims(np.asarray(tf).astype('float32'), axis=0)
        # Reshape to the 3-D [samples, timesteps, features] layout LSTMs expect.
        data = np.reshape(data, (data.shape[0], 1, data.shape[1]))
        # Run the previously trained LSTM model.
        score = float(model.predict(data))
        # Each emoji present in the sentence shifts the score by its weight.
        for emoji in emoji_dictionary:
            if emoji in word:
                score += emoji_dictionary[emoji]
        return round(score, 5)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any ordinary failure yields a neutral score.
        return 0.5
# +
# DataFrame이 총 4개 (다음뉴스, 인스타, 트위터, 유튜브)
# Keyword 별로 통합
# Title, Contents, Comments 각각 감정분석
# +
from datetime import timedelta,date
import pandas as pd
topics = ['society', 'politics', 'economic', 'foreign', 'culture',
'entertain', 'sports', 'digital']
today = int(date.today().strftime('%Y%m%d'))
yesterday = date.today() - timedelta(1)
yesterday = int(yesterday.strftime('%Y%m%d'))
keywords = pd.read_csv('./Crawled Data/{}/{}_Top10_keyword'.format(today, today))
keywords = keywords['Keyword'].values.tolist()
n=10
total_keyword_ranking10 = [keywords[i:i+n] for i in range(0, len(keywords), 10)]
insta = pd.read_csv('./Crawled Data/{}/{}_instagram_dataframe'.format(today,today))
daum_news = pd.read_csv('./Crawled Data/{}/{}_daum_news_dataframe'.format(today,today))
twitter = pd.read_csv('./Crawled Data/{}/{}_tweet_dataframe'.format(today,today))
youtube = pd.read_csv('./Crawled Data/{}/{}_youtube_dataframe'.format(today,today))
# +
# 빈 dataframe을 만들어줘서 차곡차곡 넣는다
today_dataframe = pd.DataFrame(columns=['Topic', 'Keyword', 'Company', 'Title', 'Contents', 'Comments', 'KC', 'KCC'])
# +
# Create an empty frame and fill it row by row below.
today_dataframe = pd.DataFrame(columns=['Topic', 'Keyword', 'Company', 'Title', 'Contents', 'Comments', 'KC', 'KCC'])
# +
# Interleave the sources per keyword in the fixed order
# Daum News, YouTube, Instagram, Twitter (assumes 80 keywords per source).
index = 0
for i in range(80):
    today_dataframe.loc[index] = daum_news.loc[i]
    index += 1
    today_dataframe.loc[index] = youtube.loc[i]
    index += 1
    today_dataframe.loc[index] = insta.loc[i]
    index += 1
    today_dataframe.loc[index] = twitter.loc[i]
    index += 1
# +
# Add Keyword Total Ratio (KTR): overall KTR and per-topic KTR separately.
Total_KTR_list = []
Topic_KTR_list = []
Topic_KTR_dataframe = []
KTR_list = []
total_count_list = []
Topic_mean_list = []
total_count = 0
# Sum KC + KCC over each keyword's 4 source rows to get per-keyword counts,
# and the grand total needed for the overall mean.
for i in range(0,320,4):
    count = 0
    for j in range(4):
        count += today_dataframe.iloc[i+j]['KC']
        count += today_dataframe.iloc[i+j]['KCC']
    total_count_list.append(count)
    total_count+= count
mean_ratio = total_count/80
# Keyword Total Ratio relative to the mean (as a percentage).
for i in range(80):
    total_KTR = round((total_count_list[i]/mean_ratio),2)
    Total_KTR_list.append(int(total_KTR*100))
    # NOTE(review): the inner loop recomputes the identical KTR value and
    # appends it once per source row — presumably to keep KTR_list aligned
    # with the 320-row dataframe; verify before simplifying.
    for j in range(4):
        KTR = round((total_count_list[i]/mean_ratio),2)
        KTR_list.append(str(int(KTR * 100))+'%')
# Per-topic mean of the keyword counts (10 keywords per topic).
for i in range(8):
    count=0
    for j in range(10):
        count+=total_count_list[10*i+j]
    Topic_mean_list.append(count/10)
# Per-topic KTR relative to that topic's mean.
for i in range(8):
    for j in range(10):
        topic_ktr = round((total_count_list[10*i+j] / Topic_mean_list[i]),2)
        Topic_KTR_list.append(int(topic_ktr*100))
# +
# Add Keyword Total Sentiment: score every row's title+contents and comments
# with the LSTM+emoji predictor and average the two.
# NOTE(review): ``time`` is not imported in this visible chunk — presumably
# imported earlier in the notebook; confirm.
sentiment_list = []
start_time = time.time()
for i in range(320):
    title = str(today_dataframe.iloc[i]['Title'])
    content = str(today_dataframe.iloc[i]['Contents'])
    comment = str(today_dataframe.iloc[i]['Comments'])
    title_content_sentiment = predict_sentiment_with_emoji(title+content)
    comment_sentiment = predict_sentiment_with_emoji(comment)
    # Average of the two scores, stored as an integer percentage.
    sentiment = round((title_content_sentiment + comment_sentiment)/2,2)
    sentiment_list.append(int(sentiment*100))
print("걸린시간 : {}분".format(round((time.time() - start_time)/60, 1)))
# +
# Keyword Total Sentiment (KTS): weighted mix of the four sources —
# Daum News 50%, YouTube 35%, Instagram 5%, Twitter 15% —
# matching the row order established when today_dataframe was filled.
today_dataframe_KTS = []
Total_KTS_list = []
for i in range(0,320,4):
    KTS = 0
    # j only ranges over 0..3, so ``j % 4`` is just ``j``.
    for j in range(4):
        if(j % 4==0):
            KTS += 0.5 * sentiment_list[i+j]
        elif(j % 4==1):
            KTS += 0.35 * sentiment_list[i+j]
        elif(j % 4==2):
            KTS += 0.05 * sentiment_list[i+j]
        else:
            KTS += 0.15 * sentiment_list[i+j]
    Total_KTS_list.append(int(KTS))
# +
# Assemble the per-keyword results and persist them for tomorrow's comparison.
topic_list = daum_news['Topic'].values.tolist()
keyword_list = daum_news['Keyword'].values.tolist()
today_KTR_KTS = pd.DataFrame({'Topic':topic_list,
                              'Keyword':keyword_list,
                              'Total_KTR':Total_KTR_list,
                              'Topic_KTR':Topic_KTR_list,
                              'KTS':Total_KTS_list})
today_KTR_KTS.to_csv('./Crawled Data/{}/{}_KTR_KTS_dataframe'.format(today, today), index=False)
# +
# Related-search-keyword lookup via Google Trends (pytrends).
keywords = pd.read_csv('./Crawled Data/{}/{}_Top10_keyword'.format(today, today))
keyword_list = keywords['Keyword'].values.tolist()
top5_topic = []
# pytrends takes keywords as a list rather than a str, so wrap every
# keyword in its own single-element list.
for i in range(8):
    for j in range(10):
        top5_topic.append([keyword_list[10*i+j]])
# Extract the top-3 rising related queries for each keyword.
from pytrends.request import TrendReq
top3_related_keyword = []
top3_related_value = []
pytrends = TrendReq(hl='ko')
for i in range(len(top5_topic)):
    # Time window: the past day; region: South Korea.
    # The 'rising' frame holds the trending related queries (with weights).
    try:
        pytrends.build_payload(top5_topic[i], geo = 'KR', timeframe='now 1-d')
        queries = pytrends.related_queries()
        dataframe = queries[top5_topic[i][0]]['rising']
        top3_related_keyword.append(dataframe['query'].values.tolist()[0:3])
        top3_related_value.append(dataframe['value'].values.tolist()[0:3])
    except Exception:
        # Narrowed from a bare ``except:``.  The keyword has no rising data
        # (or the request failed), so record the "none" placeholder.
        top3_related_keyword.append('[없음]')
        top3_related_value.append('[없음]')
# Turn related keywords into hashtags: '#없음' for missing entries,
# otherwise prefix '#' and strip internal spaces.
for i in range(80):
    if top3_related_keyword[i] == '[없음]':
        top3_related_keyword[i] = '#없음'
    else:
        for j in range(len(top3_related_keyword[i])):
            top3_related_keyword[i][j] = '#'+top3_related_keyword[i][j]
            if (" ") in top3_related_keyword[i][j]:
                top3_related_keyword[i][j] = top3_related_keyword[i][j].replace(" ", "")
# -
# Flatten each remaining list of hashtags into one space-separated string.
for i in range(len(top3_related_keyword)):
    if(type(top3_related_keyword[i]) == list):
        top3_related_keyword[i] = (' ').join(top3_related_keyword[i])
top3_related_keyword
# +
# Rebuild today_KTR_KTS, now including the related keywords, and re-save.
topic_list = daum_news['Topic'].values.tolist()
keyword_list = daum_news['Keyword'].values.tolist()
today_KTR_KTS = pd.DataFrame({'Topic':topic_list,
                              'Keyword':keyword_list,
                              'Total_KTR':Total_KTR_list,
                              'Topic_KTR':Topic_KTR_list,
                              'KTS':Total_KTS_list,
                              'Related_Keywords':top3_related_keyword})
today_KTR_KTS.to_csv('./Crawled Data/{}/{}_KTR_KTS_dataframe'.format(today, today), index=False)
# +
# Load yesterday's CSV so day-over-day changes can be computed for
# keywords that appear on both days.
yesterday_KTR_KTS = pd.read_csv('./Crawled Data/{}/{}_KTR_KTS_dataframe'.format(yesterday, yesterday))
# +
# Compare today's KTR/KTS against yesterday's for keywords present on both
# days, annotating the change with an up/down arrow.
# Copy the lists explicitly — the original code aliased them, so mutating
# the "compare" lists silently mutated Total_KTR_list / Total_KTS_list too.
compare_Total_KTR_list = list(Total_KTR_list)
compare_KTS_list = list(Total_KTS_list)
for i in range(8):
    for j in range(10):
        keyword = today_KTR_KTS.iloc[10*i+j]['Keyword']
        # Search yesterday's same-topic block for the keyword.
        for k in range(10):
            if keyword == yesterday_KTR_KTS.iloc[10*i+k]['Keyword']:
                Total_KTR_change = int(today_KTR_KTS.iloc[10*i+j]['Total_KTR']) - int(yesterday_KTR_KTS.iloc[10*i+k]['Total_KTR'])
                KTR_change = str(abs(Total_KTR_change))
                if(Total_KTR_change < 0):
                    compare_Total_KTR_list[10*i+j] = (str(today_KTR_KTS.iloc[10*i+j]['Total_KTR'])+'% ('+KTR_change+'🔻)')
                else:
                    compare_Total_KTR_list[10*i+j] = (str(today_KTR_KTS.iloc[10*i+j]['Total_KTR'])+'% ('+KTR_change+'🔺)')
                Total_KTS_change = int(today_KTR_KTS.iloc[10*i+j]['KTS']) - int(yesterday_KTR_KTS.iloc[10*i+k]['KTS'])
                KTS_change = str(abs(Total_KTS_change))
                if(Total_KTS_change < 0):
                    compare_KTS_list[10*i+j] = (str(today_KTR_KTS.iloc[10*i+j]['KTS'])+'% ('+KTS_change+'🔻)')
                else:
                    # Bug fix: the original printed the down arrow 🔻 here too,
                    # mislabelling KTS increases as decreases.
                    compare_KTS_list[10*i+j] = (str(today_KTR_KTS.iloc[10*i+j]['KTS'])+'% ('+KTS_change+'🔺)')
# Append '%' to any entry that was not annotated above.
for i in range(80):
    if ('%' not in str(compare_Total_KTR_list[i])):
        compare_Total_KTR_list[i] = str(compare_Total_KTR_list[i]) + '%'
    if ('%' not in str(compare_KTS_list[i])):
        compare_KTS_list[i] = str(compare_KTS_list[i]) + '%'
today_KTR_KTS['Total_KTR_change'] = compare_Total_KTR_list
today_KTR_KTS['KTS_change'] = compare_KTS_list
# -
today_KTR_KTS
today_KTR_KTS.to_csv('./Crawled Data/{}/{}_KTR_KTS_dataframe'.format(today, today), index=False)
| Test/LSTM Sentiment Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Norms, conditions numbers and Eigensystems
#
# In linear-algebra calculations, we are sometimes very unfortunate and have to solve a problem like $Ax = b$ (given fixed $A$), where small changes in $b$ produce extremely large changes in $x$. Such problems are said to be **ill-conditioned**.
#
# This notebook explores this phenomenon. Along the way we will have to calculate condition numbers and eigenvalues.
# ## Preliminaries
#
# Let's load numpy as usual, and the linear algebra package from numpy as we will find some functions in it useful. We also use the `GaussianElimination()` from one of the other notebooks and define the $L_2$-norm
import numpy as np
from numpy import linalg as la
def GaussianElimination(A, b):
    """Solve the linear system A x = b.

    Performs forward elimination on the augmented matrix [A | b] followed
    by back substitution.  No pivoting is done, so a zero on the diagonal
    will fail; expects float arrays.
    """
    n = A.shape[1]
    # Augmented matrix [A | b]; np.c_ copies, so the caller's A is untouched.
    aug = np.c_[A, b]
    # Forward elimination: zero out entries below each pivot.  Only the
    # columns to the right of the pivot (plus the RHS) need updating.
    for pivot in range(n - 1):
        for row in range(pivot + 1, n):
            aug[row, pivot + 1:] = aug[row, pivot + 1:] - aug[pivot, pivot + 1:] * aug[row, pivot] / aug[pivot, pivot]
    # Back substitution, solving from the last unknown upwards.
    x = np.zeros(n)
    for row in range(n - 1, -1, -1):
        x[row] = aug[row, n]
        for col in range(row + 1, n):
            x[row] -= aug[row, col] * x[col]
        x[row] = x[row] / aug[row, row]
    return x
def MatrixInverseViaGaussianElimination(A):
    """Invert A by Gauss-Jordan elimination on the augmented matrix [A | I].

    No pivoting is performed; expects a nonsingular float matrix.
    """
    n = A.shape[1]
    # [A | I]; np.hstack copies, so the caller's A is untouched.
    aug = np.hstack((A, np.identity(n)))
    for pivot in range(n):
        for row in range(n):
            if row == pivot:
                # Never eliminate the pivot row against itself.
                continue
            # Zero the pivot column in this row, then rescale the row so
            # its own diagonal entry becomes 1.
            aug[row] = (aug[row] - aug[pivot] * aug[row, pivot] / aug[pivot, pivot])
            aug[row] = aug[row] / aug[row, row]
    # The right half of the augmented matrix now holds A^{-1}.
    return aug[:, n:2*n]
def L2Norm(v):
    """Return the Euclidean (L2) norm of the vector *v*."""
    return la.norm(v, ord=2)
# # Norms
#
# The $L^2$-norm of a matrix $A$ is formally defined by
#
# $$\lVert A \rVert_2 = \sup_{x\neq0}\frac{\lVert Ax \rVert_2}{\lVert x \rVert_2} $$
#
# For practical calculation, this is not a particularly useful definition. We derived a more useful formula:
#
# $$ \lVert A \rVert_2 = \sqrt{\lambda_\text{max}} $$
#
# where $\lambda_\text{max}$ is the maximum eigenvalue of $A A^T$.
#
# Let's check that NumPy's definitions agree with these formula.
# +
# Sanity check: ||A||_2 should equal sqrt(max eigenvalue of A A^T),
# so the difference below should be ~0 (up to floating-point error).
A = np.random.rand(6,6)
# NOTE(review): la.eig may return complex eigenvalues for a general matrix;
# here A A^T is symmetric so the imaginary parts are ~0.
lmax = np.max(la.eig(A@A.T)[0])
la.norm(A,2) - np.sqrt(lmax)
# -
# Note that by default NumPy's `la.norm()` function computes the *Frobenius* norm for matrices. If you want the $L^2$-norm you need to call it as `la.norm(A,2)`, as above.
# ## Ill-conditioned system - example case
#
# Let's look at an example where the matrix $A$ is ill-conditioned
# +
# A nearly singular 2x2 matrix: the rows are almost linearly dependent.
A = np.array([[1.002,1],[1,0.998]])
b = np.array([2.002,1.998])
x = GaussianElimination(A,b)
print(x)
# -
# Slightly perturbing $b$ causes a huge change in the value of $x$
# +
# Perturb the first component of b by 1e-4 and re-solve.
bp = np.array([2.0021,1.998])
xp = GaussianElimination(A,bp)
print(xp)
# -
# Relative change of b vs. the resulting relative change of x.
print("Change in b = %.4f%%" % (100*L2Norm(bp-b)/L2Norm(b)))
print("Change in x = %.2f%%" % (100*L2Norm(xp-x)/L2Norm(x)))
# ## Condition number
#
# The condition number of a matrix $A$ is defined as
#
# $$ \kappa(A) = \lVert A \rVert_2 \lVert A^{-1} \rVert_2$$
#
# We learned in the lectures that another practical way to compute this is via
#
# $$\kappa(A) = \sqrt{\frac{\lambda_\text{max}}{\lambda_\text{min}}} $$
#
# where $\lambda_\text{min}$ and $\lambda_\text{max}$ are the minimum and maximum eigenvalues of $A A^T$.
#
# Let's use these two methods along with NumPy's built in method `la.cond()`
# Compare three equivalent ways of computing the condition number of a
# random matrix; all three printed values should agree.
A = np.random.rand(6,6)
# Eigenvalues of A^T A are the squared singular values of A.
eigenvalues = la.eig(A.T@A)[0]
lmax = np.max(eigenvalues)
lmin = np.min(eigenvalues)
# Typo fix: the original printed "comuted" in the last two labels.
print("Condition number computed via norm definition:\t", la.norm(A,2)*la.norm(la.inv(A),2))
print("Condition number computed via eigenvalue formula:", np.sqrt(lmax/lmin))
print("Condition number computed via la.cond(A,2):\t", la.cond(A,2))
| LinearAlgebraAndMatrices/NormsAndConditionsNumbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
import Pkg
#Pkg.add("MathOptInterface")
#Pkg.add("GLPK")
# Activate the project environment next to this notebook and install deps.
Pkg.activate(@__DIR__)
Pkg.instantiate()
# +
using JuMP
using MathOptInterface # Replaces MathProgBase
# shortcuts
const MOI = MathOptInterface
const MOIU = MathOptInterface.Utilities
using GLPK # Loading the GLPK module for using its solver
# -
# Problem data and helper functions (J, P, C, H, Q, CTS, bj, prec,
# task_times, subset_* ...) come from these includes — see those files.
include("../src/read_data.jl")
include("../JuMP/main_JuMP.jl")
# Index ranges: positions, containers, time horizon, cranes.
JJ=[j for j in 1:J]
PP=[p for p in 1:P]
CC=[c for c in 1:C]
TT=[t for t in 0:H-1]
QQ=[q for q in 1:Q]
model = Model(with_optimizer(GLPK.Optimizer, msg_lev = 4));
# Old syntax: model = Model(solver=GLPKSolverLP(msg_lev = 4)))
# +
# x[p,i,q] = 1 iff container i is handled at position p by crane q.
@variable(model, x[p=1:P, i=1:C, q=1:Q], Bin)
# tc[p,q]: completion time of position p on crane q (0 when unused).
@variable(model, tc[p=1:P, q=1:Q], lower_bound=0, upper_bound=H)
# Start/End of each crane's working interval within the horizon H.
@variable(model, Start[q=1:Q], lower_bound=0, upper_bound=H)
@variable(model, End[q=1:Q], lower_bound=0, upper_bound=H)
#objective function: minimize the sum of crane finishing times
@objective(model, Min, sum(End[q] for q in QQ))
# -
# Assignment: every container is handled exactly once, and every position
# is used exactly once, each by a crane allowed to reach that position.
@constraint(model, [i=1:C], sum(sum(x[p,i,q] for q in subset_crane_pos(CTS, p, bj)) for p in subset_pos(PP, tasks_by_position, i)) == 1)
@constraint(model, [p=1:P], sum(sum(x[p,i,q] for q in subset_crane_pos(CTS, p, bj)) for i in subset_pos(CC, tasks_by_position, p)) == 1)
# Forbid assignments to cranes that cannot reach the position.
for i = 1:C
    for p = 1:P
        for q in setdiff(Set(QQ),Set(subset_crane_pos(CTS, p, bj)))
            @constraint(model, x[p,i,q] == 0)
        end
    end
end
# Completion-time bounds: tc is forced to 0 when the crane does not work
# position p, and at least twice the task time when it does.
for p = 1:P
    for q in subset_crane_pos(CTS, p, bj)
        @constraint(model, tc[p,q] <= CTS.H*sum(x[p,i,q] for i in subset_pos(CC, tasks_by_position, p)))
        @constraint(model, tc[p,q] >= 2*sum((task_times[p,i]*x[p,i,q]) for i in subset_pos(CC, tasks_by_position, p)))
    end
end
# Each crane starts before the start of any of its tasks (big-M relaxed
# when the crane does not work the position).
for q = 1:Q
    for p = 1:P
        @constraint(model, tc[p,q]-2*sum((task_times[p,i]*x[p,i,q]) for i in CC)+CTS.H*(1-sum(x[p,i,q] for i in subset_pos(CC, tasks_by_position, p))) >= Start[q])
    end
end
# Same-crane precedence: a task cannot start before its predecessors
# handled by the same crane have completed (big-M formulation).
for i = 1:C
    for p in subset_pos(PP, tasks_by_position, i)
        if length(prec[p]) > 0
            for q = 1:Q
                for pp in collect(intersect(Set(subset_pos_crane(CTS, q, bj)),Set(prec[p])))
                    @constraint(model, 2*task_times[p,i] - CTS.H*(2-x[p,i,q]-sum(x[pp,ii,q] for ii in subset_pos(CC, tasks_by_position, pp))) <= tc[p,q] - tc[pp,q])
                end
            end
        end
    end
end
# Cross-crane precedence against the latest predecessor.
# TODO(review, translated from Catalan): possibly wrong — q being redundant
# here may add extra (unneeded) constraints; verify.
for p = 1:P
    if length(prec[p]) > 0
        for ppp in sort(prec[p], rev=true)[1]
            @constraint(model, sum(tc[p,q] for q in QQ) >= sum(tc[ppp,q] for q in QQ) - sum(task_times[ppp,i]*sum(x[ppp,i,q] for q in QQ) for i in subset_pos(CC, tasks_by_position, ppp)))
            #@constraint(model, sum(tc[p,q] for q in QQ) >= sum(tc[ppp,q] for q in QQ))
        end
    end
end
# End[q] dominates all completion times; Start never exceeds End.
@constraint(model, [p=1:P, q=1:Q], tc[p,q] <= End[q])
@constraint(model, [q=1:Q], Start[q] <= End[q])
JuMP.optimize!(model) # Old syntax: status = JuMP.solve(model)
# +
# Collect the solution: for each crane q, the list of (position, container)
# pairs it was assigned.
# NOTE(review): JuMP.value.(x) is re-evaluated inside the innermost loop;
# hoisting it once before the loops would avoid P*C*Q re-broadcasts.
sol_x = Dict{Int, Array}()
for p=1:P
    for i=1:C
        for q=1:Q
            if JuMP.value.(x)[p,i,q] == 1
                if haskey(sol_x, q) == false
                    sol_x[q] = Array{NamedTuple{(:pos, :cont),Tuple{Int64,Int64}}, 1}()
                end
                push!(sol_x[q], (pos=p, cont=i))
            end
        end
    end
end
sol_x[1]
# -
sol_x[2]
# Completion times of every (position, crane) pair.
sol_t = JuMP.value.(tc)
| notebooks/old/Math_Model_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 2: Loading the Case Study Data in a Jupyter Notebook
# # First Jupyter notebook
# Welcome to your first jupyter notebook! The first thing to know about Jupyter notebooks is that there are two kinds of cells. This is a markdown cell.
#
# There are a lot of different ways to mark up the text in markdown cells, including __bold__ and *italics*.
#
# The next one will be a `code` cell.
import pandas as pd
# Load the credit-card default case-study data from the course's Excel file.
df = pd.read_excel(
    '../../Data/default_of_credit_card_clients__courseware_version_1_21_19.xls')
# (rows, columns) of the loaded frame.
df.shape
a = 5
a
| Chapter01/Exercise02/Exercise02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pyhf
import pandas
import numpy as np
import altair as alt
# # Visualization with Altair
#
#
# [Altair](https://altair-viz.github.io/) is a python API for generating [Vega](https://vega.github.io/) visualization specifications. We demonstrate how to use this to build an interactive chart of pyhf results.
# ## Preparing the data
#
# Altair reads the data as a pandas dataframe, so we create one.
# Single-bin model: signal=7, background=20 with uncertainty 5.
# NOTE(review): hepdata_like was renamed uncorrelated_background in newer
# pyhf releases — confirm the pinned version before updating.
model = pyhf.simplemodels.hepdata_like([7], [20], [5])
# Observed main measurement plus the auxiliary (constraint) data.
data = [25] + model.config.auxdata
# Scan the signal strength mu over [0, 5].
muscan = np.linspace(0, 5, 31)
results = [
    pyhf.infer.hypotest(mu, data, model, return_expected_set=True) for mu in muscan
]
# Columns: mu, observed CLs, then the 5-element expected CLs band.
# (``data`` is re-bound here from the measurement list to the result array.)
data = np.concatenate(
    [
        muscan.reshape(-1, 1),
        np.asarray([r[0] for r in results]).reshape(-1, 1),
        np.asarray([r[1] for r in results]).reshape(-1, 5),
    ],
    axis=1,
)
df = pandas.DataFrame(data, columns=["mu", "obs"] + [f"exp_{i}" for i in range(5)])
df.head()
# ## Defining the Chart
#
# We need two filled areas for the 1,2 sigma bands and two lines for the expected and observed CLs values. For interactivity we add a hovering label of the observed result.
# +
# 2-sigma (yellow, exp_0..exp_4) and 1-sigma (green, exp_1..exp_3) bands.
band1 = (
    alt.Chart(df)
    .mark_area(opacity=0.5, color="green")
    .encode(x="mu", y="exp_1", y2="exp_3")
)
band2 = (
    alt.Chart(df)
    .mark_area(opacity=0.5, color="yellow")
    .encode(x="mu", y="exp_0", y2="exp_4")
)
# Solid line: observed CLs; dashed line: median expected CLs.
line1 = alt.Chart(df).mark_line(color="black").encode(x="mu", y="obs")
line2 = (
    alt.Chart(df).mark_line(color="black", strokeDash=[5, 5]).encode(x="mu", y="exp_2")
)
# Hover selection snapping to the nearest mu value.
# NOTE(review): selection_single is deprecated in Altair 5 in favour of
# selection_point — confirm the pinned Altair version before updating.
nearest = alt.selection_single(
    nearest=True, on="mouseover", fields=["mu"], empty="none"
)
point = (
    alt.Chart(df)
    .mark_point(color="black")
    .encode(x="mu", y="obs", opacity=alt.condition(nearest, alt.value(1), alt.value(0)))
    .add_selection(nearest)
)
# Text label showing the observed value next to the hovered point.
text = line1.mark_text(align="left", dx=5, dy=-5).encode(
    text=alt.condition(nearest, "obs", alt.value(" "))
)
# Layer everything into the final interactive chart.
band2 + band1 + line1 + line2 + point + text
| docs/examples/notebooks/altair.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Importing matplotlib's pyplot interface.
import matplotlib.pyplot as plt
x = range(10)
x
# +
# Two pairs of x/y lists to plot against each other.
x1 = [2,6,8,10]
y1 = [3,7,12,20]
x2 = [12,16,18]
y2 = [3,12,12]
# -
# Line plot with axis labels, a grid, and a legend.
plt.xlabel("Distance")
plt.ylabel("time")
plt.grid(c = "green") #to display grid
plt.plot(x1,y1, label ='cars')
plt.plot(x2,y2,label = 'bike')
plt.legend() # to display labels
# ## Bar plots
# Mixing bar plots with a line plot on the same axes.
plt.xlabel("time")
plt.ylabel("price")
plt.bar(x1,y1,label='apple')
plt.bar(x2,y2,label='microsoft')
plt.plot(x1,y1,label='amazon', c='green')
plt.grid(c = 'red')
plt.legend()
# ## Scatter / dots plot
# marker sets the point shape, s its size in points^2.
plt.scatter(x1,y1, marker='*', s=100)
plt.scatter(x2,y2, marker='^', s=100)
plt.plot(x1,y1)
plt.grid(c='y')
import numpy as np
x = np.array([3,6,8,11,12,22,24])
x
# Elementwise square of the array.
y = x**2
y
plt.scatter(x,y)
plt.bar(x,y)
| dataVisuals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="bxPeVnpksepA" outputId="f82b5e9a-b88f-48d4-a852-411c20658e3c"
from core import *
import torch
import torchvision
import torch.utils.data as Data
import numpy as np
from torch.autograd import Variable
import random
import math
import itertools
from bandits import *
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="appsi0oRMy5K" outputId="b628b0c3-72c2-4ef7-a6d2-42b6213a958a"
# NOTE(review): use_cuda is assigned but never used in this cell.
use_cuda = torch.cuda.is_available()
# hypers that do not need to be tuned
# NOTE(review): comment says 600 is used in actual training, but the value
# here is 4000 — confirm which is intended.
N_Steps = 4000 # in actual training, we use 600
# Load the UCI Mushroom Dataset: 8124 datapoints, each with 22 categorical
# features and one label - edible/poisonous. The features are transformed to a
# one-hot encoding.
# The missing values (marked with ?) are treated as a different class for now.
mushroom_dataset = pd.read_csv('mushrooms.csv')
train_labels = mushroom_dataset['class']
train_labels = train_labels.replace(['p', 'e'],
                                    [POISONOUS_CONSTANT, EDIBLE_CONSTANT])
# One-hot encode every categorical feature column.
train_features = pd.get_dummies(mushroom_dataset.drop(['class'], axis=1))
train_features = torch.tensor(train_features.values, dtype=torch.float)
train_labels = torch.tensor(train_labels.values)
trainset = torch.utils.data.TensorDataset(train_features, train_labels)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=1,
                                          shuffle=True, num_workers=1)
# TODO: cannot specify the hyper for priors outside the network
LearningRate_candidates = [1e-4, 1e-3]
Epsilon_candidates = [0.1, 0.01, 0.001]
hyper_val_error_dict = {}
if __name__ == '__main__':
    # Grid search over the cartesian product of the candidate hypers.
    hyper_list = itertools.product(LearningRate_candidates, Epsilon_candidates)
    for LearningRate, epsilon in hyper_list:
        print("*"*50)
        print("Learning rate: {}".format(LearningRate))
        print("Epsilon: {}".format(epsilon))
        # Initialize network.
        # NOTE(review): the searched ``epsilon`` feeds Adam's numerical ``eps``,
        # while the agent's exploration epsilon is fixed at 0.05 below —
        # confirm this naming collision is intentional.
        optimizer_constructor = torch.optim.Adam
        optimizer_params = {'lr': LearningRate, 'eps': epsilon}
        eg_agent = EGreedyNNAgent(epsilon=.05,
                                  optimizer_constructor=optimizer_constructor,
                                  optim_params=optimizer_params)
        eg_env = Environment(eg_agent, trainloader)
        loss = []
        regret = []
        for i_step in range(N_Steps):
            # Training: one bandit round per step, tracking loss and regret.
            loss.append(eg_env.play_round())
            regret.append(eg_env.cumulative_regret)
            if (i_step + 1) % 100 == 0:
                print('Step {}. Regret {}'.format(i_step, eg_env.cumulative_regret))
        plt.plot(np.array(loss))
        plt.ylabel('Loss')
        plt.show()
        plt.plot(np.array(regret))
        plt.ylabel('Cumulative Regret')
        plt.show()
# + colab={} colab_type="code" id="xsYlKmc8sV5E"
| src/bandits/bayes_validation_bandits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ComPhot]
# language: python
# name: conda-env-ComPhot-py
# ---
# ## Task 3: Gamma Correction(10)
# >The demosaicked image will still be linear, apply a gamma correction to convert it to a sRGB
# image.
# +
import numpy as np
from skimage import io, util
naive_image_name = "naive_linear.png"
gradient_image_name = "gradient_linear.png"
# Standard sRGB-style display gamma.
gamma_value = 2.2
# Load the linear (demosaicked) images as floats in [0, 1].
img_naive = util.img_as_float(io.imread("./"+naive_image_name))
img_gradient = util.img_as_float(io.imread("./"+gradient_image_name))
# -
# ### Gamma correction
# +
###############################################################################
# Gamma correction
###############################################################################
def gamma_correction(pixel, gamma_value):
    """Map a linear intensity through the power-law gamma curve x**(1/gamma)."""
    return pixel ** (1.0 / gamma_value)
# NOTE(review): np.vectorize is a Python-level loop; since ``**`` already
# broadcasts elementwise, gamma_correction(img, gamma_value) would work
# directly and much faster — verify before changing.
vec_gamma = np.vectorize(gamma_correction)
result_naive = vec_gamma(img_naive, gamma_value)
# Clamp to the valid [0, 1] intensity range before 8-bit conversion.
result_naive = np.clip(result_naive, 0.0, 1.0)
result_gradient = vec_gamma(img_gradient, gamma_value)
result_gradient = np.clip(result_gradient, 0.0, 1.0)
###############################################################################
# Saving gamma-corrected image
io.imsave("./gamma_correctd_"+str(gamma_value)+"_"+naive_image_name, util.img_as_ubyte(result_naive))
io.imsave("./gamma_correctd_"+str(gamma_value)+"_"+gradient_image_name, util.img_as_ubyte(result_gradient))
###############################################################################
| task03/.ipynb_checkpoints/Task03-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .coco
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Coconut
# language: coconut
# name: coconut
# ---
# # Advent of Code 2021
#
# This solution (Jupyter notebook; coconut 1.5.0 on python 3.7.11) by kannix68, @ 2021-12. \
# Using anaconda distro, conda v4.10.3, and coconut language. installation on MacOS v10.14.6 "Mojave".
#
# Reddit Advent of Code [solution_megathreads - adventofcode](https://www.reddit.com/r/adventofcode/wiki/solution_megathreads#wiki_december_2021)
# +
import copy
import itertools
import logging
import re
import sys
import time
from collections import defaultdict
import numpy as np
import pandas as pd
import pylib.aochelper as aoc
#from pylib.aochelper import map_list as mapl
#from pylib.aochelper import filter_list as filterl
f"Python version: {sys.version}" |> print
f"Version info: {sys.version_info}" |> print
log = aoc.getLogger(__name__)
f"Initial log-level={aoc.getLogLevelName(log.getEffectiveLevel())}." |> print
# -
# ## Problem domain code
# ### Day 1: Sonar Sweep
# +
"Day 1" |> print
tests = """
199
200
208
210
200
207
240
269
260
263""".strip()
# -
def solve_d01pt1(inp):
  """Solve Day 1 part 1: count measurements larger than the previous one."""
  # Parse the newline-separated input into ints.
  inp = inp |> .split() |> map$(int)
  # Pairwise differences; the first diff is NaN, so drop it.
  outp = pd.Series(inp).diff()[1:].astype(int).tolist()
  # Keep only the strictly positive increases.
  outp = outp |> filter$(it -> it > 0)
  return outp |> list |> len
expected = 7
result = solve_d01pt1(tests)
aoc.assert_msg("test solve day 1 part 1", result == expected)
ins = aoc.read_file_to_str("./in/day01.in")
out = solve_d01pt1(ins)
f"day 1 part 1 output: {out}" |> print
"Day 1 part 2" |> print
def solve_d01pt2(inp):
  """Solve Day 1 part 2: count increases of the 3-measurement sliding-window sum."""
  inp = inp |> .split() |> map$(int)
  # Rolling 3-window sums, then differences; the first 3 entries are NaN.
  outp = pd.Series(inp).rolling(3).sum().diff()[3:].astype(int).tolist()
  return outp |> filter$(-> _ > 0) |> list |> len
expected = 5
result = solve_d01pt2(tests)
aoc.assert_msg("test solve day 1 part 2", result == expected)
out = solve_d01pt2(ins)
f"day 1 part 2 output: {out}" |> print
# ## Day 2: Dive!
"Day 2" |> print
# +
def iterate(pos, cmd):
  """Apply one submarine command to pos = [horizontal, depth]; return the new pos."""
  HPOS, DEPTH = 0, 1
  direct, val = cmd.split(" ")
  val = int(val)
  # Work on a copy so the caller's list is never mutated.
  pos = pos.copy()
  case direct:
    match "forward":
      pos[HPOS] += val
    match "down":
      pos[DEPTH] += val
    match "up":
      pos[DEPTH] -= val
  return pos
def solve_d02pt1(inp):
  """Solve Day 2 part 1: product of final horizontal position and depth."""
  pos = [0, 0]
  for cmd in inp.splitlines():
    pos = iterate(pos, cmd)
  # Multiply the two coordinates together.
  return pos |> reduce$(*)
tests = """
forward 5
down 5
forward 8
up 3
down 8
forward 2""".strip()
expected = 150
result = solve_d02pt1(tests)
aoc.assert_msg("test solve day 1 part 1", result == expected)
# -
ins = aoc.read_file_to_str("./in/day02.in")
out = solve_d02pt1(ins)
f"day 2 part 1 output: {out}" |> print
"Day 2 part 2" |> print
# +
def iterate2(pos, cmd):
  """Apply one command to pos = [horizontal, depth, aim] (part-2 aim model)."""
  HPOS, DEPTH, AIM = 0, 1, 2
  direct, val = cmd.split(" ")
  val = int(val)
  # Work on a copy so the caller's list is never mutated.
  pos = pos.copy()
  case direct:
    match "forward":
      # forward moves horizontally and dives by aim * val.
      pos[HPOS] += val
      pos[DEPTH] += pos[AIM] * val
    match "down":
      pos[AIM] += val
    match "up":
      pos[AIM] -= val
  return pos
def solve_d02pt2(inp):
  """Solve Day 2 part 2: product of final horizontal position and depth (aim model)."""
  pos = [0, 0, 0]
  for cmd in inp.splitlines():
    pos = iterate2(pos, cmd)
  # Only horizontal position and depth enter the answer; aim is dropped.
  return pos[0:2] |> reduce$(*)
expected = 900
result = solve_d02pt2(tests)
aoc.assert_msg("test solve day 1 part 2", result == expected)
# -
# `ins` remains the same
out = solve_d02pt2(ins)
f"day 2 part 2 output: {out}" |> print
# ## Day 3: Binary Diagnostic
"Day 3" |> print
# +
tests = """
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010
""".strip()
def T(lst):
  """Transpose a 2-d list: rows become columns."""
  return list(map(list, zip(*lst)))
def bitlist2int(bitlist):
  """Interpret a list of 0/1 ints (most significant bit first) as an int."""
  return int("".join(str(bit) for bit in bitlist), 2)
def solve_d03pt1(ins):
  """Solve Day 3 part 1: gamma * epsilon from per-column majority bits."""
  lst = ins.splitlines()
  # Turn each binary string into a list of 0/1 ints.
  for idx in range(len(lst)):
    lst[idx] = map(int, list(lst[idx])) |> list
  l = len(lst)
  log.debug(lst)
  # Transpose so each row is one bit-column of the input.
  in_t = T(lst)
  log.debug(in_t)
  blst = []   # majority bit per column (gamma)
  cblst = []  # complement bit per column (epsilon)
  for col in in_t:
    c0, c1 = col.count(0), l - col.count(0)
    if c0 > c1:
      i = 0
      c = 1
    else:
      i = 1
      c = 0
    blst.append(i)
    cblst.append(c)
  log.debug(blst)
  bnum = bitlist2int(blst)
  cnum = bitlist2int(cblst)
  log.debug([bnum, cnum])
  return bnum * cnum
expected = 198
log.setLevel(logging.INFO)
result = solve_d03pt1(tests)
aoc.assert_msg("test solve day 3 part 1", result == expected)
# -
ins = aoc.read_file_to_str("./in/day03.in")
out = solve_d03pt1(ins)
f"day 3 part 1 output: {out}" |> print
"Day 3 part 2" |> print
# +
def solve_d03pt2(ins):
  """Solve Day 3 part 2: oxygen-generator rating * CO2-scrubber rating."""
  lst = ins.splitlines()
  # Turn each binary string into a list of 0/1 ints.
  for idx in range(len(lst)):
    lst[idx] = map(int, list(lst[idx])) |> list
  elemlen = len(lst[0])
  lstlen = len(lst)
  tlst = T(lst)
  log.debug(lst)
  log.info(f"elemlen={elemlen}, lstlen={lstlen}")
  # Keep a pristine copy for the second (CO2) filtering pass.
  olst = copy.deepcopy(lst)
  # Oxygen rating: repeatedly keep rows matching the majority bit (ties -> 1).
  for i in range(elemlen):
    sm1 = tlst[i].count(1)
    sm0 = lstlen - sm1
    if sm1 >= sm0:
      trg = 1
    else:
      trg = 0
    log.debug(f"oxy iter-#{i} lstlen={lstlen} trg={trg}")
    lst = lst |> filter$(-> _[i] == trg) |> list
    tlst = T(lst)
    lstlen = len(lst)
    if lstlen <= 1:
      oxyrat_lst = lst[0]
      oxyrat = bitlist2int(oxyrat_lst)
      log.debug(f"oxyrat={oxyrat_lst} => {oxyrat}")
      break
  # CO2 rating: same loop with the minority bit (ties -> 0).
  lst = olst
  tlst = T(olst)
  lstlen = len(lst)
  for i in range(elemlen):
    sm1 = tlst[i].count(1)
    sm0 = lstlen - sm1
    if sm1 >= sm0:
      trg = 0
    else:
      trg = 1
    log.debug(f"co2 iter-#{i} lstlen={lstlen} trg={trg}")
    lst = lst |> filter$(-> _[i] == trg) |> list
    tlst = T(lst)
    lstlen = len(lst)
    if lstlen <= 1:
      co2rat_lst = lst[0]
      co2rat = bitlist2int(co2rat_lst)
      log.debug(f"co2rat={co2rat_lst} => {co2rat}")
      break
  return oxyrat * co2rat
expected = 230
log.setLevel(logging.INFO)
result = solve_d03pt2(tests)
aoc.assert_msg("test solve day 3 part 2", result == expected)
# -
out = solve_d03pt2(ins)
f"day 3 part 2 output: {out}" |> print
# ## Day 4: Giant Squid
print("Day 4, bingo!")
tests = """
7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
""".strip()
"Day 4 TBD, see python solution" |> print
# ## Day 5: Hydrothermal Venture
"Day 5" |> print
tests = """
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
""".strip()
# +
def solve_d05pt1(ins):
  """Solve Day 5 part 1: count grid points covered by >= 2 axis-parallel lines."""
  # Parse "x1,y1 -> x2,y2" into [[x1,y1],[x2,y2]] int pairs.
  lines = ins.splitlines() |> map$(-> _.split(" -> ")) |> list
  for lnidx in range(len(lines)):
    for ptidx in range(2):
      lines[lnidx][ptidx] = lines[lnidx][ptidx].split(",") |> map$(int) |> list
  log.trace(lines)
  # Sparse grid: point -> number of lines covering it.
  grid = defaultdict(int)
  for line in lines:
    ptst, pten = line
    if ptst[0] == pten[0]: # vertical (constant x)
      log.trace(f"horiz line {line}")
      x = ptst[0]
      for y in range(min(ptst[1], pten[1]), max(ptst[1], pten[1])+1):
        grid[tuple([x,y])] += 1
    elif ptst[1] == pten[1]: # horizontal (constant y)
      log.trace(f"vert line {line}")
      y = ptst[1]
      for x in range(min(ptst[0], pten[0]), max(ptst[0], pten[0])+1):
        grid[tuple([x,y])] += 1
    else:
      # Diagonals are ignored in part 1.
      pass
  log.trace(grid)
  intersects = grid.values() |> filter$(-> _ > 1) |> list |> len
  log.debug(f"intersects-#={intersects}")
  return intersects
expected = 5
log.setLevel(logging.INFO)
result = solve_d05pt1(tests)
aoc.assert_msg("test solve day 5 part 1", result == expected)
# -
ins = aoc.read_file_to_str("./in/day05.in")
out = solve_d05pt1(ins)
f"day 5 part 1 output: {out}" |> print
"Day 5 part 2" |> print
# +
def scan_grid(lines):
grid = defaultdict(int)
for line in lines:
ptst, pten = line
if ptst[0] == pten[0] and ptst[1] == pten[1]: # point
grid[tuple(ptst)] += 1
elif ptst[0] == pten[0]: # horiz
log.trace(f"horiz line {line}")
x = ptst[0]
for y in range(min(ptst[1], pten[1]), max(ptst[1], pten[1])+1):
grid[tuple([x,y])] += 1
elif ptst[1] == pten[1]: # vert
log.trace(f"vert line {line}")
y = ptst[1]
for x in range(min(ptst[0], pten[0]), max(ptst[0], pten[0])+1):
grid[tuple([x,y])] += 1
else: # diagonal
line = sorted(line, key=lambda it: it[0])
ptst, pten = line
if ptst[1] > pten[1]:
ystp = -1
else:
ystp = 1
log.trace(f"diag line {line}")
y = ptst[1]
for x in range(ptst[0], pten[0]+1):
log.trace(f"set diag line-pt {[x,y]}")
grid[tuple([x,y])] += 1
y += ystp
log.trace(grid)
return grid
def solve_d05pt2(ins):
lines = ins.splitlines() |> map$(-> _.split(" -> ")) |> list
for lnidx in range(len(lines)):
for ptidx in range(2):
lines[lnidx][ptidx] = lines[lnidx][ptidx].split(",") |> map$(int) |> list
log.trace(lines)
grid = scan_grid(lines)
intersects = grid.values() |> filter$(-> _ > 1) |> list |> len
log.debug(f"intersects-#={intersects}")
#log.debug(f"grid={grid}")
return intersects
expected = 12
log.setLevel(logging.INFO)
result = solve_d05pt2(tests)
aoc.assert_msg("test solve day 5 part 2", result == expected)
# -
out = solve_d05pt2(ins)
f"day 5 part 2 output: {out}" |> print
# ## Day 6: Lanternfish
# DE: [<NAME>](https://de.wikipedia.org/wiki/Nikolaus_von_Myra) \
# EN: [Saint Nicholas](https://en.wikipedia.org/wiki/Saint_Nicholas) \
# ES: [<NAME>](https://es.wikipedia.org/wiki/Nicol%C3%A1s_de_Bari)
"Day 6" |> print
tests = "3,4,3,1,2"
# +
def solve_d06pt1(ins, n):
    """Lanternfish, "brute force": every fish is its own list entry.

    Population roughly doubles every 7 steps, so this is fine for n=80 but
    hopeless for n=256 — see solve_d06pt2 for the O(1)-per-step variant.
    """
    cells = ins.split(",") |> map$(int) |> list
    log.trace(f"lfish={cells}")
    log.debug(f"lfish-num={len(cells)}")
    tms = time.time()
    for itr in range(n):
        log.debug(f"iter-#{itr} took {round(time.time()-tms, 2)}s len={len(cells)}")
        toappend = 0
        for idx in range(len(cells)):
            # Coconut case/match: timer 0 -> respawn at 6 and queue a new fish.
            case cells[idx]:
                match 0:
                    toappend += 1
                    cells[idx] = 6
            else:
                cells[idx] -= 1
        cells = cells + [8] * toappend
        log.trace(f"iter#{itr} lfish={cells}")
    log.info(f"End iter-#{itr} took {round(time.time()-tms, 2)}s len={len(cells)}")
    return len(cells)
expected = [[18, 26], [80,5934]]
log.setLevel(logging.INFO)
for e in expected:
    n, expct = e
    result = solve_d06pt1(tests, n)
    aoc.assert_msg(f"test solve day 6 part 1; expect fish={expct} <= n={n} found {result}", result == expct)
# -
ins = aoc.read_file_to_str("./in/day06.in")
out = solve_d06pt1(ins, 80)
f"day 6 part 1 output: {out}" |> print
"Day 6 part 2" |> print
def solve_d06pt2(ins, n):
    """Lanternfish, just keep counting cellstates-number.

    Keeps only a 9-slot histogram (timer value -> fish count), so one step is
    O(1) no matter how large the population grows.
    """
    tms = time.time()
    cells = ins.split(",") |> map$(int) |> list
    log.debug(f"lfish-num={len(cells)}")
    cellsts = []
    for idx in range(9):
        cellsts.append(cells.count(idx))
    log.debug(f"initial cellstates={cellsts}")
    for itr in range(n):
        # Timer-0 fish respawn at 6 and each spawns a fresh fish at 8.
        toappend, cellsts = cellsts[0], cellsts[1:]
        assert len(cellsts) == 8, f"len={len(cellsts)}"
        cellsts[6] += toappend
        cellsts = cellsts + [toappend]
        if itr % 10 == 0:
            #log.info(f"iter-#{itr} count={sum(cellsts)}. cellstates={cellsts}. took {int(time.time()-tms)}s")
            log.debug(f"iter-#{itr} count={sum(cellsts)}. took {int(time.time()-tms)}s")
    log.info(f"iter-#{itr} ends. took {round(time.time()-tms, 4)}s")
    return sum(cellsts)
expected = [[256, 26984457539]]
log.setLevel(logging.INFO)
for e in expected:
    n, expct = e
    result = solve_d06pt2(tests, n)
    # Fixed: the assertion message said "part 1" although this checks part 2.
    aoc.assert_msg(f"test solve day 6 part 2; expect fish={expct} <= n={n} found {result}", result == expct)
out = solve_d06pt2(ins, 256)
f"day 6 part 2 output: {out}" |> print
# ## Day 7: The Treachery of Whales
"Day 7" |> print
# Sample input: horizontal crab positions.
tests = "16,1,2,0,4,2,7,1,2,14"
# +
def solve_d07pt1(ins):
    """Day 7 pt1 solution: minimal total fuel, one fuel unit per step.

    Brute-forces every candidate target position between min and max.
    """
    ins = ins.split(",") |> map$(int) |> list
    mn, mx = min(ins), max(ins)
    fc_min = 1e20  # sentinel "infinity"
    for trg in range(mn, mx+1):
        fc = 0
        for xpos in ins:
            fc += abs(xpos - trg)
        if fc < fc_min:
            log.debug(f"pos={trg}, new min fuelcost={fc}")
            fc_min = fc
    return fc_min
expected = 37
log.setLevel(logging.INFO)
result = solve_d07pt1(tests)
# Fixed: the message interpolated `expct`, a stale name left over from the
# day 6 loop, instead of this cell's `expected`.
aoc.assert_msg(f"test solve day 7 part 1; expected={expected}, found {result}", result == expected)
# -
ins = aoc.read_file_to_str("./in/day07.in")
out = solve_d07pt1(ins)
f"day 7 part 1 output: {out}" |> print
# +
def solve_d07pt2(ins):
    """Day 7 pt2 solution: minimal fuel with triangular (1+2+...+d) step costs.

    Improvement: the distance-cost table is built with a running total instead
    of re-summing a prefix for every distance (was O(max^2) in table size).
    """
    ins = ins.split(",") |> map$(int) |> list
    mn, mx = min(ins), max(ins)
    fc_min = 1e20  # sentinel "infinity"
    cst = 0
    seq = []
    for idx in range(0, mx+1): # cost of the idx-th single step
        seq.append(cst)
        cst += 1
    costseq = []  # costseq[d] == sum(seq[0:d+1]) == d*(d+1)//2
    total = 0
    for idx in range(0, mx+1): # running prefix sum: cost for a full distance
        total += seq[idx]
        costseq.append(total)
    log.debug(seq)
    for trg in range(mn, mx+1):
        fc = 0
        fclst = []
        for xpos in ins:
            dist = abs(xpos - trg)
            fclst.append(costseq[dist])
            fc += costseq[dist]
        log.debug(f"  pos={trg}, fuelcosts={fclst}")
        if fc < fc_min:
            log.debug(f"pos={trg}, new min fuelcost={fc}")
            fc_min = fc
    return fc_min
expected = 168
log.setLevel(logging.INFO)
result = solve_d07pt2(tests)
aoc.assert_msg(f"test solve day 7 part 2; expected={expected}, found {result}", result == expected)
# -
out = solve_d07pt2(ins)
f"day 7 part 2 output: {out}" |> print
# ## Day 8: Seven Segment Search
"Day 8" |> print
# +
# Reference art: which segments each digit 0-9 lights up.
segments = """
0: 1: 2: 3: 4:
aaaa .... aaaa aaaa ....
b c . c . c . c b c
b c . c . c . c b c
.... .... dddd dddd dddd
e f . f e . . f . f
e f . f e . . f . f
gggg .... gggg gggg ....
5: 6: 7: 8: 9:
aaaa aaaa aaaa aaaa aaaa
b . b . . c b c b c
b . b . . c b c b c
dddd dddd .... dddd dddd
. f e f . f e f . f
. f e f . f e f . f
gggg gggg .... gggg gggg
""".strip()
# Digit -> lit segments, keyed in numeric order.
d = {
    0: ["a","b","c","e","f","g"],
    1: ["c","f"],
    2: ["a","c","d","e","g"],
    3: ["a","c","d","f","g"],
    4: ["b","c","d","f"],
    5: ["a","b","d","f","g"],
    6: ["a","b","d","e","f","g"],
    7: ["a","c","f"],
    8: ["a","b","c","d","e","f","g"],
    9: ["a","b","c","d","f","g"],
}
# Same mapping, ordered by how constrained each digit is (handy for deduction).
d2 = {
    1: ["c","f"],
    7: ["a","c","f"],
    4: ["b","c","d","f"],
    2: ["a","c","d","e","g"],
    3: ["a","c","d","f","g"],
    5: ["a","b","d","f","g"],
    0: ["a","b","c","e","f","g"],
    6: ["a","b","d","e","f","g"],
    9: ["a","b","c","d","f","g"],
    8: ["a","b","c","d","e","f","g"],
}
# +
tests1 = """acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf""".strip()
tests = """
be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce
""".strip()
# +
def solve_d08pt1(ins):
    """Day 8 pt1: count output digits rendered with a unique segment count.

    Digits 1, 4, 7 and 8 light 2, 4, 3 and 7 segments respectively, so any
    output word with one of those lengths is identified unambiguously.
    """
    unique_lens = {2, 3, 4, 7}
    outputs = [line.split(" | ")[1] for line in ins.splitlines()]
    count = 0
    for output in outputs:
        count += sum(1 for word in output.split(" ") if len(word) in unique_lens)
    log.trace([outputs, count])
    return count
expected = 26
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
result = solve_d08pt1(tests)
aoc.assert_msg(f"test solve day 8 part 1; expected={expected}, found {result}", result == expected)
# -
ins = aoc.read_file_to_str("./in/day08.in")
out = solve_d08pt1(ins)
f"day 8 part 1 output: {out}" |> print
# +
def solve_d08pt2(ins):
    """Day 8 pt2 solution."""
    # Stub: the full segment-wiring deduction was never implemented here.
    return None
expected1 = 5353
expected = 61229
#log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
#result = solve_d08pt2(tests1)
#result = solve_d08pt2(tests)
#aoc.assert_msg(f"test solve day 7 part 2; expected={expected}, found {result}", result == expected)
"Day 8 pt2 solution - TBD." |> print
# -
# ## Day 9: Smoke Basin
# +
"Day 9" |> print
tests = """
2199943210
3987894921
9856789892
8767896789
9899965678
""".strip()
# +
def solve_d09pt1(ins):
    """Day 9 pt1 solution: sum of (height + 1) over all local minima.

    A cell is a local minimum when strictly lower than all in-bounds
    4-neighbours; aoc.get_neighbors presumably yields "" for boundary slots
    (hence the filter) — TODO confirm against the aoc helper module.
    """
    ins = ins.splitlines() |> map$(-> list(_)) |> list
    xdim , ydim = len(ins[0]), len(ins)
    for y in range(ydim):
        for x in range(xdim):
            ins[y][x] = int(ins[y][x])
    log.debug(f"grid[{xdim},{ydim}]...!")
    log.trace(f"grid[{xdim},{ydim}]")
    locminsum = 0
    for y in range(ydim):
        for x in range(xdim):
            pt = ins[y][x]
            nbs = aoc.get_neighbors(ins, x, y) |> filter$(-> _ != "") |> list
            #log.trace(f"pt[{x},{y}]={pt} neibs={nbs}")
            #all(flag == 0 for (_, _, flag) in items)
            if pt < min(nbs):
                log.trace(f"pt[{x},{y}]={pt} IS locmin, neibs={nbs}")
                locminsum += pt+1
    log.info(f"rc locminsum={locminsum}")
    return locminsum
expected = 15
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
result = solve_d09pt1(tests)
# NOTE(review): the message below mislabels this as day 8; it checks day 9 part 1.
aoc.assert_msg(f"test solve day 8 part 1; expected={expected}, found {result}", result == expected)
# -
ins = aoc.read_file_to_str("./in/day09.in")
out = solve_d09pt1(ins)
f"day 9 part 1 output: {out}" |> print
# +
"Day 9 part 2" |> print
def get_locmins(ins):
    """Return the (x, y) coordinates of all local minima of the int grid."""
    xdim , ydim = len(ins[0]), len(ins)
    locmins = []
    for y in range(ydim):
        for x in range(xdim):
            pt = ins[y][x]
            nbs = aoc.get_neighbors(ins, x, y) |> filter$(-> _ != "") |> list
            #log.trace(f"pt[{x},{y}]={pt} neibs={nbs}")
            if pt != "" and pt < min(nbs):
                log.trace(f"pt[{x},{y}]={pt} IS locmin, neibs={nbs}")
                locmins.append( tuple([x,y]) )
    log.debug(f"rc locmins={locmins}")
    return locmins
def get_neib_coords(strlist: List[str], x: int, y: int) -> list:
    """Return the in-bounds 4-neighbourhood of cell (x, y) as (x, y) tuples.

    Fixed documentation: the old docstring claimed a list of strings with ''
    placeholders, but the function actually returns coordinate tuples and
    simply omits out-of-bounds neighbours (so the list has 2-4 entries).
    Only len() of `strlist` and its first row is used, so any 2-d grid works.
    """
    max_y_idx = len(strlist) - 1
    max_x_idx = len(strlist[0]) - 1
    nbs = []
    if y > 0:
        nbs.append(tuple([x, y-1]))  # up
    if x > 0:
        nbs.append(tuple([x-1, y]))  # left
    if y < max_y_idx:
        nbs.append(tuple([x, y+1]))  # down
    if x < max_x_idx:
        nbs.append(tuple([x+1, y]))  # right
    return nbs
def solve_d09pt2(ins):
    """Day 9 pt2 solution: product of the sizes of the 3 largest basins.

    Height-9 cells separate basins; each basin is flood-filled (BFS) from one
    of the local minima found by get_locmins.
    """
    ins = ins.splitlines() |> map$(-> list(_)) |> list
    xdim , ydim = len(ins[0]), len(ins)
    for y in range(ydim):
        for x in range(xdim):
            val = int(ins[y][x])
            if val < 9:
                ins[y][x] = val
            else:
                ins[y][x] = 9
    log.debug(f"grid[{xdim},{ydim}]...!")
    log.trace(f"grid[{xdim},{ydim}]")
    basinsize_lst = []
    for lmin in get_locmins(ins):
        basin = set()
        to_scan = [lmin]  # BFS frontier
        minval = ins[lmin[1]][lmin[0]]
        while(len(to_scan)>0):
            pt = to_scan[0]
            basin.add(pt)
            to_scan = to_scan[1:]
            x,y = pt[0], pt[1]
            ptval = ins[y][x]
            nb_coords = get_neib_coords(ins, x, y)
            nb_coords = nb_coords |> filter$(-> _ not in basin and ins[_[1]][_[0]] != "" and ins[_[1]][_[0]] < 9) |> list
            nb_coords = nb_coords |> filter$(-> _ not in to_scan) |> list
            to_scan += nb_coords
            log.trace(f"trg-pt[{x},{y}]={ptval} len(toscan)={len(to_scan)}, nb-coords={nb_coords}")
        log.debug(f"lmin-pt[{lmin}]={minval} basin-len={len(basin)}")
        basinsize_lst.append(len(basin))
    basinsize_lst = sorted(basinsize_lst)
    log.debug(f"basinsize_lst={basinsize_lst}, {basinsize_lst[-3:]}")
    return reduce((*),basinsize_lst[-3:])  # (*) is Coconut's multiplication-as-function
expected = 1134
#log.setLevel(logging.DEBUG)
log.setLevel(logging.INFO)
result = solve_d09pt2(tests)
# NOTE(review): the message below mislabels this as day 8; it checks day 9 part 2.
aoc.assert_msg(f"test solve day 8 part 1; expected={expected}, found {result}", result == expected)
# -
out = solve_d09pt2(ins)
f"day 9 part 2 output: {out}" |> print
# ## Day 10: Syntax Scoring
# +
"Day 10" |> print
tests = """
[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]
""".strip()
# +
def solve_d10pt1(ins):
    """Day 10 pt1: total syntax-error score over the corrupted lines.

    Each line is reduced by repeatedly deleting adjacent matched pairs; if any
    closing bracket survives, the first one left after stripping the remaining
    openers is the offending character and its penalty is added to the total.
    """
    pair_strs = ["()", "[]", "{}", "<>"]
    closers = [")", "]", "}", ">"]
    penalty = {")": 3, "]": 57, "}": 1197, ">": 25137}
    total = 0
    for line in ins.splitlines():
        # Cancel matched bracket pairs until no adjacent pair remains.
        while any(p in line for p in pair_strs):
            for p in pair_strs:
                line = line.replace(p, "")
        if not any(c in line for c in closers):
            continue  # merely incomplete, not corrupted
        # Drop leftover openers; the first surviving closer breaks the line.
        for opener in "([{<":
            line = line.replace(opener, "")
        total += penalty[line[0]]
    return total
expected = 26397
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
result = solve_d10pt1(tests)
# NOTE(review): the message below mislabels this as day 8; it checks day 10 part 1.
aoc.assert_msg(f"test solve day 8 part 1; expected={expected}, found {result}", result == expected)
# -
ins = aoc.read_file_to_str("./in/day10.in")
out = solve_d10pt1(ins)
f"day 10 part 1 output: {out}" |> print
# +
def solve_d10pt2(ins):
    """Day 10 pt2: median autocomplete score over the incomplete lines.

    Corrupted lines are discarded. For each incomplete line the unmatched
    openers are walked from the inside out, accumulating the completion score
    in base 5; the puzzle guarantees an odd number of incomplete lines, so the
    middle element of the sorted scores is the true median.
    """
    pair_strs = ["()", "[]", "{}", "<>"]
    closers = [")", "]", "}", ">"]
    char_score = {")": 1, "]": 2, "}": 3, ">": 4}
    close_of = {"(": ")", "[": "]", "{": "}", "<": ">"}
    totals = []
    for line in ins.splitlines():
        while any(p in line for p in pair_strs):
            for p in pair_strs:
                line = line.replace(p, "")
        if any(c in line for c in closers):
            continue  # corrupted line, ignored in part 2
        line_total = 0
        for opener in reversed(line):
            line_total = line_total * 5 + char_score[close_of[opener]]
        totals.append(line_total)
    return sorted(totals)[len(totals) // 2]
expected = 288957
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
result = solve_d10pt2(tests)
# NOTE(review): the message below mislabels this as day 8; it checks day 10 part 2.
aoc.assert_msg(f"test solve day 8 part 1; expected={expected}, found {result}", result == expected)
# -
out = solve_d10pt2(ins)
f"day 10 part 2 output: {out}" |> print
# ## Day 11: Dumbo Octopus
# +
"Day 11" |> print
# Small 5x5 sample from the puzzle description.
tests1 = """
11111
19991
19191
19991
11111
""".strip()
tests = """
5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
""".strip()
# -
class Grid:
    """Grid as 2d list-of-list of chars"""
    def __init__(self, los):
        # los: list of strings, one per row.
        self.grid = los |> map$(list) |>list
        self.dim = [len(los[0]), len(los)]
        self.iter = 0
        self.md = {}  # free-form metadata, shown in __repr__
        self.init_nb_tns()
    def __repr__(self):
        return f"Grid[{self.dim}] iter={self.iter} md={self.md}"
    def show(self):
        """Render the grid back to a printable multi-line string."""
        #aoc.represent_strlist(self.grid)
        los = self.grid |> map$(-> str.join("", _))
        return "grid-show:\n" + str.join("\n", los)
    def init_nb_tns(self):
        """Precompute the 8 neighbour offsets (incl. diagonals)."""
        lst = []
        for x in range(-1, 2):
            for y in range(-1, 2):
                if (x,y) == (0,0):
                    continue
                else:
                    lst.append( (x,y) )
        self.nb_tns = lst
# +
def raise1(g, coord):
    """Bump the octopus at `coord` (x, y) by one energy level.

    Returns True exactly when the cell rolls over from 9 and flashes (it is
    then marked "a"); a cell already marked "a" this step is left untouched.
    """
    col, row = coord
    cell = g.grid[row][col]
    if cell == "a":
        return False
    level = int(cell)
    if level == 9:
        g.grid[row][col] = "a"
        return True
    g.grid[row][col] = str(level + 1)
    return False
def raise_neibs_en(g, coord):
    """Raise the energy of all 8 neighbours of `coord`.

    Returns the list of neighbour coordinates that newly flashed.

    Fixed: the result list was being *called* (`new_to_flash(neib)`, a
    TypeError at runtime) instead of appended to, so newly flashed neighbours
    were never collected.
    """
    neibs = g.nb_tns |> map$(->(_[0]+coord[0], _[1]+coord[1])) |> list
    neibs = neibs |> filter$(-> _[0] >= 0 and _[0] < g.dim[0]) |> list
    neibs = neibs |> filter$(-> _[1] >= 0 and _[1] < g.dim[1]) |> list
    log.debug(f"neibs={neibs}")
    new_to_flash = []
    for neib in neibs:
        if raise1(g, neib):
            log.debug(f"raise_neibs_en: new2flash={neib}")
            new_to_flash.append(neib)
    return new_to_flash
def raise_en(g):
    # NOTE(review): this day-11 step routine is unfinished (the notebook later
    # prints "Day 11 TBD"): `lastflashed` is bound to the whole list rather
    # than a single coordinate before being passed to raise_neibs_en, the
    # trailing `break` aborts the while loop after one pass, and the per-step
    # flash reset/counting is incomplete. Kept byte-identical.
    gg = g.grid
    flashed = []
    flashs_num = 0
    init = True
    while(len(flashed)>0 or init):
        if init:
            init = False
        else:
            lastflashed = flashed
            flashed = flashed[1:]
            flashed += raise_neibs_en(g, lastflashed)
        for x in range(g.dim[0]):
            for y in range(g.dim[1]):
                coord = (x,y)
                if coord == (0,0):
                    flashed = []
                if raise1(g, coord):
                    log.debug(f"flashed {(x,y)}")
                    flashed.append((x,y))
                    flashs_num += 1
        break
def solve_d11pt1(ins):
    """Day 11 pt1 solution."""
    # NOTE(review): incomplete - runs raise_en twice for eyeballing only; the
    # required 100-step flash count (expected 1656 on the sample) is missing.
    g = Grid(ins.splitlines())
    #g = g.iterate(ins)
    log.info(f"initial: {g}")
    log.info(g.show())
    log.debug(f" tns: {g.nb_tns}")
    raise_en(g)
    log.info(g.show())
    raise_en(g)
    log.info(g.show())
    return None
expected = 1656
n = 100
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
#result = solve_d11pt1(tests)
#aoc.assert_msg(f"test solve day 11 part 1; expected={expected}, found {result}", result == expected)
"Day 11 TBD" |> print
# ## Day 12: Passage Pathing
# +
"Day 12" |> print
tests = """
start-A
start-b
A-c
A-b
b-d
A-end
b-end
""".strip()
tests2 = """
dc-end
HN-start
start-kj
dc-start
dc-HN
LN-dc
HN-end
kj-sa
kj-HN
kj-dc
""".strip()
# +
import networkx as nx # Graph theory module
import matplotlib.pyplot as plt
def solve_d12pt1(ins):
    """Day 12 pt1 solution."""
    # NOTE(review): exploratory only ("Day 12 TBD" below) - builds the cave
    # graph and draws it, but the small-cave revisit rule and the actual path
    # counting are not implemented yet.
    ins = ins.splitlines()
    g = nx.DiGraph()
    for ln in ins:
        #nd_st, nd_en = ln.split("-")
        nodes = ln.split("-")
        if "start" in nodes:
            other = nodes |> filter$(-> not _ == "start") |> list |> .[0]
            g.add_edge("start", other)
        elif "end" in nodes:
            other = nodes |> filter$(-> not _ == "end") |> list |> .[0]
            g.add_edge(other, "end")
        else:
            # Interior caves are traversable in both directions.
            g.add_edge(nodes[0], nodes[1])
            g.add_edge(nodes[1], nodes[0])
    log.info(f"G:: nodes={g.nodes} edges={g.edges}")
    p = nx.shortest_path(g, source="start", target="end")
    log.info(f"G:: shortest_path={p}")
    ps = nx.all_simple_paths(g, source="start", target="end") |> list
    log.info(f"G:: all_simple_paths={ps}")
    log.info(f"G:: nodes_with_selfloops={list(nx.nodes_with_selfloops(g))}")
    log.info(f"G:: simple_cycles={list(nx.simple_cycles(g))}")
    #ps = nx.all_paths(g, source="A", target="A") |> list
    #log.info(f"G:: all_simple_paths[A->A]={ps}")
    #nx.draw_networkx(g)
    #nx.draw_networkx(g, pos=nx.circular_layout(g))
    #nx.draw_networkx(g, pos=nx.random_layout(g))
    #nx.draw_networkx(g, pos=nx.spiral_layout(g))
    #nx.draw_networkx(g, pos=nx.spring_layout(g)) # no edge overlaps !
    #nx.draw_networkx(g, pos=nx.planar_layout(g))
    #nx.draw_networkx(g, pos=nx.kamada_kawai_layout(g))
    nx.draw_networkx(g, pos=nx.spectral_layout(g))
    plt.show()
    return None
expected = 10
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
#result = solve_d12pt1(tests2)
#aoc.assert_msg(f"test solve day 8 part 1; expected={expected}, found {result}", result == expected)
"Day 12 TBD" |> print
# -
# ## Day 13: Transparent Origami
# +
"Day 13" |> print
# Fixed: restored the blank line separating the dot coordinates from the fold
# commands - solve_d13pt1 parses the input with split("\n\n") and fails
# without it.
tests = """
6,10
0,14
9,10
0,3
10,4
4,11
6,0
6,12
4,1
0,13
10,12
3,4
3,0
8,4
1,10
2,14
8,10
9,0

fold along y=7
fold along x=5
""".strip()
# +
# +
def nda_repr(nda):
    """Render a 0/1 numpy array as a compact dot/hash picture, one row per line."""
    text = str(nda)
    for old, new in ((" ", ""), ("[", ""), ("]", ""), ("0", "."), ("1", "#")):
        text = text.replace(old, new)
    return text
def solve_d13pt1(ins, fold_count=None):
    """Day 13 pt1 solution: fold the transparent dot grid and count dots.

    Parameters:
        ins: raw puzzle input - dot coordinates, a blank line, fold commands.
        fold_count: if given, stop after that many folds (part 1 uses 1).

    Returns the number of distinct dots after the last executed fold.
    """
    backd = np.get_printoptions()  # saved so print options can be restored
    np.set_printoptions(threshold=32_000, linewidth=32_000)
    ins, cmds = ins.split("\n\n")
    ins = ins.splitlines() |> map$(-> (_.split(",") |> map$(int) |> list) ) |> list
    cmds = cmds.splitlines() |> map$(-> (_.replace("fold along ", "")) ) \
        |> map$(-> (_.split("=")) ) |> list
    for cmd in cmds:
        cmd[1] = int(cmd[1])
    maxx = ins |> map$(-> _[0] ) |> list |> max$()
    maxy = ins |> map$(-> _[1] ) |> list |> max$()
    log.debug(f"ins={ins} max=[{maxx}, {maxy}] , cmds={cmds}")
    nda = np.full(shape=[maxy+1, maxx+1], fill_value=0, dtype="i1")
    for cell in ins:
        nda[cell[1], cell[0]] = 1
    log.info("initial MAT\n" + nda_repr(nda))
    log.info("initial MAT\n" + str(nda))
    ct = -1
    for idx, cmd in enumerate(cmds):
        if fold_count is not None and idx >= fold_count:
            log.info(f"break after fold no {fold_count}")
            break
        dim, ofs = cmd
        log.info(f"fold {dim} on {ofs}")
        if dim == "y":
            a = nda[0:ofs,]
            b = nda[ofs+1:,]
            b = np.flipud(b) # flip(axis=0)
        elif dim == "x":
            a = nda[:,0:ofs]
            b = nda[:,ofs+1:]
            b = np.fliplr(b) # flip(axis=1)
        log.debug(f"? a.shape {a.shape} == {b.shape} b.shape ?")
        # Pad the smaller half so both halves can be overlaid.
        # NOTE(review): ndarray.resize refuses to operate on slices/views in
        # some numpy versions ("cannot resize ... does not own its data") -
        # verify, or switch to np.resize/np.pad, before relying on this path.
        if a.shape[0] != b.shape[0]:
            m = max(a.shape[0], b.shape[0])
            log.info(f"reshape y max={m}")
            if a.shape[0] < m:
                a.resize([m, a.shape[1]])
            else:
                b.resize([m, b.shape[1]])
        elif a.shape[1] != b.shape[1]:
            # Fixed: `m` was logged before being assigned (stale or undefined
            # value), and the padding test compared the wrong axis
            # (shape[0] instead of shape[1]).
            m = max(a.shape[1], b.shape[1])
            log.info(f"reshape x max={m}")
            if a.shape[1] < m:
                a.resize([a.shape[0], m])
            else:
                b.resize([b.shape[0], m])
        log.debug(f"? a.shape {a.shape} == {b.shape} b.shape ?")
        assert(a.shape == b.shape)
        c = np.sign(a+b)  # overlay: a dot in either half counts once
        nda = c
        ct = np.sum(c)
        log.info(f"folded {dim} on {ofs}; count={ct}; MAT=\n" + nda_repr(c))
    np.set_printoptions(threshold=backd["threshold"], linewidth=backd["linewidth"])
    return ct
expected = 17
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
#result = solve_d13pt1(tests, fold_count=1)
#aoc.assert_msg(f"test solve day 13 part 1; expected={expected}, found {result}", result == expected)
"Day 13 TBD" |> print
# -
ins = aoc.read_file_to_str("./in/day13.in")
#out = solve_d13pt1(ins)
#f"day 13 part 1 output: {out}" |> print
"Day 13 TBD" |> print
# ## Day 14: Extended Polymerization
# +
"Day 14" |> print
# Fixed: restored the blank line between the polymer template and the rules -
# get_poly/get_polytns parse this sample with split("\n\n").
tests = """
NNCB

CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C
""".strip()
# +
def get_poly(ins, steps=1):
    """Return the explicit polymer chain string after `steps` insertion rounds.

    Naive version: the chain roughly doubles each step, so this is only usable
    for small step counts (see get_polytns for the pair-counting variant).
    """
    ins, cmds = ins.split("\n\n")
    cmds = cmds.splitlines() |> map$(-> _.split(" -> ")) |> list
    log.trace(f"ins={ins} cmds={cmds}")
    d = {}  # pair "AB" -> expansion "AxB"
    for cmd in cmds:
        inps, outps = cmd
        outps = inps[0] + outps + inps[1]
        d[inps] = outps
    log.trace(f" d={d}")
    o = ins  # fix: with steps=0 the old code hit an unbound `o` at the return
    for stepidx in range(steps):
        o = ""
        for idx in range(len(ins)-1):
            pr = ins[idx:idx+2]
            log.trace(f" pair={pr}")
            if pr in d:
                o += d[pr][0:2] # omit last input letter
            else:
                # Fixed: the message lacked its f-prefix, so "{pr}" was logged
                # verbatim instead of the offending pair.
                log.warn(f"pair {pr} not in recipes")
        o += ins[-1]
        log.trace(f"step#{stepidx+1} out={o}")
        ins = o
    return o
def solve_d14pt1(ins, steps=1):
    """Day 14 pt1 solution."""
    # Score = (most common element count) - (least common element count).
    res = get_poly(ins, steps=steps)
    freqs = defaultdict(int)
    for c in res:
        freqs[c] += 1
    rc = max(freqs.values()) - min(freqs.values())
    log.info(f"rc={rc}, freqs={freqs}")
    return rc
expected = [
    [1, "NCNBCHB"],
    [2, "NBCCNBBBCBHCB"],
    [3, "NBBBCNCCNBBNBNBBCHBHHBCHB"],
    [4, "NBBNBNBBCCNBCNCCNBBNBBNBBBNBBNBBCBHCBHHNHCBBCBHCB"],
]
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
for e in expected:
    n, expc = e[0], e[1]
    res = get_poly(tests, steps=n)
    aoc.assert_msg(f"test day 14 get_poly; n={n} expected={expc}, found {res}", res == expc)
# after step 5 it has len 97
# after step 10 it has len 3073
res = get_poly(tests, steps=5)
expc, reslen = 97, len( res )
# NOTE(review): "n={n}" in the next two messages is stale (still 4 from the loop above).
aoc.assert_msg(f"test solve day 14 part 1; n={n} len={expc}, found {reslen}", reslen == expc)
res = get_poly(tests, steps=10)
expc, reslen = 3073, len( res )
aoc.assert_msg(f"test solve day 14 part 1; n={n} len={expc}, found {reslen}", reslen == expc)
n = 10
expc = 1588
rc = solve_d14pt1(tests, steps=10)
aoc.assert_msg(f"test solve day 14 part 1; n={n} len={expc}, found {rc}", rc == expc)
# -
ins = aoc.read_file_to_str("./in/day14.in")
out = solve_d14pt1(ins, steps=10)
f"day 14 part 1 output: {out}" |> print
# +
"Day 14 part 2" |> print
def get_polytns(ins, steps=1):
    """Return a pair -> occurrence-count mapping after `steps` insertion rounds.

    Pair-counting version: only the multiset of adjacent pairs is tracked, so
    40 steps stay cheap (the explicit chain would be astronomically long).
    """
    ins, cmds = ins.split("\n\n")
    cmds = cmds.splitlines() |> map$(-> _.split(" -> ")) |> list
    log.trace(f"ins={ins} cmds={cmds}")
    cmdsd = {}  # pair "AB" with rule AB -> x maps to its two child pairs ["Ax", "xB"]
    for cmd in cmds:
        inps, outp = cmd
        outps = []
        outps.append(inps[0] + outp)
        outps.append(outp + inps[1])
        cmdsd[inps] = outps
    log.trace(f" cmds={cmdsd}")
    pairs = defaultdict(int)
    for idx in range(len(ins)-1):
        pr = ins[idx:idx+2]
        pairs[pr] += 1
    log.trace(f" pairs0={pairs}")
    for idx in range(steps):
        newd = pairs.copy()
        for inpair, ct in pairs.items():
            # NOTE(review): assumes every occurring pair has a rule; a pair
            # without one raises KeyError here (pt1 only logged a warning).
            op1, op2 = cmdsd[inpair]
            log.trace(f" tn: {inpair}:{ct} tn={[op1, op2]}")
            newd[inpair] -= ct
            newd[op1] += ct
            newd[op2] += ct
        pairs = newd
    return pairs
def solve_d14pt2(ins, steps=1):
    """Day 14 pt2 solution (pair-count based)."""
    # Element frequency = number of pairs it starts, plus one for the chain's
    # final character (which never starts a pair).
    lastchar = ins.splitlines()[0][-1]
    res = get_polytns(ins, steps=steps)
    log.trace(f"get_polytns res={res} last-c={lastchar}")
    freqs = defaultdict(int)
    for pair, ct in res.items():
        p1, p2 = pair
        freqs[p1] += ct
        #freqs[p2] += ct # omit doubles
    freqs[lastchar] += 1 # fix frequencies, add last letter in chain
    log.trace(f" freqs={freqs}")
    rc = max(freqs.values()) - min(freqs.values())
    return rc
rc = solve_d14pt2(tests, steps=0)
rc = solve_d14pt2(tests, steps=1)
#print("NCNBCHB")
rc = solve_d14pt2(tests, steps=2)
#print("NBCCNBBBCBHCB")
#rc = solve_d14pt2(tests, steps=2)
n = 10
expc = 1588
rc = solve_d14pt2(tests, steps=n)
aoc.assert_msg(f"test solve day 14 part 2; n={n} len={expc}, found {rc}", rc == expc)
rc = solve_d14pt2(ins, steps=10)
log.info(f"redo pt1={rc}")
out = solve_d14pt2(ins, steps=40)
f"day 14 part 2 output: {out}" |> print
| aoc_2021_coco.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# +
from importlib import reload
import pygl.scene as glscene
reload(glscene)
import pygl.util as glutil
reload(glutil)
import pygl.points as glpoints
reload(glpoints)
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.rc("font", size=14)
from robustfpm.pricing import EllipseHandler
from pygl import *
msize=30
# Demo scene: a grid, an ellipse-projected scatter, and assorted patch objects.
g = Grid(0.1)
xlim=[-2.1, 2.1]
ylim=[-2.1, 2.1]
sc = Scene(xlim=xlim, ylim=ylim, legend_loc='lower right', override_conf={'background':'w'})
sc.append(g, xlim=xlim, ylim=ylim, label='Grid', c='lightgrey', s=msize)
# Covariance-ellipse handler: std devs 0.8 / 1.5, correlation 0.6.
sg1 = 0.8
sg2 = 1.5
rho = 0.6
K = EllipseHandler([0,0], np.array([[sg1**2, rho*sg1*sg2], [rho*sg1*sg2, sg2**2]]))
sc.append(g.map2x(K.project(g)), label='Scatter', c='k', s=msize)
po = PathPatchObject([[0,0], [1,1], [0,1], [2,0]], is_convex=True, facecolor='salmon', alpha=0.8, edgecolor='none', label='Path')
sc.append(po)
po = EllipsePatchObject([-1, -0.5], [0.3, 0.56], angle=30, facecolor='violet', alpha=0.8, edgecolor='none', label='Ellipse')
sc.append(po)
po = RectanglePatchObject([-2, -1.8], 0.2, 3, angle=-70, facecolor='gold', alpha=0.8, edgecolor='none', label='Rectangle')
sc.append(po)
po = LineObject([[-2,0.8], [-1.7, 1.5], [-1.4, 0.8], [-2, 1.2], [-1.4, 1.2], [-2,0.8]], label='Line', c='r', alpha=0.8)
sc.append(po)
sc.show()
# -
| examples/pygl_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gensim tests
# #### Inspired by https://radimrehurek.com/gensim/tutorial.html
import os
import logging
from collections import defaultdict
from pprint import pprint # pretty-printer
from six import iteritems
from gensim import corpora, models, similarities
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# ## Corpora and Vector Spaces
# +
# Original toy corpus from the gensim tutorial, kept for reference:
# documents = ["Human machine interface for lab abc computer applications",
# "A survey of user opinion of computer system response time",
# "The EPS user interface management system",
# "System and human system engineering testing of EPS",
# "Relation of user perceived response time to error measurement",
# "The generation of random binary unordered trees",
# "The intersection graph of paths in trees",
# "Graph minors IV Widths of trees and well quasi ordering",
# "Graph minors A survey"]
# One document per line of the articles file.
documents = open("../data/articles.txt", "r").read().splitlines()
# -
for i, s in enumerate(open("../data/sources.txt", "r").read().splitlines()):
    print(i, s)
# +
# remove common words and tokenize
# Fixed: adjacent string literals concatenate with no separator, so the last
# word of a line used to fuse with the first word of the next ("...t w" +
# "- *" produced the bogus token "w-"); a trailing space restores "w" and "-"
# as separate stop words.
stoplist = set("for a an of the and to in on by from but at as or so it with "
               "this that those these there "
               "i you he she we they me him my your his her its our their m mr ms dr jr "
               "s d re do did got is no b f j k l t w "
               "- *".split())
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]
# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1]
         for text in texts]
print(texts)
# -
dictionary = corpora.Dictionary(texts)
dictionary.save('../tmp/articles.dict') # store the dictionary, for future reference
print(dictionary)
# print(dictionary.token2id)
# +
# new_doc = "Human computer interaction"
# new_vec = dictionary.doc2bow(new_doc.lower().split())
# print(new_vec) # the word "interaction" does not appear in the dictionary and is ignored
# -
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('../tmp/articles.mm', corpus) # store to disk, for later use
print(corpus)
# ### Memory efficiency
class MyCorpus(object):
    """Streamed corpus: yields one bag-of-words vector per input line.

    Relies on the module-level `dictionary` built above; the file is never
    loaded into memory in full.
    """
    def __iter__(self):
        for line in open('../data/articles.txt'):
            # assume there's one document per line, tokens separated by whitespace
            yield dictionary.doc2bow(line.lower().split())
corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print(corpus_memory_friendly)
for vector in corpus_memory_friendly: # load one vector into memory at a time
    print(vector)
# +
# collect statistics about all tokens
dictionary = corpora.Dictionary(line.lower().split() for line in open('../data/articles.txt'))
# remove stop words and words that appear only once
# Fixed: the implicitly-concatenated literals had no separating spaces, which
# silently produced fused tokens like "therei", "jrdo" and "gotb" instead of
# the intended stop words; trailing spaces restored on each continued line.
stoplist = set("for a an of the and to in by from at as with this that those these there "
               "i me you he she we they s d re my your his her our their m mr ms dr jr "
               "do did got "
               "b f k l".split())
stop_ids = [dictionary.token2id[stopword] for stopword in stoplist
            if stopword in dictionary.token2id]
once_ids = [tokenid for tokenid, docfreq in iteritems(dictionary.dfs) if docfreq == 1]
dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once
dictionary.compactify() # remove gaps in id sequence after words that were removed
dictionary.save('../tmp/articles.dict') # store the dictionary, for future reference
print(dictionary)
# -
print('\n'.join(sorted([k for k in dictionary.token2id])))
print(dictionary.token2id)
# +
# create a toy corpus of 2 documents, as a plain Python list
corpus = [[(1, 0.5)], []] # make one document empty, for the heck of it
corpora.MmCorpus.serialize('../tmp/corpus.mm', corpus)
# -
corpus = corpora.MmCorpus('../tmp/corpus.mm')
# +
print(corpus)
# one way of printing a corpus: load it entirely into memory
print(list(corpus))
# another way of doing it: print one document at a time, making use of the streaming interface
for doc in corpus:
print(doc)
# -
# ## Topics and Transformations
if (os.path.exists("../tmp/articles.dict")):
dictionary = corpora.Dictionary.load('../tmp/articles.dict')
corpus = corpora.MmCorpus('../tmp/articles.mm')
print("Used files generated from first tutorial")
else:
print("Please run first tutorial to generate data set")
tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
doc_bow = [(0, 1), (1, 1)]
print(tfidf[doc_bow]) # step 2 -- use the model to transform vectors
corpus_tfidf = tfidf[corpus]
for doc in corpus_tfidf:
print(doc)
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=5) # initialize an LSI transformation
corpus_lsi = lsi[corpus_tfidf] # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
for doc in corpus_lsi: # both bow->tfidf and tfidf->lsi transformations are actually executed here, on the fly
print(doc)
lsi.save('../tmp/model.lsi') # same for tfidf, lda, ...
lsi = models.LsiModel.load('../tmp/model.lsi')
# ## Similarity Queries
# ### Load the model
dictionary = corpora.Dictionary.load('../tmp/articles.dict')
corpus = corpora.MmCorpus('../tmp/articles.mm') # comes from the first tutorial, "From strings to vectors"
print(corpus)
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=5)
index = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it
index.save('../tmp/articles.index') # save the index
# ### Similarity query step by step
doc = "trump propaganda"
vec_bow = dictionary.doc2bow(doc.lower().split())
vec_lsi = lsi[vec_bow] # convert the query to LSI space
print(vec_lsi)
index = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it
index.save('../tmp/articles.index')
index = similarities.MatrixSimilarity.load('../tmp/articles.index')
sims = index[vec_lsi] # perform a similarity query against the corpus
print(list(enumerate(sims))) # print (document_number, document_similarity) 2-tuples
sims = sorted(enumerate(sims), key=lambda item: -item[1])
print(sims) # print sorted (document number, similarity score) 2-tuples
# ### Similarity query function
def search_related_articles(phrase):
    """Print all articles ranked by LSI similarity to *phrase*, best match first.

    Relies on the module-level ``dictionary`` and ``lsi`` objects and on the
    similarity index persisted at '../tmp/articles.index'. One line is printed
    per article: its id, similarity as a percentage, and its source.
    """
    # load LSI space index
    index = similarities.MatrixSimilarity.load('../tmp/articles.index')
    # convert the query to LSI space
    vec_bow = dictionary.doc2bow(phrase.lower().split())
    vec_lsi = lsi[vec_bow]
    # perform a similarity query against the corpus
    sims = index[vec_lsi]
    sims = sorted(enumerate(sims), key=lambda item: -item[1])
    # load articles sources and print the results
    # BUGFIX: use a context manager so the file handle is closed deterministically
    # (the original open(...).read() leaked the handle).
    with open("../data/sources.txt", "r") as f:
        sources = f.read().splitlines()
    for (a_id, rating) in sims:
        print("{:02d}: {:.1f}% # {}".format(a_id, rating*100, sources[a_id]))
# Example queries against the indexed article collection.
search_related_articles("trump propaganda")
search_related_articles("health")
search_related_articles("violence")
| src/search_by_theme.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import Image
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import datasets
from sklearn import preprocessing
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score
from six import StringIO
#import pydotplus
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)  # show every column when displaying DataFrames
# %matplotlib inline
# -
import pandas as pd  # NOTE(review): redundant -- pandas is already imported in the cell above
# NOTE(review): absolute machine-local path; only resolves on the author's machine
dsWineMaster = pd.read_csv('C:/Users/Jam/Documents/MAESTRIA/Modelos avanzados de minería de datos/PEC1wine.data')
dsWineMaster.head()
| jmejiasa_PRA2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
# To prevent automatic figure display when execution of the cell ends
# # %config InlineBackend.close_figures=False
# import matplotlib.pyplot as plt
# plt.ion()
import tensorwatch as tw

# Connect to a running TensorWatch server and plot three derived views of the
# 'ev_i' event stream on one live Matplotlib line plot.
cli = tw.WatchClient()
p = tw.mpl.LinePlot(title='Demo')
s1 = cli.create_stream('ev_i', 'map(lambda v:math.sqrt(v.val)*2, l)')
p.add(s1, xtitle='Index', ytitle='sqrt(ev_i)')
s2 = cli.create_stream('ev_i', 'map(lambda v:v.val*2, l)')
p.add(s2, xtitle='Index', ytitle='2ev_i')
s3 = cli.create_stream('ev_i', 'map(lambda v:math.sin(v.val), l)')
p.add(s3, xtitle='Index', ytitle='2ev_i')  # NOTE(review): y-title duplicates s2's; probably meant 'sin(ev_i)'
# Second plot: 'ev_j' stream keeping a bounded history of 15 completed series.
p2 = tw.mpl.LinePlot(title='History Demo')
p2s1 = cli.create_stream('ev_j', 'map(lambda v:(v.val, math.sqrt(v.val)*2), l)')
p2.add(p2s1, xtitle='Index', ytitle='sqrt(ev_j)', clear_after_end=True, history_len=15)
| notebooks/archive/mpl_line_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Variational Inference: Ising Model
# This notebook focuses on Variational Inference (VI) for the Ising model in application to binary image de-noising. The Ising model is an example of a Markov Random Field (MRF) and it originated from statistical physics. The Ising model assumes that we have a grid of nodes, where each node can be in one of two states. In the case of binary images, you can think of each node as being a pixel with a black or white color. The state of each node depends on the neighboring nodes through interaction potentials. In the case of images, this translates to a smoothness constraint, i.e. a pixel prefers to be of the same color as the neighboring pixels. In the image denoising problem, we assume that we have a 2-D grid of noisy pixel observations of an underlying true image and we would like to recover the true image. Thus, we can model the image as a grid:
# <img src="figures/ising_gm.png">
# In the figure above, the shaded nodes are the noisy observations $y_i$ of binary latent variables $x_i \in \{-1, +1\}$. We can write down the joint distribution as follows:
# \begin{equation}
# p(x,y) = p(x)p(y|x) = \prod_{(s,t)\in E} \Psi_{st}(x_s, x_t) \prod_{i=1}^{n}p(y_i|x_i) = \prod_{(s,t)\in E} \exp \{x_s w_{st} x_t \} \prod_{i=1}^{N} N(y_i|x_i, \sigma^2)
# \end{equation}
# where the interaction potentials are represented by $\Psi_{st}$ for every pair of nodes $x_s$ and $x_t$ in a set of edges $E$ and the observations $y_i$ are Gaussian with mean $x_i$ and variance $\sigma^2$. Here, $w_{st}$ is the coupling strength and assumed to be constant and equal to $J>0$ indicating a preference for the same state as neighbors (i.e. potential $\Psi(x_s, x_t) = \exp\{x_s J x_t\}$ is higher when $x_s$ and $x_t$ are both either $+1$ or $-1$).
# The basic idea behind variational inference is to choose an approximating disribution $q(x)$ which is close to the original distribution $p(x)$ where the distance is measured by KL divergence:
# \begin{equation}
# KL(q||p) = \sum_x q(x) \log \frac{q(x)}{p(x)}
# \end{equation}
# This makes inference into an optimization problem in which the objective is to minimize KL divergence or maximize the Evidence Lower BOund (ELBO). We can derive the ELBO as follows:
# \begin{equation}
# \log p(y) = \log \sum_{x} p(x,y) = \log \sum_x \frac{q(x)}{q(x)}p(x,y) = \log E_{q(x)}\big[\frac{p(x,y)}{q(x)} \big] \geq E_{q(x)}\big[\log \frac{p(x,y)}{q(x)} \big] = E_{q(x)}\big[\log p(x,y) \big] - E_{q(x)}\big[\log q(x) \big]
# \end{equation}
# In application to the Ising model, we have:
# \begin{equation}
# \mathrm{ELBO} = E_{q(x)}\big[\log p(x,y) \big] - E_{q(x)}\big[\log q(x) \big] = E_{q(x)}\big[\sum_{(s,t)\in E}x_s w_{st}x_t + \sum_{i=1}^{n} \log N(x_i, \sigma^2) \big] - \sum_{i=1}^{n} E_{q_i(x)}\big[\log q_i(x) \big]
# \end{equation}
# In *mean-field* variational inference, we assume a *fully-factored* approximation q(x):
# \begin{equation}
# q(x) = \prod_{i=1}^{n} q(x_i; \mu_i)
# \end{equation}
# It can be shown [1] that $q(x_i;\mu_i)$ that minimizes the KL divergence is given by:
# \begin{equation}
# q_i(x_i) = \frac{1}{Z_i}\exp \big[E_{-q_i}\{\log p(x) \} \big]
# \end{equation}
# where $E_{-q_i}$ denotes an expectation over every $q_j$ except for $j=i$. To compute $q_i(x_i)$, we only care about the terms that involve $x_i$, i.e. we can isolate them as follows:
# \begin{equation}
# E_{-q_i}\{\log p(x)\} = E_{-q_i}\{x_i \sum_{j\in N(i)} w_{ij}x_j + \log N(x_i,\sigma^2) + \mathrm{const} \} = x_i \sum_{j\in N(i)}J\times \mu_j + \log N(x_i, \sigma^2) + \mathrm{const}
# \end{equation}
# where $N(i)$ denotes the neighbors of node $i$ and $\mu_j$ is the mean of a binary random variable:
# \begin{equation}
# \mu_j = E_{q_j}[x_j] = q_j(x_j=+1)\times (+1) + q_j(x_j=-1)\times (-1)
# \end{equation}
# In order to compute this mean, we need to know the values of $q_j(x_j=+1)$ and $q_j(x_j=-1)$. Let $m_i = \sum_{j\in N(i)} w_{ij}\mu_j$ be the mean value of neighbors and let $L_{i}^{+} = N(x_i=+1; \sigma^2)$ and $L_{i}^{-} = N(x_i=-1; \sigma^2)$, then we can compute the mean as follows:
# \begin{equation}
# q_i(x_i=+1) = \frac{\exp\{m_i + L_{i}^{+}\}}{\exp\{m_i + L_{i}^{+}\} + \exp\{-m_i + L_{i}^{-}\}} = \frac{1}{1+\exp\{-2m_i+L_{i}^{-}-L_{i}^{+}\}} = \frac{1}{1+\exp\{-2 a_i\}} = \sigma(2a_i)
# \end{equation}
# \begin{equation}
# q_i(x_i=-1) = 1 - q_i(x_i=+1) = 1 - \sigma(2a_i) = \sigma(-2a_i)
# \end{equation}
# \begin{equation}
# \mu_i = E_{q_i}[x_i] = \sigma(2a_i) - \sigma(-2a_i) = \tanh(a_i)
# \end{equation}
# where $a_i = m_i + 1/2\big(L_{i}^{+} - L_{i}^{-}\big)$. In other words, our mean-field variational updates of the parameters $\mu_i$ at iteration $k$ are computed as follows:
# \begin{equation}
# \mu_{i}^{(k)} = \tanh \bigg(\sum_{j\in N(i)}w_{ij}\mu_{j}^{(k-1)} + \frac{1}{2}\bigg[\log \frac{N(x_i=+1, \sigma^2)}{N(x_i=-1, \sigma^2)} \bigg] \bigg) \times \lambda + (1-\lambda)\times \mu_{i}^{(k-1)}
# \end{equation}
# where we added a learning rate parameter $\lambda$. The figure below shows the parametric form of our mean-field approximation of the Ising model:
# <img src="figures/ising_vi2.png">
# Now that we derived the variational updates and the ELBO, let's implement this in Python in application to binary image denoising!
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from scipy.special import expit as sigmoid
from scipy.stats import multivariate_normal
np.random.seed(0)  # reproducible noise
sns.set_style('whitegrid')
# -
# Let's load a grayscale (single channel) image, add Gaussian noise and binarize it based on mean threshold. We can then define variational inference parameters such as the coupling strength, noise level, smoothing rate and max number of iterations:
# +
#load data
print "loading data..."  # NOTE: Python 2 print statement (kernel is python2)
data = Image.open('./figures/bayes.bmp')
img = np.double(data)
img_mean = np.mean(img)
img_binary = +1*(img>img_mean) + -1*(img<img_mean)  # binarize to {-1, +1}; pixels exactly at the mean become 0
[M, N] = img_binary.shape
#mean-field parameters
sigma = 2 #noise level
y = img_binary + sigma*np.random.randn(M, N) #y_i ~ N(x_i; sigma^2);
J = 1 #coupling strength (w_ij)
rate = 0.5 #update smoothing rate (damping factor lambda)
max_iter = 15
ELBO = np.zeros(max_iter)  # per-iteration evidence lower bound
Hx_mean = np.zeros(max_iter)  # per-iteration mean entropy of q(x)
# -
#generate plots
plt.figure()
plt.imshow(y)  # noisy observations y = x + N(0, sigma^2)
plt.title("observed noisy image")
plt.savefig('./figures/ising_vi_observed_image.png')
# We can now run variational inference for the Ising model:
# +
#Mean-Field VI
print "running mean-field variational inference..."  # Python 2 print statement
# Per-pixel log-odds of x_i = +1 vs -1 under the Gaussian noise model:
# log N(y_i | +1, sigma^2) - log N(y_i | -1, sigma^2)
logodds = multivariate_normal.logpdf(y.flatten(), mean=+1, cov=sigma**2) - \
          multivariate_normal.logpdf(y.flatten(), mean=-1, cov=sigma**2)
logodds = np.reshape(logodds, (M, N))
#init
p1 = sigmoid(logodds)  # initial q_i(x_i=+1) from the data alone
mu = 2*p1-1 #mu_init  (E[x_i] = 2*p(+1) - 1 for a {-1,+1} variable)
a = mu + 0.5 * logodds
qxp1 = sigmoid(+2*a) #q_i(x_i=+1)
qxm1 = sigmoid(-2*a) #q_i(x_i=-1)
logp1 = np.reshape(multivariate_normal.logpdf(y.flatten(), mean=+1, cov=sigma**2), (M, N))
logm1 = np.reshape(multivariate_normal.logpdf(y.flatten(), mean=-1, cov=sigma**2), (M, N))
# Coordinate-ascent mean-field updates: mu_i <- (1-rate)*mu_i + rate*tanh(a_i),
# accumulating the ELBO terms along the way.
for i in tqdm(range(max_iter)):
    # NOTE(review): `muNew = mu` aliases the same array (no copy), so the pixel
    # updates below are in-place and each pixel sees its neighbors' *updated*
    # values within a sweep (Gauss-Seidel-style rather than Jacobi) --
    # presumably intentional; confirm.
    muNew = mu
    for ix in range(N):
        for iy in range(M):
            pos = iy + M*ix  # column-major (Fortran-order) linear index
            neighborhood = pos + np.array([-1,1,-M,M])  # up, down, left, right
            boundary_idx = [iy!=0,iy!=M-1,ix!=0,ix!=N-1]  # drop neighbors that fall off the grid
            neighborhood = neighborhood[np.where(boundary_idx)[0]]
            xx, yy = np.unravel_index(pos, (M,N), order='F')
            nx, ny = np.unravel_index(neighborhood, (M,N), order='F')
            Sbar = J*np.sum(mu[nx,ny])  # m_i: coupling-weighted sum of neighbor means
            muNew[xx,yy] = (1-rate)*muNew[xx,yy] + rate*np.tanh(Sbar + 0.5*logodds[xx,yy])
            ELBO[i] = ELBO[i] + 0.5*(Sbar * muNew[xx,yy])  # pairwise-energy term of the ELBO
        #end for
    #end for
    mu = muNew
    a = mu + 0.5 * logodds
    qxp1 = sigmoid(+2*a) #q_i(x_i=+1)
    qxm1 = sigmoid(-2*a) #q_i(x_i=-1)
    Hx = -qxm1*np.log(qxm1+1e-10) - qxp1*np.log(qxp1+1e-10) #entropy (1e-10 guards log(0))
    ELBO[i] = ELBO[i] + np.sum(qxp1*logp1 + qxm1*logm1) + np.sum(Hx)  # likelihood + entropy terms
    Hx_mean[i] = np.mean(Hx)
#end for
# -
# Let's plot the mean parameters $\mu_i$ for every pixel to see how well we de-noised the image:
plt.figure()
plt.imshow(mu)  # posterior means mu_i in [-1, 1]; their sign gives the denoised pixel
plt.title("after %d mean-field iterations" %max_iter)
plt.savefig('./figures/ising_vi_denoised_image.png')
# Not bad! By setting a positive coupling strength $w_{ij} = J > 0$, we were able to find the mean parameters for our approximating distribution $q_i(x_i)$ that maximized the ELBO objective and resulted in mostly denoised image. We can visualize the ELBO objective as a function of iterations as follows:
plt.figure()
plt.plot(ELBO, color='b', lw=2.0, label='ELBO')
plt.title('Variational Inference for Ising Model')
plt.xlabel('iterations'); plt.ylabel('ELBO objective')
plt.legend(loc='upper left')
plt.savefig('./figures/ising_vi_elbo.png')
# Notice that the ELBO is monotonically increasing and flattening out after about 10 iterations. To get further insight into de-noising, we can plot the average entropy $\frac{1}{n}\sum_{i=1}^{n}H_q(x_i)$. We expect early entropy to be high due to random initialization, however, as the number of iterations increases, mean-field updates converge on binary values of $x_i$ that are consistent with observations and the neighbors resulting in a decrease in average entropy:
plt.figure()
plt.plot(Hx_mean, color='b', lw=2.0, label='Avg Entropy')
plt.title('Variational Inference for Ising Model')
plt.xlabel('iterations'); plt.ylabel('average entropy')
plt.legend(loc="upper right")
plt.savefig('./figures/ising_vi_avg_entropy.png')
# The 2-D Ising model can be extended in multiple ways, for example: 3-D grids and K-states per node (aka Potts model).
# ### References
# [1] Kevin P. Murphy, "Machine Learning: A Probabilistic Perspective", The MIT Press, 2012
# [2] Erik B. Sudderth, "CS242: Probabilistic Graphical Models", http://cs.brown.edu/courses/cs242/lectures/
#
#
| chp02/mean_field_mrf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/niallomahony93/mxnet/blob/master/Copy_of_knn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="2zAKo5PnIVy7"
# # k-Nearest Neighbor (kNN) exercise
#
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# The kNN classifier consists of two stages:
#
# - During training, the classifier takes the training data and simply remembers it
# - During testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples
# - The value of k is cross-validated
#
# In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.
# + [markdown] id="-2g6EcmyxS4l"
# ## Setup Google Colab Environment
#
# 1. Clone repo to download supporting python files
# 2. Install Python dependencies with pip
# 3. Download CIFAR-10 data set
# + id="4Kez9IGkJjJP" outputId="5c6f7f89-887a-4837-cedc-6366e789cbd9" colab={"base_uri": "https://localhost:8080/", "height": 35}
# cd /content
# + id="BOAxi-KaJyEX" outputId="261eb2ac-57f7-4461-90f2-4d2ef286d046" colab={"base_uri": "https://localhost:8080/", "height": 35}
# !ls
# + id="Zx8FuPw9IygG" outputId="8bc579c3-9406-4787-e529-6845a4a7ad69" colab={"base_uri": "https://localhost:8080/", "height": 107}
# !git clone https://github.com/nholmber/google-colab-cs231n.git
# + id="GAXm6tgkKA24" outputId="791bbbde-9ebe-4109-c26e-76b680374eb8" colab={"base_uri": "https://localhost:8080/", "height": 35}
# %cd google-colab-cs231n/assignment1
# + id="47cAvKi9KKGV" outputId="486d621e-9e56-4412-8630-30e9dc023bd7" colab={"base_uri": "https://localhost:8080/", "height": 233}
# !pip install imageio
# + id="qsacNyLQK1nI" outputId="1702c0f4-0f76-486c-e24b-d10cda0b5499" colab={"base_uri": "https://localhost:8080/", "height": 53}
# %cd cs231n/datasets/
# !ls
# + id="PKtdOUf3yZLQ" outputId="e60c2998-f8af-453c-9bd1-6a9d61cec793" colab={"base_uri": "https://localhost:8080/", "height": 89}
# !more get_datasets.sh
# + id="W7IG5iwcyihM" outputId="f81b5bab-8fd0-458c-e104-f9f336893741" colab={"base_uri": "https://localhost:8080/", "height": 215}
# !./get_datasets.sh
# + id="gcCDvNDjyuhg" outputId="2e80c261-6f96-4688-f607-d11826861523" colab={"base_uri": "https://localhost:8080/", "height": 107}
# %cd ../..
# !ls
# + [markdown] id="bVo6zJtSy0_I"
# ---
# ## Start Building kNN Model
#
# + id="jFY7FQ-iIVy9" outputId="5e264140-b4eb-4911-8848-0fa1e733eddf" colab={"base_uri": "https://localhost:8080/", "height": 53}
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# Define function for inspecting the source code of a function
import inspect
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
def pretty_print(func):
    """Print *func*'s source code to the terminal with syntax highlighting."""
    lines, _ = inspect.getsourcelines(func)
    lexer = PythonLexer()
    formatter = Terminal256Formatter()
    for raw_line in lines:
        print(highlight(raw_line.strip('\n'), lexer, formatter), end='')
    print('')
# + id="tCO9pp8SIVzD" outputId="745a9a83-f072-4c00-b8c2-15a706259d95" colab={"base_uri": "https://localhost:8080/", "height": 89}
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
    del X_train, y_train
    del X_test, y_test
    print('Clear previously loaded data.')
except:
    # Bare except deliberately swallows the NameError raised on the first run,
    # when the variables don't exist yet.
    pass
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# + id="VPLu1uWvIVzM" outputId="58e3811b-b778-48d4-afd3-15abc787c5bc" colab={"base_uri": "https://localhost:8080/", "height": 492}
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
    # Pick samples_per_class random training images belonging to class y.
    idxs = np.flatnonzero(y_train == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        # Grid layout: row = sample index, column = class (1-based subplot index).
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(X_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()
# + id="IqIPW_Z-IVzR"
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# + id="geyAVio_IVzU" outputId="5cb4bbf1-a6e7-4ef5-d673-2945b2f9d7e8" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Reshape the image data into rows (each image flattened to a single vector)
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print(X_train.shape, X_test.shape)
# + id="aehVa4kCIVzb"
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
# + [markdown] id="j7wigEpbIVzf"
# We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
#
# 1. First we must compute the distances between all test examples and all train examples.
# 2. Given these distances, for each test example we find the k nearest examples and have them vote for the label
#
# Lets begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.
#
# First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
# + id="jdbXqlvEIVzg" outputId="a5d97110-0a91-4c6b-ea52-81757cf43f37" colab={"base_uri": "https://localhost:8080/", "height": 449}
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Print out implementation
pretty_print(classifier.compute_distances_two_loops)
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print(dists.shape)  # expected (num_test, num_training) = (500, 5000)
# + id="l5PAbuv6IVzl" outputId="0fef9e18-529b-4eed-b7b8-4a47a8adf83c" colab={"base_uri": "https://localhost:8080/", "height": 108}
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# + [markdown] id="D3WsjZ1SIVzr"
# **Inline Question #1:** Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
#
# - What in the data is the cause behind the distinctly bright rows?
# - What causes the columns?
# + [markdown] id="qXVHwPFSIVzs"
# **Your Answer**:
#
#
# + id="T-th7CV0IVzu" outputId="8264bf40-8314-49d2-ba0e-8baa7db14185" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# + [markdown] id="qtVh-_-UIVzy"
# You should expect to see approximately `27%` accuracy. Now lets try out a larger `k`, say `k = 5`:
# + id="pjmZFD1TIVzz" outputId="c389c62d-1c78-4d81-c45c-6a9d44bb87d4" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Same evaluation as above, but voting over the 5 nearest neighbors.
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# + [markdown] id="Dj7FdOpcIVz4"
# You should expect to see a slightly better performance than with `k = 1`.
# + [markdown] id="nz9odeESIVz5"
# **Inline Question 2**
# We can also use other distance metrics such as L1 distance.
# The performance of a Nearest Neighbor classifier that uses L1 distance will not change if (Select all that apply.):
# 1. The data is preprocessed by subtracting the mean.
# 2. The data is preprocessed by subtracting the mean and dividing by the standard deviation.
# 3. The coordinate axes for the data are rotated.
# 4. None of the above.
#
# *Your Answer*:
#
# *Your explanation*:
#
# + id="S4ryPkdiIVz6" outputId="b090a06d-f9fe-4b8a-b080-77fc22c81576" colab={"base_uri": "https://localhost:8080/", "height": 341}
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
# Verify the partially- and fully-vectorized distance computations against the
# naive two-loop reference using the Frobenius norm of the difference.
# Print out implementation
pretty_print(classifier.compute_distances_one_loop)
# Now run calculation
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
# + id="HTpNWWAqIVz-" outputId="15f028a1-f2d3-4078-ab03-9e9e53ff4672" colab={"base_uri": "https://localhost:8080/", "height": 431}
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
# Print out implementation
pretty_print(classifier.compute_distances_no_loops)
# Now run calculation
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
# + id="lFU6RGkmIV0D" outputId="a007239d-0622-4093-d670-0b0edf0b99e4" colab={"base_uri": "https://localhost:8080/", "height": 71}
# Let's compare how fast the implementations are
def time_function(f, *args):
    """
    Call a function f with args and return the time (in seconds) that it took to execute.

    Uses time.perf_counter() -- a monotonic, high-resolution clock -- instead of
    time.time(), whose wall-clock readings can jump (NTP adjustments) and have
    coarse resolution on some platforms.
    """
    import time
    tic = time.perf_counter()
    f(*args)
    toc = time.perf_counter()
    return toc - tic
# Benchmark the three distance implementations on the same input.
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print('Two loop version took %f seconds' % two_loop_time)
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print('One loop version took %f seconds' % one_loop_time)
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print('No loop version took %f seconds' % no_loop_time)
# you should see significantly faster performance with the fully vectorized implementation
# + [markdown] id="CIqsnCT1IV0I"
# ### Cross-validation
#
# We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
# + id="oEpekQuPIV0K" outputId="1ce94a6e-efa6-45be-f15b-78358b229a06" colab={"base_uri": "https://localhost:8080/", "height": 917}
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]

# Check that training set can be equally divided into num_folds portions
# (np.split below raises otherwise; fail early with a clearer message).
# BUGFIX: the original condition tested `num_training/num_folds % num_folds`,
# i.e. whether the *fold size* is divisible by num_folds, not num_training.
if num_training % num_folds != 0:
    raise ValueError('Number of training examples not evenly divisible by number of folds.')

# Split training set into num_folds equally sized folds.
X_train_folds = np.split(X_train, num_folds)
y_train_folds = np.split(y_train, num_folds)

# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] is a list of length num_folds giving the different
# accuracy values found when using that value of k. All keys are initialized
# up front so the inner loop below can append for any k.
k_to_accuracies = {k: [] for k in k_choices}

# Perform k-fold cross validation to find the best value of k.
# Loop over folds in the outer loop so the (expensive) distance matrix is
# computed once per fold and reused for all values of k.
for idx in range(num_folds):
    # Use bin with index idx as validation set, rest as training set
    X_train_set = np.concatenate((*X_train_folds[:idx], *X_train_folds[idx+1:]), axis=0)
    y_train_set = np.concatenate((*y_train_folds[:idx], *y_train_folds[idx+1:]), axis=0)
    X_validation_set = X_train_folds[idx]
    y_validation_set = y_train_folds[idx]
    num_validation_set = X_validation_set.shape[0]
    # Train kNN classifier (a no-op beyond memorizing the data)
    classifier = KNearestNeighbor()
    classifier.train(X_train_set, y_train_set)
    # Compute distances once for this fold
    dists_validate = classifier.compute_distances_no_loops(X_validation_set)
    for k in k_choices:
        # Predict labels for validation set
        y_validation_pred = classifier.predict_labels(dists_validate, k=k)
        # Check accuracy: fraction of correctly classified validation examples
        accuracy = (float(np.sum(np.equal(y_validation_pred, y_validation_set)))/num_validation_set)
        k_to_accuracies[k].append(accuracy)

# Print out the computed accuracies
for k in sorted(k_to_accuracies):
    for accuracy in k_to_accuracies[k]:
        print('k = %d, accuracy = %f' % (k, accuracy))
# + id="2XoJqf0rIV0O" outputId="25563082-927d-4e4d-c97a-a033843af0b9" colab={"base_uri": "https://localhost:8080/", "height": 692}
# plot the raw observations (one scatter column of per-fold accuracies per k)
for k in k_choices:
    accuracies = k_to_accuracies[k]
    print('k = %d, average accuracy = %f' % (k, np.average(accuracies)))
    plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# + id="ubK66G1wIV0T" outputId="41cd1822-e536-41a8-fbaa-2a9b31757c4e" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 10
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)  # retrain on the full (subsampled) training set
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# + [markdown] id="focgP79JIV0d"
# **Inline Question 3**
# Which of the following statements about $k$-Nearest Neighbor ($k$-NN) are true in a classification setting, and for all $k$? Select all that apply.
# 1. The training error of a 1-NN will always be better than that of 5-NN.
# 2. The test error of a 1-NN will always be better than that of a 5-NN.
# 3. The decision boundary of the k-NN classifier is linear.
# 4. The time needed to classify a test example with the k-NN classifier grows with the size of the training set.
# 5. None of the above.
#
# *Your Answer*:
#
# *Your explanation*:
| Copy_of_knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from tensorflow.keras.preprocessing import image
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import pandas as pd
import numpy as np
from glob import glob
import re
# Root folder with the extracted key-frames, organized as <frames>/<letter>/<word>/.
path = "/home/fabiana/Desktop/projeto-final-src/KeyFramesExtraction/Result/"
df = pd.read_csv('../Annotations/processedAnnotations_no_corrupted_videos.csv')
# Number of words ('palavra') per hand-configuration (CM) class.
count = df.groupby(['CM']).count()['palavra']
threshold = 20  # classes with <= threshold examples are collapsed into 'others'
frames = 10  # key-frames extracted per video (subfolder name under `path`)
cnn_dataset_path = "/home/fabiana/Desktop/projeto-final-src/Classifier/datasets/CNN"
# +
def getFrameNumber(path):
    """Return the integer N from a '.../frameN.jpg' file path."""
    return int(re.search(r".*\/frame(\d+)\.jpg", path).group(1))

def getFiles(path, drop_edges=False):
    """List the .jpg frames in *path*, sorted by frame number.

    drop_edges: when True, drop the first and the last frame.
    """
    files = list(glob(path + "/*.jpg"))
    files.sort(key=getFrameNumber)
    if drop_edges:  # Remove the first and the last frames
        # BUGFIX: the original used files.pop() / files.pop(0), which raised
        # IndexError on folders with fewer than two frames; slicing returns []
        # instead and is identical for >= 2 frames.
        files = files[1:-1]
    return files
def replaceClass(cg):
    """Map classes with at most `threshold` examples to the catch-all 'others'."""
    # `count` and `threshold` are module-level values defined above.
    return 'others' if count[cg] <= threshold else cg
# -
df['classe'] = df['CM'].map(replaceClass)  # collapse rare CM classes into 'others'
df['classe'] = df['classe'].astype('category')
df = df[['palavra', 'classe']]
df['first_letter'] = df['palavra'].map(lambda x: x[0])  # folder layout groups words by first letter
df['folder_path'] = path + f'{frames}/' + df['first_letter'] + '/' + df['palavra'] + '/'
df['files_list'] = df['folder_path'].map(lambda x: getFiles(x, True))  # frame paths, first/last dropped
df.head()
def get_images(images_list, img_size):
    """Load the image files in *images_list* as grayscale arrays scaled to [-1, 1].

    Returns an array of shape (len(images_list), img_size[0], img_size[1]).
    `image` is the keras preprocessing module imported at the top of the file.
    """
    frames_array = np.zeros((len(images_list), img_size[0], img_size[1]))
    for i, image_path in enumerate(images_list):
        img = image.load_img(image_path, color_mode='grayscale', target_size=img_size)
        img = np.squeeze(image.img_to_array(img))  # drop the trailing channel axis
        # Rescale pixel values from [0, 255] to [-1, 1].
        img /= 127.5
        img -= 1.
        frames_array[i,:,:] = img
    return frames_array
# One-hot encode the class labels.
y = to_categorical(df['classe'].cat.codes.values)
# Stratified 80/20 split at the video level (each row is a list of frame paths).
X_train, X_test, y_train, y_test = train_test_split(df['files_list'], y, stratify=y,
                                                    test_size=0.2, random_state=0)
# Expand labels to the frame level: one copy of the video's label per frame.
y_train = np.repeat(y_train, X_train.map(len), axis=0)
y_test = np.repeat(y_test, X_test.map(len), axis=0)
# Flatten the per-video lists into one flat array of frame paths.
X_test_sum = np.array(X_test.sum())
X_train_sum = np.array(X_train.sum())
np.save(cnn_dataset_path + '/X_train_no_edge_frames.npy', X_train_sum)
np.save(cnn_dataset_path + '/X_test_no_edge_frames.npy', X_test_sum)
np.save(cnn_dataset_path + '/y_train_no_edge_frames.npy', y_train)
np.save(cnn_dataset_path + '/y_test_no_edge_frames.npy', y_test)
# +
from tqdm import tqdm
# Exploratory cell: load every word's frames into memory as (1, H, W) arrays.
X = []
for images_list in tqdm(df['files_list'].values):
    frames_array = get_images(images_list, img_size=(80, 100))
    X.append(np.expand_dims(frames_array, axis=0))
y = df['classe'].cat.codes
# -
# NOTE(review): X is a plain list, so X.shape raises AttributeError -- scratch line.
X.shape
frames_array.shape
# NOTE(review): image_path is not defined at top level here; this relies on a
# leftover variable from an earlier interactive run -- confirm before reusing.
img = image.load_img(image_path, color_mode='grayscale')
np.squeeze(image.img_to_array(img)).shape
# NOTE(review): incomplete expression left over from editing -- this line is a
# syntax error and prevents the cell from running as-is.
image.img_to_array(img).
def getImages(files_list):
    """Draft loader: reads each frame at 299x299 grayscale (result discarded).

    NOTE(review): nothing is returned or accumulated -- ``x`` is overwritten on
    every iteration, so this exploratory function currently has no effect.
    """
    for image_path in files_list:
        img = image.load_img(image_path, target_size=(299, 299), color_mode='grayscale')
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
# Quick sanity checks on the first word's folder and the label codes.
getFiles(df['folder_path'].iloc[0])
labels = df['classe'].cat.codes
df.head()
| Classifier/Notebooks/CNN_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import gc
import time
import category_encoders as ce
from contextlib import contextmanager
import lightgbm as lgb
import xgboost as xgb
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import category_encoders as ce
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from scipy.cluster.vq import kmeans2, whiten
from sklearn.decomposition import truncated_svd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# Read the full CSVs when None; set an int to subsample rows for quick runs.
num_rows = None
# +
def descretize(x, n):
    """Quantile-discretise *x* into at most *n* bins and return string labels.

    Thin wrapper around ``pd.qcut``: each value is mapped to the string form of
    its quantile interval, with duplicate bin edges dropped so heavily skewed
    columns do not raise. (Name kept as-is -- sic -- because callers below use it.)

    Parameters
    ----------
    x : array-like of numeric values.
    n : int
        Requested number of quantile bins.

    Returns
    -------
    list of str
        One interval label per input value (NaN inputs map to 'nan').
    """
    # A def (instead of the original name-bound lambda) gives the helper a
    # docstring and a proper traceback name; behaviour is unchanged.
    return list(map(str, list(pd.qcut(x, n, duplicates='drop'))))
def one_hot_encoder(df, nan_as_category=True):
    """One-hot encode the object-dtype columns of *df* via ``pd.get_dummies``.

    Returns the encoded frame together with the list of freshly created dummy
    column names (NaN gets its own indicator column when *nan_as_category*).
    """
    previous = set(df.columns)
    object_cols = df.select_dtypes(include='object').columns.tolist()
    encoded = pd.get_dummies(df, columns=object_cols, dummy_na=nan_as_category)
    added = [name for name in encoded.columns if name not in previous]
    return encoded, added
# -
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows = None, nan_as_category=False):
    """Load application train+test, engineer features, return one encoded frame.

    Train and test applications are concatenated (train rows first, so the
    caller can split them back by the train row count), cleaned of dataset
    sentinel values, enriched with ratio / group-aggregate / discretised /
    k-means features, and finally label- and one-hot encoded.
    """
    # Read data and merge
    df = pd.read_csv('/media/limbo/Home-Credit/data/application_train.csv.zip', nrows= num_rows)
    n_train = df.shape[0]
    test_df = pd.read_csv('/media/limbo/Home-Credit/data/application_test.csv.zip', nrows= num_rows)
    print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
    df = df.append(test_df).reset_index()
    # 'XNA'/'Unknown'/365243 are the dataset's sentinel codes for "missing".
    df['CODE_GENDER'].replace('XNA', np.nan, inplace=True)
    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    df['NAME_FAMILY_STATUS'].replace('Unknown', np.nan, inplace=True)
    df['ORGANIZATION_TYPE'].replace('XNA', np.nan, inplace=True)
    # Optional: Remove 4 applications with XNA CODE_GENDER (train set)
    # NOTE(review): 'XNA' was already replaced by NaN above, so this filter
    # should drop nothing -- confirm intent.
    df = df[df['CODE_GENDER'] != 'XNA']
    docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
    live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
    # NaN values for DAYS_EMPLOYED: 365.243 -> nan (repeat of the line above; harmless)
    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
    # Median income per organisation type, mapped back per row below.
    inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
    # Hand-crafted ratio / document-flag / external-score features.
    df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
    df['NEW_AMT_INCOME_TOTAL_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
    df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
    df['NEW_DOC_IND_AVG'] = df[docs].mean(axis=1)
    df['NEW_DOC_IND_STD'] = df[docs].std(axis=1)
    df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)
    df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
    df['NEW_LIVE_IND_STD'] = df[live].std(axis=1)
    df['NEW_LIVE_IND_KURT'] = df[live].kurtosis(axis=1)
    df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
    df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
    df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
    df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
    df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
    df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
    df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
    df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean())
    df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
    df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
    df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
    df['NEW_PHONE_TO_EMPLOY_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
    df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
    df['children_ratio'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']
    df['NEW_EXT_SOURCES_MEDIAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].median(axis=1)
    df['NEW_DOC_IND_SKEW'] = df[docs].skew(axis=1)
    df['NEW_LIVE_IND_SKEW'] = df[live].skew(axis=1)
    # 'ind_*': distance of each value from its column's mean/median, where the
    # statistic is computed after mapping +/-inf to NaN and imputing the median.
    df['ind_0'] = df['DAYS_EMPLOYED'] - df['DAYS_EMPLOYED'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_EMPLOYED'].dropna().median()).mean()
    df['ind_1'] = df['DAYS_EMPLOYED'] - df['DAYS_EMPLOYED'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_EMPLOYED'].dropna().median()).median()
    df['ind_2'] = df['DAYS_BIRTH'] - df['DAYS_BIRTH'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_BIRTH'].dropna().median()).mean()
    df['ind_3'] = df['DAYS_BIRTH'] - df['DAYS_BIRTH'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_BIRTH'].dropna().median()).median()
    df['ind_4'] = df['AMT_INCOME_TOTAL'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).mean()
    df['ind_5'] = df['AMT_INCOME_TOTAL'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).median()
    df['ind_6'] = df['AMT_CREDIT'] - df['AMT_CREDIT'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_CREDIT'].dropna().median()).mean()
    df['ind_7'] = df['AMT_CREDIT'] - df['AMT_CREDIT'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_CREDIT'].dropna().median()).median()
    df['ind_8'] = df['AMT_ANNUITY'] - df['AMT_ANNUITY'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_ANNUITY'].dropna().median()).mean()
    df['ind_9'] = df['AMT_ANNUITY'] - df['AMT_ANNUITY'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_ANNUITY'].dropna().median()).median()
    # NOTE(review): ind_10/ind_11 subtract an INCOME statistic from AMT_CREDIT
    # (cross-column), unlike ind_0..ind_9 -- confirm this is deliberate.
    df['ind_10'] = df['AMT_CREDIT'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).mean()
    df['ind_11'] = df['AMT_CREDIT'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).median()
    # (group-by columns, [(source column, aggregate), ...]) recipes; each
    # aggregate is merged back onto every row of its group below.
    AGGREGATION_RECIPIES = [
        (['CODE_GENDER', 'NAME_EDUCATION_TYPE'], [('AMT_ANNUITY', 'max'),
                                                  ('AMT_CREDIT', 'max'),
                                                  ('EXT_SOURCE_1', 'mean'),
                                                  ('EXT_SOURCE_2', 'mean'),
                                                  ('OWN_CAR_AGE', 'max'),
                                                  ('OWN_CAR_AGE', 'sum')]),
        (['CODE_GENDER', 'ORGANIZATION_TYPE'], [('AMT_ANNUITY', 'mean'),
                                                ('AMT_INCOME_TOTAL', 'mean'),
                                                ('DAYS_REGISTRATION', 'mean'),
                                                ('EXT_SOURCE_1', 'mean'),
                                                ('NEW_CREDIT_TO_ANNUITY_RATIO', 'mean')]),
        (['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], [('AMT_ANNUITY', 'mean'),
                                                     ('CNT_CHILDREN', 'mean'),
                                                     ('DAYS_ID_PUBLISH', 'mean')]),
        (['CODE_GENDER', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], [('EXT_SOURCE_1', 'mean'),
                                                                                               ('EXT_SOURCE_2', 'mean')]),
        (['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], [('AMT_CREDIT', 'mean'),
                                                      ('AMT_REQ_CREDIT_BUREAU_YEAR', 'mean'),
                                                      ('APARTMENTS_AVG', 'mean'),
                                                      ('BASEMENTAREA_AVG', 'mean'),
                                                      ('EXT_SOURCE_1', 'mean'),
                                                      ('EXT_SOURCE_2', 'mean'),
                                                      ('EXT_SOURCE_3', 'mean'),
                                                      ('NONLIVINGAREA_AVG', 'mean'),
                                                      ('OWN_CAR_AGE', 'mean')]),
        (['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], [('ELEVATORS_AVG', 'mean'),
                                                                                ('EXT_SOURCE_1', 'mean')]),
        (['OCCUPATION_TYPE'], [('AMT_ANNUITY', 'median'),
                               ('CNT_CHILDREN', 'median'),
                               ('CNT_FAM_MEMBERS', 'median'),
                               ('DAYS_BIRTH', 'median'),
                               ('DAYS_EMPLOYED', 'median'),
                               ('NEW_CREDIT_TO_ANNUITY_RATIO', 'median'),
                               ('DAYS_REGISTRATION', 'median'),
                               ('EXT_SOURCE_1', 'median'),
                               ('EXT_SOURCE_2', 'median'),
                               ('EXT_SOURCE_3', 'median')]),
    ]
    for groupby_cols, specs in AGGREGATION_RECIPIES:
        group_object = df.groupby(groupby_cols)
        for select, agg in specs:
            groupby_aggregate_name = '{}_{}_{}'.format('_'.join(groupby_cols), agg, select)
            # Broadcast the group statistic back onto every row of the group.
            df = df.merge(group_object[select]
                          .agg(agg)
                          .reset_index()
                          .rename(index=str,
                                  columns={select: groupby_aggregate_name})
                          [groupby_cols + [groupby_aggregate_name]],
                          on=groupby_cols,
                          how='left')
    # Binary indicators and family-size derived features (DAYS_* are negative:
    # days relative to the application date).
    df['retirement_age'] = (df['DAYS_BIRTH'] > -14000).astype(int)
    df['long_employment'] = (df['DAYS_EMPLOYED'] > -2000).astype(int)
    df['cnt_non_child'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
    df['child_to_non_child_ratio'] = df['CNT_CHILDREN'] / df['cnt_non_child']
    df['income_per_non_child'] = df['AMT_INCOME_TOTAL'] / df['cnt_non_child']
    df['credit_per_person'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
    df['credit_per_child'] = df['AMT_CREDIT'] / (1 + df['CNT_CHILDREN'])
    df['credit_per_non_child'] = df['AMT_CREDIT'] / df['cnt_non_child']
    # NOTE(review): the six lines above are repeated verbatim below --
    # redundant but harmless (same values are recomputed).
    df['cnt_non_child'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
    df['child_to_non_child_ratio'] = df['CNT_CHILDREN'] / df['cnt_non_child']
    df['income_per_non_child'] = df['AMT_INCOME_TOTAL'] / df['cnt_non_child']
    df['credit_per_person'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
    df['credit_per_child'] = df['AMT_CREDIT'] / (1 + df['CNT_CHILDREN'])
    df['credit_per_non_child'] = df['AMT_CREDIT'] / df['cnt_non_child']
    # 'p_*'/'pe_*': quantile-discretised string versions of selected features
    # (32-, 64- and 128-bin variants).
    df['p_0'] = descretize(df['credit_per_non_child'].values, 2 ** 5)
    df['p_1'] = descretize(df['credit_per_person'].values, 2 ** 5)
    df['p_2'] = descretize(df['credit_per_child'].values, 2 ** 5)
    df['p_3'] = descretize(df['retirement_age'].values, 2 ** 5)
    df['p_4'] = descretize(df['income_per_non_child'].values, 2 ** 5)
    df['p_5'] = descretize(df['child_to_non_child_ratio'].values, 2 ** 5)
    df['p_6'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 5)
    df['p_7'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 6)
    df['p_8'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 7)
    df['pe_0'] = descretize(df['credit_per_non_child'].values, 2 ** 6)
    df['pe_1'] = descretize(df['credit_per_person'].values, 2 ** 6)
    df['pe_2'] = descretize(df['credit_per_child'].values, 2 ** 6)
    df['pe_3'] = descretize(df['retirement_age'].values, 2 ** 6)
    df['pe_4'] = descretize(df['income_per_non_child'].values, 2 ** 6)
    df['pe_5'] = descretize(df['child_to_non_child_ratio'].values, 2 ** 6)
    # k-means clusterings of log1p(credit/annuity ratio) at 2..1024 centroids;
    # NaN/inf are mapped to the sentinel 999 first. b holds per-row labels.
    c = df['NEW_CREDIT_TO_ANNUITY_RATIO'].replace([np.inf, -np.inf], np.nan).fillna(999).values
    a, b = kmeans2(np.log1p(c), 2, iter=333)
    df['x_0'] = b
    a, b = kmeans2(np.log1p(c), 4, iter=333)
    df['x_1'] = b
    a, b = kmeans2(np.log1p(c), 8, iter=333)
    df['x_2'] = b
    a, b = kmeans2(np.log1p(c), 16, iter=333)
    df['x_3'] = b
    a, b = kmeans2(np.log1p(c), 32, iter=333)
    df['x_4'] = b
    a, b = kmeans2(np.log1p(c), 64, iter=333)
    df['x_5'] = b
    a, b = kmeans2(np.log1p(c), 128, iter=333)
    df['x_6'] = b
    a, b = kmeans2(np.log1p(c), 150, iter=333)
    df['x_7'] = b
    a, b = kmeans2(np.log1p(c), 256, iter=333)
    df['x_8'] = b
    a, b = kmeans2(np.log1p(c), 512, iter=333)
    df['x_9'] = b
    a, b = kmeans2(np.log1p(c), 1024, iter=333)
    df['x_10'] = b
    # Categorical features with Binary encode (0 or 1; two categories)
    for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
        df[bin_feature], uniques = pd.factorize(df[bin_feature])
    # Categorical features with One-Hot encode
    df, cat_cols = one_hot_encoder(df, nan_as_category)
    del test_df
    gc.collect()
    return df
# Build the combined feature frame (train rows first, then test rows).
df = application_train_test(num_rows=None, nan_as_category=False)
gc.collect()
def bureau_and_balance(num_rows = None, nan_as_category = True):
    """Aggregate bureau.csv + bureau_balance.csv to one row per SK_ID_CURR.

    Monthly balances are first aggregated per bureau record, joined onto the
    bureau table, and then everything is aggregated per applicant -- overall,
    for active credits only, for closed credits only, plus active/closed
    ratio features.
    """
    bureau = pd.read_csv('../data/bureau.csv', nrows = num_rows)
    bb = pd.read_csv('../data/bureau_balance.csv', nrows = num_rows)
    bb, bb_cat = one_hot_encoder(bb, nan_as_category)
    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
    # Bureau balance: Perform aggregations and merge with bureau.csv
    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size', 'median']}
    for col in bb_cat:
        bb_aggregations[col] = ['median']
    bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
    # Flatten the (column, aggregate) MultiIndex into COL_AGG names.
    bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
    del bb, bb_agg
    gc.collect()
    # Bureau and bureau_balance numeric features
    num_aggregations = {
        'DAYS_CREDIT': ['min', 'max', 'median', 'var'],
        'DAYS_CREDIT_ENDDATE': ['min', 'max', 'median'],
        'DAYS_CREDIT_UPDATE': ['median'],
        'CREDIT_DAY_OVERDUE': ['max', 'median'],
        'AMT_CREDIT_MAX_OVERDUE': ['median'],
        'AMT_CREDIT_SUM': ['max', 'median', 'sum'],
        'AMT_CREDIT_SUM_DEBT': ['max', 'median', 'sum'],
        'AMT_CREDIT_SUM_OVERDUE': ['median'],
        'AMT_CREDIT_SUM_LIMIT': ['median', 'sum'],
        'AMT_ANNUITY': ['max', 'median'],
        'CNT_CREDIT_PROLONG': ['sum'],
        'MONTHS_BALANCE_MIN': ['min', 'median'],
        'MONTHS_BALANCE_MAX': ['max', 'median'],
        'MONTHS_BALANCE_SIZE': ['median', 'sum']
    }
    # Bureau and bureau_balance categorical features
    cat_aggregations = {}
    for cat in bureau_cat: cat_aggregations[cat] = ['median']
    for cat in bb_cat: cat_aggregations[cat + "_MEDIAN"] = ['median']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
    # Bureau: Active credits - using only numerical aggregations
    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
    active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
    # cols keeps the raw (column, aggregate) tuples for the ratio loop below.
    cols = active_agg.columns.tolist()
    active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
    del active, active_agg
    gc.collect()
    # Bureau: Closed credits - using only numerical aggregations
    closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
    closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
    closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
    # Active/closed ratio per aggregate (e is a (column, aggregate) tuple).
    for e in cols:
        bureau_agg['NEW_RATIO_BURO_' + e[0] + "_" + e[1].upper()] = bureau_agg['ACTIVE_' + e[0] + "_" + e[1].upper()] / bureau_agg['CLOSED_' + e[0] + "_" + e[1].upper()]
    del closed, closed_agg, bureau
    gc.collect()
    return bureau_agg
# Attach the bureau aggregates to the application frame.
bureau = bureau_and_balance(num_rows)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
# +
def previous_applications(num_rows=None, nan_as_category=True):
    """Aggregate previous_application.csv to one row per SK_ID_CURR.

    Produces overall, approved-only and refused-only aggregates plus an
    approved/refused ratio per numeric aggregate.
    """
    prev = pd.read_csv('../data/previous_application.csv', nrows = num_rows)
    prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
    # Days 365.243 values -> nan
    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
    # Add feature: value ask / value received percentage
    prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
    # Previous applications numeric features
    num_aggregations = {
        'AMT_ANNUITY': ['min', 'max', 'median'],
        'AMT_APPLICATION': ['min', 'max', 'median'],
        'AMT_CREDIT': ['min', 'max', 'median'],
        'APP_CREDIT_PERC': ['min', 'max', 'median', 'var'],
        'AMT_DOWN_PAYMENT': ['min', 'max', 'median'],
        'AMT_GOODS_PRICE': ['min', 'max', 'median'],
        'HOUR_APPR_PROCESS_START': ['min', 'max', 'median'],
        'RATE_DOWN_PAYMENT': ['min', 'max', 'median'],
        'DAYS_DECISION': ['min', 'max', 'median'],
        'CNT_PAYMENT': ['median', 'sum'],
    }
    # Previous applications categorical features
    cat_aggregations = {}
    for cat in cat_cols:
        cat_aggregations[cat] = ['median']
    prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    # Flatten the (column, aggregate) MultiIndex into PREV_COL_AGG names.
    prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
    # Previous Applications: Approved Applications - only numerical features
    approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
    approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
    # cols keeps the raw (column, aggregate) tuples for the ratio loop below.
    cols = approved_agg.columns.tolist()
    approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
    prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
    # Previous Applications: Refused Applications - only numerical features
    refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
    refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
    refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
    prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
    del refused, refused_agg, approved, approved_agg, prev
    # Approved/refused ratio per aggregate (e is a (column, aggregate) tuple).
    for e in cols:
        prev_agg['NEW_RATIO_PREV_' + e[0] + "_" + e[1].upper()] = prev_agg['APPROVED_' + e[0] + "_" + e[1].upper()] / prev_agg['REFUSED_' + e[0] + "_" + e[1].upper()]
    gc.collect()
    return prev_agg
# Preprocess POS_CASH_balance.csv
def pos_cash(num_rows = None, nan_as_category = True):
    """Aggregate POS_CASH_balance.csv to one row per SK_ID_CURR."""
    pos = pd.read_csv('../data/POS_CASH_balance.csv', nrows = num_rows)
    pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
    # Features
    aggregations = {
        'MONTHS_BALANCE': ['max', 'median', 'size'],
        'SK_DPD': ['max', 'median'],
        'SK_DPD_DEF': ['max', 'median']
    }
    for cat in cat_cols:
        aggregations[cat] = ['median']
    pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)
    # Flatten the (column, aggregate) MultiIndex into POS_COL_AGG names.
    pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist()])
    # Count pos cash accounts
    pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()
    del pos
    gc.collect()
    return pos_agg
# Preprocess installments_payments.csv
def installments_payments(num_rows = None, nan_as_category = True):
    """Aggregate installments_payments.csv to one row per SK_ID_CURR.

    Adds payment-ratio/difference, days-past-due/before-due and discretised
    versions of those features before aggregating.
    """
    ins = pd.read_csv('/media/limbo/Home-Credit/data/installments_payments.csv', nrows = num_rows)
    ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
    # Percentage and difference paid in each installment (amount paid and installment value)
    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
    # Distance from the column median (after mapping +/-inf to NaN and imputing).
    # NOTE(review): the *_median and *_MEDIAN pairs are computed identically --
    # apparently a copy-paste duplicate; confirm before pruning.
    ins['PAYMENT_PERC_median'] = ins['PAYMENT_PERC'] - ins['PAYMENT_PERC'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_PERC'].dropna().median()).median()
    ins['PAYMENT_PERC_MEDIAN'] = ins['PAYMENT_PERC'] - ins['PAYMENT_PERC'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_PERC'].dropna().median()).median()
    ins['PAYMENT_DIFF_median'] = ins['PAYMENT_DIFF'] - ins['PAYMENT_DIFF'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_DIFF'].dropna().median()).median()
    ins['PAYMENT_DIFF_MEDIAN'] = ins['PAYMENT_DIFF'] - ins['PAYMENT_DIFF'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_DIFF'].dropna().median()).median()
    # 64-bin quantile-discretised string variants.
    # NOTE(review): pay_2 and pay_3 both discretise PAYMENT_PERC_MEDIAN --
    # pay_3 was likely meant to use PAYMENT_PERC_median (same values anyway).
    ins['pay_0'] = descretize(ins['PAYMENT_PERC'].values, 2 ** 6)
    ins['pay_1'] = descretize(ins['PAYMENT_DIFF'].values, 2 ** 6)
    ins['pay_2'] = descretize(ins['PAYMENT_PERC_MEDIAN'].values, 2 ** 6)
    ins['pay_3'] = descretize(ins['PAYMENT_PERC_MEDIAN'].values, 2 ** 6)
    ins['pay_4'] = descretize(ins['PAYMENT_DIFF_median'].values, 2 ** 6)
    ins['pay_5'] = descretize(ins['PAYMENT_DIFF_MEDIAN'].values, 2 ** 6)
    # Days past due and days before due (no negative values)
    ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
    ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
    ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
    ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
    ins['day_0'] = descretize(ins['DAYS_ENTRY_PAYMENT'].values, 2 ** 6)
    ins['day_1'] = descretize(ins['DAYS_INSTALMENT'].values, 2 ** 6)
    ins['day_2'] = descretize(ins['DBD'].values, 2 ** 6)
    # Features: Perform aggregations
    aggregations = {
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DPD': ['max', 'median', 'sum'],
        'DBD': ['max', 'median', 'sum'],
        'PAYMENT_PERC': ['max', 'median', 'sum', 'var'],
        'PAYMENT_DIFF': ['max', 'median', 'sum', 'var'],
        'AMT_INSTALMENT': ['max', 'median', 'sum'],
        'AMT_PAYMENT': ['min', 'max', 'median', 'sum'],
        'DAYS_ENTRY_PAYMENT': ['max', 'median', 'sum']
    }
    for cat in cat_cols:
        aggregations[cat] = ['median']
    ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)
    # Flatten the (column, aggregate) MultiIndex into INSTAL_COL_AGG names.
    ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
    # Count installments accounts
    ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
    del ins
    gc.collect()
    return ins_agg
# Preprocess credit_card_balance.csv
def credit_card_balance(num_rows = None, nan_as_category = True):
    """Aggregate credit_card_balance.csv to one row per SK_ID_CURR."""
    cc = pd.read_csv('../data/credit_card_balance.csv', nrows = num_rows)
    cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
    # General aggregations
    cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)
    # Blanket aggregation over every remaining column.
    cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])
    cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist()])
    # Count credit card lines
    cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()
    del cc
    gc.collect()
    return cc_agg
# +
# Join every secondary-table aggregate onto the application frame, freeing
# each intermediate as soon as it is merged.
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
# +
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
print(df.shape)
# -
df.head()
# Level-1 stacking outputs and CV configuration.
test_file_path = "Level_1_stack/test_xgb-0.csv"
validation_file_path = 'Level_1_stack/validation_xgb-0.csv'
num_folds = 5
# Re-read the raw train file only to recover the train row count, which is
# used to split the combined frame back into train/test below.
train = pd.read_csv('/media/limbo/Home-Credit/data/application_train.csv.zip', nrows= num_rows)
n_train = train.shape[0]
# +
# 5-fold XGBoost training with out-of-fold (OOF) validation predictions and
# fold-averaged test predictions, written out for level-2 stacking.
encoding = 'ohe'
# First n_train rows are the train applications, the rest are test.
train_df = df.iloc[0:n_train]
test_df = df.iloc[n_train:]
print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
gc.collect()
# Cross validation model
folds = KFold(n_splits=num_folds, shuffle=True, random_state=1001)
# Create arrays and dataframes to store results
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
# Drop identifiers and the target from the feature list.
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
#feats = [col for col in feats_0 if df[col].dtype == 'object']
print(train_df[feats].shape)
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    # NOTE(review): enc is only defined when encoding == 'ohe'; any other value
    # would raise NameError below. impute_missing is deprecated in newer
    # category_encoders releases -- confirm the pinned version.
    if encoding == 'ohe':
        enc = ce.OneHotEncoder(impute_missing=True, cols=categorical_columns).fit(train_df[feats].iloc[train_idx],
                                                                                  train_df['TARGET'].iloc[train_idx])
    # Encoder is fit on the training fold only, then applied to valid/test.
    x_train = enc.transform(train_df[feats].iloc[train_idx])
    x_valid = enc.transform(train_df[feats].iloc[valid_idx])
    x_test = enc.transform(test_df[feats])
    print(x_train.shape, x_valid.shape, x_test.shape)
    dtest = xgb.DMatrix(x_test.values)
    dtrain = xgb.DMatrix(x_train.values,
                         label=train_df['TARGET'].iloc[train_idx].values)
    dvalid = xgb.DMatrix(x_valid.values,
                         train_df['TARGET'].iloc[valid_idx].values)
    params = dict(
        booster="gbtree",
        eval_metric = "auc",
        nthread=4,
        eta=0.05,
        max_depth=6,
        min_child_weight = 30,
        gamma=0,
        subsample = 0.85,
        colsample_bytree = 0.7,
        colsample_bylevel = 0.632,
        alpha=0)
    # Early stopping on the validation fold caps the 30000 boosting rounds.
    clf = xgb.train(
        params,
        dtrain,
        num_boost_round=30000,
        evals=[(dtrain, 'train'), (dvalid, 'valid')],
        early_stopping_rounds=100,
        verbose_eval=False
    )
    # OOF predictions for this fold; test predictions are averaged over folds.
    oof_preds[valid_idx] = clf.predict(dvalid)
    sub_preds += clf.predict(dtest) / folds.n_splits
    print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(train_df['TARGET'].iloc[valid_idx].values, oof_preds[valid_idx])))
    del clf, dtrain, dvalid
    gc.collect()
# print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'].iloc[train_idx].values, oof_preds))
# # Write submission file and plot feature importance
sub_df = test_df[['SK_ID_CURR']].copy()
sub_df['TARGET'] = sub_preds
sub_df[['SK_ID_CURR', 'TARGET']].to_csv(test_file_path, index= False)
# Out-of-fold predictions become the level-1 validation file.
val_df = train_df[['SK_ID_CURR', 'TARGET']].copy()
val_df['TARGET'] = oof_preds
val_df[['SK_ID_CURR', 'TARGET']].to_csv(validation_file_path, index= False)
gc.collect()
# -
gc.collect()
| dashboards-examples/XGB-5-Fold-Stack-v0.1.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing Pandas and Numpy
import pandas as pd
import numpy as np
# ## Importing Dataset in pandas
data=pd.read_csv('digit.csv')
test=pd.read_csv('digitest.csv')
data.head()
# ## Storing the label In Y_train
Y_train=data['label']
X_train=data.drop(labels='label',axis=1)
# ## Importing Seaborn Library
import seaborn as sns
sns.countplot(Y_train)
Y_train.value_counts()
# ## Checking for null values
# NOTE(review): .isnull().any().count() counts columns, not missing values --
# use .isnull().any().sum() (or .isnull().sum()) to actually check for nulls.
X_train.isnull().any().count()
X_train.tail()
# ## Reducing the intensity of image Applying Normalisation
# Scale pixel intensities from [0, 255] to [0, 1].
X_train=X_train/255
test=test/255
# ## Image Reshaping
#
# Train and test images (28px x 28px) have been stored into pandas.DataFrame as 1D vectors of 784 values. We reshape all data to 28x28x1 3D matrices.
#
# Keras requires an extra dimension in the end which correspond to channels. MNIST images are gray scaled so it use only one channel. For RGB images, there is 3 channels, we would have reshaped 784px vectors to 28x28x3 3D matrices.
X_train=X_train.values.reshape(-1,28,28,1)
test=test.values.reshape(-1,28,28,1)
Y_train
# ## Labels are 10 digits numbers from 0 to 9. We need to encode these lables to one hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0]).
from keras.utils.np_utils import to_categorical
Y_train=to_categorical(Y_train,num_classes=10)
print(Y_train)
# ## Importing Train test split from sklearn library and also confusion matrix for checking accuracy
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
# Hold out 10% of the training data as a validation set.
X_train,X_test,Y_train,Y_test=train_test_split(X_train,Y_train,test_size=0.1,random_state=42)
X_train
print(X_train[122][:,:,0])
import matplotlib.pyplot as plt
plt.imshow(X_train[122][:,:,0])
# ## Importing Sequential models present in keras API that allow us to create layer by layer model
#
# Since we are dealing with convolution neural network we will deal here with conv2D,MaxPool2D layer which are present in Keras and after we imported sequential model we will first add some convolution layer,
#
# I am first adding convolution layer with 32 filter for first two convolution layer having 5x5 kernel matrix filter which can be convolved on original image for extracting the important feature from image.
#
# The kernel matrix is applied on complete image matrix.
#
# Now I have added which is basically used as a downsampling filter as its reduces the dimension of image for it we are using Max2D which is reducing the size of an image.
#
# We need to choose the pooling size and more the pooling dimension more the downsampling is important.
#
# Convolution and pooling layer are combined here so that our model can learn more features
#
# Now we will add 2 more convolution layers at the end, each with 64 filters, and do the pooling for downsampling there too. After pooling is complete we will proceed with a Dropout layer, which is a regularization method where a proportion of nodes in the layer are randomly ignored (setting their weights to zero) for each training sample. This randomly drops a proportion of the network and forces the network to learn features in a distributed way. This technique also improves generalization and reduces overfitting.
#
# 'relu' which is the rectifier (activation function max(0,x). The rectifier activation function is used to add non linearity to the network.
#
# The Flatten layer is use to convert the final feature maps into a one single 1D vector. This flattening step is needed so that you can make use of fully connected layers after some convolutional/maxpool layers. It combines all the found local features of the previous convolutional layers.
#
# In the end i used the features in two fully-connected (Dense) layers which is just artificial an neural networks (ANN) classifier. In the last layer(Dense(10,activation="softmax")) the net outputs distribution of probability of each class.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
# Build the CNN layer-by-layer with the Keras Sequential API.
model=Sequential()
# +
# Architecture: [Conv2D(32,5x5) x2 -> MaxPool -> Dropout] ->
#               [Conv2D(64,3x3) x2 -> MaxPool -> Dropout] ->
#               Flatten -> Dense(256) -> Dropout -> Dense(10, softmax).
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
                 activation ='relu', input_shape = (28,28,1)))
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
                 activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
                 activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
                 activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
# 10-way softmax output: one probability per digit class.
model.add(Dense(10, activation = "softmax"))
# -
# # Set the optimizer and annealer
#
# Once our layers are added to the model, we need to set up a score function, a loss function and an optimisation algorithm.
#
# We define the loss function to measure how poorly our model performs on images with known labels. It is the error rate between the observed labels and the predicted ones.
#
# We use a specific form for categorical classifications (>2 classes) called the "categorical_crossentropy".
#
# The most important function is the optimizer. This function will iteratively improve parameters (filters kernel values, weights and bias of neurons in order to minimise the loss.
#
# I chose RMSprop (with default values); it is a very effective optimizer. The RMSProp update adjusts the Adagrad method in a very simple way in an attempt to reduce its aggressive, monotonically decreasing learning rate. We could also have used the Stochastic Gradient Descent ('sgd') optimizer, but it is slower than RMSprop.
#
# The metric function "accuracy" is used is to evaluate the performance our model. This metric function is similar to the loss function, except that the results from the metric evaluation are not used when training the model (only for evaluation).
# RMSprop with the Keras default hyper-parameters; categorical cross-entropy
# matches the 10-way one-hot softmax output.
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
# In order to make the optimizer converge faster and closest to the global minimum of the loss function, i used an annealing method of the learning rate (LR).
#
# The LR is the step by which the optimizer walks through the 'loss landscape'. The higher LR, the bigger are the steps and the quicker is the convergence. However the sampling is very poor with an high LR and the optimizer could probably fall into a local minima.
#
# Its better to have a decreasing learning rate during the training to reach efficiently the global minimum of the loss function.
#
# To keep the advantage of the fast computation time with a high LR, i decreased the LR dynamically every X steps (epochs) depending if it is necessary (when accuracy is not improved).
#
# With the ReduceLROnPlateau function from Keras.callbacks, i choose to reduce the LR by half if the accuracy is not improved after 3 epochs.
# Halve the LR when validation accuracy plateaus for 3 epochs (floored at 1e-5).
# NOTE(review): 'val_acc' is the pre-TF2 Keras metric name; tf.keras >= 2.0
# calls it 'val_accuracy' -- confirm the installed Keras version.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
epochs = 1 # Turn epochs to 30 to get 0.9967 accuracy
batch_size = 86
# In order to avoid overfitting problem, we need to expand artificially our handwritten digit dataset. We can make your existing dataset even larger. The idea is to alter the training data with small transformations to reproduce the variations occuring when someone is writing a digit.
#
# For example, the number is not centered The scale is not the same (some who write with big/small numbers) The image is rotated...
#
# Approaches that alter the training data in ways that change the array representation while keeping the label the same are known as data augmentation techniques. Some popular augmentations people use are grayscales, horizontal flips, vertical flips, random crops, color jitters, translations, rotations, and much more.
#
# By applying just a couple of these transformations to our training data, we can easily double or triple the number of training examples and create a very robust model.
# +
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
datagen.fit(X_train)
# -
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data = (X_test,Y_test),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction])
# # Evaluating the loss in the model
# +
fig, ax = plt.subplots(2,1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="validation loss",axes =ax[0])
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r',label="Validation accuracy")
legend = ax[1].legend(loc='best', shadow=True)
# -
# # Confusion matrix can be very helpful to see your model's drawbacks.
#
# I plot the confusion matrix of the validation results.
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix.

    Parameters
    ----------
    cm : array-like of shape (n_classes, n_classes)
        Confusion matrix as returned by ``sklearn.metrics.confusion_matrix``.
    classes : iterable
        Tick labels for both class axes.
    normalize : bool, optional
        If True, rescale each row to sum to 1 (per-true-class rates).
    title : str, optional
        Figure title.
    cmap : matplotlib colormap, optional
        Colormap for the heatmap.
    """
    # Fix: normalize *before* drawing so the heatmap and the cell
    # annotations agree (previously the image showed raw counts while the
    # text showed normalized fractions).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate each cell; white text on dark cells for readability.
    thresh = cm.max() / 2.
    fmt = '.2f' if normalize else 'd'
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Predicted class probabilities on the validation set, shape (n, 10).
Y_pred = model.predict(X_test)
# Collapse predicted probability vectors to class indices
Y_pred_classes = np.argmax(Y_pred,axis = 1)
# Collapse the one-hot validation labels to class indices
Y_true = np.argmax(Y_test,axis = 1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes = range(10))
# Here we can see that our CNN performs very well on all digits with few errors considering the size of the validation set (4,200 images).
#
# However, it seems that our CNN has some little trouble with the digit 4: it is sometimes misclassified as 9. It can be very difficult to tell the difference between 4 and 9 when the curves are smooth.
#
# Let's investigate for errors.
#
# I want to see the most important errors. For that purpose I need to get the difference between the predicted probability of the real value and that of the predicted label.
# +
# Mask of misclassified validation samples (True where prediction != label).
errors = Y_pred_classes != Y_true
# Restrict images, labels, predictions and probabilities to the error set.
X_val_errors = X_test[errors]
Y_true_errors = Y_true[errors]
Y_pred_errors = Y_pred[errors]
Y_pred_classes_errors = Y_pred_classes[errors]
# -
def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    """Show the six images selected by ``errors_index`` in a 2x3 grid,
    titling each one with its predicted and true label."""
    nrows, ncols = 2, 3
    fig, axes = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    # Walk the axes in reading order; axes.flat yields them row by row.
    for n, axis in enumerate(axes.flat):
        err_idx = errors_index[n]
        axis.imshow(img_errors[err_idx].reshape((28,28)))
        axis.set_title("Predicted label :{}\nTrue label :{}".format(
            pred_errors[err_idx], obs_errors[err_idx]))
# +
# Probabilities of the wrong predicted numbers
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
# Predicted probabilities of the true values in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# argsort is ascending, so the largest deltas (most confident mistakes)
# sit at the end of the sorted index array
sorted_dela_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors
most_important_errors = sorted_dela_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
# +
# predict results
results = model.predict(test)
# select the index with the maximum probability
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
# +
# Kaggle submission format: ImageId 1..28000 alongside the predicted Label.
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False)
# -
| Digit Recognizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# +
from azureml.core import Workspace, Experiment, Run
# Configure experiment
# Reads the workspace connection info from the local AzureML config file,
# then creates (or attaches to) the named experiment in that workspace.
ws = Workspace.from_config()
exp = Experiment(workspace=ws, name='cifar10_cnn_horovod')
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Cluster configuration
cluster_name = "aml-gpu"
min_nodes = 0
max_nodes = 2
vm_size = "Standard_NC6"
# Verify that the cluster exists already
try:
aml_cluster = ComputeTarget(workspace=ws, name=cluster_name)
except ComputeTargetException:
print('Cluster not `%s` not found, creating one now.' % cluster_name)
config = AmlCompute.provisioning_configuration(vm_size=vm_size, min_nodes=min_nodes, max_nodes=max_nodes)
aml_cluster = ComputeTarget.create(workspace=ws, name=cluster_name, provisioning_configuration=config)
# Wait until the cluster is ready
aml_cluster.wait_for_completion(show_output=True)
# +
from azureml.widgets import RunDetails
from azureml.train.dnn import TensorFlow, Mpi
import os  # Fix: `os` was used below but never imported in this notebook

# Distributed TensorFlow training job: 2 nodes, 1 MPI process per node
# (Horovod), running code/cifar10_cnn_horovod.py for 30 epochs on GPU.
script = 'cifar10_cnn_horovod.py'
script_folder = os.path.join(os.getcwd(), 'code')
estimator = TensorFlow(source_directory=script_folder,
                       compute_target=aml_cluster,
                       entry_script=script,
                       script_params={'--epochs': 30},
                       node_count=2,
                       distributed_training=Mpi(process_count_per_node=1),
                       pip_packages=['keras'],
                       framework_version='1.13',
                       use_gpu=True)
# Submit the run and show a live progress widget in the notebook.
run = exp.submit(estimator)
RunDetails(run).show()
| chapter12/AML_Horovod_GPU.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/institutohumai/cursos-python/blob/master/APIs/2_APIs_Series_Tiempo/ejercicios/ejercicios.ipynb"> <img src='https://colab.research.google.com/assets/colab-badge.svg' /> </a>
# <div align="center"> Recordá abrir en una nueva pestaña </div>
# # Clase 1: ejercicios prácticos resueltos
# !pip install markdown
# !pip install arrow
# !pip install seaborn
# !pip install requests
# +
from IPython.core.display import display, HTML
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import arrow
import markdown
import requests
# %matplotlib inline
matplotlib.style.use('ggplot')
matplotlib.rcParams['figure.figsize'] = [12, 8]
# -
# ## Ejercicio 1: API de Series de Tiempo de Argentina
# * Genera una tabla y grafica la evolucion de los tipos de cambio ARS/USD de todas las entidades financieras (canal electronico, venta, 15hs).
# +
# Template for Argentina's official time-series API: latest 5000 points, CSV.
BASE_SERIES_API = 'https://apis.datos.gob.ar/series/api/series/?ids={ids}&last=5000&format=csv'
# ARS/USD sale rates (electronic channel, 15hs) for each financial entity.
tcs_api = BASE_SERIES_API.format(
    ids=",".join([
        'tc_usd_galicia_ev15',
        'tc_usd_supervielle_ev15',
        'tc_usd_itau_ev15',
        'tc_usd_macro_ev15',
        'tc_usd_piano_ev15',
        'tc_usd_credicoop_ev15',
        'tc_usd_bbva_ev15',
        'tc_usd_bna_ev15',
        'tc_usd_ibcambio_ev15',
        'tc_usd_patagonia_ev15',
        'tc_usd_hsbc_ev15',
        'tc_usd_brubank_ev15',
        'tc_usd_bullmarket_ev15',
        'tc_usd_santander_ev15',
        'tc_usd_hipotecario_ev15',
        'tc_usd_balanz_ev15',
    ])
)
print(tcs_api)
tcs = pd.read_csv(tcs_api)
# -
# Parse the time column and use it as the index before tabulating/plotting.
tcs['indice_tiempo'] = pd.to_datetime(tcs.indice_tiempo)
tcs = tcs.set_index('indice_tiempo')
tcs
tcs.plot()
# * Genera un reporte automatico en HTML que diga las ultimas temperaturas diarias y el promedio de los ultimos 30 dias, para 3 ciudades de Argentina
# +
# Daily maximum temperatures for three Argentine stations (sarc/sane/saar).
temperaturas_api = BASE_SERIES_API.format(
    ids=",".join([
        'temp_max_sarc',
        'temp_max_sane',
        'temp_max_saar',
    ])
)
print(temperaturas_api)
temperaturas = pd.read_csv(temperaturas_api)
# -
temperaturas['indice_tiempo'] = pd.to_datetime(temperaturas.indice_tiempo)
temperaturas = temperaturas.set_index('indice_tiempo')
temperaturas
# since some days have no values, we must specify the minimum number of
# periods acceptable for computing the 30-day rolling average
temperaturas_30d = temperaturas.rolling(30, min_periods=25).mean()
temperaturas_30d
# +
# Latest available date, plus the per-city latest value and 30-day average.
fecha = arrow.get(temperaturas.index[-1]).format('YYYY-MM-DD')
corrientes_temp = temperaturas.loc[fecha, 'temperatura_maxima_sarc']
santiago_temp = temperaturas.loc[fecha, 'temperatura_maxima_sane']
rosario_temp = temperaturas.loc[fecha, 'temperatura_maxima_saar']
corrientes_temp_30d = temperaturas_30d.loc[fecha, 'temperatura_maxima_sarc']
santiago_temp_30d = temperaturas_30d.loc[fecha, 'temperatura_maxima_sane']
rosario_temp_30d = temperaturas_30d.loc[fecha, 'temperatura_maxima_saar']
# -
# Markdown report body (user-facing text, kept verbatim in Spanish).
reporte = f"""
=== TEMPERATURAS ===
Temperaturas maximas registradas al dia de {fecha} y
promedio de maximas de los 30 dias anteriores.
* Corrientes: {corrientes_temp} (promedio 30d: {corrientes_temp_30d:.1f})
* Santiago del Estero: {santiago_temp} (promedio 30d: {santiago_temp_30d:.1f})
* Rosario: {rosario_temp} (promedio 30d: {rosario_temp_30d:.1f})
=====================
"""
# Render the markdown to HTML and display it inline in the notebook.
html = markdown.markdown(reporte)
display(HTML(html))
# * Grafica la relacion entre el nivel de precios (nucleo) y la base monetaria. Podes buscar la base monetaria en [este dataset](https://datos.gob.ar/dataset/sspm-factores-explicacion-base-monetaria/archivo/sspm_331.2) bajo el nombre de "Saldo de la base monetaria" y la serie de nivel de precios [es esta](https://datos.gob.ar/series/api/series/?ids=148.3_INUCLEONAL_DICI_M_19). Algunos scatter a probar:
# - IPC vs. base monetaria
# - IPC promedio 6 meses vs. base monetaria promedio 6 meses (Pista: usa rolling() y mean()).
# - IPC promedio 6 meses (variacion porcentual) vs. base monetaria promedio 6 meses (variacion porcentual) (Pista: agregale pct_change(1) al anterior).
# - IPC promedio 6 meses (variacion porcentual) vs. base monetaria promedio 6 meses (variacion porcentual) de hace 3 meses -rezago de 3 meses- (Pista: agregale shift(3) a una de las variables).
#
# Que otras variables se podrian incorporar para explicar o controlar esta relacion? Nivel de actividad? Tipo de cambio? Tasa de interes?
# +
# Core CPI (nucleo) and monetary base balance, monthly series.
m_ipc_api = BASE_SERIES_API.format(
    ids=",".join([
        '148.3_INUCLEONAL_DICI_M_19',
        '331.2_SALDO_BASERIA__15',
    ])
)
print(m_ipc_api)
m_ipc = pd.read_csv(m_ipc_api)
# -
m_ipc = m_ipc.set_index('indice_tiempo')
m_ipc.tail()
# Raw scatter: monetary base vs core CPI levels.
m_ipc.plot.scatter(
    'saldo_base_monetaria', 'ipc_nucleo_nacional'
)
# 6-month rolling averages to smooth both series.
m_ipc['m_rolling_6'] = m_ipc.saldo_base_monetaria.rolling(6).mean()
m_ipc['ipc_rolling_6'] = m_ipc.ipc_nucleo_nacional.rolling(6).mean()
m_ipc.plot.scatter(
    'm_rolling_6', 'ipc_rolling_6'
)
# Month-over-month percent change of the smoothed series.
m_ipc['m_roll_6_pct_var'] = m_ipc.saldo_base_monetaria.rolling(6).mean().pct_change(1)
m_ipc['ipc_roll_6_pct_var'] = m_ipc.ipc_nucleo_nacional.rolling(6).mean().pct_change(1)
# +
ax = m_ipc.plot.scatter(
    'm_roll_6_pct_var', 'ipc_roll_6_pct_var'
)
# so the X and Y axes share the same scale
ax.set_aspect('equal')
# -
# CPI variation lagged three months to test a delayed relationship.
m_ipc['ipc_roll_6_pct_var_shift_3'] = m_ipc.ipc_nucleo_nacional.rolling(6).mean().pct_change(1).shift(3)
# +
# bonus track! with this extra code
# you can add the equivalence line
# create a new series holding the values of the series
# used as the plot's Y axis
m_ipc['equal_line'] = m_ipc['ipc_roll_6_pct_var']
# create a dataframe whose index matches the plot's Y values
m_index = m_ipc.set_index('ipc_roll_6_pct_var')
# +
ax = m_ipc.plot.scatter(
    'm_roll_6_pct_var', 'ipc_roll_6_pct_var_shift_3'
)
ax.set_aspect('equal')
# add the equivalence line, where the X-axis values equal the
# Y-axis values (this is NOT a trend line)
m_index.equal_line.plot(ax=ax)
# -
# ### Bonus track: graficar una recta de regresion
# Posiblemente te hayas preguntado haciendo los ejercicios anteriores como se puede graficar una recta de regresion _facilmente_.
#
# Hay una libreria de graficos llamada `seaborn` que tiene muchos de estos graficos tipicos implementados para hacerlos con facilidad.
# Scatter with a fitted regression line. Fix: seaborn >= 0.12 removed
# positional data arguments to regplot; pass x/y explicitly (identical
# output on older versions).
sns.regplot(x=m_ipc.m_roll_6_pct_var, y=m_ipc.ipc_roll_6_pct_var_shift_3)
# ## Ejercicio 2: API de Quandl
# * Grafica las tasas de interes de los bonos de Estados Unidos, a partir del dataset de FRED disponible en Quandl (Pista: podes arrancar a buscar por aca: https://www.quandl.com/data/FRED-Federal-Reserve-Economic-Data?keyword=10%20years%20treasury)
# FRED datasets hosted on Quandl, daily observations from 2010 onward, CSV.
BASE_QUANDL_API = 'https://www.quandl.com/api/v3/datasets/FRED/{serie_id}.csv?start_date=2010-01-01'
# US Treasury constant-maturity yields: 1, 2, 5, 10 and 30 years.
series_ids = ['DGS1', 'DGS2', 'DGS5', 'DGS10', 'DGS30']
def get_fred_serie(serie_id):
    """Fetch one FRED series from Quandl and return it as a pandas Series
    named after its id, sorted by date."""
    # Pull the CSV for this series id from the Quandl REST API.
    raw = pd.read_csv(BASE_QUANDL_API.format(serie_id=serie_id), index_col='Date')
    # Sort chronologically and rename the value column with the series id.
    serie = raw.sort_index().Value.rename(serie_id)
    return serie
# Fetch every series and align them column-wise on the shared date index.
series = list(map(get_fred_serie, series_ids))
df_t = pd.concat(series, axis=1)
df_t.index = pd.to_datetime(df_t.index)
df_t
df_t.plot()
# ## Ejercicio 3: API de Banco Mundial
# * Grafica la evolucion de las emisiones per capita de CO2 para por lo menos 8 paises paises de Sudamérica desde 1960 (o el primer año con datos).
# World Bank v2 API (Spanish locale), JSON, everything in one page.
BASE_BM_API = 'http://api.worldbank.org/v2/es/country/{pais}/indicator/{indicador}?format=json&per_page=20000'
paises = ['arg', 'bra', 'ury', 'chl', 'ven',
          'col', 'bol', 'ecu', 'per', 'pry']
# CO2 emissions per capita indicator.
indicadores = ['EN.ATM.CO2E.PC']
emisiones = requests.get(
    BASE_BM_API.format(
        pais=";".join(paises),
        indicador=";".join(indicadores)
    )
).json()
# Response payload is [metadata, records]; flatten the record list.
df_emisiones = pd.json_normalize(emisiones[1])
# One column per country, indexed by year.
df_emisiones_series = df_emisiones.pivot_table(
    index='date',
    columns='country.value',
    values='value'
)
df_emisiones_series
df_emisiones_series.plot()
| APIs/2_APIs_Series_Tiempo/ejercicios/solucion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import logging
import tensorflow as tf
from ppnp.tensorflow import PPNP
from ppnp.tensorflow.training import train_model
from ppnp.tensorflow.earlystopping import stopping_args
from ppnp.tensorflow.propagation import PPRExact, PPRPowerIteration
from ppnp.data.io import load_dataset
# -
# TF 1.x logging API (tf.logging was removed in TF 2.x).
tf.logging.set_verbosity(tf.logging.INFO)
# Timestamped INFO-level logging for the training output below.
logging.basicConfig(
    format='%(asctime)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO)
# # Load dataset
#
# First we need to load the dataset we want to train on. The datasets used are in the `SparseGraph` format. This is just a class providing the adjacency, attribute and label matrices in a dense (`np.ndarray`) or sparse (`scipy.sparse.csr_matrix`) matrix format and some (in principle unnecessary) convenience functions. If you want to use external datasets, you can e.g. use the `networkx_to_sparsegraph` method in `ppnp.data.io` for converting NetworkX graphs to our SparseGraph format.
#
# The four datasets from the paper (Cora-ML, Citeseer, PubMed and MS Academic) can be found in the directory `data`.
#
# For this example we choose the Cora-ML graph.
# Cora-ML citation graph; keep only the largest connected component.
graph_name = 'cora_ml'
graph = load_dataset(graph_name)
graph.standardize(select_lcc=True)
# # Set up propagation
#
# Next we need to set up the proper propagation scheme. In the paper we've introduced the exact PPR propagation used in PPNP and the PPR power iteration propagation used in APPNP.
#
# Here we use the hyperparameters from the paper. Note that we should use a different `alpha = 0.2` for MS Academic.
# Exact personalized-PageRank propagation (PPNP) and its 10-step power
# iteration approximation (APPNP); teleport probability alpha = 0.1.
prop_ppnp = PPRExact(graph.adj_matrix, alpha=0.1)
prop_appnp = PPRPowerIteration(graph.adj_matrix, alpha=0.1, niter=10)
# # Choose model hyperparameters
#
# Now we choose the hyperparameters. These are the ones used in the paper for all datasets.
#
# Note that we choose the propagation for APPNP.
# model_args = {
# 'hiddenunits': [64],
# 'reg_lambda': 5e-3,
# 'learning_rate': 0.01,
# 'keep_prob': 0.5,
# 'propagation': prop_appnp}
# Hyperparameters from the paper; 'propagation' selects the exact-PPR
# (PPNP) variant here — swap in prop_appnp for APPNP.
model_args = {
    'hiddenunits': [64],
    'reg_lambda': 5e-3,
    'learning_rate': 0.01,
    'keep_prob': 0.5,
    'propagation': prop_ppnp}
# # Train model
#
# Now we can train the model.
# Split sizes for train/early-stopping/known nodes with a fixed seed.
idx_split_args = {'ntrain_per_class': 20, 'nstopping': 500, 'nknown': 1500, 'seed': 2413340114}
test = False           # evaluate on the stopping set rather than the test set
save_result = False    # do not persist results to disk
print_interval = 20    # log progress every 20 steps
result = train_model(
    graph_name, PPNP, graph, model_args, idx_split_args,
    stopping_args, test, save_result, None, print_interval)
| simple_example_tensorflow.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia (4 threads) 1.5.2
# language: julia
# name: julia-(4-threads)-1.5
# ---
# Matplotlib via PyPlot; PyCall gives direct numpy access; Peaks provides
# argmaxima for peak finding in the spectra below.
using PyPlot, PyCall, Peaks
np = pyimport("numpy")
# Global figure styling: large Arial fonts, 6x6 in default figure size.
rcParams = PyPlot.PyDict(PyPlot.matplotlib."rcParams")
rcParams["font.size"] = 15
rcParams["legend.fontsize"] = "xx-large"
rcParams["axes.labelsize"] = "xx-large"
rcParams["axes.titlesize"] = "xx-large"
rcParams["xtick.labelsize"] = "xx-large"
rcParams["ytick.labelsize"] = "xx-large"
rcParams["font.sans-serif"] = "Arial"
rcParams["font.family"] = "sans-serif"
rcParams["figure.figsize"] = (6, 6)
pwd()
filter(x -> contains(x, ".xsf"), readdir())
run(`open alpha.xsf`)
readdir()[contains.(readdir(), "Excitations")]
readdir()[contains.(readdir(), "eigenvals")]
.11*27.2
run(`cat alpha.Excitations`)
# Spin density difference (up minus down), written for later visualization.
write("alpha.n_diff", np.fromfile("alpha.n_up")-np.fromfile("alpha.n_dn"))
# Columns 3 and 6 of the excitation table — presumably energy (Hartree) and
# a |r|^2 dipole strength; confirm against the file header.
excitations = np.loadtxt("alpha.Excitations", skiprows=6, usecols=[3, 6]);
eV=27.2  # Hartree -> eV conversion factor
# +
# Gaussian-broadened dipole spectrum on a 500-point grid from 1.3 to 5 eV.
broadenedXdipole = zeros(500)
broadening=0.1  # Gaussian width in eV
energies= range(1.3, 5, length=500)
for (e, rsq) in zip(excitations[:, 1], excitations[:, 2])
    # Oscillator strength ~ |r|^2 * E, smeared by a Gaussian centered at
    # eV*e (the excitation energy converted to eV).
    broadenedXdipole += rsq*e*exp.(-((energies .- eV*e).^2)/broadening^2)
end
plot(energies, broadenedXdipole/maximum(broadenedXdipole), linewidth=5, color=:red)
#xlims!(1.5, 2.9)
#ylims!(0, 0.6)
#xlabel!("Energy (eV)")
#ylabel!("Oscillator Strength (a.u.)")
#vline!(energies[argmaxima(broadenedXdipole)], linewidth=5, color=:blue)
#yticks!(Float64[])
#savefig("BES2.svg")
# -
plot(1240 ./ energies, broadenedXdipole/maximum(broadenedXdipole), linewidth=5)
xlim(450, 800)
ylim(0, 0.3)
# |p|^2 matrix elements reshaped to (k-point, direction, band, band);
# 224 = 112 k-points x 2 spins — TODO confirm layout with the generator.
# Fix: np.complex was removed in NumPy 1.24; np.complex128 is the same type.
momenta = (abs.(np.reshape(np.fromfile("alpha.momenta", np.complex128), (224, 3, 36, 36)))).^2;
# Flat eigenvalue file: 36 bands x (112 k-points x 2 spins), Float64.
reshaped=reshape(read!("alpha.eigenvals", Array{Float64}(undef, 36*112*2 )),(36, 112*2));
# Spin-up / spin-down eigenvalues in eV; rows = k-points, columns = bands.
eups=permutedims(reshaped, [2, 1])[1:112, :]*eV;
edns=permutedims(reshaped, [2, 1])[112+1:2*112, :]*eV;
using Statistics
# k-point average of each band energy.
eups_av = mean(eups, dims=1)
edns_av = mean(edns, dims=1)
repeat(eups_av, 10)
# plot(title="Green and Red Transition States")
plot([edns_av[24]], linewidth=5, color=:black)
#hline!([edns_av[23]], legend=false, ylims=[0, 3.5], linewidth=5, color=:red)
#hline!([edns_av[22]], legend=false, ylims=[0, 3.5], linewidth=5, color=:green)
#xticks!(Float64[])
# ylabel!("Energy (eV)")
#savefig("DefectStates.svg")
# PyPlot compatible
# title("Green and Red Transition States")
# Energy-level diagram: k-averaged spin-down band energies for bands 22-24
# drawn as horizontal lines, saved to DefectStates.svg.
figure(figsize = (1,2.5))
plot([0,1],[edns_av[24],edns_av[24]], linewidth=5, color=:black)
plot([0,1],[edns_av[23],edns_av[23]], linewidth=5, color=:red)
plot([0,1],[edns_av[22],edns_av[22]], linewidth=5, color=:green)
ylim([0,3.5])
yticks([0,1,2,3])
xticks([])
# hline!([edns_av[23]], legend=false, ylims=[0, 3.5], linewidth=5, color=:red)
# hline!([edns_av[22]], legend=false, ylims=[0, 3.5], linewidth=5, color=:green)
# xticks!(Float64[])
ylabel("Energy (eV)")
savefig("DefectStates.svg")
run(`cat alpha.out`)
# +
# Spectrum from momentum matrix elements: sum over occupied -> unoccupied
# spin-down transitions at each k-point, Gaussian-broadened as above.
broadenedX = zeros(500)
broadening=0.1
μ = 0.111873*27.2  # chemical potential, converted from Hartree to eV
energies = range(1.3, 5, length=500)
for kpoint in 1:112
    for band1 in 1:36
        for band2 in 1:36
            e1 = edns[kpoint, band1]; e2 = edns[kpoint, band2]
            # Keep only occupied (below μ) -> unoccupied (above μ) pairs.
            e1 < μ || continue
            e2 > μ || continue
            e = e2-e1
            # |p|^2 summed over the three cartesian directions;
            # kpoint+112 selects the spin-down block of `momenta`.
            psquared = momenta[kpoint+112, 1, band1, band2]+momenta[kpoint+112, 2, band1, band2]+momenta[kpoint+112, 3, band1, band2]
            broadenedX += (psquared/e)*exp.(-((energies .- e).^2)/broadening^2)
        end
    end
end
plot(energies, broadenedX/maximum(broadenedX), linewidth=5, color=:red)
plot(energies, broadenedXdipole/maximum(broadenedXdipole), linewidth=5, color=:black)
#xlims!(1, 3)
#yticks!(Float64[])
#xlabel!("Energy (eV)")
# -
1240 ./ energies[argmaxima(broadenedX)]
1240 ./ energies[argmaxima(broadenedXdipole)]
# Compare momentum- vs dipole-derived spectra on a wavelength axis
# (1240/E converts photon energy in eV to wavelength in nm).
figure(figsize = (6, 3))
plot(1240 ./ energies, broadenedX/maximum(broadenedX), linewidth=5, color=:red, label = "Momentum")
plot(1240 ./ energies, broadenedXdipole/maximum(broadenedXdipole), linewidth=5, label = "Dipole")
legend()
xlim([420, 800])
ylim([0, 0.5])
# yaxis!(false)
xlabel("Wavelength (nm)")
ylabel("Spectrum (a.u.)")
# yticks(Real[])
savefig("BES1_SI.svg")
(1240 ./ energies)[(argmaxima(broadenedXdipole))][1:2]
(1240 ./ energies)[(argmaxima(broadenedX))][1:2]
readdir()
# +
#plot(1240 ./ energies, broadenedX/maximum(broadenedX), linewidth=5, color=:red, legend=false, size=(600, 400), title="Bulk Excitation Spectrum (Momentum Matrix Elements)")
plot(1240 ./ energies, broadenedXdipole/maximum(broadenedXdipole), linewidth=5, color=:black, legend=false, size=(600, 400), title="Bulk Excitation Spectrum")
xlims!(400, 800)
ylims!(0, 0.23)
ylabel!("Oscillator Strength (a.u.)", yguidefontsize=10, size=(400, 500))
yticks!(Float64[])
vline!((1240 ./ energies)[(argmaxima(broadenedXdipole))], linewidth=6, color=:red)
#Only plot the green and red peaks
xlabel!("Wavelength (nm)", xtickfontsize=12, xguidefontsize=20)
# -
eups = np.fromfile("alpha.eigenvals")[1:36]*eV;
edns = np.fromfile("alpha.eigenvals")[37:end]*eV;
plot(transpose(reshape(repeat(eups, 10), (36, 10))), legend=false, ylims=[-2,3 ] , linewidth=5, color=:blue)
plot!(transpose(reshape(repeat(edns, 10), (36, 10))), legend=false, ylims=[-2,8 ] , xticks=nothing,size=(400, 600), linewidth=5, color=:red)
run(`cat alpha.EigStats`)
| nanoscint/maintext/DFT Setup and Analysis/STH Defects With Hybrid Functionals and DFT+U/clusterresults/Bulk/BB1K/alteredlattice/.ipynb_checkpoints/Bulk BB1K Analysis - Plots for paper-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="f2v2y0kDhE3V"
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/ShopRunner/collie_recs/blob/main/tutorials/05_hybrid_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/ShopRunner/collie_recs/blob/main/tutorials/05_hybrid_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a>
# </td>
# <td>
# <a target="_blank" href="https://raw.githubusercontent.com/ShopRunner/collie_recs/main/tutorials/05_hybrid_model.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" /> Download notebook</a>
# </td>
# </table>
# + id="8Tcd653JhE3h"
# for Collab notebooks, we will start by installing the ``collie_recs`` library
# !pip install collie_recs --quiet
# + id="1nMfnWFKhE3j" outputId="0452ef70-4b40-4e71-ebcc-72aa43ab4d07"
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# %env DATA_PATH data/
# + id="hIKvscx2hE3k"
import os
import numpy as np
import pandas as pd
from pytorch_lightning.utilities.seed import seed_everything
from IPython.display import HTML
import joblib
import torch
from collie_recs.metrics import mapk, mrr, auc, evaluate_in_batches
from collie_recs.model import CollieTrainer, HybridPretrainedModel, MatrixFactorizationModel
from collie_recs.movielens import get_movielens_metadata, get_recommendation_visualizations
# + [markdown] id="44gLNrW-ghT_"
# ## Load Data From ``01_prepare_data`` Notebook
# If you're running this locally on Jupyter, you should be able to run the next cell quickly without a problem! If you are running this on Colab, you'll need to regenerate the data by running the cell below that, which should only take a few extra seconds to complete.
# + id="XKKPmYi_ghT_" outputId="23c16373-6747-4255-9f52-7091312e0b71"
# Load the pre-split ``Interactions`` saved by notebook ``01``; fall back
# to regenerating them from raw MovieLens data (e.g. when running on Colab).
try:
    # let's grab the ``Interactions`` objects we saved in the last notebook
    train_interactions = joblib.load(os.path.join(os.environ.get('DATA_PATH', 'data/'),
                                                  'train_interactions.pkl'))
    val_interactions = joblib.load(os.path.join(os.environ.get('DATA_PATH', 'data/'),
                                                'val_interactions.pkl'))
except FileNotFoundError:
    # we're running this notebook on Colab where results from the first notebook are not saved
    # regenerate this data below
    from collie_recs.cross_validation import stratified_split
    from collie_recs.interactions import Interactions
    from collie_recs.movielens import read_movielens_df
    from collie_recs.utils import convert_to_implicit, remove_users_with_fewer_than_n_interactions
    # Implicit feedback: keep ratings >= 4 and users with >= 3 interactions.
    df = read_movielens_df(decrement_ids=True)
    implicit_df = convert_to_implicit(df, min_rating_to_keep=4)
    implicit_df = remove_users_with_fewer_than_n_interactions(implicit_df, min_num_of_interactions=3)
    interactions = Interactions(
        users=implicit_df['user_id'],
        items=implicit_df['item_id'],
        ratings=implicit_df['rating'],
        allow_missing_ids=True,
    )
    # 90/10 split stratified so every user appears in both partitions.
    train_interactions, val_interactions = stratified_split(interactions, test_p=0.1, seed=42)
print('Train:', train_interactions)
print('Val:  ', val_interactions)
# + [markdown] id="nfjulOO0hE3m"
# # Hybrid Collie Model
# In this notebook, we will use this same metadata and incorporate it directly into the model architecture with a hybrid Collie model.
# + [markdown] id="CM_O67rRhE3m"
# ## Read in Data
# + id="aELBEe5bhE3n" outputId="70033834-c3f8-4038-f0b4-e346600ef4d4"
# read in the same metadata used in notebooks ``03`` and ``04``
metadata_df = get_movielens_metadata()
metadata_df.head()
# + id="_4r2CHX_hE3n" outputId="5cd6ffda-4a4e-4e35-93ea-dc1a46df896a"
# and, as always, set our random seed for reproducible training runs
seed_everything(22)
# + [markdown] id="Laxa0vh1hE3o"
# ## Train a ``MatrixFactorizationModel``
# + [markdown] id="Fj3tJg-1hE3o"
# The first step towards training a Collie Hybrid model is to train a regular ``MatrixFactorizationModel`` to generate rich user and item embeddings. We'll use these embeddings in a ``HybridPretrainedModel`` a bit later.
# + id="m75xWkQLhE3o"
# Baseline matrix-factorization model; its user/item embeddings will be
# reused by the hybrid model below.
model = MatrixFactorizationModel(
    train=train_interactions,
    val=val_interactions,
    embedding_dim=30,
    lr=1e-2,
)
# + colab={"referenced_widgets": ["", "7c845f6e17ca48f8bcc6bf3b9e82e8c9"]} id="bjh0jE0yhE3p" outputId="ff30bf3a-8f82-479a-cb5f-859ca2aae7ce"
trainer = CollieTrainer(model=model, max_epochs=10, deterministic=True)
trainer.fit(model)
# + colab={"referenced_widgets": ["e85f417f5f5448498e6be79f745e365d"]} id="6tvE66cfhE3p" outputId="816b1ab5-7c1b-4ba6-b785-a852c3b88090"
# Validation metrics for the plain MF baseline.
mapk_score, mrr_score, auc_score = evaluate_in_batches([mapk, mrr, auc], val_interactions, model)
print(f'Standard MAP@10 Score: {mapk_score}')
print(f'Standard MRR Score: {mrr_score}')
print(f'Standard AUC Score: {auc_score}')
# + [markdown] id="CRq29RVfhE3q"
# ## Train a ``HybridPretrainedModel``
# + [markdown] id="lFh1LEcChE3q"
# With our trained ``model`` above, we can now use these embeddings and additional side data directly in a hybrid model. The architecture essentially takes our user embedding, item embedding, and item metadata for each user-item interaction, concatenates them, and sends it through a simple feedforward network to output a recommendation score.
#
# We can initially freeze the user and item embeddings from our previously-trained ``model``, train for a few epochs only optimizing our newly-added linear layers, and then train a model with everything unfrozen at a lower learning rate. We will show this process below.
# + id="RPgUTdR1hE3r"
# we will apply a linear layer to the metadata with ``metadata_layers_dims`` and
# a linear layer to the combined embeddings and metadata data with ``combined_layers_dims``
# Phase 1: embeddings frozen — only the new linear layers are optimized.
hybrid_model = HybridPretrainedModel(
    train=train_interactions,
    val=val_interactions,
    item_metadata=metadata_df,
    trained_model=model,
    metadata_layers_dims=[8],
    combined_layers_dims=[16],
    lr=1e-2,
    freeze_embeddings=True,
)
# + colab={"referenced_widgets": ["", "fbcd8d906b3b4558b4070e65c434b328"]} id="vyyUg5ilhE3r" outputId="3b998d8e-e003-4e2e-a48a-ca110e643d41"
hybrid_trainer = CollieTrainer(model=hybrid_model, max_epochs=10, deterministic=True)
hybrid_trainer.fit(hybrid_model)
# + colab={"referenced_widgets": ["11082307fc964d4b928e6aba846f520e"]} id="I8eEYwcfhE3s" outputId="3c8449c6-0a7a-4865-9608-46cb50333ce4"
# Validation metrics for the frozen-embedding hybrid model.
mapk_score, mrr_score, auc_score = evaluate_in_batches([mapk, mrr, auc], val_interactions, hybrid_model)
print(f'Hybrid MAP@10 Score: {mapk_score}')
print(f'Hybrid MRR Score: {mrr_score}')
print(f'Hybrid AUC Score: {auc_score}')
# + id="EEw83cTUhE3s"
# Phase 2: unfreeze everything and fine-tune at a lower learning rate,
# starting from the weights learned in the frozen phase.
hybrid_model_unfrozen = HybridPretrainedModel(
    train=train_interactions,
    val=val_interactions,
    item_metadata=metadata_df,
    trained_model=model,
    metadata_layers_dims=[8],
    combined_layers_dims=[16],
    lr=1e-4,
    freeze_embeddings=False,
)
# Copy the phase-1 weights into the new, fully-trainable model.
hybrid_model.unfreeze_embeddings()
hybrid_model_unfrozen.load_from_hybrid_model(hybrid_model)
# + colab={"referenced_widgets": ["", "eeb6803e1ad542729e8e14c1e27aefe1"]} id="yiA-EylqhE3t" outputId="b3973901-281f-4846-ff80-be2a4ae265ee"
hybrid_trainer_unfrozen = CollieTrainer(model=hybrid_model_unfrozen, max_epochs=10, deterministic=True)
hybrid_trainer_unfrozen.fit(hybrid_model_unfrozen)
# + colab={"referenced_widgets": ["f91b094c979b4d0eb1eee7c4a7eb5902"]} id="sof4rqMbhE3u" outputId="1d8603d4-796f-4d42-bbb2-fa951a7e7190"
# Validation metrics for the fully fine-tuned hybrid model.
mapk_score, mrr_score, auc_score = evaluate_in_batches([mapk, mrr, auc],
                                                       val_interactions,
                                                       hybrid_model_unfrozen)
print(f'Hybrid Unfrozen MAP@10 Score: {mapk_score}')
print(f'Hybrid Unfrozen MRR Score: {mrr_score}')
print(f'Hybrid Unfrozen AUC Score: {auc_score}')
# + [markdown] id="rM35J6KRhE3u"
# Note here that while our ``MAP@10`` and ``MRR`` scores went down slightly from the frozen version of the model above, our ``AUC`` score increased. For implicit recommendation models, each evaluation metric is nuanced in what it represents for real world recommendations.
#
# You can read more about each evaluation metric by checking out the [Mean Average Precision at K (MAP@K)](https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision), [Mean Reciprocal Rank](https://en.wikipedia.org/wiki/Mean_reciprocal_rank), and [Area Under the Curve (AUC)](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve) Wikipedia pages.
# + id="EwM1pkf_hE3v" outputId="8a2b20ad-f663-4294-bece-f69fb3a623ab"
# Render an inline HTML card of recommendations for one random user.
user_id = np.random.randint(0, train_interactions.num_users)
display(
    HTML(
        get_recommendation_visualizations(
            model=hybrid_model_unfrozen,
            user_id=user_id,
            filter_films=True,
            shuffle=True,
            detailed=True,
        )
    )
)
# + [markdown] id="fNwj-u-AhE3w"
# The metrics and results look great, and we should only see a larger difference compared to a standard model as our data becomes more nuanced and complex (such as with MovieLens 10M data).
#
# If we're happy with this model, we can go ahead and save it for later!
# + [markdown] id="xYmFQZEhhE3w"
# ## Save and Load a Hybrid Model
# + id="2ZDlfmAVhE3w"
# we can save the model with...
os.makedirs('models', exist_ok=True)
hybrid_model_unfrozen.save_model('models/hybrid_model_unfrozen')
# + id="qW3kPpenhE3x" outputId="2e5f906c-32a3-4532-d798-6c345b7703eb"
# ... and if we wanted to load that model back in, we can do that easily...
# (loading rebuilds the full architecture from the saved directory)
hybrid_model_loaded_in = HybridPretrainedModel(load_model_path='models/hybrid_model_unfrozen')
hybrid_model_loaded_in
# + [markdown] id="1rDDCMkehE3x"
# That's the end of our tutorials, but it's not the end of the awesome features available in Collie. Check out all the different available architectures in the documentation [here](https://collie.readthedocs.io/en/latest/index.html)!
# + [markdown] id="S5RC4DzjhE3y"
# -----
| tutorials/05_hybrid_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hotel Recommender System
# Train and deploy a Hotel Recommender System using the public Expedia data from Kaggle competition https://www.kaggle.com/c/expedia-hotel-recommendations/overview
#
# **The goal is to help Expedia visitors find their dream hotel!**
#
# Download data from https://www.kaggle.com/c/expedia-hotel-recommendations/data and unzip the downloaded file.
# +
import json
import time

import boto3

# Create (or reuse) the IAM role Amazon Personalize assumes to read data.
session = boto3.Session(profile_name='personalize')
iam = session.client("iam")

role_name = "PersonalizeRole"
# Trust policy letting the Personalize service assume this role.
assume_role_policy_document = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "personalize.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}

# Fix: the original crashed with EntityAlreadyExists when this notebook was
# re-run; reuse the existing role instead.
try:
    create_role_response = iam.create_role(
        RoleName = role_name,
        AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)
    )
    role_arn = create_role_response["Role"]["Arn"]
except iam.exceptions.EntityAlreadyExistsException:
    role_arn = iam.get_role(RoleName=role_name)["Role"]["Arn"]

# AmazonPersonalizeFullAccess provides access to any S3 bucket with a name that includes "personalize" or "Personalize"
# if you would like to use a bucket with a different name, please consider creating and attaching a new policy
# that provides read access to your bucket or attaching the AmazonS3ReadOnlyAccess policy to the role
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess"
iam.attach_role_policy(
    RoleName = role_name,
    PolicyArn = policy_arn
)

time.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate

print(role_arn)
# -
import pandas as pd

# Load the raw Expedia interactions from the unzipped Kaggle download.
df = pd.read_csv('./expedia-hotel-recommendations/train.csv')

df.head(100)

df.dtypes

df['date_time'].head(10)

# +
import numpy as np

# Convert the string timestamp to epoch *milliseconds*: datetime64[ns] as
# int64 (nanoseconds since epoch) floor-divided by 10**6.
df['ts'] = pd.to_datetime(df['date_time'], format="%Y-%m-%d %H:%M:%S").values.astype(np.int64) // 10**6
# -

df.dtypes

df.head(10)

# ### Build the user-item interactions data set

# Keep only the three columns Personalize needs, then rename them to the
# USER_ID / ITEM_ID / TIMESTAMP names used by the interactions schema below.
# NOTE(review): the "item" here is ``hotel_cluster``, i.e. a cluster of
# hotels rather than an individual hotel.
df_subset = df[['user_id', 'hotel_cluster', 'ts']]

df_subset.head(10)

df_subset.columns = ['USER_ID','ITEM_ID', 'TIMESTAMP']

session = boto3.Session(profile_name='personalize') # replace with an aws profile with access to S3 and Personalize
personalize = session.client('personalize', region_name='us-east-1')
personalize_runtime = session.client('personalize-runtime', region_name='us-east-1')

bucket = "personalize-hotels"        # replace with the name of your S3 bucket. Make sure the bucket is already created.
filename = "hotels-interactions.csv" # replace with a name that you want to save the dataset under

# +
# Save user-item interactions data set in a file locally
df_subset.to_csv(filename, index=False)

# Upload user-item interactions data set file to S3
session.resource('s3').Bucket(bucket).Object(filename).upload_file(filename)
# -
# ### Define user-item interactions data set schema in Amazon Personalize
# +
# Avro schema describing the interactions dataset; an INTERACTIONS dataset
# requires exactly these USER_ID / ITEM_ID / TIMESTAMP fields.
schema = {
    "type": "record",
    "name": "Interactions",
    "namespace": "com.amazonaws.personalize.schema",
    "fields": [
        {
            "name": "USER_ID",
            "type": "string"
        },
        {
            "name": "ITEM_ID",
            "type": "string"
        },
        {
            "name": "TIMESTAMP",
            "type": "long"
        }
    ],
    "version": "1.0"
}

create_schema_response = personalize.create_schema(
    name = "twitch-hotel-recommender-schema",
    schema = json.dumps(schema)
)

# Keep the schema ARN -- it is needed when the dataset is created below.
schema_arn = create_schema_response['schemaArn']
print(json.dumps(create_schema_response, indent=2))
# -
# ### Create data set group in Amazon Personalize
# +
# A dataset group is the container for datasets, solutions and campaigns.
create_dataset_group_response = personalize.create_dataset_group(
    name = "twitch-hotel-recommender-group"
)

dataset_group_arn = create_dataset_group_response['datasetGroupArn']
print(json.dumps(create_dataset_group_response, indent=2))
# -

# Poll once a minute (for at most 3 hours) until the group is ACTIVE or the
# creation has failed.
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_dataset_group_response = personalize.describe_dataset_group(
        datasetGroupArn = dataset_group_arn
    )
    status = describe_dataset_group_response["datasetGroup"]["status"]
    print("DatasetGroup: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# ### Create data set in the group in Amazon Personalize
# +
# Create the INTERACTIONS dataset inside the group, bound to the schema.
dataset_type = "INTERACTIONS"
create_dataset_response = personalize.create_dataset(
    name = "twitch-hotel-recommender-dataset",
    datasetType = dataset_type,
    datasetGroupArn = dataset_group_arn,
    schemaArn = schema_arn
)

dataset_arn = create_dataset_response['datasetArn']
print(json.dumps(create_dataset_response, indent=2))

# +
# NOTE(review): this overwrites the ``role_arn`` created at the top of the
# notebook with a hard-coded, account-specific ARN -- confirm which role is
# actually intended.
role_arn = "arn:aws:iam::296654805457:role/Personalize" # replace with a Role that has access to Personalize

# Import the CSV from S3 into the dataset.
create_dataset_import_job_response = personalize.create_dataset_import_job(
    jobName = "twitch-hotel-recommender-dataset-import-job",
    datasetArn = dataset_arn,
    dataSource = {
        "dataLocation": "s3://{}/{}".format(bucket, filename)
    },
    roleArn = role_arn
)

dataset_import_job_arn = create_dataset_import_job_response['datasetImportJobArn']
print(json.dumps(create_dataset_import_job_response, indent=2))
# -

# Poll until the import finishes (up to 3 hours).
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_dataset_import_job_response = personalize.describe_dataset_import_job(
        datasetImportJobArn = dataset_import_job_arn
    )
    status = describe_dataset_import_job_response["datasetImportJob"]['status']
    print("DatasetImportJob: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# ### List recommender algorithms/recipes available in Amazon Personalize
list_recipes_response = personalize.list_recipes()
list_recipes_response

# NOTE(review): aws-hrnn is a legacy recipe; newer accounts may need one of
# the recipes actually returned by ``list_recipes`` above (e.g.
# aws-user-personalization) -- verify before running.
recipe_arn = "arn:aws:personalize:::recipe/aws-hrnn"
# ### Create solution in Amazon Personalize
# In other words, let's train the hotel-recommender system!
# +
# A solution pairs the dataset group with a recipe; training happens when a
# solution *version* is created below.
create_solution_response = personalize.create_solution(
    name = "twitch-hotel-recommender-solution",
    datasetGroupArn = dataset_group_arn,
    recipeArn = recipe_arn
)

solution_arn = create_solution_response['solutionArn']
print(json.dumps(create_solution_response, indent=2))

# +
# Creating a solution version kicks off the actual model training.
create_solution_version_response = personalize.create_solution_version(
    solutionArn = solution_arn
)

solution_version_arn = create_solution_version_response['solutionVersionArn']
print(json.dumps(create_solution_version_response, indent=2))
# -

# Poll until training finishes (up to 3 hours).
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_solution_version_response = personalize.describe_solution_version(
        solutionVersionArn = solution_version_arn
    )
    status = describe_solution_version_response["solutionVersion"]["status"]
    print("SolutionVersion: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# ### Time to retrieve accuracy metrics of the trained recommender system model!
# +
# Retrieve the offline accuracy metrics Personalize computed for the
# trained solution version.
get_solution_metrics_response = personalize.get_solution_metrics(
    solutionVersionArn = solution_version_arn
)

print(json.dumps(get_solution_metrics_response, indent=2))
# -

# ### Let's deploy the trained recommender model!

# +
# A campaign is the deployed, queryable endpoint for a solution version.
create_campaign_response = personalize.create_campaign(
    name = "twitch-hotel-recommender-campaign",
    solutionVersionArn = solution_version_arn,
    minProvisionedTPS = 1
)

campaign_arn = create_campaign_response['campaignArn']
print(json.dumps(create_campaign_response, indent=2))
# -

# Poll until the campaign endpoint is live (up to 3 hours).
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_campaign_response = personalize.describe_campaign(
        campaignArn = campaign_arn
    )
    status = describe_campaign_response["campaign"]["status"]
    print("Campaign: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)

# Fetch recommendations for one sample user id from the interactions data.
get_recommendations_response = personalize_runtime.get_recommendations(
    campaignArn = campaign_arn,
    userId = '93'
)
get_recommendations_response
| hotel-recommender-personalize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report,confusion_matrix

# Load the train/test spreadsheets.
# NOTE(review): hard-coded Windows user paths -- adjust before running
# anywhere else.
df = pd.read_excel (r'C:\Users\DELL\Downloads\Train_dataset.xlsx')
df1 = pd.read_excel (r'C:\Users\DELL\Downloads\Test_dataset.xlsx')
print (df)
print (df1)

df.info()
# +
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns

# Correlation heatmap over the numeric columns of the training set.
corr=df.corr()
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
# +
df.isnull().sum()
# -
df1.isnull().sum()

print(len(df))
print(len(df1))

# Cardinality of the categorical columns.
print(len(df.City.unique()))
print(len(df.State.unique()))
print(len(df.Type.unique()))
print(len(df.SWM.unique()))

df1

# Drop the high-cardinality identifiers and the 2001 population column
# (column name misspelled "Popuation" in the source data).
df=df.drop(['City','State','Popuation [2001]'], axis=1)
df1=df1.drop(['City','State','Popuation [2001]'], axis=1)

df['Sex Ratio'].hist(bins=50)

# Impute missing sex ratios with the *training* mean (applied to both
# frames so no test statistics leak in), then rescale per-1000 -> ratio.
mean_sr= df['Sex Ratio'].mean()
df['Sex Ratio'].fillna(mean_sr,inplace=True)
df1['Sex Ratio'].fillna(mean_sr,inplace=True)
df['Sex Ratio'].isnull().sum()

# NOTE(review): truncating to int64 *before* dividing by 1000 discards the
# fractional part of the imputed mean -- presumably intentional rounding;
# confirm.
df['Sex Ratio']=df['Sex Ratio'].astype('int64')/1000
df1['Sex Ratio']=df1['Sex Ratio'].astype('int64')/1000
df['Sex Ratio']

df['SWM'].value_counts().plot.bar()
# Most frequent SWM label from the training data.  ``Series.mode()`` returns
# a Series (there can be ties), so take element 0 to get a scalar: filling
# with the Series itself would align on the row index and effectively fill
# only row 0, leaving the remaining NaNs in place.
mode_swm = df['SWM'].mode()[0]
df['SWM'].fillna(mode_swm, inplace=True)
df1['SWM'].fillna(mode_swm, inplace=True)
df['SWM'].isnull().sum()
# +
# One-hot encode SWM, dropping the 'MEDIUM' level as the reference
# category, then replace the original column with the dummies.
# NOTE(review): get_dummies runs on train and test separately -- if the
# test set lacks a category the two frames end up with different columns;
# verify both contain the same SWM levels.
dummies_train = pd.get_dummies(df['SWM'])
dummies_train.drop(['MEDIUM'],axis=1,inplace=True)
dummies_test = pd.get_dummies(df1['SWM'])
dummies_test.drop(['MEDIUM'],axis=1,inplace=True)
df = df.join(dummies_train)
df1 = df1.join(dummies_test)
df1.drop(['SWM'],axis=1,inplace=True)
df.drop(['SWM'],axis=1,inplace=True)
# -

df

df['Median Age'].hist(bins=50)

# Impute missing ages with the training median, then cast to whole years.
median_age=df['Median Age'].median()
df['Median Age'].fillna(median_age,inplace=True)
df1['Median Age'].fillna(median_age,inplace=True)
df['Median Age'].isnull().sum()

df['Median Age']=df['Median Age'].astype('int64')
df1['Median Age']=df1['Median Age'].astype('int64')
df['Avg Temp'].hist(bins=10)

# Log transform shown for inspection only; the raw column is what is kept.
F= np.log(df['Avg Temp'])
F.hist(bins=10)

# +
# Impute average temperature with the training mean.
mean_temp=df['Avg Temp'].mean()
df['Avg Temp'].fillna(mean_temp,inplace=True)
df1['Avg Temp'].fillna(mean_temp,inplace=True)
df['Avg Temp'].isnull().sum()
# -

df['Avg Temp']=df['Avg Temp'].astype('int64')
df1['Avg Temp']=df1['Avg Temp'].astype('int64')

df['# of hospitals'].hist(bins=50)

# NOTE(review): np.random.randint is evaluated once per statement, so every
# missing value within a frame gets the *same* random draw (train and test
# get different draws) -- confirm this is the intended imputation.
df['# of hospitals'].fillna(np.random.randint(low=10, high=30),inplace=True)
df1['# of hospitals'].fillna(np.random.randint(low=10, high=30),inplace=True)
df['# of hospitals'].isnull().sum()

df['Toilets Avl'].hist(bins=50)

# +
# Mean-impute toilet availability, then cast to integer counts.
mean_toilet=df['Toilets Avl'].mean()
df['Toilets Avl'].fillna(mean_toilet,inplace=True)
df1['Toilets Avl'].fillna(mean_toilet,inplace=True)
df['Toilets Avl'].isnull().sum()
# -

df['Toilets Avl']=df['Toilets Avl'].astype('int64')
df1['Toilets Avl']=df1['Toilets Avl'].astype('int64')

df['Water Purity'].hist(bins=50)

# Mean-impute water purity.
mean_water=df['Water Purity'].mean()
df['Water Purity'].fillna(mean_water,inplace=True)
df1['Water Purity'].fillna(mean_water,inplace=True)
df['Water Purity'].isnull().sum()

df['H Index'].hist(bins=50)

# Mean-impute the H Index.
mean_index=df['H Index'].mean()
df['H Index'].fillna(mean_index,inplace=True)
df1['H Index'].fillna(mean_index,inplace=True)
df['H Index'].isnull().sum()

df['Foreign Visitors'].hist(bins=10)

max_viz=df['Foreign Visitors'].max()
min_viz=df['Foreign Visitors'].min()
min_viz

# Same single-draw random imputation caveat as '# of hospitals' above.
df['Foreign Visitors'].fillna(np.random.randint(low=100000, high=1000000),inplace=True)
df1['Foreign Visitors'].fillna(np.random.randint(low=100000, high=1000000),inplace=True)
df['Foreign Visitors'].isnull().sum()

df['Foreign Visitors']=df['Foreign Visitors'].astype('int64')
df1['Foreign Visitors']=df1['Foreign Visitors'].astype('int64')

# +
df.head(5)
# +
df1.head(5)
# -

# Export the cleaned frames.
# NOTE(review): ExcelWriter.save() was removed in pandas 2.0 -- switch to
# writer.close() if running on a recent pandas.
writer = pd.ExcelWriter('hackathon1.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()

writer = pd.ExcelWriter('hackathon2.xlsx', engine='xlsxwriter')
df1.to_excel(writer, sheet_name='Sheet1')
writer.save()
| HackathonPrediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import music21 as m21
import pandas as pd
import json
import matplotlib.pyplot as plt
from scipy import stats
import time

# Fix the RNG so the noise experiments below are reproducible.
np.random.seed(777)

# Define dataset paths
MXML_PATH="./dataset/MTC-ANN-2.0.1/musicxml/"
METADATA_PATH="./dataset/MTC-ANN-2.0.1/metadata/"
# -
# ## Data
# Read table of tune family
tune_family_filename = "MTC-ANN-tune-family-labels.csv"
# No header row: column 0 = song id, column 1 = tune-family label.
tune_family_df = pd.read_csv(os.path.join(METADATA_PATH, tune_family_filename), header=None)

# Traverse musicxml files and tune family
# Build both directions of the mapping:
#   song_id_x_family: song id -> (filename, family label)
#   family_x_songs:   family label -> list of song ids
song_id_x_family = {}
family_x_songs = {}
for root, directories, files in os.walk(MXML_PATH):
    for file in files:
        # Song id is the file name without its extension.
        song_id = file.split(".")[0]
        if (song_id not in song_id_x_family):
            family_name = tune_family_df[tune_family_df[0] == song_id].iloc[0][1]
            song_id_x_family[song_id] = (file, family_name)
            if (family_name not in family_x_songs):
                family_x_songs[family_name] = []
            family_x_songs[family_name].append(song_id)

# Remove the incomplete anotated tunes from the dataframe
# (keep only rows whose song id has a MusicXML file on disk).
reduced_tune_family_df = tune_family_df[tune_family_df[0].isin(list(song_id_x_family.keys()))]
# ## Functions
# Tolerance divisor for the delta-time match test in get_SCLM_v100: two
# delta times count as "close enough" when they differ by at most
# (dt_A + dt_B) / DIV_CONST.
DIV_CONST = 4

def getSongKey(song):
    """Return music21's estimated key for a stream/score."""
    key = song.analyze("key")
    return key
def getSongKeyFromMelody_W_Times(melody_w_times_in_k):
    """Estimate the key of a melody-with-times list.

    Builds a throwaway one-part score from the Pitch objects stored in
    field 4 of each event tuple and runs music21's key analysis on it.
    """
    score = m21.stream.Score()
    part = m21.stream.Part()
    part.id = 'part0'
    for event in melody_w_times_in_k:
        part.append(m21.note.Note(event[4]))
    score.insert(0, part)
    return getSongKey(score)
# Function to retrieve a list of midi pitch events and its timestamp
def getMelodyDeltaTimes(eventsintrack):
    """Extract note-on events (with timing) from a music21 MIDI track.

    Returns a list of ``(timestamp, deltatime, midi, spanish_name, Pitch)``
    tuples, where ``timestamp`` is the running sum of all delta times seen
    so far and ``deltatime`` is the time elapsed since the previous note-on.
    """
    events_out = []
    abs_time = 0       # cumulative time over the whole track
    pending_delta = 0  # time accumulated since the last note-on
    for ev in eventsintrack:
        if ev.isNoteOn():
            # Record the note with its absolute and relative timing.
            pitch_obj = m21.pitch.Pitch(ev.pitch)
            events_out.append(
                (abs_time, pending_delta, pitch_obj.midi, pitch_obj.spanish, pitch_obj))
            pending_delta = 0
        elif str(ev.type) == "DeltaTime":
            # Accumulate time in both counters.
            abs_time += ev.time
            pending_delta += ev.time
    return events_out
def get_SCLM_v100(melody_w_times_A, melody_w_times_B, div_const=None):
    """Return the longest common "melody" subsequence between two melodies.

    Both inputs are lists of ``(timestamp, deltatime, midi, name, pitch)``
    tuples as produced by ``getMelodyDeltaTimes``.  Two events match when
    their pitch names (field 3) are equal AND their delta times differ by at
    most ``(dt_A + dt_B) / div_const``.  The result is the list of matched
    index pairs ``(i, j)`` of one longest common subsequence under that
    matching relation.

    Parameters
    ----------
    melody_w_times_A, melody_w_times_B : list of tuple
        Melodies to compare.
    div_const : float, optional
        Tolerance divisor for the delta-time test; defaults to the
        module-level ``DIV_CONST``.

    Bug fix: the original backtracking step treated
    ``memo[i+1][j+1] + 1 == memo[i][j]`` alone as proof of a match, which
    can hold even when the events at (i, j) do NOT match, so non-matching
    pairs could be emitted.  The match predicate is now re-checked while
    backtracking.
    """
    if div_const is None:
        div_const = DIV_CONST

    def _is_match(i, j):
        # Same pitch name, and delta times within the relative tolerance.
        ev_a, ev_b = melody_w_times_A[i], melody_w_times_B[j]
        tot_delta_time = (float(ev_a[1]) + float(ev_b[1])) / float(div_const)
        tot_diff_time = np.abs(float(ev_a[1]) - float(ev_b[1]))
        return (ev_a[3] == ev_b[3]) and (tot_diff_time <= tot_delta_time)

    lim_A = len(melody_w_times_A)
    lim_B = len(melody_w_times_B)
    # memo[i][j] = length of the best SCLM of A[i:] versus B[j:].
    max_len = max(lim_A, lim_B) + 1
    memo = np.full(shape=(max_len, max_len), fill_value=-1)

    # Fill the table bottom-up.
    for i in range(lim_A, -1, -1):
        for j in range(lim_B, -1, -1):
            # Past either melody's end the best subsequence is empty.
            if i == lim_A or j == lim_B:
                memo[i][j] = 0
                continue
            curr_value = 0
            if _is_match(i, j):
                curr_value = memo[i + 1][j + 1] + 1
            # Best of: take the match, skip an event of A, skip one of B.
            memo[i][j] = max(curr_value, memo[i + 1][j], memo[i][j + 1])

    # Reconstruct one optimal sequence of matched index pairs.
    i = 0
    j = 0
    SCLM = []
    while i != lim_A and j != lim_B:
        if _is_match(i, j) and memo[i + 1][j + 1] + 1 == memo[i][j]:
            SCLM.append((i, j))
            i += 1
            j += 1
        elif memo[i + 1][j] == memo[i][j]:
            i += 1
        else:
            # One of the three recurrence cases always holds, so this
            # branch is the remaining "skip B" case (no infinite loop).
            j += 1
    return SCLM
def get_max_timestamp_dif(melody_w_times_A, melody_w_times_B):
    """Return the larger of the two melodies' total time spans.

    The span of a melody is the timestamp (field 0) of its last event minus
    the timestamp of its first event.
    """
    span_a = melody_w_times_A[-1][0] - melody_w_times_A[0][0]
    span_b = melody_w_times_B[-1][0] - melody_w_times_B[0][0]
    return max(span_a, span_b)
def getDifSCLM(melody_w_times_A, melody_w_times_B, sclm):
    """Mean absolute difference of inter-onset gaps along the SCLM.

    For each consecutive pair of matched events in ``sclm``, the timestamp
    gap is measured in melody A and in melody B; the result is the mean of
    the absolute differences between those gaps (0.0 means identical
    temporal spacing along the match).

    With fewer than two matched pairs there is no spacing to compare, so
    the worst case -- the longer melody's total time span -- is returned.

    Note: the original version also built mean-shifted copies of the gap
    arrays (``T_B`` adjustment and ``Dif_``) that never influenced the
    return value; that dead code has been removed.  The returned value is
    unchanged.
    """
    # If there is no sclm or it is just one pair, return the max possible value.
    if (len(sclm) <= 1):
        return get_max_timestamp_dif(melody_w_times_A, melody_w_times_B)
    # Absolute gap differences between consecutive matched events.
    T_C = np.zeros(shape=(len(sclm) - 1))
    for i in range(1, len(sclm)):
        gap_A = melody_w_times_A[sclm[i][0]][0] - melody_w_times_A[sclm[i - 1][0]][0]
        gap_B = melody_w_times_B[sclm[i][1]][0] - melody_w_times_B[sclm[i - 1][1]][0]
        T_C[i - 1] = np.abs(gap_A - gap_B)
    return np.mean(T_C)
def get_MTRC_v100_from_melody_w_times(melody_w_times_A, melody_w_times_B):
    """Distance metric between two melodies (0 = identical, larger = more
    different), computed as a weighted sum of four components:

      D1 (weight W1): the analyzed keys have different names,
      D2 (weight W2): the analyzed keys have different modes,
      D3 (weight W3): fraction of events NOT covered by the SCLM,
      D4 (weight W4): temporal-spacing difference along the SCLM,
          normalized by the longer melody's time span.

    W1..W4 are module-level globals set by the experiment cells below.
    """
    # Assert at least one element for each melody
    if (len(melody_w_times_A) == 0 or len(melody_w_times_B) == 0):
        # Maximum distance when either melody is empty.
        return 1
    # Initialize result variable
    result_value = 0
    # Get Keys (music21 key analysis of each melody)
    key_A = getSongKeyFromMelody_W_Times(melody_w_times_A)
    key_B = getSongKeyFromMelody_W_Times(melody_w_times_B)
    # D1: Scale
    scale_dif1 = 0
    if (key_A.name != key_B.name):
        scale_dif1 = W1
    result_value += scale_dif1
    # D2: Mode
    mode_dif2 = 0
    if (key_A.mode != key_B.mode):
        mode_dif2 = W2
    result_value += mode_dif2
    # Get SCLM v100
    sclm = get_SCLM_v100(melody_w_times_A, melody_w_times_B)
    # Get max len
    max_len = max(len(melody_w_times_A), len(melody_w_times_B))
    # D3: SCLM Length (share of events left unmatched)
    sclmlen_dif3 = ((max_len - len(sclm)) / max_len) * W3
    result_value += sclmlen_dif3
    # Get the Diff on temporal spacing in the SCLM
    dif_sclm = getDifSCLM(melody_w_times_A, melody_w_times_B, sclm)
    # D4: dif in sclm, normalized by the larger total time span
    max_timestamp_dif = get_max_timestamp_dif(melody_w_times_A, melody_w_times_B)
    sclmdif_dif4 = (dif_sclm / max_timestamp_dif) * W4
    result_value += sclmdif_dif4
    return result_value
# +
# Read Files
# Parse every song once up front and cache its music21 stream, MIDI file
# and extracted melody, so the experiments below do not re-parse MusicXML
# on every comparison.
song_m21_streams = {}
# We traverse the reduced table
for query_row in reduced_tune_family_df.iterrows():
    tune_family_query = query_row[1][1]
    song_id_A = query_row[1][0]
    song_stream_A = m21.converter.parseFile(os.path.join(MXML_PATH, song_id_x_family[song_id_A][0]))
    midi_tracks_A = m21.midi.translate.streamToMidiFile(song_stream_A)
    melody_w_times_A = getMelodyDeltaTimes(midi_tracks_A.tracks[0].events)
    song_m21_streams[song_id_A] = {
        "song_stream": song_stream_A,
        "midi_tracks": midi_tracks_A,
        "melody_w_times": melody_w_times_A
    }
# -
# ## Noises
# ### Type 1. Random Pitch
def get_random_pitch():
    """Return a random music21 Pitch: pitch class 0-11, octave 1-8."""
    new_pitch_class = np.random.randint(0, 12)
    new_pitch_octave = np.random.randint(1, 9)
    return m21.pitch.Pitch(octave=new_pitch_octave, pitchClass=new_pitch_class)
# Define apply Transformation type 1: Ruido en notas
def apply_note_noise(melody_w_times_in, percentage=50):
    """Noise type 1: replace the pitch of ``percentage`` % of the events
    (distinct positions) with a different random pitch.  Delta times and
    timestamps are untouched.  Mutates and returns the input list."""
    # Track modified notes
    modified_notes = {}
    # Store the length of the melody
    len_melody = len(melody_w_times_in)
    # According to the desired percentage of noise we get the number of notes to be modified
    many_notes = int((len_melody * percentage)//100)
    for noise_i in range(many_notes):
        # Select a random position that we haven't seen yet
        note_to_change = np.random.randint(0, len_melody)
        while (note_to_change in modified_notes):
            note_to_change = np.random.randint(0, len_melody)
        modified_notes[note_to_change] = 1
        # Creating a new pitch note (re-draw until the name actually differs)
        previous_pitch = melody_w_times_in[note_to_change][3]
        p_new = get_random_pitch()
        while (p_new.spanish == previous_pitch):
            p_new = get_random_pitch()
        # Replace the data, keeping timestamp (0) and deltatime (1)
        melody_w_times_in[note_to_change] = (
            melody_w_times_in[note_to_change][0],
            melody_w_times_in[note_to_change][1],
            p_new.midi,
            p_new.spanish,
            p_new)
    # Return the modified melody
    return melody_w_times_in
# ### Type 2. Random DeltaTime
def recalculate_timestamps(melody_w_times_in):
    """Rebuild each event's absolute timestamp (field 0) as the running sum
    of the per-event delta times (field 1).

    Mutates the list in place and also returns it; all other tuple fields
    are preserved unchanged.
    """
    elapsed = 0
    for idx, event in enumerate(melody_w_times_in):
        elapsed += event[1]
        melody_w_times_in[idx] = (elapsed, event[1], event[2], event[3], event[4])
    return melody_w_times_in
def get_random_deltatime():
    """Return a random delta time in [0, 4096] (numpy integer)."""
    return np.random.randint(0, 4097)
# Define apply Transformation type 2: Ruido en tiempos
def apply_deltatime_noise(melody_w_times_in, percentage=50):
    """Noise type 2: replace the delta time of ``percentage`` % of the
    events (distinct positions) with a different random delta time, then
    rebuild the absolute timestamps.  Mutates and returns the input list."""
    # Track modified notes
    modified_notes = {}
    # Store the length of the melody
    len_melody = len(melody_w_times_in)
    # According to the desired percentage of noise we get the number of notes to be modified
    many_notes = int((len_melody * percentage)//100)
    for noise_i in range(many_notes):
        # Select a random position that we haven't seen yet
        note_to_change = np.random.randint(0, len_melody)
        while (note_to_change in modified_notes):
            note_to_change = np.random.randint(0, len_melody)
        modified_notes[note_to_change] = 1
        # Creating a new deltatime (re-draw until it actually differs)
        previous_deltatime = melody_w_times_in[note_to_change][1]
        deltatime_new = get_random_deltatime()
        while (deltatime_new == previous_deltatime):
            deltatime_new = get_random_deltatime()
        # ratio_of_change = np.abs((deltatime_new - previous_deltatime))
        # if previous_deltatime != 0:
        #     ratio_of_change /= previous_deltatime
        # else:
        #     ratio_of_change = -1
        # print("AAA", ratio_of_change)
        # Replace the data (only field 1 changes here)
        melody_w_times_in[note_to_change] = (
            melody_w_times_in[note_to_change][0],
            deltatime_new,
            melody_w_times_in[note_to_change][2],
            melody_w_times_in[note_to_change][3],
            melody_w_times_in[note_to_change][4])
    # Recalculate timestamps due to the modification in deltatimes
    melody_w_times_in = recalculate_timestamps(melody_w_times_in)
    # Return the modified melody
    return melody_w_times_in
# ### Type 3. Noise in Pitch and Deltatime
# Define apply Transformation type 3: Ruido en tiempos y notas (reemplazo)
def apply_deltatime_and_note_noise(melody_w_times_in, percentage=50):
    """Noise type 3: for ``percentage`` % of the events (distinct
    positions), replace BOTH the delta time and the pitch with new random
    values, then rebuild the absolute timestamps.  Mutates and returns the
    input list."""
    # Track modified notes
    modified_notes = {}
    # Store the length of the melody
    len_melody = len(melody_w_times_in)
    # According to the desired percentage of noise we get the number of notes to be modified
    many_notes = int((len_melody * percentage)//100)
    for noise_i in range(many_notes):
        # Select a random position that we haven't seen yet
        note_to_change = np.random.randint(0, len_melody)
        while (note_to_change in modified_notes):
            note_to_change = np.random.randint(0, len_melody)
        modified_notes[note_to_change] = 1
        # Creating a new deltatime (re-draw until it actually differs)
        previous_deltatime = melody_w_times_in[note_to_change][1]
        deltatime_new = get_random_deltatime()
        while (deltatime_new == previous_deltatime):
            deltatime_new = get_random_deltatime()
        # Creating a new pitch note (re-draw until the name actually differs)
        previous_pitch = melody_w_times_in[note_to_change][3]
        p_new = get_random_pitch()
        while (p_new.spanish == previous_pitch):
            p_new = get_random_pitch()
        # Replace the data (timestamp is fixed below by recalculation)
        melody_w_times_in[note_to_change] = (
            melody_w_times_in[note_to_change][0],
            deltatime_new,
            p_new.midi,
            p_new.spanish,
            p_new)
    # Recalculate timestamps due to the modification in deltatimes
    melody_w_times_in = recalculate_timestamps(melody_w_times_in)
    # Return the modified melody
    return melody_w_times_in
# ### Type 4. Removing notes
# Define apply Transformation type 4: Noise by removing events
def apply_removing_noise(melody_w_times_in, percentage=50):
    """Noise type 4: delete ``percentage`` % of the events at random
    positions, then rebuild the absolute timestamps of the survivors.
    Mutates and returns the input list."""
    # Store the length of the melody
    len_melody = len(melody_w_times_in)
    # According to the desired percentage of noise we get the number of notes to be modified
    many_notes = int((len_melody * percentage)//100)
    for noise_i in range(many_notes):
        # Select a random position to remove (bound shrinks as events are popped)
        note_to_remove = np.random.randint(0, len(melody_w_times_in))
        # Remove element
        melody_w_times_in.pop(note_to_remove)
    # Recalculate timestamps due to the modification in deltatimes continuity
    melody_w_times_in = recalculate_timestamps(melody_w_times_in)
    # Return the modified melody
    return melody_w_times_in
# ### Type 5. Inserting new notes
# Define apply Transformation type 5: Noise by Inserting events
def apply_inserting_noise(melody_w_times_in, percentage=50):
    """Noise type 5: insert random events until they make up ``percentage``
    % of the enlarged melody, then rebuild the absolute timestamps.
    Mutates and returns the input list."""
    # Clamp out-of-range percentages.  100 must also be excluded: the
    # new-length formula below would divide by zero at exactly 100%.
    if percentage >= 100 or percentage < 0:
        percentage = 99
    # Store the length of the melody
    len_melody = len(melody_w_times_in)
    # New total length such that inserted events are `percentage` % of it.
    new_len = int(len_melody / (1 - (percentage / 100)))
    many_notes = new_len - len_melody
    for noise_i in range(many_notes):
        # Create new Event
        # Creating a new deltatime
        deltatime_new = get_random_deltatime()
        # Creating a new pitch note
        p_new = get_random_pitch()
        # Timestamp field is a placeholder; recalculate_timestamps fixes it.
        new_midi_event = (
            0,
            deltatime_new,
            p_new.midi,
            p_new.spanish,
            p_new)
        # Select a random position to insert
        pos_to_insert = np.random.randint(0, len(melody_w_times_in))
        # Insert element
        melody_w_times_in.insert(pos_to_insert, new_midi_event)
    # Recalculate timestamps due to the modification in deltatimes continuity
    melody_w_times_in = recalculate_timestamps(melody_w_times_in)
    # Return the modified melody
    return melody_w_times_in
# ### Noise Controller
def apply_ith_noise(noise_type, melody_w_times_in, percentage=50):
    """Dispatch to the noise transformation for ``noise_type`` (1-4); any
    other value falls through to the insertion noise (type 5)."""
    handlers = {
        1: apply_note_noise,
        2: apply_deltatime_noise,
        3: apply_deltatime_and_note_noise,
        4: apply_removing_noise,
    }
    handler = handlers.get(noise_type, apply_inserting_noise)
    return handler(melody_w_times_in, percentage)
# ## Experiments
def apply_threshold(val_x):
    """Quantize a 0-1 similarity value to the nearest 0.1 step.

    Mirrors the original pipeline exactly: scale to 0-100, truncate to an
    integer, then round to tenths (Python 3 ``round`` uses banker's
    rounding on ties).
    """
    percent = int(val_x * 100)
    steps = round(percent / 10)
    return steps / 10.0
def get_metric_range_to_100(val_x):
    """Rescale a metric value from the [0, 0.7] band up to [0, 1]."""
    return val_x / 0.7
def get_mean_distance_for_song(song_id_query, with_threshold=False):
    """Self-similarity experiment for one song.

    For every noise type in the global ``noise_types`` and every level in
    the global ``noise_percentages``, the song is compared (via the MTRC
    metric) against a freshly noised copy of itself.  Returns a 3-tuple:

      metric_values              -- metric per (noise type, percentage),
      differences_per_percentage -- |metric - percentage/100| per cell,
      diff_x_noises              -- mean of those differences per noise type.

    Also relies on the global ``song_m21_streams`` cache.  NOTE(review):
    ``sorted_distance_mean_current_noise`` is assigned but never used.
    """
    # We initialize the mean difference across noises
    diff_x_noises = []
    # Metric Values
    metric_values = []
    # Difference per percentage
    differences_per_percentage = []
    # We traverse the noises
    for noise_type_i in noise_types:
        # if (noise_type_i == 2):
        #     continue
        # We initialize the mean distance in the current noise
        sorted_distance_mean_current_noise = 0
        # We save the similarities differences with the percentage
        similarity_differences_for_mean = []
        # We save metric values per noise type
        metric_values_per_noise = []
        # We traverse the percentages
        for noise_percentage_i in noise_percentages:
            # Extract two independent copies of the melody: the query stays
            # clean, the test copy gets noised in place.
            melody_w_times_query = getMelodyDeltaTimes(
                song_m21_streams[song_id_query]["midi_tracks"].tracks[0].events)
            melody_w_times_test = getMelodyDeltaTimes(
                song_m21_streams[song_id_query]["midi_tracks"].tracks[0].events)
            melody_w_times_test = apply_ith_noise(noise_type_i, melody_w_times_test, noise_percentage_i)
            current_similarity = get_MTRC_v100_from_melody_w_times(
                melody_w_times_query,
                melody_w_times_test)
            # current_similarity = get_metric_range_to_100(current_similarity)
            if (with_threshold):
                current_similarity = apply_threshold(current_similarity)
            metric_values_per_noise.append(current_similarity)
            # Ideally the metric equals the injected noise fraction.
            similarity_differences_for_mean.append(np.abs(current_similarity - float(noise_percentage_i/100.0)))
        # Get the mean of the differences for this noise type
        similarity_difference_mean_current_noise = np.mean(similarity_differences_for_mean)
        # Save the differences per percentage
        differences_per_percentage.append(similarity_differences_for_mean)
        # Save the metric values per percentage
        metric_values.append(metric_values_per_noise)
        # We sum the distance for this noise
        diff_x_noises.append(similarity_difference_mean_current_noise)
    # We return the mean distance per noise
    return metric_values, differences_per_percentage, diff_x_noises
def get_weights_from_encoded_conf(encoded_conf):
    """Decode a comma-separated weight string (e.g. ``'0.0,0.0,1.0,0.0'``)
    into a tuple of floats.

    The original piped the floats through an identity ``lambda x: x`` map;
    that no-op has been removed -- the result is identical.
    """
    return tuple(float(token) for token in encoded_conf.split(","))
# ## No Threshold
# +
# %%time
# Define noise type array
noise_types = [1, 2, 3, 4, 5]
len_noise_types = len(noise_types)
# Define the percentages of noise: 10, 20, ..., 100
noise_percentages = list(map(int, np.linspace(10, 100, 10)))
# Weights for the four metric components (only D3, the SCLM-length term,
# is active in this run).
#W1 = 0.25
#W2 = 0.25
#W3 = 0.25
#W4 = 0.25
W1 = 0.0
W2 = 0.0
W3 = 1.0
W4 = 0.0
# TEST
heuristic_raw_results = {}
heuristic_raw_results['0.0,0.0,1.0,0.0'] = get_mean_distance_for_song('NLB072967_01', False)
# -

# Overall mean difference across the five noise types.
print(np.mean(heuristic_raw_results['0.0,0.0,1.0,0.0'][2]))

# Dump the raw results, timestamped so repeated runs do not overwrite
# each other.
ts = time.time()
with open('./HEURISTIC_TEMPOMETRIC_1_{0}_NOISE_RAW_v106_{1}.json'.format(DIV_CONST, str(ts)), 'w') as outfile:
    json.dump(heuristic_raw_results, outfile)

# Presumably reference results from earlier runs at different DIV_CONST
# settings (1/2, 1/4, ...) -- TODO confirm provenance.
print("1_2", 0.11794245363453057)
print("1_4", 0.07116279261758142)
print("1_8", 0.04743397905825938)
print("1_32", 0.027433979058259384)
print("1_64", 0.022688216346394972)
print("1_128", 0.01997635193961531)
print("1_512", 0.018959402787072935)
print("1_2048", 0.018620419736225476)
| ControlledExperiments/AlRyNo_CALCULATE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
import glob

# Collect the frame files in numeric order: names look like
# "cho/<prefix>_<n>.png", so sort on the integer between the underscore
# and the extension.
frame_files = sorted(glob.glob("cho/*.png"),
                     key=lambda path: int(path.split('_')[1].split('.')[0]))

img_array = []
for filename in frame_files:
    img = cv2.imread(filename)
    print(filename)
    height, width, layers = img.shape
    size = (width, height)  # cv2.VideoWriter expects (width, height)
    img_array.append(img)

# 10 fps, DIVX-encoded AVI.  ``size`` comes from the last frame read;
# all frames are assumed to share one resolution -- TODO confirm.
out = cv2.VideoWriter('cho_slow.avi', cv2.VideoWriter_fourcc(*'DIVX'), 10, size)

for frame in img_array:
    out.write(frame)
out.release()
# -
| examples/make_cam_video.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
# *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
# <!--NAVIGATION-->
# < [Simple Line Plots](04.01-Simple-Line-Plots.ipynb) | [Contents](Index.ipynb) | [Visualizing Errors](04.03-Errorbars.ipynb) >
# # Simple Scatter Plots
# Another commonly used plot type is the simple scatter plot, a close cousin of the line plot.
# Instead of points being joined by line segments, here the points are represented individually with a dot, circle, or other shape.
# We’ll start by setting up the notebook for plotting and importing the functions we will use:
# %matplotlib inline
import matplotlib.pyplot as plt
# NOTE(review): the 'seaborn-whitegrid' style name was deprecated in
# Matplotlib 3.6 (renamed 'seaborn-v0_8-whitegrid'); confirm the installed
# version before running.
plt.style.use('seaborn-whitegrid')
import numpy as np
# ## Scatter Plots with ``plt.plot``
#
# In the previous section we looked at ``plt.plot``/``ax.plot`` to produce line plots.
# It turns out that this same function can produce scatter plots as well:
# +
# 30 evenly spaced samples of sin(x); 'o' draws unconnected markers.
x = np.linspace(0, 10, 30)
y = np.sin(x)

plt.plot(x, y, 'o', color='black');
# -
# The third argument in the function call is a character that represents the type of symbol used for the plotting. Just as you can specify options such as ``'-'``, ``'--'`` to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of ``plt.plot``, or in Matplotlib's online documentation. Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here:
# Seeded RNG keeps the demo reproducible across runs.
rng = np.random.RandomState(0)
for marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd']:
    plt.plot(rng.rand(5), rng.rand(5), marker,
             label="marker='{0}'".format(marker))
plt.legend(numpoints=1)
plt.xlim(0, 1.8);
# For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them:
# '-ok' = solid line ('-'), circle markers ('o'), black ('k').
plt.plot(x, y, '-ok');

# Additional keyword arguments to ``plt.plot`` specify a wide range of properties of the lines and markers:
plt.plot(x, y, '-p', color='gray',
         markersize=15, linewidth=4,
         markerfacecolor='white',
         markeredgecolor='gray',
         markeredgewidth=2)
plt.ylim(-1.2, 1.2);
# This type of flexibility in the ``plt.plot`` function allows for a wide variety of possible visualization options.
# For a full description of the options available, refer to the ``plt.plot`` documentation.
# ## Scatter Plots with ``plt.scatter``
#
# A second, more powerful method of creating scatter plots is the ``plt.scatter`` function, which can be used very similarly to the ``plt.plot`` function:
# Same data as above, drawn with plt.scatter instead of plt.plot.
plt.scatter(x, y, marker='o');
# The primary difference of ``plt.scatter`` from ``plt.plot`` is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data.
#
# Let's show this by creating a random scatter plot with points of many colors and sizes.
# In order to better see the overlapping results, we'll also use the ``alpha`` keyword to adjust the transparency level:
# +
# Per-point color and size: c maps through the colormap, s is in points^2.
rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(100)

plt.scatter(x, y, c=colors, s=sizes, alpha=0.3,
            cmap='viridis')
plt.colorbar();  # show color scale
# -
# Notice that the color argument is automatically mapped to a color scale (shown here by the ``colorbar()`` command), and that the size argument is given in pixels.
# In this way, the color and size of points can be used to convey information in the visualization, in order to visualize multidimensional data.
#
# For example, we might use the Iris data from Scikit-Learn, where each sample is one of three types of flowers that has had the size of its petals and sepals carefully measured:
# +
from sklearn.datasets import load_iris
iris = load_iris()
# Transpose so each row of ``features`` is one measurement across samples.
features = iris.data.T

plt.scatter(features[0], features[1], alpha=0.2,
            s=100*features[3], c=iris.target, cmap='viridis')
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1]);
# -
# We can see that this scatter plot has given us the ability to simultaneously explore four different dimensions of the data:
# the (x, y) location of each point corresponds to the sepal length and width, the size of the point is related to the petal width, and the color is related to the particular species of flower.
# Multicolor and multifeature scatter plots like this can be useful for both exploration and presentation of data.
# ## ``plot`` Versus ``scatter``: A Note on Efficiency
#
# Aside from the different features available in ``plt.plot`` and ``plt.scatter``, why might you choose to use one over the other? While it doesn't matter as much for small amounts of data, as datasets get larger than a few thousand points, ``plt.plot`` can be noticeably more efficient than ``plt.scatter``.
# The reason is that ``plt.scatter`` has the capability to render a different size and/or color for each point, so the renderer must do the extra work of constructing each point individually.
# In ``plt.plot``, on the other hand, the points are always essentially clones of each other, so the work of determining the appearance of the points is done only once for the entire set of data.
# For large datasets, the difference between these two can lead to vastly different performance, and for this reason, ``plt.plot`` should be preferred over ``plt.scatter`` for large datasets.
# <!--NAVIGATION-->
# < [Simple Line Plots](04.01-Simple-Line-Plots.ipynb) | [Contents](Index.ipynb) | [Visualizing Errors](04.03-Errorbars.ipynb) >
| PythonDataScienceHandbook/notebooks/04.02-Simple-Scatter-Plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="6f7089e2e868fe2c1a1fe1851b1ee99098d99336"
# # Imports
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import math
import os
#print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
tf.__version__
# + [markdown] _uuid="53fd36fe94e52aafa21b6398e09efd8b324f8d1a"
# # **Loading data**
# + _uuid="129314e7aee0d3f4c2880bffb6bf85968bf5dc05"
# from mnist import MNIST
# data = MNIST(data_dir = 'data/MNIST/')
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
mnist = tf.keras.datasets.mnist
(X_train , Y_train ) , (X_test , Y_test) =mnist.load_data()
X_train, X_test = X_train/255.0 , X_test/255.0
# + _uuid="97d57617417d0c92a9485cb7e6f32a57dc52fc5d"
print("Size of:")
print("- Training-set:\t\t{}".format(len(X_train)))
print("- Test-set:\t\t{}".format(len(X_test)))
# + _uuid="457f8da3b98c8d6bba84d14534510ec0793a3368"
X_train_images = X_train.reshape(X_train.shape[0],-1)
X_test_images = X_test.reshape(X_test.shape[0],-1)
# -
print("The shape of X_train:{}".format(X_train_images.shape))
print("The shape of X_test:{}".format(X_test_images.shape))
print("The shape of Y_train:{}".format(Y_train.shape))
print("The shape of Y_test:{}".format(Y_test.shape))
# + [markdown] _uuid="d0afd5615430a18e616c5b301b19072d78e26bc8"
# # **One Hot Encoded**
# -
def one_hot_code(x):
    """One-hot encode a sequence of digit labels.

    Parameters
    ----------
    x : sequence of int
        Class labels in the range [0, 9].

    Returns
    -------
    numpy.ndarray of shape (len(x), 10)
        Row i has a 1.0 in column x[i] and 0.0 elsewhere.
    """
    labels = np.asarray(x, dtype=int)
    encoded = np.zeros((len(labels), 10))
    # Vectorized fancy-index assignment replaces the original per-element loop.
    encoded[np.arange(len(labels)), labels] = 1
    return encoded
Train_labels = one_hot_code(Y_train)
Test_labels = one_hot_code(Y_test)
print("The shape of Y_train_cls:{}".format(Train_labels.shape))
print("The shape of Y_test_cls:{}".format(Test_labels.shape))
# + [markdown] _uuid="d273d7b505a3f01767baba63c5661ca60da3473e"
# # **Tensorflow Graph**
# + _uuid="db8154eac9212940c2eab60957608b237cc11964"
x = tf.placeholder(tf.float32, [None, (28*28)])
y_true = tf.placeholder(tf.float32, [None, 10])
y_true_cls = tf.placeholder(tf.float32, [None])
# + [markdown] _uuid="92e961eac5fa95a507b2f30d7ea55246f2da0e94"
# # Variables to be Optimized
# + _uuid="35facba39450df876404c4394c44703d604ac6eb"
weights = tf.Variable(tf.zeros([(28*28) , 10]))
biases = tf.Variable(tf.zeros([10]))
# + [markdown] _uuid="fca6bf676dbd91a73e6c77434d8d0aebe2228bba"
# # **Model**
# + _uuid="52ab31e0f82c3d4191e1285a5f3442d34ae194cc"
logits = tf.matmul(x, weights) + biases
# + _uuid="84b9193c5a0757f465c1cb7050c76c72bc4d7655"
y_pred = tf.nn.softmax(logits)
y_pred_cls = tf.argmax(y_pred, axis=1)
y_pred_cls = tf.to_float(y_pred_cls)
# + [markdown] _uuid="e753b7fe2673c5f19f05036aacd005a0b6b947b7"
# # **Cost Function**
# + _uuid="e521ff6f58eb1414787cba211377214699b199a5"
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost)
# + _uuid="9f19e9932b58e2bbe5f99ff73a28bd7ebcfc8f8f"
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# -
# # Mini Batch Function
def random_mini_batches(X, Y, batch_size):
    """Split (X, Y) into consecutive mini-batches of `batch_size` rows.

    Note: despite the name, this does NOT shuffle — batches are taken in
    order. The final batch holds the remaining m % batch_size samples.

    Parameters
    ----------
    X : ndarray of shape (m, n_features)
    Y : ndarray of shape (m, n_labels)
    batch_size : int

    Returns
    -------
    list of (mini_X, mini_Y) tuples covering every row exactly once.
    """
    m = X.shape[0]
    num_full = m // batch_size
    minibatches = []
    for i in range(num_full):
        start = i * batch_size
        end = start + batch_size
        minibatches.append((X[start:end, :], Y[start:end, :]))
    if m % batch_size != 0:
        # BUG FIX: the leftover batch must be the *tail* rows. The original
        # sliced X[:(m - batch_size*num_full), :], re-using rows from the
        # head and never training on the final partial batch.
        tail = num_full * batch_size
        minibatches.append((X[tail:, :], Y[tail:, :]))
    return minibatches
# + _uuid="597abd944a35e0c9362b5b42545f17d9a6af1dc9"
feed_dict_test = {x: X_test_images,
y_true: Test_labels,
y_true_cls: Y_test}
# -
# # Helper Functions
# +
def Optimize(num_iterations):
    """Run mini-batch SGD for `num_iterations` epochs over the training set.

    Uses the module-level session, graph ops and data arrays; prints the
    test-set accuracy every 20 epochs.
    """
    for epoch in range(num_iterations):
        epoch_batches = random_mini_batches(X_train_images, Train_labels, 128)
        for batch_X, batch_Y in epoch_batches:
            sess.run(optimizer, feed_dict={x: batch_X, y_true: batch_Y})
        if epoch % 20 == 0:
            print_accuracy()
"------------------------------------------------XXXXXXXXXXXXX------------------------------------------------------"
def print_accuracy():
    """Evaluate the accuracy op over the full test set and print it."""
    test_acc = sess.run(accuracy, feed_dict=feed_dict_test)
    print("Accuracy on test-set: {0:.1%}".format(test_acc))
"-------------------------------------------------XXXXXXXXXXXXX-------------------------------------------------------"
def print_confusion_matrix():
    """Print the test-set confusion matrix and render it as a heat map."""
    true_classes = Y_test
    predicted_classes = sess.run(y_pred_cls, feed_dict=feed_dict_test)
    cm = confusion_matrix(true_classes, predicted_classes)
    print(cm)
    # Heat map with one tick per digit class.
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.tight_layout()
    plt.colorbar()
    ticks = np.arange(10)
    plt.xticks(ticks, range(10))
    plt.yticks(ticks, range(10))
    plt.xlabel('Predicted')
    plt.ylabel('True')
    # Ensure the plot is shown correctly when a cell makes several plots.
    plt.show()
"------------------------------------------------------XXXXXXXXXXXXXXXXXXXXXX------------------------------------------"
def plot_images(image, cls_true, cls_pred):
    """Show nine images in a 3x3 grid, labelled with true (and optionally
    predicted) classes.

    cls_pred may be None, in which case only the true label is shown.
    """
    assert len(image) == len(cls_true) == 9
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for idx, ax in enumerate(axes.flat):
        ax.imshow(image[idx], cmap='binary')
        if cls_pred is None:
            label = 'True:{}'.format(cls_true[idx])
        else:
            label = 'True:{} Pred:{}'.format(cls_true[idx], cls_pred[idx])
        ax.set_xlabel(label)
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()
"-------------------------------------------XXXXXXXXXXXXXXXXXXXXXXX-------------------------------------------------"
def plot_example_errors():
    """Plot the first nine test images the model misclassified."""
    correct, cls_pred = sess.run([correct_prediction, y_pred_cls],
                                 feed_dict=feed_dict_test)
    # Boolean mask of wrong predictions (element-wise on the numpy array).
    wrong = (correct == False)
    wrong_images = X_test[wrong]
    wrong_true = Y_test[wrong]
    wrong_pred = cls_pred[wrong]
    plot_images(image=wrong_images[0:9],
                cls_true=wrong_true[0:9],
                cls_pred=wrong_pred[0:9])
"------------------------------------------------------------XXXXXXXXXXXXXXXXXXX----------------------------------------"
def plot_weights():
    """Visualize the learned weight vector of each of the 10 digit classes."""
    w = sess.run(weights)
    # Shared color limits so the ten weight images are directly comparable.
    w_min = np.min(w)
    w_max = np.max(w)
    # 2x5 grid of sub-plots, one per digit.
    fig, axes = plt.subplots(2, 5)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for digit, ax in enumerate(axes.flat):
        if digit < 10:
            # w has shape (img_size_flat, 10); column `digit` is one 28x28 image.
            digit_image = w[:, digit].reshape(28, 28)
            ax.set_xlabel("Weights: {0}".format(digit))
            ax.imshow(digit_image, vmin=w_min, vmax=w_max, cmap='seismic')
        # Strip ticks from every sub-plot, used or not.
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly when a cell makes several plots.
    plt.show()
# +
# Get the first images from the test-set.
images = X_test[0:9]
# Get the true classes for those images.
cls_true = Y_test[0:9]
# Plot the images and labels using our helper-function above.
plot_images(image=images, cls_true=cls_true ,cls_pred=None)
# -
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# + _uuid="85362441dbf81e9d235d0733010ba524f4e00df2"
Optimize(200)
# -
plot_weights()
print_confusion_matrix()
plot_example_errors()
| Tensorflow-Tutorial/#1-Simple Linear Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import seaborn
color = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
marker={
'.': 'point marker',
',': 'pixel marker',
'o': 'circle marker',
'v': 'triangle_down marker',
'^': 'triangle_up marker',
'<': 'triangle_left marker',
'>': 'triangle_right marker',
'1': 'tri_down marker',
'2': 'tri_up marker',
'3': 'tri_left marker',
'4': 'tri_right marker',
's': 'square marker',
'p': 'pentagon marker',
'*': 'star marker',
'h': 'hexagon1 marker',
'H': 'hexagon2 marker',
'+': 'plus marker',
'x': 'x marker',
'D': 'diamond marker',
'd': 'thin_diamond marker',
'|': 'vline marker',
'_': 'hline marker',
}
# +
from sklearn.datasets import make_blobs
from numpy import where, random
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
# plt.style.use('classic')
plt.style.use('ggplot')
# plt.style.use('seaborn-bright')
# plt.style.use('seaborn-paper')
# plt.style.use('seaborn-dark')
# plt.style.use('seaborn-darkgrid')
# plt.style.use('seaborn-deep')
# plt.style.use('seaborn-colorblind')
# plt.style.use('seaborn-notebook')
# plt.style.use('seaborn-poster')
colors=random.rand(3)
def samples_for_seed(seed):
    """Generate a 2-feature, 3-center blob classification problem.

    The seed fixes the random state so each problem is reproducible.
    """
    return make_blobs(n_samples=1000, centers=3, n_features=2,
                      cluster_std=2, random_state=seed)
def plot_samples(X, y, classes=3):
    """Scatter-plot the points of each class label in its own color."""
    for label in range(classes):
        # Indices of the points belonging to this class.
        members = where(y == label)
        plt.scatter(X[members, 0], X[members, 1])
n_problems = 2
for i in range(1, n_problems+1):
# specify subplot
plt.subplot(2,1,i)
# generate samples
X, y = samples_for_seed(i)
# scatter plot of samples
plot_samples(X, y)
plt.show()
# +
from sklearn.datasets import make_blobs
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
import matplotlib.pyplot as plt
# 准备数据
def samples_for_seed(seed):
    """Generate a 3-class blob dataset, one-hot encode the labels, and
    split it into equal train/validation halves.

    Returns (trainX, trainy, testX, testy).
    """
    X, y = make_blobs(n_samples=1000, centers=3, n_features=2,
                      cluster_std=2, random_state=seed)
    # One-hot encode the integer labels for categorical cross-entropy.
    y = to_categorical(y)
    # First 500 samples train, remaining 500 validate.
    n_train = 500
    trainX, trainy = X[:n_train, :], y[:n_train]
    testX, testy = X[n_train:, :], y[n_train:]
    return trainX, trainy, testX, testy
def fit_model(trainX, trainy, testX, testy):
    """Build, compile and train a small 2-hidden-layer MLP classifier.

    Trains for 100 epochs with SGD, validating on (testX, testy).
    Returns (model, history).
    """
    layer_stack = [
        Dense(5, input_dim=2, activation='relu', kernel_initializer='he_uniform'),
        Dense(5, activation='relu', kernel_initializer='he_uniform'),
        Dense(3, activation='softmax'),
    ]
    model = Sequential(layer_stack)
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    history = model.fit(trainX, trainy, validation_data=(testX, testy),
                        epochs=100, verbose=0)
    return model, history
def summarize_model(model, history, trainX, trainy, testX, testy):
    """Print train/test accuracy and plot the loss and accuracy curves."""
    _, train_acc = model.evaluate(trainX, trainy, verbose=0)
    _, test_acc = model.evaluate(testX, testy, verbose=0)
    print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
    # One stacked subplot per learning curve: (title, train key, val key).
    curve_specs = (
        ('Loss', 'loss', 'val_loss'),
        ('Accuracy', 'accuracy', 'val_accuracy'),
    )
    for position, (title, train_key, val_key) in enumerate(curve_specs, start=1):
        plt.subplot(2, 1, position)
        plt.title(title)
        plt.plot(history.history[train_key], label='train')
        plt.plot(history.history[val_key], label='test')
        plt.legend()
    plt.tight_layout()
    plt.show()
# 准备数据
trainX, trainy, testX, testy = samples_for_seed(1)
# 训练模型
model, history = fit_model(trainX, trainy, testX, testy)
# 评估模型
summarize_model(model, history, trainX, trainy, testX, testy)
# 保存模型
model.save('model.h5')
# +
from sklearn.datasets import make_blobs
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
import matplotlib.pyplot as plt
# 准备数据
def samples_for_seed(seed):
    """Generate a 3-class blob dataset, one-hot encode the labels, and
    split it into equal train/validation halves (500/500).
    """
    # Generate the classification samples.
    X, y = make_blobs(n_samples=1000, centers=3, n_features=2, cluster_std=2, random_state=seed)
    # One-hot encode the labels.
    y = to_categorical(y)
    # Split into training and validation sets.
    n_train = 500
    trainX, testX = X[:n_train, :], X[n_train:, :]
    trainy, testy = y[:n_train], y[n_train:]
    return trainX, trainy, testX, testy
def fit_model(trainX, trainy, testX, testy):
    """Build, compile and train a small 2-hidden-layer MLP classifier.

    Returns (model, history) after 100 epochs of SGD training with
    validation on (testX, testy).
    """
    # Define the model.
    model = Sequential()
    model.add(Dense(5, input_dim=2, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(5, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(3, activation='softmax'))
    # Compile the model.
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    # Train the model.
    history = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=100, verbose=0)
    return model, history
def summarize_model(model, history, trainX, trainy, testX, testy):
    """Print train/test accuracy and plot the loss and accuracy curves."""
    # Evaluate the model on both splits.
    _, train_acc = model.evaluate(trainX, trainy, verbose=0)
    _, test_acc = model.evaluate(testX, testy, verbose=0)
    print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
    # Plot the loss curves.
    plt.subplot(211) # plt.subplot(2,1,1)
    plt.title('Loss')
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    # Plot the accuracy curves.
    plt.subplot(212) # plt.subplot(2,1,2)
    plt.title('Accuracy')
    plt.plot(history.history['accuracy'], label='train')
    plt.plot(history.history['val_accuracy'], label='test')
    plt.legend()
    plt.tight_layout()
    plt.show()
# 准备数据
trainX, trainy, testX, testy = samples_for_seed(2)
# 训练模型
model, history = fit_model(trainX, trainy, testX, testy)
# 评估模型
summarize_model(model, history, trainX, trainy, testX, testy)
# +
from keras.models import load_model
def fit_model_2(trainX, trainy, testX, testy):
    """Transfer learning: reload the problem-1 model from 'model.h5' and
    fine-tune all of its layers on the new data.

    Returns (model, history).
    """
    model = load_model('model.h5')
    # Re-compile before continuing training on the new problem.
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    history = model.fit(trainX, trainy, validation_data=(testX, testy),
                        epochs=100, verbose=0)
    return model, history
trainX, trainy, testX, testy = samples_for_seed(2)
model, history = fit_model_2(trainX, trainy, testX, testy)
summarize_model(model, history, trainX, trainy, testX, testy)
# +
from numpy import mean, std
def fit_model_3(trainX, trainy):
    """Train a fresh 2-hidden-layer MLP from scratch (no validation data)."""
    layer_stack = [
        Dense(5, input_dim=2, activation='relu', kernel_initializer='he_uniform'),
        Dense(5, activation='relu', kernel_initializer='he_uniform'),
        Dense(3, activation='softmax'),
    ]
    model = Sequential(layer_stack)
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    model.fit(trainX, trainy, epochs=100, verbose=0)
    return model
# 在基本模型上重复评估
def eval_standalone_model(trainX, trainy, testX, testy, n_repeats):
    """Train a fresh model `n_repeats` times; return the test accuracies."""
    accuracies = []
    for _ in range(n_repeats):
        trained = fit_model_3(trainX, trainy)
        _, acc = trained.evaluate(testX, testy, verbose=0)
        accuracies.append(acc)
    return accuracies
# 加载迁移学习模型并评估
def eval_transfer_model(trainX, trainy, testX, testy, n_fixed, n_repeats):
    '''
    Repeatedly fine-tune the saved transfer model; return the test accuracies.

    n_fixed: number of leading layers whose weights are frozen (not trainable)
    during fine-tuning.
    '''
    accuracies = []
    for _ in range(n_repeats):
        model = load_model('model.h5')
        # Freeze the first n_fixed layers of the loaded model.
        for frozen_layer in model.layers[:n_fixed]:
            frozen_layer.trainable = False
        model.compile(loss='categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])
        model.fit(trainX, trainy, epochs=100, verbose=0)
        _, acc = model.evaluate(testX, testy, verbose=0)
        accuracies.append(acc)
    return accuracies
trainX, trainy, testX, testy = samples_for_seed(2)
n_repeats = 10
dists, dist_labels = [], []
standalone_scores = eval_standalone_model(trainX, trainy, testX, testy, n_repeats)
print('Standalone %.3f (%.3f)' % (mean(standalone_scores), std(standalone_scores)))
dists.append(standalone_scores)
dist_labels.append('standalone')
# 设置不同数量的可训练的隐藏层,重复评估迁移学习模型的性能
n_fixed = 3
for i in range(n_fixed):
scores = eval_transfer_model(trainX, trainy, testX, testy, i, n_repeats)
print('Transfer (fixed=%d) %.3f (%.3f)' % (i, mean(scores), std(scores)))
dists.append(scores)
dist_labels.append('transfer f='+str(i))
plt.boxplot(dists, labels=dist_labels)
plt.show()
# -
dists
| Part I Basic Usage/20.Transfer Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## <small>
# Copyright (c) 2017-21 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# </small>
#
#
#
# # Deep Learning: A Visual Approach
# ## by <NAME>, https://glassner.com
# ### Order: https://nostarch.com/deep-learning-visual-approach
# ### GitHub: https://github.com/blueberrymusic
# ------
#
# ### What's in this notebook
#
# This notebook is provided as a “behind-the-scenes” look at code used to make some of the figures in this chapter. It is cleaned up a bit from the original code that I hacked together, and is only lightly commented. I wrote the code to be easy to interpret and understand, even for those who are new to Python. I tried never to be clever or even more efficient at the cost of being harder to understand. The code is in Python3, using the versions of libraries as of April 2021.
#
# This notebook may contain additional code to create models and images not in the book. That material is included here to demonstrate additional techniques.
#
# Note that I've included the output cells in this saved notebook, but Jupyter doesn't save the variables or data that were used to generate them. To recreate any cell's output, evaluate all the cells from the start up to that cell. A convenient way to experiment is to first choose "Restart & Run All" from the Kernel menu, so that everything's been defined and is up to date. Then you can experiment using the variables, data, functions, and other stuff defined in this notebook.
#
# ### A note on variation of output
#
# Note as always that due to the use of random numbers, some of the results here may differ from those in the book (since I re-ran everything to prepare this notebook), and will probably be different yet again if you run this notebook.
# ## Chapter 18: Autoencoders - Notebook 1: Small Models
# Some code inspired or based on:
#
# - https://blog.keras.io/building-autoencoders-in-keras.html
# - https://jmetzen.github.io/2015-11-27/vae.html
# - https://arxiv.org/pdf/1606.05908.pdf (https://github.com/cdoersch/vae_tutorial)
# +
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
from keras.layers import Convolution2D, Dense, Input, MaxPooling2D, UpSampling2D
from keras.utils import np_utils
from keras.datasets import mnist
import numpy as np
import h5py
import cv2
from keras import backend as keras_backend
keras_backend.set_image_data_format('channels_last')
# -
# Workaround for Keras issues on Mac computers (you can comment this
# out if you're not on a Mac, or not having problems)
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# +
# Make a File_Helper for saving and loading files.
save_files = False
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir
from DLBasics_Utilities import File_Helper
file_helper = File_Helper(save_files)
# -
# make the tiny autoencoder in the text
def make_model(X):
    """Build the tiny dense autoencoder from the text.

    One 20-unit ReLU hidden layer squeezed between input and output layers
    whose size matches the flattened image length of X's rows.
    """
    flat_size = len(X[0])
    autoencoder = Sequential()
    autoencoder.add(Dense(20, input_shape=[flat_size], activation='relu'))
    autoencoder.add(Dense(flat_size))
    autoencoder.compile(optimizer='adam', loss='mean_squared_error')
    return autoencoder
# A bunch of images to train on
images_list = ['tiger-bw-100',
'meter', 'teapot', 'creature', 'toothbrush', 'stairs', 'spigot', 'stones',
'upipe', 'lavender', 'ground', 'pavement', 'lawnmowers', 'manholecover', 'stick1',
'stick2', 'pinecone', 'cookies', 'bark', 'granite', 'blurrytree', 'tree',
'metalgrid', 'bushes', 'shed', 'hose']
# +
# Get an image (or images) from the disk. If do_rotations is True,
# augment the data by rotating each image by 90, 180, and 270 degrees.
image_side = 100
def get_X(images, do_rotations=True):
    """Load named .jpg images, convert to grayscale, scale to [0, 1], and
    return them flattened as rows of a (num_images, image_side**2) array.

    If do_rotations is True the data is augmented with 90/180/270-degree
    rotations, quadrupling the row count.
    """
    X = []
    for img_name in images:
        filepath = file_helper.get_input_file_path(img_name+'.jpg')
        # Resize to image_side x image_side and work in float32.
        im = cv2.resize(cv2.imread(filepath), (image_side, image_side)).astype(np.float32)
        # NOTE(review): cv2.imread returns BGR, so COLOR_BGR2GRAY would be the
        # usual choice here; the saved weight files were trained with this
        # conversion, so changing it would invalidate them — confirm intent.
        im = cv2.cvtColor( im, cv2.COLOR_RGB2GRAY )
        if do_rotations:
            rows,cols = im.shape
            # Append the image rotated by each multiple of 90 degrees.
            for angle in [0, 90, 180, 270]:
                M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
                im2 = cv2.warpAffine(im,M,(cols,rows))
                flat_im2 = np.ravel(im2/255.0)
                X.append(flat_im2)
        else:
            flat_im = np.ravel(im/255.0)
            X.append(flat_im)
    num_images = len(images)
    if do_rotations:
        num_images *= 4
    X = np.reshape(X, (num_images, len(X[0])))
    return X
# -
# predict an output images, show it, and save it
def predict_and_show(X, filename):
    """Run the module-level `model` on the first row of X, then show the
    input and the reconstruction side by side and save the figure.

    NOTE(review): relies on the global `model`, `image_side`, and
    `file_helper` — only valid after the model has been created/trained.
    """
    # Model expects a batch dimension: (1, flat_len).
    pred_X = np.reshape(X[0], (1, len(X[0])))
    prediction = model.predict(pred_X, batch_size=1)[0]
    # Left panel: the input image.
    plt.subplot(1,2,1)
    plt.imshow(X[0].reshape(image_side, image_side), cmap="gray")
    plt.xticks([],[])
    plt.yticks([],[])
    # Right panel: the autoencoder's reconstruction.
    plt.subplot(1,2,2)
    plt.imshow(prediction.reshape(image_side, image_side), cmap="gray")
    plt.xticks([],[])
    plt.yticks([],[])
    file_helper.save_figure(filename)
    plt.show()
def predict_and_show_with_errorbar(model, input_X, compare_X, filename):
    """Predict on the first row of input_X and show input, prediction, and a
    signed error map (compare_X - prediction) with a colorbar; save the figure.

    Parameters
    ----------
    model : trained autoencoder with a .predict method
    input_X : array whose row 0 (length image_side**2) is fed to the model
    compare_X : array whose row 0 is the reference image for the error map
    filename : figure name passed to file_helper.save_figure
    """
    # It's tricky to get the colorbar in the right place.
    # Code based on https://matplotlib.org/examples/pylab_examples/custom_cmap.html
    pred_X = np.reshape(input_X[0], (1, len(input_X[0])))
    prediction = model.predict(pred_X, batch_size=1)[0]
    compare_flat = np.reshape(compare_X[0], len(compare_X[0]))
    # Vectorized pixel-wise error; replaces the original O(image_side**2)
    # per-pixel Python loop with a single NumPy subtraction.
    diff = (compare_flat - prediction).reshape(image_side, image_side)
    fig, axs = plt.subplots(1, 3, figsize=(10,3))
    fig.subplots_adjust(left=0.02, bottom=0.13, right=0.95, top=0.94, wspace=0.05)
    axs[0].imshow(pred_X.reshape(image_side, image_side), cmap="gray")
    axs[0].get_xaxis().set_visible(False)
    axs[0].get_yaxis().set_visible(False)
    axs[1].imshow(prediction.reshape(image_side, image_side), cmap="gray")
    axs[1].get_xaxis().set_visible(False)
    axs[1].get_yaxis().set_visible(False)
    # Diverging colormap so positive/negative errors read symmetrically.
    cf = axs[2].imshow(diff, cmap="seismic")
    fig.colorbar(cf, ax=axs[2])
    axs[2].get_xaxis().set_visible(False)
    axs[2].get_yaxis().set_visible(False)
    file_helper.save_figure(filename)
    plt.show()
# a little utility to show the training images
def show_images_list():
    """Display the 25 training images (everything in images_list except the
    tiger at index 0) in a 5x5 grid and save the figure.
    """
    # images_list has 26 entries; index starts at 1 to skip 'tiger-bw-100',
    # leaving exactly 25 images for the hard-coded 5-by-5 grid.
    for y in range(5):
        for x in range(5):
            img_name = images_list[1+(y*5)+x]
            filepath = file_helper.get_input_file_path(img_name+'.jpg')
            # Same resize + grayscale pipeline used by get_X.
            im = cv2.resize(cv2.imread(filepath), (image_side, image_side)).astype(np.float32)
            im = cv2.cvtColor( im, cv2.COLOR_RGB2GRAY )
            plt.subplot(5, 5, 1+((y*5)+x))
            plt.imshow(im, cmap="gray")
            plt.xticks([],[])
            plt.yticks([],[])
    plt.tight_layout()
    file_helper.save_figure('NB1-tiny-AE-image-set')
    plt.show()
show_images_list()
def get_trained_model(training_data, weights_filename):
    """Return the tiny autoencoder, training it only when no saved weights
    with the given name can be loaded.

    Freshly trained weights are saved under `weights_filename` for reuse.
    """
    model = make_model(training_data)
    np.random.seed(42)
    # Guard clause: reuse cached weights when available.
    if file_helper.load_model_weights(model, weights_filename):
        return model
    print("Training the model")
    model.fit(training_data, training_data, epochs=100, batch_size=128, verbose=0)
    file_helper.save_model_weights(model, weights_filename)
    return model
# Now give it the tiger as input, and show the predicted output
input_X = get_X(['tiger-bw-100'], False)
model = get_trained_model(input_X, 'NB1-tiny-AE-trained-on-tiger')
compare_X = get_X(['tiger-bw-100'], False)
predict_and_show_with_errorbar(model, input_X, compare_X,
'NB1-tiny-AE-tiger-only-with-diffs')
# Give the tiny AE *any* other picture as input. We always get the tiger.
input_X = get_X(['stairs'], False)
compare_X = get_X(['tiger-bw-100'], False)
predict_and_show_with_errorbar(model, input_X, compare_X,
'NB1-tiny-AE-bannister-tiger-with-diffs')
# How about an input where all pixels are 0?
input_X = np.zeros((1, 10000))
compare_X = get_X(['tiger-bw-100'], False)
predict_and_show_with_errorbar(model, input_X, compare_X,
'NB1-tiny-AE-black-tiger-with-diffs')
# Let's try that again. Now train the tiny AE on the whole list of images.
input_X = get_X(images_list)
np.random.seed(42)
model = get_trained_model(input_X, 'NB1-tiny-AE-trained-on-image_set')
# And predict the tiger on this more generally-trained AE
input_X = get_X(['tiger-bw-100'], False)
compare_X = get_X(['tiger-bw-100'], False)
predict_and_show_with_errorbar(model, input_X, compare_X,
'NB1-tiny-AE-all-images-with-diffs')
| Notebooks/Chapter18-Autoencoders/Chapter18-Autoencoders-1-Small-Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Anaconda 5)
# env:
# LD_LIBRARY_PATH: /ext/anaconda5/lib
# PROJ_LIB: /ext/anaconda-2019.03/share/proj
# PYTHONHOME: /ext/anaconda5/lib/python3.5
# PYTHONPATH: /ext/anaconda5/lib/python3.5:/ext/anaconda5/lib/python3.5/site-packages
# language: python
# metadata:
# cocalc:
# description: Python/R distribution for data science
# priority: -1
# url: https://www.anaconda.com/distribution/
# name: anaconda5
# ---
# # Python - Symbolic Mathematics (`sympy`)
# +
# %matplotlib inline
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
# -
# Enable pretty (LaTeX/unicode) rendering of sympy expressions in notebook output.
sp.init_printing()
# ### `sympy` treats stuff fundamentally differently than `numpy`
# numpy evaluates numerically (floats); sympy keeps results exact and symbolic.
np.sqrt(8)
sp.sqrt(8)
np.pi
sp.pi
# ### sympy has its own way to handle rational numbers
sp.Rational(3,5)
# Exact fraction arithmetic — no floating-point rounding.
sp.Rational(3,5) + sp.Rational(1,2)
# #### Least Common Multiple
sp.lcm(2,5)
# #### Adding `.n()` to the end of a sympy expression will `evaluate` the expression numerically
sp.pi.n()
# The argument to `.n()` is the number of significant digits requested.
sp.pi.n(100)
# #### `nsimplify()` will sort-of do the reverse: find an exact expression close to a float
sp.nsimplify(0.125)
sp.nsimplify(4.242640687119286)
# Looser tolerance accepts a coarser exact approximation of pi;
# tightening the tolerance forces progressively closer candidates.
sp.nsimplify(sp.pi, tolerance=1e-2)
sp.nsimplify(sp.pi, tolerance=1e-5)
sp.nsimplify(sp.pi, tolerance=1e-6)
# ### ... to $\infty$ and beyond
# `sp.oo` is symbolic infinity: it absorbs finite addition and compares
# greater than any finite number.
sp.oo
sp.oo + 3
1e19 < sp.oo
# ### Primes
list(sp.primerange(0,100))
sp.nextprime(2019)
# Prime factorization, returned as a {prime: exponent} mapping.
sp.factorint(11182019)
# # Symbolic
#
# ### You have to explicitly tell `SymPy` what symbols you want to use.
# These symbols are reused by every cell below.
x, y, z = sp.symbols('x y z')
a, b, c = sp.symbols('a b c')
mu, rho = sp.symbols('mu rho')
# ### Expressions are then able to use these symbols
# +
my_equation = 2*x + y
my_equation
# -
# Ordinary Python arithmetic on an expression builds new symbolic expressions.
my_equation + 3
my_equation - x
my_equation / x
# +
my_greek_equation = mu**2 / rho * (a + b)
my_greek_equation
# -
# ### `SymPy` has all sorts of ways to manipulate symbolic equations
sp.simplify(my_equation / x)
# +
another_equation = (x + 2) * (x - 3)
another_equation
# -
# Multiply out the factored form.
sp.expand(another_equation)
# +
long_equation = 2*y*x**3 + 12*x**2 - x + 3 - 8*x**2 + 4*x + x**3 + 5 + 2*y*x**2 + x*y
long_equation
# -
# `collect` groups terms by powers of the given symbol.
sp.collect(long_equation,x)
sp.collect(long_equation,y)
# ### You can evaluate equations for specific values
# +
trig_equation = a*sp.sin(2*x + y) + b*sp.cos(x + 2*y)
trig_equation
# -
# `subs` substitutes symbol values; the result stays exact until `.n()` is added.
trig_equation.subs({a:2, b:3, x:4, y:5})
trig_equation.subs({a:2, b:3, x:4, y:5}).n()
# `trig=True` also applies trigonometric expansion identities.
sp.expand(trig_equation, trig=True)
sp.collect(sp.expand(trig_equation, trig=True),sp.cos(x))
# #### You can evaluate/simplify equations symbolically
# +
my_equation_xyz = sp.sqrt((x * (y - 4*x)) / (z * (y - 3*x)))
my_equation_xyz
# +
my_equation_x = (3 * a * y) / (9 * a - y)
my_equation_x
# +
# Substitute one whole expression for the symbol x inside another.
my_new_xyz = my_equation_xyz.subs(x, my_equation_x)
my_new_xyz
# -
sp.simplify(my_new_xyz)
# # System of equations
# $$
# \begin{array}{c}
# 9x - 2y = 5 \\
# -2x + 6y = 10 \\
# \end{array}
# \hspace{3cm}
# \left[
# \begin{array}{cc}
# 9 & -2 \\
# -2 & 6 \\
# \end{array}
# \right]
# \left[
# \begin{array}{c}
# x\\
# y
# \end{array}
# \right]
# =
# \left[
# \begin{array}{c}
# 5\\
# 10
# \end{array}
# \right]
# $$
# +
# Coefficient matrix A and right-hand side b for the system above.
a_matrix = sp.Matrix([[9, -2],
                      [-2, 6]])
b_matrix = sp.Matrix([[5],
                      [10]])
# -
a_matrix, b_matrix
a_matrix.inv()
# Sanity check: A^-1 * A should be the identity matrix.
a_matrix.inv() * a_matrix
# Solve the system as x = A^-1 * b.
a_matrix.inv() * b_matrix
# # Solving equations - `solve`
#
# $$
# \begin{array}{c}
# 9x - 2y = 5 \\
# -2x + 6y = 10 \\
# \end{array}
# $$
# `solve` expects expressions in "expr = 0" form, so move everything to one side.
equation_a = 9*x - 2*y - 5
equation_b = -2*x + 6*y - 10
sp.solve([equation_a, equation_b], [x,y])
# +
yet_another_equation = x**3 + x + 10
yet_another_equation
# -
# A cubic has three roots — here one real and a complex-conjugate pair.
sp.solve(yet_another_equation,x)
# #### ... complex numbers
# `sp.I` is the imaginary unit.
sp.I
# +
a_complex_number = 2 + 3 * sp.I
a_complex_number
# -
# Real and imaginary parts.
sp.re(a_complex_number), sp.im(a_complex_number)
# ### ... solving more symbolically
# +
# General quadratic with symbolic coefficients a, b, c.
symbolic_equation = a*x**2 + b*x +c
symbolic_equation
# -
# Solving for x reproduces the quadratic formula.
sp.solve(symbolic_equation, x)
# ## Calculus
symbolic_equation
# First derivative with respect to x.
sp.diff(symbolic_equation,x)
# Second derivative: the trailing 2 is the order of differentiation.
sp.diff(symbolic_equation,x,2)
# Indefinite integral (sympy omits the constant of integration).
sp.integrate(symbolic_equation,x)
sp.integrate(symbolic_equation,(x,0,5)) # limits x = 0 to 5
# Substitute coefficient values into the definite integral, then evaluate numerically.
sp.integrate(symbolic_equation,(x,0,5)).subs({a:2, b:7, c:3}).n()
trig_equation
sp.diff(trig_equation,x)
sp.integrate(trig_equation,x)
# ### Ordinary differential equation - `dsolve`
# An undefined function f(x) to differentiate symbolically.
f = sp.Function('f')
f(x)
sp.Derivative(f(x),x,x)
# +
# The ODE f''(x) + 9 f(x) = 0, written as an expression equal to zero.
equation_ode = sp.Derivative(f(x), x, x) + 9*f(x)
equation_ode
# -
# `dsolve` returns the general solution with arbitrary constants.
sp.dsolve(equation_ode, f(x))
# ### Limits
# +
limit_equation = (1 + (1 / x)) ** x
limit_equation
# -
# $$\lim _{x\to 5 }\left(1+{\frac {1}{x}}\right)^{x}$$
sp.limit(limit_equation, x, 5)
sp.limit(limit_equation, x, 5).n()
# $$\lim _{x\to \infty }\left(1+{\frac {1}{x}}\right)^{x}$$
# The classic limit defining e.
sp.limit(limit_equation, x, sp.oo)
sp.limit(limit_equation, x, sp.oo).n()
# ### Summation
# $$ \sum{\frac {x^{a}}{a!}} $$
# +
sum_equation = x**a / sp.factorial(a)
sum_equation
# -
# $$ \sum _{a=0}^{3}{\frac {x^{a}}{a!}} $$
sp.summation(sum_equation, [a, 0, 3])
sp.summation(sum_equation.subs({x:1}), [a, 0, 3])
sp.summation(sum_equation.subs({x:1}), [a, 0, 3]).n()
# $$ \sum _{a=0}^{10}{\frac {x^{a}}{a!}} $$
sp.summation(sum_equation.subs({x:1}), [a, 0, 10]).n()
# $$ \sum _{a=0}^{\infty}{\frac {x^{a}}{a!}} $$
# The infinite sum is the Taylor series of exp(x).
sp.summation(sum_equation, [a, 0, sp.oo])
# ## Let's do some graphing stuff ...
# $$
# \large y_1 = \frac{x^3}{4} - 3x
# $$
# ### Need to create a `numpy` array to do the graphing
# 200 sample points on [-2*pi, 2*pi] and the function values at those points.
my_np_x = np.linspace(-2*np.pi,2*np.pi,200)
my_np_y1 = my_np_x ** 3 / 4 - 3 * my_np_x
# +
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,4)
fig.tight_layout()
ax.set_ylim(-7,7)
ax.set_xlim(-np.pi,np.pi)
ax.set_xlabel("This is X")
ax.set_ylabel("This is Y")
# Trailing semicolon suppresses the echoed return value in notebook output.
ax.plot(my_np_x, my_np_y1, color='r', marker='None', linestyle='-', linewidth=4);
# -
# ### Fourier Series
# +
# The same y1 function, but as a sympy expression for symbolic work.
my_sp_y1 = x ** 3 / 4 - 3 * x
my_sp_y1
# +
# Fourier series of y1 on the interval [-pi, pi].
my_fourier = sp.fourier_series(my_sp_y1, (x, -sp.pi, sp.pi))
my_fourier
# -
# Keep the first 3 terms and round coefficients to 2 significant figures.
my_fourier.truncate(3).n(2)
# Partial sums with coefficients transcribed from the truncated series
# printed above (presumably to 2 significant figures — compare the output).
my_np_1term = -4.1 * np.sin(my_np_x)
my_np_2term = -4.1 * np.sin(my_np_x) + 0.91 * np.sin(2*my_np_x)
my_np_3term = -4.1 * np.sin(my_np_x) + 0.91 * np.sin(2*my_np_x) - 0.47 * np.sin(3*my_np_x)
# +
# Plot the exact curve against the 1-, 2-, and 3-term Fourier approximations.
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,4)
fig.tight_layout()
ax.set_ylim(-7,7)
ax.set_xlim(-np.pi,np.pi)
ax.set_xlabel("This is X")
ax.set_ylabel("This is Y")
ax.plot(my_np_x, my_np_y1, color='r', marker='None', linestyle='-', linewidth=8)
ax.plot(my_np_x, my_np_1term, color='b', marker='None', linestyle='--', label="1-term")
ax.plot(my_np_x, my_np_2term, color='g', marker='None', linestyle='--', label="2-term")
ax.plot(my_np_x, my_np_3term, color='k', marker='None', linestyle='--', label="3-term")
ax.legend(loc = 0);
# -
# ### Another Function
#
# $$
# \large y_2 = 2\,\sin(5x) \ e^{-x}
# $$
my_np_y2 = 2 * np.sin(5 * my_np_x) * np.exp(-my_np_x)
# +
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,4)
fig.tight_layout()
ax.set_ylim(-10,10)
ax.set_xlim(-np.pi,np.pi)
ax.set_xlabel("This is X")
ax.set_ylabel("This is Y")
ax.plot(my_np_x, my_np_y2, color='r', marker='None', linestyle='-', linewidth=4);
# -
# ### Taylor Expansions
# Symbolic version of y2 for the series expansion.
my_sp_y2 = 2 * sp.sin(5 * x) * sp.exp(-x)
my_sp_y2
# +
# Taylor series of y2 about x0 = 0 (default number of terms).
my_taylor = sp.series(my_sp_y2, x, x0 = 0)
my_taylor
# -
# #### if you want more terms
#
# * n = magnitude of the highest term
# * n = 8 means all terms up to x$^{8}$ or $\mathcal{O}(8)$
# +
my_taylor = sp.series(my_sp_y2, x, x0 = 0, n=8)
my_taylor
# -
# Drop the O(x^8) remainder term, then round coefficients to 2 significant figures.
my_taylor.removeO()
my_taylor.removeO().n(2)
# ## General Equation Solving - `nsolve`
# $$
# \large y_1 = \frac{x^3}{4} - 3x\\
# \large y_2 = 2\,\sin(5x) \ e^{-x}
# $$
# ### Where do they cross? - The graph
# +
# Overlay the two curves to eyeball the intersection points.
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,4)
fig.tight_layout()
ax.set_ylim(-5,5)
ax.set_xlim(-np.pi,4)
ax.set_xlabel("This is X")
ax.set_ylabel("This is Y")
ax.plot(my_np_x, my_np_y1, color='b', marker='None', linestyle='--', linewidth = 4)
ax.plot(my_np_x, my_np_y2, color='r', marker='None', linestyle='-', linewidth = 4);
# -
# ### Where do they cross? - The `sympy` solution
my_sp_y1, my_sp_y2
# +
# Find a root of y1 - y2 = 0 numerically, starting near x = 3.3.
my_guess = 3.3
sp.nsolve(my_sp_y1 - my_sp_y2, x, my_guess)
# +
# One root per starting guess — pick guesses near each visible crossing.
all_guesses = (3.3, 0, -0.75)
for val in all_guesses:
    result = sp.nsolve(my_sp_y1 - my_sp_y2, x, val)
    print(result)
# -
# ### Your guess has to be (somewhat) close or the solution will not converge:
# +
# NOTE: this cell is a deliberate failure demonstration — with a starting
# guess this far away, nsolve raises rather than converging.
my_guess = -40
sp.nsolve(my_sp_y1 - my_sp_y2, x, my_guess)
# -
# # `SymPy` can do *so* much more. It really is magic.
#
# ## Complete documentation can be found [here](http://docs.sympy.org/latest/index.html)
# | Python_SymPy.ipynb