code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
# default_exp callback.noisy_student
```
# Noisy student
> Callback to apply noisy student self-training (a semi-supervised learning approach) based on: Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).
```
#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.preprocessing import *
from tsai.data.transforms import *
from tsai.models.layers import *
from fastai.callback.all import *
#export
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
#export
# This is an unofficial implementation of noisy student based on:
# Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification.
# In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).
# Official tensorflow implementation available in https://github.com/google-research/noisystudent
class NoisyStudent(Callback):
    """A callback to implement the Noisy Student approach. In the original paper this was used in combination with noise:
    - stochastic depth: .8
    - RandAugment: N=2, M=27
    - dropout: .5
    Steps:
    1. Build the dl you will use as a teacher
    2. Create dl2 with the pseudolabels (either soft or hard preds)
    3. Pass any required batch_tfms to the callback
    """
    def __init__(self, dl2:DataLoader, bs:Optional[int]=None, l2pl_ratio:int=1, batch_tfms:Optional[list]=None, do_setup:bool=True,
                 pseudolabel_sample_weight:float=1., verbose:bool=False):
        r'''
        Args:
            dl2: dataloader with the pseudolabels
            bs: batch size of the new, combined dataloader. If None, it will pick the bs from the labeled dataloader.
            l2pl_ratio: ratio between labels and pseudolabels in the combined batch
            batch_tfms: transforms applied to the combined batch. If None, it will pick the batch_tfms from the labeled dataloader (if any)
            do_setup: perform a transform setup on the labeled dataset.
            pseudolabel_sample_weight: weight of each pseudolabel sample relative to the labeled one of the loss.
        '''
        self.dl2, self.bs, self.l2pl_ratio, self.batch_tfms, self.do_setup, self.verbose = dl2, bs, l2pl_ratio, batch_tfms, do_setup, verbose
        self.pl_sw = pseudolabel_sample_weight
    def before_fit(self):
        # Default to the labeled dataloader's batch transforms when none were given.
        if self.batch_tfms is None: self.batch_tfms = self.dls.train.after_batch
        self.old_bt = self.dls.train.after_batch # Remove and store dl.train.batch_tfms
        self.old_bs = self.dls.train.bs
        # Transforms are applied manually in before_batch to the *combined* batch,
        # so disable them on the labeled dataloader for the duration of the fit.
        self.dls.train.after_batch = noop
        if self.do_setup and self.batch_tfms:
            for bt in self.batch_tfms: bt.setup(self.dls.train)
        if self.bs is None: self.bs = self.dls.train.bs
        # Split the combined batch between labeled and pseudolabeled samples
        # according to l2pl_ratio, capped by the pseudolabel dataset size.
        self.dl2.bs = min(len(self.dl2.dataset), int(self.bs / (1 + self.l2pl_ratio)))
        self.dls.train.bs = self.bs - self.dl2.bs
        pv(f'labels / pseudolabels per training batch : {self.dls.train.bs} / {self.dl2.bs}', self.verbose)
        rel_weight = (self.dls.train.bs/self.dl2.bs) * (len(self.dl2.dataset)/len(self.dls.train.dataset))
        pv(f'relative labeled/ pseudolabel sample weight in dataset: {rel_weight:.1f}', self.verbose)
        self.dl2iter = iter(self.dl2)
        # Swap in a loss wrapper so pseudolabel samples can be weighted separately.
        self.old_loss_func = self.learn.loss_func
        self.learn.loss_func = self.loss
    def before_batch(self):
        if self.training:
            X, y = self.x, self.y
            # Cycle through the pseudolabel dataloader indefinitely.
            try: X2, y2 = next(self.dl2iter)
            except StopIteration:
                self.dl2iter = iter(self.dl2)
                X2, y2 = next(self.dl2iter)
            # If pseudolabels are 2d (soft/one-hot), one-hot the hard labels so both match.
            # NOTE(review): `device` is not defined in this cell -- presumably imported
            # via tsai.imports; verify.
            if y.ndim == 1 and y2.ndim == 2: y = torch.eye(self.learn.dls.c)[y].to(device) # ensure both
            X_comb, y_comb = concat(X, X2), concat(y, y2)
            if self.batch_tfms is not None:
                X_comb = compose_tfms(X_comb, self.batch_tfms, split_idx=0)
                y_comb = compose_tfms(y_comb, self.batch_tfms, split_idx=0)
            self.learn.xb = (X_comb,)
            self.learn.yb = (y_comb,)
            pv(f'\nX: {X.shape} X2: {X2.shape} X_comb: {X_comb.shape}', self.verbose)
            pv(f'y: {y.shape} y2: {y2.shape} y_comb: {y_comb.shape}', self.verbose)
    def loss(self, output, target):
        # Convert one-hot/soft targets back to class indices for the base loss.
        if target.ndim == 2: _, target = target.max(dim=1)
        if self.training and self.pl_sw != 1:
            # Weighted sum: labeled samples come first in the combined batch.
            loss = (1 - self.pl_sw) * self.old_loss_func(output[:self.dls.train.bs], target[:self.dls.train.bs])
            loss += self.pl_sw * self.old_loss_func(output[self.dls.train.bs:], target[self.dls.train.bs:])
            return loss
        else:
            return self.old_loss_func(output, target)
    def after_fit(self):
        # Restore the dataloader transforms, batch sizes and the original loss.
        self.dls.train.after_batch = self.old_bt
        self.learn.loss_func = self.old_loss_func
        self.dls.train.bs = self.old_bs
        self.dls.bs = self.old_bs
from tsai.data.all import *
from tsai.models.all import *
from tsai.tslearner import *
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, return_split=False)
# In a real use case the pseudolabels would come from a teacher model's
# predictions on unlabeled data; here the labeled data is reused as a demo.
pseudolabeled_data = X
soft_preds = True
# NOTE(review): with soft_preds=True this produces hard category indices and
# with soft_preds=False one-hot labels; the naming suggests the two branches
# may be swapped -- confirm against the intended teacher-prediction format.
pseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)
dsets2 = TSDatasets(pseudolabeled_data, pseudolabels)
dl2 = TSDataLoader(dsets2, num_workers=0)
noisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)
learn = TSClassifier(X, y, splits=splits, batch_tfms=[TSStandardize(), TSRandomSize(.5)], cbs=noisy_student_cb)
learn.fit_one_cycle(1)
# Same pipeline, now with hard (one-hot) pseudolabels.
pseudolabeled_data = X
soft_preds = False
pseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)
dsets2 = TSDatasets(pseudolabeled_data, pseudolabels)
dl2 = TSDataLoader(dsets2, num_workers=0)
noisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)
learn = TSClassifier(X, y, splits=splits, batch_tfms=[TSStandardize(), TSRandomSize(.5)], cbs=noisy_student_cb)
learn.fit_one_cycle(1)
#hide
out = create_scripts(); beep(out)
```
| github_jupyter |
# Customer Churn Prediction
```
# Importing necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
#import warnings
#warnings.simplefilter("ignore")
```
### Data Preparation based on EDA
```
def datapreparation(filepath):
    """Load the Telco churn CSV and encode it for modelling.

    Args:
        filepath: path to the Telco_Customer_Churn csv file.

    Returns:
        pd.DataFrame with the customerID column dropped, all yes/no style
        columns encoded as 0/1, and the multi-category columns one-hot
        encoded (first level dropped).
    """
    df = pd.read_csv(filepath)
    df = df.drop(columns=["customerID"])
    # TotalCharges contains blank strings for zero-tenure customers; coerce any
    # non-numeric value to NaN and then to 0 before treating it as a float.
    # (The original chained `inplace=True` fillna on a column is deprecated in
    # pandas 2.x and only handled the exact " " string.)
    df["TotalCharges"] = pd.to_numeric(df["TotalCharges"], errors="coerce").fillna(0.0)
    # Plain binary yes/no columns: "No" -> 0, anything else -> 1.
    cols1 = ['Partner', 'Dependents', 'PaperlessBilling', 'Churn', 'PhoneService']
    for col in cols1:
        df[col] = (df[col] != "No").astype(int)
    df["gender"] = (df["gender"] != "Male").astype(int)  # Male -> 0, Female -> 1
    df["MultipleLines"] = df["MultipleLines"].map({'No phone service': 0, 'No': 0, 'Yes': 1})
    # Service columns where 'No internet service' is equivalent to 'No'.
    cols2 = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies']
    for col in cols2:
        df[col] = df[col].map({'No internet service': 0, 'No': 0, 'Yes': 1})
    # One-hot encode the remaining multi-category columns.
    df = pd.get_dummies(df, columns=['InternetService', 'Contract', 'PaymentMethod'], drop_first=True)
    return df
# Load and encode the dataset.
telco = datapreparation(filepath = "./Data/Telco_Customer_Churn.csv")
telco.head()
# Sanity check: no missing values should remain after preparation.
telco.isnull().any().any()
```
The dataframe has no null values
### Model building
```
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
# Stratified 80/20 split so train and test keep the same churn ratio.
train, test = train_test_split(telco, test_size=0.2, random_state=111, stratify = telco.Churn)
# Predictor columns: everything except the target.
x = telco.columns[telco.columns!="Churn"]
y = "Churn"
train_x = train[x]
train_y = train[y]
test_x = test[x]
test_y = test[y]
#function for model fitting
def churn_prediction(algo, training_x, training_y, testing_x, testing_y, cols, cf = 'coefficients'):
    """Fit `algo` and report/plot its test-set diagnostics.

    Args:
        algo: sklearn-style estimator exposing fit/predict/predict_proba and
            either `coef_` (cf='coefficients') or `feature_importances_` (cf='features').
        training_x, training_y: training predictors / target.
        testing_x, testing_y: test predictors / target.
        cols: predictor column names, used for the importance plot.
        cf: 'coefficients' or 'features' -- which estimator attribute to plot.

    Side effects: prints the classification report, accuracy and ROC-AUC, and
    shows a figure with the confusion matrix, ROC curve and feature importances.
    """
    algo.fit(training_x,training_y)
    predictions = algo.predict(testing_x)
    probabilities = algo.predict_proba(testing_x)[:,1]
    #coeffs
    if cf == "coefficients":
        coefficients = pd.DataFrame(algo.coef_.ravel())
    elif cf == "features":
        coefficients = pd.DataFrame(algo.feature_importances_)
    column_df = pd.DataFrame(cols)
    coef_sumry = (pd.merge(coefficients, column_df, left_index= True, right_index= True, how = "left"))
    coef_sumry.columns = ["coefficients","features"]
    coef_sumry = coef_sumry.sort_values(by = "coefficients",ascending = False)
    print (algo)
    print ("\n Classification report : \n",classification_report(testing_y,predictions))
    print ("Accuracy Score : ",accuracy_score(testing_y,predictions))
    #confusion matrix
    conf_matrix = confusion_matrix(testing_y,predictions)
    plt.figure(figsize=(12,12))
    plt.subplot(221)
    sns.heatmap(conf_matrix, fmt = "d",annot=True, cmap='Blues')
    plt.title('Confusion Matrix')  # BUG FIX: title was misspelled 'Confuion Matrix'
    plt.ylabel('True Values')
    plt.xlabel('Predicted Values')
    #roc_auc_score
    model_roc_auc = roc_auc_score(testing_y,probabilities)
    print ("Area under curve : ",model_roc_auc,"\n")
    fpr,tpr,thresholds = roc_curve(testing_y,probabilities)
    plt.subplot(222)
    plt.plot(fpr, tpr, color='darkorange', lw=1, label = "Auc : %.3f" %model_roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    # bottom half of the figure: sorted feature importances
    plt.subplot(212)
    sns.barplot(x = coef_sumry["features"] ,y = coef_sumry["coefficients"])
    plt.title('Feature Importances')
    plt.xticks(rotation="vertical")
    plt.show()
```
### Hyperparameter Tuning
#### Grid 1: Selecting class weight and estimators
```
# Grid 1: tune the number of trees and the feature-sampling strategy.
# NOTE(review): max_features='auto' was removed in scikit-learn 1.3 -- verify
# the installed version before re-running.
param_grid1 = {'max_features':['auto', 'sqrt', 'log2', None],
               'n_estimators':[300, 500, 700, 900, 1100, 1300]}
rf_model = RandomForestClassifier()
grid1 = GridSearchCV(estimator=rf_model, param_grid=param_grid1, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid1.fit(train_x, train_y)
grid1.best_params_
dt = pd.DataFrame(grid1.cv_results_)
# Cast parameter columns to str so pivot_table can group the mixed-type values.
dt.param_max_features = dt.param_max_features.astype(str)
dt.param_n_estimators = dt.param_n_estimators.astype(str)
table = pd.pivot_table(dt, values='mean_test_score', index='param_n_estimators',
                       columns='param_max_features')
sns.heatmap(table)
grid1.best_score_
```
#### Grid 2: Selecting max depth and split criterion
```
# Grid 2: with max_features fixed to 'log2' (best from Grid 1), tune tree
# depth and the split criterion around the best n_estimators.
param_grid2 = {'max_features':['log2'],
               'n_estimators':[800, 900, 1000],
               'criterion': ['entropy', 'gini'],
               'max_depth': [7, 9, 11, 13, 15, None]}
rf_model = RandomForestClassifier()
grid2 = GridSearchCV(estimator=rf_model, param_grid=param_grid2, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid2.fit(train_x, train_y)
grid2.best_params_
dt = pd.DataFrame(grid2.cv_results_)
# Heatmaps of mean CV F1: depth vs criterion, then depth vs n_estimators.
table = pd.pivot_table(dt, values='mean_test_score', index='param_max_depth', columns='param_criterion')
sns.heatmap(table)
table = pd.pivot_table(dt, values='mean_test_score', index='param_max_depth', columns='param_n_estimators')
sns.heatmap(table)
grid2.best_score_
```
Checking if other depth and estimator value results better score
```
# Check whether neighbouring depth / n_estimators values score better.
param_grid2_2 = {'max_features':['log2'],
                 'n_estimators':[950, 1000, 1050],
                 'criterion': ['gini'],
                 'max_depth': [10, 11, 12]}
rf_model = RandomForestClassifier()
# BUG FIX: the original cell passed param_grid2 (the previous grid) here, so
# this search silently re-ran Grid 2 instead of the refined grid defined above.
grid2_2 = GridSearchCV(estimator=rf_model, param_grid=param_grid2_2, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid2_2.fit(train_x, train_y)
grid2_2.best_params_
grid2_2.best_score_
```
#### Grid 3: Selecting minimum samples leaf and split
```
# Grid 3: with the structure fixed (log2 features, 1000 trees, depth 11, gini),
# tune the leaf/split minimum-sample regularizers.
param_grid3 = {'max_features':['log2'],
               'n_estimators':[1000],
               'criterion': ['gini'],
               'max_depth': [11],
               'min_samples_leaf': [1, 3, 5, 7],
               'min_samples_split': [2, 4, 6, 8]}
rf_model = RandomForestClassifier()
grid3 = GridSearchCV(estimator=rf_model, param_grid=param_grid3, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid3.fit(train_x, train_y)
grid3.best_params_
dt = pd.DataFrame(grid3.cv_results_)
table = pd.pivot_table(dt, values='mean_test_score', index='param_min_samples_leaf', columns='param_min_samples_split')
sns.heatmap(table)
grid3.best_score_
```
#### Grid 4: Selecting class weight
```
# Grid 4: tune the class weight of the positive (churn) class to counter the
# class imbalance, with all other hyper-parameters fixed from grids 1-3.
param_grid4 = {'class_weight':[{0:1, 1:1}, {0:1, 1:2}, {0:1, 1:3}],
               'max_features':['log2'],
               'n_estimators':[1000],
               'criterion': ['gini'],
               'max_depth': [11],
               'min_samples_leaf': [3],
               'min_samples_split': [8]}
rf_model = RandomForestClassifier()
grid4 = GridSearchCV(estimator=rf_model, param_grid=param_grid4, n_jobs=-1, cv=3, verbose=1, scoring = 'f1')
grid4.fit(train_x, train_y)
grid4.best_params_
dt = pd.DataFrame(grid4.cv_results_)
# Dicts are unhashable, so stringify before pivoting.
dt.param_class_weight = dt.param_class_weight.astype(str)
table = pd.pivot_table(dt, values='mean_test_score', index='param_class_weight')
sns.heatmap(table)
grid4.best_score_
```
#### Final Model
```
# Final model with the hyper-parameters selected by grids 1-4.
# Only the non-default parameters are passed: the original cell also passed
# min_impurity_split=None, which was removed in scikit-learn 1.0 and now
# raises a TypeError; all other removed arguments were defaults anyway.
model = RandomForestClassifier(class_weight={0: 1, 1: 3},
                               criterion='gini', max_depth=11, max_features='log2',
                               min_samples_leaf=3, min_samples_split=8,
                               n_estimators=1000)
churn_prediction(model, train_x, train_y, test_x, test_y, x,"features")
```
Checking the model's performance on train data itself
```
# 5-fold CV F1 on the training data, to compare against the test-set score.
train_scores = cross_val_score(model, train_x, train_y, cv = 5, scoring='f1')
train_scores
np.mean(train_scores)
```
As we can see that the performance of the model on test data is same as training data. So, we can conclude that there is no overfitting or underfitting.
#### Saving model
```
import pickle
# Persist the trained model for the web app. Use a context manager so the
# file handle is closed (the original left the object from open() dangling).
with open('churnmodel.pkl', 'wb') as f:
    pickle.dump(model, f)
```
## Model Interpretability
#### ELI5
```
import eli5
from eli5.sklearn import PermutationImportance
# Permutation importance computed on the held-out test set.
perm = PermutationImportance(model, random_state=1).fit(test_x, test_y)
eli5.show_weights(perm, feature_names = test_x.columns.tolist())
```
Visualizing how the partial dependance plots look for top features
```
from pdpbox import pdp, info_plots
# Partial dependence of the predicted churn probability on the top features.
# The original cell repeated the same isolate/plot pair six times; loop over
# the feature names instead (same features, same order, same plots).
top_features = ['InternetService_Fiber optic', 'PaymentMethod_Mailed check',
                'MonthlyCharges', 'TotalCharges', 'Contract_Two year', 'tenure']
for feat in top_features:
    pdp_p = pdp.pdp_isolate(model=model, dataset=test_x, model_features=test_x.columns.values,
                            feature=feat)
    pdp.pdp_plot(pdp_p, feat)
    plt.show()
```
#### SHAP
```
import shap
shap.initjs()
import joblib
# Explain a single test sample with the tree explainer.
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(np.array(test_x.iloc[0]))
shap.force_plot(explainer.expected_value[1], shap_values[1], test_x.iloc[0])
# Saving Explainer
ex_filename = 'explainer.bz2'
joblib.dump(explainer, filename=ex_filename, compress=('bz2', 9))
# Reload the persisted explainer (as the web app will do) and re-use it,
# saving the force plot as a static image.
explainer = joblib.load(filename="explainer.bz2")
shap_values = explainer.shap_values(np.array(test_x.iloc[0]))
shap.force_plot(explainer.expected_value[1], shap_values[1], list(test_x.columns), matplotlib = True, show = False).savefig('static/images/shap.png', bbox_inches="tight")
```
#### Gauge Chart
```
from matplotlib.patches import Circle, Wedge, Rectangle
def degree_range(n):
    """Split the 0-180 degree gauge arc into `n` equal sectors.

    Returns a tuple (ranges, midpoints): `ranges` is an (n, 2) array of
    (start, end) angles and `midpoints` holds the centre angle of each sector.
    """
    edges = np.linspace(0, 180, n + 1, endpoint=True)
    starts = edges[:-1]
    ends = edges[1:]
    midpoints = starts + (ends - starts) / 2.
    return np.c_[starts, ends], midpoints
def rot_text(ang):
    """Return the text rotation (degrees) for a gauge label placed at angle `ang`.

    The original expression multiplied and divided by pi, which cancels out:
    degrees(radians(ang) - radians(90)) is simply ang - 90.
    """
    return ang - 90.0
def gauge(labels=['LOW','MEDIUM','HIGH','EXTREME'], \
          colors=['#00FF00','#FFFF00','#FF7700','#FF0000'], Probability=1, fname=False):
    """Draw a semicircular gauge whose needle points at `Probability`.

    Args:
        labels: sector labels, listed from the low end to the high end.
        colors: one color per label, in the same order.
        Probability: value in [0, 1]; 1 points the needle at the last label.
        fname: if given (truthy), the figure is also saved to this path.
    """
    # NOTE(review): mutable default arguments; harmless here because both lists
    # are only re-bound to reversed copies, never mutated in place.
    N = len(labels)
    colors = colors[::-1]
    """
    begins the plotting
    """
    fig, ax = plt.subplots()
    # NOTE(review): hard-coded 4 sectors -- presumably should be degree_range(N)
    # to support a different number of labels; confirm before generalizing.
    ang_range, mid_points = degree_range(4)
    labels = labels[::-1]
    """
    plots the sectors and the arcs
    """
    patches = []
    for ang, c in zip(ang_range, colors):
        # sectors
        patches.append(Wedge((0.,0.), .4, *ang, facecolor='w', lw=2))
        # arcs
        patches.append(Wedge((0.,0.), .4, *ang, width=0.10, facecolor=c, lw=2, alpha=0.5))
    [ax.add_patch(p) for p in patches]
    """
    set the labels (e.g. 'LOW','MEDIUM',...)
    """
    for mid, lab in zip(mid_points, labels):
        ax.text(0.35 * np.cos(np.radians(mid)), 0.35 * np.sin(np.radians(mid)), lab, \
            horizontalalignment='center', verticalalignment='center', fontsize=14, \
            fontweight='bold', rotation = rot_text(mid))
    """
    set the bottom banner and the title
    """
    r = Rectangle((-0.4,-0.1),0.8,0.1, facecolor='w', lw=2)
    ax.add_patch(r)
    ax.text(0, -0.05, 'Churn Probability ' + np.round(Probability,2).astype(str), horizontalalignment='center', \
        verticalalignment='center', fontsize=22, fontweight='bold')
    """
    plots the arrow now
    """
    # 0 degrees is the right end of the arc, 180 degrees the left end.
    pos = (1-Probability)*180
    ax.arrow(0, 0, 0.225 * np.cos(np.radians(pos)), 0.225 * np.sin(np.radians(pos)), \
        width=0.04, head_width=0.09, head_length=0.1, fc='k', ec='k')
    # Needle hub: black dot with a white centre drawn on top (zorder=11).
    ax.add_patch(Circle((0, 0), radius=0.02, facecolor='k'))
    ax.add_patch(Circle((0, 0), radius=0.01, facecolor='w', zorder=11))
    """
    removes frame and ticks, and makes axis equal and tight
    """
    ax.set_frame_on(False)
    ax.axes.set_xticks([])
    ax.axes.set_yticks([])
    ax.axis('equal')
    plt.tight_layout()
    if fname:
        fig.savefig(fname, dpi=200)
# Render the gauge for the first test customer's predicted churn probability.
gauge(Probability=model.predict_proba(test_x.iloc[0:1])[0,1])
# final features
test_x.columns
```
| github_jupyter |
# 窗口函数与卷积
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## 窗口函数
在信号处理中,窗函数(window function)是一种除在给定区间之外取值均为0的实函数.譬如:在给定区间内为常数而在区间外为0的窗函数被形象地称为矩形窗.任何函数与窗函数之积仍为窗函数,所以相乘的结果就像透过窗口"看"其他函数一样.窗函数在频谱分析,滤波器设计,波束形成,以及音频数据压缩(如在Ogg Vorbis音频格式中)等方面有广泛的应用.
numpy中提供了几种常见的窗函数
函数|说明
---|---
bartlett(M) |Bartlett窗口函数
blackman(M) |Blackman 窗口函数
hamming(M) |Hamming窗口函数
hanning(M) |Hanning窗口函数
kaiser(M, beta) |Kaiser窗口函数
### bartlett窗
$$ w(n)=\frac{2}{N-1}\cdot\left(\frac{N-1}{2}-\left |n-\frac{N-1}{2}\right |\right)\, $$
```
# 51-point Bartlett (triangular) window.
window = np.bartlett(51)
plt.plot(window)
plt.title("Bartlett window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
```
### Blackman窗
$$
w(n)=a_0 - a_1 \cos \left ( \frac{2 \pi n}{N-1} \right) + a_2 \cos \left ( \frac{4 \pi n}{N-1} \right)
$$
$$
{\displaystyle a_{0}=0.42;\quad a_{1}=0.5;\quad a_{2}=0.08\,} a_0=0.42;\quad a_1=0.5;\quad a_2=0.08\,
$$
```
# 51-point Blackman window.
window = np.blackman(51)
plt.plot(window)
plt.title("Blackman window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
```
### Hamming窗
$$ w(n)=0.53836 - 0.46164\; \cos \left ( \frac{2 \pi n}{N-1} \right) $$
```
# 51-point Hamming window.
window = np.hamming(51)
plt.plot(window)
plt.title("Hamming window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
```
### Hanning窗
$$ w(n)= 0.5\; \left(1 - \cos \left ( \frac{2 \pi n}{N-1} \right) \right) $$
```
# 51-point Hanning (raised-cosine) window.
window = np.hanning(51)
plt.plot(window)
plt.title("Hanning window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
```
### Kaiser窗
$$ w(n)=\frac{I_0\Bigg (\pi\alpha \sqrt{1 - (\begin{matrix} \frac{2 n}{N-1} \end{matrix}-1)^2}\Bigg )} {I_0(\pi\alpha)} $$
```
# 51-point Kaiser window with shape parameter beta = 14.
window = np.kaiser(51, 14)
plt.plot(window)
plt.title("Kaiser window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
```
## 卷积
卷积运算符经常出现在信号处理中,其中它模拟线性时不变系统对信号的影响.在概率理论中,两个独立随机变量的和根据它们各自的分布的卷积来分布.
**离散**卷积运算定义为:
$$ (a * v)[n] = \sum_{m = -\infty}^{\infty} a[m] v[n - m] $$
numpy提供了通用的卷积操作`convolve(a, v, mode='full')`
其中前两个参数都是一维的输入向量,而mode则提供了可选的三种运算规则,它可以有3种选项
+ full
默认情况下,模式为"full".这在每个重叠点处返回卷积,其输出形状为(N+M-1,).在卷积的端点,信号不完全重叠,并且可以看到边界效应.
+ same
模式same返回长度max(M,N)的输出.边界效应仍然可见.
+ valid
模式'valid'返回长度为max(M,N)-min(M,N)+1.卷积产物仅针对信号完全重叠的点给出.信号边界外的值没有效果.
```
# 'full' (default): every point of overlap, output length N+M-1 = 5.
np.convolve([1, 2, 3], [0, 1, 0.5])
# a behaves like [...0,0,1,2, 3 ,0,0,...]
# v behaves like [...0,0,0,1,0.5,0,0,...]
# [0*0.5+0*1+1*0+2*0+3*0,
# 0*0.5+1*1+2*0+3*0,
# 1*0.5+2*1+3*0,
# 1*0+2*0.5+3*1+0*0,
# 1*0+2*0+3*0.5+0*1+0*0]
# 'same': output length max(M, N) = 3, centred on the full result.
np.convolve([1,2,3],[0,1,0.5], 'same')
# 'valid': only fully-overlapping points, length max(M,N)-min(M,N)+1 = 2.
np.convolve([1,2,3],[0,1,0.5], 'valid')
```
| github_jupyter |
# Basics of the DVR calculations with Libra
## Table of Content <a name="TOC"></a>
1. [General setups](#setups)
2. [Mapping points on multidimensional grids ](#mapping)
3. [Functions of the Wfcgrid2 class](#wfcgrid2)
4. [Showcase: computing energies of the HO eigenstates](#ho_showcase)
5. [Dynamics: computed with SOFT method](#soft_dynamics)
### A. Learning objectives
- to map sequential numbers of the grid points to the multi-dimensional index and vice versa
- to define the Wfcgrid2 class objects for DVR calculations
- to initialize wavefunctions of the grids
- to compute various properties of the wavefunctions defined on the grid
- to set up and conduct the quantum dynamics of the DVR of wavefunctions
### B. Use cases
- [Compute energies of the DVR wavefunctions](#energy-use-case)
- [Numerically exact solution of the TD-SE](#tdse-solution)
### C. Functions
- `liblibra::libdyn::libwfcgrid`
- [`compute_mapping`](#compute_mapping-1)
- [`compute_imapping`](#compute_imapping-1)
### D. Classes and class members
- `liblibra::libdyn::libwfcgrid2`
- [`Wfcgrid2`](#Wfcgrid2-1) | [also here](#Wfcgrid2-2)
- [`nstates`](#nstates-1)
- [`ndof`](#ndof-1)
- [`Npts`](#Npts-1)
- [`npts`](#npts-1)
- [`rmin`](#rmin-1)
- [`rmax`](#rmax-1)
- [`dr`](#dr-1)
- [`kmin`](#kmin-1)
- [`dk`](#dk-1)
- [`gmap`](#gmap-1) | [also here](#gmap-2)
- [`imap`](#imap-1) | [also here](#imap-2)
- [`PSI_dia`](#PSI_dia-1)
- [`reciPSI_dia`](#reciPSI_dia-1)
- [`PSI_adi`](#PSI_adi-1)
- [`reciPSI_adi`](#reciPSI_adi-1)
- [`Hdia`](#Hdia-1)
- [`U`](#U-1)
- [`add_wfc_Gau`](#add_wfc_Gau-1)
- [`add_wfc_HO`](#add_wfc_HO-1) | [also here](#add_wfc_HO-2)
- [`add_wfc_ARB`](#add_wfc_ARB-1)
- [`norm`](#norm-1) | [also here](#norm-2)
- [`e_kin`](#e_kin-1) | [also here](#e_kin-2)
- [`e_pot`](#e_pot-1) | [also here](#e_pot-2)
- [`e_tot`](#e_tot-1) | [also here](#e_tot-2)
- [`get_pow_q`](#get_pow_q-1)
- [`get_pow_p`](#get_pow_p-1) | [also here](#e_kin-2)
- [`get_den_mat`](#get_den_mat-1)
- [`get_pops`](#get_pops-1) | [also here](#get_pops-2)
- [`update_propagator_H`](#update_propagator_H-1) | [also here](#update_propagator_H-2)
- [`update_propagator_K`](#update_propagator_K-1)
- [`SOFT_propagate`](#SOFT_propagate-1)
- [`update_reciprocal`](#update_reciprocal-1) | [also here](#update_reciprocal-2)
- [`normalize`](#normalize-1) | [also here](#normalize-2)
- [`update_Hamiltonian`](#update_Hamiltonian-1) | [also here](#update_Hamiltonian-2)
- [`update_adiabatic`](#update_adiabatic-1)
## 1. General setups
<a name="setups"></a>[Back to TOC](#TOC)
First, import all the necessary libraries:
* liblibra_core - for general data types from Libra
The output of the cell below may throw a bunch of warnings, but this is not a problem — nothing really serious. So just disregard them.
```
import os
import sys
import math
if sys.platform=="cygwin":
from cyglibra_core import *
elif sys.platform=="linux" or sys.platform=="linux2":
from liblibra_core import *
from libra_py import data_outs
```
Also, lets import matplotlib for plotting and define all the plotting parameters: sizes, colors, etc.
```
import matplotlib.pyplot as plt # plots
# Notebook-wide plotting defaults: large fonts and tighter subplot margins.
plt.rc('axes', titlesize=38) # fontsize of the axes title
plt.rc('axes', labelsize=38) # fontsize of the x and y labels
plt.rc('legend', fontsize=38) # legend fontsize
plt.rc('xtick', labelsize=38) # fontsize of the tick labels
plt.rc('ytick', labelsize=38) # fontsize of the tick labels
plt.rc('figure.subplot', left=0.2)
plt.rc('figure.subplot', right=0.95)
plt.rc('figure.subplot', bottom=0.13)
plt.rc('figure.subplot', top=0.88)
# Named color palette used throughout the notebook; keys are "<group><shade>".
colors = {}
colors.update({"11": "#8b1a0e"}) # red
colors.update({"12": "#FF4500"}) # orangered
colors.update({"13": "#B22222"}) # firebrick
colors.update({"14": "#DC143C"}) # crimson
colors.update({"21": "#5e9c36"}) # green
colors.update({"22": "#006400"}) # darkgreen
colors.update({"23": "#228B22"}) # forestgreen
colors.update({"24": "#808000"}) # olive
colors.update({"31": "#8A2BE2"}) # blueviolet
colors.update({"32": "#00008B"}) # darkblue
colors.update({"41": "#2F4F4F"}) # darkslategray
# Preferred ordering of the palette keys when cycling through line colors.
clrs_index = ["11", "21", "31", "41", "12", "22", "32", "13","23", "14", "24"]
```
We'll use these auxiliary functions later:
```
class tmp:
    # Minimal namespace object used to carry the `ham_dia` attribute that the
    # Libra DVR machinery expects from a Hamiltonian-computing callback.
    pass
def harmonic1D(q, params):
    """
    1D Harmonic potential: V(x) = 1/2 k x^2

    Args:
        q: coordinate object; q.get(0) is the x coordinate.
        params: dict with the force constant under key "k".

    Returns:
        An object with a 1x1 complex matrix `ham_dia` holding the diabatic
        Hamiltonian.
    """
    x = q.get(0)
    k = params["k"]
    obj = tmp()
    obj.ham_dia = CMATRIX(1,1)
    # Explicit complex cast, consistent with harmonic2D which multiplies by
    # (1.0+0.0j); CMATRIX elements are complex.
    obj.ham_dia.set(0,0, (0.5*k*x**2)*(1.0+0.0j))
    return obj
def harmonic2D(q, params):
    """
    2D Harmonic potential: V(x, y) = 1/2 kx x^2 + 1/2 ky y^2

    `q.get(0)` / `q.get(1)` give the x and y coordinates; `params` supplies the
    force constants "kx" and "ky". Returns an object whose 1x1 complex matrix
    `ham_dia` holds the diabatic Hamiltonian.
    """
    xx, yy = q.get(0), q.get(1)
    fx, fy = params["kx"], params["ky"]
    res = tmp()
    res.ham_dia = CMATRIX(1,1)
    energy = 0.5*fx*xx**2 + 0.5*fy*yy**2
    res.ham_dia.set(0, 0, energy*(1.0+0.0j) )
    return res
```
## 2. Mapping points on multidimensional grids
<a name="mapping"></a>[Back to TOC](#TOC)
Imagine a 3D grid with:
* 3 points in the 1-st dimension
* 2 points in the 2-nd dimension
* 4 points in the 3-rd dimension
So there are 3 x 2 x 4 = 24 points
However, we can still store all of them in 1D array, which is more efficient way. However, to refer to the points, we need a function that does the mapping.
This example demonstrates the functions:
`vector<vector<int> > compute_mapping(vector<vector<int> >& inp, vector<int>& npts)`
`int compute_imapping(vector<int>& inp, vector<int>& npts)`
defined in: dyn/wfcgrid/Grid_functions.h
<a name="compute_mapping-1"></a>
```
# Build the full list of grid points for a 3 x 2 x 4 grid: res[i] holds the
# per-dimension indices of the i-th point of the flattened (1D) storage.
inp = intList2()
npts = Py2Cpp_int([3,2,4])
res = compute_mapping(inp, npts);
print("The number of points = ", len(res) )
print("The number of dimensions = ", len(res[0]) )
```
And the inverse of that mapping
<a name="compute_imapping-1"></a>
```
# Inverse mapping: from per-dimension indices back to the flat (global) index.
cnt = 0
for i in res:
    print("point # ", cnt, Cpp2Py(i) )
    print("index of that point in the global array =", compute_imapping(i, Py2Cpp_int([3,2,4])) )
    cnt +=1
```
## 3. Functions of the Wfcgrid2 class
<a name="wfcgrid2"></a>[Back to TOC](#TOC)
This example demonstrates the functions of the class `Wfcgrid2`
defined in: `dyn/wfcgrid2/Wfcgrid2.h`
Here, we test simple Harmonic oscillator eigenfunctions and will
compare the energies as computed by Libra to the analytic results
### 3.1. Initialize the grid and do the mappings (internally):
`Wfcgrid2(vector<double>& rmin_, vector<double>& rmax_, vector<double>& dr_, int nstates_)`
<a name="Wfcgrid2-1"></a>
```
# 1D grid on [-15, 15] with step 0.01, for a single electronic state.
num_el_st = 1
wfc = Wfcgrid2(Py2Cpp_double([-15.0]), Py2Cpp_double([15.0]), Py2Cpp_double([0.01]), num_el_st)
```
The key descriptors are stored in the `wfc` object:
<a name="nstates-1"></a> <a name="ndof-1"></a> <a name="Npts-1"></a> <a name="npts-1"></a>
<a name="rmin-1"></a> <a name="rmax-1"></a> <a name="dr-1"></a> <a name="kmin-1"></a> <a name="dk-1"></a>
```
# Inspect the grid descriptors stored on the Wfcgrid2 object.
print(F"number of quantum states: {wfc.nstates}")
print(F"number of nuclear degrees of freedom: {wfc.ndof}")
print(F"the total number of grid points: {wfc.Npts}")
print(F"the number of grid points in each dimension: {Cpp2Py(wfc.npts)}")
print(F"the lower boundary of the real-space grid in each dimension: {Cpp2Py(wfc.rmin)}")
print(F"the upper boundary of the real-space grid in each dimension: {Cpp2Py(wfc.rmax)}")
print(F"the real-space grid-step in each dimension: {Cpp2Py(wfc.dr)}")
print(F"the lower boundary of the reciprocal-space grid in each dimension: {Cpp2Py(wfc.kmin)}")
print(F"the reciprocal-space grid-step in each dimension: {Cpp2Py(wfc.dk)}")
```
### Exercise 1:
What is the upper boundary of reciprocal space?
Grid mapping : the wavefunctions are stored in a consecutive order.
To convert the single integer (which is just an order of the point in a real or reciprocal space) from
the indices of the point on the 1D grid in each dimensions, we use the mapping below:
e.g. gmap[1] = [0, 1, 0, 0] means that the second (index 1) entry in the PSI array below corresponds to
a grid point that is first (lower boundary) in dimensions 0, 2, and 3, but is second (index 1) in the
dimension 1. Same for the reciprocal space
<a name="gmap-1"></a>
```
for i in range(10):
print(F"the point {i} corresponds to the grid indices = {Cpp2Py(wfc.gmap[i]) }")
```
Analogously, the inverse mapping of the indices of the point on the axes of all dimensions to the sequential number:
<a name="imap-1"></a>
```
for i in range(10):
print(F"the point {i} corresponds to the grid indices = { wfc.imap( Py2Cpp_int([i]) ) }")
```
### 3.2. Let's run the above examples for a 2D case:
<a name="Wfcgrid2-2"></a> <a name="gmap-2"></a> <a name="imap-2"></a>
```
# Same checks for a 2D grid (note the much coarser grid-step of 1 per dimension).
wfc2 = Wfcgrid2(Py2Cpp_double([-15.0, -15.0]), Py2Cpp_double([15.0, 15.0]), Py2Cpp_double([1, 1]), num_el_st)
print(F"number of quantum states: {wfc2.nstates}")
print(F"number of nuclear degrees of freedom: {wfc2.ndof}")
print(F"the total number of grid points: {wfc2.Npts}")
print(F"the number of grid points in each dimension: {Cpp2Py(wfc2.npts)}")
print(F"the lower boundary of the real-space grid in each dimension: {Cpp2Py(wfc2.rmin)}")
print(F"the upper boundary of the real-space grid in each dimension: {Cpp2Py(wfc2.rmax)}")
print(F"the real-space grid-step in each dimension: {Cpp2Py(wfc2.dr)}")
print(F"the lower boundary of the reciprocal-space grid in each dimension: {Cpp2Py(wfc2.kmin)}")
print(F"the reciprocal-space grid-step in each dimension: {Cpp2Py(wfc2.dk)}")
# Forward mapping: flat point index -> per-dimension grid indices.
for i in range(10):
    print(F"the point {i} corresponds to the grid indices = {Cpp2Py(wfc2.gmap[i]) }")
# Inverse mapping: per-dimension grid indices -> flat point index.
for i in range(10):
    print(F"the point {i} corresponds to the grid indices = { wfc2.imap( Py2Cpp_int([i, i]) ) }")
```
### 3.3. Add a wavefunction to the grid
This can be done by sequentially adding either Gaussian wavepackets or the Harmonic osccillator eigenfunctions to the grid with the corresponding weights.
Adding of such functions is done with for instance:
`void add_wfc_HO(vector<double>& x0, vector<double>& px0, vector<double>& alpha, int init_state, vector<int>& nu, complex<double> weight, int rep)`
Here,
* `x0` - is the center of the added function
* `p0` - it's initial momentum (if any)
* `alpha` - the exponent parameters
* `init_state` - specialization of the initial electronic state
* `nu` - the selector of the HO eigenstate to be added
* `weight` - the amplitude with which the added function enters the superpositions, doesn't have to lead to a normalized function, the norm is included when computing the properties
* `rep` - representation
The variables x0, p0, etc. should have the dimensionality comparable to that of the grid.
For instance, in the example below we add the wavefunction (single HO eigenstate) to the 1D grid
<a name="add_wfc_HO-1"></a> <a name="norm-1"></a>
```
# Add the ground-state (nu = 0) HO eigenfunction, centred at x0 = 0 with zero
# initial momentum, to electronic state 0 in the diabatic representation (rep = 0).
x0 = Py2Cpp_double([0.0])
p0 = Py2Cpp_double([0.0])
alphas = Py2Cpp_double([1.0])
nu = Py2Cpp_int([0])
el_st = 0
rep = 0
weight = 1.0+0.0j
wfc.add_wfc_HO(x0, p0, alphas, el_st, nu, weight, rep)
print(F" norm of the diabatic wfc = {wfc.norm(0)} and norm of the adiabatic wfc = {wfc.norm(1)}")
```
We can see that the wavefunction is pretty much normalized - this is because we have only added a single wavefunction which is already normalized.
Also, note how the norm of the diabatic wavefunction is 1.0, but that of the adiabatic is zero - this is because we have added the wavefunction only in the diabatic representation (`rep = 0`) and haven't yet run any calculations to do any updates of the other (adiabatic) representation
### Exercise 2
<a name="add_wfc_Gau-1"></a>
Use the `add_wfc_Gau` function to add several Gaussians to the grid.
### Exercise 3
Initialize the wavefunction as the superposition: $|0> - 0.5 |1> + 0.25i |2 >$
Is the resulting wavefunction normalized?
Use the `normalize()` method of the `Wfcgrid2` class to normalize it
<a name="normalize-1"></a>
### 3.4. A more advanced example: adding an arbitrary wavefunctions
using the `add_wfc_ARB` method
All we need to do is to set up a Python function that would take `vector<double>` as the input for coordinates, a Python dictionary for parameters, and it would return a `CMATRIX(nstates, 1)` object containing energies of all states as the function of the multidimensional coordinate.
Let's define the one:
```
def my_2D_sin(q, params):
    """Radially symmetric 2D sine potential with a Gaussian damping envelope.

    Args:
        q: coordinate object; q.get(0,0) and q.get(1,0) are the x and y values.
        params: dict with keys "A" (amplitude), "alpha" (damping) and
            "omega" (frequency).

    Returns:
        A 1x1 CMATRIX holding 0.5*A*sin(omega*r2)*exp(-alpha*r2),
        where r2 = x**2 + y**2.
    """
    x = q.get(0, 0)
    y = q.get(1, 0)
    r2 = x ** 2 + y ** 2
    amplitude = params["A"]
    damping = params["alpha"]
    frequency = params["omega"]
    potential = CMATRIX(1, 1)
    potential.set(0, 0, 0.5 * amplitude * math.sin(frequency * r2) * math.exp(-damping * r2))
    return potential
```
Now, we can add the wavefunction to that grid using:
`void add_wfc_ARB(bp::object py_funct, bp::object params, int rep)`
<a name="add_wfc_ARB-1"></a>
```
rep = 0
wfc2.add_wfc_ARB(my_2D_sin, {"A":1, "alpha":1.0, "omega":1.0}, rep)
print(F" norm of the diabatic wfc = {wfc2.norm(0)} and norm of the adiabatic wfc = {wfc2.norm(1)}")
```
As we can see, this wavefunction is not normalized.
We can normalize it using `normalize(int rep)` method with `rep = 0` since we are working with the diabatic representation
<a name="normalize-2"></a>
```
wfc2.normalize(0)
print(F" norm of the diabatic wfc = {wfc2.norm(0)} and norm of the adiabatic wfc = {wfc2.norm(1)}")
```
### 3.5. Accessing wavefunction and the internal data
Now that we have initialized the wavefunction, we can access the wavefunction
<a name="PSI_dia-1"></a> <a name="PSI_adi-1"></a>
```
for i in range(10):
print(F"diabatic wfc = {wfc.PSI_dia[500+i].get(0,0) } adiabatic wfc = {wfc.PSI_adi[500+i].get(0,0) }")
```
We can also see what the reciprocal of the wavefunctions are.
<a name="reciPSI_dia-1"></a> <a name="reciPSI_adi-1"></a>
```
for i in range(10):
print(F"diabatic wfc = {wfc.reciPSI_dia[500+i].get(0,0) } adiabatic wfc = {wfc.reciPSI_adi[500+i].get(0,0) }")
```
### 3.6. Update the reciprocal of the initial wavefunction
This is needed for computing some properties, and also as the initialization of the dynamics
<a name="update_reciprocal-1"></a>
```
wfc.update_reciprocal(rep)
```
Now, since we have computed the reciprocal of the wavefunction (by doing an FFT of the real-space wfc), we can access those numbers (still in the diabatic representation only)
```
for i in range(10):
print(F"diabatic wfc = {wfc.reciPSI_dia[500+i].get(0,0) } adiabatic wfc = {wfc.reciPSI_adi[500+i].get(0,0) }")
```
### 3.7. Compute the Hamiltonian on the grid
The nice thing is - we can define any Hamiltonian function right in Python (this is done in [section 1]() ) and pass that function, together with the dictionary of the corresponding parameters to the `update_Hamiltonian` method.
Here, we define the force constant of the potential to be consistent with the alpha of the initial Gaussian wavepacket and the mass of the particle, as is done in any Quantum chemistry textbooks.
<a name="update_Hamiltonian-1"></a>
```
masses = Py2Cpp_double([2000.0])
omega = alphas[0]/masses[0]
k = masses[0] * omega**2
wfc.update_Hamiltonian(harmonic1D, {"k": k}, rep)
```
After this step, the internal storage will also contain the Hamitonians computed at the grid points:
<a name="Hdia-1"></a>
```
for i in range(10):
print(F"diabatic Hamiltonian (potential only) = {wfc.Hdia[500+i].get(0,0) } ")
```
### 3.8. Computing properties
Now, when the Hamiltonian is evaluated on the grid, we can compute various properties.
In this example, we use the wavefunction represented in the diabatic basis
<a name="norm-2"></a> <a name="e_kin-1"></a> <a name="e_pot-1"></a> <a name="e_tot-1"></a> <a name="get_pow_p-1"></a>
```
rep = 0
print( "Norm = ", wfc.norm(rep) )
print( "Ekin = ", wfc.e_kin(masses, rep) )
print( "Expected kinetic energy = ", 0.5*alphas[0]/(2.0*masses[0]) )
print( "Epot = ", wfc.e_pot(rep) )
print( "Expected potential energy = ", (0.5*k/alphas[0])*(0.5 + nu[0]) )
print( "Etot = ", wfc.e_tot(masses, rep) )
print( "Expected total energy = ", omega*(0.5 + nu[0]) )
p2 = wfc.get_pow_p(0, 2);
print( "p2 = ", p2.get(0).real )
print( "p2/2*m = ", p2.get(0).real/(2.0 * masses[0]) )
```
We can also compute the populations of all states and resolve it by the spatial region too:
<a name="get_pops-1"></a> <a name="get_pops-2"></a>
```
p = wfc.get_pops(0).get(0,0)
print(F" population of diabatic state 0 of wfc in the whole region {p}")
left, right = Py2Cpp_double([-15.0]), Py2Cpp_double([0.0])
p = wfc.get_pops(0, left, right).get(0,0)
print(F" population of diabatic state 0 of wfc in the half of the original region {p}")
```
### 3.9. Converting between diabatic and adiabatic representations
The transformation matrix `wfc.U` is computed when we compute the real-space propagator `wfc.update_propagator_H`
For the purposes of the adi-to-dia transformation, it doesn't matter what value for dt is used in that function.
<a name="update_propagator_H-1"></a>
```
wfc.update_propagator_H(0.0)
```
Now, we can access the transformation matrix - one for each grid point.
Note, in this tutorial we deal with the 1 electronic state, so all the transformation matrices are just the identity ones
<a name="U-1"></a>
```
for i in range(10):
print(F"dia-to-adi transformation matrix at point {500+i}\n")
data_outs.print_matrix(wfc.U[500+i])
```
Now, we can update the real-space adiabatic wavefunction and then its reciprocal for the adiabatic representation (`rep = 1`):
<a name="update_adiabatic-1"></a> <a name="update_reciprocal-1"></a>
```
wfc.update_adiabatic()
wfc.update_reciprocal(1)
```
And compute the properties but now in the adiabatic basis
```
for i in range(10):
print(F"diabatic wfc = {wfc.PSI_dia[500+i].get(0,0) } adiabatic wfc = {wfc.PSI_adi[500+i].get(0,0) }")
for i in range(10):
print(F"diabatic wfc = {wfc.reciPSI_dia[500+i].get(0,0) } adiabatic wfc = {wfc.reciPSI_adi[500+i].get(0,0) }")
print( "Norm = ", wfc.norm(1) )
print( "Ekin = ", wfc.e_kin(masses, 1) )
print( "Expected kinetic energy = ", 0.5*alphas[0]/(2.0*masses[0]) )
print( "Epot = ", wfc.e_pot(1) )
print( "Expected potential energy = ", (0.5*k/alphas[0])*(0.5 + nu[0]) )
print( "Etot = ", wfc.e_tot(masses, 1) )
print( "Expected total energy = ", omega*(0.5 + nu[0]) )
p2 = wfc.get_pow_p(1, 2);
print( "p2 = ", p2.get(0).real )
print( "p2/2*m = ", p2.get(0).real/(2.0 * masses[0]) )
p = wfc.get_pops(1).get(0,0)
print(F" population of adiabatic state 0 of wfc in the whole region {p}")
left, right = Py2Cpp_double([-15.0]), Py2Cpp_double([0.0])
p = wfc.get_pops(1, left, right).get(0,0)
print(F" population of adiabatic state 0 of wfc in the half of the original region {p}")
```
## 4. Showcase: computing energies of the HO eigenstates
<a name="ho_showcase"></a>[Back to TOC](#TOC)
<a name="energy-use-case"></a>
We, of course, know all the properties of the HO eigenstates analytically. Namely, the energies should be:
\\[ E_n = \hbar \omega (n + \frac{1}{2}) \\]
Let's see if we can also get them numerically
```
for n in [0, 1, 2, 3, 10, 20]:
wfc = Wfcgrid2(Py2Cpp_double([-15.0]), Py2Cpp_double([15.0]), Py2Cpp_double([0.01]), num_el_st)
nu = Py2Cpp_int([n])
wfc.add_wfc_HO(x0, p0, alphas, el_st, nu, 1.0+0.0j, rep)
wfc.update_reciprocal(rep)
wfc.update_Hamiltonian(harmonic1D, {"k": k}, rep)
print( "========== State %i ==============" % (n) )
print( "Etot = ", wfc.e_tot(masses, rep) )
print( "Expected total energy = ", omega*(0.5 + nu[0]) )
```
## 5. Dynamics: computed with SOFT method
<a name="soft_dynamics"></a>[Back to TOC](#TOC)
<a name="tdse-solution"></a>
### 5.1. Initialization
As usual, let's initialize the grid and populate it with some wavefunction
In this case, we start with a superposition of 2 HO eigenstates, so the initial wavefunction is not stationary with respect to the chosen potential (or we won't be able to see any dynamics)
As in the examples above, we update the reciprocal wavefunction and then the Hamiltonian
<a name="add_wfc_HO-2"></a> <a name="update_reciprocal-2"></a> <a name="update_Hamiltonian-2"></a>
```
wfc = Wfcgrid2(Py2Cpp_double([-15.0]), Py2Cpp_double([15.0]), Py2Cpp_double([0.01]), num_el_st)
wfc.add_wfc_HO(x0, p0, alphas, el_st, Py2Cpp_int([0]) , 1.0+0.0j, rep)
wfc.add_wfc_HO(x0, p0, alphas, el_st, Py2Cpp_int([1]) , 1.0+0.0j, rep)
wfc.update_reciprocal(rep)
wfc.update_Hamiltonian(harmonic1D, {"k": k}, rep)
```
### 5.2. Update the propagators
To compute the quantum dynamics on the grid, all we need to do is first to compute the propagators - the matrices that advance the wavefunction in real and reciprocal spaces.
The split-operator Fourier-transform (SOFT) method dates back to Kosloff & Kosloff and is basically the following:
If the Hamiltonian is given by:
\\[ H = K + V \\]
Then, the solution of the TD-SE:
\\[ i \hbar \frac{\partial \psi}{\partial t} = H \psi \\]
is given by:
\\[ \psi(t) = exp(-i \frac{H t}{\hbar} ) \psi(0) \\]
Of course, in practice we compute the state advancement by only small time increment \\[ \Delta t \\] as:
\\[ \psi(t + \Delta t) = exp(-i \frac{H \Delta t}{\hbar} ) \psi(t) \\]
So it all boils down to computing the propagator
\\[ exp(-i \frac{H \Delta t}{\hbar} ) \\]
This is then done by the Trotter splitting technique:
\\[ exp(-i \frac{H \Delta t}{\hbar} ) \approx exp(-i \frac{V \Delta t}{2 \hbar} ) exp(-i \frac{K \Delta t}{\hbar} ) exp(-i \frac{V \Delta t}{2 \hbar} ) \\]
In the end, we need to compute the operators $ exp(-i \frac{V \Delta t}{2 \hbar} ) $ and
$exp(-i \frac{K \Delta t}{\hbar} )$
This is done by:
<a name="update_propagator_H-2"></a> <a name="update_propagator_K-1"></a>
```
dt = 10.0
wfc.update_propagator_H(0.5*dt)
wfc.update_propagator_K(dt, masses)
```
### 5.3. Compute the dynamics
The propagators in real and reciprocal spaces are stored in the class object, so we can now simply apply them many times to our starting wavefunction:
This is done with the `SOFT_propagate()` function.
Note how we use the following functions to compute the corresponding properties:
* `get_pow_q` - for \<q\>
* `get_pow_p` - for \<p\>
* `get_den_mat` - for $\rho_{ij} = |i><j|$
and so on
By default, the dynamics is executed in the diabatic representation, so for us to access the adiabatic properties (e.g. populations of the adiabatic states), we convert the propagated wavefunctions to the adiabatic representation with
* `update_adiabatic`
<a name="get_pow_q-1"></a> <a name="get_pow_p-2"></a> <a name="get_den_mat-1"></a>
<a name="e_kin-2"></a> <a name="e_pot-2"></a> <a name="e_tot-2"></a> <a name="SOFT_propagate-1"> </a>
```
nsteps = 100
for step in range(nsteps):
wfc.SOFT_propagate()
q = wfc.get_pow_q(0, 1).get(0).real
p = wfc.get_pow_p(0, 1).get(0).real
# Diabatic is the rep used for propagation, so we need to
# convert wfcs into adiabatic one
wfc.update_adiabatic()
Ddia = wfc.get_den_mat(0) # diabatic density matrix
Dadi = wfc.get_den_mat(1) # adiabatic density matrix
p0_dia = Ddia.get(0,0).real
p0_adi = Dadi.get(0,0).real
print("step= ", step, " Ekin= ", wfc.e_kin(masses, rep),
" Epot= ", wfc.e_pot(rep), " Etot= ", wfc.e_tot(masses, rep),
" q= ", q, " p= ", p, " p0_dia= ", p0_dia, " p0_adi= ", p0_adi )
```
### Exercise 4
Write the scripts to visualize various quantities computed by the dynamics
### Exercise 5
Compute the population in a certain region of space and observe how it evolves during the dynamics
### Exercise 6
Compute the dynamics of the 2D wavepacket we set up in the above examples.
### Exercise 7
Explore the behavior of the dynamics (e.g. conservation of energy, etc.) as you vary the initial conditions (e.g. the parameters of the initial wavefunction), the integration parameters (e.g. dt), and the grid properties (grid spacing and the boundaries)
| github_jupyter |
> **提示**:欢迎参加“调查数据集”项目!引用段会添加这种提示,帮助你制定调查方法。提交项目之前,最后浏览一下报告,将这一段删除,以保持报告简洁。首先,需要双击这个 Markdown 框,将标题更改为与数据集和调查相关的标题。
# 项目:TMDB电影集调查
## 目录
<ul>
<li><a href="#intro">简介</a></li>
<li><a href="#wrangling">数据整理</a></li>
<li><a href="#eda">探索性数据分析</a></li>
<li><a href="#conclusions">结论</a></li>
</ul>
<a id='intro'></a>
## 简介
> 本数据集中包含 1 万条电影信息,信息来源为“电影数据库”(TMDb,The Movie Database),包括用户评分和票房。“演职人员 (cast)”、“电影类别 (genres)”等数据列包含由竖线字符(|)分隔的多个数值。“演职人员 (cast) ”列中有一些奇怪的字符。先不要清洁它们,你可以保持原样,不去管它们。
> **提示**:在这一段报告中对你选择进行分析的数据集进行简要介绍。在本段末尾,对你计划在报告过程中探索的问题进行描述。自己尝试建立至少一个因变量和三个自变量的分析报告。如果你不确定要问什么问题,务必熟悉数据集、数据集变量以及数据集上下文,以便确定要探索的问题。
> 如果尚未选择和下载数据,务必先进行这一步,再回到这里。如需在这个工作区中处理数据,还需要将其上传到工作区。因此,请单击左上角的 jupyter 图标,回到工作区目录。右上角有一个‘上传’按钮,可以将你的数据文件添加到工作区。然后单击 .ipynb 文件名,回到这里。
```
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
%matplotlib inline
%config InlineBackend.fig_format = 'retina'
```
<a id='wrangling'></a>
## 数据整理
> **提示**:在这一段报告中载入数据,检查简洁度,然后整理和清理数据集,以进行分析。务必将步骤仔细归档,并确定清理决策是否正确。
### 常规属性
```
# Load the data and take a quick look at the first rows
df = pd.read_csv('tmdb-movies.csv')
df.head(2)
# Column dtypes, non-null counts and summary statistics
df.info()
df.describe()
```
> **提示**:_不应_在每个框中进行太多操作。可以自由创建框,进行数据探索。在这个项目中,可以在初始 notebook 中进行大量探索操作。不要求对其进行组织,但务必仔细阅读备注,理解每个代码框的用途。完成分析之后,可以创建 notebook 副本,在其中去除多余数据,组织步骤,从而形成结构连贯、紧密的报告。
> **提示**:务必向你的读者告知你在调查中采取的步骤。在每个代码框或每组相关代码框后面,用 markdown 框对前面的框中的调查结果向读者进行说明。尽量做到这一点,以便读者理解后续框中的内容。
### 数据整理
观察数据,可以看出大多数电影没有 `homepage`,而且具体数据,我不关心,我只关注有和没有
```
# Add a boolean column flagging whether the movie has a homepage
# (only presence matters for the analysis, not the URL itself)
df['has_homepage'] = df.homepage.notnull()
df['has_homepage'].sum()
```
删除对分析问题没帮助的描述性栏目
```
# Drop purely descriptive columns that don't help the analysis:
# id, imdb_id, homepage, tagline, overview
df.drop(['id', 'imdb_id', 'homepage', 'tagline', 'overview'], axis=1,inplace=True)
df.head()
df.info()
```
把发布时间转化成pandas的时间格式
```
def trans_to_datetime(row):
    """Convert a row's release date into a pandas Timestamp.

    Combines the 'M/D/YY'-formatted ``release_date`` column (month and day)
    with the reliable ``release_year`` column (four-digit year).

    Fix: uses ``pd.Timestamp`` instead of ``pd.datetime``, which was
    deprecated and removed in pandas 2.0.
    """
    (month, day, _) = row['release_date'].split('/')
    year = row['release_year']
    return pd.Timestamp(int(year), int(month), int(day))
df.release_date = df.apply(trans_to_datetime, axis=1)
df.release_date.min(), df.release_date.max()
```
转换成功,都在合理的范围内
统一 budget 和 revenue 单位为 `int`
```
# Cast the inflation-adjusted budget and revenue to int for a uniform dtype
df.budget_adj = df.budget_adj.astype(int)
df.revenue_adj = df.revenue_adj.astype(int)
type(df.budget_adj[1]), type(df.revenue_adj[0])
df.describe()
df.head(3)
```
比较一下调整后的实际预算 和 初期的预算的比例 (用百分比显示x坐标)
```
((df.budget_adj - df.budget)/df.budget).hist(bins = 100);
```
比较一下调整后的实际收入 和 初期的预估收入的比例 (用百分比显示x坐标)
```
((df.revenue_adj - df.revenue)/df.revenue).hist(bins = 100);
```
发现有大量预算为 0 的电影
```
df.query('budget == 0')
```
# 删除 budget == 0 的数据
调整数据排序,按发布时间排序
```
df.sort_values('release_date',ascending=False,inplace=True)
df
```
调整 列标签的顺序
```
df.columns
# NOTE(review): the heading above says the goal is to reorder the column
# labels, but set_index moves ALL of these columns into a MultiIndex,
# leaving no data columns. The later to_csv/read_csv round-trip is what
# turns them back into ordinary columns. df.reindex(columns=[...]) would
# express the intent directly — confirm before changing, since the saved
# CSV layout depends on this behaviour.
df.set_index(['original_title', 'director', 'cast', 'production_companies', 'genres', 'keywords', 'runtime',
              'release_date', 'release_year', 'budget', 'budget_adj', 'revenue', 'revenue_adj',
              'vote_count', 'vote_average', 'popularity', 'has_homepage'], inplace=True)
df.head()
```
保存整理后的数据
```
df.to_csv('clean_movies.csv')
df_clean = pd.read_csv('clean_movies.csv')
df_clean.head()
df_clean.dtypes
df_clean.release_date = pd.to_datetime(df_clean.release_date, infer_datetime_format=True)
df_clean.info()
```
<a id='eda'></a>
## 探索性数据分析
> **提示**:整理和清理数据之后,现在可以进行探索。计算统计值,创建视图,解决你在简介段提出的研究问题。建议采用系统化方法。一次探索一个变量,然后探索变量之间的关系。
数据探索
每个栏目的唯一值
```
# Number of distinct values in each column
for c in df_clean.columns:
    print('{} has {} different value'.format(c,len(df_clean[c].unique())))
# How many distinct seasons (years) does the data cover?
years = df_clean.release_year.unique()
print('从{}年到{}年,总共有{}个年份的电影'.format(min(years), max(years), len(years)))
```
投票得分 和 受欢迎程度 的关系?
```
# 绘制评分和受欢迎程度的散点图
plt.scatter(df_clean.vote_average,df_clean.popularity)
plt.xlabel('vote_average')
plt.ylabel('popularity')
plt.title('scatter of vote_average and popularity');
```
可以看出 评分 和 受欢迎 呈正相关,但有一些比较奇怪的数据,
- 右上角有三个异常受欢迎的电影
- 还有评分高于8.5的电影好像都不怎么受欢迎
```
# 绘制投票数 和 受欢迎程度的散点图
plt.scatter(df_clean.vote_count, df_clean.popularity)
plt.xlabel('vote_count')
plt.ylabel('popularity')
plt.title('scatter of vote_count and popularity');
```
投票数 和 受欢迎程度 也是正相关,不过仍然有三个很显眼的数据
查找 受欢迎程度 大于20的电影
```
df_clean.query('popularity > 20')
```
三部电影都是 科幻 和 冒险 类电影
### 每年最受欢迎的电影类别是哪些?
求出每年最受欢迎的值
```
max_popular_of_the_year = df_clean.groupby('release_year', as_index=False)['popularity'].max()
max_popular_of_the_year
```
重命名,并把数集进行合并
```
max_popular_of_the_year=max_popular_of_the_year.rename(index=str, columns={'popularity':'popularity_max'})
max_popular_of_the_year.head(2)
df_clean = df_clean.merge(max_popular_of_the_year,on='release_year')
df_clean.head(2)
```
把 popularity_max 转化成bool值方便查询
```
df_clean.popularity_max = df_clean.popularity == df_clean.popularity_max
df_clean.popularity_max.dtype
```
### 查询到每一年最受欢迎的电影
```
df_clean.query('popularity_max == True')
```
### 票房高的电影有哪些特点?
```
# Continue exploring the data to address the additional research questions.
# If there are other questions to investigate,
# add more headings below as needed.
```
<a id='conclusions'></a>
## 结论
> **提示**:最后,总结你的调查结果。确保了解探索结果的限制。如果尚未进行任何统计检验,不要做出任何统计结论。切记不要根据相互关系推导出因果关系!
> **提示**:如果对报告满意,应将其副本保存为 HTML 或 PDF 形式。导出报告之前请检查一遍,确保报告流程完整。应删除所有类似的“提示”引用段,以保持报告简洁。还需要查看课程结尾的项目提交页的项目审阅规范。
> 如需将报告导出到工作区,应运行下面的代码框。如果正确,会返回代码 0,工作区目录下会生成 .html 文件(单击左上角的 jupyter 图标)。也可以通过 **文件** > **下载为** 子菜单下载 html 报告,然后手动上传到工作区目录。完成之后,可以单击右下角的“提交项目”,提交你的项目。恭喜!
```
from subprocess import call
call(['python', '-m', 'nbconvert', 'Investigate_a_Dataset.ipynb'])
```
| github_jupyter |
## Allele filp QC YML generator
This module takes in a table of sumstats with the columns #chr, theme1, theme2, theme3 — each row corresponding to one chromosome and the sumstats of that chromosome — and generates a list of YAML files to be used
```
[global]
# List of path to the index of sumstat, each correspond to 1 recipe file documenting the path to the sumstat of each chromosome.
parameter: sumstat_list = paths
# List of names that corresponding to each of the studies
parameter: name = list
parameter: cwd = path
import pandas as pd
input_list = sumstat_list
# Read the first study's index, then merge every other study's index on
# chromosome so each row carries all studies' per-chromosome sumstat paths
sumstat_list = pd.read_csv(input_list[0],sep = "\t")
sumstat_list = sumstat_list.sort_values('#chr')
for x in range(1,len(input_list)):
    sumstat_list = sumstat_list.merge(pd.read_csv(input_list[x],sep = "\t"), on = "#chr")
# Capture the column-mapping metadata, then drop it from the path table
sumstat_meta = sumstat_list.filter(regex='column').iloc[0].values.tolist()
sumstat_list = sumstat_list.drop(sumstat_list.filter(regex='column').columns,axis = 1)
sumstat_list.columns = ["#chr"] + name
# One row per chromosome: [chr, study1_path, study2_path, ...]
sumstat_inv = sumstat_list.values.tolist()
names = "_".join(name)
## The target vcf file, with GENE,CHR,POS,A0,A1 as columns, should contains all snps
parameter: TARGET_list = path("./")
if TARGET_list.is_file():
    TARGET_list = sumstat_list.merge(pd.read_csv(TARGET_list,sep = "\t"), on = "#chr" )[["TARGET"]].values.tolist()
## Assuming all the input sumstat are using the same header
parameter: CHR = "chrom"
parameter: POS = "pos"
parameter: A0 = "ref"
parameter: A1 = "alt"
parameter: SNP = "variant_id"
parameter: STAT = "beta"
parameter: SE = "se"
parameter: P = "pval"
parameter: KEEP_AMBIGUOUS = "True"
parameter: GENE = "phenotype_id"
parameter: container = ""
# Emit one YAML per chromosome mapping each study's sumstat (INPUT) to the
# allele-flip TARGET reference and to the output directory (OUTPUT).
[yml_generator]
input: for_each = "sumstat_inv",group_with = "TARGET_list"
output: f'{cwd:a}/{names}.{_sumstat_inv[0]}/{names}.{_sumstat_inv[0]}.yml'
python: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout' , container = container
    import os
    import yaml
    import pandas as pd
    output = dict()
    ## Input dict
    output["INPUT"] = [pd.read_csv(y,"\t",index_col = 0,names = [x],header = 0 ).to_dict() for x,y in zip($[_sumstat_inv[1:len(_sumstat_inv)]],$[sumstat_meta])]
    ## Target dict
    output["TARGET"] = ["$[_TARGET_list if _TARGET_list.is_file() else _sumstat_inv[1]]"] ## Allow for both external TARGET file or using one of the sumstat as TARGET
    ## Output dict
    output["OUTPUT"] = [$[_output:dr]]
    with open($[_output:ar], 'w') as f:
        yaml.dump(output,f,sort_keys = False)
# Collect the per-chromosome YAMLs into an index file, and write the table
# of expected QCed sumstat paths (one column per study).
[yml_list]
input: output_from("yml_generator"),group_by = "all"
output: f'{cwd:a}/yml_list.txt', f'{cwd:a}/qced_sumstat_list.txt'
import pandas as pd
yml_df = pd.DataFrame({"#chr" : sumstat_list["#chr"].values.tolist() , "dir" : _input})
yml_df.to_csv(_output[0],sep = "\t",index = 0)
data_dir_tmp = pd.Series(_input)
data_dir = [f'{x:d}' for x in data_dir_tmp ]
theme = sumstat_list.columns.values.tolist()[1:]
# Rewrite each study's sumstat path so it points inside the QC output folder
for i in theme:
    sumstat_list = sumstat_list.assign(**{i : data_dir+ pd.Series([f'/{path(x):b}' for x in sumstat_list[i].values.tolist()]) } )
sumstat_list.to_csv(_output[1],sep = "\t", index = 0)
#chr TARGET
1 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr1.txt
10 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr10.txt
11 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr11.txt
12 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr12.txt
13 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr13.txt
14 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr14.txt
15 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr15.txt
16 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr16.txt
17 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr17.txt
18 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr18.txt
19 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr19.txt
2 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr2.txt
20 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr20.txt
21 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr21.txt
22 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr22.txt
3 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr3.txt
4 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr4.txt
5 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr5.txt
6 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr6.txt
7 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr7.txt
8 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr8.txt
9 /mnt/mfs/statgen/snuc_pseudo_bulk/eight_tissue_analysis/8test/TARGET_ref_chr9.txt
```
| github_jupyter |
# Predicting the Outcome of Cricket Matches
## Introduction
In this project, we shall build a model which predicts the outcome of cricket matches in the Indian Premier League using data about matches and deliveries.
### Data Mining:
* Season : 2008 - 2015 (8 Seasons)
* Teams : DD, KKR, MI, RCB, KXIP, RR, CSK (7 Teams)
* Neglect matches that have inconsistencies such as No Result, Tie, D/L Method, etc.
### Features:
* Average Batsman Rating (Strike Rate)
* Average Bowler Rating (Wickets per Run)
* Player of the Match Awards
* Previous Encounters - Win by runs, Win by Wickets
* Recent form
### Prediction Model
* Logistic Regression using sklearn
* K-Nearest Neighbors using sklearn
```
%matplotlib inline
import numpy as np # imports a fast numerical programming library
import matplotlib.pyplot as plt #sets up plotting under plt
import pandas as pd #lets us handle data as dataframes
#sets up pandas table display
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set(style="whitegrid", color_codes=True)
from __future__ import division
```
## Data Mining
```
# Reading in the data
allmatches = pd.read_csv("../data/matches.csv")
alldeliveries = pd.read_csv("../data/deliveries.csv")
allmatches.head(10)
# Selecting Seasons 2008 - 2015
matches_seasons = allmatches.loc[allmatches['season'] != 2016]
deliveries_seasons = alldeliveries.loc[alldeliveries['match_id'] < 518]
# Selecting teams DD, KKR, MI, RCB, KXIP, RR, CSK
matches_teams = matches_seasons.loc[(matches_seasons['team1'].isin(['Kolkata Knight Riders', \
'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \
'Mumbai Indians', 'Kings XI Punjab'])) & (matches_seasons['team2'].isin(['Kolkata Knight Riders', \
'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \
'Mumbai Indians', 'Kings XI Punjab']))]
matches_team_matchids = matches_teams.id.unique()
deliveries_teams = deliveries_seasons.loc[deliveries_seasons['match_id'].isin(matches_team_matchids)]
# Sanity check: list the franchises that survived the filtering above.
# Fix: single-argument print(...) calls are valid in both Python 2 and 3;
# the original bare print statements were Python-2-only syntax.
print("Teams selected:\n")
for team in matches_teams.team1.unique():
    print(team)
# Neglect matches with inconsistencies like 'No Result' or 'D/L Applied'
matches = matches_teams.loc[(matches_teams['result'] == 'normal') & (matches_teams['dl_applied'] == 0)]
matches_matchids = matches.id.unique()
deliveries = deliveries_teams.loc[deliveries_teams['match_id'].isin(matches_matchids)]
# Verifying consistency between datasets
(matches.id.unique() == deliveries.match_id.unique()).all()
```
## Building Features
```
# Batsman Strike Rate Calculation
# Team 1: Batting First; Team 2: Fielding First
def getMatchDeliveriesDF(match_id):
    """Return all deliveries bowled in the match with the given id."""
    return deliveries.loc[deliveries['match_id'] == match_id]
def getInningsOneBatsmen(match_deliveries):
    """First five distinct batsmen to appear in innings 1 (team batting first)."""
    first_innings = match_deliveries.loc[match_deliveries['inning'] == 1]
    return first_innings.batsman.unique()[:5]
def getInningsTwoBatsmen(match_deliveries):
    """First five distinct batsmen to appear in innings 2 (chasing side)."""
    second_innings = match_deliveries.loc[match_deliveries['inning'] == 2]
    return second_innings.batsman.unique()[:5]
def getBatsmanStrikeRate(batsman, match_id):
    """Career strike rate (runs per 100 balls) of `batsman` over every
    delivery faced before `match_id`; None if he never faced a ball."""
    faced = deliveries.loc[(deliveries['match_id'] < match_id) &
                           (deliveries['batsman'] == batsman)]
    balls_faced = faced.shape[0]
    if balls_faced == 0:
        return None
    return (faced['batsman_runs'].sum() / balls_faced) * 100
def getTeamStrikeRate(batsmen, match_id):
    """Mean career strike rate across `batsmen` before `match_id`.

    Batsmen with no prior deliveries (rate None) are skipped. If nobody has
    any history, np.mean([]) yields nan, preserving the original behaviour.
    Fix: identity comparison `is not None` instead of `!= None` (PEP 8).
    """
    strike_rates = []
    for batsman in batsmen:
        bsr = getBatsmanStrikeRate(batsman, match_id)
        if bsr is not None:  # skip debutants with no data
            strike_rates.append(bsr)
    return np.mean(strike_rates)
def getAverageStrikeRates(match_id):
    """Average prior strike rate of the top-5 batsmen of each side.

    Returns (team1_SR, team2_SR), where team 1 is the side batting first.
    """
    match_deliveries = getMatchDeliveriesDF(match_id)
    innOneBatsmen = getInningsOneBatsmen(match_deliveries)
    innTwoBatsmen = getInningsTwoBatsmen(match_deliveries)
    teamOneSR = getTeamStrikeRate(innOneBatsmen, match_id)
    teamTwoSR = getTeamStrikeRate(innTwoBatsmen, match_id)
    return teamOneSR, teamTwoSR
# Testing Functionality
getAverageStrikeRates(517)
# Bowler Rating : Wickets/Run (Higher the Better)
# Team 1: Batting First; Team 2: Fielding First
def getInningsOneBowlers(match_deliveries):
    """First four distinct bowlers used in innings 1."""
    mask = match_deliveries['inning'] == 1
    return match_deliveries[mask].bowler.unique()[:4]
def getInningsTwoBowlers(match_deliveries):
    """First four distinct bowlers used in innings 2."""
    mask = match_deliveries['inning'] == 2
    return match_deliveries[mask].bowler.unique()[:4]
def getBowlerWPR(bowler, match_id):
    """Career wickets-per-run rating (scaled by 100) for `bowler` before `match_id`.

    Only dismissal kinds credited to the bowler are counted. Returns None
    when the bowler has no prior deliveries or has conceded no runs — the
    rating is undefined in both cases.
    Fix: the original divided by total_runs whenever any ball had been
    bowled, raising ZeroDivisionError for a bowler with a zero-run history.
    """
    balls = deliveries.loc[(deliveries['match_id'] < match_id) & (deliveries['bowler'] == bowler)]
    total_runs = balls['total_runs'].sum()
    total_wickets = balls.loc[balls['dismissal_kind'].isin(['caught', 'bowled', 'lbw',
                                                            'caught and bowled', 'stumped'])].shape[0]
    if balls.shape[0] > 0 and total_runs > 0:
        return (total_wickets / total_runs) * 100
    return None
def getTeamWPR(bowlers, match_id):
    """Mean wickets-per-run rating across `bowlers` before `match_id`.

    Bowlers without a usable history (rating None) are skipped; np.mean([])
    yields nan when nobody has a rating, as before.
    Fix: identity comparison `is not None` instead of `!= None` (PEP 8).
    """
    WPRs = []
    for bowler in bowlers:
        bwpr = getBowlerWPR(bowler, match_id)
        if bwpr is not None:
            WPRs.append(bwpr)
    return np.mean(WPRs)
def getAverageWPR(match_id):
    """Average prior bowler rating (wickets per run) of each side.

    Team 1 bats first, so its own bowlers operate in innings 2 — hence the
    deliberate swap below: team one's rating comes from the innings-2
    bowlers and vice versa. Returns (team1_WPR, team2_WPR).
    """
    match_deliveries = getMatchDeliveriesDF(match_id)
    innOneBowlers = getInningsOneBowlers(match_deliveries)
    innTwoBowlers = getInningsTwoBowlers(match_deliveries)
    teamOneWPR = getTeamWPR(innTwoBowlers, match_id)
    teamTwoWPR = getTeamWPR(innOneBowlers, match_id)
    return teamOneWPR, teamTwoWPR
# testing functionality
getAverageWPR(517)
# MVP Score (Total number of Player of the Match awards in a squad)
def getAllInningsOneBatsmen(match_deliveries):
    """Every distinct batsman who batted in innings 1, in order of appearance."""
    first_innings = match_deliveries.loc[match_deliveries['inning'] == 1]
    return first_innings.batsman.unique()
def getAllInningsTwoBatsmen(match_deliveries):
    """Every distinct batsman who batted in innings 2, in order of appearance."""
    second_innings = match_deliveries.loc[match_deliveries['inning'] == 2]
    return second_innings.batsman.unique()
def getAllInningsOneBowlers(match_deliveries):
    """Every distinct bowler used in innings 1, in order of appearance."""
    mask = match_deliveries['inning'] == 1
    return match_deliveries[mask].bowler.unique()
def getAllInningsTwoBowlers(match_deliveries):
    """Every distinct bowler used in innings 2, in order of appearance."""
    mask = match_deliveries['inning'] == 2
    return match_deliveries[mask].bowler.unique()
def makeSquad(batsmen, bowlers):
    """Build a squad as the union of `batsmen` and `bowlers`.

    Order is preserved (batsmen first), and bowlers who already appear among
    the batsmen are dropped. Returns a numpy array, matching the original
    np.append-based behaviour.
    Fix: the original appended bowlers one np.append call at a time, copying
    the whole array on every iteration (quadratic); a comprehension collects
    the extras first so there are only two array builds.
    """
    extras = [b for b in bowlers if b not in batsmen]
    return np.append(np.append([], batsmen), extras)
def getPlayerMVPAwards(player, match_id):
    """Number of Player-of-the-Match awards won by `player` before `match_id`."""
    return matches.loc[(matches['player_of_match'] == player) & (matches['id'] < match_id)].shape[0]
def getTeamMVPAwards(squad, match_id):
    """Total prior Player-of-the-Match awards held across the whole squad."""
    return sum(getPlayerMVPAwards(player, match_id) for player in squad)
def compareMVPAwards(match_id):
    """Total prior Player-of-the-Match awards held by each full squad.

    A squad is reconstructed as the union of a side's batsmen and the
    bowlers it used (team 1 bats in innings 1 and bowls in innings 2).
    Returns (team1_awards, team2_awards).
    """
    match_deliveries = getMatchDeliveriesDF(match_id)
    innOneBatsmen = getAllInningsOneBatsmen(match_deliveries)
    innTwoBatsmen = getAllInningsTwoBatsmen(match_deliveries)
    innOneBowlers = getAllInningsOneBowlers(match_deliveries)
    innTwoBowlers = getAllInningsTwoBowlers(match_deliveries)
    teamOneSquad = makeSquad(innOneBatsmen, innTwoBowlers)
    teamTwoSquad = makeSquad(innTwoBatsmen, innOneBowlers)
    teamOneAwards = getTeamMVPAwards(teamOneSquad, match_id)
    teamTwoAwards = getTeamMVPAwards(teamTwoSquad, match_id)
    return teamOneAwards, teamTwoAwards
compareMVPAwards(517)
# Prints a comparison between two teams based on squad attributes
def generateSquadRating(match_id):
    """Print a side-by-side comparison of the two squads' engineered features.

    Covers average batting strike rate, bowler wickets-per-run rating and
    career Player-of-the-Match counts. Team one is the side batting first.
    Fix: single-argument print(...) calls work in both Python 2 and 3; the
    original bare print statements were Python-2-only syntax.
    """
    gameday_teams = deliveries.loc[(deliveries['match_id'] == match_id)].batting_team.unique()
    teamOne = gameday_teams[0]
    teamTwo = gameday_teams[1]
    teamOneSR, teamTwoSR = getAverageStrikeRates(match_id)
    teamOneWPR, teamTwoWPR = getAverageWPR(match_id)
    teamOneMVPs, teamTwoMVPs = compareMVPAwards(match_id)
    print("Comparing squads for {} vs {}".format(teamOne, teamTwo))
    print("\nAverage Strike Rate for Batsmen in {} : {}".format(teamOne, teamOneSR))
    print("\nAverage Strike Rate for Batsmen in {} : {}".format(teamTwo, teamTwoSR))
    print("\nBowler Rating (W/R) for {} : {}".format(teamOne, teamOneWPR))
    print("\nBowler Rating (W/R) for {} : {}".format(teamTwo, teamTwoWPR))
    print("\nNumber of MVP Awards in {} : {}".format(teamOne, teamOneMVPs))
    print("\nNumber of MVP Awards in {} : {}".format(teamTwo, teamTwoMVPs))
#Testing Functionality
generateSquadRating(517)
## 2nd Feature : Previous Encounter
# Won by runs and won by wickets (Higher the better)
def getTeam1(match_id):
    """Name of team 1 (bats first) for the given match, as a 1-element array."""
    return matches.loc[matches["id"] == match_id].team1.unique()
def getTeam2(match_id):
    """Name of team 2 (fields first) for the given match, as a 1-element array."""
    return matches.loc[matches["id"] == match_id].team2.unique()
def getPreviousEncDF(match_id):
    """All earlier matches between the two sides of `match_id`, counting both
    home/away orientations."""
    team1 = getTeam1(match_id)
    team2 = getTeam2(match_id)
    return matches.loc[(matches["id"] < match_id) & (((matches["team1"].isin(team1)) & (matches["team2"].isin(team2))) | ((matches["team1"].isin(team2)) & (matches["team2"].isin(team1))))]
def getTeamWBR(match_id, team):
    """Cumulative win-by-runs margin for `team` over all previous encounters."""
    previous = getPreviousEncDF(match_id)
    wins = previous.loc[previous["winner"] == team]
    return wins['win_by_runs'].sum()
def getTeamWBW(match_id, team):
    """Cumulative win-by-wickets margin for `team` over all previous encounters."""
    previous = getPreviousEncDF(match_id)
    wins = previous.loc[previous["winner"] == team]
    return wins['win_by_wickets'].sum()
def getTeamWinPerc(match_id):
    """Percentage of previous encounters won by team1 (0 if they never met)."""
    dF = getPreviousEncDF(match_id)
    timesPlayed = dF.shape[0]
    # .strip("[]") looks like a defensive no-op: unique()[0] already yields
    # the bare team-name string, and the selected IPL team names contain no
    # brackets
    team1 = getTeam1(match_id)[0].strip("[]")
    timesWon = dF.loc[dF["winner"] == team1].shape[0]
    if timesPlayed != 0:
        winPerc = (timesWon/timesPlayed) * 100
    else:
        winPerc = 0
    return winPerc
def getBothTeamStats(match_id):
    """Print the head-to-head history of the two sides of `match_id`:
    number of meetings, team1's win count and %, and each side's cumulative
    win-by-runs / win-by-wickets margins.

    Fix: single-argument print(...) calls work in both Python 2 and 3; the
    original bare print statements were Python-2-only syntax.
    """
    DF = getPreviousEncDF(match_id)
    team1 = getTeam1(match_id)[0].strip("[]")
    team2 = getTeam2(match_id)[0].strip("[]")
    timesPlayed = DF.shape[0]
    timesWon = DF.loc[DF["winner"] == team1].shape[0]
    WBRTeam1 = getTeamWBR(match_id, team1)
    WBRTeam2 = getTeamWBR(match_id, team2)
    WBWTeam1 = getTeamWBW(match_id, team1)
    WBWTeam2 = getTeamWBW(match_id, team2)
    print("Out of {} times in the past {} have won {} times({}%) from {}".format(timesPlayed, team1, timesWon, getTeamWinPerc(match_id), team2))
    print("{} won by {} total runs and {} total wickets.".format(team1, WBRTeam1, WBWTeam1))
    print("{} won by {} total runs and {} total wickets.".format(team2, WBRTeam2, WBWTeam2))
#Testing functionality
getBothTeamStats(517)
# 3rd Feature: Recent Form (Win Percentage of 3 previous matches of a team in the same season)
# Higher the better
def getMatchYear(match_id):
    """Season (year) of the match, as a 1-element array."""
    return matches.loc[matches["id"] == match_id].season.unique()
def getTeam1DF(match_id, year):
    """Team1's last (up to) 3 matches of the given season before this match."""
    team1 = getTeam1(match_id)
    return matches.loc[(matches["id"] < match_id) & (matches["season"] == year) & ((matches["team1"].isin(team1)) | (matches["team2"].isin(team1)))].tail(3)
def getTeam2DF(match_id, year):
    """Team2's last (up to) 3 matches of the given season before this match."""
    team2 = getTeam2(match_id)
    return matches.loc[(matches["id"] < match_id) & (matches["season"] == year) & ((matches["team1"].isin(team2)) | (matches["team2"].isin(team2)))].tail(3)
def _winPerc(wins, total):
    """Win percentage, or 0 when no matches were played (avoids division by zero)."""
    return (wins / total) * 100 if total != 0 else 0

def getTeamWinPercentage(match_id):
    """Recent form: win % of each side over its last 3 same-season matches.

    Returns (team1_win_perc, team2_win_perc).

    Improvements over the original:
    - the redundant four-branch if/elif ladder collapses into one guarded helper;
    - the season is taken via explicit indexing instead of the deprecated
      int(ndarray) conversion (getMatchYear returns a 1-element unique() array).
    """
    year = int(getMatchYear(match_id)[0])
    team1 = getTeam1(match_id)[0].strip("[]")
    team2 = getTeam2(match_id)[0].strip("[]")
    team1DF = getTeam1DF(match_id, year)
    team2DF = getTeam2DF(match_id, year)
    winPercTeam1 = _winPerc(team1DF.loc[team1DF["winner"] == team1].shape[0], team1DF.shape[0])
    winPercTeam2 = _winPerc(team2DF.loc[team2DF["winner"] == team2].shape[0], team2DF.shape[0])
    return winPercTeam1, winPercTeam2
#Testing Functionality
# Smoke-test the recent-form feature on match id 517
getTeamWinPercentage(517)
# Driver that runs every engineered feature for a single match
def getAllFeatures(match_id):
    """Run all feature generators (squad rating, head-to-head stats, recent form) for one match."""
    generateSquadRating(match_id)
    print("\n")
    getBothTeamStats(match_id)
    print("\n")
    getTeamWinPercentage(match_id)
#Testing Functionality
# Smoke-test the combined feature driver on match id 517
getAllFeatures(517)
```
## Adding New Columns for Features in Matches DataFrame
```
#Create Column for Team 1 Winning Status (1 = Won, 0 = Lost)
# Binary response variable for the later classification task
matches['team1Winning'] = np.where(matches['team1'] == matches['winner'], 1, 0)
# New Column for Difference of Average Strike rates (First Team SR - Second Team SR)
# [Negative value means Second team is better]
sr_pairs = [getAverageStrikeRates(match_id) for match_id in matches['id'].unique()]
firstTeamSR = [first for first, _ in sr_pairs]
secondTeamSR = [second for _, second in sr_pairs]
firstSRSeries = pd.Series(firstTeamSR)
secondSRSeries = pd.Series(secondTeamSR)
matches["Avg_SR_Difference"] = firstSRSeries.values - secondSRSeries.values
# The following cells build one engineered-feature column per match; each loop
# walks matches['id'].unique() in order, so the resulting Series align with rows.
# New Column for Difference of Wickets Per Run (First Team WPR - Second Team WPR)
# [Negative value means Second team is better]
firstTeamWPR = []
secondTeamWPR = []
for i in matches['id'].unique():
    R, S = getAverageWPR(i)
    firstTeamWPR.append(R), secondTeamWPR.append(S)
firstWPRSeries = pd.Series(firstTeamWPR)
secondWPRSeries = pd.Series(secondTeamWPR)
matches["Avg_WPR_Difference"] = firstWPRSeries.values - secondWPRSeries.values
# New column for difference of MVP Awards
# (Negative value means Second team is better)
firstTeamMVP = []
secondTeamMVP = []
for i in matches['id'].unique():
    T, U = compareMVPAwards(i)
    firstTeamMVP.append(T), secondTeamMVP.append(U)
firstMVPSeries = pd.Series(firstTeamMVP)
secondMVPSeries = pd.Series(secondTeamMVP)
matches["Total_MVP_Difference"] = firstMVPSeries.values - secondMVPSeries.values
# New column for Win Percentage of Team 1 in previous encounters
firstTeamWP = []
for i in matches['id'].unique():
    WP = getTeamWinPerc(i)
    firstTeamWP.append(WP)
firstWPSeries = pd.Series(firstTeamWP)
matches["Prev_Enc_Team1_WinPerc"] = firstWPSeries.values
# New column for Recent form(Win Percentage in the current season) of 1st Team compared to 2nd Team
# (Negative means 2nd team has higher win percentage)
firstTeamRF = []
secondTeamRF = []
for i in matches['id'].unique():
    K, L = getTeamWinPercentage(i)
    firstTeamRF.append(K), secondTeamRF.append(L)
firstRFSeries = pd.Series(firstTeamRF)
secondRFSeries = pd.Series(secondTeamRF)
matches["Total_RF_Difference"] = firstRFSeries.values - secondRFSeries.values
#Testing
matches.tail()
```
## Visualizations for Features vs. Response
```
# Box plots of each engineered feature split by the binary response (team1Winning):
# a visible shift between the two boxes suggests the feature has predictive value.
# Graph for Average Strike Rate Difference
matches.boxplot(column = 'Avg_SR_Difference', by='team1Winning', showfliers= False)
# Graph for Average WPR(Wickets per Run) Difference
matches.boxplot(column = 'Avg_WPR_Difference', by='team1Winning', showfliers= False)
# Graph for MVP Difference
matches.boxplot(column = 'Total_MVP_Difference', by='team1Winning', showfliers= False)
#Graph for Previous encounters Win Percentage of Team #1
matches.boxplot(column = 'Prev_Enc_Team1_WinPerc', by='team1Winning', showfliers= False)
# Graph for Recent form(Win Percentage in the same season)
matches.boxplot(column = 'Total_RF_Difference', by='team1Winning', showfliers= False)
```
| github_jupyter |
Let's load the data from the csv just as in `dataset.ipynb`.
```
import pandas as pd
import numpy as np
# FER2013 CSV: one row per image with columns "emotion", "pixels", "Usage"
raw_data_file_name = "../dataset/fer2013.csv"
raw_data = pd.read_csv(raw_data_file_name)
```
Now, we separate and clean the data a little bit. First, we create an array of only the training data. Then, we create an array of only the private test data (referred to in the code with the prefix `first_test`). The `reset_index` call re-aligns the `first_test_data` to index from 0 instead of wherever it starts in the set.
```
# Split the raw FER2013 rows by their "Usage" tag and one-hot encode the 7 emotion labels.
# Fix: take .copy() of each slice so the in-place reset_index below operates on an
# independent DataFrame instead of a view of raw_data (pandas SettingWithCopyWarning).
train_data = raw_data[raw_data["Usage"] == "Training"].copy()
first_test_data = raw_data[raw_data["Usage"] == "PrivateTest"].copy()
first_test_data.reset_index(inplace=True)
second_test_data = raw_data[raw_data["Usage"] == "PublicTest"].copy()
second_test_data.reset_index(inplace=True)
import keras
# One-hot targets, shape (n, 7), one column per emotion class
train_expected = keras.utils.to_categorical(train_data["emotion"], num_classes=7, dtype='int32')
first_test_expected = keras.utils.to_categorical(first_test_data["emotion"], num_classes=7, dtype='int32')
second_test_expected = keras.utils.to_categorical(second_test_data["emotion"], num_classes=7, dtype='int32')
def process_pixels(array_input, num_pixels=2304):
    """Parse space-separated pixel strings into a float array normalized to [0, 1].

    array_input: sequence (e.g. pandas Series indexed 0..n-1) of strings, each
        containing `num_pixels` space-separated integer pixel values.
    num_pixels: pixels per image; defaults to 2304 (= 48*48, the FER2013 size).
        Generalized from the original hard-coded constant; the default keeps
        existing callers unchanged.
    Returns an (n, num_pixels) float ndarray scaled by 1/255.
    """
    output = np.empty([len(array_input), num_pixels])
    for index, item in enumerate(output):
        # each row is filled in place from the parsed string
        item[:] = array_input[index].split(" ")
    output /= 255
    return output
# Parse pixel strings and reshape each split to (n, 48, 48, 1) for the CNN input
train_pixels = process_pixels(train_data["pixels"])
train_pixels = train_pixels.reshape(train_pixels.shape[0], 48, 48, 1)
first_test_pixels = process_pixels(first_test_data["pixels"])
first_test_pixels = first_test_pixels.reshape(first_test_pixels.shape[0], 48, 48, 1)
second_test_pixels = process_pixels(second_test_data["pixels"])
second_test_pixels = second_test_pixels.reshape(second_test_pixels.shape[0], 48, 48, 1)
from keras.preprocessing.image import ImageDataGenerator
# Light augmentation: small rotations/shifts and horizontal flips only
datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True
)
```
Here, we create our own top-level network to load on top of VGG16.
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, MaxPooling2D, Conv2D, Flatten
from keras.optimizers import Adam
def gen_model(size):
    """Build and compile a small CNN for 48x48x1 FER images.

    size: number of units in the fully-connected hidden layer.
    Returns a compiled keras Sequential model with a 7-way softmax head.
    """
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Flatten(),
        Dense(size, activation='relu'),
        Dense(7, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=0.0009),
                  metrics=['accuracy'])
    return model
# NOTE(review): `keras.callbacks.callbacks` is a version-specific import path
# (standalone Keras 2.3.x); newer versions expose these under keras.callbacks — verify.
from keras.callbacks.callbacks import EarlyStopping, ReduceLROnPlateau
early_stop = EarlyStopping('val_loss', patience=50)
# Drop the LR by 10x after a quarter of the early-stopping patience without improvement
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(50/4), verbose=1)
callbacks = [early_stop, reduce_lr]
# Train one model per hidden-layer size and evaluate each on the held-out public test set
sizes = [32, 64, 128, 256]
results = [None] * len(sizes)
for i in range(len(sizes)):
    model = gen_model(sizes[i])
    model.fit_generator(datagen.flow(train_pixels, train_expected, batch_size=32),
                        steps_per_epoch=len(train_pixels) / 32,
                        epochs=10, verbose=1, callbacks=callbacks,
                        validation_data=(first_test_pixels,first_test_expected))
    # Fix: evaluate against the one-hot labels; the original passed
    # second_test_pixels as both x and y, so the reported metrics were meaningless.
    results[i] = model.evaluate(second_test_pixels, second_test_expected, batch_size=32)
```
| github_jupyter |
# PTN Template
This notebook serves as a template for single dataset PTN experiments
It can be run on its own by setting STANDALONE to True (do a find for "STANDALONE" to see where)
But it is intended to be executed as part of a *papermill.py script. See any of the
experimentes with a papermill script to get started with that workflow.
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
```
# Required Parameters
These are allowed parameters, not defaults
Each of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present)
Papermill uses the cell tag "parameters" to inject the real parameters below this cell.
Enable tags to see what I mean
```
# The injected parameter set must contain exactly these keys — no more, no fewer.
# Papermill injects them via the cell tagged "parameters"; the check happens below.
required_parameters = {
    "experiment_name",
    "lr",
    "device",
    "seed",
    "dataset_seed",
    "labels_source",
    "labels_target",
    "domains_source",
    "domains_target",
    "num_examples_per_domain_per_label_source",
    "num_examples_per_domain_per_label_target",
    "n_shot",
    "n_way",
    "n_query",
    "train_k_factor",
    "val_k_factor",
    "test_k_factor",
    "n_epoch",
    "patience",
    "criteria_for_best",
    "x_transforms_source",
    "x_transforms_target",
    "episode_transforms_source",
    "episode_transforms_target",
    "pickle_name",
    "x_net",
    "NUM_LOGS_PER_EPOCH",
    "BEST_MODEL_PATH",
    "torch_default_dtype"
}
# Default parameter set used only when running this notebook standalone
# (STANDALONE = True below); papermill-driven runs inject `parameters` instead.
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.0001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["num_examples_per_domain_per_label_source"]=100
standalone_parameters["num_examples_per_domain_per_label_target"]=100
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 100
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "target_accuracy"
standalone_parameters["x_transforms_source"] = ["unit_power"]
standalone_parameters["x_transforms_target"] = ["unit_power"]
standalone_parameters["episode_transforms_source"] = []
standalone_parameters["episode_transforms_target"] = []
standalone_parameters["torch_default_dtype"] = "torch.float32"
# Layer spec consumed by build_sequential(); each entry is a layer class + kwargs
standalone_parameters["x_net"] = [
    {"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
    {"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
    {"class": "ReLU", "kargs": {"inplace": True}},
    {"class": "BatchNorm2d", "kargs": {"num_features":256}},
    {"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
    {"class": "ReLU", "kargs": {"inplace": True}},
    {"class": "BatchNorm2d", "kargs": {"num_features":80}},
    {"class": "Flatten", "kargs": {}},
    {"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
    {"class": "ReLU", "kargs": {"inplace": True}},
    {"class": "BatchNorm1d", "kargs": {"num_features":256}},
    {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# uncomment for CORES dataset
from steves_utils.CORES.utils import (
    ALL_NODES,
    ALL_NODES_MINIMUM_1000_EXAMPLES,
    ALL_DAYS
)
standalone_parameters["labels_source"] = ALL_NODES
standalone_parameters["labels_target"] = ALL_NODES
standalone_parameters["domains_source"] = [1]
standalone_parameters["domains_target"] = [2,3,4,5]
standalone_parameters["pickle_name"] = "cores.stratified_ds.2022A.pkl"
# Uncomment these for ORACLE dataset
# from steves_utils.ORACLE.utils_v2 import (
#     ALL_DISTANCES_FEET,
#     ALL_RUNS,
#     ALL_SERIAL_NUMBERS,
# )
# standalone_parameters["labels_source"] = ALL_SERIAL_NUMBERS
# standalone_parameters["labels_target"] = ALL_SERIAL_NUMBERS
# standalone_parameters["domains_source"] = [8,20, 38,50]
# standalone_parameters["domains_target"] = [14, 26, 32, 44, 56]
# standalone_parameters["pickle_name"] = "oracle.frame_indexed.stratified_ds.2022A.pkl"
# standalone_parameters["num_examples_per_domain_per_label_source"]=1000
# standalone_parameters["num_examples_per_domain_per_label_target"]=1000
# Uncomment these for Metahan dataset
# standalone_parameters["labels_source"] = list(range(19))
# standalone_parameters["labels_target"] = list(range(19))
# standalone_parameters["domains_source"] = [0]
# standalone_parameters["domains_target"] = [1]
# standalone_parameters["pickle_name"] = "metehan.stratified_ds.2022A.pkl"
# standalone_parameters["n_way"] = len(standalone_parameters["labels_source"])
# standalone_parameters["num_examples_per_domain_per_label_source"]=200
# standalone_parameters["num_examples_per_domain_per_label_target"]=100
standalone_parameters["n_way"] = len(standalone_parameters["labels_source"])
# Parameters
# This cell is (re)written by papermill injection for each experiment run;
# the values below are the ORACLE "run2 limited" configuration.
parameters = {
    "experiment_name": "tuned_1v2:oracle.run2_limited",
    "device": "cuda",
    "lr": 0.0001,
    # 16 transmitter serial numbers (classes) — identical for source and target
    "labels_source": [
        "3123D52",
        "3123D65",
        "3123D79",
        "3123D80",
        "3123D54",
        "3123D70",
        "3123D7B",
        "3123D89",
        "3123D58",
        "3123D76",
        "3123D7D",
        "3123EFE",
        "3123D64",
        "3123D78",
        "3123D7E",
        "3124E4A",
    ],
    "labels_target": [
        "3123D52",
        "3123D65",
        "3123D79",
        "3123D80",
        "3123D54",
        "3123D70",
        "3123D7B",
        "3123D89",
        "3123D58",
        "3123D76",
        "3123D7D",
        "3123EFE",
        "3123D64",
        "3123D78",
        "3123D7E",
        "3124E4A",
    ],
    "episode_transforms_source": [],
    "episode_transforms_target": [],
    # Source and target domains are disjoint (presumably distances in feet — verify)
    "domains_source": [8, 32, 50],
    "domains_target": [14, 20, 26, 38, 44],
    "num_examples_per_domain_per_label_source": 2000,
    "num_examples_per_domain_per_label_target": 2000,
    "n_shot": 3,
    "n_way": 16,
    "n_query": 2,
    "train_k_factor": 3,
    "val_k_factor": 2,
    "test_k_factor": 2,
    "torch_default_dtype": "torch.float32",
    "n_epoch": 50,
    "patience": 3,
    "criteria_for_best": "target_accuracy",
    # Backbone spec consumed by build_sequential()
    "x_net": [
        {"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
        {
            "class": "Conv2d",
            "kargs": {
                "in_channels": 1,
                "out_channels": 256,
                "kernel_size": [1, 7],
                "bias": False,
                "padding": [0, 3],
            },
        },
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm2d", "kargs": {"num_features": 256}},
        {
            "class": "Conv2d",
            "kargs": {
                "in_channels": 256,
                "out_channels": 80,
                "kernel_size": [2, 7],
                "bias": True,
                "padding": [0, 3],
            },
        },
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm2d", "kargs": {"num_features": 80}},
        {"class": "Flatten", "kargs": {}},
        {"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
        {"class": "ReLU", "kargs": {"inplace": True}},
        {"class": "BatchNorm1d", "kargs": {"num_features": 256}},
        {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
    ],
    "NUM_LOGS_PER_EPOCH": 10,
    "BEST_MODEL_PATH": "./best_model.pth",
    "pickle_name": "oracle.Run2_10kExamples_stratified_ds.2022A.pkl",
    "x_transforms_source": ["unit_mag"],
    "x_transforms_target": ["unit_mag"],
    "dataset_seed": 1337,
    "seed": 1337,
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
    print("parameters not injected, running with standalone_parameters")
    parameters = standalone_parameters
if 'parameters' not in locals() and 'parameters' not in globals():
    raise Exception("Parameter injection failed")
# Use an easy dict for all the parameters
p = EasyDict(parameters)
# Enforce an exact match between supplied and required parameter keys
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
    print("Parameters are incorrect")
    extra_keys = supplied_keys - required_parameters
    missing_keys = required_parameters - supplied_keys
    if extra_keys:
        print("Shouldn't have:", str(extra_keys))
    if missing_keys:
        print("Need to have:", str(missing_keys))
    raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
# NOTE(review): eval() of a config-supplied string (e.g. "torch.float32") —
# acceptable for trusted papermill parameters, unsafe for untrusted input.
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
# (This is due to the randomized initial weights)
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
###################################
# Build the dataset
###################################
# An empty transform list means "no transform" (None); otherwise chain them.
x_transform_source = get_chained_transform(p.x_transforms_source) if p.x_transforms_source != [] else None
x_transform_target = get_chained_transform(p.x_transforms_target) if p.x_transforms_target != [] else None
# Episode-level transforms are not supported yet; fail fast if any are requested.
if p.episode_transforms_source != []:
    raise Exception("episode_transform_source not implemented")
episode_transform_source = None
if p.episode_transforms_target != []:
    raise Exception("episode_transform_target not implemented")
episode_transform_target = None
# Build episodic train/val/test accessors for the source domains
eaf_source = Episodic_Accessor_Factory(
    labels=p.labels_source,
    domains=p.domains_source,
    num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,
    iterator_seed=p.seed,
    dataset_seed=p.dataset_seed,
    n_shot=p.n_shot,
    n_way=p.n_way,
    n_query=p.n_query,
    train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
    pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name),
    x_transform_func=x_transform_source,
    example_transform_func=episode_transform_source,
)
train_original_source, val_original_source, test_original_source = eaf_source.get_train(), eaf_source.get_val(), eaf_source.get_test()
# Same accessors for the target domains (same pickle, different domain list)
eaf_target = Episodic_Accessor_Factory(
    labels=p.labels_target,
    domains=p.domains_target,
    num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_target,
    iterator_seed=p.seed,
    dataset_seed=p.dataset_seed,
    n_shot=p.n_shot,
    n_way=p.n_way,
    n_query=p.n_query,
    train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
    pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name),
    x_transform_func=x_transform_target,
    example_transform_func=episode_transform_target,
)
train_original_target, val_original_target, test_original_target = eaf_target.get_train(), eaf_target.get_val(), eaf_target.get_test()
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
# Single nested handle on all six splits, "original" (with domain) and "processed" (episode only)
datasets = EasyDict({
    "source": {
        "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
        "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
    },
    "target": {
        "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
        "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
    },
})
# Some quick unit tests on the data
from steves_utils.transforms import get_average_power, get_average_magnitude
# One episode is (query_x, query_y, support_x, support_y, truth)
q_x, q_y, s_x, s_y, truth = next(iter(train_processed_source))
assert q_x.dtype == eval(p.torch_default_dtype)
assert s_x.dtype == eval(p.torch_default_dtype)
print("Visually inspect these to see if they line up with expected values given the transforms")
print('x_transforms_source', p.x_transforms_source)
print('x_transforms_target', p.x_transforms_target)
print("Average magnitude, source:", get_average_magnitude(q_x[0].numpy()))
print("Average power, source:", get_average_power(q_x[0].numpy()))
q_x, q_y, s_x, s_y, truth = next(iter(train_processed_target))
print("Average magnitude, target:", get_average_magnitude(q_x[0].numpy()))
print("Average power, target:", get_average_power(q_x[0].numpy()))
###################################
# Build the model
###################################
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
# The jig trains on source episodes, validating on both source and target
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
    train_iterable=datasets.source.processed.train,
    source_val_iterable=datasets.source.processed.val,
    target_val_iterable=datasets.target.processed.val,
    num_epochs=p.n_epoch,
    num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
    patience=p.patience,
    optimizer=optimizer,
    criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
# Confusion over BOTH source and target validation sets, keyed by domain
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
    per_domain_accuracy[domain] = {
        "accuracy": accuracy,
        "source?": domain in p.domains_source
    }
# Do an independent accuracy assessment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
# Collect everything (config, metrics, history) into a single serializable record
experiment = {
    "experiment_name": p.experiment_name,
    "parameters": dict(p),
    "results": {
        "source_test_label_accuracy": source_test_label_accuracy,
        "source_test_label_loss": source_test_label_loss,
        "target_test_label_accuracy": target_test_label_accuracy,
        "target_test_label_loss": target_test_label_loss,
        "source_val_label_accuracy": source_val_label_accuracy,
        "source_val_label_loss": source_val_label_loss,
        "target_val_label_accuracy": target_val_label_accuracy,
        "target_val_label_loss": target_val_label_loss,
        "total_epochs_trained": total_epochs_trained,
        "total_experiment_time_secs": total_experiment_time_secs,
        "confusion": confusion,
        "per_domain_accuracy": per_domain_accuracy,
    },
    "history": history,
    "dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
# Serialize the record (papermill captures the cell output)
json.dumps(experiment)
```
| github_jupyter |
## 1 Simple time series
Simple time series example: tracking state with linear dynamics
```
from pfilter import ParticleFilter, independent_sample, squared_error
from scipy.stats import norm, gamma, uniform
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
```
Utility function to filter a time series offline and return results as a dictionary of states:
```
def apply_filter(pf, ys, inputs=None):
    """Apply filter pf to a series of observations (time_steps, h) and return a dictionary:
        particles: an array of particles (time_steps, n, d)
        weights: an array of weights (time_steps, n)

    inputs: optional list of per-step keyword-argument dicts forwarded to pf.update
            (e.g. a time value for time-dependent observation models).
    """
    states = []
    pf.init_filter()  # reset
    for i, y in enumerate(ys):
        if inputs is None:
            pf.update(y)
        else:
            pf.update(y, **inputs[i])
        # snapshot particles and weights after each update
        states.append([pf.transformed_particles, np.array(pf.weights)])
    return {
        name: np.array([s[i] for s in states])
        for i, name in enumerate(["particles", "weights"])
    }
def plot_particles(x, y, yn, states):
    """Plot a 1D tracking result as a line graph with overlaid
    scatterplot of particles. Particles are sized according to
    normalised weight at each step.

    x: time values
    y: original (uncorrupted) values
    yn: noisy (observed) values
    states: dictionary return from apply_filter
    """
    fig, ax = plt.subplots()
    ax.plot(x, y, label='True', lw=1)
    ax.plot(x, yn, label='Noisy', lw=2)
    particles = states["particles"]   # (time_steps, n, d)
    ws = states["weights"]            # (time_steps, n)
    # Weighted mean of particle positions at each time step
    means = np.sum(particles[:, :, 0] * ws, axis=1)
    dev = (means - (particles[:, :, 0]).T).T ** 2
    # Unbiased weighted variance: sum(w*dev) / (1 - sum(w^2)), per time step.
    # Fixes two defects in the original: `/ 1-np.sum(ws**2)` divided by 1 and
    # then SUBTRACTED the correction (precedence bug), and the correction term
    # was summed over all time steps instead of per step (missing axis=1).
    var = np.sum(ws * dev, axis=1) / (1 - np.sum(ws ** 2, axis=1))
    stds = np.sqrt(var)
    ax.plot(x, means, 'C4', label='Mean est.', lw=4)
    ax.fill_between(x, means - stds, means + stds, color='C4', alpha=0.5, label='Std.')
    ax.scatter(np.tile(x, (len(particles[0]), 1)).ravel(), particles[:, :, 0].T,
               s=ws * 1000 / np.sqrt(len(ws)),
               alpha=0.15, label='Particles')
    ax.set_xlabel("Time")
    ax.set_ylabel("Observed")
    ax.legend()
def filter_plot(x, y, yn, pf, inputs=None):
    """Run the particle filter over the noisy observations and visualise the result."""
    plot_particles(x, y, yn, apply_filter(pf, yn, inputs))
```
## (a) Tracking a 1D sinewave
### Data
We generate a noisy (co)sine wave with a linear trend, and Gaussian noise added:
```
# Noisy sine wave data: cosine plus a linear trend, with additive Gaussian noise
x = np.linspace(0, 100, 100)
y = np.cos(x/4.0) + x * 0.05
yn = y + np.random.normal(0,0.5,x.shape)
fig, ax = plt.subplots()
ax.plot(x, y, label='True', lw=1)
ax.plot(x, yn, label='Noisy', lw=1)
```
### Purely stochastic dynamics (random walk)
```
# No dynamics
# just diffusion on x
# State is a single scalar; the observation model is the identity.
prior_fn = lambda n: np.random.normal(0,1,(n,1))
dt = 0.05  # NOTE(review): defined but unused in this cell
noise = 0.15
sigma = 1.5
pf = ParticleFilter(prior_fn = prior_fn,
                    observe_fn = lambda x: x,
                    dynamics_fn=lambda x: x ,
                    n_particles=250,
                    noise_fn = lambda x: x + np.random.normal(0, noise, x.shape),
                    weight_fn = lambda x,y : squared_error(x, y, sigma=sigma),
                    resample_proportion=0.01)
filter_plot(x, y, yn, pf)
```
### Simple linear dynamics
```
# Linear dynamics
# x, dx, ddx
# State is [position, velocity, acceleration]; D is a constant-acceleration
# transition matrix and O observes only the position component.
prior_fn = lambda n: np.random.normal(0,1,(n,3))
dt = 0.25
noise = 0.125
sigma = 1.0
# linear dynamics
D = np.array([[1, dt, 0.5*dt**2],
              [0, 1, dt],
              [0, 0, 1]])
O = np.array([[1, 0, 0]])
pf = ParticleFilter(prior_fn = prior_fn,
                    observe_fn = lambda x: x @ O.T,
                    dynamics_fn=lambda x: x @ D.T ,
                    n_particles=200,
                    noise_fn = lambda x: x + np.random.normal(0, noise, x.shape),
                    weight_fn = lambda x,y : squared_error(x, y, sigma=sigma),
                    resample_proportion=0.02)
filter_plot(x, y, yn, pf)
```
### Linear dynamics with missing values
```
# Missing values; randomly delete 25% of the observations
# (None entries let the filter propagate without an update at that step)
y_missing = np.array([yt if np.random.uniform()>0.25 else None for yt in yn])
filter_plot(x, y, y_missing, pf)
```
### Latent variable estimation (cosine model)
```
# Cosine estimation
# x = a cos(wt + p) + kt
# state = [a,w,p,k]
# Uniform prior scaled per-component: amplitude, frequency, phase, trend slope
prior_fn = lambda n: np.random.uniform(0,1,(n,4)) * [1.0, 0.25, np.pi*2.0, 0.1]
noise = 0.0005
sigma = 0.5
def cos_observe(x, t):
    """Map state particles [a, w, p, k] to the observation a*cos(w*t + p) + k*t at time t."""
    amp, freq, phase, slope = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    return amp * np.cos(freq * t + phase) + slope * t
# Per-step keyword inputs: the observation model needs the current time t
ts = [{"t":t} for t in x]
pf = ParticleFilter(prior_fn = prior_fn,
                    observe_fn = cos_observe,
                    dynamics_fn = lambda x, **kwargs:x ,
                    n_particles=200,
                    n_eff_threshold=1.0,
                    noise_fn = lambda x, **kwargs: x + np.random.normal(0, noise, x.shape) ,
                    weight_fn = lambda x,y, **kwargs: squared_error(x, y, sigma=sigma),
                    transform_fn = lambda x, weights, **kwargs: cos_observe(x, kwargs['t'])[:,None],
                    resample_proportion=0.01)
filter_plot(x, y, yn, pf, inputs=ts)
```
| github_jupyter |
# Step 2 - Wrangling Raw Data in the Local Data Lake into Digestible Data
Loading, merging, cleansing, unifying and wrangling Oracle OpenWorld & CodeOne Session Data from still fairly raw JSON files in the local datalake.
The gathering of raw data from the (semi-)public API for the Session Catalog into a local data lake was discussed and performed in <a href="./1-OOW2018 Session Catalog - retrieving raw session data in JSON files.ipynb">Notebook 1-OOW2018 Session Catalog - retrieving raw session data in JSON files</a>. The current notebook starts from the 44 raw JSON files in local directory `./data`.
This notebook describes how to load, combine and wrangle the data from these files. This notebook shows for example how to load and merge data from dozens of (same formatted) JSON files, discard undesired attributes, deduplicate the record set, derive attributes for easier business intelligence & machine learning and write the resulting data set to a single JSON file.
Steps in this notebook:
* Load and Merge from raw JSON
* <a href="#deduplicate">discard redundant columns</a>
* <a href="#deduplicate">deduplication</a>
* <a href="#explore">Explore Data Frame</a>
* <a href="#enriching">Enrich Data</a>
* <a href="#publish">Publish Wrangle Results</a>
The deliverable from this notebook is a single file `oow2018-sessions-wrangled.json` in the `datawarehouse` folder. This file contains unique, filtered, enriched data that is in a proper shape to perform further analysis on.
# Load and merge data from raw JSON files
This first section describes how the session data from Oracle OpenWorld 2018 is loaded from over 40 individual files with the raw JSON session data. These files are organized by session type and event (oow and codeone) - and have been produced by a different notebook (<a href="./1-OOW2018 Session Catalog - retrieving raw session data in JSON files.ipynb">Notebook 1-OOW2018 Session Catalog - retrieving raw session data in JSON files</a>) from the Oracle OpenWorld Session Catalog API.
The files are read into individual Pandas Data Frame objects. These Data Frames are concatenated. The end result from reading 44 files is a single Pandas Data Frame - called *ss* (session schedule).
Let's start with reading the session information from a single file into a Pandas Data Frame - to get a feel for how that works and what it results in.
```
#read a single session data file and parse the JSON content into a Pandas Data Frame
import pandas as pd
import json
dataLake = "datalake/" # file system directory used for storing the gathered data
#as a test, try to load data from one of the generated files
conference = 'oow' # could also be codeone
sessionType = 'TRN' # could also be one of 21 other values such as TUT, DEV, GEN, BOF, HOL, ...
# File naming convention: oow2018-sessions_<conference>_<sessionType>.json
ss = pd.read_json("{0}oow2018-sessions_{1}_{2}.json".format(dataLake, conference, sessionType))
# add an additional column to the Data Frame to specify the conference catalog of origin of these sessions; in this case oow
ss = ss.assign(catalog=conference)
ss.head(3)
#as a test, try to load data from another file ; same sessionType but different conference
conference = 'codeone'
sessionType = 'TRN'
ss2 = pd.read_json("{0}oow2018-sessions_{1}_{2}.json".format(dataLake, conference, sessionType))
# add an additional column to the Data Frame to specify the conference catalog of origin of these sessions; in this case codeone
ss2 = ss2.assign(catalog='codeone')
ss2.head(3)
```
All session data is to be merged into a single data frame. We will use `ss` as the sink - the data frame into which all sessions records are to be loaded. We use the Pandas concat operation to merge two Data Frames, as is done below for the two data frames with session records for session type TRN.
```
#add ss and ss2 together
#see https://pandas.pydata.org/pandas-docs/stable/merging.html
ss = pd.concat([ss,ss2], ignore_index=True , sort=True)
ss.head(8)
```
Above, two files were loaded, parsed into a Data Frame and added together into a single Data Frame. The next step is to load the session data from the raw JSON file for all 44 files - for the two events and for all session types. The session types are defined in the Dict object *sessionTypes* . The code loops over the keys in this Dict and reads the corresponding JSON file for each of the two events.
```
sessionTypes = {'BOF': '1518466139979001dQkv'
, 'BQS': 'bqs'
, 'BUS': '1519240082595001EpMm'
, 'CAS': 'casestudy'
, 'DEV': '1522435540042001BxTD'
, 'ESS': 'ess'
, 'FLP': 'flp'
, 'GEN': 'general'
, 'HOL': 'hol'
, 'HOM': 'hom'
, 'IGN': 'ignite'
, 'KEY': 'option_1508950285425'
, 'MTE':'1523906206279002QAu9'
, 'PKN': '1527614217434001RBfj'
, 'PRO': '1518464344082003KVWZ'
, 'PRM': '1518464344082002KM3k'
, 'TRN': '1518464344082001KHky'
, 'SIG': 'sig'
, 'THT': 'ts'
, 'TLD': '1537894888625001RriS'
, 'TIP': '1517517756579001F3CR'
, 'TUT': 'tutorial'
#commented out because TRN is dealt with earlier on, 'TRN': '1518464344082001KHky'
}
#loop over all session types and read the corresponding files for both events codeone and oow
for key,value in sessionTypes.items():
sessionType = key
conference = 'oow'
ssoow = pd.read_json("{0}oow2018-sessions_{1}_{2}.json".format(dataLake, conference, sessionType))
# add an additional column to the Data Frame to specify the conference catalog of origin of these sessions
ssoow = ssoow.assign(catalog=conference)
conference = 'codeone'
sscodeone = pd.read_json("{0}oow2018-sessions_{1}_{2}.json".format(dataLake, conference, sessionType))
sscodeone = sscodeone.assign(catalog=conference)
# merge data for sessions of type session type for both oow and codeone into a master set in ss
ss = pd.concat([ss,ssoow,sscodeone], ignore_index=True, sort=True)
print("Done - all data is merged into one data frame")
```
### Some key metrics on the merged Sessions Set
The shape function on the data frame returns the dimensions of the frame: the number of rows by the number of columns:
```
ss.shape
# total memory usage
ss.memory_usage(index=True, deep=True).sum()
#list all columns in the Dataframe
ss.columns
# data types for the columns in the data frame
ss.dtypes
ss.groupby(['event','type'])['event'].count()
```
<a name="discard" />
## Discard unneeded attributes
Early inspection of the JSON document and the session catalog website has provided insight in the available attributes and their relevance. Below, you will see an overview of all columns in the Data Frame - corresponding with the top level items in the JSON document. The subsequent step removes from the Data Frame all columns that seem irrelevant for our task at hand.
These columns seem relevant for the web frontend developers, for planned but not realized objectives or for unknown purposes. In order to not carry more weight than necessary - because of performance, resource usage and lurking complexity we get rid of columns that seem irrelevant. If we need them after all, they are still available in the data lake.
Remove the unwanted columns from the Dataframe
(allowDoubleBooking .'codeParts', 'code_id', 'es_metadata_id',type_displayorder type_displayorder_string useDoubleBooking useWaitingList videos viewAccess viewAccessPublic, ...)
```
# remove columns
# Note that the original 'data' object is changed when inplace=True
ss.drop([
'allowDoubleBooking'
,'codeParts', 'code_id', 'es_metadata_id','type_displayorder'
,'type_displayorder_string', 'useDoubleBooking'
,'useWaitingList', 'videos'
, 'viewAccess', 'viewAccessPublic','viewFileAccess', 'waitlistAccess', 'waitlistLimit'
, 'eventId','eventName','featured_value','publicViewPrivateSchedule','published', 'scheduleAccess','sessionID','status'
,'externalID','highlight','abbreviation'
]
, axis=1, inplace=True)
```
<a name="deduplicate" />
## Deduplicate
Some sessions are included in the catalog for both events - Oracle OpenWorld and CodeOne - even though they are associated with one of the two. The exact same session - with only a different value for attribute catalog - occurs twice in our data set for these sessions. We should get rid of duplicates. However, we should not do so before we capture the fact that a session is part of the catalogs of both events in the record that is retained.
Note: it seems there are some sessions that occur multiple times in the data set but are not included in both event catalogs. This seems to be just some form of data pollution.
Let's first see how many events are part of both events' catalogs.
```
# The code feature is supposed to be the unique identifier of sessions
# Let's see at the multifold occurrence of individual code values
counts = ss['code'].value_counts()
counts.head(13)
```
We have found quite a few code values that occur multiple times in the data frame. Each code should be in the data frame only once. Let's further look into these sessions.
```
# let's create a data frame with all sessions whose code occurs more than one in the data frame
duplicates = ss[ss['code'].isin(counts.index[counts > 1])]
# show the first ten records of these 'candidate duplicates'
duplicates[['code','title','event','catalog' ]].sort_values('code').head(10)
#are duplicates indeed associated with both events?
# some are - but not all of them:
duplicates.loc[duplicates['code']=='BOF4977']
```
The next step is a little bit complex: we want to record the fact that certain sessions (actually session codes) are associated with both catalogs. We join the sessions in `ss` with the sessions that occur multiple times in `duplicates` and we join records in `ss` with their counterparts (same session code) that have a different catalog origin.
This gives us a data frame with all session codes associated with both catalogs.
```
# find all sessions that occur in both catalogs: set their catalog attribute to both
# set catalog="both" if session in duplicates with a different catalog value than the session's own catalog value
doubleCatalogSessions = pd.merge(ss, duplicates, on=['code'], how='inner').query('catalog_y != catalog_x')
doubleCatalogSessions[['code','catalog_x', 'catalog_y']] .head(20)
```
The master dataset is still `ss`. All sessions in this data frame whose session code appears in `doubleCatalogSessions` will get their *catalog* attribute updated to *both*. The cell will then show the values in catalog and the number of their occurrences.
```
# all sessions in doubleCatalogSessions occur in both oow and code one session catalog
# time to update column catalog for all sessions in ss that have a code that occurs in doubleCatalogSessions['code']
ss.loc[ss['code'].isin(doubleCatalogSessions['code']),'catalog']='both'
ss['catalog'].value_counts()
```
If we now drop any duplicate records - any sessions whose session code occurs more than once - we will reduce our data frame to the unique set of sessions that actually took place, without the ghost duplicates introduced in our data set because a session appeared in more than one catalog.
```
# Drop duplicate sessions - identifying rows by their session code; keep the first occurrence
ss.drop_duplicates(subset=['code'], keep='first', inplace=True)
# how many sessions appear in each and in both catalogs?
ss['catalog'].value_counts()
```
<a name="explore" />
# Exploring the Data
Let's briefly look at the data we now have in the Pandas Data Frame. What does the data look like? What values do we have in the columns? And what complexity is hiding in some columns with nested values, such as participants, attributevalues and files.
Note: https://morphocode.com/pandas-cheat-sheet/ provides a quick overview of commands used for inspecting and manipulating the data frame.
```
#an overview of the current contents of the data frame
ss.head(6)
# and this overview of the rows and columns in the data frame.
print(ss.info())
#let's look at all different values for length (the duration of each session in minutes)
ss['length'].unique()
# yes - it is that simple!
# and what about (session) type?
ss['type'].unique()
```
Some of the columns in the data frame contain complex, nested values. For example the `attributevalues` column. It contains a JSON array - a list of objects that each describe some attribute for the session. Examples of session attributes that are defined in this somewhat convoluted way are *(target Experience) Level*, *Track*, *Day* , *Role*. A little later on, we will create new, proper features in the data frame based on values extracted from this complex attributevalues column - in order to make it possible to make good use of this information for visualization, analysis and machine learning.
```
# some of the columns or attributes have nested values. It seems useful to take a closer look at them.
# show nested array attributevalues for first record in Dataframe - this is an attribute which contains an array of objects that each define a specific characteristic of the session,
# such as date and time, track, topic, level, role, company market size,
ss['attributevalues'][10]
```
The *participants* column also contains a complex object. This column too contains a JSON array with the people associated with a session as speaker. The array contains a nested object for each speaker. This object has a lot of data in it - from name and biography and special designations (titles) for the speaker to company, job title, URL to a picture and details on all (other) sessions the speaker is involved in.
Let's take a look at an example.
```
#show nested array participants for 11th record in the Dataframe
ss['participants'][10]
```
The *files* column too contains a JSON array. This array contains entries for files associated with the session. These files provide the slides or other supporting content for the session. Each session is supposed to have exactly one file associated with it. Some do not - for example because the speaker(s) forgot to upload their slides.
Let's inspect the JSON contents of the *files* column. It contains the name of the file and the URL from where it can be downloaded.
```
#show nested array files
ss['files'][0]
```
<a name="enriching" />
# Enriching the data
In this section we are engineering the data to produce some attributes or features that are easier to work with once we start doing data science activities such as business intelligence or machine learning. In this section we are not yet actually bringing in external data sources to add information we not already have in our set. However, we are making the data we already have more accessible and thereby more valuable. So in that sense, this too can be called *enriching*.
The enrichments performed on the session data in the data frame :
* set a flag at session level to indicate whether the session is an Oracle session (with Oracle staff among the speakers- speaker has Oracle in companyName attribute)
* set a flag at session level to indicate whether a file has been uploaded for the session
* set a flag at session level to indicate whether one of the speakers has Java Rockstar, Java Champion, Developer Champion/Groundbreaker Ambassador, ACE or ACE Director
* set attribute level (based on attributevalues array) - beginner, all, ...
* derive track(s?) from attributevalues array (where attribute_id=Track or attribute_id=CodeOneTracks )
* set attribute with number of speakers on session
* number of instances of the session
### Oracle Speaker and File Uploaded
These functions `oracle_speaker` and `file_flag` are invoked for every record in the data frame. They are used to derive new, first class attributes to indicate whether or not at least one speaker working for Oracle is associated with a session (Y or N) and if a file has been uploaded (presumably with slides) for the session. The information represented by these two new attributes already exists in the data frame - but in a way that makes it quite inaccessible to the data analyst.
```
#function to derive Oracle flag from participants
def oracle_speaker(session):
    """Return 'Y' when at least one speaker's companyName mentions Oracle, else 'N'."""
    # 'x' is a harmless placeholder default so the lowercase test never matches a missing key
    for speaker in session["participants"]:
        if "oracle" in speaker.get('companyName', 'x').lower():
            return 'Y'
    return "N"
```
New columns `oracle_speaker` and `file_flag` are added to the data frame with values derived for each record by applying the functions with corresponding names.
```
#set oracle_speaker flag
#apply function oracle_speaker to every row in the data frame to derive values for the new column oracle_speaker
ss['oracle_speaker'] = ss.apply(oracle_speaker, axis=1)
# show the values and the number of occurrences for the new column oracle_speaker
ss['oracle_speaker'].value_counts()
#function to derive file flag from files
def file_flag(session):
    """Return 'Y' when the session has at least one file (with a truthy fileId) attached, else 'N'."""
    attachments = session.get("files", None)
    if isinstance(attachments, list):
        # any() short-circuits on the first real upload, mirroring the original break
        if any(entry['fileId'] for entry in attachments):
            return 'Y'
    return "N"
#set file_flag
#apply function file_flag to every row in the data frame to derive values for the new column file_flag
ss['file_flag'] = ss.apply(file_flag, axis=1)
ss['file_flag'].value_counts()
```
### Speaker Designations
Many of the speakers are special - in the sense that they have been awarded community awards and titles, such as (Oracle) Java Champion, Oracle ACE Directory, JavaOne Rockstar and Groundbreaker Ambassador. These designations can be found for speakers (a nested JSON object) in their *attributevalues* feature - which happens to be another nested JSON object.
The next function *speaker_designation* finds out for a session if it has at least one speaker associated with it who has the requested designation.
```
#function to derive designation flag from speakers
# values for designation: JavaOne Rockstar, Oracle ACE Director, Oracle Java Champion, Groundbreaker Ambassador,
def speaker_designation(session, designation):
    """Return 'Y' if any speaker on the session carries the given special designation, else 'N'.

    Designations live in each speaker's nested attributevalues array under
    attribute_id 'specialdesignations' (e.g. JavaOne Rockstar, Oracle ACE Director).
    """
    for speaker in session["participants"]:
        for attr in speaker.get("attributevalues", []):
            if attr.get("attribute_id") == "specialdesignations" and attr["value"] == designation:
                return "Y"
    return "N"
```
The next cell iterates over four major `designations` and derives for each designation a new column in the data frame that contain Y or N, depending on whether a speaker with the designation will present in the session.
```
#set flags for designations
designations = ['JavaOne Rockstar', 'Oracle ACE Director', 'Oracle Java Champion', 'Groundbreaker Ambassador']
for d in designations:
ss[d] = ss.apply(speaker_designation, args=(d,), axis=1)
```
Let's check the newly created columns: what values do they contain and how often does each value occur?
```
ss[['JavaOne Rockstar','Oracle ACE Director' , 'Oracle Java Champion', 'Groundbreaker Ambassador']].apply(pd.value_counts).fillna(0)
```
### Level
Each session can be described as suitable for one or more levels of experience: Beginner, Intermediate or Advanced. Each session can be associated with all levels - or a subset of them. This level indication is somewhat hidden away, in the attributevalues object. The next function `session_level` will unearth for a given session whether it is associated with the specified level, Y or N.
```
#function to derive level flag for a session
def session_level(session, level):
    """Return 'Y' if the session is tagged with the given experience level, else 'N'.

    Levels are stored in attributevalues under attribute_id 'SessionsbyExperienceLevel'.
    """
    for attr in session["attributevalues"]:
        # stop at the first match - mirrors the original break
        if attr.get("attribute_id") == "SessionsbyExperienceLevel" and attr["value"] == level:
            return "Y"
    return "N"
```
This cell runs through all three level values and creates a new column in the data frame for each level. It will set a Y or N for each session in the new columns, depending on whether session is associated with the level, or not.
```
#set flags for designations
levels = ['Intermediate', 'Beginner', 'Advanced']
for l in levels:
ss[l] = ss.apply(session_level, args=(l,), axis=1)
print("Assigned Level Flags (Advanced, Intermediate, Beginner)")
```
Next we will derive values for a new column 'All' that indicates whether a session has been associated with all levels - even though I am not sure what exactly that means.
```
def isAll(session):
    """Return 'Y' when the session is flagged for every experience level, else 'N'."""
    levels_set = all(session[col] == 'Y' for col in ('Beginner', 'Intermediate', 'Advanced'))
    return 'Y' if levels_set else 'N'
ss['All'] = ss.apply( isAll, axis=1)
ss[['Intermediate', 'Beginner', 'Advanced', 'All']].apply(pd.value_counts).fillna(0)
```
### Track
All sessions are assigned to one or more tracks. These tracks are categories that help attendees identify and assess sessions. Some examples of tracks are: Core Java Platform, Development Tools, Oracle Cloud Platform, MySQL, Containers, Serverless, and Cloud, Emerging Technologies, Modern Web, Application Development, Infrastructure Technologies (Data Center).
Depending on whether a session originates from the Oracle OpenWorld or CodeOne catalog, the track(s) are found in the nested object *attributevalues* under the attribute_id *CodeOneTracks* or just *Track*.
The next function is created to return a String array for a session with all the tracks associated with the session.
```
#function to derive track flag for a session
def session_track(session):
    """Return the list of track names assigned to the session.

    Tracks are hidden in the nested attributevalues array, under attribute_id
    'CodeOneTracks' (CodeOne catalog sessions) or 'Track' (OpenWorld catalog
    sessions). A session may be assigned to more than one track.
    """
    # Fixed: the original seeded the list with a placeholder ['abc'] and
    # removed it afterwards with del result[0]; starting empty is equivalent
    # and cannot corrupt the result if the function ever returns early.
    tracks = []
    for attr in session["attributevalues"]:
        # collect values for both catalog-specific track attribute ids
        if attr.get("attribute_id") in ("CodeOneTracks", "Track"):
            tracks.append(attr["value"])
    return tracks
```
The next cell uses the function `session_track` to produce the value for each session for the track. The cell then prints out the tracks for a sample of sessions.
```
# add column track with a value derived from the session record
ss["track"] = ss.apply(session_track, axis=1)
print("Assigned Track")
ss[['title','catalog','track']].tail(10)
```
### Number of Speakers per Session
The number of speakers making an appearance in a session is another fact that while not readily available is also hiding in our data frame. We will turn this piece of information into an explicit feature. The next cell counts the number of elements in the participants array - and assigns it to the speaker_count attribute of each session record.
```
#set number of speakers for each session by taking the length of the list in the participants column
ss["speaker_count"] = ss['participants'].apply(lambda x: len(x))
# list the values in the new speaker_count column and the number of occurrences. 12 participants in one session?
ss['speaker_count'].value_counts()
```
### Number of instances per session
Most sessions are scheduled just once. However, some sessions are executed multiple times. This can be derived from the *times* column in the data frame, simply by taking the number of elements in the JSON array in that column.
The next cell adds a column to the data frame, with for each session the number of times that session takes place.
```
# set the number of instances for each session by taking the length of the list in the times column
ss["instance_count"] = ss['times'].apply(lambda x: len(x) if x else None)
# list the distinct instance counts and how often each occurs
ss['instance_count'].value_counts()
```
### Session Room Capacity
The session records have a *times* feature - a JSON array with all the instances of the session. The elements contain the *capacity* attribute that gives the size or capacity of the room in which the session is scheduled. Because most sessions occur only once it seems acceptable to set a room_capacity feature on all session records in the data frame derived from the capacity found in the first element of the times feature. Likewise, we can derive the value for room itself.
```
#set the room capacity based on the capacity of the room of the first entry in the times list
# note: when the session is scheduled multiple times, not all rooms may have the same capacity; that detail gets lost
ss["room_capacity"]= ss['times'].apply(lambda x:x[0]['capacity'] if x else None)
ss["room"]= ss['times'].apply(lambda x:x[0]['room'] if x else None)
```
### Session Slot - Day and Time
As discussed in the preceding section, the session records have a *times* feature - a JSON array with all the instances of the session. The elements contain attributes *dayName* and *time* that mark the slot in which the session is scheduled. Because most sessions occur only once it seems acceptable to set a session day and time feature from the values for time and day found in the first element of the times feature. Likewise, we can derive the Python DateTime value for the starting timestamp of the session.
```
#likewise derive day, time and room - from the first occurrence of the session
ss["day"]= ss['times'].apply(lambda x:x[0]['dayName'] if x else None)
ss["time"]= ss['times'].apply(lambda x:x[0]['time'] if x else None)
ss[['day','time']].apply(pd.value_counts).fillna(0)
```
The columns `day` and `time` are just strings. It may be useful for the data analysts further downstream in the data science pipeline to also have a real DateTime object to work with. The next cell introduces the `session_timestamp` column, set to the real timestamp derived from day and time. Note that we make use of one external piece of data not found in the datalake: the fact that Sunday in this case means 21st October 2018.
```
import datetime
# see https://stackabuse.com/converting-strings-to-datetime-in-python/
#Sunday means 21st October 2018; monday through thursday are the days following the 21st
# map each conference day name to its October 2018 calendar date
dayMap = {'Sunday': '21', 'Monday': '22', 'Tuesday': '23', 'Wednesday': '24', 'Thursday': '25'}
def create_timestamp(day, timestr):
    """Build a datetime for OOW 2018 from a day name and an 'HH:MM' string.

    Returns None when either the day or the time string is missing.
    """
    if not (day and timestr):
        return None
    return datetime.datetime.strptime('2018-10-' + dayMap[day] + ' ' + timestr, '%Y-%m-%d %H:%M')
def create_session_timestamp(session):
    """Derive the starting timestamp for a session row from its day and time columns."""
    day_name = session['day']
    start_time = session['time']
    return create_timestamp(day_name, start_time)
ss['session_timestamp'] = ss.apply(create_session_timestamp, axis=1)
ss[['day','time','session_timestamp'] ].head(10)
```
<a name="publish" />
## Persist Pandas Dataframe - as single, consolidated, reshaped, enriched JSON file
One thing we want to be able to do with the data we gather, is to persist it for future use by data analists, data scientists and other stakeholders. We could store the data in a NoSQL database, a cloud storage service or simply as a local file. For now, let's do the latter: store the cleansed and reshaped data in a local JSON file for further enrichment, visualization and machine learning purposes.
The file is called `oow2018-sessions-wrangled.json` and it will be stored in the `datawarehouse` folder.
```
dataWarehouse = "datawarehouse/" # file system directory used for storing the wrangled data
ss.to_json("{0}oow2018-sessions-wrangled.json".format(dataWarehouse), force_ascii=False)
```
A quick check to see whether the wrangled session data was successfully written to disk - and can be read again. If we can read it, then we can safely assume that in the next phase in the data analytics flow the same will succeed.
```
dubss = pd.read_json("{0}oow2018-sessions-wrangled.json".format(dataWarehouse))
dubss.head(10)
```
If and when the previous cell lists session records correctly, then the data warehouse has been populated with the consolidated file *oow2018-sessions-wrangled.json* with all sessions - cleansed, deduplicated, enriched and ready for further processing.
The wrangled data set no longer contains many of the attributes not conceivably useful for further analysis. It has been extended with (derived) attributes that will probably be useful for next data analytics tasks. Additionally, the record set has been deduplicated to only the unique sessions.
```
# a quick list of all columns
dubss.columns
```
| github_jupyter |
```
pip install scipy==1.1.0
pip install pandas==0.21.3
#pip install numpy==1.18.1
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
from keras import metrics
from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
from keras.layers import Flatten
from nltk import word_tokenize, pos_tag, chunk
from pprint import pprint
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
from pprint import pprint
import pandas as pd
import numpy as np
from keras import optimizers
from keras.layers import Dense
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from keras.constraints import maxnorm
from keras.layers import Dropout
import os
df = pd.read_csv('data.csv')
selected_df = df[['job_description','job_title','category']]
selected_df = selected_df.dropna()
train, test = train_test_split(selected_df ,test_size = 0.2)
train_descs = train['job_description']
train_labels = train['category']
#train_labels = train['job_title']
test_descs = test['job_description']
test_labels = test['category']
num_labels = len(train_labels.unique().tolist())
vocab_size = 1000
batch_size = 32
nb_epoch = 50
# define Tokenizer with Vocab Size
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(train_descs)
x_train = tokenizer.texts_to_matrix(train_descs, mode='tfidf')
x_test = tokenizer.texts_to_matrix(test_descs, mode='tfidf')
encoder = LabelBinarizer()
encoder.fit(train_labels)
y_train = encoder.transform(train_labels)
y_test = encoder.transform(test_labels)
# Build a simple feed-forward classifier over the TF-IDF document vectors.
model = Sequential()
# wide first layer over the vocab_size-dimensional TF-IDF input; maxnorm(2) constrains weight magnitudes
model.add(Dense(4096, input_shape=(vocab_size,), activation = 'relu', kernel_initializer = 'glorot_normal', kernel_constraint=maxnorm(2)))
model.add(Dropout(0.1))
model.add(Dense(1024, kernel_initializer = 'glorot_normal', activation= 'relu'))
model.add(Dropout(0.1))
# one output unit per job category, softmax for a probability distribution
model.add(Dense(num_labels))
model.add(Activation('softmax'))
# Compile model
model.compile(loss = 'categorical_crossentropy',
              optimizer = 'sgd',
              metrics = [metrics.categorical_accuracy, 'accuracy'])
# train, holding out 10% of the training data for validation
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    verbose=1,
                    validation_split=0.1)
# evaluate on the held-out test split
score = model.evaluate(x_test, y_test,
                       batch_size=batch_size, verbose=1)
# score = [loss, categorical_accuracy, accuracy] per the metrics list above
print('\nTest categorical_crossentropy:', score[0])
print('Categorical accuracy:', score[1])
print('Accuracy:', score[2])
with open('resume.txt','r') as f:
resume = f.read()
import re
resume = re.sub('\s+',' ',resume)
resume = pd.Series(resume)
tokenizer_resume = tokenizer.texts_to_matrix(resume, mode='tfidf')
pred = model.predict(tokenizer_resume)
print('The suggested work is', (encoder.inverse_transform(pred))[0])
```
Next step: batch the inputs - convert a thousand input resumes (PDF format) to text, then make predictions.
Three functions are needed:
1. A PDF converter: PDF to text, producing [[resume1],[resume2],...,[resumeN]]
2. A function for making multiple predictions
3. A function to tokenize all resumes
| github_jupyter |
Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
- Author: Sebastian Raschka
- GitHub Repository: https://github.com/rasbt/deeplearning-models
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p tensorflow,numpy
```
# Using Queue Runners to Feed Images Directly from Disk
TensorFlow provides users with multiple options for providing data to the model. One of the probably most common methods is to define placeholders in the TensorFlow graph and feed the data from the current Python session into the TensorFlow `Session` using the `feed_dict` parameter. Using this approach, a large dataset that does not fit into memory is most conveniently and efficiently stored using NumPy archives as explained in [Chunking an Image Dataset for Minibatch Training using NumPy NPZ Archives](image-data-chunking-npz.ipynb) or HDF5 data base files ([Storing an Image Dataset for Minibatch Training using HDF5](image-data-chunking-hdf5.ipynb)).
Another approach, which is often preferred when it comes to computational efficiency, is to do the "data loading" directly in the graph using input queues from so-called TFRecords files, which is illustrated in the [Using Input Pipelines to Read Data from TFRecords Files](tfrecords.ipynb) notebook.
This notebook will introduce an alternative approach which is similar to the TFRecords approach as we will be using input queues to load the data directly on the graph. However, here we are going to read the images directly from JPEG files, which is a useful approach if disk space is a concern and we don't want to create a large TFRecords file from our "large" image database.
Beyond the examples in this notebook, you are encouraged to read more in TensorFlow's "[Reading Data](https://www.tensorflow.org/programmers_guide/reading_data)" guide.
## 0. The Dataset
Let's pretend we have a directory of images containing two subdirectories with images for training, validation, and testing. The following function will create such a dataset of images in JPEG format locally for demonstration purposes.
```
# Note that executing the following code
# cell will download the MNIST dataset
# and save all the 60,000 images as separate JPEG
# files. This might take a few minutes depending
# on your machine.
import numpy as np
# load utilities from ../helper.py
import sys
sys.path.insert(0, '..')
from helper import mnist_export_to_jpg
np.random.seed(123)
mnist_export_to_jpg(path='./')
```
The `mnist_export_to_jpg` function called above creates 3 directories, mnist_train, mnist_test, and mnist_validation. Note that the names of the subdirectories correspond directly to the class label of the images that are stored under it:
```
import os
for i in ('train', 'valid', 'test'):
dirs = [d for d in os.listdir('mnist_%s' % i) if not d.startswith('.')]
print('mnist_%s subdirectories' % i, dirs)
```
To make sure that the images look okay, the snippet below plots an example image from the subdirectory `mnist_train/9/`:
```
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
some_img = os.path.join('./mnist_train/9/', os.listdir('./mnist_train/9/')[0])
img = mpimg.imread(some_img)
print(img.shape)
plt.imshow(img, cmap='binary');
```
Note: The JPEG format introduces a few artifacts that we can see in the image above. In this case, we use JPEG instead of PNG. Here, JPEG is used for demonstration purposes since that's still format many image datasets are stored in.
# 1. Reading
This section provides an example of how to use the [`tf.WholeFileReader`](https://www.tensorflow.org/api_docs/python/tf/WholeFileReader) and a filename queue to read in the images from the `mnist_train` directory. Also, we will be extracting the class labels directly from the file paths and convert the images to a one-hot encoded format that we will use in the later sections to train a multilayer neural network.
```
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    # TF1 queue-based input pipeline: a shuffled queue of training-image paths.
    filename_queue = tf.train.string_input_producer(
        tf.train.match_filenames_once('mnist_train/*/*.jpg'),
        seed=123,
        shuffle=True)

    # Read one whole file per dequeue and decode it as JPEG.
    image_reader = tf.WholeFileReader()
    file_name, image_raw = image_reader.read(filename_queue)
    file_name = tf.identity(file_name, name='file_name')  # named op so it can be fetched as 'file_name:0'
    image = tf.image.decode_jpeg(image_raw, name='image')
    image = tf.cast(image, tf.float32)

    # The class label is the subdirectory name, i.e. the second path
    # component of 'mnist_train/<label>/<file>.jpg'.
    label = tf.string_split([file_name], '/').values[1]
    label = tf.string_to_number(label, tf.int32, name='label')
    onehot_label = tf.one_hot(indices=label,
                              depth=10,
                              name='onehot_label')

with tf.Session(graph=g) as sess:
    # match_filenames_once keeps the file list in a *local* variable,
    # hence the local (not global) variables initializer.
    sess.run(tf.local_variables_initializer())

    # Start the queue-runner threads that fill the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess,
                                           coord=coord)

    # Fetch a single example via the op names assigned above.
    image_tensor, file_name, class_label, ohe_label = \
        sess.run(['image:0',
                  'file_name:0',
                  'label:0',
                  'onehot_label:0'])
    print('Image shape:', image_tensor.shape)
    print('File name:', file_name)
    print('Class label:', class_label)
    print('One-hot class label:', ohe_label)

    # Shut the queue-runner threads down cleanly.
    coord.request_stop()
    coord.join(threads)
```
- The `tf.train.string_input_producer` produces a filename queue that we iterate over in the session. Note that we need to call `sess.run(tf.local_variables_initializer())` for our filename queue.
- The `tf.train.start_queue_runners` function uses a queue runner that uses a separate thread to load the filenames from the `queue` that we defined in the graph without blocking the reader.
Note that it is important to shuffle the dataset so that we can later make use of TensorFlow's [`tf.train.shuffle_batch`](https://www.tensorflow.org/api_docs/python/tf/train/shuffle_batch) function and don't need to load the whole dataset into memory to shuffle epochs.
## 2. Reading in batches
While the previous section illustrated how we can use input pipelines to read images one by one, we rarely (want to) train neural networks with one datapoint at a time but use minibatches instead. TensorFlow also has some really convenient utility functions to do the batching conveniently. In the following code example, we will use the [`tf.train.shuffle_batch`](https://www.tensorflow.org/api_docs/python/tf/train/shuffle_batch) function to load the images and labels in batches of size 64.
Also, let us put the code for processing the images and labels into a function, `read_images_from_disk`, that we can reuse later.
```
import tensorflow as tf
def read_images_from_disk(filename_queue, image_dimensions, normalize=True):
    """Dequeue one JPEG from *filename_queue* and return (image, one-hot label).

    The label is taken from the second path component of the file name
    ('<split>/<label>/<file>.jpg') and one-hot encoded over 10 classes.
    """
    reader = tf.WholeFileReader()
    fname, raw_jpeg = reader.read(filename_queue)
    fname = tf.identity(fname, name='file_name')
    img = tf.image.decode_jpeg(raw_jpeg, name='image')
    img.set_shape(image_dimensions)
    img = tf.cast(img, tf.float32)
    if normalize:
        # normalize to [0, 1] range
        img = img / 255.
    class_str = tf.string_split([fname], '/').values[1]
    class_id = tf.string_to_number(class_str, tf.int32)
    onehot = tf.one_hot(indices=class_id,
                        depth=10,
                        name='onehot_label')
    return img, onehot
g = tf.Graph()
with g.as_default():
    filename_queue = tf.train.string_input_producer(
        tf.train.match_filenames_once('mnist_train/*/*.jpg'),
        seed=123)
    image, label = read_images_from_disk(filename_queue,
                                         image_dimensions=[28, 28, 1])
    # Assemble shuffled minibatches of 64 (image, one-hot label) pairs.
    image_batch, label_batch = tf.train.shuffle_batch([image, label],
                                                      batch_size=64,
                                                      capacity=2000,
                                                      min_after_dequeue=1000,
                                                      num_threads=1,
                                                      seed=123)

with tf.Session(graph=g) as sess:
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess,
                                           coord=coord)

    # Fetch one minibatch. Fixed here: the original printed
    # label_batch.shape — the static shape of the graph *tensor* — instead of
    # the shape of the fetched NumPy array, and misspelled 'multipe_images'.
    multiple_images, multiple_labels = sess.run([image_batch, label_batch])
    print('Image batch shape:', multiple_images.shape)
    print('Label batch shape:', multiple_labels.shape)

    coord.request_stop()
    coord.join(threads)
```
The other relevant arguments we provided to `tf.train.shuffle_batch` are described below:
- `capacity`: An integer that defines the maximum number of elements in the queue.
- `min_after_dequeue`: The minimum number of elements in the queue after a dequeue, which is used to ensure that a minimum number of data points have been loaded for shuffling.
- `num_threads`: The number of threads for enqueuing.
## 3. Use queue runners to train a neural network
In this section, we will take the concepts that were introduced in the previous sections and train a multilayer perceptron using the concepts introduced in the previous sections: the `read_images_from_disk` function, a filename queue, and the `tf.train.shuffle_batch` function.
```
##########################
### SETTINGS
##########################

# Hyperparameters
learning_rate = 0.1
batch_size = 128
n_epochs = 15
# 45000 is presumably the training-set size after the validation split —
# TODO confirm against helper.mnist_export_to_jpg.
n_iter = n_epochs * (45000 // batch_size)

# Architecture
n_hidden_1 = 128
n_hidden_2 = 256
height, width = 28, 28
n_classes = 10


##########################
### GRAPH DEFINITION
##########################

g = tf.Graph()
with g.as_default():
    tf.set_random_seed(123)

    # Input data: the queue pipeline from read_images_from_disk, flattened
    # to 784-dim vectors for the MLP.
    filename_queue = tf.train.string_input_producer(
        tf.train.match_filenames_once('mnist_train/*/*.jpg'),
        seed=123)
    image, label = read_images_from_disk(filename_queue,
                                         image_dimensions=[28, 28, 1])
    image = tf.reshape(image, (width*height,))
    image_batch, label_batch = tf.train.shuffle_batch([image, label],
                                                      batch_size=batch_size,
                                                      capacity=2000,
                                                      min_after_dequeue=1000,
                                                      num_threads=1,
                                                      seed=123)

    # placeholder_with_default: the queue feeds these during training, but
    # new data can be supplied via feed_dict at inference time (next section).
    tf_images = tf.placeholder_with_default(image_batch,
                                            shape=[None, 784],
                                            name='images')
    tf_labels = tf.placeholder_with_default(label_batch,
                                            shape=[None, 10],
                                            name='labels')

    # Model parameters
    weights = {
        'h1': tf.Variable(tf.truncated_normal([height*width, n_hidden_1], stddev=0.1)),
        'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)),
        'out': tf.Variable(tf.truncated_normal([n_hidden_2, n_classes], stddev=0.1))
    }
    biases = {
        'b1': tf.Variable(tf.zeros([n_hidden_1])),
        'b2': tf.Variable(tf.zeros([n_hidden_2])),
        'out': tf.Variable(tf.zeros([n_classes]))
    }

    # Multilayer perceptron: two ReLU hidden layers + linear output (logits).
    layer_1 = tf.add(tf.matmul(tf_images, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']

    # Loss and optimizer
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=tf_labels)
    cost = tf.reduce_mean(loss, name='cost')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train = optimizer.minimize(cost, name='train')

    # Prediction
    prediction = tf.argmax(out_layer, 1, name='prediction')
    correct_prediction = tf.equal(tf.argmax(label_batch, 1), tf.argmax(out_layer, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

with tf.Session(graph=g) as sess:
    # local variables for match_filenames_once, global for the model weights
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    saver0 = tf.train.Saver()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    avg_cost = 0.
    iter_per_epoch = n_iter // n_epochs
    epoch = 0
    for i in range(n_iter):
        # NOTE: rebinds the Python name `cost` (previously the cost tensor) to
        # the fetched float; harmless here because ops are fetched by name.
        _, cost = sess.run(['train', 'cost:0'])
        avg_cost += cost
        if not i % iter_per_epoch:
            epoch += 1
            avg_cost /= iter_per_epoch
            print("Epoch: %03d | AvgCost: %.3f" % (epoch, avg_cost))
            avg_cost = 0.

    coord.request_stop()
    coord.join(threads)

    # Persist the trained model for the inference examples below.
    saver0.save(sess, save_path='./mlp')
```
After looking at the graph above, you probably wondered why we used [`tf.placeholder_with_default`](https://www.tensorflow.org/api_docs/python/tf/placeholder_with_default) to define the two placeholders:
```python
tf_images = tf.placeholder_with_default(image_batch,
shape=[None, 784],
name='images')
tf_labels = tf.placeholder_with_default(label_batch,
shape=[None, 10],
name='labels')
```
In the training session above, these placeholders are being ignored if we don't feed them via a session's `feed_dict`, or in other words "[A `tf.placeholder_with_default` is a] placeholder op that passes through input when its output is not fed" (https://www.tensorflow.org/api_docs/python/tf/placeholder_with_default).
However, these placeholders are useful if we want to feed new data to the graph and make predictions after training as in a real-world application, which we will see in the next section.
## 4. Feeding new datapoints through placeholders
To demonstrate how we can feed new data points to the network that are not part of the training queue, let's use the test dataset and load the images into Python and pass it to the graph using a `feed_dict`:
```
import matplotlib.image as mpimg
import numpy as np
import glob

# Collect all test-image paths; the class label is the subdirectory name
# ('mnist_test/<label>/<file>.jpg' -> path component index 1).
img_paths = np.array([p for p in glob.iglob('mnist_test/*/*.jpg')])
labels = np.array([int(path.split('/')[1]) for path in img_paths])

with tf.Session() as sess:
    # Restore the graph definition and weights saved by the training session.
    saver1 = tf.train.import_meta_graph('./mlp.meta')
    saver1.restore(sess, save_path='./mlp')

    num_correct = 0
    cnt = 0
    for path, lab in zip(img_paths, labels):
        cnt += 1
        image = mpimg.imread(path)
        image = image.reshape(1, -1)  # flatten to (1, 784) to match the 'images' placeholder
        # Feed through the placeholder_with_default, bypassing the training queue.
        pred = sess.run('prediction:0',
                        feed_dict={'images:0': image})
        num_correct += int(lab == pred[0])
    acc = num_correct / cnt * 100
    print('Test accuracy: %.1f%%' % acc)
```
| github_jupyter |
# SQL TO KQL Conversion (Experimental)
The `sql_to_kql` module is a simple converter to KQL based on [moz_sql_parser](https://github.com/DrDonk/moz-sql-parser).
It is an experimental feature built to help us convert a few queries but we
thought that it was useful enough to include in MSTICPy.
You must have msticpy installed along with the moz_sql_parser package to run this notebook:
```
%pip install --upgrade msticpy[sql2kql]
```
It supports a subset of ANSI SQL-92 which includes the following:
- SELECT (including column renaming and functions)
- FROM (including from subquery)
- WHERE (common string and int operations, LIKE, some common functions)
- LIMIT
- UNION, UNION ALL
- JOIN - only tested for relatively simple join expressions
- GROUP BY
- SQL Comments (ignored)
It does not support HAVING, multiple SQL statements or anything complex like Common Table Expressions.
It does support a few additional Spark SQL extensions like RLIKE.
## Caveat Emptor!
This module is included in MSTICPy in the hope that it might be useful to others.
We do not intend to expand its capabilities.
It is also not guaranteed to produce perfectly-executing KQL - there will likely
be things that you have to fix up in the output query.
You will, for example, nearly always need to change
the names of the fields used since the source data tables are unlikely
to exactly match the schema of your Kusto/Azure Sentinel target.
The module does include an elementary table name mapping function that we
demonstrate below.
```
from pathlib import Path
import os
import sys
import warnings
from IPython.display import display, HTML, Markdown

# Initialize the msticpy notebook environment (imports, options, namespace).
from msticpy.nbtools import nbinit
nbinit.init_notebook(namespace=globals())

# The experimental SQL -> KQL converter demonstrated below.
from msticpy.data.sql_to_kql import sql_to_kql
```
## Simple SQL Query
```
# Plain ANSI SQL exercising DISTINCT, BETWEEN, LIKE, IN and LIMIT.
sql = """
SELECT DISTINCT Message, Otherfield
FROM apt29Host
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID BETWEEN 1 AND 10
AND LOWER(ParentImage) LIKE '%explorer.exe'
AND EventID IN ('4', '5', '6')
AND LOWER(Image) LIKE "3aka3%"
LIMIT 10
"""
# Convert and show the resulting KQL.
kql = sql_to_kql(sql)
print(kql)
```
## SQL Joins
```
# Join of two subqueries plus GROUP BY / ORDER BY; the '--FROM A' line is a
# SQL comment and is ignored by the converter.
sql="""
SELECT DISTINCT Message, Otherfield, COUNT(DISTINCT EventID)
FROM (SELECT EventID, ParentImage, Image, Message, Otherfield FROM apt29Host) as A
--FROM A
INNER JOIN (Select Message, evt_id FROM MyTable ) on MyTable.Message == A.Message and MyTable.evt_id == A.EventID
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND LOWER(ParentImage) LIKE "%explorer.exe"
AND LOWER(Image) RLIKE ".*3aka3%"
GROUP BY EventID
ORDER BY Message DESC, Otherfield
LIMIT 10
"""
kql = sql_to_kql(sql)
print(kql)
```
## Table Renaming
```
# Same query as above, with the source table names mapped to Azure Sentinel
# tables via the table_map argument.
sql="""
SELECT DISTINCT Message, Otherfield, COUNT(DISTINCT EventID)
FROM (SELECT EventID, ParentImage, Image, Message, Otherfield FROM apt29Host) as A
INNER JOIN (Select Message, evt_id FROM MyTable ) on MyTable.Message == A.Message and MyTable.evt_id == A.EventID
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND LOWER(ParentImage) LIKE "%explorer.exe"
AND LOWER(Image) RLIKE ".*3aka3%"
GROUP BY EventID
ORDER BY Message DESC, Otherfield
LIMIT 10
"""
table_map = {"apt29Host": "SecurityEvent", "MyTable": "SigninLogs"}
kql = sql_to_kql(sql, table_map)
print(kql)
```
## Join with Aliases
```
# Self-join with table aliases (a / b) on the parent-process GUID.
sql="""
SELECT Message
FROM apt29Host a
INNER JOIN (
SELECT ProcessGuid
FROM apt29Host
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND LOWER(ParentImage) RLIKE '.*partial_string.*'
AND LOWER(Image) LIKE '%cmd.exe'
) b
ON a.ParentProcessGuid = b.ProcessGuid
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND LOWER(Image) LIKE '%powershell.exe'
"""
kql = sql_to_kql(sql, table_map)
print(kql)
```
## Unions and Group By
```
# UNION of two subqueries followed by GROUP BY / ORDER BY.
sql="""
SELECT DISTINCT Message, COUNT(Otherfield)
FROM (SELECT *
FROM (SELECT EventID, ParentImage, Image, Message, Otherfield FROM apt29Host)
UNION
SELECT DISTINCT Message, Otherfield, EventID
FROM (SELECT EventID, ParentImage, Image, Message, Otherfield FROM apt29Host) as A
INNER JOIN MyTable on MyTable.mssg = A.Message
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND LOWER(ParentImage) LIKE "%explorer.exe"
AND LOWER(Image) RLIKE ".*3aka3%"
LIMIT 10
)
GROUP BY Message
ORDER BY Message DESC, Otherfield
"""
kql = sql_to_kql(sql, table_map)
print(kql)
```
## Aliased and Calculated Select Columns
```
# Column renaming (AS) plus computed select columns inside the subquery.
sql="""
SELECT DISTINCT Message as mssg, COUNT(Otherfield)
FROM (SELECT EventID as ID, ParentImage, Image, Message,
ParentImage + Message as ParentMessage,
LOWER(Otherfield) FROM apt29Host
)
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND LOWER(ParentImage) LIKE "%explorer.exe"
"""
kql = sql_to_kql(sql, table_map)
print(kql)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/oferbaharav/tally-ai-ds/blob/eda/Ofer_Spacy_NLP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import boto3
import dask.dataframe as dd
#from sagemaker import get_execution_role
import pandas as pd

!pip install fastparquet
from fastparquet import ParquetFile

#role = get_execution_role()

# Public S3 location of the combined Yelp review dataset.
bucket='tally-ai-dspt3'
folder = 'yelp-kaggle-raw-data'
pd.set_option('display.max_columns', None)  # show every column when displaying frames
print(f"S3 Bucket is {bucket}, and Folder is {folder}")

#Loading data
data = 'final_combined.parquet.gzip'
data_location = 'https://s3.amazonaws.com/{}/{}/{}'.format(bucket, folder, data)
df = dd.read_parquet(data_location)  # lazy dask dataframe; head() triggers computation
df.head()
from flask import Flask, render_template, request, jsonify
import json
import warnings
import pandas as pd
import spacy

!pip install scattertext
import scattertext as st
from lxml import html
from requests import Session
from concurrent.futures import ThreadPoolExecutor as Executor
import requests
# from flask_cors import CORS
# from decouple import config
import re

# Small English spaCy pipeline used for the POS tagging / NER steps below.
nlp = spacy.load("en_core_web_sm")#if you run into problems here, 'Restart Runtime' and run all, it might fix things.
def customtokensize(text):
    """Split *text* into word tokens (runs of letters, digits, '_' or apostrophes)."""
    word_pattern = "[\w']+"
    return re.findall(word_pattern, str(text))
# Tokenize every review once up front.
df['tokenized_text'] = df['text'].apply(customtokensize)
df.head(2)

# Ad-hoc stopword list: filler tokens, punctuation and digits to drop before tagging.
stopwords = ['ve',"n't",'check-in','=','= =','u','want', 'u want', 'cuz','him',"i've",'on', 'her','told','ins', '1 check','I', 'i"m', 'i', ' ', 'it', "it's", 'it.','they', 'the', 'this','its', 'l','-','they','this',"don't",'the ', ' the', 'it', 'i"ve', 'i"m', '!', '1','2','3','4', '5','6','7','8','9','0','/','.',',']
def filter_stopwords(text):
    """Return the tokens of *text* with the module-level stopwords removed (order kept)."""
    return [token for token in text if token not in stopwords]
# Stopword-free token list, plus a plain-text version for the spaCy steps.
# NOTE(review): parts_of_speech_reference filters the already-filtered
# tokenized_text a second time — redundant but harmless; confirm intent.
df['tokenized_text'] = df['tokenized_text'].apply(filter_stopwords)
df['parts_of_speech_reference'] = df['tokenized_text'].apply(filter_stopwords)
df['parts_of_speech_reference'] = df['parts_of_speech_reference'].str.join(' ')
df.head(2)
def find_part_of_speech(x):
    """Return the coarse part-of-speech tag (token.pos_) for every token in *x*."""
    doc = nlp(str(x))
    return [token.pos_ for token in doc]
# Coarse POS-tag sequence for each review.
df['parts_of_speech'] = df['parts_of_speech_reference'].apply(find_part_of_speech)
df.head(2)
def extract_adjective_indexes(text):
    """Return the index of every 'ADJ' tag in the POS-tag list *text*.

    Fixed: the original removed matching tags from *text* while iterating over
    it, which mutated the caller's list, skipped adjacent elements, and used
    list.index('ADJ') — so the returned positions were first-occurrence indexes
    computed against the shrinking list rather than the real tag positions.
    """
    return [idx for idx, tag in enumerate(text) if tag == 'ADJ']
# Positions of adjective tags within each review's POS sequence.
df['adjective_positions'] = df['parts_of_speech'].apply(extract_adjective_indexes)
df.head(2)
def find_adj(x):
    """Return the spaCy tokens of *x* that are tagged as adjectives."""
    doc = nlp(str(x))
    return [token for token in doc if token.pos_ == 'ADJ']
# Adjective tokens found in each review.
df['adj_list'] = df['parts_of_speech_reference'].apply(find_adj)
df.head(2)
def find_phrases(x):
    """Collect adjective/verb + noun phrases (lists of spaCy tokens) from a review.

    Also captures a six-token context window around occurrences of the lemmas
    'wait' and 'service'. Fixed: the original placed `return adj_list` inside
    the try block, so any IndexError from the look-ahead/look-behind indexing
    near the document boundaries made the whole function return None; partial
    results are now returned instead.
    """
    adj_list = []
    doc = nlp(str(x))
    try:
        for token in range(len(doc)):
            sub_list = []
            # ADJ+NOUN or VERB+NOUN bigrams.
            if (doc[token].pos_ == 'ADJ' and doc[token+1].pos_ == 'NOUN') or (doc[token].pos_ == 'VERB' and doc[token+1].pos_ == 'NOUN'):
                sub_list.append(doc[token])
                sub_list.append(doc[token+1])
            # ADJ+ADJ+NOUN / ADJ+VERB+NOUN / ADJ+NOUN+NOUN trigrams.
            elif (doc[token].pos_ == 'ADJ' and doc[token+1].pos_ == 'ADJ' and doc[token+2].pos_ == 'NOUN') or (doc[token].pos_ == 'ADJ' and doc[token+1].pos_ == 'VERB' and doc[token+2].pos_ == 'NOUN') or (doc[token].pos_ == 'ADJ' and doc[token+1].pos_ == 'NOUN' and doc[token+2].pos_ == 'NOUN'):
                sub_list.append(doc[token])
                sub_list.append(doc[token+1])
                sub_list.append(doc[token+2])
            # Context window around domain keywords.
            if (doc[token].lemma_ == 'wait'):
                sub_list.append(doc[token-2])
                sub_list.append(doc[token-1])
                sub_list.append(doc[token])
                sub_list.append(doc[token+1])
                sub_list.append(doc[token+2])
                sub_list.append(doc[token+3])
            if (doc[token].lemma_ == 'service'):
                sub_list.append(doc[token-2])
                sub_list.append(doc[token-1])
                sub_list.append(doc[token])
                sub_list.append(doc[token+1])
                sub_list.append(doc[token+2])
                sub_list.append(doc[token+3])
            if len(sub_list) != 0:
                adj_list.append(sub_list)
    except IndexError:
        # Tokens near the boundaries cannot provide full context; keep what we have.
        pass
    return adj_list
# Phrase lists per review.
df['adj_noun_phrases'] = df['parts_of_speech_reference'].apply(find_phrases)
df['adj_noun_phrases'].head(10)

# Quick spaCy NER sanity check on a stock example sentence.
doc = nlp("Apple is looking at buying U.K. startup for $1 billion")
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
def find_money(x):
    """Return the named entities in *x* that spaCy labels as MONEY."""
    doc = nlp(str(x))
    return [ent for ent in doc.ents if ent.label_ == 'MONEY']
# MONEY entities mentioned in each review.
df['money_list'] = df['parts_of_speech_reference'].apply(find_money)
df.head(2)
def find_noun_chunks(x):
    """Return spaCy's noun chunks for *x* as a list."""
    doc = nlp(str(x))
    return [chunk for chunk in doc.noun_chunks]
df['noun_chunks'] = df['parts_of_speech_reference'].apply(find_noun_chunks)

# Dependency-parse demo (the same loop is printed twice in the original notebook).
doc = nlp("Autonomous cars shift insurance liability toward manufacturers")
for token in doc:
    print(token.text, token.dep_, token.head.text, token.head.pos_,
          [child for child in token.children])
for token in doc:
    print(token.text, token.dep_, token.head.text, token.head.pos_,
          [child for child in token.children])

# Build a scattertext corpus keyed on the star rating of each review.
corpus = st.CorpusFromPandas(df,
                             category_col='stars_review',
                             text_col='text',
                             nlp=nlp).build()

term_freq_df = corpus.get_term_freq_df()
# Scaled F-scores: how characteristic each term is of 5-star vs 1-star reviews.
term_freq_df['highratingscore'] = corpus.get_scaled_f_scores('5')
term_freq_df['poorratingscore'] = corpus.get_scaled_f_scores('1')

dh = term_freq_df.sort_values(by='highratingscore', ascending=False)
dh = dh[['highratingscore', 'poorratingscore']]
dh = dh.reset_index(drop=False)
dh = dh.rename(columns={'highratingscore': 'score'})
dh = dh.drop(columns='poorratingscore')
positive_df = dh.head(10)  # most 5-star-flavored terms
negative_df = dh.tail(10)  # most 1-star-flavored terms
# word_df = pd.concat([positive_df, negative_df])
# word_df

# JSON-ready payload of top positive/negative terms with their scores.
results = {'positive': [{'term': pos_term, 'score': pos_score} for pos_term, pos_score in zip(positive_df['term'], positive_df['score'])], 'negative': [{'term': neg_term, 'score': neg_score} for neg_term, neg_score in zip(negative_df['term'], negative_df['score'])]}
results
```
| github_jupyter |
```
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#all_slow
#export
from fastai.basics import *
from fastai.learner import Callback
#hide
from nbdev.showdoc import *
#default_exp callback.azureml
```
# AzureML Callback
Track fastai experiments with the azure machine learning plattform.
## Prerequisites
Install the azureml SDK:
```python
pip install azureml-core
```
## How to use it?
Import and use `AzureMLCallback` during model fitting.
If you are submitting your training run with azureml SDK [ScriptRunConfig](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targets), the callback will automatically detect the run and log metrics. For example:
```python
from fastai.callback.azureml import AzureMLCallback
learn.fit_one_cycle(epoch, lr, cbs=AzureMLCallback())
```
If you are running an experiment manually and just want to have interactive logging of the run, use azureml's `Experiment.start_logging` to create the interactive `run`, and pass that into `AzureMLCallback`. For example:
```python
from azureml.core import Experiment
experiment = Experiment(workspace=ws, name='experiment_name')
run = experiment.start_logging(outputs=None, snapshot_directory=None)
from fastai.callback.azureml import AzureMLCallback
learn.fit_one_cycle(epoch, lr, cbs=AzureMLCallback(run))
```
If you are running an experiment on your local machine (i.e. not using `ScriptRunConfig` and not passing an azureml `run` into the callback), it will recognize that there is no AzureML run to log to, and print the log attempts instead.
To save the model weights, use the usual fastai methods and save the model to the `outputs` folder, which is a "special" (for Azure) folder that is automatically tracked in AzureML.
As it stands, note that if you pass the callback into your `Learner` directly, e.g.:
```python
learn = Learner(dls, model, cbs=AzureMLCallback())
```
…some `Learner` methods (e.g. `learn.show_results()`) might add unwanted logging into your azureml experiment runs. Adding further checks into the callback should help eliminate this – another PR needed.
```
#export
from azureml.core.run import Run
# export
class AzureMLCallback(Callback):
    "Log losses, metrics, model architecture summary to AzureML"
    # Run after the Recorder so recorder.log is populated for after_epoch.
    order = Recorder.order+1

    def __init__(self, azurerun=None):
        # Use the supplied run for interactive logging; otherwise pick up the
        # ambient run created by ScriptRunConfig (an offline run that just
        # prints log attempts when executed outside AzureML).
        if azurerun:
            self.azurerun = azurerun
        else:
            self.azurerun = Run.get_context()

    def before_fit(self):
        self.azurerun.log("n_epoch", self.learn.n_epoch)
        self.azurerun.log("model_class", str(type(self.learn.model)))

        try:
            # 'outputs' is the folder AzureML tracks automatically.
            summary_file = Path("outputs") / 'model_summary.txt'
            with summary_file.open("w") as f:
                f.write(repr(self.learn.model))
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Summary logging stays best-effort.
            print('Did not log model summary. Check if your model is PyTorch model.')

    def after_batch(self):
        # log loss and opt.hypers
        if self.learn.training:
            self.azurerun.log('batch__loss', self.learn.loss.item())
            self.azurerun.log('batch__train_iter', self.learn.train_iter)
            # NOTE(review): the group index `i` is unused, so hyperparameters
            # from different parameter groups log under the same metric name.
            for i, h in enumerate(self.learn.opt.hypers):
                for k, v in h.items():
                    self.azurerun.log(f'batch__opt.hypers.{k}', v)

    def after_epoch(self):
        # log metrics recorded by fastai's Recorder
        for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
            if n not in ['epoch', 'time']:
                self.azurerun.log(f'epoch__{n}', v)
            if n == 'time':
                # split elapsed time string, then convert into 'seconds' to log
                m, s = str(v).split(':')
                elapsed = int(m)*60 + int(s)
                self.azurerun.log(f'epoch__{n}', elapsed)
| github_jupyter |
```
# import libraries here; add more as necessary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline

# Stack Overflow 2019 developer survey responses.
df2019 = pd.read_csv('./2019survey_results_public.csv', header = 0)
df2019.head()
df2019.describe()
def compare_plt(column, n, df):
    # NOTE(review): both subplots draw the same `df` and the loop variable is
    # unused, so the 'Few missing'/'High missing' titles are misleading —
    # presumably this was meant to take two dataframes; confirm intent.
    fig, axs = plt.subplots(n,2, figsize=(15, 10))
    fig.subplots_adjust(hspace =2 , wspace=.2)
    axs = axs.ravel()
    for i in range(n):
        plt.subplot(121)
        sns.countplot(x = column, data = df).set_title('Few missing')
        plt.subplot(122)
        sns.countplot(x = column, data = df).set_title('High missing')
# First-pass EDA: count plots for variables of interest, histograms of the
# numeric columns, and a correlation heatmap.
sns.countplot(x = df2019['MainBranch'].value_counts(), data = df2019['MainBranch'])
interested_var = ['MainBranch', 'YearsCodePro', 'Age1stCode', 'CareerSat', 'ConvertedComp', 'WorkWeekHrs', 'Age', 'EdLevel']
for var in interested_var:
    sns.countplot(x = var, data = df2019)
df2019.hist(figsize = (10, 10))
sns.heatmap(df2019.corr(), annot = True, fmt = '.2f')
```
To do:
- filtering out
```
df2019_rows, df2019_cols = df2019.shape
df2019_col_names = df2019.columns
print(df2019_col_names)

# Candidate columns for the analysis questions listed at the end of the notebook.
# Fixed: 'DatabaseWorkedWith''DatabaseDesireNextYear' was implicit string
# concatenation (a missing comma), producing one nonexistent column name.
col_to_keep = ['MainBranch', 'Hobbyist', 'Employment', 'Country', 'Student', 'EdLevel', 'UndergradMajor', 'DevType', 'YearsCodePro', 'CareerSat', 'ConvertedComp', 'WorkWeekHrs', 'LanguageWorkedWith', 'LanguageDesireNextYear', 'DatabaseWorkedWith', 'DatabaseDesireNextYear', 'PlatformWorkedWith',
               'PlatformDesireNextYear', 'WebFrameWorkedWith',
               'WebFrameDesireNextYear', 'MiscTechWorkedWith',
               'MiscTechDesireNextYear', 'DevEnviron', 'OpSys', 'Containers', 'Age', 'Gender', 'Ethnicity']

#schema2019 = pd.read_csv('./2019survey_results_schema.csv', header = 0)
#schema2019.head()

print(df2019.shape)
print(df2019.info())
# Dump the unique values of every column to eyeball categories and noise.
for col in df2019.columns:
    print(df2019[col].name, df2019[col].unique())
    print('-----------------------------------------------------')
def assess_missing_col(df):
    """Summarize per-column missingness of *df*.

    Prints the NaN percentage of every column that has missing data, shows a
    histogram of those percentages, and returns (missing_data, None). The
    second element is kept so existing callers that unpack two values work.

    Fixed: plt.show() was called twice — once as a statement and once inside
    `return missing_data, plt.show()` — which rendered a second, empty figure
    and returned None from the extra call anyway.
    """
    df_rows, df_cols = df.shape
    missing_num = pd.Series(df.isnull().sum(), name = 'Number of Missing')
    #all columns
    missing_per = pd.Series(missing_num/(df_rows)*100, name = '% NaN Missing')
    #only columns with missing data
    missing_data = pd.Series(missing_num[missing_num > 0]/df_rows*100, name = '% NaN Missing')
    missing_data.sort_values(inplace = True)
    print(missing_data)

    plt.hist(missing_data, bins = 50)
    plt.xlabel('Nan % in a column (%)')
    plt.ylabel('Counts')
    #plt.title('Histogram of missing value counts for each column')
    plt.grid(True)
    plt.minorticks_on()
    plt.grid(b=True, which='minor', alpha=0.2)
    plt.show()
    return missing_data, None

missing_data, plot = assess_missing_col(df2019)
#Investigate missing data using different thresholds of %NaN missing.
def investigate_nan_threshold(df, interval, start):
    '''
    This function finds how many columns have more than a certain percentage
    of data missing.
    INPUTS:
    start - the initial threshold percentage of data missing to be analyzed
    interval - the amount of increase in the analysis threshold
    if the previous threshold has at least 1 column remaining
    OUTPUTS:
    Prints the names of the columns that have more than the threshold % of
    data missing as well as the current threshold.
    '''
    threshold = start
    total_rows = df.shape[0]
    while True:
        flagged = [col for col in df.columns
                   if (df[col].isnull().sum() / total_rows) * 100 > threshold]
        if not flagged:
            break
        print('There are {} columns with more than {}% of data missing.'.format(len(flagged), threshold))
        print(flagged)
        print('--------------------------------------')
        threshold = threshold + interval
# Report columns exceeding 5%, 15%, 25%, ... missingness until none remain.
investigate_nan_threshold(df2019, 10, 5)
#Visualize all columns.
# Horizontal bar chart of NaN % per column (missing_data from assess_missing_col).
missing_data.plot(kind='barh', figsize = (7,15))
plt.xlabel('Nan % in a column (%)')
plt.ylabel('Feature')
#plt.title('Bar graph of missing value counts')
plt.grid(True)
plt.show()
#function for dropping all columns above a certain % threshold and
#returns it as a new df called df_dropped
def drop_missing_cols(df, threshold):
    """Return a copy of *df* without the columns whose NaN percentage exceeds *threshold*.

    The input frame is left untouched. Improved: a single vectorized drop
    replaces the original loop that copied the frame and dropped columns
    one at a time in place.
    """
    most_missing_cols = list(df.columns[df.isnull().mean()*100 > threshold])
    return df.drop(columns=most_missing_cols)
# Drop every column with more than 40% missing values.
df2019_dropped = drop_missing_cols(df2019, 40)
df2019_dropped.head()

#currently useless
df2019_dropped['JobSat'].isnull().mean()

# Compensation distribution and grouped means (exploratory scratch work).
df2019['ConvertedComp'].hist(bins = 50, figsize = (10, 6))
df2019_convertedcomp = df2019.groupby(['ConvertedComp']).mean()
#ConvertedComp 55823 non-null float64
#WorkWeekHrs
print(df2019_convertedcomp)

#WHY DOESN'T THIS WORK
# NOTE(review): selecting 'JobSat' after .mean() fails if the column is
# non-numeric (pandas drops it from the aggregation) — presumably why the
# author flagged it; verify the column dtype.
df2019.groupby(['Hobbyist']).mean()['JobSat']
df2019.groupby(['Hobbyist']).mean()['YearsCode'].sort_values()
#only works for simple, mutually exclusive categories
def evaluate_col(df, col, plot_type):
    """Print value counts for *col* and plot them as a percentage of its non-null rows."""
    non_null = df[df[col].isnull() == 0].shape[0]
    counts = pd.Series(df[col].value_counts())
    print(counts)
    (counts / non_null * 100).plot(kind=plot_type)
# Distribution of undergraduate majors among respondents who answered.
evaluate_col(df2019, 'UndergradMajor', 'bar')
def eval_complex_col(df, col, plot_type):
    """Break a multi-select (';'-separated) survey column into its individual
    categories, print the category counts, and plot them as a percentage of
    the column's non-null respondents."""
    respondents = df[df[col].isnull() == 0].shape[0]
    col_df = df[col].value_counts().reset_index()
    col_df.rename(columns={'index': col, col: 'count'}, inplace=True)
    answer_series = pd.Series(col_df[col].unique()).dropna()
    split_answers = answer_series.str.split(pat=';').tolist()
    # Flatten the per-answer category lists into one list of categories.
    all_categories = [category for answer in split_answers for category in answer]
    category_frame = pd.DataFrame(all_categories)
    col_vals = category_frame[0].unique()
    cat_count = category_frame[0].value_counts()
    print('Unique Categories: ', col_vals)
    print(cat_count)
    (cat_count / respondents * 100).plot(kind=plot_type, figsize=(7, 10))
    plt.xlabel('Proportion (%)')
    plt.ylabel(col)
    plt.grid(True)
    plt.show()
'''
'''
# Multi-select columns:
eval_complex_col(df2019, 'LanguageWorkedWith', 'bar')
eval_complex_col(df2019, 'PlatformWorkedWith', 'bar')
eval_complex_col(df2019, 'LastInt', 'bar')
# NOTE(review): the empty-string column name below will raise a KeyError —
# presumably an unfinished placeholder.
eval_complex_col(df2019, '', 'bar')
# Single-select columns:
evaluate_col(df2019, 'SocialMedia', 'bar')
evaluate_col(df2019, 'EdLevel', 'bar')
evaluate_col(df2019, 'Gender', 'bar')
evaluate_col(df2019, 'CareerSat', 'bar')
evaluate_col(df2019, 'JobSat', 'bar')
evaluate_col(df2019, 'Ethnicity', 'pie')
evaluate_col(df2019, 'WorkWeekHrs', 'hist')
eval_complex_col(df2019, 'WebFrameWorkedWith', 'bar')
eval_complex_col(df2019, 'MiscTechWorkedWith', 'bar')
evaluate_col(df2019, 'Student', 'pie')
eval_complex_col(df2019, 'DevType', 'bar')
#divide the data into two subsets based on the number of missing values in each row
row_threshold = 45
df2019_lowmiss = df2019_dropped[df2019_dropped.isnull().sum(axis=1) < row_threshold].reset_index(drop=True)
df2019_highmiss = df2019_dropped[df2019_dropped.isnull().sum(axis=1) >= row_threshold].reset_index(drop=True)
print(df2019_lowmiss.shape)
print(df2019_highmiss.shape)

rowmissing_per = int(df2019_highmiss.shape[0]/df2019_dropped.shape[0]*100)
# NOTE(review): the message hard-codes "8 or more" while row_threshold above
# is 45 — keep the two in sync.
print('{}% of the rows have 8 or more missing values'.format(rowmissing_per))
def compare_plt(column, n, df):
    # Draw side-by-side count plots intended to compare the low-missing vs
    # high-missing subsets for `column`.
    # NOTE(review): both countplots are fed the same `df`, so the
    # 'Few missing' and 'High missing' panels show identical data —
    # presumably one panel should receive the other subset
    # (e.g. df2019_highmiss); confirm the intended inputs and fix.
    # NOTE(review): an n-by-2 subplot grid is created, but the loop always
    # draws into the fixed positions 121/122, overwriting the same two axes
    # on every iteration — the grid and `axs` are effectively unused.
    fig, axs = plt.subplots(n,2, figsize=(10, 5))
    fig.subplots_adjust(hspace =2 , wspace=.2)
    axs = axs.ravel()
    for i in range(n):
        plt.subplot(121)
        sns.countplot(x = column, data = df).set_title('Few missing')
        plt.subplot(122)
        sns.countplot(x = column, data = df).set_title('High missing')
```
# Questions
Is educational level related to salary?
What are the biggest factors relating to salary?
Languages related to salary?
Type of developer related to salary?
What languages, platforms, etc are people using?
What languages are people likely to learn together?
# Variables of interest:
ConvertedComp - annual compensation
WorkWeekHrs - hours/week worked
LanguageWorkedWith
DatabaseWorkedWith
PlatformWorkedWith
WebFrameWorkedWith
MiscTechWorkedWith
DevEnviron
OpSys
LastInt - "In your most recent successful job interview (resulting in a job offer), you were asked to... (check all that apply)"
JobSat
CareerSat
YearsCodePro - How many years have you coded professionally (as a part of your work)?
DevType
OrgSize
EduOther
UndergradMajor
EdLevel
Country
Age
Gender
Ethnicity
```
# Print the full question text behind every 2019 survey column.
for x in schema2019['QuestionText']:
    print(x)
```
Randomized respondent ID number (not in order of survey response time)
Which of the following options best describes you today? Here, by "developer" we mean "someone who writes code."
Do you code as a hobby?
How often do you contribute to open source?
How do you feel about the quality of open source software (OSS)?
Which of the following best describes your current employment status?
In which country do you currently reside?
Are you currently enrolled in a formal, degree-granting college or university program?
Which of the following best describes the highest level of formal education that you’ve completed?
What was your main or most important field of study?
Which of the following types of non-degree education have you used or participated in? Please select all that apply.
Approximately how many people are employed by the company or organization you work for?
Which of the following describe you? Please select all that apply.
Including any education, how many years have you been coding?
At what age did you write your first line of code or program? (E.g., webpage, Hello World, Scratch project)
How many years have you coded professionally (as a part of your work)?
Overall, how satisfied are you with your career thus far?
How satisfied are you with your current job? (If you work multiple jobs, answer for the one you spend the most hours on.)
How confident are you that your manager knows what they’re doing?
Do you believe that you need to be a manager to make more money?
Do you want to become a manager yourself in the future?
Which of the following best describes your current job-seeking status?
When was the last time that you took a job with a new employer?
In your most recent successful job interview (resulting in a job offer), you were asked to... (check all that apply)
Have you ever been asked to solve FizzBuzz in an interview?
Imagine that you are deciding between two job offers with the same compensation, benefits, and location. Of the following factors, which 3 are MOST important to you?
Think back to the last time you updated your resumé, CV, or an online profile on a job site. What is the PRIMARY reason that you did so?
Which currency do you use day-to-day? If your answer is complicated, please pick the one you're most comfortable estimating in.
Which currency do you use day-to-day? If your answer is complicated, please pick the one you're most comfortable estimating in.
What is your current total compensation (salary, bonuses, and perks, before taxes and deductions), in `CurrencySymbol`? Please enter a whole number in the box below, without any punctuation. If you are paid hourly, please estimate an equivalent weekly, monthly, or yearly salary. If you prefer not to answer, please leave the box empty.
Is that compensation weekly, monthly, or yearly?
Salary converted to annual USD salaries using the exchange rate on 2019-02-01, assuming 12 working months and 50 working weeks.
On average, how many hours per week do you work?
How structured or planned is your work?
Of these options, what are your greatest challenges to productivity as a developer? Select up to 3:
How often do you work remotely?
Where would you prefer to work?
For the specific work you do, and the years of experience you have, how do you rate your own level of competence?
Do you review code as part of your work?
On average, how many hours per week do you spend on code review?
Does your company regularly employ unit tests in the development of their products?
How does your company make decisions about purchasing new technology (cloud, AI, IoT, databases)?
What level of influence do you, personally, have over new technology purchases at your organization?
Which of the following programming, scripting, and markup languages have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the language and want to continue to do so, please check both boxes in that row.)
Which of the following programming, scripting, and markup languages have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the language and want to continue to do so, please check both boxes in that row.)
Which of the following database environments have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the database and want to continue to do so, please check both boxes in that row.)
Which of the following database environments have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the database and want to continue to do so, please check both boxes in that row.)
Which of the following platforms have you done extensive development work for over the past year? (If you both developed for the platform and want to continue to do so, please check both boxes in that row.)
Which of the following platforms have you done extensive development work for over the past year? (If you both developed for the platform and want to continue to do so, please check both boxes in that row.)
Which of the following web frameworks have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the framework and want to continue to do so, please check both boxes in that row.)
Which of the following web frameworks have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the framework and want to continue to do so, please check both boxes in that row.)
Which of the following other frameworks, libraries, and tools have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the technology and want to continue to do so, please check both boxes in that row.)
Which of the following other frameworks, libraries, and tools have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the technology and want to continue to do so, please check both boxes in that row.)
Which development environment(s) do you use regularly? Please check all that apply.
What is the primary operating system in which you work?
How do you use containers (Docker, Open Container Initiative (OCI), etc.)?
How is your organization thinking about or implementing blockchain technology?
Blockchain / cryptocurrency technology is primarily:
Do you think people born today will have a better life than their parents?
Are you the "IT support person" for your family?
Have you tried turning it off and on again?
What social media site do you use the most?
Do you prefer online chat or IRL conversations?
What do you call it?
To the best of your memory, when did you first visit Stack Overflow?
How frequently would you say you visit Stack Overflow?
I visit Stack Overflow to... (check all that apply)
On average, how many times a week do you find (and use) an answer on Stack Overflow?
Think back to the last time you solved a coding problem using Stack Overflow, as well as the last time you solved a problem using a different resource. Which was faster?
About how much time did you save? If you're not sure, please use your best estimate.
Do you have a Stack Overflow account?
How frequently would you say you participate in Q&A on Stack Overflow? By participate we mean ask, answer, vote for, or comment on questions.
Have you ever used or visited Stack Overflow Jobs?
Have you ever used Stack Overflow for Enterprise or Stack Overflow for Teams?
Do you consider yourself a member of the Stack Overflow community?
Compared to last year, how welcome do you feel on Stack Overflow?
Would you like to see any of the following on Stack Overflow? Check all that apply.
What is your age (in years)? If you prefer not to answer, you may leave this question blank.
Which of the following do you currently identify as? Please select all that apply. If you prefer not to answer, you may leave this question blank.
Do you identify as transgender?
Which of the following do you currently identify as? Please select all that apply. If you prefer not to answer, you may leave this question blank.
Which of the following do you identify as? Please check all that apply. If you prefer not to answer, you may leave this question blank.
Do you have any dependents (e.g., children, elders, or others) that you care for?
| github_jupyter |
```
#Importing necessary dependencies
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Show every column when displaying wide DataFrames.
pd.set_option('display.max_columns',None)
# Load the flight-price training data.
df=pd.read_excel('Data_Train.xlsx')
df.head()
df.shape
```
## Exploratory data analysis
First we will look for missing values, then explore the relationships between the different features, and finally visualize the data to see how they relate to one another.
```
# Count missing values per column.
df.isnull().sum()
df.info()
#Describe the data
df.describe()
```
Since there is only one numerical feature we will try to analyze the categorical data and see the relationship with the price
```
# Collect the object-dtype (categorical) columns for inspection.
feature_categorical=[feature for feature in df.columns if df[feature].dtypes=='O']
feature_categorical
# NOTE(review): dropna() removes every row with ANY missing value; confirm
# this blanket drop is intended rather than per-column handling/imputation.
df.dropna(inplace=True)
```
## Lets change the date time format
```
#train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
# Extract day and month from the journey date.
# NOTE(review): no explicit format is given, so pandas infers it; confirm the
# dates parse day-first (the commented line above used "%d/%m/%Y").
df['Day_of_Journey']=pd.to_datetime(df['Date_of_Journey']).dt.day
df['Journey_Month']=pd.to_datetime(df['Date_of_Journey']).dt.month
# Now we will extract the hour and minutes in Arrival time
df["Arrival_hour"]=pd.to_datetime(df['Arrival_Time']).dt.hour
df['Arrival_minute']=pd.to_datetime(df['Arrival_Time']).dt.minute
df.head()
# The raw date/time strings are no longer needed once the parts are extracted.
df.drop(['Date_of_Journey','Arrival_Time'],axis=1,inplace=True)
df.head()
# Same extraction for the departure time.
df['Dep_hour']=pd.to_datetime(df['Dep_Time']).dt.hour
df['Dep_min']=pd.to_datetime(df['Dep_Time']).dt.minute
df.drop(['Dep_Time'],inplace=True,axis=1)
df.head()
# Normalise the 'Duration' strings to the "<H>h <M>m" shape: entries with
# only hours get a trailing '0m', entries with only minutes a leading '0h'.
duration=list(df['Duration'])
duration[0].split(" ")
for num in range(len(duration)):
    if len(duration[num].split(" "))!=2:
        if 'h' in duration[num]:
            duration[num]=duration[num].strip()+'0m'
        else:
            duration[num]='0h'+duration[num]
# Split each normalised duration into integer hour and minute columns.
duration_hour=[]
duration_min=[]
for num in range(len(duration)):
    duration_hour.append(int(duration[num].split("h")[0]))
    duration_min.append(int(duration[num].split("h")[1].split('m')[0].strip()))
df['Duration_hour']=duration_hour
df['Duration_min']=duration_min
df.drop('Duration',axis=1,inplace=True)
df.head()
```
# Handling the categorical data
```
# Distribution of airlines and their relationship with price.
airway=df['Airline']
df['Airline'].value_counts()
plt.figure(figsize=(18,8))
sns.boxplot(x='Airline',y='Price',data=df.sort_values('Price',ascending=False))
```
# Encoding categorical data into numerical
Since the airlines are not ordinal, we will one-hot encode the data using the `get_dummies` function in pandas.
```
# One-hot encode Airline; drop_first avoids the dummy-variable trap.
Airline=pd.get_dummies(df['Airline'],drop_first=True)
Airline.head()
df['Source'].value_counts()
# Source vs Price
plt.figure(figsize=(14,8))
sns.boxplot(x='Source',y='Price',data=df.sort_values('Price',ascending=False))
# Now we one hot encode the source feature using same method used above
Source=df['Source']
Source=pd.get_dummies(Source,drop_first=False)
Source.head()
# One-hot encode Destination and prefix the columns so they do not clash
# with the Source dummies (both share city names).
Destination=df['Destination']
Destination=pd.get_dummies(Destination,drop_first=False)
Destination=Destination.rename(columns={"Banglore":"Dest_Banglore",'Cochin':'Dest_Cochin',"Delhi":'Dest_Delhi','Hyderabad':'Dest_Hyderabad',"Kolkata":'Dest_Kolkata','New Delhi':'Dest_NewDelhi'})
df.head()
df["Route"].head()
df['Total_Stops'].value_counts()
# Since the route is related to no of stops we can drop that feature
# Now we can change no of stops using ordinal encoding since it is ordinal data
df['Total_Stops'].replace({'non-stop':0,'1 stop':1,'2 stops':2,'3 stops':3,'4 stops':4},inplace=True)
# Since the Airline, Source and Destination are one hot encoded and
# we can determine the route by seeing the no of stops we can drop those features
df.drop(['Airline','Source','Destination','Route'],inplace=True,axis=1)
df.head()
df['Additional_Info'].value_counts()
```
## Since Additional_Info is mostly "No info", we can drop this feature
```
df.drop('Additional_Info',axis=1,inplace=True)
df.head()
df_concat=pd.concat([df,Airline,Source,Destination],axis=1)
df_concat.head()
```
# Let's repeat above for the test data
```
df_test=pd.read_excel('Test_set.xlsx')
df_test.head()
# duration=list(df['Duration'])
# duration[0].split(" ")
# for num in range(len(duration)):
# if len(duration[num].split(" "))!=2:
# if 'h' in duration[num]:
# duration[num]=duration[num].strip()+'0m'
# else:
# duration[num]='0h'+duration[num]
# duration_hour=[]
# duration_min=[]
# for num in range(len(duration)):
# duration_hour.append(int(duration[num].split("h")[0]))
# duration_min.append(int(duration[num].split("h")[1].split('m')[0].strip()))
# df_test['Duration_hour']=duration_hour
# df_test['Duration_min']=duration_min
# df_test.drop('Duration',axis=1,inplace=True)
# Airline=pd.get_dummies(df_test['Airline'],drop_first=True)
# # Now we one hot encode the source feature using same method used above
# Source=df_test['Source']
# Source=pd.get_dummies(Source,drop_first=False)
# Destination=df['Destination']
# Destination=pd.get_dummies(Destination,drop_first=False)
# Since the route is related to no of stops we can drop that feature
# Now we can change no of stops using ordinal encoding since it is ordinal data
# df_test['Total_Stops'].replace({'non-stop':0,'1 stop':1,'2 stops':2,'3 stops':3,'4 stops':4},inplace=True)
# df_test.drop(['Airline','Source','Destination','Route'],inplace=True,axis=1)
# df_test.drop('Additional_Info',axis=1,inplace=True)
# df_concat1=pd.concat([df_test,Airline,Source,Destination],axis=1)
# Repeat the training-set feature engineering on the test set.
# NOTE(review): get_dummies is fitted separately on the test data, so it can
# yield a different column set/order than the training dummies (categories
# present in only one of the two sets); presumably the test frame should be
# reindexed to the training columns before prediction — confirm.
df_test['Day_of_Journey']=pd.to_datetime(df_test['Date_of_Journey']).dt.day
df_test['Journey_Month']=pd.to_datetime(df_test['Date_of_Journey']).dt.month
# Now we will extract the hour and minutes in Arrival time
df_test["Arrival_hour"]=pd.to_datetime(df_test['Arrival_Time']).dt.hour
df_test['Arrival_minute']=pd.to_datetime(df_test['Arrival_Time']).dt.minute
df_test.drop(['Date_of_Journey','Arrival_Time'],axis=1,inplace=True)
df_test['Dep_hour']=pd.to_datetime(df_test['Dep_Time']).dt.hour
df_test['Dep_min']=pd.to_datetime(df_test['Dep_Time']).dt.minute
# Normalise and split 'Duration' exactly as for the training data.
duration=list(df_test['Duration'])
duration[0].split(" ")
for num in range(len(duration)):
    if len(duration[num].split(" "))!=2:
        if 'h' in duration[num]:
            duration[num]=duration[num].strip()+'0m'
        else:
            duration[num]='0h'+duration[num]
duration_hour=[]
duration_min=[]
for num in range(len(duration)):
    duration_hour.append(int(duration[num].split("h")[0]))
    duration_min.append(int(duration[num].split("h")[1].split('m')[0].strip()))
df_test['Duration_hour']=duration_hour
df_test['Duration_min']=duration_min
df_test.drop('Duration',axis=1,inplace=True)
Airline=pd.get_dummies(df_test['Airline'],drop_first=True)
# Now we one hot encode the source feature using same method used above
Source=df_test['Source']
Source=pd.get_dummies(Source,drop_first=False)
Destination=df_test['Destination']
Destination=pd.get_dummies(Destination,drop_first=False)
Destination=Destination.rename(columns={"Banglore":"Dest_Banglore",'Cochin':'Dest_Cochin',"Delhi":'Dest_Delhi','Hyderabad':'Dest_Hyderabad',"Kolkata":'Dest_Kolkata','New Delhi':'Dest_NewDelhi'})
# Since the route is related to no of stops we can drop that feature
# Now we can change no of stops using ordinal encoding since it is ordinal data
df_test['Total_Stops'].replace({'non-stop':0,'1 stop':1,'2 stops':2,'3 stops':3,'4 stops':4},inplace=True)
df_test.drop(['Airline','Source','Destination','Route'],inplace=True,axis=1)
df_test.drop(['Additional_Info','Dep_Time'],axis=1,inplace=True)
df_concat1=pd.concat([df_test,Airline,Source,Destination],axis=1)
df_concat1.head()
df_concat.head()
df_concat1.head()
df_concat.shape
df_test['Dep_min']
# 'Trujet' is dropped from the training features — presumably because that
# airline does not occur in the test set, keeping the two feature sets the
# same width; confirm against df_concat1's columns.
# NOTE(review): model.predict(df_concat1) below assumes df_concat1 has the
# same columns, in the same order, as Xtr; verify after the separate
# get_dummies calls above.
Xtr=df_concat.drop(['Price','Trujet'],axis=1)
Ytr=df["Price"]
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(Xtr,Ytr,test_size=0.2,random_state=5)
x_test.shape
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
# Fit a gradient-boosted tree regressor with default hyperparameters.
from xgboost import XGBRegressor
model=XGBRegressor()
model.fit(x_train,y_train)
y_pre=model.predict(x_test)
y_pre
y_test
# R^2 on the held-out validation split.
from sklearn.metrics import r2_score
r2_score(y_test,y_pred=y_pre)
# Predictions for the unlabeled competition test set.
model.predict(df_concat1)
# Predicted vs actual prices for the validation split.
plt.figure(figsize=(14,8))
plt.scatter(y_test,y_pre,color='g')
```
| github_jupyter |
```
%run ../../main.py
%matplotlib inline
import pandas as pd
from cba.algorithms import M1Algorithm, M2Algorithm, top_rules, createCARs
from cba.data_structures import TransactionDB
import sklearn.metrics as skmetrics
df = pd.read_csv("c:/code/python/machine_learning/assoc_rules/train/lymph0.csv")
len(df)
#
#
# =========================
# Benchmark of run time as a function of the number of input rules / instances
# =========================
#
#
#
import time
target_rule_count = 50
benchmark_data = {
"rule_count": [],
"M1_duration": [],
"M2_duration": [],
"M1_accuracy": [],
"M2_accuracy": [],
"M1_output_rules": [],
"M2_output_rules": []
}
number_of_iterations = 10
directory = "c:/code/python/machine_learning/assoc_rules"
# Benchmark the M1 vs M2 classifier builders over an increasing number of
# input rules (10 .. 90,000), averaging time/accuracy/classifier size over
# `number_of_iterations` runs per rule count.
for sequence in [*range(10, 20), *range(20, 100, 10), *range(200, 1000, 100), *range(2000, 10000, 1000), *range(20000, 100000, 10000)]:
    target_rule_count = sequence
    dataset_name_benchmark = "lymph0"
    pd_ds = pd.read_csv("c:/code/python/machine_learning/assoc_rules/train/{}.csv".format(dataset_name_benchmark))
    pd_ds_test = pd.read_csv("c:/code/python/machine_learning/assoc_rules/test/{}.csv".format(dataset_name_benchmark))
    txns = TransactionDB.from_DataFrame(pd_ds, unique_transactions=True)
    txns_test = TransactionDB.from_DataFrame(pd_ds_test)
    # Ground-truth class labels of the test transactions.
    actual = list(map(lambda i: i.value, txns_test.class_labels))
    # Mine class association rules and cap them at the target count.
    rules = top_rules(txns.string_representation, appearance=txns.appeardict)
    cars = createCARs(rules)
    if len(cars) > target_rule_count:
        cars = cars[:target_rule_count]
    # --- M1: build and evaluate `number_of_iterations` times. ---
    m1t1 = time.time()
    m1accs = []
    m1rules = []
    for _ in range(number_of_iterations):
        m1 = M1Algorithm(cars, txns)
        clf = m1.build()
        pred = clf.predict_all(txns_test)
        acc = skmetrics.accuracy_score(pred, actual)
        # +1 presumably accounts for the classifier's default (fallback)
        # rule, which is not stored in clf.rules — confirm in pyARC.
        m1rules.append(len(clf.rules) + 1)
        m1accs.append(acc)
    m1t2 = time.time()
    # --- M2: identical measurement. ---
    m2t1 = time.time()
    m2accs = []
    m2rules = []
    for _ in range(number_of_iterations):
        m2 = M2Algorithm(cars, txns)
        clf = m2.build()
        pred = clf.predict_all(txns_test)
        acc = skmetrics.accuracy_score(pred, actual)
        m2rules.append(len(clf.rules) + 1)
        m2accs.append(acc)
    m2t2 = time.time()
    # Per-build averages for this rule count.
    m1duration = (m1t2 - m1t1) / number_of_iterations
    m2duration = (m2t2 - m2t1) / number_of_iterations
    m1acc = sum(m1accs) / len(m1accs)
    m2acc = sum(m2accs) / len(m2accs)
    m1rules = sum(m1rules) / number_of_iterations
    m2rules = sum(m2rules) / number_of_iterations
    benchmark_data["rule_count"].append(target_rule_count)
    benchmark_data["M1_duration"].append(m1duration)
    benchmark_data["M2_duration"].append(m2duration)
    benchmark_data["M1_accuracy"].append(m1acc)
    benchmark_data["M2_accuracy"].append(m2acc)
    benchmark_data["M1_output_rules"].append(m1rules)
    benchmark_data["M2_output_rules"].append(m2rules)
    print("target rule count:", target_rule_count)
    print("M1 duration:", m1duration)
    print("M2 duration:", m2duration)
    print("M1 acc:", m1acc)
    print("M2 acc:", m2acc)
    print("M1 clf len:", m1rules)
    print("M2 clf len:", m2rules)
    print("\n\n")
import matplotlib.pyplot as plt
# Collect the benchmark results and plot M1 vs M2 build time.
benchmark_data_pd = pd.DataFrame(benchmark_data)
benchmark_data_pd.plot(x=["rule_count"], y=["M1_duration", "M2_duration"])
plt.savefig("../data/m1_m2_podrobne.PNG")
# Load reference measurements of the other CBA implementations (R tools).
arc_rule_sensitivity = pd.read_csv("../data/arc-rule-sensitivity.csv")
#times_arc = arc_rule_sensitivity[["input rules", "time_rcba", "time_arc", "time_acba"]]
times_arc = arc_rule_sensitivity
times_arc = times_arc.astype({"input rules": int})
times_arc = times_arc.set_index("input rules")
times_arc.head()
benchmark_data_pd.head()
# NOTE(review): this positional rename assumes the DataFrame's columns are in
# alphabetical order (M1_accuracy, M1_duration, M1_output_rules, M2_...,
# rule_count). Modern pandas preserves the dict's insertion order instead,
# which would silently mislabel every column here — verify before trusting
# the joined plots.
benchmark_data_pd.columns = ["acc_pyARC_m1", "time_pyARC_m1", "output_rules_pyARC_m1", "acc_pyARC_m2", "time_pyARC_m2", "output_rules_pyARC_m2", "input rules"]
benchmark_data_pd = benchmark_data_pd.set_index("input rules")
# Join pyARC results with the other implementations on the rule count.
times_df = benchmark_data_pd.join(times_arc)
times_df
times_df["input rules"] = times_df.index
labels = ["pyARC - m1", "pyARC - m2", "arc", "rCBA", "arulesCBA"]
import matplotlib.pyplot as plt
# Build-time comparison across implementations.
ax = times_df.plot(x=["input rules"], y=["time_pyARC_m1", "time_pyARC_m2", "time_arc", "time_rcba", "time_acba"])
ax.legend(labels)
plt.savefig('../data/rule_sensitivity_plot.png')
import matplotlib.pyplot as plt
# Accuracy comparison.
ax = times_df.plot(subplots=True, sharey=True, sharex=True, figsize=(5, 10), x=["input rules"], y=["acc_pyARC_m1", "acc_pyARC_m2", "acc_arc", "acc_rcba", "acc_acba"])
plt.savefig('../data/rule_sensitivity_plot_accuracy.png')
import matplotlib.pyplot as plt
# Output-rule-count (classifier size) comparison.
times_df.plot(subplots=True, sharey=True, sharex=True, figsize=(5, 10), x=["input rules"], y=["output_rules_pyARC_m1", "output_rules_pyARC_m2", "output_rules_arc", "output_rules_rcba", "output_rules_acba"])
plt.savefig('../data/rule_sensitivity_plot_output_rules.png')
```
| github_jupyter |
# Multiple Qubits & Entangled States
Single qubits are interesting, but individually they offer no computational advantage. We will now look at how we represent multiple qubits, and how these qubits can interact with each other. We have seen how we can represent the state of a qubit using a 2D-vector, now we will see how we can represent the state of multiple qubits.
## Contents
1. [Representing Multi-Qubit States](#represent)
1.1 [Exercises](#ex1)
2. [Single Qubit Gates on Multi-Qubit Statevectors](#single-qubit-gates)
2.1 [Exercises](#ex2)
3. [Multi-Qubit Gates](#multi-qubit-gates)
3.1 [The CNOT-gate](#cnot)
3.2 [Entangled States](#entangled)
3.3 [Visualizing Entangled States](#visual)
3.4 [Exercises](#ex3)
## 1. Representing Multi-Qubit States <a id="represent"></a>
We saw that a single bit has two possible states, and a qubit state has two complex amplitudes. Similarly, two bits have four possible states:
`00` `01` `10` `11`
And to describe the state of two qubits requires four complex amplitudes. We store these amplitudes in a 4D-vector like so:
$$ |a\rangle = a_{00}|00\rangle + a_{01}|01\rangle + a_{10}|10\rangle + a_{11}|11\rangle = \begin{bmatrix} a_{00} \\ a_{01} \\ a_{10} \\ a_{11} \end{bmatrix} $$
The rules of measurement still work in the same way:
$$ p(|00\rangle) = |\langle 00 | a \rangle |^2 = |a_{00}|^2$$
And the same implications hold, such as the normalisation condition:
$$ |a_{00}|^2 + |a_{01}|^2 + |a_{10}|^2 + |a_{11}|^2 = 1$$
If we have two separated qubits, we can describe their collective state using the tensor product:
$$ |a\rangle = \begin{bmatrix} a_0 \\ a_1 \end{bmatrix}, \quad |b\rangle = \begin{bmatrix} b_0 \\ b_1 \end{bmatrix} $$
$$
|ba\rangle = |b\rangle \otimes |a\rangle = \begin{bmatrix} b_0 \times \begin{bmatrix} a_0 \\ a_1 \end{bmatrix} \\ b_1 \times \begin{bmatrix} a_0 \\ a_1 \end{bmatrix} \end{bmatrix} = \begin{bmatrix} b_0 a_0 \\ b_0 a_1 \\ b_1 a_0 \\ b_1 a_1 \end{bmatrix}
$$
And following the same rules, we can use the tensor product to describe the collective state of any number of qubits. Here is an example with three qubits:
$$
|cba\rangle = \begin{bmatrix} c_0 b_0 a_0 \\ c_0 b_0 a_1 \\ c_0 b_1 a_0 \\ c_0 b_1 a_1 \\
c_1 b_0 a_0 \\ c_1 b_0 a_1 \\ c_1 b_1 a_0 \\ c_1 b_1 a_1 \\
\end{bmatrix}
$$
If we have $n$ qubits, we will need to keep track of $2^n$ complex amplitudes. As we can see, these vectors grow exponentially with the number of qubits. This is the reason quantum computers with large numbers of qubits are so difficult to simulate. A modern laptop can easily simulate a general quantum state of around 20 qubits, but simulating 100 qubits is too difficult for the largest supercomputers.
Let's look at an example circuit:
```
from qiskit import QuantumCircuit, Aer, assemble
from math import pi
import numpy as np
from qiskit.visualization import plot_histogram, plot_bloch_multivector
qc = QuantumCircuit(3)
# Apply H-gate to each qubit:
for qubit in range(3):
qc.h(qubit)
# See the circuit:
qc.draw()
```
Each qubit is in the state $|+\rangle$, so we should see the vector:
$$
|{+++}\rangle = \frac{1}{\sqrt{8}}\begin{bmatrix} 1 \\ 1 \\ 1 \\ 1 \\
1 \\ 1 \\ 1 \\ 1 \\
\end{bmatrix}
$$
```
# Let's see the result
svsim = Aer.get_backend('statevector_simulator')
qobj = assemble(qc)
final_state = svsim.run(qobj).result().get_statevector()
# In Jupyter Notebooks we can display this nicely using Latex.
# If not using Jupyter Notebooks you may need to remove the
# array_to_latex function and use print(final_state) instead.
from qiskit_textbook.tools import array_to_latex
array_to_latex(final_state, pretext="\\text{Statevector} = ")
```
And we have our expected result.
### 1.1 Quick Exercises: <a id="ex1"></a>
1. Write down the tensor product of the qubits:
a) $|0\rangle|1\rangle$
b) $|0\rangle|+\rangle$
c) $|+\rangle|1\rangle$
d) $|-\rangle|+\rangle$
2. Write the state:
$|\psi\rangle = \tfrac{1}{\sqrt{2}}|00\rangle + \tfrac{i}{\sqrt{2}}|01\rangle $
as two separate qubits.
## 2. Single Qubit Gates on Multi-Qubit Statevectors <a id="single-qubit-gates"></a>
We have seen that an X-gate is represented by the matrix:
$$
X = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}
$$
And that it acts on the state $|0\rangle$ as so:
$$
X|0\rangle = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}\begin{bmatrix} 1 \\ 0 \end{bmatrix} = \begin{bmatrix} 0 \\ 1\end{bmatrix}
$$
but it may not be clear how an X-gate would act on a qubit in a multi-qubit vector. Fortunately, the rule is quite simple; just as we used the tensor product to calculate multi-qubit statevectors, we use the tensor product to calculate matrices that act on these statevectors. For example, in the circuit below:
```
# Two-qubit circuit: H on qubit 0 and X on qubit 1, applied in the same layer.
qc = QuantumCircuit(2)
qc.h(0)
qc.x(1)
qc.draw()
```
we can represent the simultaneous operations (H & X) using their tensor product:
$$
X|q_1\rangle \otimes H|q_0\rangle = (X\otimes H)|q_1 q_0\rangle
$$
The operation looks like this:
$$
X\otimes H = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \otimes \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} = \frac{1}{\sqrt{2}}
\begin{bmatrix} 0 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
& 1 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
\\
1 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
& 0 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
\end{bmatrix} = \frac{1}{\sqrt{2}}
\begin{bmatrix} 0 & 0 & 1 & 1 \\
0 & 0 & 1 & -1 \\
1 & 1 & 0 & 0 \\
1 & -1 & 0 & 0 \\
\end{bmatrix}
$$
Which we can then apply to our 4D statevector $|q_1 q_0\rangle$. This can become quite messy, you will often see the clearer notation:
$$
X\otimes H =
\begin{bmatrix} 0 & H \\
H & 0\\
\end{bmatrix}
$$
Instead of calculating this by hand, we can use Qiskit’s `unitary_simulator` to calculate this for us. The unitary simulator multiplies all the gates in our circuit together to compile a single unitary matrix that performs the whole quantum circuit:
```
usim = Aer.get_backend('unitary_simulator')
qobj = assemble(qc)
unitary = usim.run(qobj).result().get_unitary()
```
and view the results:
```
# In Jupyter Notebooks we can display this nicely using Latex.
# If not using Jupyter Notebooks you may need to remove the
# array_to_latex function and use print(unitary) instead.
from qiskit_textbook.tools import array_to_latex
array_to_latex(unitary, pretext="\\text{Circuit = }\n")
```
If we want to apply a gate to only one qubit at a time (such as in the circuit below), we describe this using tensor product with the identity matrix, e.g.:
$$ X \otimes I $$
```
qc = QuantumCircuit(2)
qc.x(1)
qc.draw()
# Simulate the unitary
usim = Aer.get_backend('unitary_simulator')
qobj = assemble(qc)
unitary = usim.run(qobj).result().get_unitary()
# Display the results:
array_to_latex(unitary, pretext="\\text{Circuit = } ")
```
We can see Qiskit has performed the tensor product:
$$
X \otimes I =
\begin{bmatrix} 0 & I \\
I & 0\\
\end{bmatrix} =
\begin{bmatrix} 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1 \\
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
\end{bmatrix}
$$
### 2.1 Quick Exercises: <a id="ex2"></a>
1. Calculate the single qubit unitary ($U$) created by the sequence of gates: $U = XZH$. Use Qiskit's unitary simulator to check your results.
2. Try changing the gates in the circuit above. Calculate their tensor product, and then check your answer using the unitary simulator.
**Note:** Different books, softwares and websites order their qubits differently. This means the tensor product of the same circuit can look very different. Try to bear this in mind when consulting other sources.
## 3. Multi-Qubit Gates <a id="multi-qubit-gates"></a>
Now we know how to represent the state of multiple qubits, we are now ready to learn how qubits interact with each other. An important two-qubit gate is the CNOT-gate.
### 3.1 The CNOT-Gate <a id="cnot"></a>
You have come across this gate before in _[The Atoms of Computation](../ch-states/atoms-computation.html)._ This gate is a conditional gate that performs an X-gate on the second qubit (target), if the state of the first qubit (control) is $|1\rangle$. The gate is drawn on a circuit like this, with `q0` as the control and `q1` as the target:
```
qc = QuantumCircuit(2)
# Apply CNOT
qc.cx(0,1)
# See the circuit:
qc.draw()
```
When our qubits are not in superposition of $|0\rangle$ or $|1\rangle$ (behaving as classical bits), this gate is very simple and intuitive to understand. We can use the classical truth table:
| Input (t,c) | Output (t,c) |
|:-----------:|:------------:|
| 00 | 00 |
| 01 | 11 |
| 10 | 10 |
| 11 | 01 |
And acting on our 4D-statevector, it has one of the two matrices:
$$
\text{CNOT} = \begin{bmatrix} 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 \\
\end{bmatrix}, \quad
\text{CNOT} = \begin{bmatrix} 1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0 \\
\end{bmatrix}
$$
depending on which qubit is the control and which is the target. Different books, simulators and papers order their qubits differently. In our case, the left matrix corresponds to the CNOT in the circuit above. This matrix swaps the amplitudes of $|01\rangle$ and $|11\rangle$ in our statevector:
$$
|a\rangle = \begin{bmatrix} a_{00} \\ a_{01} \\ a_{10} \\ a_{11} \end{bmatrix}, \quad \text{CNOT}|a\rangle = \begin{bmatrix} a_{00} \\ a_{11} \\ a_{10} \\ a_{01} \end{bmatrix} \begin{matrix} \\ \leftarrow \\ \\ \leftarrow \end{matrix}
$$
We have seen how this acts on classical states, but let’s now see how it acts on a qubit in superposition. We will put one qubit in the state $|+\rangle$:
```
# Put q0 in superposition and inspect the 2-qubit statevector.
qc = QuantumCircuit(2)
# Apply H-gate to the first:
qc.h(0)
qc.draw()
# Let's see the result:
svsim = Aer.get_backend('statevector_simulator')
qobj = assemble(qc)
final_state = svsim.run(qobj).result().get_statevector()
# Print the statevector neatly:
array_to_latex(final_state, pretext="\\text{Statevector = }")
```
As expected, this produces the state $|0\rangle \otimes |{+}\rangle = |0{+}\rangle$:
$$
|0{+}\rangle = \tfrac{1}{\sqrt{2}}(|00\rangle + |01\rangle)
$$
And let’s see what happens when we apply the CNOT gate:
```
# H on q0 followed by CNOT(0,1) produces the Bell state (|00> + |11>)/sqrt(2).
qc = QuantumCircuit(2)
# Apply H-gate to the first:
qc.h(0)
# Apply a CNOT:
qc.cx(0,1)
qc.draw()
# Let's get the result:
qobj = assemble(qc)
result = svsim.run(qobj).result()
# Print the statevector neatly:
final_state = result.get_statevector()
array_to_latex(final_state, pretext="\\text{Statevector = }")
```
We see we have the state:
$$
\text{CNOT}|0{+}\rangle = \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)
$$
This state is very interesting to us, because it is _entangled._ This leads us neatly on to the next section.
### 3.2 Entangled States <a id="entangled"></a>
We saw in the previous section we could create the state:
$$
\tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)
$$
This is known as a _Bell_ state. We can see that this state has 50% probability of being measured in the state $|00\rangle$, and 50% chance of being measured in the state $|11\rangle$. Most interestingly, it has a **0%** chance of being measured in the states $|01\rangle$ or $|10\rangle$. We can see this in Qiskit:
```
# Only |00> and |11> appear in the counts — never |01> or |10>.
plot_histogram(result.get_counts())
```
This combined state cannot be written as two separate qubit states, which has interesting implications. Although our qubits are in superposition, measuring one will tell us the state of the other and collapse its superposition. For example, if we measured the top qubit and got the state $|1\rangle$, the collective state of our qubits changes like so:
$$
\tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle) \quad \xrightarrow[]{\text{measure}} \quad |11\rangle
$$
Even if we separated these qubits light-years away, measuring one qubit collapses the superposition and appears to have an immediate effect on the other. This is the [‘spooky action at a distance’](https://en.wikipedia.org/wiki/Quantum_nonlocality) that upset so many physicists in the early 20th century.
It’s important to note that the measurement result is random, and the measurement statistics of one qubit are **not** affected by any operation on the other qubit. Because of this, there is **no way** to use shared quantum states to communicate. This is known as the no-communication theorem.[1]
### 3.3 Visualizing Entangled States<a id="visual"></a>
We have seen that this state cannot be written as two separate qubit states, this also means we lose information when we try to plot our state on separate Bloch spheres:
```
# Per-qubit Bloch vectors lose the correlation information of the entangled state.
plot_bloch_multivector(final_state)
```
Given how we defined the Bloch sphere in the earlier chapters, it may not be clear how Qiskit even calculates the Bloch vectors with entangled qubits like this. In the single-qubit case, the position of the Bloch vector along an axis nicely corresponds to the expectation value of measuring in that basis. If we take this as _the_ rule of plotting Bloch vectors, we arrive at this conclusion above. This shows us there is _no_ single-qubit measurement basis for which a specific measurement is guaranteed. This contrasts with our single qubit states, in which we could always pick a single-qubit basis. Looking at the individual qubits in this way, we miss the important effect of correlation between the qubits. We cannot distinguish between different entangled states. For example, the two states:
$$\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle) \quad \text{and} \quad \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)$$
will both look the same on these separate Bloch spheres, despite being very different states with different measurement outcomes.
How else could we visualize this statevector? This statevector is simply a collection of four amplitudes (complex numbers), and there are endless ways we can map this to an image. One such visualization is the _Q-sphere,_ here each amplitude is represented by a blob on the surface of a sphere. The size of the blob is proportional to the magnitude of the amplitude, and the colour is proportional to the phase of the amplitude. The amplitudes for $|00\rangle$ and $|11\rangle$ are equal, and all other amplitudes are 0:
```
from qiskit.visualization import plot_state_qsphere
# Q-sphere: blob size = amplitude magnitude, colour = phase.
plot_state_qsphere(final_state)
```
Here we can clearly see the correlation between the qubits. The Q-sphere's shape has no significance, it is simply a nice way of arranging our blobs; the number of `0`s in the state is proportional to the state's position on the Z-axis, so here we can see the amplitude of $|00\rangle$ is at the top pole of the sphere, and the amplitude of $|11\rangle$ is at the bottom pole of the sphere.
### 3.4 Exercise: <a id="ex3"></a>
1. Create a quantum circuit that produces the Bell state: $\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle)$.
Use the statevector simulator to verify your result.
2. The circuit you created in question 1 transforms the state $|00\rangle$ to $\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle)$, calculate the unitary of this circuit using Qiskit's simulator. Verify this unitary does in fact perform the correct transformation.
3. Think about other ways you could represent a statevector visually. Can you design an interesting visualization from which you can read the magnitude and phase of each amplitude?
## 4. References
[1] Asher Peres, Daniel R. Terno, _Quantum Information and Relativity Theory,_ 2004, https://arxiv.org/abs/quant-ph/0212023
```
import qiskit
# Record the Qiskit component versions this notebook was run with.
qiskit.__qiskit_version__
```
| github_jupyter |
# A Transformer based Language Model from scratch
> Building transformer with simple building blocks
- toc: true
- branch: master
- badges: true
- comments: true
- author: Arto
- categories: [fastai, pytorch]
```
#hide
import sys
if 'google.colab' in sys.modules:
!pip install -Uqq fastai
```
In this notebook I'm going to construct a transformer-based language model from scratch, starting with the simplest building blocks. This is inspired by Chapter 12 of [Deep Learning for Coders book](https://www.amazon.com/Deep-Learning-Coders-fastai-PyTorch/dp/1492045527), in which it's demonstrated how to create a Recurrent Neural Network. It provides a strong intuition of how RNNs relate to regular feed-forward neural nets and why certain design choices were made. Here we aim to acquire a similar kind of intuition about Transformer-based architectures.
But as always we should start with the data to be modeled, 'cause without data any model makes no particular sense.
## Data
Similar to the authors of the book, I'll use the simple Human numbers dataset, which is specifically designed for prototyping models quickly and straightforwardly. For more details on the data one can refer to the aforementioned book chapter, which is also available for free as [a notebook](https://github.com/fastai/fastbook/blob/master/12_nlp_dive.ipynb) (isn't that awesome?!)
```
from fastai.text.all import *
# Download/extract the Human Numbers dataset and list its files.
path = untar_data(URLs.HUMAN_NUMBERS)
Path.BASE_PATH = path
path.ls()
```
The data consists of consecutive numbers from 1 to 9999 inclusive spelled as words.
```
# Read train+valid lines, tokenize on spaces and build a word<->index vocab.
lines = L()
with open(path/'train.txt') as f: lines += L(*f.readlines())
with open(path/'valid.txt') as f: lines += L(*f.readlines())
lines
# Join all lines with ' . ' separators into one long token stream.
text = ' . '.join([l.strip() for l in lines])
tokens = text.split(' ')
tokens[:10]
vocab = L(*tokens).unique()
vocab
word2idx = {w:i for i,w in enumerate(vocab)}
# Numericalize the whole stream.
nums = L(word2idx[i] for i in tokens)
nums
```
The task will be to predict the subsequent token given the preceding three. This kind of task, where the goal is to predict the next token from previous ones, is called autoregressive language modeling.
```
# Preview (3-token input, 1-token target) pairs, then build tensors of them.
L((tokens[i:i+3], tokens[i+3]) for i in range(0,len(tokens)-4,3))
seqs = L((tensor(nums[i:i+3]), nums[i+3]) for i in range(0,len(nums)-4,3))
seqs
bs = 64
# 80/20 sequential train/valid split (no shuffling so order is preserved).
cut = int(len(seqs) * 0.8)
dls = DataLoaders.from_dsets(seqs[:cut], seqs[cut:], bs=64, shuffle=False)
x, y = dls.one_batch()
x.shape, y.shape
```
## Dot product attention

The core idea behind Transformers is Attention. Since the release of famous paper [Attention is All You Need](https://arxiv.org/abs/1706.03762) transformers has become most popular architecture for language modelling.
There are a lot of great resourses explaining transformers architecture. I'll list some of those I found useful and comprehensive:
1. [The Annotated Transformer](http://nlp.seas.harvard.edu/2018/04/03/attention.html) completes the original paper with code
2. [Encoder-Decoder Model](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Encoder_Decoder_Model.ipynb) notebook by huggingface gives mathemetically grounded explanation of how transformer encoder-decoder models work
3. [The Illustrated GPT-2](https://jalammar.github.io/illustrated-gpt2/) one of the great blogposts by Jay Alammar visualizing generative language modelling on exaple of GPT-2
4. [minGPT](https://github.com/karpathy/minGPT) cool repo by A. Karpathy providing clear minimal implementation of GPT model
There exist multiple attention mechanisms. The particular one used in the original transformer paper is Scaled Dot Product attention.
Given the query vector for a particular token we will compare it with a key vector for each token in the sequence and decide how much the value vectors of those tokens will affect the resulting representation of the token of interest. One way to view this from a linguistic perspective is: a key is a question each word responds to, a value is the information that word represents, and a query relates to what each word is looking to combine with.
Mathemetically we can compute attention for all _q_, _k_, _v_ in a matrix form:
$$\textbf {Attention}(Q,K,V) = \textbf {softmax}({QK^T\over\sqrt d_k})V $$
Note that the dot product $QK^T$ results in a matrix of shape (seq_len x seq_len). Then it is divided by $ \sqrt d_k$ to compensate for the fact that longer sequences will have larger dot products. $ \textbf{softmax}$ is applied to rescale the attention matrix to be between 0 and 1. When multiplied by $V$ it produces a matrix of the same shape as $V$ (seq_len x dv).
So where do those _q_, _k_, _v_ come from? Well, that's fairly straightforward: queries are calculated from the embeddings of the tokens we want to find representations for by a simple linear projection. Keys and values are calculated from the embeddings of the context tokens. In the case of self-attention all of them come from the original sequence.
```
class SelfAttention(Module):
    """Single-head scaled dot-product self-attention with an output projection.

    Projects the input into queries, keys and values, attends over the
    sequence dimension and maps the attended values back to `d_in` features.
    """
    def __init__(self, d_in, d_qk, d_v=None):
        d_v = ifnone(d_v, d_qk)
        # input projections for queries, keys and values
        self.iq = nn.Linear(d_in, d_qk)
        self.ik = nn.Linear(d_in, d_qk)
        self.iv = nn.Linear(d_in, d_v)
        # map the attended values back to the input dimension
        self.out = nn.Linear(d_v, d_in)
        # 1/sqrt(d_qk) keeps dot-product magnitudes independent of head size
        self.scale = d_qk**-0.5

    def forward(self, x):
        q = self.iq(x) * self.scale
        k = self.ik(x)
        v = self.iv(x)
        weights = F.softmax(q @ k.transpose(-2, -1), dim=-1)
        return self.out(weights @ v)
```
Even though the self-attention mechanism is extremely useful, it possesses limited expressive power. Essentially we are computing a weighted sum of the input modified by a single affine transformation, shared across the whole sequence. To add more computational power to the model we can introduce a fully connected feedforward network on top of the SelfAttention layer.
Curious reader can find detailed formal analysis of the roles of SelfAttention and FeedForward layers in transformer architecture in [this paper](https://arxiv.org/pdf/1912.10077.pdf) by C. Yun et al.
In brief the authors state that SelfAttention layers compute precise contextual maps and FeedForward layers then assign the results of these contextual maps to the desired output values.
```
class FeedForward(Module):
    """Position-wise two-layer MLP applied independently to every token."""
    def __init__(self, d_in, d_ff):
        self.lin1 = nn.Linear(d_in, d_ff)
        self.lin2 = nn.Linear(d_ff, d_in)
        self.act = nn.ReLU()

    def forward(self, x):
        # expand to d_ff, apply non-linearity, project back to d_in
        return self.lin2(self.act(self.lin1(x)))
```
The output will be of shape (bs, seq_len, d), which may then be mapped to (bs, seq_len, vocab_sz) using a linear layer. But we have only one target. To address this issue we can simply do average pooling over the seq_len dimension.
The resulting model is fairly simple:
```
class Model1(Module):
    """Embedding -> single self-attention -> FFN -> mean-pool -> vocab logits."""
    def __init__(self, vocab_sz, d_model, d_qk, d_ff):
        self.emb = Embedding(vocab_sz, d_model)
        self.attn = SelfAttention(d_model, d_qk)
        self.ff = FeedForward(d_model, d_ff)
        self.out = nn.Linear(d_model, vocab_sz)

    def forward(self, x):
        hidden = self.ff(self.attn(self.emb(x)))
        # average over the sequence dimension: one prediction per sequence
        return self.out(hidden.mean(1))
# Smoke-test the forward pass on one batch, then train for 5 epochs.
model = Model1(len(vocab), 64, 64, 128)
out = model(x)
out.shape
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.lr_find()
learn.fit_one_cycle(5, 5e-3)
```
To evaluate the model's performance we need to compare it to some baseline. Let's see what the accuracy would be for a model which always predicts the most common token.
```
# Baseline: accuracy of always predicting the most frequent target token.
n,counts = 0,torch.zeros(len(vocab))
for x,y in dls.valid:
    n += y.shape[0]
    # count how many times each vocab id appears as a target
    for i in range_of(vocab): counts[i] += (y==i).long().sum()
idx = torch.argmax(counts)
idx, vocab[idx.item()], counts[idx].item()/n
```
As you can see, always predicting "thousand" which turn out to be the most common token in the dataset would result in ~15% accuracy. Our simple transformer does much better then that. It feels promising, so let's try to improve the architecture and check if we can get better results.
### Multihead attention
A structured sequence may comprise multiple distinctive kinds of relationships. Our model is forced to learn only one way in which queries, keys and values are constructed from the original token embedding. To remove this limitation we can modify attention layer include multiple heads which would correspond to extracting different kinds of relationships between tokens. The MultiHeadAttention layer consits of several heads each of those is similar to SelfAttention layer we made before. To keep computational cost of the multi-head layer we set $d_k = d_v = d_{model}/n_h$, where $n_h$ is number of heads.
```
class SelfAttention(Module):
    """One attention head; returns raw attended values (no output projection)."""
    def __init__(self, d_in, d_qk, d_v=None):
        d_v = ifnone(d_v, d_qk)
        self.iq = nn.Linear(d_in, d_qk)
        self.ik = nn.Linear(d_in, d_qk)
        self.iv = nn.Linear(d_in, d_v)
        # 1/sqrt(d_qk) normalization for the attention logits
        self.scale = d_qk**-0.5

    def forward(self, x):
        q = self.iq(x)
        k = self.ik(x)
        v = self.iv(x)
        weights = F.softmax(q @ k.transpose(-2, -1) * self.scale, dim=-1)
        return weights @ v
class MultiHeadAttention(Module):
    """Runs several SelfAttention heads and merges them with a linear layer."""
    def __init__(self, d_model, n_heads, d_qk=None, d_v=None):
        # default head size keeps total computation equal to one full head
        d_qk = ifnone(d_qk, d_model//n_heads)
        d_v = ifnone(d_v, d_qk)
        self.heads = nn.ModuleList([SelfAttention(d_model, d_qk) for _ in range(n_heads)])
        self.out = nn.Linear(d_v*n_heads, d_model)

    def forward(self, x):
        # concatenate per-head outputs along the feature dimension
        head_outs = [head(x) for head in self.heads]
        return self.out(torch.cat(head_outs, -1))
# Smoke test: output shape must match the input's (bs, sl, d_model).
inp = torch.randn(8, 10, 64)
mha = MultiHeadAttention(64, 8)
out = mha(inp)
out.shape
class Model2(Module):
    """Like Model1 but with multi-head attention."""
    def __init__(self, vocab_sz, d_model=64, n_heads=4, d_ff=64*4):
        self.emb = nn.Embedding(vocab_sz, d_model)
        self.attn = MultiHeadAttention(d_model, n_heads)
        self.ff = FeedForward(d_model, d_ff)
        self.out = nn.Linear(d_model, vocab_sz)

    def forward(self, x):
        hidden = self.ff(self.attn(self.emb(x)))
        # pool across the sequence to produce a single prediction
        return self.out(hidden.mean(1))
# Train the multi-head variant with the same setup as Model1.
learn = Learner(dls, Model2(len(vocab)), loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(5, 5e-4)
```
### MultiHead Attention Refactor
Python `for` loops are slow, therefore it is better to refactor the MultiHeadAttention module to compute Q, K, V for all heads in batch.
```
class MultiHeadAttention(Module):
    """Multi-head self-attention with all heads computed in one batched matmul.

    Equivalent to the per-head version above but avoids the Python loop:
    Q, K and V for every head come from single d_model->d_model projections
    reshaped to (bs, n_heads, seq_len, d_head).
    """
    def __init__(self, d_model, n_heads):
        assert d_model%n_heads == 0
        self.n_heads = n_heads
        self.iq = nn.Linear(d_model, d_model, bias=False)
        self.ik = nn.Linear(d_model, d_model, bias=False)
        self.iv = nn.Linear(d_model, d_model, bias=False)
        self.out = nn.Linear(d_model, d_model, bias=False)
        # BUG FIX: the scale must be 1/sqrt(d_head); the previous version
        # stored d_head itself and *multiplied* the queries by it, inflating
        # the attention logits instead of normalizing them.
        self.scale = (d_model//n_heads)**-0.5
    def forward(self, x):
        bs, seq_len, d = x.size()
        # (bs,sl,d) -> (bs,sl,nh,dh) -> (bs,nh,sl,dh)
        q = self.iq(x).view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)
        k = self.ik(x).view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)
        v = self.iv(x).view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)
        q = q * self.scale
        att = F.softmax(q@k.transpose(-2,-1), -1)
        out = att @ v # (bs, nh, sl, sl) x (bs, nh, sl, dh) -> (bs, nh, sl, dh)
        out = out.transpose(1, 2).contiguous().view(bs, seq_len, d) # back to original shape
        return self.out(out)
# Retrain with the batched multi-head implementation.
learn = Learner(dls, Model2(len(vocab)), loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(5, 1e-3)
```
Note that some speedup is observed even on such a tiny dataset and small model.
## More signal
Similarly to the RNN case considered in the book, we can take the next step and create more signal for the model to learn from. To adapt to the modified objective we need to make couple of steps. First let's rearrange data to proper input-target pairs for the new task.
### Arranging data
Unlike an RNN, the transformer is not a stateful model. This means it treats each sequence independently and can only attend within a fixed-length context. This limitation was addressed by the authors of the [Transformer-XL paper](https://arxiv.org/abs/1901.02860), where a segment-level recurrence mechanism and a novel positional encoding scheme were proposed to enable capturing long-term dependencies. I will not go into details of the TransformerXL architecture here. As we shall see, a stateless transformer can also learn a lot about the structure of our data.
One thing to note in this case is that we don't need to maintain the structure of the data outside of the sequences, so we can shuffle the sequences randomly in the dataloader.
```
# Rearrange the token stream into (input, shifted-by-one target) pairs of length sl.
sl = 16
seqs = L((tensor(nums[i:i+sl]), tensor(nums[i+1:i+sl+1]))
         for i in range(0,len(nums)-sl-1,sl))
cut = int(len(seqs) * 0.8)
# Sequences are independent now, so shuffling the training set is safe.
dls = DataLoaders.from_dsets(seqs[:cut], seqs[cut:],
                             bs=bs, drop_last=True, shuffle=True)
xb, yb = dls.one_batch()
xb.shape, yb.shape
[L(vocab[o] for o in s) for s in seqs[0]]
```
### Positional encoding
Before we did average pooling over seq_len dimension. Our model didn't care about the order of the tokens at all. But actually order of the tokens in a sentence matter a lot. In our case `one hundred two` and `two hundred one` are pretty different and `hundred one two` doesn't make sense.
To incorporate positional information into the model, the authors of the transformer architecture proposed to use positional encodings in addition to regular token embeddings. Positional encodings may be learned, but it's also possible to use hardcoded encodings. For instance, encodings may be composed of sin and cos.
In this way each position in a sequence will get unique vector associated with it.
```
class PositionalEncoding(Module):
    """Adds fixed sinusoidal position information to token embeddings."""
    def __init__(self, d):
        # geometric progression of frequencies, as in "Attention Is All You Need";
        # registered as a buffer so it moves with the module but isn't trained
        self.register_buffer('freq', 1/(10000 ** (torch.arange(0., d, 2.)/d)))
        # embeddings are scaled up by sqrt(d) before the encoding is added
        self.scale = d**0.5

    def forward(self, x):
        positions = torch.arange(x.size(1), device=x.device)
        angles = torch.outer(positions, self.freq)
        pos_enc = torch.cat([torch.sin(angles), torch.cos(angles)], axis=-1)
        return x*self.scale + pos_enc
#collapse-hide
# Visualize the positional encodings for a 16-token, 64-dim sequence.
x = torch.zeros(1, 16, 64)
encs = PositionalEncoding(64)(x)
plt.matshow(encs.squeeze())
plt.xlabel('Embedding size')
plt.ylabel('Sequence length')
plt.show()
class TransformerEmbedding(Module):
    """Token embedding followed by additive positional encoding."""
    def __init__(self, emb_sz, d_model):
        self.emb = nn.Embedding(emb_sz, d_model)
        self.pos_enc = PositionalEncoding(d_model)

    def forward(self, x):
        embedded = self.emb(x)
        return self.pos_enc(embedded)
class Model3(Module):
    """Per-position predictions: no pooling, position info via TransformerEmbedding."""
    def __init__(self, vocab_sz, d_model=64, n_heads=4, d_ff=64*4):
        self.emb = TransformerEmbedding(vocab_sz, d_model)
        self.attn = MultiHeadAttention(d_model, n_heads)
        self.ff = FeedForward(d_model, d_ff)
        self.out = nn.Linear(d_model, vocab_sz)

    def forward(self, x):
        hidden = self.ff(self.attn(self.emb(x)))
        # (bs, sl, d_model) -> (bs, sl, vocab_sz): one prediction per position
        return self.out(hidden)
# Smoke test: per-position logits of shape (bs, sl, vocab_sz).
model = Model3(len(vocab))
out = model(xb)
out.shape
def loss_func(inp, targ):
    "Flatten (bs, sl, vocab) predictions and (bs, sl) targets for cross-entropy."
    flat_preds = inp.view(-1, len(vocab))
    flat_targs = targ.view(-1)
    return F.cross_entropy(flat_preds, flat_targs)
# Train with the flattened per-position cross-entropy loss.
learn = Learner(dls, Model3(len(vocab)), loss_func=loss_func, metrics=accuracy)
learn.fit_one_cycle(5, 1e-2)
```
Wow! That's a great accuracy! So the problem is solved and we only needed one attention layer and 2 layer deep feed-forward block? Don't you feel somewhat skeptical about this result?
Well, you should be! Think about what we did here: the goal was to predict a target sequence, say `['.','two','.','three','.','four']` from an input `['one','.','two','.','three','.']`. These two sequences intersect on all positions except the first and the last one. So the model needs to learn simply to copy input tokens, starting from the second one, to the outputs. In our case this will result in 15 correct predictions of a total of 16 positions — that's almost 94% accuracy. This makes the task very simple but not very useful to learn. To train a proper autoregressive language model, as we did with RNNs, the concept of masking has to be introduced.
### Causal Masking
So we want to allow the model, for each token, to attend only to itself and those prior to it. To accomplish this we can set all the values of the attention matrix above the main diagonal to $-\infty$. After softmax these values will effectively turn to 0, thus disabling attention to the "future".
```
def get_subsequent_mask(x):
    """Additive causal mask of shape (sl, sl): 0 on/below the diagonal, -inf above.

    `x` is a (bs, sl, d) batch; only its sequence length and device are used.
    """
    sl = x.size(1)
    # True on and below the main diagonal (positions a token may attend to)
    allowed = torch.tril(torch.ones(sl, sl, device=x.device)) == 1
    mask = torch.zeros(sl, sl, device=x.device)
    return mask.masked_fill(~allowed, float('-inf'))
# Visualize the causal mask and its effect on random attention weights.
inp = torch.randn(8, 10, 64)
mask = get_subsequent_mask(inp)
plt.matshow(mask);
q, k = torch.rand(1,10,32), torch.randn(1,10,32)
# adding the mask before softmax zeroes out attention to future positions
att_ = F.softmax((q@k.permute(0,2,1)+mask), -1)
plt.matshow(att_[0].detach());
```
We should also modify the attention layer to accept mask:
```
class MultiHeadAttention(Module):
    """Batched multi-head self-attention with an optional additive mask."""
    def __init__(self, d_model, n_heads):
        assert d_model%n_heads == 0
        self.n_heads = n_heads
        d_head = d_model//n_heads
        self.iq = nn.Linear(d_model, d_model, bias=False)
        self.ik = nn.Linear(d_model, d_model, bias=False)
        self.iv = nn.Linear(d_model, d_model, bias=False)
        # 1/sqrt(d_head) normalization for the attention logits
        self.scale = d_head**-0.5
        self.out = nn.Linear(d_model, d_model, bias=False)

    def _split_heads(self, proj, bs, seq_len, d):
        # (bs,sl,d) -> (bs,nh,sl,dh)
        return proj.view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)

    def forward(self, x, mask=None):
        bs, seq_len, d = x.size()
        mask = ifnone(mask, 0)
        q = self._split_heads(self.iq(x), bs, seq_len, d) * self.scale
        k = self._split_heads(self.ik(x), bs, seq_len, d)
        v = self._split_heads(self.iv(x), bs, seq_len, d)
        # additive mask: -inf entries disable attention after the softmax
        att = F.softmax(q @ k.transpose(-2, -1) + mask, -1)
        merged = (att @ v).transpose(1, 2).contiguous().view(bs, seq_len, d)
        return self.out(merged)
class Model4(Module):
    """Causal LM: one masked attention layer followed by a feed-forward block."""
    def __init__(self, vocab_sz, d_model=64, n_heads=8, d_ff=64*4):
        self.emb = TransformerEmbedding(vocab_sz, d_model)
        self.attn = MultiHeadAttention(d_model, n_heads)
        self.ff = FeedForward(d_model, d_ff)
        self.out = nn.Linear(d_model, vocab_sz)

    def forward(self, x):
        h = self.emb(x)
        # the causal mask prevents attending to future tokens
        h = self.ff(self.attn(h, get_subsequent_mask(h)))
        return self.out(h)
# Train the masked (causal) single-layer model.
learn = Learner(dls, Model4(len(vocab)), loss_func=loss_func, metrics=accuracy)
learn.fit_one_cycle(5, 3e-3)
```
Now we get somewhat lower accuracy, which is expected given that the task has become more difficult. Also training loss is significantly lower than validation loss, which means the model is overfitting. Let's see if the same approaches as was applied to RNNs can help.
### Multilayer transformer
To solve a more difficult task we usually need a deeper model. For convenience let's make a TransformerLayer which combines the self-attention and feed-forward blocks.
```
class TransformerLayer(Module):
    """Self-attention followed by a feed-forward block; builds its own causal mask."""
    def __init__(self, d_model, n_heads=8, d_ff=None, causal=True):
        d_ff = ifnone(d_ff, 4*d_model)
        self.attn = MultiHeadAttention(d_model, n_heads)
        self.ff = FeedForward(d_model, d_ff)
        self.causal = causal

    def forward(self, x, mask=None):
        # a causal layer always regenerates the mask for the current seq length
        attn_mask = get_subsequent_mask(x) if self.causal else mask
        return self.ff(self.attn(x, attn_mask))
class Model5(Module):
    """Stack of `n_layer` causal transformer layers with per-position outputs."""
    def __init__(self, vocab_sz, d_model=64, n_layer=4, n_heads=8):
        self.emb = TransformerEmbedding(vocab_sz, d_model)
        layers = [TransformerLayer(d_model, n_heads) for _ in range(n_layer)]
        self.encoder = nn.Sequential(*layers)
        self.out = nn.Linear(d_model, vocab_sz)

    def forward(self, x):
        return self.out(self.encoder(self.emb(x)))
# Train the 4-layer stack (without residuals it struggles to learn).
learn = Learner(dls, Model5(len(vocab), n_layer=4), loss_func=loss_func, metrics=accuracy)
learn.fit_one_cycle(5, 1e-2)
```
That's not good! A 4-layer-deep Transformer struggles to learn anything. But there is good news: this problem has already been resolved in the original transformer.
### Residual connections and Regularization
If you are familiar with ResNets the proposed solution will not surprise you much. The idea is simple yet very effective. Instead of returning modified output $f(x)$ each transformer sublayer will return $x + f(x)$. This allows the original input to propagate freely through the model. So the model learns not an entirely new representation of $x$ but how to modify $x$ to add some useful information to the original representation.
As we modify layers to include the residual connections let's also add some regularization by inserting Dropout layers.
```
class TransformerEmbedding(Module):
    """Token embedding + positional encoding + embedding dropout."""
    def __init__(self, emb_sz, d_model, p=0.1):
        self.emb = Embedding(emb_sz, d_model)
        # small init compensates for the sqrt(d_model) scaling applied later
        nn.init.trunc_normal_(self.emb.weight, std=d_model**-0.5)
        self.pos_enc = PositionalEncoding(d_model)
        self.drop = nn.Dropout(p)

    def forward(self, x):
        embedded = self.emb(x)
        return self.drop(self.pos_enc(embedded))
```
Another modification is to add layer normalization which is intended to improve learning dynamics of the network by reparametrising data statistics and is generally used in transformer based architectures.
```
class FeedForward(Module):
    """Pre-LN position-wise feed-forward sublayer with residual connection.

    Args:
        d_model: model (input/output) dimension
        d_ff: inner hidden dimension
        p: dropout probability applied to the branch output
    """
    def __init__(self, d_model, d_ff, p=0.2):
        self.lin1 = nn.Linear(d_model, d_ff)
        self.lin2 = nn.Linear(d_ff, d_model)
        self.act = nn.ReLU()
        self.norm = nn.LayerNorm(d_model)
        self.drop = nn.Dropout(p)
    def forward(self, x):
        # Pre-LN: only the branch sees the normalized input.
        # BUG FIX: the residual now carries the *unnormalized* x — the
        # previous version returned norm(x) + branch, which weakens the
        # identity path the residual connection is meant to preserve.
        normed = self.norm(x)
        out = self.lin2(self.act(self.lin1(normed)))
        return x + self.drop(out)
class MultiHeadAttention(Module):
    """Pre-LN multi-head self-attention sublayer with residual connection.

    Args:
        d_model: model dimension (must be divisible by n_heads)
        n_heads: number of attention heads
        p: dropout probability applied to the branch output
    """
    def __init__(self, d_model, n_heads, p=0.1):
        assert d_model%n_heads == 0
        self.n_heads = n_heads
        d_qk, d_v = d_model//n_heads, d_model//n_heads
        self.iq = nn.Linear(d_model, d_model, bias=False)
        self.ik = nn.Linear(d_model, d_model, bias=False)
        self.iv = nn.Linear(d_model, d_model, bias=False)
        # divide the logits by sqrt(d_head)
        self.scale = d_qk**0.5
        self.out = nn.Linear(d_model, d_model, bias=False)
        self.norm = nn.LayerNorm(d_model)
        self.drop = nn.Dropout(p)
    def forward(self, x, mask=None):
        bs, seq_len, d = x.size()
        mask = ifnone(mask, 0)
        # Pre-LN: only the attention branch sees the normalized input.
        # BUG FIX: the residual now adds the *unnormalized* x — the previous
        # version added the normalized tensor, weakening the identity path.
        normed = self.norm(x)
        q = self.iq(normed).view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)
        k = self.ik(normed).view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)
        v = self.iv(normed).view(bs, seq_len, self.n_heads, d//self.n_heads).transpose(1, 2)
        att = F.softmax(q@k.transpose(-2,-1)/self.scale + mask, -1)
        out = att @ v # (bs, nh, sl, sl) x (bs, nh, sl, dh) -> (bs, nh, sl, dh)
        out = out.transpose(1, 2).contiguous().view(bs, seq_len, d) # back to original shape
        return x + self.drop(self.out(out))
class TransformerLayer(Module):
    """Pre-LN transformer block: (masked) self-attention + feed-forward.

    Args:
        d_model: model dimension
        n_heads: number of attention heads
        d_ff: feed-forward hidden size (defaults to 4*d_model)
        causal: if True, a subsequent-token mask is built each forward pass
        p_att: dropout probability for the attention sublayer
        p_ff: dropout probability for the feed-forward sublayer
    """
    def __init__(self, d_model, n_heads=8, d_ff=None, causal=True,
                 p_att=0.1, p_ff=0.1):
        d_ff = ifnone(d_ff, 4*d_model)
        # BUG FIX: forward p_att to the attention sublayer — it was accepted
        # but silently ignored before (attention always used its default).
        self.attn = MultiHeadAttention(d_model, n_heads, p=p_att)
        self.ff = FeedForward(d_model, d_ff, p=p_ff)
        self.causal = causal
        self._init()
    def forward(self, x, mask=None):
        if self.causal:
            mask = get_subsequent_mask(x)
        return self.ff(self.attn(x, mask))
    def _init(self):
        # Xavier init for all weight matrices (biases/norm params keep defaults)
        for p in self.parameters():
            if p.dim()>1: nn.init.xavier_uniform_(p)
class Model6(Module):
    """Full pre-LN causal transformer LM with optional embedding weight tying."""
    def __init__(self, vocab_sz, d_model=64, n_layer=4, n_heads=8,
                 p_emb=0.1, p_att=0.1, p_ff=0.2, tie_weights=True):
        self.emb = TransformerEmbedding(vocab_sz, d_model, p=p_emb)
        layers = [TransformerLayer(d_model, n_heads, p_att=p_att, p_ff=p_ff)
                  for _ in range(n_layer)]
        # final LayerNorm after the last residual block (pre-LN convention)
        self.encoder = nn.Sequential(*layers, nn.LayerNorm(d_model))
        self.out = nn.Linear(d_model, vocab_sz)
        # share the input embedding matrix with the output projection
        if tie_weights: self.out.weight = self.emb.emb.weight

    def forward(self, x):
        return self.out(self.encoder(self.emb(x)))
# With residuals + regularization even a 2-layer model trains well.
learn = Learner(dls, Model6(len(vocab), n_layer=2), loss_func=loss_func, metrics=accuracy)
learn.fit_one_cycle(8, 1e-2)
```
## Bonus - Generation example
```
#hide
from google.colab import drive
# Mount Google Drive to persist the character model and its data.
drive.mount('/content/drive')
path = Path('/content/drive/MyDrive/char_model')
```
Learning to predict numbers is great, but let's try something more entertaining. We can train a language model to generate texts. For example let's try to generate some text in style of Lewis Carroll. For this we'll fit a language model on "Alice in Wonderland" and "Through the looking glass".
```
#collapse-hide
def parse_txt(fns):
txts = []
for fn in fns:
with open(fn) as f:
tmp = ''
for line in f.readlines():
line = line.strip('\n')
if line:
tmp += ' ' + line
elif tmp:
txts.append(tmp.strip())
tmp = ''
return txts
# Parse the two Project Gutenberg files into paragraph strings.
texts = parse_txt([path/'11-0.txt', path/'12-0.txt'])
len(texts)
texts[0:2]
#collapse-hide
class CharTokenizer(Transform):
"Simple charecter level tokenizer"
def __init__(self, vocab=None):
self.vocab = ifnone(vocab, ['', 'xxbos', 'xxeos'] + list(string.printable))
self.c2i = defaultdict(int, [(c,i) for i, c in enumerate(self.vocab)])
def encodes(self, s, add_bos=False, add_eos=False):
strt = [self.c2i['xxbos']] if add_bos else []
end = [self.c2i['xxeos']] if add_eos else []
return LMTensorText(strt + [self.c2i[c] for c in s] + end)
def decodes(self, s, remove_special=False):
return TitledStr(''.join([self.decode_one(i) for i in s]))
def decode_one(self, i):
if i == 2: return '\n'
elif i == 1: return ''
else: return self.vocab[i]
@property
def vocab_sz(self):
return len(self.vocab)
# Tokenizer instance shared by the rest of the notebook.
tok = CharTokenizer()
def add_bos_eos(x:list, bos_id=1, eos_id=2):
    "Wrap a token-id list with beginning- and end-of-sequence marker ids."
    return [bos_id, *x, eos_id]
# Lowercase, tokenize and bracket every paragraph, then flatten to one stream.
nums = [add_bos_eos(tok(t.lower()).tolist()) for t in texts]
len(nums)
all_nums = []
for n in nums: all_nums.extend(n)
all_nums[:15]
print(tok.decode(all_nums[:100]))
# 512-character (input, shifted-by-one target) pairs for the char LM.
sl = 512
seqs = L((tensor(all_nums[i:i+sl]), tensor(all_nums[i+1:i+sl+1]))
         for i in range(0,len(all_nums)-sl-1,sl))
cut = int(len(seqs) * 0.8)
dls = DataLoaders.from_dsets(seqs[:cut], seqs[cut:], device='cuda',
                             bs=8, drop_last=True, shuffle=True)
xb, yb = dls.one_batch()
xb.shape, yb.shape
# 6-layer, 512-dim model trained in mixed precision with early stopping.
model = Model6(tok.vocab_sz, 512, 6, p_emb=0.1, p_ff=0.1, tie_weights=True)
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=[accuracy, perplexity]).to_native_fp16()
learn.lr_find()
#collapse_output
learn.fit_one_cycle(50, 5e-4, cbs=EarlyStoppingCallback(patience=5))
```
### Text generation
Text generation is a big topic on its own. One can refer to great posts [by Patrick von Platen from HuggingFace](https://huggingface.co/blog/how-to-generate) and [Lilian Weng](https://lilianweng.github.io/lil-log/2021/01/02/controllable-neural-text-generation.html) for more details on various approaches. Here I will use nucleus sampling. This method relies on sampling from the smallest set of candidates whose cumulative probability mass exceeds a certain threshold. Intuitively this approach should work well for character-level generation: when there is only one grammatically correct option for continuation we always want to select it, but when starting a new word some diversity in outputs is desirable.
```
#collapse-hide
def expand_dim1(x):
if len(x.shape) == 1:
return x[None, :]
else: return x
def top_p_filter(logits, top_p=0.9):
    """Nucleus filtering: keep the smallest set of logits whose cumulative
    probability exceeds `top_p`; set the rest to -inf (modifies `logits`
    in place and returns it)."""
    sorted_logits, sorted_idx = torch.sort(logits, descending=True)
    cumulative = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    remove = cumulative > top_p
    # shift right so the first token crossing the threshold is still kept
    remove[..., 1:] = remove[..., :-1].clone()
    remove[..., 0] = 0
    # map the removal flags back from sorted order to vocabulary order
    remove_unsorted = remove.scatter(1, sorted_idx, remove)
    logits[remove_unsorted] = float('-inf')
    return logits
@torch.no_grad()
def generate(model, inp,
             max_len=50,
             temperature=1.,
             top_k = 20,
             top_p = 0.9,
             early_stopping=False, #need eos_idx to work
             eos_idx=None):
    """Autoregressively sample up to `max_len` tokens from `model`.

    Args:
        model: causal LM returning (bs, sl, vocab) logits
        inp: prompt ids, shape (sl,) or (bs, sl)
        max_len: maximum number of new tokens to sample
        temperature: softmax temperature applied after nucleus filtering
        top_k: accepted for API compatibility but NOT implemented
        top_p: nucleus-sampling probability-mass threshold
        early_stopping: stop once every sequence sampled `eos_idx`
        eos_idx: end-of-sequence id (required for early_stopping)
    Returns:
        prompt with sampled tokens appended, shape (bs, sl + generated)
    """
    model.to(inp.device)
    model.eval()
    inp = expand_dim1(inp)
    out = inp
    for _ in range(max_len):
        # only the logits of the last position matter for the next token
        logits = model(out)[:, -1, :]
        # BUG FIX: forward `top_p` — previously it was assigned to an unused
        # local and the filter always ran with its default value.
        filtered_logits = top_p_filter(logits, top_p=top_p)
        probs = F.softmax(filtered_logits / temperature, dim=-1)
        sample = torch.multinomial(probs, 1)
        out = torch.cat((out, sample), dim=-1)
        if early_stopping and (sample == eos_idx).all():
            break
    return out
# Sample a continuation of the prompt, stopping early at the end-of-sequence token.
out = generate(learn.model, tok('Alice said '), max_len=200, early_stopping=True, eos_idx=tok.c2i['xxeos'])
print(tok.decode(out[0]))
```
Our relatively simple model learned to generate mostly grammatically plausible text, but it's not entirely coherent. Then again, it would be too much to ask the model to learn a language from scratch by "reading" only two novels (however great those novels are). To get more from the model, let's feed it a larger corpus of data.
### Pretraining on larger dataset
```
#hide
import sys
if 'google.colab' in sys.modules:
!pip install -Uqq datasets
from datasets import load_dataset
```
For this purpose I will use a sample from [bookcorpus dataset](https://huggingface.co/datasets/bookcorpus).
```
#hide_output
# Load a 10M-row sample of bookcorpus and prepare an LM Datasets object.
dataset = load_dataset("bookcorpus", split='train')
df = pd.DataFrame(dataset[:10_000_000])
df.head()
# Precompute text lengths (used later to size the LM dataloader chunks).
df['len'] = df['text'].str.len()
cut = int(len(df)*0.8)
# BUGFIX: was `range_of(df)[:cut], range_of(df[cut:])` — the second element
# produced indices 0..len(df)-cut-1, i.e. a validation split that overlapped
# the training rows instead of pointing at the tail of the dataframe.
splits = range_of(df)[:cut], range_of(df)[cut:]
tfms = Pipeline([ColReader('text'), tok])
dsets = Datasets(df, tfms=tfms, dl_type=LMDataLoader, splits=splits)
#collapse
@patch
def create_item(self:LMDataLoader, seq):
if seq>=self.n: raise IndexError
sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
txt = self.chunks[st : st+sl+1]
return LMTensorText(txt[:-1]),txt[1:]
%%time
# Pass per-split text lengths so LMDataLoader can lay out chunks without re-scanning.
dl_kwargs = [{'lens':df['len'].values[splits[0]]}, {'val_lens':df['len'].values[splits[1]]}]
dls = dsets.dataloaders(bs=32, seq_len=512, dl_kwargs=dl_kwargs, shuffle_train=True, num_workers=2)
dls.show_batch(max_n=2)
# 8-layer character model; mixed-precision training.
model = Model6(tok.vocab_sz, 512, 8, p_emb=0.1, p_ff=0.1, tie_weights=True)
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=[accuracy, perplexity]).to_native_fp16()
learn.lr_find()
# NOTE(review): resumes from an existing checkpoint and trains one more epoch
# before re-saving — assumes 'char_bookcorpus_10m' was produced by a prior run.
learn = learn.load(path/'char_bookcorpus_10m')
learn.fit_one_cycle(1, 1e-4)
learn.save(path/'char_bookcorpus_10m')
```
### Finetune on Carrolls' books
Finally we can finetune the pretrained bookcorpus model on Carroll's books. This will determine the style of generated text.
```
# Rebuild the Carroll-books windows (same recipe as before: non-overlapping
# sl-token windows, target shifted by one token).
sl = 512
seqs = L((tensor(all_nums[i:i+sl]), tensor(all_nums[i+1:i+sl+1]))
         for i in range(0, len(all_nums)-sl-1, sl))
cut = int(len(seqs) * 0.8)
dls = DataLoaders.from_dsets(seqs[:cut], seqs[cut:], device='cuda',
                             bs=16, drop_last=True, shuffle=True)
# Must match the pretrained architecture (8 layers) so the checkpoint loads.
model = Model6(tok.vocab_sz, 512, 8, p_emb=0.1, p_ff=0.1, tie_weights=True)
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=[accuracy, perplexity]).to_native_fp16()
# Start from the bookcorpus-pretrained weights, then finetune on Carroll.
learn = learn.load(path/'char_bookcorpus_10m')
learn.lr_find()
learn.fit_one_cycle(10, 1e-4)
```
As you can see, pretraining the model on a large corpus followed by finetuning helped reduce the validation loss from around 1.53 to 1.037 and improve the accuracy of predicting the next character to 68% (compared to 56.7% before). Let's see how it affects the sampled text:
```
# Sample from the finetuned model, stopping early at the end-of-sequence token.
out = generate(learn.model, tok('Alice said '), max_len=200, early_stopping=True, eos_idx=tok.c2i['xxeos'])
#collapse-hide
print(tok.decode(out[0]))
#hide
learn.save(path/'char_alice')
```
| github_jupyter |
```
%%bash
head /Users/jackyso/Desktop/data_files/source_data.json
"""
clean and prep data for matching:
lowercase everything, take only first 5 digits of zip, pop out each address from each doctor, make all string to preserve zip and npi values
columns = ['first_name','last_name','npi','street','street_2','zip','city','state']
address = [['street'],['street_2'],['zip'],['city'],['state']]
doctor = ['first_name','last_name','npi']
practices = [address]
"""
import pandas as pd
import json
import csv
import numpy as np

# set view width to fit entire row
# FIX: 'max_colwidth' with -1 is deprecated (and rejected by modern pandas);
# the supported spelling is the full option name with None meaning "no limit".
pd.set_option('display.max_colwidth', None)

csvFile = '/Users/jackyso/Desktop/data_files/match_file.csv'
jsonFile = '/Users/jackyso/Desktop/data_files/source_data.json'

# read in csv and turn into dataframe, keep everything as string to preserve zip/npi values
df_match = pd.read_csv(csvFile, header=0, dtype=str)
# take only first 5 chars of zip to standardize
df_match['zip'] = df_match['zip'].str[:5]
# make all lowercase to standardize
df_match = df_match.fillna('').astype(str).apply(lambda x: x.str.lower())
# remove punctuation from street_2 column
# FIX: pass regex=True explicitly — pandas >= 2.0 treats the pattern literally
# by default, which would silently stop stripping punctuation.
df_match['street_2'] = df_match['street_2'].str.replace(r'[^\w\s]', '', regex=True)
# convert empty string or whitespace into NaN so we can do pandas notna/isna string compare
df_match = df_match.astype(str).apply(lambda x: x.str.strip()).replace('', np.nan)
# view top 10 results
df_match[0:10]
# read in json, values already in string but need to orient as columns
# already declared jsonFile path above
df_source = pd.read_json(jsonFile, orient='columns', lines=True)
# view top
df_source.head()
# view doctor returns doctor, dtype: object
df_source['doctor'].head()
# view array of practices
df_source['practices'].head()
"""
one doctor can be at many addresses, so "practices" is an array of addresses
pop out each address so you can see doctor-practice individual association
need to define the dictionary. for every row in the data, key is "doctor" and many addresses can be tied to one doctor
PRO-TIP:
lowercase data to standardize. If do this before popping out addresses from array, it will give you error because str.lower will give you an object.
then you are trying to make a dataframe out of a list of tuples. easier to lowercase after popping out addresses separately.
> df_source = df_source.apply(lambda x:x.astype(str).str.lower())
"""
# Explode practices: one row per (doctor, address) pair, carrying the doctor
# record along in a 'doctor' column.
# NOTE(review): assumes each row of df_source.values is [doctor, practices]
# in that column order — verify against the JSON schema before modifying.
df_source = pd.DataFrame([dict(y,doctor = i) for i,x in df_source.values.tolist() for y in x])
# to view
df_source.head()
# unpack the 'doctor' dict column into individual columns
d = df_source['doctor'].to_dict()
# from_dict builds columns per row index, so transpose to get rows back
df_source_d = pd.DataFrame.from_dict(d).transpose()
# view new dataframe
df_source_d.head()
# drop the packed doctor column now that it is unpacked
df_source = df_source.drop(columns='doctor')
df_source.head()
# add the unpacked doctor columns back via concat (aligned on the row index)
df_good = pd.concat([df_source, df_source_d], join = 'outer', sort = False, axis = 1)
df_good.head()
# after popping everything out, now lowercase data to standardize
df_good = df_good.fillna('').astype(str).apply(lambda x: x.str.lower())
df_good.head()
# standardize zip with first 5 chars for addresses from json file
df_good['zip'] = df_good['zip'].str[:5]
df_good.head()
# remove punctuation from street_2 column
# FIX: pass regex=True explicitly — pandas >= 2.0 treats the pattern literally
# by default, which would silently stop stripping punctuation (and this must
# mirror the identical cleaning applied to df_match above).
df_good['street_2'] = df_good['street_2'].str.replace(r'[^\w\s]', '', regex=True)
df_good.head()
# recall that dataframe01 = df_match
# dataframe02 = df_good
# select distinct npi from dataframe01 where exists(select 1 from dataframe02 where dataframe02.npi = dataframe01.npi)
# merge does inner join by default
# SELECT * FROM df1 INNER JOIN df2 ON df1.key = df2.key
df_npi = pd.merge(df_match, df_good, on='npi')
# df_npi.head()
len(df_npi['npi'].unique())
# 864 unique npis in both sets
# group by
# inner join to find only same ones in both
# left join gets you match data in despite whether it matches or not
# pandas merge full reference: http://pandas.pydata.org/pandas-docs/version/0.19.1/generated/pandas.DataFrame.merge.html
# Match on npi plus full name (stricter than npi alone).
df_fullname_npi = pd.merge(df_match, df_good, on=['npi','first_name','last_name'])
# new_df = pd.merge(A_df, B_df, how='left', left_on=['A_c1','c2'], right_on = ['B_c1','c2'])
len(df_fullname_npi['npi'].unique())
# name and address match = 799
df_name_address = pd.merge(df_match, df_good, on=['first_name','last_name','street','street_2','zip','city','state'])
# new_df = pd.merge(A_df, B_df, how='left', left_on=['A_c1','c2'], right_on = ['B_c1','c2'])
len(df_name_address['first_name'].unique())
# address match = 921
df_address = pd.merge(df_match, df_good, on=['street','street_2','zip','city','state'])
len(df_address['street'].unique())
# left-excluding join to get unmatched data.
# join csv df to source df with left. matches get mapped to source that includes lat, lon. those without get NaN lat lon and get dropped.
# add indicator column to see how it was merged. "left_only" means only in csv. "both" means it appears in both files.
df_unmatched = pd.merge(df_match,df_good, how='left', indicator=True)
df_unmatched
# csv only had 1265 rows. json had over 11k rows but over 22k practices. thus, only 1265 are attempting matches.
# unmatched check displays results for the 1265 rows.
df_unmatched_results = df_unmatched[df_unmatched['_merge'].eq('left_only')]
# .drop(['df_good','_merge'],axis=1)
df_unmatched_results
# returns 574 rows but does not account for any possible doctor-address matches, only all matches of dr, npi, and address
# returns 574 rows but does not account for any possible doctor-address matches, only all matches of dr, npi, and address
"""
dataframe01 = df_match
dataframe02 = df_good
1)
"df_compare.shape[0]" will tell you how many rows were evaluated
"df_compare.count()" will give you the breakdown of values found in each column, but it will exclude NaN
2)
df_npi = pd.merge(df_match, df_good, on='npi')
len(df_npi['npi'].unique())
# returns 864
3)
df_name_address = pd.merge(df_match, df_good, on=['first_name','last_name','street','street_2','zip','city','state'])
# new_df = pd.merge(A_df, B_df, how='left', left_on=['A_c1','c2'], right_on = ['B_c1','c2'])
len(df_name_address['first_name'].unique())
# returns 799
4)
df_address = pd.merge(df_match, df_good, on=['street','street_2','zip','city','state'])
len(df_address['street'].unique())
# returns 921
5) total unmatched - return unmatched from each of above, and throw in total from left-excluding join, too
# left-excluding join to get unmatched data.
# join csv df to source df with left. matches get mapped to source that includes lat, lon. those without get NaN lat lon and get dropped.
# add indicator column to see how it was merged. "left_only" means only in csv. "both" means it appears in both files.
df_unmatched = pd.merge(df_match,df_good, how='left', indicator=True)
df_unmatched
# csv only had 1265 rows. json had over 11k rows but over 22k practices. thus, only 1265 are attempting matches.
# unmatched check displays results for the 1265 rows.
df_unmatched_results = df_unmatched[df_unmatched['_merge'].eq('left_only')]
# .drop(['df_good','_merge'],axis=1)
df_unmatched_results
# returns 574 rows but does not account for any possible doctor-address matches, only all matches of dr, npi, and address
# spot check or view sample to compare between and test
"""
df_compare = pd.merge(df_match,df_good, how='left', indicator=True)
# would be great to do a left join on this and use the indicator column to select for records in both or not in both, but would need to drop duplicates
# merge will inner join by default, which is what we want for only the npis in both data sets
df_npi = pd.merge(df_match,df_good, on='npi')
df_name_address = pd.merge(df_match, df_good, on=['first_name','last_name','street','street_2','zip','city','state'])
# only unique addresses, do left merge so can use it later for unmatched addresses
df_address = pd.merge(df_match, df_good, how = 'left', on=['street','street_2','zip','city','state'], indicator = True)
# len(df_name_address['first_name'].unique())
df_matched_results = pd.merge(df_match,df_good, how='left', indicator=True)
df_all_unmatched_columns_result = df_matched_results[df_matched_results['_merge'].eq('left_only')]
df_not_npi = pd.merge(df_match, df_good, how='left', on='npi', indicator=True).drop_duplicates(keep='first')
# df_not_npi.query('_merge != "both"')
df_not_name_address = pd.merge(df_match, df_good, how='left', on=['first_name','last_name','street','street_2','zip','city','state'], indicator=True)
print("*****Breakdown of Total Number of Documents Scanned:*****\n\n", df_compare.count())
print("\n*****MATCHED DOCUMENTS BREAKDOWN:*****\n")
print("\n*Number of Doctors Matched with NPI:\n", len(df_npi['npi'].unique()))
print("\n*Number of Doctors Matched with Name + Address:\n", len(df_name_address['first_name'].unique()))
print("\n*Number of Practices Matched with Address:\n", len(df_address.query('_merge == "both"')))
# unmatches are interesting, because you can find a practice that exists and is missing a doctor, so the doctor might need to be appended to it later
print("\n*****UNMATCHED DOCUMENTS BREAKDOWN:*****\n")
# npis not matched
print("*Number of Unmatched NPIs:\n", len(df_not_npi.query('_merge != "both"')))
# doctor name and address not matched
print("\n*Number of Unmatched Doctors by Name + Address:\n", len(df_not_name_address.query('_merge != "both"')))
# practices not matched
print("\n*Number of Unmatched Practices by Address:\n", len(df_address.query('_merge != "both"')))
# for kicks, doctors that did not match by all given fields: name, address, and npi
# this is interesting, because you can have a doctor who does not match by all fields due to missing npi, but they have the same name and one of their many addresses
print("\n*Number of Unmatched Doctors by Name, Address, and NPI:\n", len(df_all_unmatched_columns_result.query('_merge != "both"')))
print("\n*****Breakdown of Total Number of Unmatched Doctors by Name, Address, and NPI:*****\n\n", df_all_unmatched_columns_result.count())
print("\n*****EDGE CASES TO CONSIDER:*****\n- practice matches but does not have doctor, consider appending as \"new doctor\" to practice\n- doctor name and address match with NaN npi, consider assumption that doctor is same person")
# for extra kicks, write all the unmatched data to a csv so you can share it with interested parties or have it for reference later
# import os
# """change filepath as you wish"""
# file = 'matched_results.csv'
# df_matched_results.to_csv(file, header=True, index=False)
"""uncomment these for each result csv, depending on what you want to compare.\"both" means matched fields appear in both, \"left_only" means matched fields not found in source data as is"""
import os
df_matched_results.to_csv('matched_results.csv', header=True, index=False)
df_name_address.to_csv('name_address.csv', header=True, index=False)
df_address.to_csv('practice_match.csv', header=True, index=False)
print("\nMAGICAL DATA! THANK YOU FOR EXPLORING WITH ME!")
print("""
\
\\
\%, ,' , ,.
\%\,';/J,";";";;,,.
~.------------\%;((`);)));`;;,.,-----------,~
~~: ,`;@)((;`,`((;(;;);;,` :~~
~~ : ;`(@```))`~ ``; );(;));;, : ~~
~~ : `X `(( `), (;;);;;;` : ~~
~~~~ : / `) `` /;~ `;;;;;;;);, : ~~~~
~~~~ : / , ` ,/` / (`;;(;;;;, : ~~~~
~~~ : (o /]_/` / ,);;;`;;;;;`,, : ~~~
~~ : `~` `~` ` ``;, ``;" ';, : ~~
~~: YAY! `' `' `' :~~
~`-----------------------------------------`~
""")
```
| github_jupyter |
# Regularization
Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!
**You will learn to:** Use regularization in your deep learning models.
Let's first import the packages you are going to use.
### <font color='darkblue'> Updates to Assignment <font>
#### If you were working on a previous version
* The current notebook filename is version "2a".
* You can find your work in the file directory as version "2".
* To see the file directory, click on the Coursera logo at the top left of the notebook.
#### List of Updates
* Clarified explanation of 'keep_prob' in the text description.
* Fixed a comment so that keep_prob and 1-keep_prob add up to 100%
* Updated print statements and 'expected output' for easier visual comparisons.
```
# import packages
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
```
**Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head.
<img src="images/field_kiank.png" style="width:600px;height:350px;">
<caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>
They give you the following 2D dataset from France's past 10 games.
```
train_X, train_Y, test_X, test_Y = load_2D_dataset()
```
Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.
- If the dot is blue, it means the French player managed to hit the ball with his/her head
- If the dot is red, it means the other team's player hit the ball with their head
**Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.
**Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well.
You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem.
## 1 - Non-regularized model
You will use the following neural network (already implemented for you below). This model can be used:
- in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use "`lambd`" instead of "`lambda`" because "`lambda`" is a reserved keyword in Python.
- in *dropout mode* -- by setting the `keep_prob` to a value less than one
You will first try the model without any regularization. Then, you will implement:
- *L2 regularization* -- functions: "`compute_cost_with_regularization()`" and "`backward_propagation_with_regularization()`"
- *Dropout* -- functions: "`forward_propagation_with_dropout()`" and "`backward_propagation_with_dropout()`"
In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.
```
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- regularization hyperparameter, scalar (0 disables L2 regularization)
    keep_prob - probability of keeping a neuron active during drop-out, scalar (1 disables dropout)

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """
    grads = {}
    costs = []                            # to keep track of the cost
    m = X.shape[1]                        # number of examples
    layers_dims = [X.shape[0], 20, 3, 1]  # input -> 20 -> 3 -> 1 architecture

    # Initialize parameters dictionary.
    parameters = initialize_parameters(layers_dims)

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        # keep_prob selects between the plain and the dropout forward pass.
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost function: lambd selects between plain and L2-regularized cost.
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # Backward propagation.
        assert(lambd==0 or keep_prob==1)   # it is possible to use both L2 regularization and dropout,
                                           # but this assignment will only explore one at a time
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 10000 iterations
        if print_cost and i % 10000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
        if print_cost and i % 1000 == 0:
            costs.append(cost)

    # plot the cost curve (one point per 1,000 iterations)
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
Let's train the model without any regularization, and observe the accuracy on the train/test sets.
```
# Train the baseline model (no regularization, no dropout) and report accuracy.
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
```
The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
```
# Plot the decision boundary learned by the unregularized model.
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting.
## 2 - L2 Regularization
The standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:
$$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} \tag{1}$$
To:
$$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$
Let's modify your cost and observe the consequences.
**Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use :
```python
np.sum(np.square(Wl))
```
Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $.
```
# GRADED FUNCTION: compute_cost_with_regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Compute the L2-regularized cost of formula (2): cross-entropy plus
    (lambda / 2m) times the sum of squared weights over all three layers.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model
    lambd -- regularization hyperparameter, scalar

    Returns:
    cost - value of the regularized loss function (formula (2))
    """
    m = Y.shape[1]
    cross_entropy_cost = compute_cost(A3, Y)  # cross-entropy part of the cost

    # L2 penalty: sum ||W_l||^2 over the three weight matrices, scaled by lambda/(2m).
    weights = (parameters["W1"], parameters["W2"], parameters["W3"])
    sum_of_squares = sum(np.sum(np.square(W)) for W in weights)
    L2_regularization_cost = lambd/(m*2)*sum_of_squares

    return cross_entropy_cost + L2_regularization_cost
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()
print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))
```
**Expected Output**:
<table>
<tr>
<td>
**cost**
</td>
<td>
1.78648594516
</td>
</tr>
</table>
Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost.
**Exercise**: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\frac{d}{dW} ( \frac{1}{2}\frac{\lambda}{m} W^2) = \frac{\lambda}{m} W$).
```
# GRADED FUNCTION: backward_propagation_with_regularization
def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Backward pass of the baseline 3-layer network with the L2 penalty's
    gradient ((lambda/m) * W) added to each weight gradient.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    reg = lambd/m  # d/dW of (lambda/(2m)) * W^2

    # Output layer: sigmoid + cross-entropy collapses to dZ3 = A3 - Y.
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T) + reg*W3
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)

    # Second hidden layer: ReLU passes gradient only where the activation was positive.
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * np.int64(A2 > 0)
    dW2 = 1./m * np.dot(dZ2, A1.T) + reg*W2
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)

    # First hidden layer.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * np.int64(A1 > 0)
    dW1 = 1./m * np.dot(dZ1, X.T) + reg*W1
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()
grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)
print ("dW1 = \n"+ str(grads["dW1"]))
print ("dW2 = \n"+ str(grads["dW2"]))
print ("dW3 = \n"+ str(grads["dW3"]))
```
**Expected Output**:
```
dW1 =
[[-0.25604646 0.12298827 -0.28297129]
[-0.17706303 0.34536094 -0.4410571 ]]
dW2 =
[[ 0.79276486 0.85133918]
[-0.0957219 -0.01720463]
[-0.13100772 -0.03750433]]
dW3 =
[[-1.77691347 -0.11832879 -0.09397446]]
```
Let's now run the model with L2 regularization $(\lambda = 0.7)$. The `model()` function will call:
- `compute_cost_with_regularization` instead of `compute_cost`
- `backward_propagation_with_regularization` instead of `backward_propagation`
```
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
```
Congrats, the test set accuracy increased to 93%. You have saved the French football team!
You are not overfitting the training data anymore. Let's plot the decision boundary.
```
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
**Observations**:
- The value of $\lambda$ is a hyperparameter that you can tune using a dev set.
- L2 regularization makes your decision boundary smoother. If $\lambda$ is too large, it is also possible to "oversmooth", resulting in a model with high bias.
**What is L2-regularization actually doing?**:
L2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes.
<font color='blue'>
**What you should remember** -- the implications of L2-regularization on:
- The cost computation:
- A regularization term is added to the cost
- The backpropagation function:
- There are extra terms in the gradients with respect to weight matrices
- Weights end up smaller ("weight decay"):
- Weights are pushed to smaller values.
## 3 - Dropout
Finally, **dropout** is a widely used regularization technique that is specific to deep learning.
**It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means!
<!--
To understand drop-out, consider this conversation with a friend:
- Friend: "Why do you need all these neurons to train your network and classify images?".
- You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more features my model learns!"
- Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?"
- You: "Good point... Neurons in the same layer actually don't talk to each other. It should be definitely possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution."
!-->
<center>
<video width="620" height="440" src="images/dropout1_kiank.mp4" type="video/mp4" controls>
</video>
</center>
<br>
<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\_prob$ or keep it with probability $keep\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>
<center>
<video width="620" height="440" src="images/dropout2_kiank.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>
When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time.
### 3.1 - Forward propagation with dropout
**Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer.
**Instructions**:
You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:
1. In lecture, we discussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$.
2. Set each entry of $D^{[1]}$ to be 1 with probability (`keep_prob`), and 0 otherwise.
**Hint:** Let's say that keep_prob = 0.8, which means that we want to keep about 80% of the neurons and drop out about 20% of them. We want to generate a vector that has 1's and 0's, where about 80% of them are 1 and about 20% are 0.
This python statement:
`X = (X < keep_prob).astype(int)`
is conceptually the same as this if-else statement (for the simple case of a one-dimensional array) :
```
for i,v in enumerate(x):
if v < keep_prob:
x[i] = 1
else: # v >= keep_prob
x[i] = 0
```
Note that the `X = (X < keep_prob).astype(int)` works with multi-dimensional arrays, and the resulting output preserves the dimensions of the input array.
Also note that without using `.astype(int)`, the result is an array of booleans `True` and `False`, which Python automatically converts to 1 and 0 if we multiply it with numbers. (However, it's better practice to convert data into the data type that we intend, so try using `.astype(int)`.)
3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.
4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)
```
# GRADED FUNCTION: forward_propagation_with_dropout
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Forward pass: LINEAR -> RELU (+dropout) -> LINEAR -> RELU (+dropout) -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (20, 2)
                    b1 -- bias vector of shape (20, 1)
                    W2 -- weight matrix of shape (3, 20)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- sigmoid output of the last layer (one value per example)
    cache -- tuple of intermediate values needed by the backward pass
    """
    np.random.seed(1)  # fixed seed so the graded output is reproducible

    # unpack parameters
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # layer 1: linear -> relu -> inverted dropout
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    D1 = (np.random.rand(A1.shape[0], A1.shape[1]) < keep_prob).astype(int)  # dropout mask
    A1 = A1 * D1 / keep_prob  # shut neurons down, then rescale to keep E[A1] unchanged

    # layer 2: linear -> relu -> inverted dropout
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    D2 = (np.random.rand(A2.shape[0], A2.shape[1]) < keep_prob).astype(int)  # dropout mask
    A2 = A2 * D2 / keep_prob

    # output layer: no dropout on the output
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache
X_assess, parameters = forward_propagation_with_dropout_test_case()
A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)
print ("A3 = " + str(A3))
```
**Expected Output**:
<table>
<tr>
<td>
**A3**
</td>
<td>
[[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]
</td>
</tr>
</table>
### 3.2 - Backward propagation with dropout
**Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache.
**Instruction**:
Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:
1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`.
2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`).
```
# GRADED FUNCTION: backward_propagation_with_dropout
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Backward pass of the 3-layer model with inverted dropout on layers 1 and 2.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- dict of gradients w.r.t. each parameter, activation and pre-activation
    """
    m = X.shape[1]
    (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # output layer: sigmoid + cross-entropy simplifies to A3 - Y
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)

    # layer 2: re-apply the forward dropout mask D2, then rescale by keep_prob
    dA2 = np.dot(W3.T, dZ3) * D2 / keep_prob
    dZ2 = dA2 * (A2 > 0)  # relu derivative: pass gradient only where A2 was active
    dW2 = 1./m * np.dot(dZ2, A1.T)
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True)

    # layer 1: same mask-and-rescale, with D1
    dA1 = np.dot(W2.T, dZ2) * D1 / keep_prob
    dZ1 = dA1 * (A1 > 0)
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()
gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)
print ("dA1 = \n" + str(gradients["dA1"]))
print ("dA2 = \n" + str(gradients["dA2"]))
```
**Expected Output**:
```
dA1 =
[[ 0.36544439 0. -0.00188233 0. -0.17408748]
[ 0.65515713 0. -0.00337459 0. -0. ]]
dA2 =
[[ 0.58180856 0. -0.00299679 0. -0.27715731]
[ 0. 0.53159854 -0. 0.53159854 -0.34089673]
[ 0. 0. -0.00292733 0. -0. ]]
```
Let's now run the model with dropout (`keep_prob = 0.86`). This means that at every iteration you shut down each neuron of layers 1 and 2 with 14% probability. The function `model()` will now call:
- `forward_propagation_with_dropout` instead of `forward_propagation`.
- `backward_propagation_with_dropout` instead of `backward_propagation`.
```
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
```
Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you!
Run the code below to plot the decision boundary.
```
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
**Note**:
- A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training.
- Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks.
<font color='blue'>
**What you should remember about dropout:**
- Dropout is a regularization technique.
- You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time.
- Apply dropout both during forward and backward propagation.
- During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob is other values than 0.5.
## 4 - Conclusions
**Here are the results of our three models**:
<table>
<tr>
<td>
**model**
</td>
<td>
**train accuracy**
</td>
<td>
**test accuracy**
</td>
</tr>
<td>
3-layer NN without regularization
</td>
<td>
95%
</td>
<td>
91.5%
</td>
<tr>
<td>
3-layer NN with L2-regularization
</td>
<td>
94%
</td>
<td>
93%
</td>
</tr>
<tr>
<td>
3-layer NN with dropout
</td>
<td>
93%
</td>
<td>
95%
</td>
</tr>
</table>
Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system.
Congratulations for finishing this assignment! And also for revolutionizing French football. :-)
<font color='blue'>
**What we want you to remember from this notebook**:
- Regularization will help you reduce overfitting.
- Regularization will drive your weights to lower values.
- L2 regularization and Dropout are two very effective regularization techniques.
| github_jupyter |
## First test tables notebook: Create and destroy tables in Postgres using psycopg2
### Using GALAH data to test because they have full fits headers
```
# imports
import os
from astropy.io import fits
import sqlalchemy
from sqlalchemy import create_engine, Table, Column, Integer, String, Float, MetaData, ForeignKey
# do some setup
# detect current location
path1=os.getcwd()
# path to local data stash (input by user)
datapath='/Users/sarah/active/programs/FunnelWeb/computing/test-data/'
# create a new database
engine=create_engine("postgresql://sarah:imoLonae@localhost/db1")
# this isn't currently working (but errors out silently) - set up the database directly in Postgres?
metadata=MetaData()
# create the observation table: includes all exposures
# the extend_existing=True allows updates to an existing Table definition
observationtable=Table('observationtable',metadata,
Column('obsid',String,primary_key=True),
Column('configfile',String),
Column('mjd',String),
Column('date',Integer),
Column('filename',String),
Column('telra',Float),
Column('teldec',Float),
Column('obstype',String),
Column('exptime',Float),
Column('camera',Integer)
,extend_existing=True)
# create one table that's been defined, but only if it doesn't already exist (default behaviour)
observationtable.create(engine,checkfirst=True)
# create all tables that were defined, whether or not they already exist
# not sure what happens when there's a clash)
#metadata.create_all(engine, checkfirst=False)
# drop one particular table, but only if it exists (default behaviour)
observationtable.drop(engine,checkfirst=True)
# drop all tables, whether or not they exist
# not sure what happens if they don't exist
metadata.drop_all(engine)
# find all local data files, loop through and concatenate the header information into a list of dictionaries
updatelist=[]
for root,dirs,files in os.walk(datapath):
for name in files:
fitspath=os.path.join(root,name)
header = fits.getheader(fitspath)
updatelist.append({'obsid':header['FILEORIG'][-9:-5]
,'configfile':header['CFG_FILE']
,'mjd':header['UTMJD']
,'date':int((header['UTDATE'].replace(':',''))[2:])
,'filename':(header['FILEORIG'].split('/'))[-1]
,'telra':header['MEANRA']
,'teldec':header['MEANDEC']
,'obstype':header['OBSTYPE']
,'exptime':header['EXPOSED']})
updatelist
# populate the table we just created by inserting values into columns
# add one row to the table
#ins = observationtable.insert()
#conn.execute(ins, obsid='0012', configfile='field1324tile3.txt')
# add many rows to the table: feed in a list of dictionaries
# FIX: `conn` was never created anywhere in this notebook, so this cell raised
# NameError. Open a connection from the engine first (and close it when done).
with engine.connect() as conn:
    conn.execute(observationtable.insert(), updatelist)
```
| github_jupyter |
## <span style="color:purple">ArcGIS API for Python: Real-time Person Detection</span>
<img src="../img/webcam_detection.PNG" style="width: 100%"></img>
## Integrating ArcGIS with TensorFlow Deep Learning using the ArcGIS API for Python
This notebook provides an example of integration between ArcGIS and deep learning frameworks like TensorFlow using the ArcGIS API for Python.
<img src="../img/ArcGIS_ML_Integration.png" style="width: 75%"></img>
We will leverage a model to detect objects on your device's video camera, and use these to update a feature service on a web GIS in real-time. As people are detected on your camera, the feature will be updated to reflect the detection.
### Notebook Requirements:
#### 1. TensorFlow and Object Detection API
This demonstration is designed to run using the TensorFlow Object Detection API (https://github.com/tensorflow/models/tree/master/research/object_detection)
Please follow the instructions found in that repository to install TensorFlow, clone the repository, and test a pre-existing model.
Once you have followed those instructions, this notebook should be placed within the "object_detection" folder of that repository. Alternatively, you may leverage this notebook from another location but reference paths to the TensorFlow model paths and utilities will need to be adjusted.
#### 2. Access to ArcGIS Online or ArcGIS Enterprise
This notebook will make a connection to an ArcGIS Enterprise or ArcGIS Online organization to provide updates to a target feature service.
Please ensure you have access to an ArcGIS Enterprise or ArcGIS Online account with a feature service to serve as the target of your detection updates. The feature service should have a record with an boolean attribute (i.e. column with True or False possible options) named "Person_Found".
# Import needed modules
```
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
```
We will use VideoCapture to connect to the device's web camera feed. The cv2 module helps here.
```
# Set our caption
cap = cv2.VideoCapture(0)
# This is needed since the notebook is meant to be run in the object_detection folder.
sys.path.append("..")
```
## Object detection imports
Here are the imports from the object detection module.
```
from utils import label_map_util
from utils import visualization_utils as vis_util
```
# Model preparation
## Variables
Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
```
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
```
## Download Model
```
# Download the model archive and extract only the frozen inference graph.
# FIX: urllib.request.URLopener is deprecated (since Python 3.3) and removed in
# newer releases; use the plain urlretrieve helper instead.
urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        # extract into the current working directory, preserving archive paths
        tar_file.extract(file, os.getcwd())
```
## Load a (frozen) Tensorflow model into memory.
```
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
```
## Loading label map
Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
```
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
category_index
```
## Helper code
```
def load_image_into_numpy_array(image):
    # Convert a PIL image into a (height, width, 3) uint8 numpy array.
    width, height = image.size
    pixels = np.asarray(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
```
This is a helper function that takes the detection graph output tensor (np arrays), stacks the classes and scores, and determines if the class for a person (1) is available within a certain score and within a certain amount of objects
```
def person_in_image(classes_arr, scores_arr, obj_thresh=5, score_thresh=0.5):
    """Return True if a 'person' (COCO class id 1) is among the top detections.

    Arguments:
    classes_arr -- 1-D array of detected class ids, aligned with scores_arr
    scores_arr -- 1-D array of detection confidences
    obj_thresh -- how many of the top detections to inspect
    score_thresh -- minimum confidence for a detection to count

    Fixes vs. the previous version:
    - only the class id is compared against 1 (before, the membership test
      `1.0 in (class, score)` also matched any detection whose *score* was
      exactly 1.0, regardless of class);
    - obj_thresh is clamped to the number of detections, so fewer than
      obj_thresh detections no longer raises IndexError;
    - returns early on the first match instead of scanning the full window.
    """
    limit = min(obj_thresh, len(classes_arr))
    for ix in range(limit):
        if classes_arr[ix] == 1 and scores_arr[ix] >= score_thresh:
            return True
    return False
```
# Establish Connection to GIS via ArcGIS API for Python
### Authenticate
```
import arcgis
gis_url = "" # Replace with gis URL
username = "" # Replace with username
gis = arcgis.gis.GIS(gis_url, username)
```
### Retrieve the Object Detection Point Layer
```
target_service_name = "" # Replace with name of target service
object_point_srvc = gis.content.search(target_service_name)[0]
object_point_srvc
# Convert our existing service into a pandas dataframe
object_point_lyr = object_point_srvc.layers[0]
obj_fset = object_point_lyr.query() #querying without any conditions returns all the features
obj_df = obj_fset.df
obj_df.head()
all_features = obj_fset.features
all_features
from copy import deepcopy
original_feature = all_features[0]
feature_to_be_updated = deepcopy(original_feature)
feature_to_be_updated
```
### Test of Manual Update
```
feature_to_be_updated.attributes['Person_Found']
features_for_update = []
feature_to_be_updated.attributes['Person_Found'] = "False"
features_for_update.append(feature_to_be_updated)
object_point_lyr.edit_features(updates=features_for_update)
```
# Detection
```
# Main detection loop: read webcam frames, run the detector, show the annotated
# frame, and push the person-found flag to the feature service on every frame.
# NOTE(review): the loop runs until 'q' is pressed in the OpenCV window, and
# `cap` is never released (cap.release()) — consider adding that after the loop.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Definite input and output Tensors for detection_graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        while True:
            # NOTE(review): `ret` (frame-read success) is never checked; a failed
            # read would pass image_np=None into the model — TODO confirm.
            ret, image_np = cap.read()
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8, min_score_thresh=0.5)
            cv2.imshow('object detection', cv2.resize(image_np, (800,600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
            # Check whether a person appears among the top 2 detections.
            person_found = person_in_image(np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                obj_thresh=2)
            # NOTE(review): edit_features is a network round-trip executed on
            # every frame; consider updating only when person_found changes.
            features_for_update = []
            feature_to_be_updated.attributes['Person_Found'] = str(person_found)
            features_for_update.append(feature_to_be_updated)
            object_point_lyr.edit_features(updates=features_for_update)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Meet953/TUS-Engineering-Team-Project/blob/main/ARIMA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
def test_stationarity(timeseries):
    """Visual + statistical stationarity check for a pandas Series.

    Plots the series alongside its 5-point rolling mean and rolling standard
    deviation, then prints the headline numbers of an Augmented Dickey-Fuller
    test (relies on the notebook-level `pd` alias for pandas).
    """
    import matplotlib.pyplot as plt

    window = 5
    plt.plot(timeseries, label='Original')
    plt.plot(timeseries.rolling(window=window).mean(), label='Rolling mean')
    plt.plot(timeseries.rolling(window=window).std(), label='Rolling std')
    plt.legend(loc='best')
    plt.title('Timeseries data with rolling mean and std. deviation')
    plt.show()

    from statsmodels.tsa.stattools import adfuller
    adf_result = adfuller(timeseries)
    summary = pd.Series(
        adf_result[0:4],
        index=['Test Statistics', 'Mackinnons approx p-value', 'used lags', 'NOBS'],
    )
    print(summary)
url = 'https://raw.githubusercontent.com/SimonMcLain/TUS-Engineering-Team-Project/main/Data/HSE/COVID-19_HPSC_County_Statistics_Historic_Data.csv'
import pandas as pd
covid_19_dataset = pd.read_csv(url)
covid_19_dataset.head()
covid_19_dataset.info()
covid_19_dataset['TimeStamp'] = pd.to_datetime(covid_19_dataset['TimeStamp'], infer_datetime_format=True)
indexed_covid_19_dataset = covid_19_dataset.set_index(['TimeStamp'])
indexed_covid_19_dataset.head()
covid_19_confirmed_case_dataset = indexed_covid_19_dataset['ConfirmedCovidCases']
covid_19_confirmed_agg_dataset = covid_19_confirmed_case_dataset.groupby('TimeStamp').sum()
test_stationarity(covid_19_confirmed_agg_dataset)
import numpy as np
covid_19_confirmed_agg_dataset_log_scaled = np.log(covid_19_confirmed_agg_dataset)
covid_19_confirmed_agg_dataset_log_scaled = covid_19_confirmed_agg_dataset_log_scaled[covid_19_confirmed_agg_dataset_log_scaled > 0]
test_stationarity(covid_19_confirmed_agg_dataset_log_scaled)
ma = covid_19_confirmed_agg_dataset_log_scaled.rolling(window=6).mean()
covid_19_confirmed_agg_dataset_log_scaled_minus_ma = covid_19_confirmed_agg_dataset_log_scaled - ma
covid_19_confirmed_agg_dataset_log_scaled_minus_ma.dropna(inplace = True)
test_stationarity(covid_19_confirmed_agg_dataset_log_scaled_minus_ma)
covid_19_confirmed_agg_dataset_log_scaled_ps = covid_19_confirmed_agg_dataset_log_scaled.diff(periods=6)
covid_19_confirmed_agg_dataset_log_scaled_ps.dropna(inplace = True)
test_stationarity(covid_19_confirmed_agg_dataset_log_scaled_ps)
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
import matplotlib.pyplot as plt
lag_acf = acf(covid_19_confirmed_agg_dataset_log_scaled_ps, nlags = 32)
lag_pacf = pacf(covid_19_confirmed_agg_dataset_log_scaled_ps, nlags = 16)
fig,ax = plt.subplots(1,2,figsize=(20,5))
plot_acf(lag_acf, ax = ax[0])
plot_pacf(lag_pacf, lags = 7, ax = ax[1])
plt.show()
def predict(timeseries,p,d,q):
    # Fit an ARIMA(p, d, q) model on the first 80% of `timeseries`, forecast the
    # remaining 20%, print the MSE, and plot forecast vs. actuals after undoing
    # the log transform applied upstream.
    # NOTE(review): statsmodels.tsa.arima_model.ARIMA is the deprecated API;
    # recent statsmodels versions only provide statsmodels.tsa.arima.model.ARIMA.
    from statsmodels.tsa.arima_model import ARIMA
    from sklearn.model_selection import train_test_split
    timeseries.dropna(inplace = True)
    train, test = train_test_split(timeseries, test_size = 0.20, shuffle = False)
    model_arima = ARIMA(train, order=(p,d,q))
    model_arima_fit = model_arima.fit()
    # NOTE(review): the forecast window is hard-coded; it must coincide with the
    # dates of the 20% test split for mean_squared_error(test, predictions) to
    # align — TODO confirm whenever the input date range changes.
    predictions = model_arima_fit.predict(start='2021-08-14', end = '2021-12-22')
    from sklearn.metrics import mean_squared_error
    error = mean_squared_error(test, predictions)
    print('Test MSE %.5f' % error)
    # exp() undoes the log scaling so the plot is in original units
    # (note: local name `predict` shadows this function inside its own body)
    predict = np.exp(predictions)
    test_set = np.exp(test)
    plt.plot(test_set)
    plt.plot(predict, color='red')
    plt.show()
    from pandas import DataFrame
    # density plot of the fit residuals (should look roughly zero-centred)
    residual = DataFrame(model_arima_fit.resid)
    residual.plot(kind='kde')
predict(covid_19_confirmed_agg_dataset_log_scaled_minus_ma, 10,2,3)
from sklearn.model_selection import train_test_split
covid_19_confirmed_agg_dataset_log_scaled_ps.dropna(inplace = True)
train, test= train_test_split(covid_19_confirmed_agg_dataset_log_scaled_ps,test_size = 0.20, shuffle = False)
test.head()
```
| github_jupyter |
# This is the Python Code for Chapter2 ''Statistical Learning"
## 2.3.1 Basic Commands
```
import numpy as np # for calculation purpose, let use np.array
import random # for the random
x = np.array([1, 3, 2, 5])
print(x)
x = np.array([1, 6, 2])
print(x)
y = [1, 4, 3]
```
### use len() to find length of a vector
```
len(x)
len(y)
print(x + y) # please note that we define x and y a little bit differently, but we still can do the calculation
y = np.array([1, 4, 3])
whos
del x # reset_selective x
%whos
reset?
x = [[1,2],[3, 4]]
print (x)
x = np.array([1, 2, 3, 4])
x = np.reshape(x, [2,2])
print(x)
np.sqrt(x)
x**2
np.square(x)
mu, sigma = 0, 1
x = np.random.normal(mu, sigma, 50)
y = x + np.random.normal(50, 0.1, 50)
print( x, y)
np.corrcoef(x, y)
```
### Above will return the correlation matrix
```
np.corrcoef(x, y)[0,1]
import random
random.seed(2333)
np.random.normal(mu, sigma, 50) # after set up the seed, this should genernate the same result
y = np.random.normal(mu, sigma, 100)
print (np.mean(y))
print (np.var(y))
print (np.sqrt(np.var(y)))
print (np.std(y))
```
### if we raise the number of sample to a larger number, the mean and std will be more close to (0, 1)
```
y = np.random.normal(mu, sigma, 5000)
print (np.mean(y))
print (np.std(y))
```
## 2.3.2 Graphics
```
import numpy as np # for calculation purpose, let use np.array
import random # for the random
x = np.random.normal(0, 1, 100)
y = np.random.normal(0, 1, 100)
# In python, matplotlib is the most used library for plot
# matplotlib.pyplot is a collection of command style functions that make matplotlib work like MATLAB.
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, 'bo') # please use plt.plot? to look at more options
plt.ylabel("this is the y-axis")
plt.xlabel("this is the x-axis")
plt.title("Plot of X vs Y")
plt.savefig('Figure.pdf') # use plt.savefig function to save images
plt.show()
x = np.arange(1, 11) # note the arange excludes right end of rande specification
print (x )
```
### note: this actually can result in unexpected results; check np.arange(0.2, 0.6, 0.4) vs np.arange(0.2, 1.6, 1.4);
```
print(np.arange(0.2,0.6,0.4))
print(np.arange(0.2,1.6,1.4))
# in order to use Pi, math module needs to loaded first
import math
x = np.linspace(-math.pi, math.pi, num = 50)
print (x)
import matplotlib.cm as cm
import matplotlib.mlab as mlab
y = x
X, Y = np.meshgrid(x,y)
whos
plt.figure()
f = np.cos(Y)/(1 + np.square(X))
CS = plt.contour(X, Y, f)
plt.show()
```
### same as above, use plt.contour? to explore the options
```
fa = (f - f.T)/2 #f.T for transpose or tranpose(f)
plt.imshow(fa, extent=(x[0], x[-1], y[0], y[-1]))
plt.show()
```
### I think imshow looks nicer for heatmap, use 'extent =' fix the x, y axis
```
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, fa)
plt.show()
```
## 2.3.3 Indexing Data
```
A = np.arange(1,17,1).reshape(4, 4).transpose()
print(A)
A[2, 3]
```
### We try the same index as the book, but we get a different number. The reason is that R starts the index from 1 (Matlab too), but Python starts the index from 0. To select the same number (10) as the book did, we reduce each index by 1
```
A[1, 2]
```
### to select a submatrix, need the non-singleton dimension of your indexing array to be aligned with the axis you're indexing into, e.g. for an n x m 2D subarray: A[n by 1 array,1 by m array]
```
A[[[0],[2]], [1,3]]
A[0:3:1, 1:4:1] # this is another way of doing it
A[0:2,:]
A[:,0:2]
```
### The last two examples include either no index for the columns or no index for the rows. These indicate that Python should include all columns or all rows, respectively
```
A[0,:]
```
### '-' sign has a different meaning in Python. This means index from the end, -1 means the last element
```
A[-1, -1]
```
### There are quite a few ways to let Python keep all rows except certain index. Here boolean was used.
```
ind = np.ones((4,), bool)
ind[[0,2]] = False
ind
A[ind,:]
A[ind]
A.shape
```
## 2.3.4 Loading Data
### In Python, Pandas is a common used module to read from file into a data frame. I downloaded the Auto.csv from the book website. First, take a look at the csv file. There are headers, missing value is marked by '?' .
```
import pandas as pd
Auto = pd.read_csv('data/Auto.csv', header=0, na_values='?')
```
### check one record with missing value, and make sure the missing value is correctly imported
```
Auto.iloc[32]
```
### Use the same function as in ndarray to find out the dimension of the data frame
```
Auto.shape
Auto[:4]
Auto.iloc[:4, :2]
list(Auto)
```
### Use .isnull and .sum to find out how many NaNs in each variables
```
Auto.isnull().sum()
```
### after the previous steps, there are 397 obs in the data and only 5 with missing values. We can just drop the ones with missing values
```
Auto = Auto.dropna()
Auto.shape
```
## 2.3.5 Additional Graphical and Numerical Summaries
### refer a column of data frame by name, by using a '.'. Ref the options in plt.plot for more.
```
plt.plot(Auto.cylinders, Auto.mpg, 'ro')
plt.show()
```
### Use .hist to get the histogram of certain variables. column = to specify which variable
```
Auto.hist(column = ['cylinders', 'mpg'])
plt.show()
```
### Use the .describe() to get a summary of the data frame. Use .describe ( include = 'all' ) for mix types, use describe(include = [np.number]) for numerical columns, use describe(include = ['O']) for objects.
```
Auto.describe()
```
### We can change type of certain variable(s). Here changed the cylinders into categorical variable
```
Auto['cylinders'] = Auto['cylinders'].astype('category')
Auto.describe()
Auto.describe(include= 'all')
```
## Exercises
| github_jupyter |
```
from pathlib import Path
import pandas as pd
import numpy as np
from import_clean_data import load_annotated_meter_data, load_co2_data
from load_dayahead_prices import load_dayahead_prices
import warnings
import matplotlib.pyplot as plt
DATA_DIR = (Path.cwd() / ".." / "Data").resolve()
dayahead_2020_filename = "Day-ahead Prices_202001010000-202101010000.csv"
dayahead_2021_filename = "Day-ahead Prices_202101010000-202201010000.csv"
dayahead_2020 = load_dayahead_prices(DATA_DIR / dayahead_2020_filename)
dayahead_2021 = load_dayahead_prices(DATA_DIR / dayahead_2021_filename)
dayahead_2020
dayahead = pd.concat([dayahead_2020, dayahead_2021])
dayahead.drop(["BZN|CH"], axis=1, inplace=True)
dayahead = dayahead[dayahead['Day-ahead Price [EUR/MWh]'].notna()]
dayahead.dtypes
mondays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 0]
tuesdays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 1]
wednesdays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 2]
thursdays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 3]
fridays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 4]
saturdays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 5]
sundays = dayahead.loc[dayahead['datetime'].dt.dayofweek == 6]
# Calculate price per hour of average weekday
days = [mondays, tuesdays, wednesdays, thursdays, fridays, saturdays, sundays]
result = {}
j = 0
for day in days:
hours = []
for i in range(24):
thishour = day.loc[day['datetime'].dt.hour == i]
number = len(thishour.index)
avg = thishour['Day-ahead Price [EUR/MWh]'].sum() / number
hours.append(avg)
result[str(j)] = hours
j += 1
plt.figure(figsize=(14, 8))
plt.plot(result['0'], label="Monday")
plt.plot(result['1'], label="Tuesday")
plt.plot(result['2'], label="Wednesdays")
plt.plot(result['3'], label="Thursday")
plt.plot(result['4'], label="Friday")
plt.plot(result['5'], label="Saturday")
plt.plot(result['6'], label="Sunday")
plt.title("Average energy price per hour of weekday")
plt.ylabel("day-ahead price (EUR / MWh)")
plt.xlabel("hour of day [UTC]")
plt.legend()
plt.savefig("daily_dayahead_price.png", dpi=300)
# per month
Jan = dayahead.loc[dayahead['datetime'].dt.month == 1]
Feb = dayahead.loc[dayahead['datetime'].dt.month == 2]
Mar = dayahead.loc[dayahead['datetime'].dt.month == 3]
Apr = dayahead.loc[dayahead['datetime'].dt.month == 4]
Mai = dayahead.loc[dayahead['datetime'].dt.month == 5]
Jun = dayahead.loc[dayahead['datetime'].dt.month == 6]
Jul = dayahead.loc[dayahead['datetime'].dt.month == 7]
Aug = dayahead.loc[dayahead['datetime'].dt.month == 8]
Sep = dayahead.loc[dayahead['datetime'].dt.month == 9]
Okt = dayahead.loc[dayahead['datetime'].dt.month == 10]
Nov = dayahead.loc[dayahead['datetime'].dt.month == 11]
Dez = dayahead.loc[dayahead['datetime'].dt.month == 12]
# Calculate average price per month
months = [Jan, Feb, Mar, Apr, Mai, Jun, Jul, Aug, Sep, Okt, Nov, Dez]
result = {}
j = 0
for month in months:
hours = []
for i in range(24):
thishour = month.loc[month['datetime'].dt.hour == i]
number = len(thishour.index)
avg = thishour['Day-ahead Price [EUR/MWh]'].sum() / number
hours.append(avg)
result[str(j)] = hours
j += 1
plt.plot(result['0'], label="Jan")
plt.plot(result['1'], label="Feb")
plt.plot(result['2'], label="Mar")
plt.plot(result['3'], label="Apr")
plt.plot(result['4'], label="Mai")
plt.plot(result['5'], label="Jun")
plt.plot(result['6'], label="Jul")
plt.plot(result['7'], label="Aug")
plt.plot(result['8'], label="Sep")
plt.plot(result['9'], label="Okt")
plt.plot(result['10'], label="Nov")
plt.plot(result['11'], label="Dez")
plt.title("Average electricity price per hour of months")
plt.ylabel("day-ahead price (EUR / MWh)")
plt.xlabel("hour of day [UTC]")
plt.legend();
# for every weekday, plot all months
monthcount = 1
for month in months:
mondays = month.loc[month['datetime'].dt.dayofweek == 0]
tuesdays = month.loc[month['datetime'].dt.dayofweek == 1]
wednesdays = month.loc[month['datetime'].dt.dayofweek == 2]
thursdays = month.loc[month['datetime'].dt.dayofweek == 3]
fridays = month.loc[month['datetime'].dt.dayofweek == 4]
saturdays = month.loc[month['datetime'].dt.dayofweek == 5]
sundays = month.loc[month['datetime'].dt.dayofweek == 6]
# Calculate average co2_intensity per weekday
days = [mondays, tuesdays, wednesdays, thursdays, fridays, saturdays, sundays]
result = {}
j = 0
for day in days:
hours = []
for i in range(24):
thishour = day.loc[day['datetime'].dt.hour == i]
number = len(thishour.index)
avg = thishour['Day-ahead Price [EUR/MWh]'].sum() / number
hours.append(avg)
result[str(j)] = hours
j += 1
# plot
plt.figure()
plt.plot(result['0'], label="Monday")
plt.plot(result['1'], label="Tuesday")
plt.plot(result['2'], label="Wednesdays")
plt.plot(result['3'], label="Thursday")
plt.plot(result['4'], label="Friday")
plt.plot(result['5'], label="Saturday")
plt.plot(result['6'], label="Sunday")
plt.title("Average electricity price per hour of weekday in month " + str(monthcount))
plt.ylabel("Day-ahead Price [EUR/MWh]")
plt.xlabel("hour of day [UTC]")
plt.legend()
monthcount += 1
plt.show()
```
| github_jupyter |
```
import tensorflow as tf
config = tf.compat.v1.ConfigProto(
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8),
)
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(session)
import os
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from pathlib import Path
from keras import backend, layers, activations, Model
from amp.utils.basic_model_serializer import load_master_model_components
from amp.models.decoders import amp_expanded_decoder
from amp.models.encoders import amp_expanded_encoder
from amp.models.master import master
from amp.utils import basic_model_serializer
import amp.data_utils.data_loader as data_loader
from amp.data_utils.sequence import pad, to_one_hot
from tqdm import tqdm
from joblib import dump, load
from sklearn.decomposition import PCA
import seaborn as sns
import matplotlib.pyplot as plt
params = {'axes.labelsize': 16,
'axes.titlesize': 24,
'xtick.labelsize':14,
'ytick.labelsize': 14}
plt.rcParams.update(params)
plt.rc('text', usetex=False)
sns.set_style('whitegrid', {'grid.color': '.95', 'axes.spines.right': False, 'axes.spines.top': False})
sns.set_context("notebook")
seed = 7
np.random.seed(seed)
from amp.config import MIN_LENGTH, MAX_LENGTH, LATENT_DIM, MIN_KL, RCL_WEIGHT, HIDDEN_DIM, MAX_TEMPERATURE
input_to_encoder = layers.Input(shape=(MAX_LENGTH,))
input_to_decoder = layers.Input(shape=(LATENT_DIM+2,))
def translate_generated_peptide(encoded_peptide):
    """Decode a one-hot generated peptide (batch of one) to an amino-acid string.

    Takes the argmax over the last axis of ``encoded_peptide[0]``; index 0 is
    padding and is dropped, indices 1-20 map to the 20 standard amino acids.
    """
    alphabet = 'ACDEFGHIKLMNPQRSTVWY'
    residues = []
    for code in encoded_peptide[0].argmax(axis=1):
        if code != 0:
            residues.append(alphabet[code - 1])
    return ''.join(residues)
def translate_peptide(encoded_peptide):
    """Decode an integer-encoded peptide to an amino-acid string.

    Code 0 is padding and is skipped; codes 1-20 map to the 20 standard
    amino acids.
    """
    alphabet = 'ACDEFGHIKLMNPQRSTVWY'
    return ''.join(alphabet[code - 1] for code in encoded_peptide if code != 0)
models = [
'HydrAMP',
'PepCVAE',
'Basic',
]
model_labels = [
'HydrAMP',
'PepCVAE',
'Basic',
]
bms = basic_model_serializer.BasicModelSerializer()
amp_classifier = bms.load_model('../models/amp_classifier')
amp_classifier_model = amp_classifier()
mic_classifier = bms.load_model('../models/mic_classifier/')
mic_classifier_model = mic_classifier()
```
# Get validation data
```
data_manager = data_loader.AMPDataManager(
'../data/unlabelled_positive.csv',
'../data/unlabelled_negative.csv',
min_len=MIN_LENGTH,
max_len=MAX_LENGTH)
amp_x, amp_y = data_manager.get_merged_data()
amp_x_train, amp_x_test, amp_y_train, amp_y_test = train_test_split(amp_x, amp_y, test_size=0.1, random_state=36)
amp_x_train, amp_x_val, amp_y_train, amp_y_val = train_test_split(amp_x_train, amp_y_train, test_size=0.2, random_state=36)
# Restrict the length
ecoli_df = pd.read_csv('../data/mic_data.csv')
mask = (ecoli_df['sequence'].str.len() <= MAX_LENGTH) & (ecoli_df['sequence'].str.len() >= MIN_LENGTH)
ecoli_df = ecoli_df.loc[mask]
mic_x = pad(to_one_hot(ecoli_df['sequence']))
mic_y = ecoli_df.value
mic_x_train, mic_x_test, mic_y_train, mic_y_test = train_test_split(mic_x, mic_y, test_size=0.1, random_state=36)
mic_x_train, mic_x_val, mic_y_train, mic_y_val = train_test_split(mic_x_train, mic_y_train, test_size=0.2, random_state=36)
pos = np.vstack([amp_x_val[amp_y_val == 1], mic_x_val[mic_y_val < 1.5]])
neg = np.vstack([amp_x_val[amp_y_val == 0], mic_x_val[mic_y_val > 1.5]])
neg.shape, pos.shape
pos_amp = amp_classifier_model.predict(pos, verbose=1).reshape(len(pos))
neg_mic = mic_classifier_model.predict(neg, verbose=1).reshape(len(neg))
neg_amp = amp_classifier_model.predict(neg, verbose=1).reshape(len(neg))
pos_mic = mic_classifier_model.predict(pos, verbose=1).reshape(len(pos))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 4), sharex=True, sharey=True)
ax1.hist(pos_amp)
ax1.set_ylabel('AMP')
ax1.set_title('Positives')
ax2.hist(neg_amp)
ax2.set_title('Negatives')
ax3.hist(pos_mic)
ax3.set_ylabel('MIC')
ax4.hist(neg_mic)
plt.show()
pos = np.vstack([pos] * 64).reshape(-1, 25)
pos_amp = np.vstack([pos_amp] * 64).reshape(-1, 1)
pos_mic = np.vstack([pos_mic] * 64).reshape(-1, 1)
neg = np.vstack([neg] * 64).reshape(-1, 25)
neg_amp = np.vstack([neg_amp] * 64).reshape(-1, 1)
neg_mic = np.vstack([neg_mic] * 64).reshape(-1, 1)
def improve(x, model, epoch, mode):
    """Run one encode/decode improvement pass over a batch of peptides.

    Args:
        x: (n, 25) integer-encoded peptides (already tiled 64x upstream).
        model: model name -- unused in the body, kept for interface compatibility.
        epoch: epoch number -- unused in the body, kept for interface compatibility.
        mode: 'pos' to compare against the positive baseline predictions;
            anything else uses the negative baselines.

    Relies on module-level globals: encoder_model, decoder_model,
    amp_classifier_model, mic_classifier_model, and the baseline score arrays
    pos_amp / pos_mic / neg_amp / neg_mic.

    Returns:
        dict with the decoded peptides, the subsets improved relative to their
        baseline ('rel_*'), the subsets passing absolute thresholds ('abs_*'),
        and the new AMP/MIC classifier scores.
    """
    if mode == 'pos':
        amp = pos_amp
        mic = pos_mic
    else:
        # BUGFIX: this previously read `amp = neg_mic`, so the negative branch
        # compared new AMP scores against the MIC baseline instead of neg_amp.
        amp = neg_amp
        mic = neg_mic
    encoded = encoder_model.predict(x, batch_size=5000)
    # Condition the latent code on (amp=1, mic=1), i.e. ask for an active peptide.
    conditioned = np.hstack([
        encoded,
        np.ones((len(x), 1)),
        np.ones((len(x), 1)),
    ])
    decoded = decoder_model.predict(conditioned, batch_size=5000)
    new_peptides = np.argmax(decoded, axis=2)
    new_amp = amp_classifier_model.predict(new_peptides, batch_size=5000)
    new_mic = mic_classifier_model.predict(new_peptides, batch_size=5000)
    # RELATIVE improvement: both new scores beat the peptide's own baseline.
    rel_better = new_amp > amp.reshape(-1, 1)
    rel_better = rel_better & (new_mic > mic.reshape(-1, 1))
    rel_better = np.logical_or.reduce(rel_better, axis=1)
    rel_improved = new_peptides[np.where(rel_better), :].reshape(-1, 25)
    before_rel_improve = x[np.where(rel_better), :].reshape(-1, 25)
    # ABSOLUTE improvement: new scores pass fixed thresholds.
    abs_better = new_amp >= 0.8
    abs_better = abs_better & (new_mic > 0.5)
    abs_better = np.logical_or.reduce(abs_better, axis=1)
    abs_improved = new_peptides[np.where(abs_better), :].reshape(-1, 25)
    before_abs_improve = x[np.where(abs_better), :].reshape(-1, 25)
    return {
        'new_peptides': new_peptides,
        'rel_improved': rel_improved,
        'abs_improved': abs_improved,
        'before_rel_improve': before_rel_improve,
        'before_abs_improve': before_abs_improve,
        'new_amp': new_amp,
        'new_mic': new_mic,
    }
```
# HydrAMP improve
```
from keras.models import Model
model = models[0]
current_model_pos = {epoch: [] for epoch in range(40)}
current_model_neg = {epoch: [] for epoch in range(40)}
for epoch in tqdm(range(40)):
AMPMaster = bms.load_model(f'../models/{model}/{epoch}')
encoder_model = AMPMaster.encoder(input_to_encoder)
decoder_model = AMPMaster.decoder(input_to_decoder)
current_model_pos[epoch] = improve(pos, model, epoch, 'pos')
current_model_neg[epoch] = improve(neg, model, epoch, 'neg')
dump(current_model_pos, f'../results/improvement_PosVal_{model}.joblib')
dump(current_model_neg, f'../results/improvement_NegVal_{model}.joblib')
```
# PepCVAE improve
```
from keras.models import Model
model = models[1]
current_model_pos = {epoch: [] for epoch in range(40)}
current_model_neg = {epoch: [] for epoch in range(40)}
for epoch in tqdm(range(40)):
AMPMaster = bms.load_model(f'../models/{model}/{epoch}')
encoder_model = AMPMaster.encoder(input_to_encoder)
decoder_model = AMPMaster.decoder(input_to_decoder)
new_act = layers.TimeDistributed(
layers.Activation(activations.softmax),
name='decoder_time_distribute_activation')
decoder_model.layers.pop()
x = new_act(decoder_model.layers[-1].output)
decoder_model = Model(input=decoder_model.input, output=[x])
current_model_pos[epoch] = improve(pos, model, epoch, 'pos')
current_model_neg[epoch] = improve(neg, model, epoch, 'neg')
dump(current_model_pos, f'../results/improvement_PosVal_{model}.joblib')
dump(current_model_neg, f'../results/improvement_NegVal_{model}.joblib')
```
# Basic improvement
```
from keras.models import Model
model = models[2]
current_model_pos = {epoch: [] for epoch in range(40)}
current_model_neg = {epoch: [] for epoch in range(40)}
for epoch in tqdm(range(40)):
AMPMaster = bms.load_model(f'../models/{model}/{epoch}')
encoder_model = AMPMaster.encoder(input_to_encoder)
decoder_model = AMPMaster.decoder(input_to_decoder)
new_act = layers.TimeDistributed(
layers.Activation(activations.softmax),
name='decoder_time_distribute_activation')
decoder_model.layers.pop()
x = new_act(decoder_model.layers[-1].output)
decoder_model = Model(input=decoder_model.input, output=[x])
current_model_pos[epoch] = improve(pos, model, epoch, 'pos')
current_model_neg[epoch] = improve(neg, model, epoch, 'neg')
dump(current_model_pos, f'../results/improvement_PosVal_{model}.joblib')
dump(current_model_neg, f'../results/improvement_NegVal_{model}.joblib')
```
# Collect results
```
pos_final_results = {model: {epoch:
{'absolute improvement':0,
'relative improvement':0,
} for epoch in range(40)} for model in models}
neg_final_results = {model: {epoch:
{'absolute improvement':0,
'relative improvement':0,
} for epoch in range(40)} for model in models}
# Collect per-epoch counts of unique improved peptides for the positive
# validation set. (The original branched on the model name, but both branches
# loaded the identical file, so the dead branch was removed.)
for model in models:
    model_results = load(f'../results/improvement_PosVal_{model}.joblib')
    for epoch in range(40):
        pos_final_results[model][epoch]['relative improvement'] = np.unique(
            model_results[epoch]['rel_improved'], axis=0).shape[0]
        pos_final_results[model][epoch]['absolute improvement'] = np.unique(
            model_results[epoch]['abs_improved'], axis=0).shape[0]
        pos_final_results[model][epoch]['before relative improvement'] = np.unique(
            model_results[epoch]['before_rel_improve'], axis=0).shape[0]
        pos_final_results[model][epoch]['before absolute improvement'] = np.unique(
            model_results[epoch]['before_abs_improve'], axis=0).shape[0]
# Same collection for the negative validation set.
for model in models:
    model_results = load(f'../results/improvement_NegVal_{model}.joblib')
    for epoch in range(40):
        neg_final_results[model][epoch]['relative improvement'] = np.unique(
            model_results[epoch]['rel_improved'], axis=0).shape[0]
        neg_final_results[model][epoch]['absolute improvement'] = np.unique(
            model_results[epoch]['abs_improved'], axis=0).shape[0]
        neg_final_results[model][epoch]['before relative improvement'] = np.unique(
            model_results[epoch]['before_rel_improve'], axis=0).shape[0]
        neg_final_results[model][epoch]['before absolute improvement'] = np.unique(
            model_results[epoch]['before_abs_improve'], axis=0).shape[0]
hydra_metrics = pd.read_csv('../models/HydrAMP/metrics.csv')
pepcvae_metrics = pd.read_csv('../models/PepCVAE/metrics.csv')
basic_metrics = pd.read_csv('../models/Basic/metrics.csv')
plt.title('Relative improved')
plt.plot([pos_final_results[models[0]][epoch]['relative improvement'] for epoch in range(10, 40)], c='red', label='HydrAMP')
plt.plot([pos_final_results[models[1]][epoch]['relative improvement'] for epoch in range(10, 40)], c='orange', label='PepCVAE')
plt.plot([pos_final_results[models[2]][epoch]['relative improvement'] for epoch in range(10, 40)], c='blue', label='Basic')
plt.legend(bbox_to_anchor=(1.1, 0.5))
plt.show()
plt.title('Absolute improved')
plt.plot([pos_final_results[models[0]][epoch]['absolute improvement'] for epoch in range(10, 40)], c='red', label='HydrAMP')
plt.plot([pos_final_results[models[1]][epoch]['absolute improvement'] for epoch in range(10, 40)], c='orange', label='PepCVAE')
plt.plot([pos_final_results[models[2]][epoch]['absolute improvement'] for epoch in range(10, 40)], c='blue', label='Basic')
plt.legend(bbox_to_anchor=(1.1, 0.5))
plt.show()
plt.figure(figsize=(10,5))
plt.plot([float(x) for x in hydra_metrics['val_vae_loss_1__amino_acc'].tolist()[10:40]],
c='red', label='HydrAMP', linestyle='--')
plt.plot([float(x) for x in pepcvae_metrics['val_vae_loss_1__amino_acc'].tolist()[10:40]],
c='orange', label='PepCVAE', linestyle='--')
plt.plot([float(x) for x in basic_metrics['val_vae_loss_1__amino_acc'].tolist()[10:40]],
c='blue', label='Basic', linestyle='--')
plt.title('How many petides were susceptible to (relative) improvement out of 2404 known AMPs? ')
plt.plot([pos_final_results[models[0]][epoch]['before relative improvement']/2404 for epoch in range(10, 40)], c='red')
plt.plot([pos_final_results[models[1]][epoch]['before relative improvement']/2404 for epoch in range(10, 40)], c='orange')
plt.plot([pos_final_results[models[2]][epoch]['before relative improvement']/2404 for epoch in range(10, 40)], c='blue')
plt.legend(bbox_to_anchor=(1.1, 0.5))
plt.show()
plt.figure(figsize=(10,5))
plt.plot([float(x) for x in hydra_metrics['val_vae_loss_1__amino_acc'].tolist()[10:40]],
c='red', label='HydrAMP', linestyle='--')
plt.plot([float(x) for x in pepcvae_metrics['val_vae_loss_1__amino_acc'].tolist()[10:40]],
c='orange', label='PepCVAE', linestyle='--')
plt.plot([float(x) for x in basic_metrics['val_vae_loss_1__amino_acc'].tolist()[10:40]],
c='blue', label='Basic', linestyle='--')
plt.title('How many petides were susceptible to (absolute) improvement out of 2404 known AMPs? ')
plt.plot([pos_final_results[models[0]][epoch]['before absolute improvement']/2404 for epoch in range(10, 40)], c='red')
plt.plot([pos_final_results[models[1]][epoch]['before absolute improvement']/2404 for epoch in range(10, 40)], c='orange')
plt.plot([pos_final_results[models[2]][epoch]['before absolute improvement']/2404 for epoch in range(10, 40)], c='blue')
plt.legend(bbox_to_anchor=(1.1, 0.5))
plt.show()
```
# Model selection
```
def choose_best_epoch(model):
    # Pick, among epochs 10-39 with amino-acid reconstruction accuracy > 0.95,
    # the epoch that maximizes the combined fraction of improvable peptides.
    # Returns (best_epoch_no, best_score). Relies on module-level
    # pos_final_results / neg_final_results built in the cells above.
    model_metrics = pd.read_csv(f'../models/{model}/metrics.csv')
    # NOTE(review): the boolean mask is built from the full frame but applied
    # to the iloc[10:40] slice -- pandas reindexes the mask (with a
    # UserWarning); the effect is "epochs 10-39 whose accuracy > 0.95".
    good_epochs = model_metrics.iloc[10:40][model_metrics['val_vae_loss_1__amino_acc'].astype(float) > 0.95].epoch_no.tolist()
    # Score = fraction of 2404 validation positives susceptible to relative
    # improvement + fraction of 2223 validation negatives susceptible to
    # absolute improvement.
    improved_peptides = [pos_final_results[model][epoch]['before relative improvement']/2404 + \
                         neg_final_results[model][epoch]['before absolute improvement']/2223 \
                         for epoch in good_epochs]
    return good_epochs[np.argmax(improved_peptides)], np.max(improved_peptides)
best_epochs = {model: [] for model in models}
for model in models:
best_epochs[model] = choose_best_epoch(model)
best_epochs
ax = sns.barplot(
x=model_labels,
y=[
pos_final_results[model][int(best_epochs[model][0])]['before relative improvement']/2404 + \
neg_final_results[model][int(best_epochs[model][0])]['before absolute improvement']/2223 \
for model in models
]
)
ax.set_title('VALIDATION SET\n % of relatively improved positives + % of absolutely improved negatives')
ax.set_xticklabels(model_labels, rotation=90)
plt.show()
metrics_to_consider = [
'before relative improvement',
'before absolute improvement',
'relative improvement',
'absolute improvement',
]
metrics_labels = [
'How many petides were susceptible to (relative) improvement?',
'How many petides were susceptible to (absolute) improvement?',
'Number of uniquely generated peptides during relative improvement procedure (64 attempts per peptide)',
'Number of uniquely generated peptides during absolute improvement procedure (64 attempts per peptide)',
]
for metric, metric_label in zip(metrics_to_consider, metrics_labels):
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 4), sharex=True)
plt.suptitle(metric_label, y=1.1)
sns.barplot(x=model_labels, y=[pos_final_results[model][int(best_epochs[model][0])][metric] for model in models], ax=ax1)
sns.barplot(x=model_labels, y=[neg_final_results[model][int(best_epochs[model][0])][metric] for model in models], ax=ax2)
ax1.set_title('2404 positives (validation set)')
ax2.set_title('2223 negatives (validation set)')
ax1.set_xticklabels(model_labels, rotation=90)
ax2.set_xticklabels(model_labels, rotation=90)
plt.show()
```
# Test set
```
# Per-model best epochs selected on the validation set.
best_epochs = {
    'HydrAMP': 37,
    'PepCVAE': 35,
    'Basic': 15,
}
# Build the test-set positives/negatives the same way as for validation.
pos = np.vstack([amp_x_test[amp_y_test == 1], mic_x_test[mic_y_test < 1.5]])
neg = np.vstack([amp_x_test[amp_y_test == 0], mic_x_test[mic_y_test > 1.5]])
print(pos.shape, neg.shape)
# Baseline classifier scores for every peptide.
pos_amp = amp_classifier_model.predict(pos, verbose=1).reshape(len(pos))
neg_mic = mic_classifier_model.predict(neg, verbose=1).reshape(len(neg))
neg_amp = amp_classifier_model.predict(neg, verbose=1).reshape(len(neg))
# BUGFIX: this previously used amp_classifier_model; the MIC baseline for the
# positives must come from the MIC classifier (matches the validation-set
# code above).
pos_mic = mic_classifier_model.predict(pos, verbose=1).reshape(len(pos))
# Tile 64 copies of every peptide so each one gets 64 improvement attempts.
pos = np.vstack([pos] * 64).reshape(-1, 25)
pos_amp = np.vstack([pos_amp] * 64).reshape(-1, 1)
pos_mic = np.vstack([pos_mic] * 64).reshape(-1, 1)
neg = np.vstack([neg] * 64).reshape(-1, 25)
neg_amp = np.vstack([neg_amp] * 64).reshape(-1, 1)
neg_mic = np.vstack([neg_mic] * 64).reshape(-1, 1)
final_pos_results = {}
final_neg_results = {}
for model in tqdm(models):
epoch = int(best_epochs[model])
AMPMaster = bms.load_model(f'../models/{model}/{epoch}')
encoder_model = AMPMaster.encoder(input_to_encoder)
decoder_model = AMPMaster.decoder(input_to_decoder)
if model in ['PepCVAE', 'Basic']:
new_act = layers.TimeDistributed(
layers.Activation(activations.softmax),
name='decoder_time_distribute_activation')
decoder_model.layers.pop()
x = new_act(decoder_model.layers[-1].output)
decoder_model = Model(input=decoder_model.input, output=[x])
final_pos_results[model] = improve(pos, model, epoch, 'pos')
final_neg_results[model] = improve(neg, model, epoch, 'neg')
dump(final_pos_results, f'../results/improvement_PosTest.joblib')
dump(final_neg_results, f'../results/improvement_NegTest.joblib')
pos_final_results = {models: {} for models in models}
neg_final_results = {models: {} for models in models}
for model in models:
pos_final_results[model]['relative improvement'] = np.unique(
final_pos_results[model]['rel_improved'], axis=0).shape[0]
pos_final_results[model]['absolute improvement'] = np.unique(
final_pos_results[model]['abs_improved'], axis=0).shape[0]
pos_final_results[model]['before relative improvement'] = np.unique(
final_pos_results[model]['before_rel_improve'], axis=0).shape[0]
pos_final_results[model]['before absolute improvement'] = np.unique(
final_pos_results[model]['before_abs_improve'], axis=0).shape[0]
neg_final_results[model]['relative improvement'] = np.unique(
final_neg_results[model]['rel_improved'], axis=0).shape[0]
neg_final_results[model]['absolute improvement'] = np.unique(
final_neg_results[model]['abs_improved'], axis=0).shape[0]
neg_final_results[model]['before relative improvement'] = np.unique(
final_neg_results[model]['before_rel_improve'], axis=0).shape[0]
neg_final_results[model]['before absolute improvement'] = np.unique(
final_neg_results[model]['before_abs_improve'], axis=0).shape[0]
ax = sns.barplot(
x=model_labels,
y=[
pos_final_results[model]['before relative improvement']/1319 + \
neg_final_results[model]['before absolute improvement']/1253 \
for model in models])
ax.set_title('Before relative improvement (positives) + before absolute improvement (negatives)')
ax.set_xticklabels(model_labels, rotation=90)
plt.show()
for metric, metric_label in zip(metrics_to_consider, metrics_labels):
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 4), sharex=True)
plt.suptitle(metric_label, y=1.1)
sns.barplot(x=model_labels, y=[pos_final_results[model][metric] for model in models], ax=ax1)
sns.barplot(x=model_labels, y=[neg_final_results[model][metric] for model in models], ax=ax2)
ax1.set_title('1319 positives (test set)')
ax2.set_title('1253 negatives (test set)')
ax1.set_xticklabels(model_labels, rotation=90)
ax2.set_xticklabels(model_labels, rotation=90)
plt.show()
```
| github_jupyter |
# Notebook to be used to Develop Display of Results
```
from importlib import reload
import pandas as pd
import numpy as np
from IPython.display import Markdown
# If one of the modules changes and you need to reimport it,
# execute this cell again.
import heatpump.hp_model
reload(heatpump.hp_model)
import heatpump.home_heat_model
reload(heatpump.home_heat_model)
import heatpump.library as lib
reload(lib)
# Anchorage large home inputs
util = lib.util_from_id(1)
inputs1 = dict(
city_id=1,
utility=util,
pce_limit=500.0,
co2_lbs_per_kwh=1.1,
exist_heat_fuel_id=2,
exist_unit_fuel_cost=0.97852,
exist_fuel_use=1600,
exist_heat_effic=.8,
exist_kwh_per_mmbtu=8,
includes_dhw=True,
includes_dryer=True,
includes_cooking=False,
occupant_count=3,
elec_use_jan=550,
elec_use_may=400,
hp_model_id=575,
low_temp_cutoff=5,
garage_stall_count=2,
garage_heated_by_hp=False,
bldg_floor_area=3600,
indoor_heat_setpoint=70,
insul_level=3,
pct_exposed_to_hp=0.46,
doors_open_to_adjacent=False,
bedroom_temp_tolerance=2,
capital_cost=4500,
rebate_dol=500,
pct_financed=0.5,
loan_term=10,
loan_interest=0.05,
hp_life=14,
op_cost_chg=10,
sales_tax=0.02,
discount_rate=0.05,
inflation_rate=0.02,
fuel_esc_rate=0.03,
elec_esc_rate=0.02,
)
# Ambler Home inputs
util = lib.util_from_id(202)
inputs2 = dict(
city_id=45,
utility=util,
pce_limit=500.0,
co2_lbs_per_kwh=1.6,
exist_heat_fuel_id=4,
exist_unit_fuel_cost=8.0,
exist_fuel_use=450,
exist_heat_effic=.86,
exist_kwh_per_mmbtu=8,
includes_dhw=False,
includes_dryer=False,
includes_cooking=False,
occupant_count=3,
elec_use_jan=550,
elec_use_may=300,
hp_model_id=575,
low_temp_cutoff=5,
garage_stall_count=0,
garage_heated_by_hp=False,
bldg_floor_area=800,
indoor_heat_setpoint=70,
insul_level=2,
pct_exposed_to_hp=1.0,
doors_open_to_adjacent=False,
bedroom_temp_tolerance=3,
capital_cost=6500,
rebate_dol=0,
pct_financed=0.0,
loan_term=10,
loan_interest=0.05,
hp_life=14,
op_cost_chg=0,
sales_tax=0.00,
discount_rate=0.05,
inflation_rate=0.02,
fuel_esc_rate=0.03,
elec_esc_rate=0.02,
)
inputs2
# Change from **inputs1 to **inputs2 to run the two cases.
mod = heatpump.hp_model.HP_model(**inputs2)
mod.run()
# Pull out the results from the model object.
# Use these variable names in your display of outputs.
smy = mod.summary
df_cash_flow = mod.df_cash_flow
df_mo_en_base = mod.df_mo_en_base
df_mo_en_hp = mod.df_mo_en_hp
df_mo_dol_base = mod.df_mo_dol_base
df_mo_dol_hp = mod.df_mo_dol_hp
# This is a dictionary containing summary output.
# The 'fuel_use_xxx' values are annual totals in physical units
# like gallons. 'elec_use_xxx' are kWh. 'hp_max_capacity' is the
# maximum output of the heat pump at 5 deg F. 'max_hp_reached'
# indicates whether the heat pump ever used all of its capacity
# at some point during the year.
smy
md = f"Design Heat Load: **{smy['design_heat_load']:,.0f} Btu/hour** at {smy['design_heat_temp']:.0f} degrees F outdoors"
md
# You can get a string that is in Markdown format rendered properly
# by using the Markdown class.
Markdown(md)
# Or, this might be a case where f-strings are not the cleanest.
# Here is another way:
md = 'Design Heat Load of Entire Building: **{design_heat_load:,.0f} Btu/hour** at {design_heat_temp:.0f} degrees F outdoors \n(required output of heating system, no safety margin)'.format(**smy)
Markdown(md)
# Cash Flow over the life of the heat pump.
# Negative values are costs and positive values are benefits.
# When displaying this table delete the two columns that don't apply,
# depending on whether you are showing the PCE or no PCE case.
df_cash_flow
# The Base case and w/ Heat Pump monthly energy results.
df_mo_en_base
df_mo_en_hp
# The monthly dollar flows with and without the heat pump
# The PCE and no PCE case are included in this one table
df_mo_dol_base
df_mo_dol_hp
list(df_mo_en_base.columns.values)
import plotly
plotly.tools.set_credentials_file(username='dustin_cchrc', api_key='yzYaFYf93PQ7D0VUZKGy')
import plotly.plotly as py
import plotly.graph_objs as go
```
## Monthly Heating Load
```
data = [go.Bar(x=df_mo_en_base.index,
y=df_mo_en_base.secondary_load_mmbtu,
name='Monthly Heating Load')]
layout = go.Layout(title='Monthly Heating Load',
xaxis=dict(title='Month'),
yaxis=dict(title='Total Estimated Heat Load (MMBTU)', hoverformat='.1f')
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='estimated_heat_load', fileopt='overwrite')
```
## Heating Cost Comparison
```
df_mo_dol_chg = df_mo_dol_hp - df_mo_dol_base
df_mo_dol_chg['cost_savings'] = np.where(
df_mo_dol_chg.total_dol < 0.0,
-df_mo_dol_chg.total_dol,
0.0
)
# Note: we make these negative values so bars extend downwards
df_mo_dol_chg['cost_increases'] = np.where(
df_mo_dol_chg.total_dol >= 0.0,
-df_mo_dol_chg.total_dol,
0.0
)
df_mo_dol_chg
# calculate the change in dollars between the base scenario and the heat
# pump scenario.
hp_cost = go.Bar(
x=df_mo_dol_hp.index,
y=df_mo_dol_hp.total_dol,
name='',
marker=dict(color='#377eb8'),
hoverinfo = 'y',
)
cost_savings = go.Bar(
x=df_mo_dol_chg.index,
y=df_mo_dol_chg.cost_savings,
name='Cost Savings',
marker=dict(color='#4daf4a'),
hoverinfo = 'y',
)
cost_increases = go.Bar(
x=df_mo_dol_chg.index,
y=df_mo_dol_chg.cost_increases,
name='Cost Increases',
marker=dict(color='#e41a1c'),
hoverinfo = 'y',
)
no_hp_costs = go.Scatter(
x=df_mo_dol_base.index,
y=df_mo_dol_base.total_dol,
name='Baseline Energy Costs',
mode='markers',
marker=dict(color='#000000', size=12),
hoverinfo = 'y',
)
data = [hp_cost, cost_savings, cost_increases, no_hp_costs]
layout = go.Layout(
title='Energy Costs: Heat Pump vs. Baseline',
xaxis=dict(title='Month', fixedrange=True,),
yaxis=dict(
title='Total Energy Costs',
hoverformat='$,.0f',
fixedrange=True,
tickformat='$,.0f',
),
barmode='stack',
hovermode= 'closest',
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='heatpump_costs', fileopt='overwrite')
```
## Monthly Heat Pump Efficiency
```
efficiency = [go.Scatter(x=df_mo_en_hp.index,
y=df_mo_en_hp.cop,
name='COP',
mode='lines+markers')]
layout = go.Layout(title='Monthly Heat Pump Efficiency',
xaxis=dict(title='Month'),
yaxis=dict(title='COP'))
fig = go.Figure(data=efficiency, layout=layout)
py.iplot(fig, layout=layout, filename='cop', fileopt='overwrite')
```
## Energy Use Comparison
```
list(df_mo_en_base.columns.values)
list(df_mo_dol_base.columns.values)
list(df_mo_dol_hp.columns.values)
from plotly import tools
elec_no_hp = go.Scatter(x=df_mo_dol_base.index,
y=df_mo_dol_base.elec_kwh,
name='Monthly kWh (no Heat Pump)',
line=dict(color='#92c5de',
width=2,
dash='dash')
)
elec_w_hp = go.Scatter(x=df_mo_dol_hp.index,
y=df_mo_dol_hp.elec_kwh,
name='Monthly kWh (with Heat Pump)',
mode='lines',
marker=dict(color='#0571b0')
)
fuel_no_hp = go.Scatter(x=df_mo_dol_base.index,
y=df_mo_dol_base.secondary_fuel_units,
name='Monthly Fuel Usage (no Heat Pump)',
line=dict(color='#f4a582',
width = 2,
dash = 'dash')
)
fuel_w_hp = go.Scatter(x=df_mo_dol_hp.index,
y=df_mo_dol_hp.secondary_fuel_units,
name='Monthly Fuel Usage (with Heat Pump)',
mode='lines',
marker=dict(color='#ca0020'))
fig = tools.make_subplots(rows=2, cols=1)
fig.append_trace(elec_no_hp, 1, 1)
fig.append_trace(elec_w_hp, 1, 1)
fig.append_trace(fuel_no_hp, 2, 1)
fig.append_trace(fuel_w_hp, 2, 1)
fig['layout'].update(title='Energy Usage: Heat Pump vs. Baseline')
fig['layout']['xaxis1'].update(title='Month')
fig['layout']['xaxis2'].update(title='Month')
fig['layout']['yaxis1'].update(title='Electricity Use (kWh)', hoverformat='.0f')
yaxis2_title = 'Heating Fuel Use (%s)' % (smy['fuel_unit'])
fig['layout']['yaxis2'].update(title=yaxis2_title, hoverformat='.1f')
py.iplot(fig, filename='heatpump_energy_usage', fileopt='overwrite')
```
## Cash Flow Visualization
```
df_cash_flow
df_cash_flow['negative_flow'] = np.where(df_cash_flow.cash_flow < 0, df_cash_flow.cash_flow, 0)
df_cash_flow['positive_flow'] = np.where(df_cash_flow.cash_flow > 0, df_cash_flow.cash_flow, 0)
negative_flow = go.Bar(x=df_cash_flow.index,
y=df_cash_flow.negative_flow,
name='Cash Flow',
marker=dict(color='#d7191c'))
positive_flow = go.Bar(x=df_cash_flow.index,
y=df_cash_flow.positive_flow,
name='Cash Flow',
marker=dict(color='#000000'))
data = [negative_flow, positive_flow]
layout = go.Layout(title='Heat Pump Cash Flow',
xaxis=dict(title='Year'),
yaxis=dict(title='Annual Cash Flow ($)', hoverformat='dol,.0f')
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='cash_flow', fileopt='overwrite')
```
## Cumulative Discounted Cash Flow
```
df_cash_flow
df_cash_flow['cum_negative_flow'] = np.where(df_cash_flow.cum_disc_cash_flow < 0, df_cash_flow.cum_disc_cash_flow, 0)
df_cash_flow['cum_positive_flow'] = np.where(df_cash_flow.cum_disc_cash_flow > 0, df_cash_flow.cum_disc_cash_flow, 0)
negative_cash_flow = go.Scatter(x=df_cash_flow.index,
y=df_cash_flow.cum_negative_flow,
name='Cash Flow ($)',
fill='tozeroy',
fillcolor='#d7191c',
line=dict(color='#ffffff')
)
positive_cash_flow = go.Scatter(x=df_cash_flow.index,
y=df_cash_flow.cum_positive_flow,
name='Cash Flow ($)',
fill='tozeroy',
fillcolor='#000000',
line=dict(color='#ffffff')
)
data = [negative_cash_flow, positive_cash_flow]
layout = go.Layout(title='Heat Pump Lifetime Cumulative Discounted Cash Flow',
xaxis=dict(title='Year'),
yaxis=dict(title='Annual Discounted Cash Flow ($)', hoverformat='.0f'),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='cumulative_discounted_heatpump_cash_flow', fileopt='overwrite')
```
## Markdown Display of Results
```
## Need to account for NaN internal rate of return (hide somehow)
```
### With PCE
```
md_results = '''# Results
## Heat Pump Cost Effectiveness
### Net Present Value: **\${:,.0f}**
The Net Present Value of installing an air-source heat pump is estimated to be **\${:,.0f}**.
This means that over the course of the life of the equipment you will {} **\${:,.0f}** in today's dollars.
### Internal Rate of Return: **{:.1f}%**
The internal rate of return on the investment is estimated to be **{:.1f}%**. Compare this tax-free investment to your other investment options.
### Cash Flow
This is how your cash flow will be affected by installing a heat pump:
## Greenhouse Gas Emissions
Installing a heat pump is predicted to save {:,.0f} pounds of CO2 emissions annually, or {:,.0f} pounds over the life of the equipment.
This is equivalent to a reduction of {:,.0f} miles driven by an average passenger vehicle annually, or {:,.0f} over the equipment's life.
'''
def npv_indicator(summary, pce_indicator):
    """Return 'earn' or 'lose' based on the sign of the relevant NPV.

    When pce_indicator == 1 the 'npv' entry of *summary* is inspected;
    otherwise the 'npv_no_pce' entry is used.  A non-positive NPV is
    reported as 'lose'.
    """
    npv_key = 'npv' if pce_indicator == 1 else 'npv_no_pce'
    return 'earn' if summary[npv_key] > 0 else 'lose'
# Inspect the summary dict and the headline NPV value.
smy
smy['npv']
# Fill the template positionally (the same value may appear more than once).
# NOTE(review): lifetime CO2 figures are scaled by 12 here, while the
# rewritten cell below uses inputs['hp_life'] = 14 — confirm which equipment
# life is intended.
md = md_results.format(smy['npv'],
                       smy['npv'],
                       npv_indicator(smy, 1),
                       abs(smy['npv']),
                       smy['irr']*100,
                       smy['irr']*100,
                       smy['co2_lbs_saved'],
                       smy['co2_lbs_saved'] * 12,
                       smy['co2_driving_miles_saved'],
                       smy['co2_driving_miles_saved'] * 12)
# Render the filled template as rich Markdown in the notebook.
Markdown(md)
from textwrap import dedent
# Rewritten version: derive every displayed quantity up front in a dict and
# format the template by keyword instead of by position.
inputs = {'hp_life': 14}
sumd = smy.copy()
sumd['npv_abs'] = abs(sumd['npv'])
sumd['irr'] *= 100.  # convert to %
sumd['npv_indicator'] = 'earn' if sumd['npv'] >= 0 else 'lose'
# Lifetime totals are the annual figures scaled by the equipment life.
sumd['co2_lbs_saved_life'] = sumd['co2_lbs_saved'] * inputs['hp_life']
sumd['co2_driving_miles_saved_life'] = sumd['co2_driving_miles_saved'] * inputs['hp_life']
# dedent strips the common leading whitespace of the literal so headings
# render correctly in Markdown.
md_tmpl = dedent('''
# Results
## Heat Pump Cost Effectiveness
### Net Present Value: **\${npv:,.0f}**
The Net Present Value of installing an air-source heat pump is estimated to
be **\${npv:,.0f}**. This means that over the course of the life of the equipment you
will {npv_indicator} **\${npv_abs:,.0f}** in today's dollars.
### Internal Rate of Return: **{irr:.1f}%**
The internal rate of return on the investment is estimated to be **{irr:.1f}%**.
Compare this tax-free investment to your other investment options.
### Cash Flow
This is how your cash flow will be affected by installing a heat pump:
## Greenhouse Gas Emissions
Installing a heat pump is predicted to save {co2_lbs_saved:,.0f} pounds of CO2 emissions annually,
or {co2_lbs_saved_life:,.0f} pounds over the life of the equipment. This is equivalent to a reduction
of {co2_driving_miles_saved:,.0f} miles driven by an average passenger vehicle annually,
or {co2_driving_miles_saved_life:,.0f} miles over the equipment's life.
''')
md = md_tmpl.format(**sumd)
Markdown(md)
# Show the assembled dict for inspection.
sumd
```
| github_jupyter |
```
import pandas as pd
# Load the Medicare cohort and split hospitals into a training set
# (everything except BWH) and a validation set (BWH only).
medicare = pd.read_csv("/netapp2/home/se197/data/CMS/Data/medicare.csv")
train_set = medicare[medicare.Hospital != 'BWH'] # MGH
validation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither
import numpy as np
# Stratify each split into high/low EHR-continuity cohorts at the median of
# Cal_MPEC_R0 (computed over the whole dataset).
fifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'],50)
train_set_high = train_set[train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]
train_set_low= train_set[train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]
validation_set_high = validation_set[validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]
validation_set_low = validation_set[validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]
# Baseline (R0) comorbidity, utilization and medication indicator columns
# used as model predictors.
predictor_variable = [
    'Co_CAD_R0', 'Co_Embolism_R0', 'Co_DVT_R0', 'Co_PE_R0', 'Co_AFib_R0',
    'Co_Hypertension_R0', 'Co_Hyperlipidemia_R0', 'Co_Atherosclerosis_R0',
    'Co_HF_R0', 'Co_HemoStroke_R0', 'Co_IscheStroke_R0', 'Co_OthStroke_R0',
    'Co_TIA_R0', 'Co_COPD_R0', 'Co_Asthma_R0', 'Co_Pneumonia_R0', 'Co_Alcoholabuse_R0',
    'Co_Drugabuse_R0', 'Co_Epilepsy_R0', 'Co_Cancer_R0', 'Co_MorbidObesity_R0',
    'Co_Dementia_R0', 'Co_Depression_R0', 'Co_Bipolar_R0', 'Co_Psychosis_R0',
    'Co_Personalitydisorder_R0', 'Co_Adjustmentdisorder_R0', 'Co_Anxiety_R0',
    'Co_Generalizedanxiety_R0', 'Co_OldMI_R0', 'Co_AcuteMI_R0', 'Co_PUD_R0',
    'Co_UpperGIbleed_R0', 'Co_LowerGIbleed_R0', 'Co_Urogenitalbleed_R0',
    'Co_Othbleed_R0', 'Co_PVD_R0', 'Co_LiverDisease_R0', 'Co_MRI_R0',
    'Co_ESRD_R0', 'Co_Obesity_R0', 'Co_Sepsis_R0', 'Co_Osteoarthritis_R0',
    'Co_RA_R0', 'Co_NeuroPain_R0', 'Co_NeckPain_R0', 'Co_OthArthritis_R0',
    'Co_Osteoporosis_R0', 'Co_Fibromyalgia_R0', 'Co_Migraine_R0', 'Co_Headache_R0',
    'Co_OthPain_R0', 'Co_GeneralizedPain_R0', 'Co_PainDisorder_R0',
    'Co_Falls_R0', 'Co_CoagulationDisorder_R0', 'Co_WhiteBloodCell_R0', 'Co_Parkinson_R0',
    'Co_Anemia_R0', 'Co_UrinaryIncontinence_R0', 'Co_DecubitusUlcer_R0',
    'Co_Oxygen_R0', 'Co_Mammography_R0', 'Co_PapTest_R0', 'Co_PSATest_R0',
    'Co_Colonoscopy_R0', 'Co_FecalOccultTest_R0', 'Co_FluShot_R0', 'Co_PneumococcalVaccine_R0', 'Co_RenalDysfunction_R0', 'Co_Valvular_R0', 'Co_Hosp_Prior30Days_R0',
    'Co_RX_Antibiotic_R0', 'Co_RX_Corticosteroid_R0', 'Co_RX_Aspirin_R0', 'Co_RX_Dipyridamole_R0',
    'Co_RX_Clopidogrel_R0', 'Co_RX_Prasugrel_R0', 'Co_RX_Cilostazol_R0', 'Co_RX_Ticlopidine_R0',
    'Co_RX_Ticagrelor_R0', 'Co_RX_OthAntiplatelet_R0', 'Co_RX_NSAIDs_R0',
    'Co_RX_Opioid_R0', 'Co_RX_Antidepressant_R0', 'Co_RX_AAntipsychotic_R0', 'Co_RX_TAntipsychotic_R0',
    'Co_RX_Anticonvulsant_R0', 'Co_RX_PPI_R0', 'Co_RX_H2Receptor_R0', 'Co_RX_OthGastro_R0',
    'Co_RX_ACE_R0', 'Co_RX_ARB_R0', 'Co_RX_BBlocker_R0', 'Co_RX_CCB_R0', 'Co_RX_Thiazide_R0',
    'Co_RX_Loop_R0', 'Co_RX_Potassium_R0', 'Co_RX_Nitrates_R0', 'Co_RX_Aliskiren_R0',
    'Co_RX_OthAntihypertensive_R0', 'Co_RX_Antiarrhythmic_R0', 'Co_RX_OthAnticoagulant_R0',
    'Co_RX_Insulin_R0', 'Co_RX_Noninsulin_R0', 'Co_RX_Digoxin_R0', 'Co_RX_Statin_R0',
    'Co_RX_Lipid_R0', 'Co_RX_Lithium_R0', 'Co_RX_Benzo_R0', 'Co_RX_ZDrugs_R0',
    'Co_RX_OthAnxiolytic_R0', 'Co_RX_Barbiturate_R0', 'Co_RX_Dementia_R0', 'Co_RX_Hormone_R0',
    'Co_RX_Osteoporosis_R0', 'Co_N_Drugs_R0', 'Co_N_Hosp_R0', 'Co_Total_HospLOS_R0',
    'Co_N_MDVisit_R0', 'Co_RX_AnyAspirin_R0', 'Co_RX_AspirinMono_R0', 'Co_RX_ClopidogrelMono_R0',
    'Co_RX_AspirinClopidogrel_R0', 'Co_RX_DM_R0', 'Co_RX_Antipsychotic_R0'
]
# Predictor matrices for each cohort.
co_train_gpop = train_set[predictor_variable]
co_train_high = train_set_high[predictor_variable]
co_train_low = train_set_low[predictor_variable]
co_validation_gpop = validation_set[predictor_variable]
co_validation_high = validation_set_high[predictor_variable]
co_validation_low = validation_set_low[predictor_variable]
len(predictor_variable)
# Outcome vectors: death recorded in EHR/claims, per cohort.
out_train_death_gpop = train_set['ehr_claims_death']
out_train_death_high = train_set_high['ehr_claims_death']
out_train_death_low = train_set_low['ehr_claims_death']
out_validation_death_gpop = validation_set['ehr_claims_death']
out_validation_death_high = validation_set_high['ehr_claims_death']
out_validation_death_low = validation_set_low['ehr_claims_death']
def bart(X_train, y_train):
    """Grid-search a BART (Bayesian Additive Regression Trees) model.

    Args:
        X_train: predictor DataFrame.
        y_train: outcome Series (converted to a numpy array for bartpy).

    Returns:
        The fitted GridSearchCV object, with the best n_trees selected by
        cross-validation.
    """
    from bartpy.sklearnmodel import SklearnModel
    from sklearn.model_selection import GridSearchCV
    # (removed unused `bartpy.data.Data` / `bartpy.sigma.Sigma` imports)

    # Only the number of trees is tuned; all other BART priors use the
    # bartpy defaults.
    param_grid = [{
        'n_trees': [10, 30, 50]
    }]
    model = SklearnModel()
    clf = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=10, verbose=True)
    best_clf = clf.fit(X_train, y_train.to_numpy())
    print(best_clf)
    return best_clf
def scores(X_train, y_train, best_clf):
    """Print evaluation metrics for a fitted model on the given data.

    Prints, in order: the rounded predictions, accuracy, F1, macro-F2,
    AUC and log-loss.  The model's raw predictions are treated as
    probabilities; rounding them yields hard class labels.
    """
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import f1_score
    from sklearn.metrics import fbeta_score
    from sklearn.metrics import roc_auc_score
    from sklearn.metrics import log_loss
    import numpy as np

    # Predict once and reuse: the original called best_clf.predict() three
    # separate times on the same data.
    prob = best_clf.predict(X_train)
    pred = np.round(prob)
    print(pred)
    actual = y_train
    print(accuracy_score(actual, pred))
    print(f1_score(actual, pred))
    print(fbeta_score(actual, pred, average='macro', beta=2))
    print(roc_auc_score(actual, prob))
    print(log_loss(actual, prob))
def cross_val(X, y):
    """5-fold cross-validation of the BART model; prints mean metrics.

    For each fold a fresh model is fit via bart() and evaluated on the
    held-out fold.  Prints, in order: mean accuracy, macro-F1, macro-F2,
    AUC and log-loss across folds.
    """
    from sklearn.model_selection import KFold
    from sklearn.metrics import (log_loss, roc_auc_score, accuracy_score,
                                 f1_score, fbeta_score)
    import numpy as np

    cv = KFold(n_splits=5, random_state=1, shuffle=True)
    # Renamed from `log_loss`: the original shadowed the imported
    # sklearn.metrics.log_loss function with this accumulator list.
    log_losses = []
    aucs = []
    accuracies = []
    f1s = []
    f2s = []
    for train_index, test_index in cv.split(X):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        model = bart(X_train, y_train)
        prob = model.predict(X_test)  # vector of probabilities
        pred = np.round(prob)         # hard 0/1 predictions (predict once, reuse)
        log_losses.append(log_loss(y_test, prob))
        aucs.append(roc_auc_score(y_test, prob))
        accuracies.append(accuracy_score(y_test, pred))
        f1s.append(f1_score(y_test, pred, average='macro'))
        f2s.append(fbeta_score(y_test, pred, average='macro', beta=2))
    print(np.mean(accuracies))
    print(np.mean(f1s))
    print(np.mean(f2s))
    print(np.mean(aucs))
    print(np.mean(log_losses))
import datetime
# Time the full fit + cross-validation + held-out evaluation for the
# general-population (gpop) cohort.
begin_time = datetime.datetime.now()
best_clf = bart(co_train_gpop,out_train_death_gpop)
cross_val(co_train_gpop,out_train_death_gpop)
print()
scores(co_validation_gpop,out_validation_death_gpop, best_clf)
print(datetime.datetime.now() - begin_time)
scores(co_validation_gpop,out_validation_death_gpop, best_clf)
# NOTE: bare `pip install ...` is IPython cell syntax — valid only inside a
# notebook, not in a plain Python script.
pip install knockknock
import datetime
# Same pipeline for the low EHR-continuity cohort.
begin_time = datetime.datetime.now()
best_clf = bart(co_train_low,out_train_death_low)
cross_val(co_train_low,out_train_death_low)
print()
scores(co_validation_low,out_validation_death_low, best_clf)
print(datetime.datetime.now() - begin_time)
import datetime
# Same pipeline for the high EHR-continuity cohort.
begin_time = datetime.datetime.now()
best_clf = bart(co_train_high,out_train_death_high)
cross_val(co_train_high,out_train_death_high)
print()
scores(co_validation_high,out_validation_death_high, best_clf)
print(datetime.datetime.now() - begin_time)
```
| github_jupyter |
# Module 5 -- Dimensionality Reduction -- Case Study
# Import Libraries
**Import the usual libraries**
```
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
%matplotlib inline
```
# Data Set : Cancer Data Set
Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. The separating plane in the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].
This database is also available through the UW CS ftp server: ftp ftp.cs.wisc.edu cd math-prog/cpo-dataset/machine-learn/WDBC/
Also can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
Attribute Information:
1) ID number 2) Diagnosis (M = malignant, B = benign) 3-32)
Ten real-valued features are computed for each cell nucleus:
a) radius (mean of distances from center to points on the perimeter) b) texture (standard deviation of gray-scale values) c) perimeter d) area e) smoothness (local variation in radius lengths) f) compactness (perimeter^2 / area - 1.0) g) concavity (severity of concave portions of the contour) h) concave points (number of concave portions of the contour) i) symmetry j) fractal dimension ("coastline approximation" - 1)
The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius.
All feature values are recorded with four significant digits.
Missing attribute values: none
Class distribution: 357 benign, 212 malignant
## Get the Data
** Use pandas to read data as a dataframe called df.**
```
# Load the Wisconsin breast-cancer dataset and take a first look.
df = pd.read_csv('breast-cancer-data.csv')
df.head()
# Check the data , there should be no missing values
df.info()
# Canonical names of the 30 breast-cancer features.
# BUG FIX: the original array literal was missing commas between the string
# literals, so Python's implicit string concatenation collapsed all 30 names
# into a single element.
feature_names = np.array(['mean radius', 'mean texture', 'mean perimeter', 'mean area',
                          'mean smoothness', 'mean compactness', 'mean concavity',
                          'mean concave points', 'mean symmetry', 'mean fractal dimension',
                          'radius error', 'texture error', 'perimeter error', 'area error',
                          'smoothness error', 'compactness error', 'concavity error',
                          'concave points error', 'symmetry error', 'fractal dimension error',
                          'worst radius', 'worst texture', 'worst perimeter', 'worst area',
                          'worst smoothness', 'worst compactness', 'worst concavity',
                          'worst concave points', 'worst symmetry', 'worst fractal dimension'])
```
#### Convert diagnosis column to 1/0 and store in new column target
```
from sklearn.preprocessing import LabelEncoder
# # Encode label diagnosis
# # M -> 1
# # B -> 0
# Get All rows, but only last column
target_data=df["diagnosis"]
# LabelEncoder assigns 0/1 alphabetically, so B (benign) -> 0, M -> 1.
encoder = LabelEncoder()
target_data = encoder.fit_transform(target_data)
```
#### Store the encoded column in dataframe and drop the diagnosis column for simpilcity
```
# The encoded labels now live in target_data, so drop the raw text column.
df.drop(["diagnosis"],axis = 1, inplace = True)
```
## Principal Component Analysis -- PCA
Lets use PCA to find the first two principal components, and visualize the data in this new, two-dimensional space, with a single scatter-plot
Scale data so that each feature has a single unit variance.
```
from sklearn.preprocessing import StandardScaler
# Standardize every feature to zero mean / unit variance before PCA so no
# feature dominates purely because of its scale.
scaler = StandardScaler()
scaler.fit(df)
scaled_data = scaler.transform(df)
```
Now we can transform this data to its first 2 principal components.
```
from sklearn.decomposition import PCA
# Project the 30 standardized features onto the top 2 principal components.
pca = PCA(n_components=2)
pca.fit(scaled_data)
x_pca = pca.transform(scaled_data)
# Shapes: (n_samples, 30) -> (n_samples, 2)
scaled_data.shape
x_pca.shape
```
#### Reduced 30 dimensions to just 2! Let's plot these two dimensions out!
** Q1. Plot scatter for 2 components. What inference can you draw from this data? **
```
# Scatter of the samples in PC space, coloured by diagnosis (0 = benign,
# 1 = malignant).
plt.figure(figsize=(9,6))
plt.scatter(x_pca[:,0],x_pca[:,1],c=target_data,cmap='viridis')
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
```
## Interpreting the components
Unfortunately, with this great power of dimensionality reduction, comes the cost of being able to easily understand what these components represent.
The components correspond to combinations of the original features, the components themselves are stored as an attribute of the fitted PCA object:
```
pca.components_
```
# Explained Variance
The explained variance tells you how much information (variance) can be attributed to each of the principal components. This is important as you can convert n dimensional space to 2 dimensional space, you lose some of the variance (information).
** Q2. What is the variance attributed by 1st and 2nd Components? **
** Q3 Ideally the sum above should be 100%. What happened to the remaining variance ? **
```
pca.explained_variance_ratio_
```
## Lets try with 3 Principal Components
```
# Repeat the projection, this time keeping 3 principal components.
pca_3 = PCA(n_components=3)
pca_3.fit(scaled_data)
x_pca_3 = pca_3.transform(scaled_data)
```
In this numpy matrix array, each row represents a principal component, and each column relates back to the original features. we can visualize this relationship with a heatmap:
```
x_pca_3.shape
```
** Q4. What is the total variance attributed by three Components? **
```
pca_3.explained_variance_ratio_
```
### Lets check the accuracy for 2 vs. 3 components
** Q5. What is accuracy for component count 2 vs. 3 ?**
```
from sklearn.model_selection import train_test_split
train_data, test_data, train_output, test_output = train_test_split( df, target_data, test_size=0.3, random_state=101)
# NOTE(review): scaler and pca were fit on the *full* dataset earlier
# (scaler.fit(df), pca.fit(scaled_data)), so the test split leaks into the
# preprocessing — acceptable for a tutorial, not for a real evaluation.
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
train_data = pca.transform(train_data)
test_data = pca.transform(test_data)
from sklearn.linear_model import LogisticRegression
# Fit logistic regression on the 2-component projection; the last expression
# reports test accuracy in the notebook.
logisticRegr = LogisticRegression(solver = 'lbfgs')
logisticRegr.fit(train_data, train_output)
logisticRegr.score(test_data, test_output)
```
Score for 3 components
```
# Same split and scaling, now projecting onto 3 principal components to
# compare accuracy against the 2-component model above.
train_data, test_data, train_output, test_output = train_test_split( df, target_data, test_size=0.3, random_state=101)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
train_data = pca_3.transform(train_data)
test_data = pca_3.transform(test_data)
logisticRegr = LogisticRegression(solver = 'lbfgs')
logisticRegr.fit(train_data, train_output)
logisticRegr.score(test_data, test_output)
```
# End of Case Study
| github_jupyter |
## CCNSS 2018 Module 5: Whole-Brain Dynamics and Cognition
# Tutorial 2: Introduction to Complex Network Analysis (II)
*Please execute the cell bellow in order to initialize the notebook environment*
```
# Notebook setup: fetch the course repository and copy the tutorial data and
# net_tool.py into the working directory (IPython shell escapes).
!rm -rf data ccnss2018_students
!if [ ! -d data ]; then git clone https://github.com/ccnss/ccnss2018_students; \
cp -rf ccnss2018_students/module5/2_introduction_to_complex_network_analysis_2/data ./; \
cp ccnss2018_students/module5/net_tool.py ./; fi
import matplotlib.pyplot as plt    # import matplotlib
import numpy as np                 # import numpy
import math                        # import basic math functions
import random                      # import basic random number generator functions
import csv                         # import CSV(Comma Separated Values) file reading and writing
import scipy as sp                 # import scipy
from scipy import sparse           # import sparse module from scipy
from scipy import signal           # import signal module from scipy
import os                          # import basic os functions
import time                        # import time to measure real time
import collections                 # import collections
import networkx as nx              # import networkx
import sys
sys.path.append('../')
import net_tool as net             # import net_tool, a network analysis toolbox from tutorial #1
# Paths to the ECoG recordings for each brain state of monkey George.
data_folder = 'data'
print('Available data files:\n'+'\n'.join(sorted(os.listdir(data_folder))))
data_file_1 = os.path.join(data_folder, 'george_baseline_44.txt')
data_file_2 = os.path.join(data_folder, 'george_propofol.txt')
data_file_3 = os.path.join(data_folder, 'george_ketamin.txt')
data_file_4 = os.path.join(data_folder, 'george_medetomidine.txt')
```
# Objectives
In this notebook we will construct a functional network from a given time series. Following up on the powerpoint tutorial, we will first construct a functional network from the brain signals, and compare functional network properties for different states of the brain.
## Background
Network theory (graph theory) measures can be applied to any kind of network, including the brain. Structural networks of various species are good examples. We can also construct fuctional networks from time series data we observe using various techniques such as fMRI, EEG, ECoG, and MEG.
Using an ECoG data from a macaque as an example, We will go through the following steps:
* Appy a measure (PLI: phase lag index) to two time series, and construct a PLI matrix.
* Construct a network from the PLI matrix, by applying a threshold.
* Apply various network measures to the resulting network.
* Construct the functional networks for different brain states, and compare how they differ from each other.
* (Optional) Divide the time series into small time windows, and construct functional network for each time window.
The example we will analyze is a thirty-second segment of whole-brain ECoG data of a macaque monkey named George, from an eyes-closed resting state. The sampling frequency is 1000 Hz, resulting in a total of 30,000 time points for each channel. The data consists of signals coming from 106 areas that cover the left hemisphere. The data is preprocessed by applying a band-pass filter to remove the alpha wave component (7-13 Hz) from the signal. Alpha waves are correlated with global interactions of the brain for many instances of the brain states.
```
# Read the tab-separated ECoG segments (rows = time points, columns =
# channels) and convert to float32 arrays.
george_base = [ row for row in csv.reader(open(data_file_1,'r'),delimiter='\t')]
george_base = np.array(george_base).astype(np.float32)
george_propofol = [ row for row in csv.reader(open(data_file_2,'r'),delimiter='\t')]
george_propofol = np.array(george_propofol).astype(np.float32)
```
**EXERCISE 0: Calculating *i)* the phases of oscillating signals, and *ii)* the differences between the phases from two signals. Read through and understand the code, which will be used in later exercises (Exercise #4).**
$i)$ Every oscillating signal $S_j$ can be represented by its amplitude and its phase:
$$ S_j(t) = r_j(t) e^{i \theta_j(t) } = r_j(t) ( \cos \theta_j(t) + i \ \sin \theta_j(t) ) .\\$$
Using this representation, we could assign $phase$ $\theta_j$ to the signal at every time point $t$. One way of computing the phase of a signal for each time point is using the ***Hilbert transform***.
• We can obtain the signal in the form of above representation by `sp.hilbert`($S_j$). After that, we could use `np.angle()` to get the angle at each time point $t$: `np.angle(sp.hilbert`( $S_j$ ) `).`
$$ $$
$ii)$ After getting the angle $\theta_j$ of each signal $S_j$, we can calculate the differences between phases:
$$ \Delta \theta_{jk}(t) = \theta_j(t) - \theta_k(t) \\$$
Best way to calculate the phase difference, again is to calculate it in the exponent form:
$$ e^{i \Delta \theta_{jk} (t)} = e^{i ( \theta_j (t) - \theta_k (t) ) },\\ $$
then take the angle of $ e^{i \Delta \theta_{jk} (t)} $:
$$ \Delta \theta_{jk} (t) = arg ( e^{i \Delta \theta_{jk} (t)} ) .\\ $$
We can obtain the angle by using `np.angle()`.
This phase difference gives a valuable information about the "directionality" between pair of oscillators.
• Calculate the $\theta_{ij}$ between all pairs of time series, and build a phase-difference matrix. Each elements of the matrix containing time averaged phase difference $\langle \theta_{ij} \rangle _t$ between $i$ and $j$. The resulting matrix will be anti-symmetric.
• From the phase-difference matrix we constructed, compute the average phase-difference for each node. Calculate the row-sum of the matrix:
$$ \theta_i = \frac{1}{N} \sum_{j=1}^{N} \langle \theta_{ij} \rangle _t,$$
then we can have a vector of averaged phase-differences, each element of the vector corresponding for each node.
This average phase-difference for each node will tell us whether one node is phase-leading or phase-lagging with respect to other nodes over a given period of time.
```
# Getting the phases from the signals, using np.angle and sp.signal.hilbert:
# the Hilbert transform along the time axis (axis=0) gives the analytic
# signal, and np.angle extracts the instantaneous phase at every time point.
george_base_angle = np.angle(sp.signal.hilbert( george_base,axis=0) )
print("size of george_base_angle is:" , george_base_angle.shape )
def phase_diff_mat(theta):
    """Compute pairwise time-averaged phase differences between channels.

    Args:
        theta: phase array of shape (T, N) — T time points, N nodes/channels.

    Returns:
        (PDiff_mean, PDiff_mat):
        PDiff_mat[i, j] is the time-averaged wrapped phase difference
        theta_i - theta_j (an anti-symmetric N x N matrix), and
        PDiff_mean[i] is the row mean — the average phase lead (+) or
        lag (-) of node i with respect to all other nodes.
    """
    n_nodes = theta.shape[1]
    pdiff_mat = np.zeros((n_nodes, n_nodes))
    for ch1 in range(n_nodes):
        for ch2 in range(ch1 + 1, n_nodes):
            # Wrap the raw difference into (-pi, pi] via the complex plane
            # before averaging over time.
            wrapped = np.angle(np.exp(1j * (theta[:, ch1] - theta[:, ch2])))
            mean_diff = np.mean(wrapped)
            pdiff_mat[ch1, ch2] = mean_diff
            pdiff_mat[ch2, ch1] = -mean_diff  # matrix is anti-symmetric
    # Average phase difference of each node w.r.t. all others
    # (the zero diagonal is included in the mean, as in the original).
    pdiff_mean = np.mean(pdiff_mat, axis=1)
    return pdiff_mean, pdiff_mat
```
**EXERCISE 1: Calculating the PLI for two given time series**
The data is in a form of 30,000x106 (# of time points x # of channels) sized matrix. We will measure $PLI$s between all possible pairs of channels.
We now define $dPLI$ (directed phase-lag index) as the following:
$$ dPLI_{ij} = \frac{1}{T}\sum_{t=1}^{T} sign ( \Delta \theta_{ij} (t) ) \, $$
where
$$ \Delta \theta_{ij} = \theta_i - \theta_j ,$$
and
$$ sign ( \theta_i - \theta_j ) =
\begin{cases}
1 & if \ \Delta \theta_{ij} > 0 \\
0 & if \ \Delta \theta_{ij} = 0 \\
-1 & if \ \Delta \theta_{ij} < 0. \\
\end{cases} \\ $$
$dPLI$ will range from 1 to -1, and give us information about which signal is leading another. \
If we take absolute value of $dPLI$, we get $PLI$ (phase lag index):
$$\\ PLI_{ij} =|dPLI_{ij}| = | \langle sign ( \Delta \theta_{ij} ) \rangle_t | .\\$$
$PLI$ will range from 0 to 1, and give us information about whether two signals have consistent phase-lead/lag relationship with each other over given period of time.
• Plot the time series for the first 3 channels of `george_base` (first 500 time points)
• Plot the time series for the first 3 channels of `george_base_angle` (first 500 time points).
• Compute $PLI_{ij}$ for all pairs of $i$ and $j$, and make $PLI$ matrix. The resulting matrix will be symmetric. You can use `np.sign()`.
```
# Write your code for plotting time series
```
**EXPECTED OUTPUT**

```
# EXERCISE STUB: students are expected to implement the body.  As given,
# PLI and dPLI are never assigned, so calling this raises NameError.
def cal_dPLI_PLI(theta):
    # Insert your code for calculating dPLI and PLI.
    # theta must have dimension TxN, where T is the number of time points
    # and N is the number of nodes/channels.
    # Outputs: PLI matrix (PLIs between all channel pairs, symmetric) and
    # dPLI matrix (dPLIs between all channel pairs, anti-symmetric).
    return PLI,dPLI
george_base_PLI, george_base_dPLI = cal_dPLI_PLI(george_base_angle)
print(george_base_dPLI[:5,:5])
```
**EXPECTED OUTPUT**
```
[[ 0. -0.09446667 0.0348 -0.05666667 0.28 ]
[ 0.09446667 0. 0.04926667 0.00693333 0.341 ]
[-0.0348 -0.04926667 0. -0.0614 0.2632 ]
[ 0.05666667 -0.00693333 0.0614 0. 0.3316 ]
[-0.28 -0.341 -0.2632 -0.3316 0. ]]
```
**EXERCISE 2: Constructing network connectivity matrix**
We can construct a network from the above PLI matrix. Two approaches are possible. We can apply a threshold value for the PLI matrix and turn it into a binary network. Or, we can take the PLI value as is, and turn the matrix into a weighted network. We will take the first approach.
• Binary network approach: one must determine a right threshold value for the matrix. For example, you can choose a value such that highest 30% of the PLI values between nodes will turn into connection.
• (Optional) Weighted network approach: we can take the PLI value itself as the weighted link between two nodes.
```
# EXERCISE STUB: threshold the PLI matrix into a binary adjacency matrix
# (students implement the body; data_mat_binary is undefined until then).
def cal_mat_thresholded(data_mat, threshold):
    # Insert your code here.
    # Input: the original (symmetric) matrix and a threshold.
    # Output: the thresholded binary matrix; it will be symmetric.
    return data_mat_binary
threshold = 0.3
george_base_PLI_p3 = cal_mat_thresholded(george_base_PLI,threshold)
print("sum of george_base_PLI_p3:", np.sum(george_base_PLI_p3))
```
**EXPECTED OUTPUT**
```
sum of george_base_PLI_p3: 3372.0
```
**EXERCISE 3: Applying network measure to the functional network**
We now have a resulting functional network from macaque ECoG data. Now we can apply network measures to this network.
• Apply network measures to this network, such as $C, L, E$ and $b$ (clustering coefficient, characteristic path length, efficiency, and betweenness centrality).
(If you prefer, you can use functions that we provide in net.py. Ask tutors for the details.)
```
# insert your code here
```
**EXPECTED OUTPUT**
```
C: 0.4405029623271814
E and L: 1.735130278526505 0.6451332734351602
b: 38.594339622641506
```
**EXERCISE 4: Computing phase measures for the functional network**
We can define a mean of $PLI_i$ over all other nodes as follows:
$$ PLI_i = \frac{1}{N-1} \sum_{j=1,\ j \neq i }^{N} PLI_{ij} ,$$
This quantity will tell us how persistently a node is locked with respect to other nodes, over a given period of time. Usually, the node with high $PLI_i$ is the one with high degree in a network: the $k_i$ and $PLI_i$ of a node $i$ are correlated.
We can also define a mean of $dPLI_i$ over all other nodes as follows:
$$ dPLI_i = \frac{1}{N-1} \sum_{j=1,\ j \neq i}^{N} dPLI_{ij} ,$$
This quantity will tell us how persistently a node is phase-leading or phase-lagging with respect to other nodes, over a given period of time. This quantity is correlated with the average phase-difference $\theta_i$ which we defined in an earlier exercise.
• Do a scatterplot of the mean PLI and mean dPLI. Is there any pattern between these two quantities? Calculate the Pearson correlation coefficient between these two vectors.
• Also, you can do a scatterplot of degree of each node vs. average phase-difference. Do they resemble above the scatter plot?
```
# insert your code for calculating mean dPLI and PLI, mean phase, and degree of the network
# EXERCISE STUB: the three blank assignments below are intentionally left for
# students; as written this cell is a SyntaxError until they are completed.
george_base_PLI_mean =
george_base_dPLI_mean =
george_base_phase_diff_mean,_ = phase_diff_mat(george_base_angle)
george_base_PLI_p3_degree =
# Scatter of mean dPLI vs mean PLI per node, labelling each point with its
# node index.
plt.figure()
for i in range(len(george_base_PLI_mean)):
    plt.plot(george_base_PLI_mean[i],george_base_dPLI_mean[i],'C0s')
    plt.text(george_base_PLI_mean[i],george_base_dPLI_mean[i],str(i))
plt.xlabel('PLI')
plt.ylabel('dPLI')
plt.title('dPLI vs PLI')
plt.show()
corr_PLI_dPLI = np.corrcoef(george_base_PLI_mean,george_base_dPLI_mean)
print("corr. of PLI and dPLI is:", corr_PLI_dPLI[1,0])
# Scatter of node degree vs mean phase-difference per node.
plt.figure()
for i in range(len(george_base_PLI_p3_degree)):
    plt.plot(george_base_PLI_p3_degree[i] , george_base_phase_diff_mean[i],'C0s' )
    plt.text(george_base_PLI_p3_degree[i] , george_base_phase_diff_mean[i],str(i))
plt.xlabel('k')
plt.ylabel('theta')
plt.title('theta vs k')
plt.show()
corr_degree_phase = np.corrcoef(george_base_PLI_p3_degree , george_base_phase_diff_mean)
print("corr. of degree and phase is:", corr_degree_phase[1,0])
```
**EXPECTED OUTPUT**

```
corr. of PLI and dPLI is: -0.5848065158893657
```

```
corr. of degree and phase is: -0.5082925792988023
```
**EXERCISE 5: Dividing the data into moving time windows (optional)**
Sometimes the time length of the data is large. Or, one wants to investigate the changes that occurs in finer time resolution. For example, we can apply a time window of 2 seconds with an overlap of 1 second to the data, dividing the data into 29 time segments of size 2000x106 matrix.
• Write a code for a function that divide a given time series into moving time windows.
• Using the codes from Exercise 1 and 2, construct a connectivity matrix for each time window.
• We can now apply network measures to the resulting connectivity matrices.
```
# Sliding-window parameters: 2 s windows stepped every 1 s (50% overlap at
# the 1000 Hz sampling rate).
win_len = 2000
win_start = 10000  # NOTE(review): unused below — confirm whether windowing should start here
overlap = 1000
PLI_win = []
dPLI_win = []
# Compute PLI/dPLI for each window; the trailing partial window is dropped
# by the [:-1] slices below.
for idx in range(0, george_base_angle.shape[0], overlap):
    temp = cal_dPLI_PLI(george_base_angle[idx:idx+win_len])
    PLI_win += [temp[0]]
    dPLI_win += [temp[1]]
PLI_win = np.array(PLI_win[:-1])
dPLI_win = np.array(dPLI_win[:-1])
```
**EXERCISE 6: Comparison between two different states of brain (optional, possible for mini projects)**
The above analysis can be repeated for different states of the brain. For example, we can construct the network from anesthetized unconscious states. The provided data is from anesthetized George, induced with propofol. We can construct the connectivity network and apply network measures.
• Repeat the processes in Exercise 1 and 2 to construct the resulting functional network.
• Apply network measures as in Exercise 3, and phase measures as in Exercise 4. Compare the result with the resting state network. How are they different from each other?
```
```
**EXERCISE 7: Phase coherence (optional, possible for mini projects)**
There are many measures which can be applied to construct functional connectivity matrix. One measure is phase coherence $(PC)$. Phase coherence $PC$ between two time-series $a$ and $b$ is defined as the following:
$$ PC_{ab} = \lvert {R e^{i \Theta_{ab}}} \rvert = \left| \frac{1}{T} \sum_{t=1}^{T} e^{i \theta_{ab}(t)} \right| , \\ $$
where $\theta_{ab}(t)$ is difference of phases of time-series $a$ and $b$ at time $t$:
$$ \theta_{ab}(t) = \theta_a(t) - \theta_b(t) \\ $$
• Construct a code for a function that computes $PC_{ij}$ for given time-series $i$ and $j$.
• Construct a code for a function that constructs $PC$ matrix which contain $PC_{ij}$ for all possible pairs of time_series.
• Use the codes to construct connectivity matrix as in Exercise 2.
• After the construction, we can proceed to apply the measures as in Exercise 3.
```
```
** EXERCISE 8: Pearson correlation coefficients (optional, possible for mini projects)**
• Another measure which can be used to construct connectivity matrix is Pearson correlation coefficient $c$. Measure *Pearson* correlation coefficients ($c$) between all possible pairs, and contruct a correlation matrix with the coefficients as its element. The resulting matrix will be a symmetric matrix. The pearson correlation coefficient $c_{xy}$ between two data set $x=\{x_1, x_2, x_3, ..., x_n \}$ and $y=\{y_1, y_2, y_3, ..., y_n \}$ is defined as the following:
$$ c_{xy} = \frac { \sum_{i=1}^{n} (x_i - \bar x) (y_i - \bar y) } { \sqrt { \sum_{i=1}^{n} (x_i - \bar x )^2 } \sqrt {\sum_{i=1}^{n} (y_i - \bar y)^2 } } $$
where $\bar x$ and $\bar y$ are the mean of $x$ and $y$.
Alternatively, we can rewrite in the following way:
$$ c_{xy} = \frac { cov(x,y) } { \sqrt { var(x) \ var(y) } } $$
where
$$ cov(x,y) = \langle (x_i - \bar x) (y_i - \bar y) \rangle _i \\
var(x,y) = \langle x_i - \bar x \rangle _i.$$
• You can construct a code for a function that computes $c_{ij}$ for given time-series $i$ and $j$, or you can use a numpy function, `np.corrcoef()`.
• Construct a code for a function that constructs correlation coefficient $c$ matrix which contain $c_{ij}$ for all possible pairs of time series.
• Use the codes to construct connectivity matrix as in Exercise 2.
• After the construction, we can proceed to Exercise 3.
```
```
| github_jupyter |
```
#import the needed package
import requests
import pandas as pd
import numpy as np
from bokeh.plotting import figure, output_file, show, output_notebook
from bokeh.models import NumeralTickFormatter
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, Select
from bokeh.plotting import figure
# Query the NREL PVWatts v6 API for the predicted output of a 4 kW system at a
# range of tilt angles, for both the TMY2 and TMY3 climate datasets, then plot
# the relative annual-production loss of each tilt versus the best tilt.
# NOTE(review): the API key is hard-coded; consider moving it to an
# environment variable before sharing this notebook.
PVWATTS_API_KEY = "spJFj2l5ghY5jwk7dNfVYs3JHbpR6BOGHQNO8Y9Z"


def fetch_tilt_frames(dataset, tilts):
    """Fetch one PVWatts 'outputs' DataFrame per tilt angle.

    :param dataset: climate dataset name accepted by PVWatts ('tmy2' or 'tmy3')
    :param tilts: iterable of panel tilt angles in degrees
    :return: list of pandas DataFrames, one per tilt, built from the API 'outputs'
    """
    frames = []
    for tilt in tilts:
        params = {
            "format": 'JSON',  # was "formt" (typo); PVWatts expects "format"
            "api_key": PVWATTS_API_KEY,
            "system_capacity": 4, "module_type": 0, "losses": 14.08,
            "array_type": 0, "tilt": tilt, "azimuth": 180,
            # Wasilla-Palmer area coordinates
            "lat": 61.58, "lon": -149.44, "dataset": dataset,
        }
        json_response = requests.get("https://developer.nrel.gov/api/pvwatts/v6",
                                     params=params).json()
        frames.append(pd.DataFrame(data=json_response['outputs']))
    return frames


# Tilt angles to evaluate, in degrees.
tilts = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90]

# Fetch predictions for both climate datasets.  (BUG FIX: the TMY2 fetch was
# commented out, so the later use of `nrel_long_tilt_tmy2` raised a NameError.)
nrel_long_tilt_tmy2 = fetch_tilt_frames('tmy2', tilts)
nrel_long_tilt_tmy3 = fetch_tilt_frames('tmy3', tilts)

# Annual AC production (kWh) for each tilt; index 2 of 'ac_annual' holds the value.
annual_production_tmy2 = [frame['ac_annual'][2] for frame in nrel_long_tilt_tmy2]
annual_production_tmy3 = [frame['ac_annual'][2] for frame in nrel_long_tilt_tmy3]

df_tmy2 = pd.DataFrame({'Tilts': tilts, 'Annual_production': annual_production_tmy2})
df_tmy3 = pd.DataFrame({'Tilts': tilts, 'Annual_production': annual_production_tmy3})

# Row index of the maximum annual production for each dataset.
max_tilt_tmy2 = int(df_tmy2[['Annual_production']].idxmax().values)
max_tilt_tmy3 = int(df_tmy3[['Annual_production']].idxmax().values)

# Fractional production loss of every tilt relative to the best tilt
# (vectorized; replaces the per-row iterrows loops with identical results).
df_tmy2['loss'] = 1 - df_tmy2['Annual_production'] / df_tmy2['Annual_production'][max_tilt_tmy2]
df_tmy3['loss'] = 1 - df_tmy3['Annual_production'] / df_tmy3['Annual_production'][max_tilt_tmy3]

output_file("Wasilla-Palmer_tilts_loss.html")
p = figure(x_axis_label='Tilts', y_axis_label='loss (%)', plot_width=500, plot_height=250)
# add a line renderer per dataset
p.line(tilts, df_tmy2['loss'], line_width=2, color='red', legend="TMY2")
p.line(tilts, df_tmy3['loss'], line_width=2, color='blue', legend="TMY3")
p.xaxis.ticker = [10, 20, 30, 40, 50, 60, 70, 80, 90]
p.yaxis.formatter = NumeralTickFormatter(format='0 %')
p.title.text = "Annual Production loss of different tilts"
p.title.align = "center"
p.title.text_color = "olive"
p.title.text_font = "times"
p.title.text_font_style = "italic"
p.title.text_font_size = '12pt'
show(p)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/dlmacedo/starter-academic/blob/master/3The_ultimate_guide_to_Encoder_Decoder_Models_3_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%%capture
!pip install -qq git+https://github.com/huggingface/transformers.git
```
# **Transformer-based Encoder-Decoder Models**
The *transformer-based* encoder-decoder model was introduced by Vaswani et al. in the famous [Attention is all you need paper](https://arxiv.org/abs/1706.03762) and is today the *de-facto* standard encoder-decoder architecture in natural language processing (NLP).
Recently, there has been a lot of research on different *pre-training* objectives for transformer-based encoder-decoder models, *e.g.* T5, Bart, Pegasus, ProphetNet, Marge, *etc*..., but the model architecture has stayed largely the same.
The goal of the blog post is to give an **in-detail** explanation of **how** the transformer-based encoder-decoder architecture models *sequence-to-sequence* problems. We will focus on the mathematical model defined by the architecture and how the model can be used in inference. Along the way, we will give some background on sequence-to-sequence models in NLP and break down the *transformer-based* encoder-decoder architecture into its **encoder** and **decoder** part. We provide many illustrations and establish the link
between the theory of *transformer-based* encoder-decoder models and their practical usage in 🤗Transformers for inference.
Note that this blog post does *not* explain how such models can be trained - this will be the topic of a future blog post.
Transformer-based encoder-decoder models are the result of years of research on *representation learning* and *model architectures*.
This notebook provides a short summary of the history of neural encoder-decoder models. For more context, the reader is advised to read this awesome [blog post](https://ruder.io/a-review-of-the-recent-history-of-nlp/) by Sebastion Ruder. Additionally, a basic understanding of the *self-attention architecture* is recommended.
The following blog post by Jay Alammar serves as a good refresher on the original Transformer model [here](http://jalammar.github.io/illustrated-transformer/).
At the time of writing this notebook, 🤗Transformers comprises the encoder-decoder models *T5*, *Bart*, *MarianMT*, and *Pegasus*, which are summarized in the docs under [model summaries](https://huggingface.co/transformers/model_summary.html#sequence-to-sequence-models).
The notebook is divided into four parts:
- **Background** - *A short history of neural encoder-decoder models is given with a focus on on RNN-based models.* - [click here](https://colab.research.google.com/drive/18ZBlS4tSqSeTzZAVFxfpNDb_SrZfAOMf?usp=sharing)
- **Encoder-Decoder** - *The transformer-based encoder-decoder model is presented and it is explained how the model is used for inference.* - [click here](https://colab.research.google.com/drive/1XpKHijllH11nAEdPcQvkpYHCVnQikm9G?usp=sharing)
- **Encoder** - *The encoder part of the model is explained in detail.*
- **Decoder** - *The decoder part of the model is explained in detail.* - to be published on *Thursday, 08.10.2020*
Each part builds upon the previous part, but can also be read on its own.
## **Encoder**
As mentioned in the previous section, the *transformer-based* encoder maps the input sequence to a contextualized encoding sequence:
$$ f_{\theta_{\text{enc}}}: \mathbf{X}_{1:n} \to \mathbf{\overline{X}}_{1:n}. $$
Taking a closer look at the architecture, the transformer-based encoder is a stack of residual *encoder blocks*.
Each encoder block consists of a **bi-directional** self-attention layer, followed by two feed-forward layers. For simplicity, we disregard the normalization layers in this notebook. Also, we will not further discuss the role of the two feed-forward layers, but simply see it as a final vector-to-vector mapping required in each encoder block ${}^1$.
The bi-directional self-attention layer puts each input vector $\mathbf{x'}_j, \forall j \in \{1, \ldots, n\}$ into relation with all input vectors $\mathbf{x'}_1, \ldots, \mathbf{x'}_n$ and by doing so transforms the input vector $\mathbf{x'}_j$ to a more "refined" contextual representation of itself, defined as $\mathbf{x''}_j$.
Thereby, the first encoder block transforms each input vector of the input sequence $\mathbf{X}_{1:n}$ (shown in light green below) from a *context-independent* vector representation to a *context-dependent* vector representation, and the following encoder blocks further refine this contextual representation until the last encoder block outputs the final contextual encoding $\mathbf{\overline{X}}_{1:n}$ (shown in darker green below).
Let's visualize how the encoder processes the input sequence "I want to buy a car EOS" to a contextualized encoding sequence. Similar to RNN-based encoders, transformer-based encoders also add a special "end-of-sequence" input vector to the input sequence to hint to the model that the input vector sequence is finished ${}^2$.

Our exemplary *transformer-based* encoder is composed of three encoder blocks, whereas the second encoder block is shown in more detail in the red box on the right for the first three input vectors $\mathbf{x}_1, \mathbf{x}_2$ and $\mathbf{x}_3$.
The bi-directional self-attention mechanism is illustrated by the fully-connected graph in the lower part of the red box and the two feed-forward layers are shown in the upper part of the red box. As stated before, we will focus only on the bi-directional self-attention mechanism.
As can be seen each output vector of the self-attention layer $\mathbf{x''}_i, \forall i \in \{1, \ldots, 7\}$ depends *directly* on *all* input vectors $\mathbf{x'}_1, \ldots, \mathbf{x'}_7$. This means, *e.g.* that the input vector representation of the word "want", *i.e.* $\mathbf{x'}_2$, is put into direct relation with the word "buy", *i.e.* $\mathbf{x'}_4$, but also with the word "I",*i.e.* $\mathbf{x'}_1$. The output vector representation of "want", *i.e.* $\mathbf{x''}_2$, thus represents a more refined contextual representation for the word "want".
Let's take a deeper look at how bi-directional self-attention works.
Each input vector $\mathbf{x'}_i$ of an input sequence $\mathbf{X'}_{1:n}$ of an encoder block is projected to a key vector $\mathbf{k}_i$, value vector $\mathbf{v}_i$ and query vector $\mathbf{q}_i$ (shown in orange, blue, and purple respectively below) through three trainable weight matrices $\mathbf{W}_q, \mathbf{W}_v, \mathbf{W}_k$:
$$ \mathbf{q}_i = \mathbf{W}_q \mathbf{x'}_i,$$
$$ \mathbf{v}_i = \mathbf{W}_v \mathbf{x'}_i,$$
$$ \mathbf{k}_i = \mathbf{W}_k \mathbf{x'}_i, $$
$$ \forall i \in \{1, \ldots n \}.$$
Note, that the **same** weight matrices are applied to each input vector $\mathbf{x}_i, \forall i \in \{1, \ldots, n\}$. After projecting each input vector $\mathbf{x}_i$ to a query, key, and value vector, each query vector $\mathbf{q}_j, \forall j \in \{1, \ldots, n\}$ is compared to all key vectors $\mathbf{k}_1, \ldots, \mathbf{k}_n$. The more similar one of the key vectors $\mathbf{k}_1, \ldots \mathbf{k}_n$ is to a query vector $\mathbf{q}_j$, the more important is the corresponding value vector $\mathbf{v}_j$ for the output vector $\mathbf{x''}_j$. More specifically, an output vector $\mathbf{x''}_j$ is defined as the weighted sum of all value vectors $\mathbf{v}_1, \ldots, \mathbf{v}_n$ plus the input vector $\mathbf{x'}_j$. Thereby, the weights are proportional to the cosine similarity between $\mathbf{q}_j$ and the respective key vectors $\mathbf{k}_1, \ldots, \mathbf{k}_n$, which is mathematically expressed by $\textbf{Softmax}(\mathbf{K}_{1:n}^\intercal \mathbf{q}_j)$ as illustrated in the equation below.
For a complete description of the self-attention layer, the reader is advised to take a look at [this](http://jalammar.github.io/illustrated-transformer/) blog post or the original [paper](https://arxiv.org/abs/1706.03762).
Alright, this sounds quite complicated. Let's illustrate the bi-directional self-attention layer for one of the query vectors of our example above. For simplicity, it is assumed that our exemplary *transformer-based* decoder uses only a single attention head `config.num_heads = 1` and that no normalization is applied.

On the left, the previously illustrated second encoder block is shown again and on the right, an in detail visualization of the bi-directional self-attention mechanism is given for the second input vector $\mathbf{x'}_2$ that corresponds to the input word "want".
At first all input vectors $\mathbf{x'}_1, \ldots, \mathbf{x'}_7$ are projected to their respective query vectors $\mathbf{q}_1, \ldots, \mathbf{q}_7$ (only the first three query vectors are shown in purple above), value vectors $\mathbf{v}_1, \ldots, \mathbf{v}_7$ (shown in blue), and key vectors $\mathbf{k}_1, \ldots, \mathbf{k}_7$ (shown in orange). The query vector $\mathbf{q}_2$ is then multiplied by the transpose of all key vectors, *i.e.* $\mathbf{K}_{1:7}^{\intercal}$ followed by the softmax operation to yield the *self-attention weights*. The self-attention weights are finally multiplied by the respective value vectors and the input vector $\mathbf{x'}_2$ is added to output the "refined" representation of the word "want", *i.e.* $\mathbf{x''}_2$ (shown in dark green on the right).
The whole equation is illustrated in the upper part of the box on the right.
The multiplication of $\mathbf{K}_{1:7}^{\intercal}$ and $\mathbf{q}_2$ thereby makes it possible to compare the vector representation of "want" to all other input vector representations "I", "to", "buy", "a", "car", "EOS" so that the self-attention weights mirror the importance each of the other input vector representations $\mathbf{x'}_j \text{, with } j \ne 2$ for the refined representation $\mathbf{x''}_2$ of the word "want".
To further understand the implications of the bi-directional self-attention layer, let's assume the following sentence is processed: "*The house is beautiful and well located in the middle of the city where it is easily accessible by public transport*". The word "it" refers to "house", which is 12 "positions away". In transformer-based encoders, the bi-directional self-attention layer performs a single mathematical operation to put the input vector of "house" into relation with the input vector of "it" (compare to the first illustration of this section). In contrast, in an RNN-based encoder, a word that is 12 "positions away", would require at least 12 mathematical operations meaning that in an RNN-based encoder a linear number of mathematical operations are required. This makes it much harder for an RNN-based encoder to model long-range contextual representations.
Also, it becomes clear that a transformer-based encoder is much less prone to lose important information than an RNN-based encoder-decoder model because the sequence length of the encoding is kept the same, *i.e.* $\textbf{len}(\mathbf{X}_{1:n}) = \textbf{len}(\mathbf{\overline{X}}_{1:n}) = n$, while an RNN compresses the length from $\textbf{len}(\mathbf{X}_{1:n}) = n$ to just $\textbf{len}(\mathbf{c}) = 1$, which makes it very difficult for RNNs to effectively encode long-range dependencies between input words.
In addition to making long-range dependencies more easily learnable, we can see that the Transformer architecture is able to process text in parallel. Mathematically, this can easily be shown by writing the self-attention formula as a product of query, key, and value matrices:
$$\mathbf{X''}_{1:n} = \mathbf{V}_{1:n} \text{Softmax}(\mathbf{Q}_{1:n}^\intercal \mathbf{K}_{1:n}) + \mathbf{X'}_{1:n}. $$
The output $\mathbf{X''}_{1:n} = \mathbf{x''}_1, \ldots, \mathbf{x''}_n$ is computed via a series of matrix multiplications and a softmax operation, which can be parallelized effectively.
Note, that in an RNN-based encoder model, the computation of the hidden state $\mathbf{c}$ has to be done sequentially: Compute hidden state of the first input vector $\mathbf{x}_1$, then compute the hidden state of the second input vector that depends on the hidden state of the first hidden vector, etc. The sequential nature of RNNs prevents effective parallelization and makes them much more inefficient compared to transformer-based encoder models on modern GPU hardware.
Great, now we should have a better understanding of a) how transformer-based encoder models effectively model long-range contextual representations and b) how they efficiently process long sequences of input vectors.
Now, let's code up a short example of the encoder part of our `MarianMT` encoder-decoder models to verify that the explained theory holds in practice.
---
${}^1$ An in-detail explanation of the role the feed-forward layers play in transformer-based models is out-of-scope for this notebook. It is argued in [Yun et. al, (2017)](https://arxiv.org/pdf/1912.10077.pdf) that feed-forward layers are crucial to map each contextual vector $\mathbf{x'}_i$ individually to the desired output space, which the *self-attention* layer does not manage to do on its own. It should be noted here, that each output token $\mathbf{x'}$ is processed by the same feed-forward layer. For more detail, the reader is advised to read the paper.
${}^2$ However, the EOS input vector does not have to be appended to the input sequence, but has been shown to improve performance in many cases. In contrast, the *0th* $\text{BOS}$ target vector of the transformer-based decoder is required as a starting input vector to predict a first target vector.
```
%%capture
from transformers import MarianMTModel, MarianTokenizer
import torch

# Load a pre-trained English->German translation model and its tokenizer.
tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# Word-embedding layer: maps token ids to context-independent input vectors X_{1:n}.
embeddings = model.get_input_embeddings()

# create ids of encoded input vectors
input_ids = tokenizer("I want to buy a car", return_tensors="pt").input_ids

# pass input_ids to encoder; last_hidden_state is the contextualized encoding X̄_{1:n}
encoder_hidden_states = model.base_model.encoder(input_ids, return_dict=True).last_hidden_state

# change the input slightly (last word only) and pass to encoder
input_ids_perturbed = tokenizer("I want to buy a house", return_tensors="pt").input_ids
encoder_hidden_states_perturbed = model.base_model.encoder(input_ids_perturbed, return_dict=True).last_hidden_state

# compare shape and encoding of first vector: the encoder preserves sequence length
print(f"Length of input embeddings {embeddings(input_ids).shape[1]}. Length of encoder_hidden_states {encoder_hidden_states.shape[1]}")

# compare values of word embedding of "I" for input_ids and perturbed input_ids
# (bi-directional self-attention makes the encoding of "I" depend on the whole sentence)
print("Is encoding for `I` equal to its perturbed version?: ", torch.allclose(encoder_hidden_states[0, 0], encoder_hidden_states_perturbed[0, 0], atol=1e-3))
```
We compare the length of the input word embeddings, *i.e.* `embeddings(input_ids)` corresponding to $\mathbf{X}_{1:n}$, with the length of the `encoder_hidden_states`, corresponding to $\mathbf{\overline{X}}_{1:n}$.
Also, we have forwarded the word sequence "I want to buy a car" and a slightly perturbed version "I want to buy a house" through the encoder to check if the first output encoding, corresponding to "I", differs when only the last word is changed in the input sequence.
As expected the output length of the input word embeddings and encoder output encodings, *i.e.* $\textbf{len}(\mathbf{X}_{1:n})$ and $\textbf{len}(\mathbf{\overline{X}}_{1:n})$, is equal.
Second, it can be noted that the values of the encoded output vector of $\mathbf{\overline{x}}_1 = \text{"I"}$ are different when the last word is changed from "car" to "house". This however should not come as a surprise if one has understood bi-directional self-attention.
On a side-note, *autoencoding* models, such as BERT, have the exact same architecture as *transformer-based* encoder models. *Autoencoding* models leverage this architecture for massive self-supervised pre-training on open-domain text data so that they can map any word sequence to a deep bi-directional representation. In [Devlin et al. (2018)](https://arxiv.org/abs/1810.04805), the authors show that a pre-trained BERT model with a single task-specific classification layer on top can achieve SOTA results on eleven NLP tasks. All *autoencoding* models of 🤗Transformers can be found [here](https://huggingface.co/transformers/model_summary.html#autoencoding-models).
| github_jupyter |
# BPR on ML-1m in Tensorflow
```
!pip install tensorflow==2.5.0
!wget -q --show-progress https://files.grouplens.org/datasets/movielens/ml-1m.zip
!unzip ml-1m.zip
import os
import pandas as pd
import numpy as np
import random
from time import time
from tqdm.notebook import tqdm
from collections import defaultdict
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Embedding, Input
from tensorflow.keras.regularizers import l2
!pip install -q watermark
%reload_ext watermark
%watermark -m -iv -u -t -d
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow INFO/WARNING logs
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # restrict training to the first GPU

# ---- dataset / model hyper-parameters ----
file = 'ml-1m/ratings.dat'  # MovieLens-1M ratings (user::item::rating::timestamp)
trans_score = 1  # ratings >= this threshold are treated as positive feedback
test_neg_num = 100  # number of negative samples per user at test time
embed_dim = 64  # latent-factor dimension of the user/item embeddings
mode = 'inner'  # scoring mode: 'inner' (dot product) or 'dist' (squared distance)
embed_reg = 1e-6  # L2 regularization strength on the embeddings
K = 10  # top-k cutoff for HR@K / NDCG@K
learning_rate = 0.001
epochs = 20
batch_size = 512
def sparseFeature(feat, feat_num, embed_dim=4):
    """Describe a sparse feature as a small metadata dict.

    :param feat: feature name
    :param feat_num: number of distinct values the feature can take
    :param embed_dim: embedding dimension
    :return: dict with keys 'feat', 'feat_num' and 'embed_dim'
    """
    return dict(feat=feat, feat_num=feat_num, embed_dim=embed_dim)
def create_ml_1m_dataset(file, trans_score=2, embed_dim=8, test_neg_num=100):
    """Build train/val/test splits with negative sampling for MovieLens-1M.

    For every user, the last interaction goes to the test set (paired with
    `test_neg_num` negatives), the second-to-last to the validation set, and
    the rest to the training set, each paired with one sampled negative item.

    :param file: A string. dataset path.
    :param trans_score: A scalar. Ratings >= it are kept as positives; the rest are dropped.
    :param embed_dim: A scalar. latent factor.
    :param test_neg_num: A scalar. The number of test negative samples.
    :return: feature columns, train arrays, val arrays, test arrays
    """
    print('==========Data Preprocess Start=============')
    data_df = pd.read_csv(file, sep="::", engine='python',
                          names=['user_id', 'item_id', 'label', 'Timestamp'])
    # filtering: keep only items with at least 5 interactions
    data_df['item_count'] = data_df.groupby('item_id')['item_id'].transform('count')
    data_df = data_df[data_df.item_count >= 5]
    # trans score
    data_df = data_df[data_df.label >= trans_score]
    # sort chronologically within each user
    data_df = data_df.sort_values(by=['user_id', 'Timestamp'])
    # split dataset and negative sampling
    print('============Negative Sampling===============')
    train_data, val_data, test_data = defaultdict(list), defaultdict(list), defaultdict(list)
    item_id_max = data_df['item_id'].max()
    for user_id, df in tqdm(data_df[['user_id', 'item_id']].groupby('user_id')):
        pos_list = df['item_id'].tolist()

        def gen_neg():
            # rejection-sample an item id the user has not interacted with
            neg = pos_list[0]
            while neg in set(pos_list):
                neg = random.randint(1, item_id_max)
            return neg

        neg_list = [gen_neg() for i in range(len(pos_list) + test_neg_num)]
        for i in range(1, len(pos_list)):
            if i == len(pos_list) - 1:
                test_data['user_id'].append(user_id)
                test_data['pos_id'].append(pos_list[i])
                test_data['neg_id'].append(neg_list[i:])
            elif i == len(pos_list) - 2:
                val_data['user_id'].append(user_id)
                val_data['pos_id'].append(pos_list[i])
                val_data['neg_id'].append(neg_list[i])
            else:
                train_data['user_id'].append(user_id)
                train_data['pos_id'].append(pos_list[i])
                train_data['neg_id'].append(neg_list[i])
    # feature columns
    user_num, item_num = data_df['user_id'].max() + 1, data_df['item_id'].max() + 1
    feat_col = [sparseFeature('user_id', user_num, embed_dim),
                sparseFeature('item_id', item_num, embed_dim)]
    # shuffle -- BUG FIX: `random.shuffle(train_data)` on a defaultdict only
    # swapped (newly created) integer keys and left the real lists unshuffled.
    # Permute the aligned user/pos/neg lists with one shared permutation instead.
    for data in (train_data, val_data):
        perm = list(range(len(data['user_id'])))
        random.shuffle(perm)
        for key in list(data):
            data[key] = [data[key][i] for i in perm]
    train = [np.array(train_data['user_id']), np.array(train_data['pos_id']),
             np.array(train_data['neg_id'])]
    val = [np.array(val_data['user_id']), np.array(val_data['pos_id']),
           np.array(val_data['neg_id'])]
    test = [np.array(test_data['user_id']), np.array(test_data['pos_id']),
            np.array(test_data['neg_id'])]
    print('============Data Preprocess End=============')
    return feat_col, train, val, test
class BPR(Model):
    """Bayesian Personalized Ranking with user/item embedding tables.

    Scores user-item pairs either by inner product ('inner' mode) or by
    squared Euclidean distance ('dist' mode), and learns from
    (user, positive item, negative item) triplets by registering a pairwise
    ranking loss via `self.add_loss` inside `call`.
    """

    def __init__(self, feature_columns, mode='inner', embed_reg=1e-6):
        """
        BPR
        :param feature_columns: A list. user feature columns + item feature columns
        :param mode: A string. 'inner' or 'dist'.
        :param embed_reg: A scalar. The regularizer of embedding.
        """
        super(BPR, self).__init__()
        # feature columns
        self.user_fea_col, self.item_fea_col = feature_columns
        # mode
        self.mode = mode
        # user embedding
        self.user_embedding = Embedding(input_dim=self.user_fea_col['feat_num'],
                                        input_length=1,
                                        output_dim=self.user_fea_col['embed_dim'],
                                        mask_zero=False,
                                        embeddings_initializer='random_normal',
                                        embeddings_regularizer=l2(embed_reg))
        # item embedding
        # NOTE(review): mask_zero=True here (unlike the user embedding) masks
        # item id 0 -- presumably reserved as padding; confirm item ids start at 1.
        self.item_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
                                        input_length=1,
                                        output_dim=self.item_fea_col['embed_dim'],
                                        mask_zero=True,
                                        embeddings_initializer='random_normal',
                                        embeddings_regularizer=l2(embed_reg))

    def call(self, inputs):
        """Score a batch of (user, pos item, neg item) triplets.

        Adds the pairwise ranking loss to the model and returns the positive
        and negative scores concatenated along the last axis.
        """
        user_inputs, pos_inputs, neg_inputs = inputs  # (None, 1), (None, 1)
        # user info
        user_embed = self.user_embedding(user_inputs)  # (None, 1, dim)
        # item
        pos_embed = self.item_embedding(pos_inputs)  # (None, 1, dim)
        neg_embed = self.item_embedding(neg_inputs)  # (None, 1, dim)
        if self.mode == 'inner':
            # calculate positive item scores and negative item scores (higher = better)
            pos_scores = tf.reduce_sum(tf.multiply(user_embed, pos_embed), axis=-1)  # (None, 1)
            neg_scores = tf.reduce_sum(tf.multiply(user_embed, neg_embed), axis=-1)  # (None, 1)
            # add loss. Computes softplus: log(exp(features) + 1)
            # self.add_loss(tf.reduce_mean(tf.math.softplus(neg_scores - pos_scores)))
            # classic BPR loss: -log sigmoid(pos_score - neg_score)
            self.add_loss(tf.reduce_mean(-tf.math.log(tf.nn.sigmoid(pos_scores - neg_scores))))
        else:
            # clip by norm
            # user_embed = tf.clip_by_norm(user_embed, 1, -1)
            # pos_embed = tf.clip_by_norm(pos_embed, 1, -1)
            # neg_embed = tf.clip_by_norm(neg_embed, 1, -1)
            # 'dist' mode: squared distances (lower = better) with a hinge loss
            # of margin 0.5
            pos_scores = tf.reduce_sum(tf.square(user_embed - pos_embed), axis=-1)
            neg_scores = tf.reduce_sum(tf.square(user_embed - neg_embed), axis=-1)
            self.add_loss(tf.reduce_sum(tf.nn.relu(pos_scores - neg_scores + 0.5)))
        logits = tf.concat([pos_scores, neg_scores], axis=-1)
        return logits

    def summary(self):
        """Print a Keras summary by tracing the model with symbolic inputs."""
        user_inputs = Input(shape=(1, ), dtype=tf.int32)
        pos_inputs = Input(shape=(1, ), dtype=tf.int32)
        neg_inputs = Input(shape=(1, ), dtype=tf.int32)
        Model(inputs=[user_inputs, pos_inputs, neg_inputs],
              outputs=self.call([user_inputs, pos_inputs, neg_inputs])).summary()
def test_model():
    """Smoke test: build a tiny BPR model and print its architecture summary."""
    feature_cols = [
        {'feat': 'user_id', 'feat_num': 100, 'embed_dim': 8},  # user side
        {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8},  # item side
    ]
    BPR(feature_cols).summary()
def evaluate_model(model, test, K):
    """Evaluate ranking quality on the test set.

    :param model: trained BPR model (its `mode` decides score orientation)
    :param test: test set as expected by `model.predict`
    :param K: top-K cutoff
    :return: (hit rate @ K, NDCG @ K)
    """
    scores = model.predict(test)
    if model.mode == 'inner':
        # inner-product scores: higher is better, so negate before ranking
        scores = -scores
    # rank (0-based) of the positive item -- column 0 -- within each row
    rank = scores.argsort().argsort()[:, 0]
    hits = [r for r in rank if r < K]
    hit_rate = len(hits) / len(rank)
    ndcg = sum(1 / np.log2(r + 2) for r in hits) / len(rank)
    return hit_rate, ndcg
# ========================== Create dataset =======================
feature_columns, train, val, test = create_ml_1m_dataset(file, trans_score, embed_dim, test_neg_num)
# ============================Build Model==========================
# Build under a MirroredStrategy scope so training uses all visible GPUs
# with synchronous data parallelism.
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
    model = BPR(feature_columns, mode, embed_reg)
    model.summary()
    # =========================Compile============================
    # No explicit loss here: the BPR ranking loss is registered inside
    # model.call() via self.add_loss.
    model.compile(optimizer=Adam(learning_rate=learning_rate))

results = []
for epoch in range(1, epochs + 1):
    # ===========================Fit==============================
    t1 = time()
    model.fit(
        train,
        None,  # targets are None -- the loss comes from add_loss
        validation_data=(val, None),
        epochs=1,
        batch_size=batch_size,
    )
    # ===========================Test==============================
    t2 = time()
    if epoch % 5 == 0:  # evaluate every 5 epochs only (evaluation is slow)
        hit_rate, ndcg = evaluate_model(model, test, K)
        print('Iteration %d Fit [%.1f s], Evaluate [%.1f s]: HR = %.4f, NDCG = %.4f'
              % (epoch, t2 - t1, time() - t2, hit_rate, ndcg))
        results.append([epoch, t2 - t1, time() - t2, hit_rate, ndcg])
# ========================== Write Log ===========================
pd.DataFrame(results, columns=['Iteration', 'fit_time', 'evaluate_time', 'hit_rate', 'ndcg'])\
    .to_csv('BPR_log_dim_{}_mode_{}_K_{}.csv'.format(embed_dim, mode, K), index=False)
| github_jupyter |
## 1: Import packages and Load data
```
# Standard analysis stack plus Colab Drive access for the raw sales CSVs.
import pandas as pd
import os
import matplotlib.pyplot as plt
from google.colab import drive
# Mount Google Drive so the monthly sales files are reachable under /content/drive.
drive.mount('/content/drive')
# Load one month's file first to inspect the schema before merging all months.
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/CoTAI/Data Science Internship CoTAI 2021/Sales Analysis/Data/sales2019_3.csv')
df.head()
df
```
## 2: Clean and Preprocess data
### 2.1: Merge 12-month data
```
path = '/content/drive/My Drive/Colab Notebooks/CoTAI/Data Science Internship CoTAI 2021/Sales Analysis/Data/'
frames = [] # List of the monthly dataframes
all_length = []
# Read every CSV in the data folder and collect it for the merge.
for file in os.listdir(path): # Iterate over every entry in the data folder
if file.endswith('.csv'): # Keep only .csv files
filepath = path + file
df1 = pd.read_csv(filepath)
frames.append(df1)
# NOTE(review): the export lost indentation; if concat runs inside the loop it
# is recomputed each iteration — moving it after the loop would be cheaper.
result = pd.concat(frames) # Merge all dataframes into a master dataframe
length_1month = len(df1.index)
all_length.append(length_1month) # Record each file's row count to cross-check the merge
result.to_csv('annualSales2019.csv', index=False) # Save the original data, the first Index column is unnecessary so it can be removed
df = result
df
# Sum of per-file row counts; should match len(df).
print(sum(all_length))
```
The sum of all_length matches the total number of rows in the master data frame, confirming that the merge kept every row.
### 2.2: Add 'Month' column
The Order Date column uses the format mm/dd/yy, so only its first two characters are needed to extract the month.
```
df['Month'] = df['Order Date'].str[0:2]
df.head()
```
### 2.3: Get rid of 'NaN' and 'Or' value
Print all unique values in Month by using set
```
print(set(df['Month']))
```
Now remove Nan and Or
```
df = df.dropna(how='all') # Remove rows that are entirely NaN
df = df[df['Month'] != 'Or'] # Keep rows of Month with values different from 'Or' (repeated header rows)
df
```
## 3: Reporting
### 3.1: What was the best month for Sales? How much was earned on that month?
To calculate or multiply successfully, let's check the types of 2 needed variables Quantity Ordered and Price Each.
```
print(df['Quantity Ordered'].dtypes)
print(df['Price Each'].dtypes)
```
Convert the object type into integer and float types
```
df['Quantity Ordered'] = pd.to_numeric(df['Quantity Ordered'], downcast='integer')
df['Price Each'] = pd.to_numeric(df['Price Each'], downcast='float')
```
Let's check their types again
```
print(df['Quantity Ordered'].dtypes)
print(df['Price Each'].dtypes)
```
Create a new Sales column from Quantity Ordered and Price each
```
df['Sales'] = df['Quantity Ordered'] * df['Price Each']
df.head()
print(df['Sales'].dtypes)
```
Sales is now visible in the master data frame, but it is placed in the last column, where it can be hard to see or find. We therefore pop it into a temporary variable 'moving_column' and re-insert it into the master data frame at index 4.
```
moving_column = df.pop('Sales')
df.insert(4, 'Sales', moving_column)
df.head()
```
Filter data grouped by Month with each month's total Sales
```
# Select the Sales column before aggregating: summing the whole frame would
# also try to "sum" the string columns (and raises on modern pandas).
sales_value = df.groupby('Month')['Sales'].sum()
sales_value
months = range(1, 13)
months
```
Find the highest Sales value of 12 months
```
sales_value.max()
```
Months will have 12 bar charts on X axis. Sales values will be on Y axis.
```
plt.bar(x=months, height=sales_value)
plt.xticks(months)
plt.xlabel('Months')
plt.ylabel('Sales in USD')
plt.show()
```
It can be easily seen that December had the highest Sales Value at US$ 4,613,443.5.
To dive deeper into why December had the highest Sales Value. We can analyze further by the following hypotheses.
- Holiday season (for example Christmas, New Year, etc.) might affect and have a positive correlation with the highest sales of the year, namely in December.
- Electronics corporations launch new products in Quarter 4 so people tend to spend more in December.
### 3.2: Which city had the best Sales of the year?
Create a new column City. First, we have to get the city name from Purchase Address which is between 2 commas. Then we will split each of its value by comma, and get the values of the city name at index 1.
```
address_to_city = lambda address:address.split(',')[1]
address_to_city
```
After that, we will apply this method to all values of Purchase Address and create a new variable City to contain all new values.
```
df['City'] = df['Purchase Address'].apply(address_to_city)
df.head()
```
We will use the previous method of grouping the master data frame by City with each city's total sales value.
```
df.groupby('City').sum()['Sales']
```
Find the best sales by City
```
# Select the Sales column before aggregating; summing the whole frame would
# also concatenate the string columns (and raises on modern pandas).
sales_value_city = df.groupby('City')['Sales'].sum()
sales_value_city.max()
```
Print all unique city names
```
cities = df['City'].unique()
print(cities)
plt.bar(x=cities, height=sales_value_city)
plt.xticks(cities)
plt.xlabel('Cities')
plt.ylabel('Sales in USD')
plt.show()
```
At this stage, we can visualize City by Sales. However, we can enhance its format for a better look.
```
plt.bar(x=cities, height=sales_value_city)
plt.xticks(cities, rotation=90, size=8)
plt.xlabel('Cities')
plt.ylabel('Sales in USD')
plt.show()
```
To make sure that the order of cities aligns with that of sales_value_city. Let's print them out.
```
print(cities)
print(sales_value_city)
```
Their city values do not align with each other. We now have to use the order of sales_value_city to get the correct order. And we will apply list comprehension to cities from sales_value_city.
```
# Take the city labels directly from the aggregated series' index so the
# bar labels are guaranteed to be in the same order as the Sales values.
cities = list(sales_value_city.index)
plt.bar(x=cities, height=sales_value_city)
plt.xticks(cities, rotation=90, size=8)
plt.xlabel('Cities')
plt.ylabel('Sales in USD')
plt.show()
```
It now shows accurately that San Francisco had the highest Sales at US$ 8,262,204.
To dive deeper into why San Francisco has the highest sales value, we can analyze further with the following hypotheses.
- Silicon Valley is located in the San Francisco Bay Area.
- There is a high density of well-paid engineers in San Francisco, so they tend to spend more in this city, especially on hi-tech products.
### 3.3: When was the most efficient time to display ads to maximize/optimize the possibility of customers' decisions to buy products?
```
print(df['Order Date'].dtypes)
```
Convert Order Date into the datetime type, which has many built-in functions that make the following steps faster and easier to code.
```
df['Order Date'] = pd.to_datetime(df['Order Date'])
print(df['Order Date'].dtypes)
```
We just need only the hour values from Order Date and create a new variable Hours to contain these values.
```
df['Hours'] = df['Order Date'].dt.hour
df.head()
```
We repeat the same method as the previous question.
```
sales_value_hours = df.groupby('Hours').sum()['Sales']
hours = [hour for hour, sales in sales_value_hours.items()]
plt.plot(hours, sales_value_hours)
plt.grid()
plt.xticks(hours, rotation=90, size=8)
plt.xlabel('Hours')
plt.ylabel('Sales in USD')
plt.show()
```
We are using sum on Sales, which gives the total revenue within each hour — not the total number of orders within each hour. Since the goal is to find the best hour to maximize the number of orders, counting orders per hour makes more sense.
So for this chart, we will visualize by using count instead.
```
# Count the number of orders per hour (any non-null column works as the
# counting target; Sales is used here).
sales_value_hours = df.groupby('Hours')['Sales'].count()
hours = [hour for hour, sales in sales_value_hours.items()]
plt.plot(hours, sales_value_hours)
plt.grid()
plt.xticks(hours, rotation=90, size=8)
plt.xlabel('Hours')
plt.ylabel('Number of Orders')  # this chart counts orders, not revenue in USD
plt.show()
```
After using count, the illustration of the line chart does not change much. But the most important thing here is to understand the question and our data.
The peak of placing Orders was at 11 AM and 12 PM (during lunch break), and at 7 PM, when people got home after work. Therefore, running an ad within 30-60 minutes before these two periods can be recommended to get the most viewers.
```
# NOTE(review): this cell duplicates the analysis in section 3.4 below.
# Keep only rows whose Order ID appears more than once (multi-item orders).
df_dup = df[df['Order ID'].duplicated(keep=False)]
# Join all products that share an Order ID into one comma-separated string.
groupProduct = lambda product: ', '.join(product)
# transform() returns a Series aligned with df_dup (apply() would not).
# NOTE(review): assigning into a filtered frame may raise SettingWithCopyWarning.
df_dup['All Products'] = df_dup.groupby('Order ID')['Product'].transform(groupProduct)
df_dup = df_dup[['Order ID', 'All Products']].drop_duplicates()
df_dup['All Products'].value_counts().head(10)
```
### 3.4: Which products were most sold together?
First, we will find items ordered on the same day, at the same time and/or with the same Order ID to obtain the items of the same Order. Now, we find the duplicated Item.
```
df_dup = df[df['Order ID'].duplicated(keep=False)]
df_dup.head()
```
We group all rows by Order ID and join all rows of Product by ','.
If using pandas apply(), the first parameter must be a function, then we will create a new function by using lambda to get all product values into groupProduct.
```
groupProduct = lambda product: ', '.join(product)
```
However, pandas apply() returns a DataFrame while we need a return of Series to input to a new column All Products. Then, we use pandas transform() instead to return/obtain a Series.
```
df_dup['All Products'] = df_dup.groupby('Order ID')['Product'].transform(groupProduct)
df_dup.head()
```
Since we don't need to use other variables, except Order ID and All Products, we will delete the rest of the unnecessary variables and duplicated rows.
```
df_dup = df_dup[['Order ID', 'All Products']].drop_duplicates()
df_dup
```
Print the top 10 of best-selling products by using value_counts()
```
df_dup['All Products'].value_counts().head(10)
```
It returns the top-selling products which were sold at the same time within the same order. Then we can identify the best sellers and the other less sold items. Companies can sell their products in a combo to push sales for both best sellers and the less common items. Or they can place promotion on a combo while keeping the regular price of their single purchased product to raise demand for buying a combo rather than a single item.
### 3.5: Which product was sold the most? Why do you think it was the best-selling product?
This is simply handled by grouping the data frame by Product with each product's total Quantity Ordered.
```
all_products = df.groupby('Product').sum()['Quantity Ordered']
all_products
```
We repeat the same method on prices and products list.
```
prices = df.groupby('Product').mean()['Price Each']
prices
products_ls = [product for product, quant in all_products.items()]
products_ls
x = products_ls
y1 = all_products
y2 = prices
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.bar(x, y1, color='g')
ax2.plot(x, y2, 'b-')
ax1.set_xticklabels(products_ls, rotation=90, size=8)
ax1.set_xlabel('Products')
ax1.set_ylabel('Quantity Ordered', color='g')
ax2.set_ylabel('Price Each', color='b')
plt.show()
```
Instead of plotting on bar charts of Products and Quantity Order, we will add a line chart of Price of each product to demonstrate the correlation between Products, Quantity and Price.
Look at the plot, we can see a correlation between the Products' Prices and their Volumes. The AA and AAA Batteries have the highest Volumes Ordered while other pricey products have both low quantities and revenues.
As we do not have other data of other factors or criteria such as brand quality to research and analyze further, so far that is the sum-up of our conclusion.
```
```
| github_jupyter |
# Step1: Create the Python Script
In the cell below, you will need to complete the Python script and run the cell to generate the file using the magic `%%writefile` command. Your main task is to complete the following methods for the `PersonDetect` class:
* `load_model`
* `predict`
* `draw_outputs`
* `preprocess_outputs`
* `preprocess_inputs`
For your reference, here are all the arguments used for the argument parser in the command line:
* `--model`: The file path of the pre-trained IR model, which has been pre-processed using the model optimizer. There is automated support built in this argument to support both FP32 and FP16 models targeting different hardware.
* `--device`: The type of hardware you want to load the model on (CPU, GPU, MYRIAD, HETERO:FPGA,CPU)
* `--video`: The file path of the input video.
* `--output_path`: The location where the output stats and video file with inference needs to be stored (results/[device]).
* `--max_people`: The max number of people in queue before directing a person to another queue.
* `--threshold`: The probability threshold value for the person detection. Optional arg; default value is 0.60.
```
%%writefile person_detect.py
import numpy as np
import time
from openvino.inference_engine import IENetwork, IECore
import os
import cv2
import argparse
import sys
class Queue:
    '''
    Tracks rectangular queue regions and assigns person detections to them.
    '''
    def __init__(self):
        # Each entry is an (x_min, y_min, x_max, y_max) rectangle.
        self.queues = []

    def add_queue(self, points):
        """Register one queue region given as (x_min, y_min, x_max, y_max)."""
        self.queues.append(points)

    def get_queues(self, image):
        """Yield the crop of *image* for every registered queue region."""
        for x_min, y_min, x_max, y_max in self.queues:
            yield image[y_min:y_max, x_min:x_max]

    def check_coords(self, coords):
        """Count detections per queue.

        A detection box belongs to queue *i* when it fits horizontally inside
        that queue's region (its left edge right of the queue's left edge and
        its right edge left of the queue's right edge).
        Returns a dict mapping 1-based queue index to the person count.
        """
        counts = {idx + 1: 0 for idx in range(len(self.queues))}
        for box in coords:
            for idx, region in enumerate(self.queues):
                if box[0] > region[0] and box[2] < region[2]:
                    counts[idx + 1] += 1
        return counts
class PersonDetect:
'''
Class for the Person Detection Model.

Wraps an OpenVINO IR model (``.xml`` structure + ``.bin`` weights); the
inference methods are intentionally left as TODO stubs for the exercise.
'''
def __init__(self, model_name, device, threshold=0.60):
# model_name is the IR path without extension; the .bin/.xml pair sits beside it.
self.model_weights=model_name+'.bin'
self.model_structure=model_name+'.xml'
self.device=device
# Minimum detection confidence used when filtering predictions.
self.threshold=threshold
try:
self.model=IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
# Cache the single input/output tensor names and shapes for later pre/post-processing.
self.input_name=next(iter(self.model.inputs))
self.input_shape=self.model.inputs[self.input_name].shape
self.output_name=next(iter(self.model.outputs))
self.output_shape=self.model.outputs[self.output_name].shape
def load_model(self):
'''
TODO: This method needs to be completed by you
(load the network onto self.device via IECore and keep the exec network).
'''
raise NotImplementedError
def predict(self, image):
'''
TODO: This method needs to be completed by you
(run inference on one frame; return detection coords and the annotated image).
'''
raise NotImplementedError
def draw_outputs(self, coords, image):
'''
TODO: This method needs to be completed by you
(draw bounding boxes for the given coords onto the image).
'''
raise NotImplementedError
def preprocess_outputs(self, outputs):
'''
TODO: This method needs to be completed by you
(filter raw detections by self.threshold and scale to pixel coords).
'''
raise NotImplementedError
def preprocess_input(self, image):
'''
TODO: This method needs to be completed by you
(resize/transpose the frame to self.input_shape layout).
'''
raise NotImplementedError
def main(args):
"""Run person detection over a video, count people per queue, and write
the annotated video plus timing stats to args.output_path."""
model=args.model
device=args.device
video_file=args.video
max_people=args.max_people
threshold=args.threshold
output_path=args.output_path
# --- Model load (timed separately from inference) ---
start_model_load_time=time.time()
# NOTE(review): local name `pd` shadows the conventional pandas alias.
pd= PersonDetect(model, device, threshold)
pd.load_model()
total_model_load_time = time.time() - start_model_load_time
# --- Queue regions come from an .npy file of (x_min, y_min, x_max, y_max) rows ---
queue=Queue()
try:
queue_param=np.load(args.queue_param)
for q in queue_param:
queue.add_queue(q)
except:
# NOTE(review): bare except hides the real cause (missing file vs bad format).
print("error loading queue param file")
try:
cap=cv2.VideoCapture(video_file)
except FileNotFoundError:
print("Cannot locate video file: "+ video_file)
except Exception as e:
print("Something else went wrong with the video file: ", e)
# Video geometry for the writer; fps matches the source stream.
initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
out_video = cv2.VideoWriter(os.path.join(output_path, 'output_video.mp4'), cv2.VideoWriter_fourcc(*'avc1'), fps, (initial_w, initial_h), True)
counter=0
start_inference_time=time.time()
try:
# --- Frame loop: detect people, assign them to queues, annotate the frame ---
while cap.isOpened():
ret, frame=cap.read()
if not ret:
break
counter+=1
coords, image= pd.predict(frame)
num_people= queue.check_coords(coords)
print(f"Total People in frame = {len(coords)}")
print(f"Number of people in queue = {num_people}")
out_text=""
y_pixel=25
# One text line per queue, stacked vertically 40px apart.
for k, v in num_people.items():
out_text += f"No. of People in Queue {k} is {v} "
if v >= int(max_people):
out_text += f" Queue full; Please move to next Queue "
cv2.putText(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
out_text=""
y_pixel+=40
out_video.write(image)
# --- Stats: total inference time, effective fps, and model load time ---
total_time=time.time()-start_inference_time
total_inference_time=round(total_time, 1)
fps=counter/total_inference_time
with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
f.write(str(total_inference_time)+'\n')
f.write(str(fps)+'\n')
f.write(str(total_model_load_time)+'\n')
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print("Could not run Inference: ", e)
if __name__=='__main__':
    parser=argparse.ArgumentParser()
    parser.add_argument('--model', required=True)
    parser.add_argument('--device', default='CPU')
    parser.add_argument('--video', default=None)
    parser.add_argument('--queue_param', default=None)
    parser.add_argument('--output_path', default='/results')
    # Parse numeric arguments as numbers: without an explicit type=, values
    # supplied on the command line arrive as strings, so a CLI-passed
    # --threshold would be compared against float confidences as a str.
    parser.add_argument('--max_people', type=int, default=2)
    parser.add_argument('--threshold', type=float, default=0.60)
    args=parser.parse_args()
    main(args)
```
# Next Step
Now that you've run the above cell and created your Python script, you will create your job submission shell script in the next workspace.
**Note**: As a reminder, if you need to make any changes to the Python script, you can come back to this workspace to edit and run the above cell to overwrite the file with your changes.
| github_jupyter |
```
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
import shutil
import PIL
import pandas as pd
from libtiff import TIFF
import numpy as np
import re
from tifffile import tifffile
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.applications import ResNet50
from keras.utils import to_categorical
import os
import numpy as np
from tqdm import tqdm
from tifffile import imread, imwrite
# Dataset locations on the mounted drive.
DATA_DIR= "/home/fatima_tuz_zehra/Dataset/eurosat-all"
model_dir = "/home/fatima_tuz_zehra/Dataset/euro-rgb-new"
ms_dir = "/home/fatima_tuz_zehra/Dataset/euro-all-assets"
data_path = "/home/fatima_tuz_zehra/Dataset"
#ds_sizes = {label: len(os.listdir(os.path.join(DATA_DIR,label))) for label in classes}
# Create the output folder tree: one subfolder per land-use class.
# NOTE(review): `classes` is not defined in the visible cells — it must be
# set in an earlier cell; confirm before re-running this notebook.
if not os.path.isdir(os.path.join(data_path,'ms_and_ind')):
os.mkdir(os.path.join(data_path,'ms_and_ind'))
for land_class in classes:
class_path = os.path.join(os.path.join(data_path,'ms_and_ind'), land_class)
if not os.path.isdir(class_path):
os.mkdir(class_path)
# Keep 10 of the 13 bands: indices 0, 11 and 12 are dropped.
reject = [0, 11, 12]
accept = [i for i in range(13) if i not in reject]
# Re-save every image with only the accepted bands (first axis is the band axis
# here, since img[accept] selects along axis 0).
for land_class in tqdm(classes):
class_path = os.path.join(DATA_DIR, land_class)
image_fps = [os.path.join(class_path, img_name) for img_name in os.listdir(class_path)]
for path in tqdm(image_fps):
img = imread(path)
req_bands = img[accept]
# indices = get_indices(img)
#result = np.vstack((req_bands, indices))
# NOTE(review): res_path is relative to the CWD, while the class folders were
# created under data_path — confirm the notebook's working dir is data_path.
res_path = os.path.join('ms_and_ind', land_class, path.split('/')[-1])
imwrite(res_path, req_bands)
print(tf.test.is_gpu_available())
path = os.path.join(data_path, "ms_and_ind")
#path = "/home/fatima_tuz_zehra/Dataset/eurosat-all"
labels = os.listdir(path)
labels
# Build a {filepath: label} mapping, then persist an 80/20 split as CSVs so
# the generator can stream images by path later.
data = dict()
for label in labels:
# NOTE(review): `labels` was listed from the ms_and_ind folder but files are
# enumerated from DATA_DIR — verify both trees share the same class names.
imgs = os.listdir(os.path.join(DATA_DIR,label))
for img in imgs:
data[os.path.join(DATA_DIR,label,img)] = label
# Column 0 = filepath, column 1 = class label.
data = pd.DataFrame(data.items())
y = data[1]
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=42,shuffle=True)
X_train.to_csv(os.path.join(ms_dir,"train.csv"),index = False)
X_test.to_csv(os.path.join(ms_dir,"test.csv"),index = False)
tr = pd.read_csv(os.path.join(ms_dir,"train.csv"))
ts = pd.read_csv(os.path.join(ms_dir,"test.csv"))
tr[0:6]['0']
for each in tr[0:6]['0']:
print(each)
def data_generator(file = "train", batch_size = 32):
"""Yield ([batch_images], one_hot_labels) batches read from the split CSVs.

file: "train" or anything else for the test split
batch_size: rows per yielded batch; the final batch may be smaller

Images are loaded per path from column '0', moved to channels-last, and the
labels in column '1' are one-hot encoded with get_dummies.
"""
batch_Y= []
idx = 0
if file == "train":
df = pd.read_csv(os.path.join(ms_dir,"train.csv"))
y = pd.get_dummies(df['1'])
else:
df = pd.read_csv(os.path.join(ms_dir,"test.csv"))
y = pd.get_dummies(df['1'])
num_records = len(df)
# i acts as a loop flag: set to -1 once the last (possibly partial) batch
# has been sliced, ending the generator after that yield.
i = 0
while i != -1:
batch_X = []
idx += batch_size
if idx < len(df)-1:
samples = df[idx-batch_size:idx]['0']
batch_Y = y[idx-batch_size:idx]
else:
i = -1
samples = df[idx-batch_size:len(df)]['0']
batch_Y = y[idx-batch_size:len(df)]
for sample in samples:
image = tifffile.imread(sample)
# Stored band-first; Keras expects channels-last.
x = np.moveaxis(image,0,-1)
batch_X.append(x)
yield [np.array(batch_X)], batch_Y.to_numpy()
return
def get_model(lr, tune=None):
    """Build a ResNet50 classifier for 10-channel 64x64 inputs.

    lr: learning rate for the Adam optimizer
    tune: pass 0 to keep the whole backbone trainable; any other value
        (including the default None) freezes the backbone layers
    """
    backbone = ResNet50(include_top=False,
                        weights=None,
                        input_shape=(64, 64, 10))
    head = Flatten()(backbone.output)
    head = Dense(2048, activation='relu')(head)
    head = Dropout(0.3)(head)
    predictions = Dense(10, activation='softmax')(head)
    net = Model(inputs=backbone.input, outputs=predictions)
    # NOTE(review): tune == 0 unfreezes the backbone; everything else freezes it.
    backbone_trainable = (tune == 0)
    for layer in backbone.layers:
        layer.trainable = backbone_trainable
    net.compile(optimizer=Adam(lr=lr), loss='categorical_crossentropy',
                metrics=['categorical_accuracy'])
    return net
# Transfer weights from a 3-channel (RGB) pretrained model into the new
# 10-channel model; only the first conv layer (index 2) differs in shape.
rgb_model = tf.keras.models.load_model(os.path.join(model_dir,"best.hdf5"))
model = get_model(lr = 1e-3,tune=0)
# Inspect both models' layer weight shapes to confirm only layer 2 differs.
for layer in rgb_model.layers:
print(layer)
weights = layer.get_weights()
for i in range(len(weights)):
print(weights[i].shape)
for layer in model.layers:
print(layer)
weights = layer.get_weights()
for i in range(len(weights)):
print(weights[i].shape)
# Kernel of the first conv: (h, w, in_channels, filters).
channel_weights = rgb_model.layers[2].get_weights()
diff_w = channel_weights[0]
multi_channel_weights = model.layers[2].get_weights()
rep_w = multi_channel_weights[0]
# Copy RGB kernels into channels 0-2; replicate channel 0's kernel
# into the 7 extra multispectral channels.
# NOTE(review): channel 0 here is whatever band the RGB model saw first —
# presumably red; confirm against the RGB training pipeline.
for i in range(10):
if i < 3 :
rep_w[:,:,i,:] = diff_w[:,:,i,:]
else:
rep_w[:,:,i,:] = diff_w[:,:,0,:]
# Reuse the RGB model's bias vector unchanged.
rep_single = rgb_model.layers[2].get_weights()[1]
arr_weights = [rep_w, rep_single]
# Copy every layer's weights; layer 2 gets the adapted kernel/bias pair.
for i in range(len(model.layers)):
if i == 2:
model.layers[i].set_weights(arr_weights)
else:
model.layers[i].set_weights(rgb_model.layers[i].get_weights())
model.save(os.path.join(model_dir,"loaded-ms.hdf5"))
pretrained = tf.keras.models.load_model(os.path.join(model_dir,"loaded-ms.hdf5"))
# Keep only the best checkpoint by validation accuracy.
checkpoint = ModelCheckpoint(filepath = ms_dir + '/all-bands.hdf5',
monitor='val_categorical_accuracy',
save_best_only=True,
verbose=1)
# Generators are exhausted after one pass, so they are recreated every epoch
# and fit() is called with epochs=1 inside the loop.
for i in range(10):
train_gen = data_generator(file = "train", batch_size = 32)
test_gen = data_generator(file = "test", batch_size = 16)
pretrained.fit(train_gen, steps_per_epoch=len(tr)//32, epochs=1, validation_data=test_gen,validation_steps=len(ts)//16,callbacks=[checkpoint])
for i in range(3):
train_gen = data_generator(file = "train", batch_size = 32)
test_gen = data_generator(file = "test", batch_size = 16)
pretrained.fit(train_gen, steps_per_epoch=len(tr)//32, epochs=1, validation_data=test_gen,validation_steps=len(ts)//16,callbacks=[checkpoint])
# Reload the best checkpoint and fine-tune at a lower learning rate.
pretrained.load_weights(ms_dir + '/all-bands.hdf5')
pretrained.optimizer.lr = 0.0001
for i in range(10):
train_gen = data_generator(file = "train", batch_size = 32)
test_gen = data_generator(file = "test", batch_size = 16)
pretrained.fit(train_gen, steps_per_epoch=len(tr)//32, epochs=1, validation_data=test_gen,validation_steps=len(ts)//16,callbacks=[checkpoint])
```
| github_jupyter |
# Think Bayes
Second Edition
Copyright 2020 Allen B. Downey
License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
```
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py and create directories
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py
if not os.path.exists('figs'):
!mkdir figs
if not os.path.exists('tables'):
!mkdir tables
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from empiricaldist import Pmf
from utils import decorate, savefig
```
## The Euro Problem
In *Information Theory, Inference, and Learning Algorithms*, David MacKay poses this problem:
"A statistical statement appeared in *The Guardian* on Friday January 4, 2002:
>When spun on edge 250 times, a Belgian one-euro coin came
up heads 140 times and tails 110. 'It looks very suspicious
to me,' said Barry Blight, a statistics lecturer at the London
School of Economics. 'If the coin were unbiased, the chance of
getting a result as extreme as that would be less than 7\%.'
"But [MacKay asks] do these data give evidence that the coin is biased rather than fair?"
To answer that question, we'll proceed in two steps.
First we'll use the binomial distribution to see where that 7% came from; then we'll use Bayes's Theorem to estimate the probability that this coin comes up heads.
## The binomial distribution
Suppose I tell you that a coin is "fair", that is, the probability of heads is 50%. If you spin it twice, there are four outcomes: `HH`, `HT`, `TH`, and `TT`. All four outcomes have the same probability, 25%.
If we add up the total number of heads, there are three possible outcomes: 0, 1, or 2. The probability of 0 and 2 is 25%, and the probability of 1 is 50%.
More generally, suppose the probability of heads is `p` and we spin the coin `n` times. What is the probability that we get a total of `k` heads?
The answer is given by the binomial distribution:
$P(k; n, p) = \binom{n}{k} p^k (1-p)^{n-k}$
where $\binom{n}{k}$ is the [binomial coefficient](https://en.wikipedia.org/wiki/Binomial_coefficient), usually pronounced "n choose k".
We can compute the binomial distribution ourselves, but we can also use the SciPy function `binom.pmf`:
```
from scipy.stats import binom
n = 2
p = 0.5
ks = np.arange(n+1)
a = binom.pmf(ks, n, p)
a
```
If we put this array in a `Pmf`, the result is the distribution of `k` for the given values of `n` and `p`.
```
pmf_k = Pmf(a, ks)
pmf_k
from utils import write_pmf
write_pmf(pmf_k, 'table03-01')
```
The following function computes the binomial distribution for given values of `n` and `p`:
```
def make_binomial(n, p):
    """Make a binomial PMF.

    n: number of spins
    p: probability of heads

    returns: Pmf representing the distribution of the number of heads
    """
    support = np.arange(n + 1)
    probs = binom.pmf(support, n, p)
    return Pmf(probs, support)
```
And here's what it looks like with `n=250` and `p=0.5`:
```
pmf_k = make_binomial(n=250, p=0.5)
pmf_k.plot(label='n=250, p=0.5')
decorate(xlabel='Number of heads (k)',
ylabel='PMF',
title='Binomial distribution')
savefig('fig03-01')
```
The most likely value in this distribution is 125:
```
pmf_k.max_prob()
```
But even though it is the most likely value, the probability that we get exactly 125 heads is only about 5%.
```
pmf_k[125]
```
In MacKay's example, we got 140 heads, which is less likely than 125:
```
pmf_k[140]
```
In the article MacKay quotes, the statistician says, ‘If the coin were unbiased the chance of getting a result as extreme as that would be less than 7%’.
We can use the binomial distribution to check his math. The following function takes a PMF and computes the total probability of values greater than or equal to `threshold`.
```
def ge_dist(pmf, threshold):
    """Total probability of values greater than or equal to a threshold.

    pmf: Series representing a PMF (index holds the quantities)
    threshold: value to compare to

    returns: probability
    """
    at_or_above = pmf.index >= threshold
    return pmf[at_or_above].sum()
```
Here's the probability of getting 140 heads or more:
```
ge_dist(pmf_k, 140)
```
`Pmf` provides a method that does the same computation.
```
pmf_k.ge_dist(140)
```
The result is about 3.3%, which is less than 7%. The reason is that the statistician includes all values "as extreme as" 140, which includes values less than or equal to 110, because 140 exceeds the expected value by 15 and 110 falls short by 15.
```
pmf_k.le_dist(110)
```
The probability of values less than or equal to 110 is also 3.3%,
so the total probability of values "as extreme" as 140 is 6.6%.
The point of this calculation is that these extreme values are unlikely if the coin is fair.
That's interesting, but it doesn't answer MacKay's question. Let's see if we can.
## The Euro problem
Any given coin has some probability of landing heads up when spun
on edge; I'll call this probability `x`.
It seems reasonable to believe that `x` depends
on physical characteristics of the coin, like the distribution
of weight.
If a coin is perfectly balanced, we expect `x` to be close to 50%, but
for a lopsided coin, `x` might be substantially different. We can use
Bayes's theorem and the observed data to estimate `x`.
For simplicity, I'll start with a uniform prior, which assume that all values of `x` are equally likely.
That might not be a reasonable assumption, so we'll come back and consider other priors later.
We can make a uniform prior like this:
```
hypos = np.linspace(0, 1, 101)
prior = Pmf(1, hypos)
```
I'll use a dictionary to store the likelihoods for `H` and `T`:
```
likelihood = {
'H': hypos,
'T': 1 - hypos
}
```
I'll use a string to represent the dataset:
```
dataset = 'H' * 140 + 'T' * 110
```
The following function does the update.
```
def update_euro(pmf, dataset):
    """Update the prior in place with a sequence of coin-spin outcomes.

    pmf: Pmf representing the prior
    dataset: iterable of 'H'/'T' outcomes; each is looked up in the
        module-level `likelihood` dict
    """
    for outcome in dataset:
        pmf *= likelihood[outcome]
    # Normalize once at the end rather than per outcome.
    pmf.normalize()
```
And here's how we use it.
```
posterior = prior.copy()
update_euro(posterior, dataset)
```
Here's what the posterior looks like.
```
def decorate_euro(title):
"""Label the axes for a Euro-problem plot with the given title."""
decorate(xlabel='Proportion of heads (x)',
ylabel='Probability',
title=title)
# Plot the posterior from the 140-heads / 250-spins update above.
posterior.plot(label='140 heads out of 250')
decorate_euro(title='Posterior distribution of x')
savefig('fig03-02')
```
The peak of the posterior is at 56%, which is the proportion of heads in the dataset.
```
posterior.max_prob()
```
## Different priors
Let's see how that looks with different priors. Here's the uniform prior again.
```
uniform = Pmf(1, hypos, name='uniform')
uniform.normalize()
```
And here's a triangle-shaped prior.
```
ramp_up = np.arange(50)
ramp_down = np.arange(50, -1, -1)
a = np.append(ramp_up, ramp_down)
triangle = Pmf(a, hypos, name='triangle')
triangle.normalize()
```
Here's what they look like:
```
uniform.plot()
triangle.plot()
decorate_euro(title='Uniform and triangle prior distributions')
savefig('fig03-03')
```
If we update them both with the same data:
```
update_euro(uniform, dataset)
update_euro(triangle, dataset)
```
Here are the posteriors.
```
uniform.plot()
triangle.plot()
decorate_euro(title='Posterior distributions')
savefig('fig03-04')
```
The results are almost identical; the remaining difference is unlikely to matter in practice.
## The binomial likelihood function
We can make the Euro class more efficient by computing the likelihood of the entire dataset at once, rather than one coin toss at a time.
If the probability of heads is `p`, we can compute the probability of `k=140` heads in `n=250` tosses using the binomial PMF.
```
from scipy.stats import binom
def update_binomial(pmf, data):
    """Bayesian update of `pmf` from aggregated binomial data.

    pmf: Pmf representing the prior; modified in place and re-normalized.
    data: tuple (k, n) — k successes observed in n trials.

    The binomial PMF evaluated at each hypothesis in `pmf.qs` plays the
    role of the likelihood, replacing a per-toss loop with one update.
    """
    successes, trials = data
    hypotheses = pmf.qs
    pmf *= binom.pmf(successes, trials, hypotheses)
    pmf.normalize()
```
The data are represented with a tuple of values for `k` and `n`, rather than a long string of outcomes.
Here's the update.
```
uniform2 = Pmf(1, hypos, name='uniform2')
data = 140, 250
update_binomial(uniform2, data)
```
Here's what the posterior looks like.
```
uniform.plot()
uniform2.plot()
decorate_euro(title='Posterior distributions computed two ways')
```
The results are the same, within floating-point error.
```
np.max(np.abs(uniform-uniform2))
```
## Exercises
**Exercise:** In Major League Baseball, most players have a batting average between 200 and 330, which means that the probability of getting a hit is between 0.2 and 0.33.
Suppose a new player appearing in his first game gets 3 hits out of 3 attempts. What is the posterior distribution for his probability of getting a hit?
For this exercise, I will construct the prior distribution by starting with a uniform distribution and updating it with imaginary data until it has a shape that reflects my background knowledge of batting averages.
```
hypos = np.linspace(0.1, 0.4, 101)
prior = Pmf(1, hypos)
likelihood = {
'Y': hypos,
'N': 1-hypos
}
dataset = 'Y' * 25 + 'N' * 75
for data in dataset:
prior *= likelihood[data]
prior.normalize()
prior.plot(label='prior')
decorate(xlabel='Probability of getting a hit',
ylabel='PMF')
```
This distribution indicates that most players have a batting average near 250, with only a few players below 175 or above 350. I'm not sure how accurately this prior reflects the distribution of batting averages in Major League Baseball, but it is good enough for this exercise.
Now update this distribution with the data and plot the posterior. What is the most likely value in the posterior distribution?
```
# Solution
posterior = prior.copy()
for data in 'YYY':
posterior *= likelihood[data]
posterior.normalize()
# Solution
prior.plot(label='prior')
posterior.plot(label='posterior ')
decorate(xlabel='Probability of getting a hit',
ylabel='PMF')
# Solution
prior.max_prob()
# Solution
posterior.max_prob()
```
**Exercise:** Whenever you survey people about sensitive issues, you have to deal with [social desirability bias](https://en.wikipedia.org/wiki/Social_desirability_bias), which is the tendency of people to shade their answers to show themselves in the most positive light.
One of the ways to improve the accuracy of the results is [randomized response](https://en.wikipedia.org/wiki/Randomized_response).
As an example, suppose you ask 100 people to flip a coin and:
* If they get heads, they report YES.
* If they get tails, they honestly answer the question "Do you cheat on your taxes?"
And suppose you get 80 YESes and 20 NOs. Based on this data, what is the posterior distribution for the fraction of people who cheat on their taxes? What is the most likely value in the posterior distribution?
```
# Solution
hypos = np.linspace(0, 1, 101)
prior = Pmf(1, hypos)
# Solution
likelihood = {
'Y': 0.5 + hypos/2,
'N': (1-hypos)/2
}
# Solution
dataset = 'Y' * 80 + 'N' * 20
posterior = prior.copy()
for data in dataset:
posterior *= likelihood[data]
posterior.normalize()
# Solution
posterior.plot(label='80 YES, 20 NO')
decorate(xlabel='Proportion of cheaters',
ylabel='PMF')
# Solution
posterior.idxmax()
```
**Exercise:** Suppose that instead of observing coin spins directly, you measure the outcome using an instrument that is not always correct. Specifically, suppose the probability is `y=0.2` that an actual heads is reported
as tails, or actual tails reported as heads.
If we spin a coin 250 times and the instrument reports 140 heads, what is the posterior distribution of `x`?
What happens as you vary the value of `y`?
```
# Solution
def update_unreliable(pmf, dataset, y):
    """Update `pmf` with coin spins observed through an unreliable instrument.

    pmf: Pmf over the probability of heads x; modified in place.
    dataset: iterable of reported outcomes, 'H' or 'T'.
    y: probability that the instrument flips a reading (reports an actual
       head as tails, or an actual tail as heads).

    A reported 'H' arises either from a true head read correctly (prob 1-y)
    or a true tail misread (prob y); symmetrically for 'T'.
    """
    # Use the hypotheses carried by the pmf itself rather than the module
    # global `hypos`, matching update_binomial and making the function
    # correct even when pmf.qs differs from the global.
    xs = pmf.qs
    likelihood = {
        'H': (1 - y) * xs + y * (1 - xs),
        'T': y * xs + (1 - y) * (1 - xs),
    }
    for data in dataset:
        pmf *= likelihood[data]
    pmf.normalize()
# Solution
hypos = np.linspace(0, 1, 101)
prior = Pmf(1, hypos)
dataset = 'H' * 140 + 'T' * 110
posterior00 = prior.copy()
update_unreliable(posterior00, dataset, 0.0)
posterior02 = prior.copy()
update_unreliable(posterior02, dataset, 0.2)
posterior04 = prior.copy()
update_unreliable(posterior04, dataset, 0.4)
# Solution
posterior00.plot(label='y = 0.0')
posterior02.plot(label='y = 0.2')
posterior04.plot(label='y = 0.4')
decorate(xlabel='Proportion of heads',
ylabel='PMF')
# Solution
posterior00.idxmax(), posterior02.idxmax(), posterior04.idxmax()
```
**Exercise:** In preparation for an alien invasion, the Earth Defense League (EDL) has been working on new missiles to shoot down space invaders. Of course, some missile designs are better than others; let's assume that each design has some probability of hitting an alien ship, `x`.
Based on previous tests, the distribution of `x` in the population of designs is approximately uniform between 0.1 and 0.4.
Now suppose the new ultra-secret Alien Blaster 9000 is being tested. In a press conference, an EDL general reports that the new design has been tested twice, taking two shots during each test. The results of the test are confidential, so the general won't say how many targets were hit, but they report: "The same number of targets were hit in the two tests, so we have reason to think this new design is consistent."
Is this data good or bad; that is, does it increase or decrease your estimate of `x` for the Alien Blaster 9000?
Hint: If the probability of hitting each target is $x$, the probability of hitting one target in both tests is $[2x(1-x)]^2$.
```
# Solution
hypos = np.linspace(0.1, 0.4, 101)
prior = Pmf(1, hypos)
# Solution
# specific version for n=2 shots
x = hypos
likes = [(1-x)**4, (2*x*(1-x))**2, x**4]
likelihood = np.sum(likes, axis=0)
# Solution
# general version for any n shots per test
from scipy.stats import binom
n = 2
likes2 = [binom.pmf(k, n, x)**2 for k in range(n+1)]
likelihood2 = np.sum(likes2, axis=0)
# Solution
plt.plot(x, likelihood, label='special case')
plt.plot(x, likelihood2, label='general formula')
decorate(xlabel='Probability of hitting the target',
ylabel='Likelihood',
title='Likelihood of getting the same result')
# Solution
posterior = prior * likelihood
posterior.normalize()
# Solution
posterior.plot(label='Two tests, two shots, same outcome')
decorate(xlabel='Probability of hitting the target',
ylabel='PMF',
title='Posterior distribution',
ylim=[0, 0.015])
# Solution
# Getting the same result in both tests is more likely for
# extreme values of `x` and least likely when `x=0.5`.
# In this example, the prior suggests that `x` is less than 0.5,
# and the update gives more weight to extreme values.
# So the data makes lower values of `x` more likely.
```
| github_jupyter |
```
!pip install --upgrade tables
!pip install eli5
!pip install xgboost
import pandas as pd
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
import eli5
from eli5.sklearn import PermutationImportance
cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/dw_matrix_car"
df = pd.read_hdf('data/car.h5')
df.shape
```
## Feature Engineering
```
# Factorize every column of `df` into integer codes so tree-based models
# can consume them directly.
SUFFIX_CAT = '_cat'  # suffix marking factorized (categorical-code) columns
for feat in df.columns:
    # list-valued cells cannot be factorized — skip those columns
    if isinstance (df[feat][0], list):continue
    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        # already a *_cat column: overwrite it with fresh codes
        df[feat] = factorized_values
    else:
        # otherwise add a parallel *_cat column next to the original
        df[feat + SUFFIX_CAT] = factorized_values
# candidate features: every factorized column except price-related ones
cat_feats = [x for x in df.columns if SUFFIX_CAT in x ]
cat_feats = [x for x in cat_feats if 'price' not in x ]
len(cat_feats)  # notebook cell output: number of candidate features
x = df[cat_feats].values
y = df['price_value'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, x, y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
def run_model(model, feats):
    """Cross-validate `model` on the global `df` using columns `feats`.

    Returns (mean, std) of the 3-fold negative-MAE scores.
    """
    features = df[feats].values
    target = df['price_value'].values
    cv_scores = cross_val_score(model, features, target, cv=3, scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
```
## DecisionTree
```
run_model( DecisionTreeRegressor(max_depth=5), cat_feats )
```
## RandomForest
```
model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model( model, cat_feats )
```
## XGBoost
```
xgb_params = {
'max_depth':5,
'n_estimators':50,
'learning_rate':0.1,
'seed':0
}
run_model(xgb.XGBRegressor(**xgb_params), cat_feats )
m = xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0)
m.fit(x,y)
imp = PermutationImportance(m, random_state=0).fit(x,y)
eli5.show_weights(imp, feature_names=cat_feats)
len(cat_feats)
feats = ['param_napęd_cat','param_rok-produkcji_cat','param_stan_cat','param_skrzynia-biegów_cat','param_faktura-vat_cat','param_moc_cat','param_marka-pojazdu_cat','feature_kamera-cofania_cat','param_typ_cat','param_pojemność-skokowa_cat','seller_name_cat','feature_wspomaganie-kierownicy_cat','param_model-pojazdu_cat','param_wersja_cat','param_kod-silnika_cat','feature_system-start-stop_cat','feature_asystent-pasa-ruchu_cat','feature_czujniki-parkowania-przednie_cat','feature_łopatki-zmiany-biegów_cat','feature_regulowane-zawieszenie_cat']
len(feats)
feats = ['param_napęd_cat','param_rok-produkcji_cat','param_stan_cat','param_skrzynia-biegów_cat','param_faktura-vat_cat','param_moc_cat','param_marka-pojazdu_cat','feature_kamera-cofania_cat','param_typ_cat','param_pojemność-skokowa_cat','seller_name_cat','feature_wspomaganie-kierownicy_cat','param_model-pojazdu_cat','param_wersja_cat','param_kod-silnika_cat','feature_system-start-stop_cat','feature_asystent-pasa-ruchu_cat','feature_czujniki-parkowania-przednie_cat','feature_łopatki-zmiany-biegów_cat','feature_regulowane-zawieszenie_cat']
run_model(xgb.XGBRegressor(**xgb_params), feats )
df['param_napęd'].unique()
df['param_rok-produkcji'].unique()
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x:-1 if str(x)=='None' else int(x))
feats = ['param_napęd_cat','param_rok-produkcji','param_stan_cat','param_skrzynia-biegów_cat','param_faktura-vat_cat','param_moc_cat','param_marka-pojazdu_cat','feature_kamera-cofania_cat','param_typ_cat','param_pojemność-skokowa_cat','seller_name_cat','feature_wspomaganie-kierownicy_cat','param_model-pojazdu_cat','param_wersja_cat','param_kod-silnika_cat','feature_system-start-stop_cat','feature_asystent-pasa-ruchu_cat','feature_czujniki-parkowania-przednie_cat','feature_łopatki-zmiany-biegów_cat','feature_regulowane-zawieszenie_cat']
run_model(xgb.XGBRegressor(**xgb_params), feats )
df['param_moc'].unique()
df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x:-1 if str(x)=='None' else int(x))
feats = ['param_napęd_cat','param_rok-produkcji','param_stan_cat','param_skrzynia-biegów_cat','param_faktura-vat_cat','param_moc','param_marka-pojazdu_cat','feature_kamera-cofania_cat','param_typ_cat','param_pojemność-skokowa_cat','seller_name_cat','feature_wspomaganie-kierownicy_cat','param_model-pojazdu_cat','param_wersja_cat','param_kod-silnika_cat','feature_system-start-stop_cat','feature_asystent-pasa-ruchu_cat','feature_czujniki-parkowania-przednie_cat','feature_łopatki-zmiany-biegów_cat','feature_regulowane-zawieszenie_cat']
run_model(xgb.XGBRegressor(**xgb_params), feats )
df['param_pojemność-skokowa'].unique()
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int( str(x).split('cm')[0].replace(' ', '')))
feats = ['param_napęd_cat','param_rok-produkcji','param_stan_cat','param_skrzynia-biegów_cat','param_faktura-vat_cat','param_moc','param_marka-pojazdu_cat','feature_kamera-cofania_cat','param_typ_cat','param_pojemność-skokowa','seller_name_cat','feature_wspomaganie-kierownicy_cat','param_model-pojazdu_cat','param_wersja_cat','param_kod-silnika_cat','feature_system-start-stop_cat','feature_asystent-pasa-ruchu_cat','feature_czujniki-parkowania-przednie_cat','feature_łopatki-zmiany-biegów_cat','feature_regulowane-zawieszenie_cat']
run_model(xgb.XGBRegressor(**xgb_params), feats )
```
| github_jupyter |
# Run a SageMaker Experiment with MNIST Handwritten Digits Classification
This demo shows how you can use the [SageMaker Experiments Python SDK](https://sagemaker-experiments.readthedocs.io/en/latest/) to organize, track, compare, and evaluate your machine learning (ML) model training experiments.
You can track artifacts for experiments, including data sets, algorithms, hyperparameters, and metrics. Experiments executed on SageMaker such as SageMaker Autopilot jobs and training jobs are automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before or after model training, such as data pre-processing or post-training model evaluation.
The APIs also let you search and browse your current and past experiments, compare experiments, and identify best-performing models.
We demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment is organized as follows:
1. Download and prepare the MNIST dataset.
2. Train a Convolutional Neural Network (CNN) Model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.
3. Finally use the search and analytics capabilities of the SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.
4. We also show an example of tracing the complete lineage of a model version: the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.
Make sure you select the `Python 3 (Data Science)` kernel in Studio, or `conda_pytorch_p36` in a notebook instance.
## Runtime
This notebook takes approximately 25 minutes to run.
## Contents
1. [Install modules](#Install-modules)
1. [Setup](#Setup)
1. [Download the dataset](#Download-the-dataset)
1. [Step 1: Set up the Experiment](#Step-1:-Set-up-the-Experiment)
1. [Step 2: Track Experiment](#Step-2:-Track-Experiment)
1. [Deploy an endpoint for the best training job / trial component](#Deploy-an-endpoint-for-the-best-training-job-/-trial-component)
1. [Cleanup](#Cleanup)
1. [Contact](#Contact)
## Install modules
```
import sys
```
### Install the SageMaker Experiments Python SDK
```
!{sys.executable} -m pip install sagemaker-experiments==0.1.35
```
### Install PyTorch
```
# PyTorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.2.2
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
```
## Setup
```
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats("retina")
sm_sess = sagemaker.Session()
sess = sm_sess.boto_session
sm = sm_sess.sagemaker_client
role = get_execution_role()
```
## Download the dataset
We download the MNIST handwritten digits dataset, and then apply a transformation on each image.
```
bucket = sm_sess.default_bucket()
prefix = "DEMO-mnist"
print("Using S3 location: s3://" + bucket + "/" + prefix + "/")
datasets.MNIST.urls = [
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-labels-idx1-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-labels-idx1-ubyte.gz",
]
# Download the dataset to the ./mnist folder, and load and transform (normalize) them
train_set = datasets.MNIST(
"mnist",
train=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=True,
)
test_set = datasets.MNIST(
"mnist",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=False,
)
```
View an example image from the dataset.
```
plt.imshow(train_set.data[2].numpy())
```
After transforming the images in the dataset, we upload it to S3.
```
inputs = sagemaker.Session().upload_data(path="mnist", bucket=bucket, key_prefix=prefix)
```
Now let's track the parameters from the data pre-processing step.
```
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters(
{
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
}
)
# We can log the S3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
```
## Step 1: Set up the Experiment
Create an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”.
### Create an Experiment
```
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm,
)
print(mnist_experiment)
```
## Step 2: Track Experiment
### Now create a Trial for each training run to track its inputs, parameters, and metrics.
While training the CNN model on SageMaker, we experiment with several values for the number of hidden channel in the model. We create a Trial to track each training job run. We also create a TrialComponent from the tracker we created before, and add to the Trial. This enriches the Trial with the parameters we captured from the data pre-processing stage.
```
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
```
If you want to run the following five training jobs in parallel, you may need to increase your resource limit. Here we run them sequentially.
```
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# Create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
# Associate the proprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# All input configurations, parameters, and metrics specified in
# the estimator definition are automatically tracked
estimator = PyTorch(
py_version="py3",
entry_point="./mnist.py",
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
instance_count=1,
instance_type="ml.c4.xlarge",
hyperparameters={
"epochs": 2,
"backend": "gloo",
"hidden_channels": num_hidden_channel,
"dropout": 0.2,
"kernel_size": 5,
"optimizer": "sgd",
},
metric_definitions=[
{"Name": "train:loss", "Regex": "Train Loss: (.*?);"},
{"Name": "test:loss", "Regex": "Test Average loss: (.*?),"},
{"Name": "test:accuracy", "Regex": "Test Accuracy: (.*?)%;"},
],
enable_sagemaker_metrics=True,
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Associate the estimator with the Experiment and Trial
estimator.fit(
inputs={"training": inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# Wait two seconds before dispatching the next training job
time.sleep(2)
```
### Compare the model training runs for an experiment
Now we use the analytics capabilities of the Experiments SDK to query and compare the training runs for identifying the best model produced by our experiment. You can retrieve trial components by using a search expression.
### Some Simple Analyses
```
search_expression = {
"Filters": [
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=["test:accuracy"],
parameter_names=["hidden_channels", "epochs", "dropout", "optimizer"],
)
trial_component_analytics.dataframe()
```
To isolate and measure the impact of change in hidden channels on model accuracy, we vary the number of hidden channel and fix the value for other hyperparameters.
Next let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial.
```
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters": [
{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2],
}
]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
```
## Deploy an endpoint for the best training job / trial component
Now we take the best model and deploy it to an endpoint so it is available to perform inference.
```
# Pulling best based on sort in the analytics/dataframe, so first is best....
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]["TrialComponentName"]
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts["SageMaker.ModelArtifact"].value
env = {
"hidden_channels": str(int(best_trial_component.parameters["hidden_channels"])),
"dropout": str(best_trial_component.parameters["dropout"]),
"kernel_size": str(int(best_trial_component.parameters["kernel_size"])),
}
model = PyTorchModel(
model_data,
role,
"./mnist.py",
py_version="py3",
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
name=best_trial_component.trial_component_name,
)
predictor = model.deploy(instance_type="ml.m5.xlarge", initial_instance_count=1)
```
## Cleanup
Once we're done, clean up the endpoint to prevent unnecessary billing.
```
predictor.delete_endpoint()
```
Trial components can exist independently of trials and experiments. You might want keep them if you plan on further exploration. If not, delete all experiment artifacts.
```
mnist_experiment.delete_all(action="--force")
```
## Contact
Submit any questions or issues to https://github.com/aws/sagemaker-experiments/issues or mention @aws/sagemakerexperimentsadmin
| github_jupyter |
```
import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
%matplotlib inline
from scipy.stats import entropy
from google.colab import drive
drive.mount('/content/drive')
path="/content/drive/MyDrive/Research/alternate_minimisation/"
name="_50_50_10runs_entropy"
# mu1 = np.array([3,3,3,3,0])
# sigma1 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu2 = np.array([4,4,4,4,0])
# sigma2 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu3 = np.array([10,5,5,10,0])
# sigma3 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu4 = np.array([-10,-10,-10,-10,0])
# sigma4 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu5 = np.array([-21,4,4,-21,0])
# sigma5 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu6 = np.array([-10,18,18,-10,0])
# sigma6 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu7 = np.array([4,20,4,20,0])
# sigma7 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu8 = np.array([4,-20,-20,4,0])
# sigma8 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu9 = np.array([20,20,20,20,0])
# sigma9 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu10 = np.array([20,-10,-10,20,0])
# sigma10 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# sample1 = np.random.multivariate_normal(mean=mu1,cov= sigma1,size=500)
# sample2 = np.random.multivariate_normal(mean=mu2,cov= sigma2,size=500)
# sample3 = np.random.multivariate_normal(mean=mu3,cov= sigma3,size=500)
# sample4 = np.random.multivariate_normal(mean=mu4,cov= sigma4,size=500)
# sample5 = np.random.multivariate_normal(mean=mu5,cov= sigma5,size=500)
# sample6 = np.random.multivariate_normal(mean=mu6,cov= sigma6,size=500)
# sample7 = np.random.multivariate_normal(mean=mu7,cov= sigma7,size=500)
# sample8 = np.random.multivariate_normal(mean=mu8,cov= sigma8,size=500)
# sample9 = np.random.multivariate_normal(mean=mu9,cov= sigma9,size=500)
# sample10 = np.random.multivariate_normal(mean=mu10,cov= sigma10,size=500)
# X = np.concatenate((sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8,sample9,sample10),axis=0)
# Y = np.concatenate((np.zeros((500,1)),np.ones((500,1)),2*np.ones((500,1)),3*np.ones((500,1)),4*np.ones((500,1)),
# 5*np.ones((500,1)),6*np.ones((500,1)),7*np.ones((500,1)),8*np.ones((500,1)),9*np.ones((500,1))),axis=0).astype(int)
# print(X.shape,Y.shape)
# # plt.scatter(sample1[:,0],sample1[:,1],label="class_0")
# # plt.scatter(sample2[:,0],sample2[:,1],label="class_1")
# # plt.scatter(sample3[:,0],sample3[:,1],label="class_2")
# # plt.scatter(sample4[:,0],sample4[:,1],label="class_3")
# # plt.scatter(sample5[:,0],sample5[:,1],label="class_4")
# # plt.scatter(sample6[:,0],sample6[:,1],label="class_5")
# # plt.scatter(sample7[:,0],sample7[:,1],label="class_6")
# # plt.scatter(sample8[:,0],sample8[:,1],label="class_7")
# # plt.scatter(sample9[:,0],sample9[:,1],label="class_8")
# # plt.scatter(sample10[:,0],sample10[:,1],label="class_9")
# # plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# class SyntheticDataset(Dataset):
# """MosaicDataset dataset."""
# def __init__(self, x, y):
# """
# Args:
# csv_file (string): Path to the csv file with annotations.
# root_dir (string): Directory with all the images.
# transform (callable, optional): Optional transform to be applied
# on a sample.
# """
# self.x = x
# self.y = y
# #self.fore_idx = fore_idx
# def __len__(self):
# return len(self.y)
# def __getitem__(self, idx):
# return self.x[idx] , self.y[idx] #, self.fore_idx[idx]
# trainset = SyntheticDataset(X,Y)
# # testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
# classes = ('zero','one','two','three','four','five','six','seven','eight','nine')
# foreground_classes = {'zero','one','two'}
# fg_used = '012'
# fg1, fg2, fg3 = 0,1,2
# all_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'}
# background_classes = all_classes - foreground_classes
# background_classes
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True)
# dataiter = iter(trainloader)
# background_data=[]
# background_label=[]
# foreground_data=[]
# foreground_label=[]
# batch_size=100
# for i in range(50):
# images, labels = dataiter.next()
# for j in range(batch_size):
# if(classes[labels[j]] in background_classes):
# img = images[j].tolist()
# background_data.append(img)
# background_label.append(labels[j])
# else:
# img = images[j].tolist()
# foreground_data.append(img)
# foreground_label.append(labels[j])
# foreground_data = torch.tensor(foreground_data)
# foreground_label = torch.tensor(foreground_label)
# background_data = torch.tensor(background_data)
# background_label = torch.tensor(background_label)
# def create_mosaic_img(bg_idx,fg_idx,fg):
# """
# bg_idx : list of indexes of background_data[] to be used as background images in mosaic
# fg_idx : index of image to be used as foreground image from foreground data
# fg : at what position/index foreground image has to be stored out of 0-8
# """
# image_list=[]
# j=0
# for i in range(9):
# if i != fg:
# image_list.append(background_data[bg_idx[j]])
# j+=1
# else:
# image_list.append(foreground_data[fg_idx])
# label = foreground_label[fg_idx] - fg1 # minus fg1 because our fore ground classes are fg1,fg2,fg3 but we have to store it as 0,1,2
# #image_list = np.concatenate(image_list ,axis=0)
# image_list = torch.stack(image_list)
# return image_list,label
# desired_num = 3000
# mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
# fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
# mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
# list_set_labels = []
# for i in range(desired_num):
# set_idx = set()
# np.random.seed(i)
# bg_idx = np.random.randint(0,3500,8)
# set_idx = set(background_label[bg_idx].tolist())
# fg_idx = np.random.randint(0,1500)
# set_idx.add(foreground_label[fg_idx].item())
# fg = np.random.randint(0,9)
# fore_idx.append(fg)
# image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
# mosaic_list_of_images.append(image_list)
# mosaic_label.append(label)
# list_set_labels.append(set_idx)
# def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number):
# """
# mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point
# labels : mosaic_dataset labels
# foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average
# dataset_number : will help us to tell what ratio of foreground image to be taken. for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9
# """
# avg_image_dataset = []
# for i in range(len(mosaic_dataset)):
# img = torch.zeros([5], dtype=torch.float64)
# for j in range(9):
# if j == foreground_index[i]:
# img = img + mosaic_dataset[i][j]*dataset_number/9
# else :
# img = img + mosaic_dataset[i][j]*(9-dataset_number)/(8*9)
# avg_image_dataset.append(img)
# return torch.stack(avg_image_dataset) , torch.stack(labels) , foreground_index
class MosaicDataset1(Dataset):
    """Dataset of pre-built mosaic images.

    Each item is the triple (mosaic, label, foreground_index):
    the mosaic image, its foreground-class label, and the position
    of the foreground tile inside the mosaic.
    """

    def __init__(self, mosaic_list, mosaic_label, fore_idx):
        """
        Args:
            mosaic_list: sequence of mosaic images.
            mosaic_label: sequence of labels, aligned with `mosaic_list`.
            fore_idx: sequence of foreground-tile positions, same length.
        """
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        item = (self.mosaic[idx], self.label[idx], self.fore_idx[idx])
        return item
# The commented lines below were used once to dump the generated mosaics to
# disk; subsequent runs reload that snapshot instead of regenerating it.
# data = [{"mosaic_list":mosaic_list_of_images, "mosaic_label": mosaic_label, "fore_idx":fore_idx}]
# np.save("mosaic_data.npy",data)

# Reload the pre-generated mosaic dataset (allow_pickle is needed because
# the file stores a list of dicts, not a plain array).
# NOTE(review): `path` is defined elsewhere in the notebook — confirm it
# ends with a path separator.
data = np.load(path+"mosaic_data.npy",allow_pickle=True)
mosaic_list_of_images = data[0]["mosaic_list"]
mosaic_label = data[0]["mosaic_label"]
fore_idx = data[0]["fore_idx"]

batch = 250  # mini-batch size for training/evaluation
msd = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
```
**Focus Net**
```
class Focus_deep(nn.Module):
    """Focus ("where") network: scores each of the K patches of a mosaic and
    returns their attention-weighted average.

    Input  z: tensor of shape (batch, K, d) holding the K elemental patches.
    Returns (y, x, log_x) where
        y     : (batch, d)  attention-weighted average of the patches,
        x     : (batch, K)  softmax attention weights (alphas),
        log_x : (batch, K)  log-softmax of the scores, used by the entropy
                regulariser in the loss.
    """

    def __init__(self, inputs, output, K, d):
        """
        Args:
            inputs: dimensionality of one elemental patch.
            output: size of the per-patch score (only column 0 is used).
            K: number of patches per mosaic.
            d: dimensionality of the averaged representation.
        """
        super(Focus_deep, self).__init__()
        self.inputs = inputs
        self.output = output
        self.K = K
        self.d = d
        self.linear1 = nn.Linear(self.inputs, 50)
        self.linear2 = nn.Linear(50, self.output)

    def forward(self, z):
        batch = z.shape[0]
        # Allocate buffers on the input's device/dtype. This generalises the
        # previous hard-coded cuda/float64 allocation (identical behaviour
        # for the double-precision cuda tensors used by the notebook, but
        # the module now also runs on CPU).
        x = torch.zeros([batch, self.K], dtype=z.dtype, device=z.device)
        y = torch.zeros([batch, self.d], dtype=z.dtype, device=z.device)
        for i in range(self.K):
            # One scalar score per patch (column 0 of the helper output).
            x[:, i] = self.helper(z[:, i])[:, 0]
        log_x = F.log_softmax(x, dim=1)  # log alphas, for the entropy term
        x = F.softmax(x, dim=1)          # attention weights (alphas)
        # (Removed a dead pre-loop assignment `x1 = x[:,0]` that was
        # immediately overwritten on the first iteration.)
        for i in range(self.K):
            x1 = x[:, i]
            # Accumulate the attention-weighted sum of the patches.
            y = y + torch.mul(x1[:, None], z[:, i])
        return y, x, log_x

    def helper(self, x):
        """Two-layer MLP producing the raw score(s) for one patch."""
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x
```
**Classification Net**
```
class Classification_deep(nn.Module):
    """Classification ("what") network: a two-layer MLP applied to the
    attention-averaged representation produced by the focus network."""

    def __init__(self, inputs, output):
        """
        Args:
            inputs: size of the input feature vector.
            output: number of classes.
        """
        super(Classification_deep, self).__init__()
        self.inputs = inputs
        self.output = output
        self.linear1 = nn.Linear(self.inputs, 50)
        self.linear2 = nn.Linear(50, self.output)

    def forward(self, x):
        # Hidden layer with ReLU, then linear class scores (logits).
        hidden = F.relu(self.linear1(x))
        return self.linear2(hidden)
criterion = nn.CrossEntropyLoss()

def my_cross_entropy(x, y, alpha, log_alpha, k):
    """Cross-entropy loss with an attention-entropy regulariser.

    Args:
        x: (batch, classes) logits from the classification net.
        y: (batch,) integer class labels.
        alpha: (batch, K) attention weights (softmax output).
        log_alpha: (batch, K) log of the attention weights.
        k: weight of the entropy term; the total loss is
           (1 - k) * cross_entropy + k * entropy.

    Returns:
        (loss, closs, entropy): the combined loss, the plain cross-entropy
        and the mean attention entropy, all as scalar tensors.
    """
    # (Removed dead commented-out code and the redundant loss/closs and
    # b/entropy aliases of the original.)
    closs = criterion(x, y)
    # Shannon entropy of the attention distribution, averaged over the batch.
    entropy = torch.mean(torch.sum(-1.0 * alpha * log_alpha, dim=1))
    loss = (1 - k) * closs + k * entropy
    return loss, closs, entropy
```
```
def calculate_attn_loss(dataloader, what, where, criter, k):
    """Evaluate the focus + classification pair over a full dataloader.

    Runs both nets in eval mode without gradients, accumulates the combined
    loss, cross-entropy and attention entropy, and collects per-sample
    attention weights / predictions for FTPT analysis.

    Args:
        dataloader: yields (inputs, labels, foreground_idx) batches.
        what: classification ("what") net.
        where: focus ("where") net.
        criter: unused (my_cross_entropy uses the module-level criterion);
            kept so existing call sites keep working.
        k: entropy weight forwarded to my_cross_entropy.

    Returns:
        (mean_loss, mean_closs, mean_entropy, analysis) where the means are
        taken over the number of batches and analysis is the list produced
        by analyse_data.
    """
    what.eval()
    where.eval()
    r_loss = 0
    cc_loss = 0
    cc_entropy = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    n_batches = 0  # true batch count, for the averages below
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels, fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"), labels.to("cuda")
            avg, alpha, log_alpha = where(inputs)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss, closs, entropy = my_cross_entropy(outputs, labels, alpha, log_alpha, k)
            r_loss += loss.item()
            cc_loss += closs.item()
            cc_entropy += entropy.item()
            n_batches = i + 1
    alphas = np.concatenate(alphas, axis=0)
    pred = np.concatenate(pred, axis=0)
    lbls = np.concatenate(lbls, axis=0)
    fidices = np.concatenate(fidices, axis=0)
    analysis = analyse_data(alphas, lbls, pred, fidices)
    # BUG FIX: the original divided by i — the *last batch index*, i.e.
    # n_batches - 1 (and a ZeroDivisionError for a single-batch loader) —
    # which inflated every reported average.
    return r_loss / n_batches, cc_loss / n_batches, cc_entropy / n_batches, analysis
def analyse_data(alphas, lbls, predicted, f_idx):
    """Build FTPT statistics for one evaluation pass.

    For every sample, the attention argmax ("focus") is compared with the
    true foreground position, and the prediction with the label:
        ftpt: focus true,  prediction true    ffpt: focus false, prediction true
        ftpf: focus true,  prediction false   ffpf: focus false, prediction false
    amth / alth count samples whose maximum attention weight is >= 0.5
    versus below 0.5.

    Returns:
        [ftpt, ffpt, ftpf, ffpf, amth, alth]
    """
    ftpt = ffpt = ftpf = ffpf = amth = alth = 0
    for j in range(len(predicted)):
        focus = np.argmax(alphas[j])
        # Is the attention concentrated (max weight at least one half)?
        if alphas[j][focus] >= 0.5:
            amth += 1
        else:
            alth += 1
        focus_ok = focus == f_idx[j]
        pred_ok = predicted[j] == lbls[j]
        if focus_ok and pred_ok:
            ftpt += 1
        elif pred_ok:
            ffpt += 1
        elif focus_ok:
            ftpf += 1
        else:
            ffpf += 1
    return [ftpt, ffpt, ftpf, ffpf, amth, alth]
number_runs = 10
full_analysis =[]
# One row per run: final FTPT/FFPT/FTPF/FFPF percentages.
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"])
k = 0.005             # weight of the attention-entropy term in the loss
every_what_epoch = 1  # swap between training what/where every this many epochs

# NOTE(review): indentation was lost in this export and has been
# reconstructed here; the per-run accuracy evaluation below is assumed to
# sit inside the run loop — confirm against the original notebook.
for n in range(number_runs):
    print("--"*40)
    # instantiate focus and classification Model
    # (seeding before each constructor makes the initial weights
    # reproducible per run)
    torch.manual_seed(n)
    where = Focus_deep(5,1,9,5).double()
    torch.manual_seed(n)
    what = Classification_deep(5,3).double()
    where = where.to("cuda")
    what = what.to("cuda")
    # instantiate optimizer
    optimizer_where = optim.Adam(where.parameters(),lr =0.01)
    optimizer_what = optim.Adam(what.parameters(), lr=0.01)
    #criterion = nn.CrossEntropyLoss()
    acti = []
    analysis_data = []
    loss_curi = []
    epochs = 2000
    # calculate zeroth epoch loss and FTPT values
    running_loss ,_,_,anlys_data= calculate_attn_loss(train_loader,what,where,criterion,k)
    loss_curi.append(running_loss)
    analysis_data.append(anlys_data)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    # training starts
    for epoch in range(epochs): # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        what.train()
        where.train()
        # Announce which sub-network trains this epoch; the other is
        # "frozen" simply by never stepping its optimizer below.
        if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
            print(epoch+1,"updating what_net, where_net is freezed")
            print("--"*40)
        elif ((epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
            print(epoch+1,"updating where_net, what_net is freezed")
            print("--"*40)
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            inputs, labels,_ = data
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_where.zero_grad()
            optimizer_what.zero_grad()
            # forward + backward + optimize
            avg, alpha,log_alpha = where(inputs)
            outputs = what(avg)
            my_loss,_,_ = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
            # print statistics
            running_loss += my_loss.item()
            my_loss.backward()
            # Step only the optimizer of the sub-network being trained this
            # epoch; the other network's gradients are discarded.
            if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
                optimizer_what.step()
            elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
                optimizer_where.step()
            # optimizer_where.step()
            # optimizer_what.step()
            #break
        # Re-evaluate on the whole training set after this epoch's updates.
        running_loss,ccloss,ccentropy,anls_data = calculate_attn_loss(train_loader,what,where,criterion,k)
        analysis_data.append(anls_data)
        print('epoch: [%d] loss: %.3f celoss: %.3f entropy: %.3f' %(epoch + 1,running_loss,ccloss,ccentropy))
        loss_curi.append(running_loss) #loss per epoch
        # Early stop once the training loss is essentially zero.
        if running_loss<=0.001:
            break
    print('Finished Training run ' +str(n))
    #break
    analysis_data = np.array(analysis_data)
    # Final-epoch counts -> percentages (3000 samples / 30 = %).
    FTPT_analysis.loc[n] = analysis_data[-1,:4]/30
    full_analysis.append((epoch, analysis_data))
    # Training-set accuracy of this run's final model.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in train_loader:
            images, labels,_ = data
            images = images.double()
            images, labels = images.to("cuda"), labels.to("cuda")
            avg, alpha,log_alpha = where(images)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 3000 train images: %d %%' % ( 100 * correct / total))

# Plot the FTPT/FFPT/FTPF/FFPF trends of every run and save the summary CSV.
a,b= full_analysis[0]
print(a)
cnt=1
for epoch, analysis_data in full_analysis:
    analysis_data = np.array(analysis_data)
    # print("="*20+"run ",cnt,"="*20)
    plt.figure(figsize=(6,6))
    # epoch+2 points: the zeroth-epoch evaluation plus epoch+1 epochs.
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0],label="ftpt")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1],label="ffpt")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2],label="ftpf")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3],label="ffpf")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.title("Training trends for run "+str(cnt))
    plt.savefig(path+"50_50_10runs_entropy/every1/run"+str(cnt)+".png",bbox_inches="tight")
    plt.savefig(path+"50_50_10runs_entropy/every1/run"+str(cnt)+".pdf",bbox_inches="tight")
    cnt+=1
np.mean(np.array(FTPT_analysis),axis=0) #array([87.85333333, 5.92 , 0. , 6.22666667])
FTPT_analysis.to_csv(path+"50_50_10runs_entropy/FTPT_analysis_every1"+name+".csv",index=False)
FTPT_analysis
```
| github_jupyter |
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchvision import transforms, datasets

# Upscale MNIST digits from 28x28 to 32x32 (the input size the LeNet below
# expects) and convert them to [0, 1] tensors.
apply_transform = transforms.Compose([
        transforms.Resize(32)
        ,transforms.ToTensor()
])
BatchSize = 256
# Train/test loaders; MNIST is downloaded to ./MNIST on first use.
trainset = datasets.MNIST(root = './MNIST', train = True, download = True, transform = apply_transform)
trainLoader = torch.utils.data.DataLoader(trainset, batch_size = BatchSize, shuffle = True, num_workers = 10)
testset = datasets.MNIST(root = './MNIST', train = False, download = True, transform = apply_transform)
testLoader = torch.utils.data.DataLoader(testset, batch_size = BatchSize, shuffle = True, num_workers = 10)
# Sanity check: 60000 train / 10000 test samples.
print(len(trainLoader.dataset))
print(len(testLoader.dataset))
class LeNet(nn.Module):
    """LeNet-5 style CNN for 32x32 single-channel images, returning
    log-probabilities over 10 classes.

    NOTE(review): the last fully-connected layer is passed through ReLU
    before log_softmax, which clamps the pre-softmax scores to be
    non-negative; kept as-is to preserve the original behaviour.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        # Two conv + max-pool stages: 1 -> 6 -> 16 feature maps.
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, bias=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5, bias=True)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Classifier head over the 16*5*5 = 400 flattened features.
        self.fc1 = nn.Linear(400, 120, bias=True)
        self.fc2 = nn.Linear(120, 84, bias=True)
        self.fc3 = nn.Linear(84, 10, bias=True)

    def forward(self, x):
        out = self.maxpool1(F.relu(self.conv1(x)))
        out = self.maxpool2(F.relu(self.conv2(out)))
        out = out.view(-1, 400)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = F.relu(self.fc3(out))
        # Log-probabilities over the 10 digit classes.
        return F.log_softmax(out, dim=1)
# Train LeNet on MNIST with a manually implemented SGD update.
net = LeNet()
print(net)

use_gpu = torch.cuda.is_available()
if use_gpu:
    print("GPU is available")
    net = net.cuda()

learning_rate = 0.01
# NOTE(review): LeNet.forward already returns log_softmax outputs, so
# CrossEntropyLoss applies a second log-softmax on top of them; nn.NLLLoss
# would be the exact pairing. Kept as-is to preserve the original objective.
criterion = nn.CrossEntropyLoss()
num_epochs = 50
train_loss = []  # mean loss per epoch
train_acc = []   # training accuracy per epoch

for epoch in range(num_epochs):
    running_loss = 0.0
    running_corr = 0
    for i, data in enumerate(trainLoader):
        inputs, labels = data
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        net.zero_grad()
        output = net(inputs)
        loss = criterion(output, labels)
        # BUG FIX: backpropagate each batch's loss exactly once. The
        # original called backward() on the running *sum* every iteration,
        # which re-traverses already-freed graphs (RuntimeError on the
        # second batch) and kept every batch's graph alive by accumulating
        # loss tensors instead of floats.
        loss.backward()
        # Manual SGD step: w <- w - lr * grad.
        for f in net.parameters():
            f.data.sub_(f.grad.data * learning_rate)
        running_loss += loss.item()
        preds = torch.argmax(output, dim=1)
        # BUG FIX: accumulate the correct-prediction count ('+='); the
        # original overwrote it each batch, so the reported accuracy only
        # reflected the final batch.
        running_corr += torch.sum(preds == labels).item()
    epoch_loss = running_loss / (i + 1)
    # Use the actual dataset size instead of the hard-coded 60000.
    epoch_acc = running_corr / len(trainLoader.dataset)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    print('Epoch {:.0f}/{:.0f} : Training loss: {:.4f} | Training Accuracy: {:.4f}'.format(epoch+1,num_epochs,epoch_loss,epoch_acc*100))
```
| github_jupyter |
# Software Carpentry
### EPFL Library, November 2018
## Program
| | 4 afternoons | 4 workshops |
| :-- | :----------- | :---------- |
| > | `Today` | `Unix Shell` |
| | Thursday 22 | Version Control with Git |
| | Tuesday 27 | Python I |
| | Thursday 29 | More Python |
## Why did you decide to attend this workshop?
## Today's program
| | activity |
| :-- | :-------- |
| 13:00 | Introducing the Unix Shell |
| 13:15 | Navigating Files and Directories |
| 14:00 | Working with Files and Directories |
| 14:50 | **break** |
| 15:20 | Loops |
| 16:10 | Shell Scripts |
| 16:55 | Finding Things |
| 17:30 | Wrap-up / **END** |
## How we'll work
Live coding
Sticky notes : use a red sticky note to say you are stuck, put the green one when all is good
Instructors : Raphaël and Mathilde
Helpers : Antoine, Ludovic, Raphaël and Mathilde
Slides for exercices : Find the link to the slides on go.epfl.ch/swc-pad
## Introducing the Shell
### Key points about the Shell
- A shell is a program whose primary purpose is to read commands and run other programs.
- The shell’s main advantages are its high action-to-keystroke ratio, its support for automating repetitive tasks, and its capacity to access networked machines.
- The shell’s main disadvantages are its primarily textual nature and how cryptic its commands and operation can be.
## Navigating Files and Directories
### [Exercise] Exploring more `ls` flags
What does the command ls do when used with the `-l` and `-h` flags?
### [Exercise] Listing Recursively and By Time
The command `ls -R` lists the contents of directories recursively, i.e., lists their sub-directories, sub-sub-directories, and so on at each level. The command `ls -t` lists things by time of last change, with most recently changed files or directories first. In what order does `ls -R -t` display things?
### [Exercise] Absolute vs Relative Paths
Starting from /Users/amanda/data/, which of the following commands could Amanda use to navigate to her home directory, which is /Users/amanda?
1. `cd .`
2. `cd /`
3. `cd /home/amanda`
4. `cd ../..`
5. `cd ~`
6. `cd home`
7. `cd ~/data/..`
8. `cd`
9. `cd ..`
### [Exercise] Relative Path Resolution
Using the filesystem diagram below, if pwd displays /Users/thing, what will ls -F ../backup display?
1. `../backup: No such file or directory`
2. `2012-12-01 2013-01-08 2013-01-27`
3. `2012-12-01/ 2013-01-08/ 2013-01-27/`
4. `original/ pnas_final/ pnas_sub/`

### [Exercise ] `ls` Reading comprehension

Assuming a directory structure as in the above Figure (File System for Challenge Questions), if `pwd` displays `/Users/backup`, and `-r` tells ls to display things in reverse order, what command will display:
`pnas_sub/ pnas_final/ original/`
1. `ls pwd`
2. `ls -r -F`
3. `ls -r -F /Users/backup`
4. Either #2 or #3 above, but not #1.
### Key Points about Navigating Files and Directories
- The file system is responsible for managing information on the disk.
- Information is stored in files, which are stored in directories (folders).
- Directories can also store other directories, which forms a directory tree.
- `cd path` changes the current working directory.
- `ls path` prints a listing of a specific file or directory; `ls` on its own lists the current working directory.
- `pwd` prints the user’s current working directory.
- `/` on its own is the root directory of the whole file system.
### More key Points about Navigating Files and Directories
- A relative path specifies a location starting from the current location.
- An absolute path specifies a location from the root of the file system.
- Directory names in a path are separated with `/` on Unix, but `\`on Windows.
- `..` means ‘the directory above the current one’; `.` on its own means ‘the current directory’.
- Most files’ names are `something.extension`. The extension isn’t required, and doesn’t guarantee anything, but is normally used to indicate the type of data in the file.
## Working with Files and Directories
### [Exercise] Creating Files a Different Way
We have seen how to create text files using the `nano` editor. Now, try the following command in your home directory:
```
$ cd # go to your home directory
$ touch my_file.txt
```
1. What did the touch command do? When you look at your home directory using the GUI file explorer, does the file show up?
2. Use `ls -l` to inspect the files. How large is `my_file.txt`?
3. When might you want to create a file this way?
### [Exercise] Using `rm` Safely
What happens when we type `rm -i thesis/quotations.txt`? Why would we want this protection when using `rm`?
### [Exercise] Moving to the Current Folder
After running the following commands, Jamie realizes that she put the files `sucrose.dat` and `maltose.dat` into the wrong folder:
```
$ ls -F
```
> analyzed/ raw/
```
$ ls -F analyzed
```
> fructose.dat glucose.dat maltose.dat sucrose.dat
```
$ cd raw/
```
Fill in the blanks to move these files to the current folder (i.e., the one she is currently in):
```
$ mv ___/sucrose.dat ___/maltose.dat ___
```
### [Exercise] Renaming Files
Suppose that you created a `.txt` file in your current directory to contain a list of the statistical tests you will need to do to analyze your data, and named it: `statstics.txt`
After creating and saving this file you realize you misspelled the filename! You want to correct the mistake, which of the following commands could you use to do so?
1. `cp statstics.txt statistics.txt`
2. `mv statstics.txt statistics.txt`
3. `mv statstics.txt .`
4. `cp statstics.txt .`
### [Exercise] Moving and Copying
What is the output of the closing `ls` command in the sequence shown below?
```
$ pwd
```
> /Users/jamie/data
```
$ ls
```
> proteins.dat
```
$ mkdir recombine
$ mv proteins.dat recombine/
$ cp recombine/proteins.dat ../proteins-saved.dat
$ ls
```
1. `proteins-saved.dat recombine`
2. `recombine`
3. `proteins.dat recombine`
4. `proteins-saved.dat`
### Additional exercises
If you were quick, check out these exercises to dig a little more into details.
### [Exercise] Copy with Multiple Filenames
For this exercise, you can test the commands in the `data-shell/data` directory.
In the example below, what does `cp` do when given several filenames and a directory name?
```
$ mkdir backup
$ cp amino-acids.txt animals.txt backup/
```
In the example below, what does `cp` do when given three or more file names?
```
$ ls -F
```
> amino-acids.txt animals.txt backup/ elements/ morse.txt pdb/ planets.txt salmon.txt sunspot.txt
```
$ cp amino-acids.txt animals.txt morse.txt
```
### [Exercise] Using Wildcards
When run in the `molecules` directory, which `ls` command(s) will produce this output?
`ethane.pdb methane.pdb`
1. `ls *t*ane.pdb`
2. `ls *t?ne.*`
3. `ls *t??ne.pdb`
4. `ls ethane.*`
### [Exercise] More on Wildcards
Sam has a directory containing calibration data, datasets, and descriptions of the datasets:
```
2015-10-23-calibration.txt
2015-10-23-dataset1.txt
2015-10-23-dataset2.txt
2015-10-23-dataset_overview.txt
2015-10-26-calibration.txt
2015-10-26-dataset1.txt
2015-10-26-dataset2.txt
2015-10-26-dataset_overview.txt
2015-11-23-calibration.txt
2015-11-23-dataset1.txt
2015-11-23-dataset2.txt
2015-11-23-dataset_overview.txt
```
Before heading off to another field trip, she wants to back up her data and send some datasets to her colleague Bob. Sam uses the following commands to get the job done:
```
$ cp *dataset* /backup/datasets
$ cp ____calibration____ /backup/calibration
$ cp 2015-____-____ ~/send_to_bob/all_november_files/
$ cp ____ ~/send_to_bob/all_datasets_created_on_a_23rd/
```
Help Sam by filling in the blanks.
### [Exercise] Organizing Directories and Files
Jamie is working on a project and she sees that her files aren’t very well organized:
```
$ ls -F
```
> analyzed/ fructose.dat raw/ sucrose.dat
The `fructose.dat` and `sucrose.dat` files contain output from her data analysis. What command(s) covered in this lesson does she need to run so that the commands below will produce the output shown?
```
$ ls -F
```
> analyzed/ raw/
```
$ ls analyzed
```
> fructose.dat sucrose.dat
### [Exercise] Copy a folder structure but not the files
You’re starting a new experiment, and would like to duplicate the file structure from your previous experiment without the data files so you can add new data.
Assume that the file structure is in a folder called ‘2016-05-18-data’, which contains a `data` folder that in turn contains folders named `raw` and `processed` that contain data files. The goal is to copy the file structure of the `2016-05-18-data` folder into a folder called `2016-05-20-data` and remove the data files from the directory you just created.
Which of the following set of commands would achieve this objective? What would the other commands do?
```
$ cp -r 2016-05-18-data/ 2016-05-20-data/
$ rm 2016-05-20-data/raw/*
$ rm 2016-05-20-data/processed/*
```
```
$ rm 2016-05-20-data/raw/*
$ rm 2016-05-20-data/processed/*
$ cp -r 2016-05-18-data/ 2016-5-20-data/
```
```
$ cp -r 2016-05-18-data/ 2016-05-20-data/
$ rm -r -i 2016-05-20-data/
```
### Key Points about Working with Files and Directories
`cp old new` copies a file.
`mkdir path` creates a new directory.
`mv old new` moves (renames) a file or directory.
`rm path` removes (deletes) a file.
`*` matches zero or more characters in a filename, so `*.txt` matches all files ending in `.txt`.
`?` matches any single character in a filename, so `?.txt` matches `a.txt` but not `any.txt`.
Use of the Control key may be described in many ways, including `Ctrl-X`, `Control-X`, and `^X`.
The shell does not have a trash bin: once something is deleted, it’s really gone.
Depending on the type of work you do, you may need a more powerful text editor than Nano.
## Pipes and Filters
### Key Points about Pipes and Filters
`cat` displays the contents of its inputs.
`head` displays the first 10 lines of its input.
`tail` displays the last 10 lines of its input.
`sort` sorts its inputs.
`wc` counts lines, words, and characters in its inputs.
`command > file` redirects a command’s output to a file.
`first | second` is a pipeline: the output of the first command is used as the input to the second.
[More information on this topic](https://swcarpentry.github.io/shell-novice/04-pipefilter/index.html) on the Software Carpentry website
## Loops
### [Exercise] Variables in Loops
This exercise refers to the `data-shell/molecules` directory. `ls` gives the following output:
> cubane.pdb ethane.pdb methane.pdb octane.pdb pentane.pdb propane.pdb
What is the output of the following code?
```
$ for datafile in *.pdb
> do
> ls *.pdb
> done
```
Now, what is the output of the following code?
```
$ for datafile in *.pdb
> do
> ls $datafile
> done
```
Why do these two loops give different outputs?
### [Exercise] Limiting Sets of Files
What would be the output of running the following loop in the `data-shell/molecules` directory?
```
$ for filename in c*
> do
> ls $filename
> done
```
1. No files are listed.
2. All files are listed.
3. Only `cubane.pdb`, `octane.pdb` and `pentane.pdb` are listed.
4. Only `cubane.pdb` is listed.
### [Exercise] Limiting Sets of Files (part 2)
How would the output differ from using this command instead?
```
$ for filename in *c*
> do
> ls $filename
> done
```
1. The same files would be listed.
2. All the files are listed this time.
3. No files are listed this time.
4. The files `cubane.pdb` and `octane.pdb` will be listed.
5. Only the file `octane.pdb` will be listed.
### [Exercise] Saving to a File in a Loop - Part One
In the `data-shell/molecules` directory, what is the effect of this loop?
```
$ for alkanes in *.pdb
> do
> echo $alkanes
> cat $alkanes > alkanes.pdb
> done
```
1. Prints `cubane.pdb`, `ethane.pdb`, `methane.pdb`, `octane.pdb`, `pentane.pdb` and `propane.pdb`, and the text from `propane.pdb` will be saved to a file called `alkanes.pdb`.
2. Prints `cubane.pdb`, `ethane.pdb`, and `methane.pdb`, and the text from all three files would be concatenated and saved to a file called `alkanes.pdb`.
3. Prints `cubane.pdb`, `ethane.pdb`, `methane.pdb`, `octane.pdb`, and `pentane.pdb`, and the text from `propane.pdb` will be saved to a file called `alkanes.pdb`.
4. None of the above.
### [Exercise] Saving to a File in a Loop - Part Two
Also in the `data-shell/molecules` directory, what would be the output of the following loop?
```
$ for datafile in *.pdb
> do
> cat $datafile >> all.pdb
> done
```
1. All of the text from `cubane.pdb`, `ethane.pdb`, `methane.pdb`, `octane.pdb`, and `pentane.pdb` would be concatenated and saved to a file called `all.pdb`.
2. The text from `ethane.pdb` will be saved to a file called `all.pdb`.
3. All of the text from `cubane.pdb`, `ethane.pdb`, `methane.pdb`, `octane.pdb`, `pentane.pdb` and `propane.pdb` would be concatenated and saved to a file called `all.pdb`.
4. All of the text from `cubane.pdb`, `ethane.pdb`, `methane.pdb`, `octane.pdb`, `pentane.pdb` and `propane.pdb` would be printed to the screen and saved to a file called `all.pdb`.
### Additional exercises
If you were quick, check out these exercises to dig a little more into details.
### [Exercise] Doing a Dry Run
A loop is a way to do many things at once — or to make many mistakes at once if it does the wrong thing. One way to check what a loop would do is to `echo` the commands it would run instead of actually running them.
Suppose we want to preview the commands the following loop will execute without actually running those commands:
```
$ for file in *.pdb
> do
> analyze $file > analyzed-$file
> done
```
What is the difference between the two loops below, and which one would we want to run?
```
# Version 1
$ for file in *.pdb
> do
> echo analyze $file > analyzed-$file
> done
```
```
# Version 2
$ for file in *.pdb
> do
> echo "analyze $file > analyzed-$file"
> done
```
### [Exercise] Nested Loops
Suppose we want to set up a directory structure to organize some experiments measuring reaction rate constants with different compounds *and* different temperatures. What would be the result of the following code:
```
$ for species in cubane ethane methane
> do
> for temperature in 25 30 37 40
> do
> mkdir $species-$temperature
> done
> done
```
### Key Points for Loops
* A `for` loop repeats commands once for every thing in a list.
* Every `for` loop needs a variable to refer to the thing it is currently operating on.
* Use `$name` to expand a variable (i.e., get its value). `${name}` can also be used.
* Do not use spaces, quotes, or wildcard characters such as ‘\*’ or ‘?’ in filenames, as it complicates variable expansion.
* Give files consistent names that are easy to match with wildcard patterns to make it easy to select them for looping.
* Use the up-arrow key to scroll up through previous commands to edit and repeat them.
* Use `Ctrl-R` to search through the previously entered commands.
* Use `history` to display recent commands, and `!number` to repeat a command by number.
## Shell Scripts
### [Exercise] List Unique Species
Leah has several hundred data files, each of which is formatted like this:
```
2013-11-05,deer,5
2013-11-05,rabbit,22
2013-11-05,raccoon,7
2013-11-06,rabbit,19
2013-11-06,deer,2
2013-11-06,fox,1
2013-11-07,rabbit,18
2013-11-07,bear,1
```
An example of this type of file is given in `data-shell/data/animal-counts/animals.txt`.
Write a shell script called `species.sh` that takes any number of filenames as command-line arguments, and uses `cut`, `sort`, and `uniq` to print a list of the unique species appearing in each of those files separately.
### [Exercise] Why Record Commands in the History Before Running Them?
If you run the command:
```
$ history | tail -n 5 > recent.sh
```
the last command in the file is the `history` command itself, i.e., the shell has added `history` to the command log before actually running it. In fact, the shell always adds commands to the log before running them. Why do you think it does this?
### [Exercise] Variables in Shell Scripts
In the `molecules` directory, imagine you have a shell script called `script.sh` containing the following commands:
```
head -n $2 $1
tail -n $3 $1
```
While you are in the `molecules` directory, you type the following command:
`bash script.sh '*.pdb' 1 1`
Which of the following outputs would you expect to see?
1. All of the lines between the first and the last lines of each file ending in `.pdb` in the `molecules` directory
2. The first and the last line of each file ending in `.pdb` in the `molecules` directory
3. The first and the last line of each file in the `molecules` directory
4. An error because of the quotes around `*.pdb`
### [Exercise] Find the Longest File With a Given Extension
Write a shell script called `longest.sh` that takes the name of a directory and a filename extension as its arguments, and prints out the name of the file with the most lines in that directory with that extension. For example:
`$ bash longest.sh /tmp/data pdb`
would print the name of the `.pdb` file in `/tmp/data` that has the most lines.
### Additional exercises
If you were quick, check out these exercises to dig a little more into details.
### [Exercise] Script Reading Comprehension
For this question, consider the `data-shell/molecules` directory once again. This contains a number of `.pdb` files in addition to any other files you may have created. Explain what a script called `example.sh` would do when run as `bash example.sh *.pdb` if it contained the following lines:
```
# Script 1
echo *.*
# Script 2
for filename in $1 $2 $3
do
cat $filename
done
# Script 3
echo $@.pdb
```
### [Exercise] Debugging Scripts
Suppose you have saved the following script in a file called do-errors.sh in Nelle’s north-pacific-gyre/2012-07-03 directory:
```
# Calculate stats for data files.
for datafile in "$@"
do
echo $datfile
bash goostats $datafile stats-$datafile
done
```
When you run it:
`$ bash do-errors.sh NENE*[AB].txt`
the output is blank. To figure out why, re-run the script using the -x option:
`bash -x do-errors.sh NENE*[AB].txt`
What is the output showing you? Which line is responsible for the error?
## Finding Things
### [Exercise] Using `grep`
Which command would result in the following output:
```
and the presence of absence:
```
1. `grep "of" haiku.txt`
2. `grep -E "of" haiku.txt`
3. `grep -w "of" haiku.txt`
4. `grep -i "of" haiku.txt`
### [Exercise] Tracking a Species
Leah has several hundred data files saved in one directory, each of which is formatted like this:
```
2013-11-05,deer,5
2013-11-05,rabbit,22
2013-11-05,raccoon,7
2013-11-06,rabbit,19
2013-11-06,deer,2
```
She wants to write a shell script that takes a species as the first command-line argument and a directory as the second argument. The script should return one file called `species.txt` containing a list of dates and the number of that species seen on each date. For example using the data shown above, `rabbit.txt` would contain:
```
2013-11-05,22
2013-11-06,19
```
Put these commands and pipes in the right order to achieve this:
```
cut -d : -f 2
>
|
grep -w $1 -r $2
|
$1.txt
cut -d , -f 1,3
```
Hint: use `man grep` to look for how to grep text recursively in a directory and `man cut` to select more than one field in a line.
An example of such a file is provided in `data-shell/data/animal-counts/animals.txt`.
### [Exercise] Little Women
You and your friend, having just finished reading *Little Women* by Louisa May Alcott, are in an argument. Of the four sisters in the book, Jo, Meg, Beth, and Amy, your friend thinks that Jo was the most mentioned. You, however, are certain it was Amy. Luckily, you have a file `LittleWomen.txt` containing the full text of the novel (`data-shell/writing/data/LittleWomen.txt`). Using a `for` loop, how would you tabulate the number of times each of the four sisters is mentioned?
Hint: one solution might employ the commands `grep` and `wc` and a `|`, while another might utilize `grep` options. There is often more than one way to solve a programming task, so a particular solution is usually chosen based on a combination of yielding the correct result, elegance, readability, and speed.
### Additional exercises
If you were quick, check out these exercises to dig a little more into details.
### [Exercise] Matching and Subtracting
The `-v` flag to `grep` inverts pattern matching, so that only lines which do not match the pattern are printed. Given that, which of the following commands will find all files in `/data` whose names end in `s.txt` (e.g., `animals.txt` or `planets.txt`), but do not contain the word `net`? Once you have thought about your answer, you can test the commands in the `data-shell` directory.
1. `find data -name '*s.txt' | grep -v net`
2. `find data -name *s.txt | grep -v net`
3. `grep -v "temp" $(find data -name '*s.txt')`
4. None of the above.
### [Exercise] `find` Pipeline Reading Comprehension
Write a short explanatory comment for the following shell script:
```
wc -l $(find . -name '*.dat') | sort -n
```
### [Exercise] Finding Files With Different Properties
The `find` command can be given several other criteria known as “tests” to locate files with specific attributes, such as creation time, size, permissions, or ownership. Use `man find` to explore these, and then write a single command to find all files in or below the current directory that were modified by the user `ahmed` in the last 24 hours.
Hint 1: you will need to use three tests: `-type`, `-mtime`, and `-user`.
Hint 2: The value for `-mtime` will need to be negative—why?
### Key Points for Finding Things
* `find` finds files with specific properties that match patterns.
* `grep` selects lines in files that match patterns.
* `--help` is a flag supported by many bash commands, and programs that can be run from within Bash, to display more information on how to use these commands or programs.
* `man` command displays the manual page for a given command.
* `$(command)` inserts a command’s output in place.
### Additional resources
| resource | description |
| :------- | :---------- |
| https://explainshell.com | dissects any shell command you type in |
| https://tldr.sh | simplified and community-driven Shell manual pages |
| https://www.shellcheck.net | checks shell scripts for common errors |
| github_jupyter |
## Dependencies
```
import json, glob
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
```
# Load data
```
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
input_base_path = '/kaggle/input/114roberta-base/'
with open(input_base_path + 'config.json') as json_file:
config = json.load(json_file)
config
# vocab_path = input_base_path + 'vocab.json'
# merges_path = input_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
vocab_path = base_path + 'roberta-base-vocab.json'
merges_path = base_path + 'roberta-base-merges.txt'
config['base_model_path'] = base_path + 'roberta-base-tf_model.h5'
config['config_path'] = base_path + 'roberta-base-config.json'
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
lowercase=True, add_prefix_space=True)
```
# Pre process
```
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
test["text"] = test["text"].apply(lambda x: x.strip())
x_test = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build the RoBERTa span-extraction model.

    Two softmax heads predict the start and end token positions of the
    selected text. NOTE(review): relies on the notebook-level `config`,
    `module_config` and `tf` names being defined in earlier cells.
    """
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')

    base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})

    hidden = layers.Dropout(.1)(last_hidden_state)

    # Start-position head: per-token logit -> flatten -> softmax over positions.
    start_logits = layers.Flatten()(layers.Dense(1)(hidden))
    y_start = layers.Activation('softmax', name='y_start')(start_logits)

    # End-position head, same shape as the start head.
    end_logits = layers.Flatten()(layers.Dense(1)(hidden))
    y_end = layers.Activation('softmax', name='y_end')(end_logits)

    return Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end])
```
# Make predictions
```
# Ensemble the saved checkpoints: accumulate start/end probability
# distributions over all models. The sums are never divided by the model
# count, which is fine because only argmax is taken downstream.
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
for model_path in model_path_list:
    print(model_path)
    # Rebuild the graph and load this checkpoint's weights.
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    test_start_preds += test_preds[0]  # softmax over start positions
    test_end_preds += test_preds[1]    # softmax over end positions
```
# Post process
```
# Decode the ensembled predictions into selected_text spans.
test['start'] = test_start_preds.argmax(axis=-1)  # most likely start token index
test['end'] = test_end_preds.argmax(axis=-1)      # most likely end token index
test['text_len'] = test['text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
# Clamp the predicted indices so 0 <= start <= end <= text length.
test["end"].clip(0, test["text_len"], inplace=True)
test["start"].clip(0, test["end"], inplace=True)
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
# Fall back to the full tweet when decoding produced no text.
test["selected_text"].fillna(test["text"], inplace=True)
```
# Visualize predictions
```
display(test.head(10))
```
# Test set predictions
```
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
# Machine Learning and Statistics for Physicists
Material for a [UC Irvine](https://uci.edu/) course offered by the [Department of Physics and Astronomy](https://www.physics.uci.edu/).
Content is maintained on [github](https://github.com/dkirkby/MachineLearningStatistics) and distributed under a [BSD3 license](https://opensource.org/licenses/BSD-3-Clause).
##### ► [View table of contents](Contents.ipynb)
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
import pandas as pd
from mls import locate_data
from sklearn import model_selection, neighbors, tree, ensemble
import scipy.stats
```
## Case Study: Redshift Inference
Our goal is to predict the [cosmological redshift](https://en.wikipedia.org/wiki/Redshift) of a galaxy based on its brightness measured through 17 different filters. Redshift is a proxy for distance or, equivalently, look back time, so is a key observable for learning about past conditions in the universe.
### Load and Explore Data
Read the data to train and test on:
```
X = pd.read_hdf(locate_data('photoz_data.hf5'))
y = pd.read_hdf(locate_data('photoz_targets.hf5'))
X.describe()
y.describe()
sns.pairplot(X[:500], vars=X.columns.tolist()[:6]);
plt.hist(y['Z'], bins=np.arange(0, 6, 0.2))
plt.xlabel('Redshift $z$');
plt.ylabel('Galaxies / ($\Delta z=0.2$)');
```
### Split Data Randomly into Training and Testing Subsamples
```
gen = np.random.RandomState(seed=123)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=gen)
print(f'{len(X)} = {len(X_train)} TRAIN + {len(X_test)} TEST')
```
### Nearest Neighbor Regression
Use the K-nearest neighbors (KNN) of an input sample to estimate its properties with [KNeighborsRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor):
```
knn_fit = neighbors.KNeighborsRegressor(n_jobs=8).fit(X_train, y_train)
```
Scores are calculated using the [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination) $R^2$, for which perfect accuracy is $R^2 = 1$:
```
knn_fit.score(X_train, y_train), knn_fit.score(X_test, y_test)
knn_fit.n_neighbors
```
#### Hyperparameter Optimization
The main hyperparameter is the value of K: the number of nearest neighbors that contribute to the final decision.
```
def knn_study(n=(1, 2, 4, 6, 8, 12, 16), max_score_samples=2000):
    """Plot train/test R^2 of KNN regression as a function of n_neighbors."""
    scores_train = []
    scores_test = []
    for k in n:
        model = neighbors.KNeighborsRegressor(n_neighbors=k, n_jobs=8).fit(X_train, y_train)
        # Score on a subsample only: KNN scoring is expensive.
        scores_train.append(model.score(X_train[:max_score_samples], y_train[:max_score_samples]))
        scores_test.append(model.score(X_test[:max_score_samples], y_test[:max_score_samples]))
    plt.plot(n, scores_train, 'rx-', label='TRAIN')
    plt.plot(n, scores_test, 'bo-', label='TEST')
    plt.xlabel('KNN n_neighbors')
    plt.ylabel('KNN $R^2$ score')
    plt.legend()
knn_study()
```
### Decision Tree Regression
Use a [binary decision tree](https://en.wikipedia.org/wiki/Decision_tree_learning) to sort each input sample into a small "peer group" with [DecisionTreeRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html). Note the similarity with KNN, except now we ask a set of questions to identify the "peer group", instead of using nearest neighbors.
```
gen = np.random.RandomState(seed=123)
tree_fit = tree.DecisionTreeRegressor(random_state=gen).fit(X_train, y_train)
tree_fit.score(X_train, y_train), tree_fit.score(X_test, y_test)
tree_fit.tree_.max_depth
```
#### Feature Importance
```
importance = pd.DataFrame(
{'feature': X.columns, 'importance': tree_fit.feature_importances_}
).sort_values(by='importance', ascending=False)
importance.plot('feature', 'importance', 'barh', figsize=(10, 10), legend=False);
```
Re-train using only the 8 most important features:
```
importance[:8]
best_features = importance[:8]['feature']
```
The re-trained tree is much simpler and almost equally accurate on the test data:
```
tree_fit = tree.DecisionTreeRegressor(random_state=gen).fit(X_train[best_features], y_train)
tree_fit.score(X_train[best_features], y_train), tree_fit.score(X_test[best_features], y_test)
tree_fit.tree_.max_depth
```
#### Hyperparameter Optimization
```
def tree_study(n=(3, 4, 5, 6, 8, 10, 15, 20, 25, 30, 35), seed=123):
    """Plot train/test R^2 of a decision tree as a function of max_depth."""
    gen = np.random.RandomState(seed)
    scores_train = []
    scores_test = []
    for depth in n:
        model = tree.DecisionTreeRegressor(max_depth=depth, random_state=gen).fit(X_train[best_features], y_train)
        scores_train.append(model.score(X_train[best_features], y_train))
        scores_test.append(model.score(X_test[best_features], y_test))
    plt.plot(n, scores_train, 'rx-', label='TRAIN')
    plt.plot(n, scores_test, 'bo-', label='TEST')
    plt.xlabel('DecisionTree max_depth')
    plt.ylabel('DecisionTree $R^2$ score')
    plt.legend()
tree_study()
```
Chose a `max_depth` of 5 to minimize overfitting the training data (or choose 10 to balance overfitting with accuracy on the test data):
```
gen = np.random.RandomState(seed=123)
tree_fit = tree.DecisionTreeRegressor(max_depth=5, random_state=gen).fit(X_train[best_features], y_train)
tree_fit.score(X_train[best_features], y_train), tree_fit.score(X_test[best_features], y_test)
```
Note that a tree of depth $n$ sorts each sample into one of $2^n$ leaf nodes, each with a fixed prediction. This leads to a visible discretization error for small $n$, which is not necessarily a problem if the uncertainties are even larger:
```
y_predict = tree_fit.predict(X_test[best_features])
plt.scatter(y_test, y_predict, lw=0)
plt.xlabel('Target value')
plt.ylabel('Predicted value');
```
#### Tree Visualization
```
tree.export_graphviz(tree_fit, out_file='tree.dot')
!dot -Tpng tree.dot -o tree.png
def plot_branch(path=[], fit=tree_fit, X=X_train[best_features], y=y_train.values):
    """Visualize one branch of a fitted sklearn decision tree.

    Follows *path* (list of booleans: True = go left / below the node's
    threshold) from the root, then plots for the node reached:
    left panel  - the split feature's distribution on each side of the cut;
    right panel - the target distribution on each side, with their means.

    NOTE(review): the mutable default `path=[]` is harmless here (never
    mutated); the `fit`/`X`/`y` defaults are evaluated once at definition
    time against the notebook globals.
    """
    tree = fit.tree_                     # low-level sklearn tree structure
    n_nodes = tree.node_count
    children_left = tree.children_left
    children_right = tree.children_right
    feature = tree.feature               # feature index tested at each node
    threshold = tree.threshold           # split threshold at each node
    # Traverse the tree using the specified path.
    node = 0
    sel = np.ones(len(X), bool)          # mask of samples reaching the current node
    cut = threshold[node]
    x = X.iloc[:, feature[node]]         # feature column tested at the current node
    print('nsel', np.count_nonzero(sel), 'cut', cut, 'value', np.mean(y[sel]))
    for below_threshold in path:
        if below_threshold:
            sel = sel & (x <= cut)
            node = children_left[node]
        else:
            sel = sel & (x > cut)
            node = children_right[node]
        cut = threshold[node]
        x = X.iloc[:, feature[node]]
        print('nsel', np.count_nonzero(sel), 'cut', cut, 'value', np.mean(y[sel]))
    fig, ax = plt.subplots(1, 2, figsize=(12, 6))
    xlim = np.percentile(x[sel], (1, 95))
    # Split the samples at this node by its threshold.
    below = sel & (x <= cut)
    above = sel & (x > cut)
    # Left panel: split-feature distribution on each side of the cut.
    ax[0].hist(x[below], range=xlim, bins=50, histtype='stepfilled', color='r', alpha=0.5)
    ax[0].hist(x[above], range=xlim, bins=50, histtype='stepfilled', color='b', alpha=0.5)
    ax[0].set_xlim(*xlim)
    ax[0].set_xlabel(X.columns[feature[node]])
    ylim = np.percentile(y, (1, 99))
    # Piecewise-constant prediction implied by this split (per-side mean).
    y_pred = np.empty_like(y)
    y_pred[below] = np.mean(y[below])
    y_pred[above] = np.mean(y[above])
    mse2 = np.mean((y[sel] - y_pred[sel]) ** 2)
    n_below = np.count_nonzero(below)
    n_above = np.count_nonzero(above)
    # Weighted within-side variance: equivalent formulation of the split MSE.
    mse = (np.var(y[below]) * n_below + np.var(y[above]) * n_above) / (n_below + n_above)
    #print('mse', mse, mse2)
    # Right panel: target distribution on each side, with dashed means.
    ax[1].hist(y[below], range=ylim, bins=25, histtype='stepfilled', color='r', alpha=0.5)
    ax[1].axvline(np.mean(y[below]), c='r', ls='--')
    ax[1].hist(y[above], range=ylim, bins=25, histtype='stepfilled', color='b', alpha=0.5)
    ax[1].axvline(np.mean(y[above]), c='b', ls='--')
    ax[1].set_xlabel('Redshift target')
plot_branch([])
plot_branch([True,])
plot_branch([False,])
```
### Random Forest Regression
Use an ensemble of decision trees that are individually less accurate but collectively more accurate, with [RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html). The individual trees are trained on random sub-samples of the data and the resulting "forest" of predictions are averaged. The random subsets for each tree are created by:
- Using a "bootstrap" resampling of the rows, and
- Finding the best split at each branch from a random subset of `max_features` features (columns).
```
gen = np.random.RandomState(seed=123)
forest_fit = ensemble.RandomForestRegressor(
n_estimators=15, max_features=0.5, random_state=gen, n_jobs=8).fit(X_train, y_train.values.reshape(-1))
forest_fit.score(X_train, y_train), forest_fit.score(X_test, y_test)
```
Compare the first branch for two of the trees in our forest:
```
plot_branch(fit=forest_fit.estimators_[0], X=X_train)
plot_branch(fit=forest_fit.estimators_[1], X=X_train)
```
#### Hyperparameter Optimization
```
def forest_study(n=(1, 2, 3, 5, 10, 15, 20, 25, 30), seed=123):
    """Plot train/test R^2 of a random forest as a function of n_estimators."""
    gen = np.random.RandomState(seed)
    scores_train = []
    scores_test = []
    for n_estimators in n:
        model = ensemble.RandomForestRegressor(
            n_estimators=n_estimators, max_features=0.5, random_state=gen, n_jobs=8).fit(
            X_train, y_train.values.reshape(-1))
        scores_train.append(model.score(X_train, y_train))
        scores_test.append(model.score(X_test, y_test))
    plt.plot(n, scores_train, 'rx-', label='TRAIN')
    plt.plot(n, scores_test, 'bo-', label='TEST')
    plt.xlabel('RandomForest n_estimators')
    plt.ylabel('RandomForest $R^2$ score')
    plt.legend()
forest_study()
```
#### Feature Importance (again)
```
importance = pd.DataFrame(
{'feature': X.columns, 'importance': forest_fit.feature_importances_}
).sort_values(by='importance', ascending=False)
importance.plot('feature', 'importance', 'barh', figsize=(10, 10), legend=False);
```
#### Prediction uncertainty
Since we now have multiple predictions for each sample, we can use their spread as an estimate of the uncertainty in the mean prediction:
```
y_pred = forest_fit.predict(X_test)
y_pred_each = np.array([tree.predict(X_test) for tree in forest_fit.estimators_])
y_pred_each.shape
np.all(y_pred == np.mean(y_pred_each, axis=0))
y_pred_error = y_test.values.reshape(-1) - y_pred
y_pred_spread = np.std(y_pred_each, axis=0)
plt.scatter(np.abs(y_pred_error), y_pred_spread, lw=0)
plt.xlabel('$|y_{true} - y_{pred}|$')
plt.ylabel('Forest prediction spread')
plt.xlim(0, 3)
plt.ylim(0, 3);
bins = np.linspace(-2.5, 2.5, 50)
plt.hist(y_pred_error / y_pred_spread, bins=bins, density=True)
pull = 0.5 * (bins[1:] + bins[:-1])
plt.plot(pull, scipy.stats.norm.pdf(pull), 'r-', label='$\sigma=$ spread')
correction = 2.0
plt.plot(pull, correction * scipy.stats.norm.pdf(correction * pull), 'r--',
label=('$\sigma=%.1f \\times$' % correction) + 'spread')
plt.legend()
plt.xlabel('pull = dy / $\sigma$')
```
#### "Out-of-bag" Testing
Combining the trees in a forest is known as "bagging". Since each tree leaves out some samples, we can use these omitted (aka "out-of-bag") samples to test our model. This means we no longer need to set aside a separate test dataset and can use all of our data for training the forest.
*Technical note: since RandomForestRegressor does not support a max_samples parameter, the out-of-bag samples are only due to bootstrap sampling with replacement, which generally needs more estimators for reasonable statistics.*
```
gen = np.random.RandomState(seed=123)
forest_fit = ensemble.RandomForestRegressor(
n_estimators=100, max_features=0.5, oob_score=True, random_state=gen, n_jobs=8).fit(X, y.values.reshape(-1))
forest_fit.score(X_train, y_train), forest_fit.oob_score_
```
| github_jupyter |
# Simple Go-To-Goal for Cerus
The following code implements a simple go-to-goal behavior for Cerus. It uses a closed feedback loop to continuously assess Cerus' state (position and heading) in the world using data from two wheel encoders. It subsequently calculates the error between a given goal location and its current pose and will attempt to minimize the error until it reaches the goal location. A P-regulator (see PID regulator) script uses the error as an input and outputs the angular velocity for the Arduino and motor controllers that drive the robot.
All models used in this program are adapted from Georgia Tech's "Control of Mobile Robots" by Dr. Magnus Egerstedt.
```
#Import useful libraries
import serial
import time
import math
import numpy as np
from traitlets import HasTraits, List
#Open a serial connection with the Arduino Mega
#Opening a serial port on the Arduino resets it, so our encoder count is also reset to 0,0
ser = serial.Serial('COM3', 115200)
#Defining our goal location. Units are metric, real-world coordinates in an X/Y coordinate system
goal_x = 1
goal_y = 0
#Create a class for our Cerus robot
class Cerus():
    """Differential-drive robot state: world-frame pose plus fixed physical dimensions."""

    def __init__(self, pose_x, pose_y, pose_phi, R_wheel, N_ticks, L_track):
        # World-frame pose.
        self.pose_x = pose_x      # X position (m)
        self.pose_y = pose_y      # Y position (m)
        self.pose_phi = pose_phi  # heading (rad)
        # Physical constants used by the odometry update.
        self.R_wheel = R_wheel    # wheel radius (m)
        self.N_ticks = N_ticks    # encoder ticks per wheel revolution
        self.L_track = L_track    # wheel track (m)
#Create a Cerus instance and initialize it to a 0,0,0 world position and with some physical dimensions
cerus = Cerus(0,0,0,0.03,900,0.23)
```
We'll use the Traitlets library to implement an observer pattern that will recalculate the pose of the robot every time an update to the encoder values is detected and sent to the Jetson nano by the Arduino.
```
#Create an encoder class with traits
class Encoders(HasTraits):
    """Observable wrapper around the latest [left, right] encoder tick counts.

    Built on traitlets so that registered observers fire whenever
    `encoderValues` is reassigned.
    """
    encoderValues = List()  # latest [left, right] tick counts (observed trait)

    def __init__(self, encoderValues, deltaTicks):
        self.encoderValues = encoderValues
        self.deltaTicks = deltaTicks  # NOTE(review): stored but never read elsewhere in this notebook
#Create an encoder instance
encoders = Encoders([0,0], [0,0])
# Observer callback: fires when the encoderValues trait is reassigned.
def monitorEncoders(change):
    """Recompute the robot pose whenever new encoder values arrive.

    `change` is the traitlets change dictionary; 'old' and 'new' hold
    [left, right] tick-count lists.
    """
    if change['new']:
        oldVals = np.array(change['old'])
        newVals = np.array(change['new'])
        # Ticks accumulated since the previous reading.
        deltaTicks = newVals - oldVals
        #print("Old values: ", oldVals)
        #print("New values: ", newVals)
        #print("Delta values: ", deltaTicks)
        calculatePose(deltaTicks)

# Recalculate the pose every time the encoder trait changes.
encoders.observe(monitorEncoders, names = "encoderValues")
```
The functions below are helpers and will be called through our main loop.
```
def move(linearVelocity, angularVelocity):
    """Send a '<linear,angular>' velocity command over serial to the Arduino."""
    packet = f"<{linearVelocity},{angularVelocity}>"
    ser.write(packet.encode())
#Create a function that calculates an updated pose of Cerus every time it is called
def calculatePose(deltaTicks):
    """Dead-reckoning update of the global `cerus` pose from [left, right] tick deltas."""
    # Arc length travelled by each wheel since the last update.
    left_dist = 2 * math.pi * cerus.R_wheel * (deltaTicks[0] / cerus.N_ticks)
    right_dist = 2 * math.pi * cerus.R_wheel * (deltaTicks[1] / cerus.N_ticks)
    center_dist = (left_dist + right_dist) / 2
    # Advance along the current heading, then update the heading from the
    # differential wheel travel (unicycle odometry model).
    cerus.pose_x = round(cerus.pose_x + center_dist * math.cos(cerus.pose_phi), 4)
    cerus.pose_y = round(cerus.pose_y + center_dist * math.sin(cerus.pose_phi), 4)
    cerus.pose_phi = round(cerus.pose_phi + (right_dist - left_dist) / cerus.L_track, 4)
    print(f"The new position is {cerus.pose_x}, {cerus.pose_y} and the new heading is {cerus.pose_phi}.")
#Calculate the error between Cerus' heading and the goal point
def calculateError():
    """Return (error_x, error_y, error_heading) between Cerus' pose and the goal.

    FIX: use math.atan2 instead of math.atan so the desired heading is
    correct in all four quadrants and no ZeroDivisionError is raised when
    goal_x equals the current pose_x. Reads the module-level `goal_x`,
    `goal_y` and `cerus` state.
    """
    phi_desired = math.atan2(goal_y - cerus.pose_y, goal_x - cerus.pose_x)
    diff = phi_desired - cerus.pose_phi
    # Wrap the heading error into [-pi, pi].
    error_heading = round(math.atan2(math.sin(diff), math.cos(diff)), 4)
    error_x = round(goal_x - cerus.pose_x, 4)
    error_y = round(goal_y - cerus.pose_y, 4)
    return error_x, error_y, error_heading
atGoal = False
constVel = 0.2
K = 1 #constant for our P-regulator below
#Functions to read and format encoder data received from the Serial port
def formatData(data):
    """Parse a serial line of the form '<left>,<right>' into integer tick
    counts and publish them on the observed `encoders.encoderValues` trait.

    NOTE(review): if either side of the comma contains no digits, int('')
    raises ValueError — this assumes the Arduino always sends two
    comma-separated numbers per line. Confirm against the firmware.
    """
    delimiter = "x"  # sentinel; switches to "," once the comma is seen
    leftVal = ""
    rightVal = ""
    # Accumulate digit characters before/after the comma separately.
    for i in range(len(data)):
        if data[i] == ",":
            delimiter = ","
        elif delimiter != "," and data[i].isdigit():
            leftVal += data[i]
        elif delimiter == "," and data[i].isdigit():
            rightVal += data[i]
    leftVal, rightVal = int(leftVal), int(rightVal)
    # Reassigning the trait triggers monitorEncoders -> pose update.
    encoders.encoderValues = [leftVal, rightVal]
    print("Encoders: ", encoders.encoderValues)
def handleSerial():
    """Read one encoder line from serial (if available) and run one
    go-to-goal control step.

    NOTE(review): this draft has several apparent defects, duplicated in
    moveRobot() below:
      - `omega` is computed but move() is called with angular velocity 0.0,
        so the P-regulator output is never applied;
      - `atGoal = True` binds a local name (no `global atGoal`), so the
        module-level loop flag never changes;
      - the else-branch calls handleSerial() recursively, re-reading the
        serial buffer within the same control step.
    """
    #ser.readline() waits for the next line of encoder data, which is sent by Arduino every 50 ms
    if ser.inWaiting():
        #Get the serial data and format it
        temp = ser.readline()
        data = temp.decode()
        formatData(data)
        #Calculate the current pose to goal error
        error_x, error_y, error_heading = calculateError()
        print(f"Error X: {error_x}, Error Y: {error_y}")
        #If we're within 5 cm of the goal
        if error_x <= 0.05:# and error_y <= 0.05:
            print("Goal reached!")
            move(0.0,0.0)
            time.sleep(0.1)
            atGoal = True
        #Otherwise keep driving
        else:
            omega = - (K * error_heading)
            handleSerial()
            move(constVel, 0.0)
            print("Moving at angular speed: ", omega)
def moveRobot():
    """One control-loop iteration: read encoders, update pose, command motors.

    Fixes over the original draft:
      - declare `atGoal` global so reaching the goal actually stops the
        main `while not atGoal` loop;
      - steer with the P-regulator output `omega` (it was computed but a
        constant 0.0 angular velocity was sent, contradicting the print);
      - drop the stray nested handleSerial() call, which re-read and
        re-processed the serial buffer inside the same iteration.
    """
    global atGoal
    # The Arduino sends data every 50 ms; only act when a line is buffered.
    if ser.inWaiting():
        # Get the serial data and format it (updates the observed encoder
        # trait, which in turn recomputes the pose).
        data = ser.readline().decode()
        formatData(data)
        # Error between the current pose and the goal.
        error_x, error_y, error_heading = calculateError()
        print(f"Error X: {error_x}, Error Y: {error_y}")
        # If we're within 5 cm of the goal, stop.
        if error_x <= 0.05:  # TODO(review): also require error_y <= 0.05 for 2-D goals
            print("Goal reached!")
            move(0.0, 0.0)
            time.sleep(0.1)
            atGoal = True
        else:
            # P-regulator: angular velocity proportional to the heading error.
            omega = - (K * error_heading)
            move(constVel, omega)
            print("Moving at angular speed: ", omega)
```
This is the main part for our program that will loop over and over until Cerus has reached its goal. For our simple go-to-goal behavior, we will drive the robot at a constant speed and only adjust our heading so that we reach the goal location.
__WARNING: This will move the robot!__
```
while not atGoal:
try:
moveRobot()
except(KeyboardInterrupt):
print("Program interrupted by user!")
move(0.0,0.0) #Stop motors
break
"Loop exited..."
move(0.0,0.0) #Stop motors
#Close the serial connection when done
ser.close()
atGoal = False
constVel = 0.2
K = 1 #constant for our P-regulator below
while not atGoal:
try:
#Calculate the current pose to goal error
error_x, error_y, error_heading = calculateError()
print(f"Error X: {error_x}, Error Y: {error_y}")
#If we're within 5 cm of the goal
if error_x <= 0.05 and error_y <= 0.05:
print("Goal reached!")
move(0.0,0.0)
time.sleep(0.1)
atGoal = True
#Otherwise keep driving
else:
omega = - (K * error_heading)
handleSerial()
move(constVel, 0.0)
print("Moving at angular speed: ", omega)
except(KeyboardInterrupt):
print("Program interrupted by user!")
move(0.0,0.0) #Stop motors
break
"Loop exited..."
move(0.0,0.0) #Stop motors
```
| github_jupyter |
# Chatbot Tutorial
- https://pytorch.org/tutorials/beginner/chatbot_tutorial.html
```
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda' if USE_CUDA else 'cpu')
```
## データの前処理
```
corpus_name = 'cornell_movie_dialogs_corpus'
corpus = os.path.join('data', corpus_name)
def printLines(file, n=10):
    """Print the first *n* raw (bytes) lines of *file*."""
    with open(file, 'rb') as datafile:
        lines = datafile.readlines()
    for line in lines[:n]:
        print(line)
printLines(os.path.join(corpus, 'movie_lines.txt'))
# Parse movie_lines.txt into a {lineID: {field: value}} dictionary.
def loadLines(fileName, fields):
    """Split each ' +++$+++ '-separated line into a dict keyed by *fields*."""
    lines = {}
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for raw in f:
            values = raw.split(' +++$+++ ')
            # Match values positionally against the requested field names.
            lineObj = {field: values[i] for i, field in enumerate(fields)}
            lines[lineObj['lineID']] = lineObj
    return lines
MOVIE_LINES_FIELDS = ['lineID', 'characterID', 'movieID', 'character', 'text']
lines = loadLines(os.path.join(corpus, 'movie_lines.txt'), MOVIE_LINES_FIELDS)
lines['L1045']
# Group the parsed lines into conversations using movie_conversations.txt.
def loadConversations(fileName, lines, fields):
    """Return a list of conversation dicts, each with its 'lines' expanded."""
    conversations = []
    # Compiled once: utteranceIDs looks like "['L1', 'L2', ...]".
    id_pattern = re.compile('L[0-9]+')
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for raw in f:
            values = raw.split(' +++$+++ ')
            convObj = {field: values[i] for i, field in enumerate(fields)}
            # Convert the stringified ID list into actual line dicts.
            lineIds = id_pattern.findall(convObj['utteranceIDs'])
            convObj['lines'] = [lines[lineId] for lineId in lineIds]
            conversations.append(convObj)
    return conversations
MOVIE_CONVERSATIONS_FIELDS = ['character1ID', 'character2ID', 'movieID', 'utteranceIDs']
conversations = loadConversations(os.path.join(corpus, 'movie_conversations.txt'), lines, MOVIE_CONVERSATIONS_FIELDS)
# utteranceIDsの会話系列IDがlinesに展開されていることがわかる!
conversations[0]
# Turn each conversation into (query, response) sentence pairs.
def extractSentencePairs(conversations):
    """Pair every utterance with the next one; skip pairs with an empty side."""
    qa_pairs = []
    for conversation in conversations:
        turns = conversation['lines']
        # The final utterance has no answer, so it only appears as a target.
        for cur, nxt in zip(turns, turns[1:]):
            inputLine = cur['text'].strip()
            targetLine = nxt['text'].strip()
            if inputLine and targetLine:
                qa_pairs.append([inputLine, targetLine])
    return qa_pairs
datafile = os.path.join(corpus, 'formatted_movie_lines.txt')
delimiter = '\t'
delimiter = str(codecs.decode(delimiter, 'unicode_escape'))
with open(datafile, 'w', encoding='utf-8') as outputfile:
writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n')
for pair in extractSentencePairs(conversations):
writer.writerow(pair)
printLines(datafile)
```
## Vocabularyの構築
```
# Default word tokens
PAD_token = 0  # padding for short sentences
SOS_token = 1  # start-of-sentence marker
EOS_token = 2  # end-of-sentence marker


class Voc:
    """Vocabulary: word <-> index mappings plus per-word usage counts."""

    def __init__(self, name):
        self.name = name
        self.trimmed = False  # guard so trim() runs at most once
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: 'PAD', SOS_token: 'SOS', EOS_token: 'EOS'}
        self.num_words = 3  # counts the PAD/SOS/EOS entries

    def addSentence(self, sentence):
        """Register every space-separated word of *sentence*."""
        for token in sentence.split(' '):
            self.addWord(token)

    def addWord(self, word):
        """Add *word* to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            self.word2index[word] = self.num_words
            self.word2count[word] = 1
            self.index2word[self.num_words] = word
            self.num_words += 1

    def trim(self, min_count):
        """Drop words seen fewer than *min_count* times and re-index the rest."""
        if self.trimmed:
            return
        self.trimmed = True
        keep_words = [w for w, c in self.word2count.items() if c >= min_count]
        print('keep_words {} / {} = {:.4f}'.format(len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)))
        # Rebuild the dictionaries from scratch with only the kept words
        # (counts reset to 1, matching the original behavior).
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: 'PAD', SOS_token: 'SOS', EOS_token: 'EOS'}
        self.num_words = 3
        for w in keep_words:
            self.addWord(w)
MAX_LENGTH = 10 # Maximum sentence length to consider
# Turn a Unicode string into plain ASCII by stripping combining marks.
def unicodeToAscii(s):
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')


# Lowercase, trim, pad punctuation with spaces, and drop non-letter characters.
def normalizeString(s):
    s = unicodeToAscii(s.lower().strip())
    s = re.sub(r"([.!?])", r" \1", s)      # make ., ! and ? separate tokens
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)  # everything else becomes a space
    s = re.sub(r"\s+", r" ", s).strip()    # collapse runs of whitespace
    return s
# Read query/response pairs and return a voc object
def readVocs(datafile, corpus_name):
    """Read the formatted tab-separated pair file; return (empty Voc, normalized pairs)."""
    # Read the file and split into lines
    lines = open(datafile, encoding='utf-8').read().strip().split('\n')
    # Split every line into a [query, response] pair and normalize both sides
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    voc = Voc(corpus_name)
    return voc, pairs
# Returns True iff both sentences in a pair are under the MAX_LENGTH threshold.
def filterPair(p):
    # Input sequences need to preserve the last slot for the EOS token.
    query, response = p[0], p[1]
    return len(query.split(' ')) < MAX_LENGTH and len(response.split(' ')) < MAX_LENGTH


# Keep only the pairs that satisfy filterPair.
def filterPairs(pairs):
    return list(filter(filterPair, pairs))
# Using the functions defined above, return a populated voc object and pairs list
# NOTE(review): the save_dir parameter is unused
def loadPrepareData(corpus, corpus_name, datafile, save_dir):
    """Build the vocabulary and the length-filtered sentence pairs from *datafile*."""
    print('Start preparing training data ...')
    voc, pairs = readVocs(datafile, corpus_name)
    print('Read {!s} sentence pairs'.format(len(pairs)))
    # Drop pairs whose sentences reach MAX_LENGTH words.
    pairs = filterPairs(pairs)
    print('Trimmed to {!s} sentence pairs'.format(len(pairs)))
    print('Counting words...')
    for pair in pairs:
        voc.addSentence(pair[0])
        voc.addSentence(pair[1])
    print('Counted words:', voc.num_words)
    return voc, pairs
# Load/Assemble voc and pairs
save_dir = os.path.join('data', 'save')
voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
# Print some pairs to validate
print('\npairs:')
for pair in pairs[:10]:
print(pair)
MIN_COUNT = 3 # Minimum word count threshold for trimming
def trimRareWords(voc, pairs, MIN_COUNT):
    """Remove rare words from *voc*, then keep only the pairs whose
    sentences consist entirely of surviving words."""
    # Trim words used under MIN_COUNT times from the vocabulary.
    voc.trim(MIN_COUNT)

    def _all_known(sentence):
        # True when every space-separated word survived the trim.
        return all(word in voc.word2index for word in sentence.split(' '))

    # Keep pairs only when both the input and output sentences are clean.
    keep_pairs = [pair for pair in pairs if _all_known(pair[0]) and _all_known(pair[1])]
    print('Trimmed from {} pairs to {}, {:.4f} of total'.format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))
    return keep_pairs
# Trim voc and pairs
pairs = trimRareWords(voc, pairs, MIN_COUNT)
```
## Minibatchの構成
```
def indexesFromSentence(voc, sentence):
    # Map each word to its vocabulary index and terminate with EOS.
    indexes = [voc.word2index[word] for word in sentence.split(' ')]
    indexes.append(EOS_token)
    return indexes
def zeroPadding(l, fillvalue=PAD_token):
    # Transpose (batch, max_len) -> (max_len, batch), padding short rows
    # with the fill value.
    transposed = itertools.zip_longest(*l, fillvalue=fillvalue)
    return list(transposed)
def binaryMatrix(l, value=PAD_token):
    """Return a 0/1 mask for *l*: 0 where an entry equals *value* (padding),
    1 elsewhere.

    FIX: compare against the *value* parameter instead of the hard-coded
    PAD_token global, so a caller-supplied pad token is honored. Default
    behavior (value=PAD_token) is unchanged.
    """
    return [[0 if token == value else 1 for token in seq] for seq in l]
# Returns padded input sentence tensor and lengths
def inputVar(l, voc):
    """Convert a batch of sentences to a (max_len, batch) LongTensor plus
    a tensor of the original per-sentence lengths."""
    indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
    lengths = torch.tensor([len(indexes) for indexes in indexes_batch])
    padList = zeroPadding(indexes_batch)
    padVar = torch.LongTensor(padList)
    return padVar, lengths
# Returns padded target sequence tensor, padding mask, and max target length
# (the mask has the same shape as the output: 0 where PAD, 1 elsewhere)
def outputVar(l, voc):
    """Convert target sentences to a padded LongTensor, a ByteTensor padding
    mask, and the maximum target length in the batch."""
    indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
    max_target_len = max([len(indexes) for indexes in indexes_batch])
    padList = zeroPadding(indexes_batch)
    mask = binaryMatrix(padList)
    mask = torch.ByteTensor(mask)
    padVar = torch.LongTensor(padList)
    return padVar, mask, max_target_len
# Returns all items for a given batch of pairs
def batch2TrainData(voc, pair_batch):
    """Prepare one training batch: (input tensor, lengths, target tensor,
    padding mask, max target length)."""
    # Sort by input sentence length, longest first — required by
    # pack_padded_sequence in the encoder.
    pair_batch.sort(key=lambda x: len(x[0].split(' ')), reverse=True)
    input_batch, output_batch = [], []
    for pair in pair_batch:
        input_batch.append(pair[0])
        output_batch.append(pair[1])
    inp, lengths = inputVar(input_batch, voc)
    output, mask, max_target_len = outputVar(output_batch, voc)
    return inp, lengths, output, mask, max_target_len
# Example for validation
small_batch_size = 5
batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
print('input_variable:', input_variable)
print('lengths:', lengths)
print('target_variable:', target_variable)
print('mask:', mask)
print('max_target_len:', max_target_len)
```
## Seq2Seq Model
```
class EncoderRNN(nn.Module):
    """Bidirectional GRU encoder over a padded batch of word-index sequences.

    Per-step outputs from the forward and backward directions are summed,
    so the output feature size equals hidden_size.
    """

    def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = embedding
        # Inter-layer dropout is only meaningful when n_layers > 1.
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout), bidirectional=True)

    def forward(self, input_seq, input_lengths, hidden=None):
        """input_seq: (max_len, batch) word indices; returns (outputs, hidden)."""
        # Look up embeddings: (max_len, batch, hidden_size).
        embedded = self.embedding(input_seq)
        # Pack so the GRU skips padded positions.
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        gru_out, hidden = self.gru(packed, hidden)
        # Restore a padded (max_len, batch, 2 * hidden_size) tensor.
        gru_out, _ = nn.utils.rnn.pad_packed_sequence(gru_out)
        # Sum the forward and backward direction features.
        summed = gru_out[:, :, :self.hidden_size] + gru_out[:, :, self.hidden_size:]
        return summed, hidden
```
## Attention
```
class Attn(nn.Module):
    """Luong-style attention scoring layer ('dot', 'general' or 'concat')."""

    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()
        self.method = method
        if self.method not in ['dot', 'general', 'concat']:
            raise ValueError(self.method, 'is not an appropriate attention method.')
        self.hidden_size = hidden_size
        # Only 'general' and 'concat' need learnable parameters.
        if self.method == 'general':
            self.attn = nn.Linear(hidden_size, hidden_size)
        elif self.method == 'concat':
            self.attn = nn.Linear(hidden_size * 2, hidden_size)
            self.v = nn.Parameter(torch.FloatTensor(hidden_size))

    def dot_score(self, hidden, encoder_output):
        # score = h . h_s
        return torch.sum(hidden * encoder_output, dim=2)

    def general_score(self, hidden, encoder_output):
        # score = h . (W h_s)
        return torch.sum(hidden * self.attn(encoder_output), dim=2)

    def concat_score(self, hidden, encoder_output):
        # score = v . tanh(W [h ; h_s])
        expanded = hidden.expand(encoder_output.size(0), -1, -1)
        energy = self.attn(torch.cat((expanded, encoder_output), 2)).tanh()
        return torch.sum(self.v * energy, dim=2)

    def forward(self, hidden, encoder_outputs):
        """Return softmax attention weights of shape (batch, 1, seq_len)."""
        scorer = {'general': self.general_score,
                  'concat': self.concat_score,
                  'dot': self.dot_score}[self.method]
        # Transpose (seq_len, batch) energies to (batch, seq_len) before softmax.
        attn_energies = scorer(hidden, encoder_outputs).t()
        return F.softmax(attn_energies, dim=1).unsqueeze(1)
```
## Decoder
```
class LuongAttnDecoderRNN(nn.Module):
    """Single-step Luong attentional GRU decoder."""

    def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
        super(LuongAttnDecoderRNN, self).__init__()
        self.attn_model = attn_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout
        # Layers: embedding (+dropout), unidirectional GRU, Luong concat + output projection.
        self.embedding = embedding
        self.embedding_dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
                          dropout=(0 if n_layers == 1 else dropout))
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.attn = Attn(attn_model, hidden_size)

    def forward(self, input_step, last_hidden, encoder_outputs):
        """Decode one time step; return (softmax over vocabulary, new hidden state)."""
        # The decoder is run one time step at a time.
        embedded = self.embedding_dropout(self.embedding(input_step))
        rnn_output, hidden = self.gru(embedded, last_hidden)
        # Attention weights over the encoder outputs for this step.
        attn_weights = self.attn(rnn_output, encoder_outputs)
        # Weighted-sum context vector; input to Luong eq. 5.
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)).squeeze(1)
        step_output = rnn_output.squeeze(0)
        concat_output = torch.tanh(self.concat(torch.cat((step_output, context), 1)))
        # Luong eq. 6: project onto the vocabulary and normalise.
        output = F.softmax(self.out(concat_output), dim=1)
        return output, hidden
```
## Masked loss
```
def maskNLLLoss(inp, target, mask):
    """Masked negative log-likelihood loss for one decoder step.

    Args:
        inp: (batch, vocab) softmax probabilities.
        target: (batch,) gold word indices.
        mask: same length as target; nonzero where the position is a real (non-PAD) token.
    Returns:
        (mean NLL over unmasked positions, number of unmasked tokens)
    """
    nTotal = mask.sum()
    # Probability assigned to each gold token, then negative log.
    crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
    # .bool() keeps this working whether mask arrives as ByteTensor or BoolTensor;
    # uint8 masks are deprecated for masked_select in modern PyTorch.
    loss = crossEntropy.masked_select(mask.bool()).mean()
    loss = loss.to(device)
    return loss, nTotal.item()
```
## Training
```
def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding,
          encoder_optimizer, decoder_optimizer, batch_size, clip, max_length=MAX_LENGTH):
    # Placeholder: the single-batch training step (forward pass, masked loss,
    # gradient clipping, optimizer steps) is not implemented in this notebook.
    pass
```
| github_jupyter |
#### This project is a code along for the article I read here: https://www.analyticsvidhya.com/blog/2020/11/create-your-own-movie-movie-recommendation-system/
```
# Importing required libraries and packages
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import seaborn as sns
# Importing the datasets
movies = pd.read_csv('Movie_recommendation_datasets/movies.csv')
ratings = pd.read_csv("Movie_recommendation_datasets/ratings.csv")
movies.head()
ratings.head()
# Build the movie x user rating matrix; unrated cells become NaN.
final_dataset = ratings.pivot(index='movieId',columns='userId',values='rating')
final_dataset.head()
# Treat "not rated" as a 0 rating so the kNN similarity can be computed.
final_dataset.fillna(0,inplace=True)
final_dataset.head()
```
#### Visualizing the filters
```
# Votes per movie and ratings per user; used as popularity filters below.
no_user_voted = ratings.groupby('movieId')['rating'].agg('count')
no_movies_voted = ratings.groupby('userId')['rating'].agg('count')
f,ax = plt.subplots(1,1,figsize=(16,4))
# ratings['rating'].plot(kind='hist')
plt.scatter(no_user_voted.index,no_user_voted,color='mediumseagreen')
# Red line marks the threshold: movies with 10 or fewer votes get dropped later.
plt.axhline(y=10,color='r')
plt.xlabel('MovieId')
plt.ylabel('No. of users voted')
plt.show()
```
#### Making the necessary modifications as per the threshold set
```
# Keep only movies rated by more than 10 users.
final_dataset = final_dataset.loc[no_user_voted[no_user_voted > 10].index,:]
f,ax = plt.subplots(1,1,figsize=(16,4))
plt.scatter(no_movies_voted.index,no_movies_voted,color='mediumseagreen')
# Threshold line: users with 50 or fewer votes get dropped below.
plt.axhline(y=50,color='r')
plt.xlabel('UserId')
plt.ylabel('No. of votes by user')
plt.show()
# Keep only users who cast more than 50 votes.
final_dataset=final_dataset.loc[:,no_movies_voted[no_movies_voted > 50].index]
final_dataset
```
#### Removing sparsity
```
# Small worked example of sparsity and the CSR representation.
sample = np.array([[0, 0, 3, 0, 0],
                   [4, 0, 0, 0, 2],
                   [0, 0, 0, 0, 1]])
# Fraction of entries that are zero.
sparsity = 1.0 - np.count_nonzero(sample) / float(sample.size)
print(sparsity)
csr_sample = csr_matrix(sample)
print(csr_sample)
# Compressed sparse row form of the filtered rating matrix for the kNN model;
# reset_index turns movieId back into an ordinary column for lookups later.
csr_data = csr_matrix(final_dataset.values)
final_dataset.reset_index(inplace=True)
```
#### Making the movie recommendation system model
```
# Cosine-similarity kNN over the sparse rating vectors (brute-force search).
knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20, n_jobs=-1)
knn.fit(csr_data)
```
#### Making the recommendation function
```
def get_movie_recommendation(movie_name):
    """Return a DataFrame of the 10 movies most similar to *movie_name*.

    Looks the title up in ``movies``, maps it to its row in the rating matrix,
    and queries the fitted kNN model for the nearest neighbours.
    Returns an error string if no title matches.
    """
    n_movies_to_reccomend = 10
    # Bug fix: the boolean condition must sit inside the indexer.
    # movies[movies['title']].str.contains(...) tried to index the frame with
    # the title strings themselves and failed; this filters rows by title.
    movie_list = movies[movies['title'].str.contains(movie_name)]
    if len(movie_list):
        movie_idx = movie_list.iloc[0]['movieId']
        movie_idx = final_dataset[final_dataset['movieId'] == movie_idx].index[0]
        distances , indices = knn.kneighbors(csr_data[movie_idx],n_neighbors=n_movies_to_reccomend+1)
        # Drop the queried movie itself (distance 0) and sort farthest-first.
        rec_movie_indices = sorted(list(zip(indices.squeeze().tolist(),distances.squeeze().tolist())),key=lambda x: x[1])[:0:-1]
        recommend_frame = []
        for val in rec_movie_indices:
            movie_idx = final_dataset.iloc[val[0]]['movieId']
            idx = movies[movies['movieId'] == movie_idx].index
            recommend_frame.append({'Title':movies.iloc[idx]['title'].values[0],'Distance':val[1]})
        df = pd.DataFrame(recommend_frame,index=range(1,n_movies_to_reccomend+1))
        return df
    else:
        return "No movies found. Please check your input"
```
| github_jupyter |
# Work with Data
Data is the foundation on which machine learning models are built. Managing data centrally in the cloud, and making it accessible to teams of data scientists who are running experiments and training models on multiple workstations and compute targets is an important part of any professional data science solution.
In this notebook, you'll explore two Azure Machine Learning objects for working with data: *datastores*, and *datasets*.
## Connect to your workspace
To get started, connect to your workspace.
> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
```
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
# (config.json in the working directory; may prompt for interactive auth).
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
```
## Work with datastores
In Azure ML, *datastores* are references to storage locations, such as Azure Storage blob containers. Every workspace has a default datastore - usually the Azure storage blob container that was created with the workspace. If you need to work with data that is stored in different locations, you can add custom datastores to your workspace and set any of them to be the default.
### View datastores
Run the following code to determine the datastores in your workspace:
```
# Get the default datastore
default_ds = ws.get_default_datastore()
# Enumerate all datastores, indicating which is the default
# (iterating ws.datastores yields datastore names, not objects)
for ds_name in ws.datastores:
    print(ds_name, "- Default =", ds_name == default_ds.name)
```
You can also view and manage datastores in your workspace on the **Datastores** page for your workspace in [Azure Machine Learning studio](https://ml.azure.com).
### Upload data to a datastore
Now that you have determined the available datastores, you can upload files from your local file system to a datastore so that it will be accessible to experiments running in the workspace, regardless of where the experiment script is actually being run.
```
# Copy the local CSVs into the workspace's default blob datastore so any
# compute target can reach them.
# NOTE(review): upload_files is deprecated in newer azureml-core releases
# in favour of Dataset.File.upload_directory — confirm against the SDK version in use.
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
                        target_path='diabetes-data/', # Put it in a folder path in the datastore
                        overwrite=True, # Replace existing files of the same name
                        show_progress=True)
```
## Work with datasets
Azure Machine Learning provides an abstraction for data in the form of *datasets*. A dataset is a versioned reference to a specific set of data that you may want to use in an experiment. Datasets can be *tabular* or *file*-based.
### Create a tabular dataset
Let's create a dataset from the diabetes data you uploaded to the datastore, and view the first 20 records. In this case, the data is in a structured format in a CSV file, so we'll use a *tabular* dataset.
```
from azureml.core import Dataset

# Get the default datastore
default_ds = ws.get_default_datastore()

# Create a tabular dataset from the path on the datastore (this may take a short while);
# the glob matches every CSV previously uploaded under diabetes-data/.
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))

# Display the first 20 rows as a Pandas dataframe
tab_data_set.take(20).to_pandas_dataframe()
```
As you can see in the code above, it's easy to convert a tabular dataset to a Pandas dataframe, enabling you to work with the data using common python techniques.
### Create a file Dataset
The dataset you created is a *tabular* dataset that can be read as a dataframe containing all of the data in the structured files that are included in the dataset definition. This works well for tabular data, but in some machine learning scenarios you might need to work with data that is unstructured; or you may simply want to handle reading the data from files in your own code. To accomplish this, you can use a *file* dataset, which creates a list of file paths in a virtual mount point, which you can use to read the data in the files.
```
#Create a file dataset from the path on the datastore (this may take a short while)
file_data_set = Dataset.File.from_files(path=(default_ds, 'diabetes-data/*.csv'))

# Get the files in the dataset (paths are relative to the dataset's mount point)
for file_path in file_data_set.to_path():
    print(file_path)
```
### Register datasets
Now that you have created datasets that reference the diabetes data, you can register them to make them easily accessible to any experiment being run in the workspace.
We'll register the tabular dataset as **diabetes dataset**, and the file dataset as **diabetes files**.
```
# Register the tabular dataset; create_new_version=True bumps the version
# instead of failing when the name already exists.
try:
    tab_data_set = tab_data_set.register(workspace=ws, 
                                         name='diabetes dataset',
                                         description='diabetes data',
                                         tags = {'format':'CSV'},
                                         create_new_version=True)
except Exception as ex:
    # Registration failures are printed and the notebook continues.
    print(ex)

# Register the file dataset
try:
    file_data_set = file_data_set.register(workspace=ws,
                                           name='diabetes file dataset',
                                           description='diabetes files',
                                           tags = {'format':'CSV'},
                                           create_new_version=True)
except Exception as ex:
    print(ex)

print('Datasets registered')
```
You can view and manage datasets on the **Datasets** page for your workspace in [Azure Machine Learning studio](https://ml.azure.com). You can also get a list of datasets from the workspace object:
```
# Enumerate every dataset registered in the workspace with its latest version.
print("Datasets:")
for dataset_name in list(ws.datasets.keys()):
    dataset = Dataset.get_by_name(ws, dataset_name)
    print("\t", dataset.name, 'version', dataset.version)
```
The ability to version datasets enables you to redefine datasets without breaking existing experiments or pipelines that rely on previous definitions. By default, the latest version of a named dataset is returned, but you can retrieve a specific version of a dataset by specifying the version number, like this:
```python
dataset_v1 = Dataset.get_by_name(ws, 'diabetes dataset', version = 1)
```
### Train a model from a tabular dataset
Now that you have datasets, you're ready to start training models from them. You can pass datasets to scripts as *inputs* in the estimator being used to run the script.
Run the following two code cells to create:
1. A folder named **diabetes_training_from_tab_dataset**
2. A script that trains a classification model by using a tabular dataset that is passed to it as an argument.
```
import os

# Create a folder for the experiment files (the training script is written here next)
experiment_folder = 'diabetes_training_from_tab_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import os
import argparse
from azureml.core import Run, Dataset
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# Get the script arguments (regularization rate and training dataset ID)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument("--input-data", type=str, dest='training_dataset_id', help='training dataset')
args = parser.parse_args()
# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# Get the training dataset
print("Loading Data...")
diabetes = run.input_datasets['training_data'].to_pandas_dataframe()
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
> **Note**: In the script, the dataset is passed as a parameter (or argument). In the case of a tabular dataset, this argument will contain the ID of the registered dataset; so you could write code in the script to get the experiment's workspace from the run context, and then get the dataset using its ID; like this:
>
> ```
> run = Run.get_context()
> ws = run.experiment.workspace
> dataset = Dataset.get_by_id(ws, id=args.training_dataset_id)
> diabetes = dataset.to_pandas_dataframe()
> ```
>
> However, Azure Machine Learning runs automatically identify arguments that reference named datasets and add them to the run's **input_datasets** collection, so you can also retrieve the dataset from this collection by specifying its "friendly name" (which as you'll see shortly, is specified in the argument definition in the script run configuration for the experiment). This is the approach taken in the script above.
Now you can run a script as an experiment, defining an argument for the training dataset, which is read by the script.
> **Note**: The **Dataset** class depends on some components in the **azureml-dataprep** package, which includes optional support for **pandas** that is used by the **to_pandas_dataframe()** method. So you need to include this package in the environment where the training experiment will be run.
```
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.widgets import RunDetails

# Create a Python environment for the experiment
sklearn_env = Environment("sklearn-env")

# Ensure the required packages are installed (we need scikit-learn, Azure ML defaults, and Azure ML dataprep)
packages = CondaDependencies.create(conda_packages=['scikit-learn','pip'],
                                    pip_packages=['azureml-defaults','azureml-dataprep[pandas]'])
sklearn_env.python.conda_dependencies = packages

# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")

# Create a script config; as_named_input makes the dataset readable from
# run.input_datasets['training_data'] inside the script
script_config = ScriptRunConfig(source_directory=experiment_folder,
                                script='diabetes_training.py',
                                arguments = ['--regularization', 0.1, # Regularization rate parameter
                                             '--input-data', diabetes_ds.as_named_input('training_data')], # Reference to dataset
                                environment=sklearn_env)

# submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
```
> **Note:** The **--input-data** argument passes the dataset as a *named input* that includes a *friendly name* for the dataset, which is used by the script to read it from the **input_datasets** collection in the experiment run. The string value in the **--input-data** argument is actually the registered dataset's ID. As an alternative approach, you could simply pass `diabetes_ds.id`, in which case the script can access the dataset ID from the script arguments and use it to get the dataset from the workspace, but not from the **input_datasets** collection.
The first time the experiment is run, it may take some time to set up the Python environment - subsequent runs will be quicker.
When the experiment has completed, in the widget, view the **azureml-logs/70_driver_log.txt** output log and the metrics generated by the run.
### Register the trained model
As with any training experiment, you can retrieve the trained model and register it in your Azure Machine Learning workspace.
```
from azureml.core import Model

# Register the trained model, attaching the run's metrics as searchable properties
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context':'Tabular dataset'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})

# List every registered model with its tags and properties
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print ('\t',tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print ('\t',prop_name, ':', prop)
    print('\n')
```
### Train a model from a file dataset
You've seen how to train a model using training data in a *tabular* dataset; but what about a *file* dataset?
When you're using a file dataset, the dataset argument passed to the script represents a mount point containing file paths. How you read the data from these files depends on the kind of data in the files and what you want to do with it. In the case of the diabetes CSV files, you can use the Python **glob** module to create a list of files in the virtual mount point defined by the dataset, and read them all into Pandas dataframes that are concatenated into a single dataframe.
Run the following two code cells to create:
1. A folder named **diabetes_training_from_file_dataset**
2. A script that trains a classification model by using a file dataset that is passed to it as an *input*.
```
import os

# Create a folder for the experiment files (the file-dataset training script goes here)
experiment_folder = 'diabetes_training_from_file_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import os
import argparse
from azureml.core import Dataset, Run
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import glob
# Get script arguments (rgularization rate and file dataset mount point)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument('--input-data', type=str, dest='dataset_folder', help='data mount point')
args = parser.parse_args()
# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
print("Loading Data...")
data_path = run.input_datasets['training_files'] # Get the training data path from the input
# (You could also just use args.dataset_folder if you don't want to rely on a hard-coded friendly name)
# Read the files
all_files = glob.glob(data_path + "/*.csv")
diabetes = pd.concat((pd.read_csv(f) for f in all_files), sort=False)
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
Just as with tabular datasets, you can retrieve a file dataset from the **input_datasets** collection by using its friendly name. You can also retrieve it from the script argument, which in the case of a file dataset contains a mount path to the files (rather than the dataset ID passed for a tabular dataset).
Next we need to change the way we pass the dataset to the script - it needs to define a path from which the script can read the files. You can use either the **as_download** or **as_mount** method to do this. Using **as_download** causes the files in the file dataset to be downloaded to a temporary location on the compute where the script is being run, while **as_mount** creates a mount point from which the files can be streamed directly from the datastore.
You can combine the access method with the **as_named_input** method to include the dataset in the **input_datasets** collection in the experiment run (if you omit this, for example by setting the argument to `diabetes_ds.as_mount()`, the script will be able to access the dataset mount point from the script arguments, but not from the **input_datasets** collection).
```
from azureml.core import Experiment
from azureml.widgets import RunDetails

# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes file dataset")

# Create a script config; as_download() copies the dataset files to the
# compute before the script starts (as_mount() would stream them instead)
script_config = ScriptRunConfig(source_directory=experiment_folder,
                                script='diabetes_training.py',
                                arguments = ['--regularization', 0.1, # Regularization rate parameter
                                             '--input-data', diabetes_ds.as_named_input('training_files').as_download()], # Reference to dataset location
                                environment=sklearn_env) # Use the environment created previously

# submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
```
When the experiment has completed, in the widget, view the **azureml-logs/70_driver_log.txt** output log to verify that the files in the file dataset were downloaded to a temporary folder to enable the script to read the files.
### Register the trained model
Once again, you can register the model that was trained by the experiment.
```
from azureml.core import Model

# Register the model trained from the file dataset, tagged to distinguish it
# from the tabular-dataset version
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context':'File dataset'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})

# List every registered model with its tags and properties
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print ('\t',tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print ('\t',prop_name, ':', prop)
    print('\n')
```
> **More Information**: For more information about training with datasets, see [Training with Datasets](https://docs.microsoft.com/azure/machine-learning/how-to-train-with-datasets) in the Azure ML documentation.
| github_jupyter |
```
import numpy as np
import pandas as pd
# Load the labelled tech-summary data and drop bookkeeping columns.
data = pd.read_csv("tech_sort1k.csv")
data.head(15)
data = data.drop(columns=["id", "Note"])
# Replace missing pattern values with an empty list.
# np.nan instead of np.NaN: the np.NaN alias was removed in NumPy 2.0.
data['exact_matched_patt_contextual'] = [[] if x is np.nan else x for x in data['exact_matched_patt_contextual']]
import nltk
import re
from bs4 import BeautifulSoup
nltk.download("punkt")
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
def clean_text(text):
    """Normalise one raw text string.

    Strips HTML markup, lowercases, maps punctuation-like symbols to spaces,
    and deletes any remaining characters outside [0-9a-z #+_].
    """
    stripped = BeautifulSoup(text, "lxml").text   # HTML decoding
    lowered = stripped.lower()
    spaced = REPLACE_BY_SPACE_RE.sub(' ', lowered)   # punctuation -> space
    return BAD_SYMBOLS_RE.sub('', spaced)            # drop disallowed symbols
# The column holds Python-list objects; store their string form so the
# regex-based cleaner can operate on plain text.
data['exact_matched_patt_contextual']=data['exact_matched_patt_contextual'].astype(str)
data.head(15)
#converting lists to strings with proper cleaning
data["to_match"]=data["exact_matched_patt_contextual"].apply(clean_text)
data.head(10)
data["summaries"]=data["summaries"].apply(clean_text)
def convert(lst):
    """Flatten a list of space-separated strings into one flat token list."""
    tokens = []
    for chunk in lst:
        tokens.extend(chunk.split())
    return tokens
def givedata(tech_words, sentence):
    """BIO-tag every token of *sentence* against the tech terms for that row.

    Args:
        tech_words: space-separated string of technology terms.
        sentence: space-separated summary text to tag.
    Returns:
        DataFrame with columns ['Word', 'Tag'], Tag being 'B' (begin),
        'I' (inside) or 'O' (outside).
    """
    # convert([sentence]) round-tripped through join/split; sentence.split()
    # is the direct equivalent.
    broken_list = sentence.split()
    # A set gives O(1) membership tests instead of scanning a list per word.
    vocab = set(tech_words.split())
    prev_tag = "O"  # initial tag
    tag_col = []
    for word in broken_list:
        curr_tag = "O"
        if word in vocab:
            # 'I' continues a tech phrase, 'B' starts a new one.
            curr_tag = 'I' if prev_tag in ('B', 'I') else 'B'
        prev_tag = curr_tag
        tag_col.append(curr_tag)  # adding tag
    return pd.DataFrame(list(zip(broken_list, tag_col)), columns=['Word', 'Tag'])
# Tag every summary and stack the per-sentence frames into one table.
final_data = pd.DataFrame()
frames = []
for i in range(0, len(data)):
    temp = givedata(data.iloc[i, 2], data["summaries"][i])
    # Label every row of this sentence with its sentence number.
    temp.insert(0, "Sentence #", ["Sentence :" + str(i + 1)] * len(temp))
    frames.append(temp)
# DataFrame.append was removed in pandas 2.0 and was O(n^2) in a loop;
# one concat builds the same frame in a single pass.
if frames:
    final_data = pd.concat(frames, ignore_index=True)
import seaborn as sns
# Tag distribution: 'O' dominates heavily, as is typical for BIO-tagged text.
sns.countplot(final_data['Tag'])
final_data['Tag'].value_counts()
# final_data.loc[final_data['Tag']=='I']
# final_data.loc[final_data['Tag'].isin(['I','B'])]
"""All tagged data, without any filter"""
# final_data.to_csv('TECH_bio_tagging.csv',index=False)
final_data.head()
# Frequency of each raw (string-encoded) pattern value in the column.
tech_dict = {}
for pattern in data['exact_matched_patt_contextual']:
    tech_dict[pattern] = tech_dict.get(pattern, 0) + 1
tech_dict
# Parse the string-encoded lists back into real Python lists.
# NOTE(review): eval on file contents is unsafe if the CSV is untrusted;
# ast.literal_eval would be the safer equivalent — confirm the data source.
data["exact_matched_patt_contextual"] = data["exact_matched_patt_contextual"].apply(eval)
for i, l in enumerate(data["exact_matched_patt_contextual"]):
    print("list",i,"is",type(l))
# Count every individual tech term across all pattern lists.
tech_dict = {}
for pattern_list in data['exact_matched_patt_contextual']:
    for term in pattern_list:
        tech_dict[term] = tech_dict.get(term, 0) + 1
tech_dict
len(tech_dict)
# The dictionary keys (insertion order preserved) form the global term list.
tech_list = list(tech_dict)
tech_list
def in_techwords(tech_words, sentence):
    """BIO-tag *sentence* against a global list of tech terms.

    Args:
        tech_words: list of technology terms (each possibly multi-word).
        sentence: space-separated text to tag.
    Returns:
        DataFrame with columns ['Word', 'Tag'] ('B' begin / 'I' inside / 'O' outside).
    """
    broken_list = sentence.split()
    # Flatten multi-word terms into single tokens; a set gives O(1) lookups
    # (convert(tech_words) did the same via join/split on a list).
    vocab = set(' '.join(tech_words).split())
    prev_tag = "O"  # initial tag
    tag_col = []
    for word in broken_list:
        curr_tag = "O"
        if word in vocab:
            # 'I' continues a tech phrase, 'B' starts a new one.
            curr_tag = 'I' if prev_tag in ('B', 'I') else 'B'
        prev_tag = curr_tag
        tag_col.append(curr_tag)  # adding tag
    return pd.DataFrame(list(zip(broken_list, tag_col)), columns=['Word', 'Tag'])
# Re-tag every summary against the global term list and stack the frames.
final_data2 = pd.DataFrame()
frames = []
for i in range(0, len(data)):
    temp = in_techwords(tech_list, data["summaries"][i])
    # Label every row of this sentence with its sentence number.
    temp.insert(0, "Sentence #", ["Sentence :" + str(i + 1)] * len(temp))
    frames.append(temp)
# DataFrame.append was removed in pandas 2.0 and was O(n^2) in a loop;
# a single concat is the equivalent one-pass build.
if frames:
    final_data2 = pd.concat(frames, ignore_index=True)
final_data2["Tag"].value_counts()
def in_techwords_B(tech_words, sentence):
    """BIO-tag *sentence* and report whether it contains any tech term.

    Args:
        tech_words: list of technology terms (each possibly multi-word).
        sentence: space-separated text to tag.
    Returns:
        (DataFrame with ['Word', 'Tag'] columns,
         bool flag that is True if at least one token matched a tech term)
    """
    broken_list = sentence.split()
    # Flatten multi-word terms into single tokens; a set gives O(1) lookups.
    vocab = set(' '.join(tech_words).split())
    prev_tag = "O"  # initial tag
    got_b = False
    tag_col = []
    for word in broken_list:
        curr_tag = "O"
        if word in vocab:
            got_b = True
            # 'I' continues a tech phrase, 'B' starts a new one.
            curr_tag = 'I' if prev_tag in ('B', 'I') else 'B'
        prev_tag = curr_tag
        tag_col.append(curr_tag)  # adding tag
    return pd.DataFrame(list(zip(broken_list, tag_col)), columns=['Word', 'Tag']), got_b
# Keep only sentences that contain at least one tech term, then export.
final_data3 = pd.DataFrame()
kept_frames = []
for i in range(0, len(data)):
    temp, flag = in_techwords_B(tech_list, data["summaries"][i])
    # Label every row of this sentence with its sentence number.
    temp.insert(0, "Sentence #", ["Sentence :" + str(i + 1)] * len(temp))
    if flag == True:
        kept_frames.append(temp)
# DataFrame.append was removed in pandas 2.0 and was O(n^2) in a loop;
# one concat builds the filtered frame in a single pass.
if kept_frames:
    final_data3 = pd.concat(kept_frames, ignore_index=True)
final_data3["Tag"].value_counts()
final_data2.to_csv("Combined_Match_tag.csv")
```
| github_jupyter |
```
import speech_recognition as sr
from transformers import Wav2Vec2Processor, HubertForCTC,Wav2Vec2ForCTC
import soundfile as sf
from datasets import load_dataset
import torch
# Recording settings: save location, filename, and listen timing.
pathSave = 'C:\\Users\\chushengtan\\Desktop\\'
filename = 'audio_file_test.wav'
timeout = 0.5        # seconds to wait for speech to begin
waiting_time = 10    # maximum seconds of speech to capture (phrase_time_limit)
r = sr.Recognizer()
# Microphone device 1 at 16 kHz — the sample rate the Wav2Vec2/HuBERT models expect.
with sr.Microphone(device_index=1,sample_rate = 16000) as source:
    r.adjust_for_ambient_noise(source)  # calibrate against background noise first
    print('請開始說話.....')
    audio = r.listen(source,
                     timeout = timeout,
                     phrase_time_limit = waiting_time)
    print('錄音結束.....')
# Persist the captured audio as a WAV file for later transcription.
with open(pathSave + filename,'wb') as file:
    file.write(audio.get_wav_data())
```
# 完成版
```
def speech2Wave(pathSave,filename,sample_rate = 16000,timeout = 0.5,waiting_time = 10):
    """Record speech from the default microphone and save it as a WAV file.

    Parameters
    ----------
    pathSave : str
        Directory (including trailing separator) where the file is written.
    filename : str
        Name of the output WAV file.
    sample_rate : int
        Capture sample rate in Hz.
    timeout : float
        Seconds to wait for speech to start before ``listen`` gives up.
    waiting_time : float
        Maximum recording length in seconds (``phrase_time_limit``).

    Notes
    -----
    ``mic = sr.Microphone()`` can be used to query all microphone devices.
    ref : https://realpython.com/python-speech-recognition/#working-with-microphones
    ref : https://github.com/Uberi/speech_recognition
    ref : https://github.com/pytorch/fairseq/tree/main/examples/hubert
    """
    r = sr.Recognizer()
    with sr.Microphone(sample_rate = sample_rate) as source:
        # Calibrate against ambient noise before listening.
        r.adjust_for_ambient_noise(source)
        print('請開始說話.....')
        audio = r.listen(source,
                         timeout = timeout,
                         phrase_time_limit = waiting_time)
        print('錄音結束.....')
    # Persist the captured audio as raw WAV bytes.
    with open(pathSave + filename,'wb') as file:
        file.write(audio.get_wav_data())
"""
------------------- loading model -------------------
"""
def Load_Model(processor_name, model_name):
    """Load the pretrained feature processor and CTC model.

    Parameters
    ----------
    processor_name : str
        Hugging Face identifier of the Wav2Vec2 processor.
    model_name : str
        Hugging Face identifier of the HuBERT CTC model.

    Returns
    -------
    (Wav2Vec2Processor, HubertForCTC)
    """
    return (
        Wav2Vec2Processor.from_pretrained(processor_name),
        HubertForCTC.from_pretrained(model_name),
    )
def Speech2Text(audio_path, processor_name, model_name):
    """Transcribe a recorded WAV file into text with a pretrained CTC model.

    Parameters
    ----------
    audio_path : str
        Path of the audio file to transcribe; format: .wav.
    processor_name : str
        Identifier passed to ``Load_Model`` for the processor.
    model_name : str
        Identifier passed to ``Load_Model`` for the model.

    Returns
    -------
    str
        The decoded transcription.
    """
    processor, model = Load_Model(processor_name, model_name)
    speech, _ = sf.read(audio_path)
    input_values = processor(speech, return_tensors='pt', padding='longest').input_values
    # Inference only: disabling autograd avoids building a computation graph
    # and noticeably reduces memory use for the x-large model.
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return processor.decode(predicted_ids[0])
%%time
pathSave = 'C:\\Users\\chushengtan\\Desktop\\'
filename = 'audio_file_test.wav'
speech2Wave(pathSave=pathSave,filename=filename)
%%time
processor_name = 'facebook/hubert-xlarge-ls960-ft'
model_name = 'facebook/hubert-xlarge-ls960-ft'
audio_path = 'C:\\Users\\chushengtan\\Desktop\\audio_file_test.wav'
Speech2Text(audio_path = audio_path,
processor_name = processor_name,
model_name = model_name)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Shrayansh19/Bike_Rentals_Forecast/blob/main/Bike_Rentals_Forecast_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn import linear_model
import matplotlib.pyplot as plt
import os
np.random.seed(42)
import warnings
warnings.filterwarnings('ignore')
```
Loading the Dataset
```
from pandas import read_csv

# Load the dataset and inspect its structure.
filePath = '/content/bikes.csv'
bikesData = pd.read_csv(filePath)
print(bikesData.info())
bikesData.head(3)

# Drop identifier and leakage-prone columns (casual/registered sum to cnt).
columnsToDrop = ['instant','casual','registered','atemp','dteday']
# MODERNIZATION: passing the axis positionally (drop(cols, 1)) was deprecated
# and removed in pandas 2.0 — name the columns explicitly.
bikesData = bikesData.drop(columns=columnsToDrop)
bikesData.head(3)

np.random.seed(42)
from sklearn.model_selection import train_test_split
# Elapsed days since the start of the record (hourly rows / 24).
bikesData['dayCount'] = pd.Series(range(bikesData.shape[0]))/24
train_set, test_set = train_test_split(bikesData, test_size=0.3, random_state=42)
print(len(train_set), "train +", len(test_set), "test")
train_set.sort_values('dayCount', axis= 0, inplace=True)
test_set.sort_values('dayCount', axis= 0, inplace=True)

# Standardize the continuous features; the scaler is fit on train only to
# avoid information leaking from the test split.
columnsToScale = ['temp','hum','windspeed']
scaler = StandardScaler()
train_set[columnsToScale] = scaler.fit_transform(train_set[columnsToScale])
test_set[columnsToScale] = scaler.transform(test_set[columnsToScale])
train_set[columnsToScale].describe()
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
import xgboost
from xgboost import XGBRegressor
trainingCols = train_set.drop(['cnt'], axis=1)
trainingLabels = train_set['cnt']
#Train a Decision Tree Regressor
from sklearn.tree import DecisionTreeRegressor
dec_reg = DecisionTreeRegressor(random_state = 42)
dt_mae_scores = -cross_val_score(dec_reg, trainingCols, trainingLabels, cv=10, scoring="neg_mean_absolute_error")
print(dt_mae_scores)
print('\n')
dt_mse_scores = np.sqrt(-cross_val_score(dec_reg, trainingCols, trainingLabels, cv=10, scoring="neg_mean_squared_error"))
print(dt_mse_scores)
lin_reg = LinearRegression()
lr_mae_scores = -cross_val_score(lin_reg, trainingCols, trainingLabels, cv=10, scoring="neg_mean_absolute_error")
print(lr_mae_scores)
print('\n')
lr_mse_scores = np.sqrt(-cross_val_score(lin_reg, trainingCols, trainingLabels, cv=10, scoring="neg_mean_squared_error"))
print(lr_mse_scores)
forest_reg = RandomForestRegressor(n_estimators=150, random_state=42)
rf_mae_scores = -cross_val_score(forest_reg, trainingCols, trainingLabels, cv=10, scoring="neg_mean_absolute_error")
print(rf_mae_scores)
rf_mse_scores = np.sqrt(-cross_val_score(forest_reg, trainingCols, trainingLabels, cv=10, scoring="neg_mean_squared_error"))
print(rf_mse_scores)
from sklearn.model_selection import GridSearchCV
param_grid = [
# combinations of hyperparameters
{'n_estimators': [120, 150], 'max_features': [10, 12], 'max_depth': [15, 28]},
]
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(trainingCols, trainingLabels)
print(grid_search.best_estimator_)
print(grid_search.best_params_)
feature_importances = grid_search.best_estimator_.feature_importances_
print(feature_importances)
final_model = grid_search.best_estimator_
test_set.sort_values('dayCount', axis= 0, inplace=True)
test_x_cols = (test_set.drop(['cnt'], axis=1)).columns.values
test_y_cols = 'cnt'
X_test = test_set.loc[:,test_x_cols]
y_test = test_set.loc[:,test_y_cols]
test_set.loc[:,'predictedCounts_test'] = final_model.predict(X_test)
mse = mean_squared_error(y_test, test_set.loc[:,'predictedCounts_test'])
final_mse = np.sqrt(mse)
print(final_mse)
test_set.describe()
times = [9,18]
for time in times:
fig = plt.figure(figsize=(8, 6))
fig.clf()
ax = fig.gca()
test_set_freg_time = test_set[test_set.hr == time]
test_set_freg_time.plot(kind = 'line', x = 'dayCount', y = 'cnt', ax = ax)
test_set_freg_time.plot(kind = 'line', x = 'dayCount', y = 'predictedCounts_test', ax =ax)
plt.show()
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
"""Example NumPy style docstrings.
This module demonstrates documentation as specified by the `NumPy
Documentation HOWTO`_. Docstrings may extend over multiple lines. Sections
are created with a section header followed by an underline of equal length.
Example
-------
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_numpy.py
Section breaks are created with two blank lines. Section breaks are also
implicitly created anytime a new section starts. Section bodies *may* be
indented:
Notes
-----
This is an example of an indented section. It's like any other section,
but the body is indented to help it stand out from surrounding text.
If a section is indented, then a section break is created by
resuming unindented text.
Attributes
----------
module_level_variable1 : int
Module level variables may be documented in either the ``Attributes``
section of the module docstring, or in an inline docstring immediately
following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
.. _NumPy Documentation HOWTO:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
module_level_variable1 = 12345
module_level_variable2 = 98765
"""int: Module level variable documented inline.
The docstring may span multiple lines. The type may optionally be specified
on the first line, separated by a colon.
"""
def function_with_types_in_docstring(param1, param2):
    """Example function whose parameter types live in the docstring.

    If the signature were annotated per `PEP 484`_, the types would not
    need to be repeated here.

    Parameters
    ----------
    param1 : int
        First parameter.
    param2 : str
        Second parameter.

    Returns
    -------
    bool
        True if successful, False otherwise.

    .. _PEP 484:
        https://www.python.org/dev/peps/pep-0484/
    """
def function_with_pep484_type_annotations(param1: int, param2: str) -> bool:
    """Example function annotated according to PEP 484.

    The NumPy docstring style still repeats the return type in the
    ``Returns`` section even when it is annotated.

    Parameters
    ----------
    param1
        First parameter.
    param2
        Second parameter.

    Returns
    -------
    bool
        True if successful, False otherwise.
    """
def module_level_function(param1, param2=None, *args, **kwargs):
    # BUGFIX: the docstring contains literal backslash-star sequences; in a
    # non-raw string these are invalid escape sequences (DeprecationWarning,
    # SyntaxWarning from Python 3.12). Using a raw docstring (r""") keeps the
    # rendered text identical while silencing the warnings.
    r"""This is an example of a module level function.

    Function parameters should be documented in the ``Parameters`` section.
    The name of each parameter is required. The type and description of each
    parameter is optional, but should be included if not obvious.

    If \*args or \*\*kwargs are accepted,
    they should be listed as ``*args`` and ``**kwargs``.

    The format for a parameter is::

        name : type
            description

            The description may span multiple lines. Following lines
            should be indented to match the first line of the description.
            The ": type" is optional.

            Multiple paragraphs are supported in parameter
            descriptions.

    Parameters
    ----------
    param1 : int
        The first parameter.
    param2 : :obj:`str`, optional
        The second parameter.
    *args
        Variable length argument list.
    **kwargs
        Arbitrary keyword arguments.

    Returns
    -------
    bool
        True if successful, False otherwise.

        The return type is not optional. The ``Returns`` section may span
        multiple lines and paragraphs. Following lines should be indented to
        match the first line of the description.

        The ``Returns`` section supports any reStructuredText formatting,
        including literal blocks::

            {
                'param1': param1,
                'param2': param2
            }

    Raises
    ------
    AttributeError
        The ``Raises`` section is a list of all exceptions
        that are relevant to the interface.
    ValueError
        If `param2` is equal to `param1`.
    """
    if param1 == param2:
        raise ValueError('param1 may not be equal to param2')
    return True
def example_generator(n):
    """Yield the integers 0 .. n-1.

    Generators are documented with a ``Yields`` section instead of a
    ``Returns`` section.

    Parameters
    ----------
    n : int
        Exclusive upper bound of the generated range.

    Yields
    ------
    int
        The next integer in ``range(n)``.

    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.

    >>> print([i for i in example_generator(4)])
    [0, 1, 2, 3]
    """
    yield from range(n)
class ExampleError(Exception):
    """Exceptions are documented just like regular classes.

    The ``__init__`` method may be described either in the class docstring
    or on ``__init__`` itself — pick one convention and stick to it.

    Note
    ----
    ``self`` is never listed in the ``Parameters`` section.

    Parameters
    ----------
    msg : str
        Human readable string describing the exception.
    code : :obj:`int`, optional
        Numeric error code.

    Attributes
    ----------
    msg : str
        Human readable string describing the exception.
    code : int
        Numeric error code.
    """

    def __init__(self, msg, code):
        # Deliberately mirrors the original: the base initializer is not
        # invoked; the message and code are stored as plain attributes.
        self.msg = msg
        self.code = code
class ExampleClass(object):
    """One-line class summary that fits on a single line.

    Public attributes may be documented here in an ``Attributes`` section
    (same formatting as a function's parameter section), or inline next to
    their assignment in ``__init__`` — both styles are demonstrated below.
    Properties created with ``@property`` are documented in their getter.

    Attributes
    ----------
    attr1 : str
        Description of `attr1`.
    attr2 : :obj:`int`, optional
        Description of `attr2`.
    """

    def __init__(self, param1, param2, param3):
        """Store the constructor parameters as public attributes.

        The ``__init__`` method may be documented either here or in the
        class-level docstring; choose one convention and be consistent.

        Note
        ----
        ``self`` is never listed in the ``Parameters`` section.

        Parameters
        ----------
        param1 : str
            Description of `param1`.
        param2 : :obj:`list` of :obj:`str`
            Description of `param2`. Multiple
            lines are supported.
        param3 : :obj:`int`, optional
            Description of `param3`.
        """
        self.attr1 = param1
        self.attr2 = param2
        self.attr3 = param3  #: Doc comment *inline* with attribute

        #: list of str: Doc comment *before* attribute, with type specified
        self.attr4 = ["attr4"]

        self.attr5 = None
        """str: Docstring *after* attribute, with type specified."""

    @property
    def readonly_property(self):
        """str: Properties should be documented in their getter method."""
        return "readonly_property"

    @property
    def readwrite_property(self):
        """:obj:`list` of :obj:`str`: A getter/setter pair is documented
        only in the getter; notable setter behaviour is mentioned here.
        """
        return ["readwrite_property"]

    @readwrite_property.setter
    def readwrite_property(self, value):
        value  # intentional no-op: this example setter discards its input

    def example_method(self, param1, param2):
        """Class methods are documented like regular functions.

        Note
        ----
        ``self`` is never listed in the ``Parameters`` section.

        Parameters
        ----------
        param1
            The first parameter.
        param2
            The second parameter.

        Returns
        -------
        bool
            True if successful, False otherwise.
        """
        return True

    def __special__(self):
        """Special (dunder) members are only emitted when Sphinx's conf.py
        sets::

            napoleon_include_special_with_doc = True
        """
        pass

    def __special_without_docstring__(self):
        pass

    def _private(self):
        """Private members (single leading underscore, not special) are only
        emitted when Sphinx's conf.py sets::

            napoleon_include_private_with_doc = True
        """
        pass

    def _private_without_docstring(self):
        pass
```
| github_jupyter |
# odm2api demo with Little Bear SQLite sample DB
Largely from https://github.com/ODM2/ODM2PythonAPI/blob/master/Examples/Sample.py
- 4/25/2016. Started testing with the new `odm2` conda channel, based on the new `0.5.0-alpha` odm2api release. See my `odm2api_odm2channel` env. Ran into problems b/c the SQLite database needed to be updated to have a `SamplingFeature.FeatureGeometryWKT` field; so I added and populated it manually with `SQLite Manager`.
- 2/7/2016. Tested successfully with `sfgeometry_em_1` branch, with my overhauls. Using `odm2api_dev` env.
- 2/1 - 1/31. Errors with SamplingFeatures code, with latest odm2api from master (on env `odm2api_jan31test`). *The code also fails the same way with the `odm2api` env, but it does still run fine with the `odm2api_jan21` env! I'm investigating the differences between those two envs.*
- 1/22-20,9/2016.
Emilio Mayorga
```
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import dates
from odm2api.ODMconnection import dbconnection
from odm2api.ODM2.services.readService import ReadODM2
# Create a connection to the ODM2 database
# ----------------------------------------
odm2db_fpth = '/home/mayorga/Desktop/TylerYeats/ODM2-LittleBear1.sqlite'
session_factory = dbconnection.createConnection('sqlite', odm2db_fpth, 2.0)
read = ReadODM2(session_factory)
# Run some basic sample queries.
# ------------------------------
# Get all of the variables from the database and print their names to the console
allVars = read.getVariables()
for x in allVars:
print x.VariableCode + ": " + x.VariableNameCV
# Get all of the people from the database
allPeople = read.getPeople()
for x in allPeople:
print x.PersonFirstName + " " + x.PersonLastName
try:
print "\n-------- Information about an Affiliation ---------"
allaff = read.getAffiliations()
for x in allaff:
print x.PersonObj.PersonFirstName + ": " + str(x.OrganizationID)
except Exception as e:
print "Unable to demo getAllAffiliations", e
allaff = read.getAffiliations()
type(allaff)
```
## SamplingFeatures tests
```
# from odm2api.ODM2.models import SamplingFeatures
# read._session.query(SamplingFeatures).filter_by(SamplingFeatureTypeCV='Site').all()
# Get all of the SamplingFeatures from the database that are Sites
try:
siteFeatures = read.getSamplingFeatures(type='Site')
numSites = len(siteFeatures)
for x in siteFeatures:
print x.SamplingFeatureCode + ": " + x.SamplingFeatureName
except Exception as e:
print "Unable to demo getSamplingFeatures(type='Site')", e
read.getSamplingFeatures()
read.getSamplingFeatures(codes=['USU-LBR-Mendon'])
# Now get the SamplingFeature object for a SamplingFeature code
sf_lst = read.getSamplingFeatures(codes=['USU-LBR-Mendon'])
vars(sf_lst[0])
sf = sf_lst[0]
print sf, "\n"
print type(sf)
print type(sf.FeatureGeometryWKT), sf.FeatureGeometryWKT
print type(sf.FeatureGeometry)
vars(sf.FeatureGeometry)
sf.FeatureGeometry.__doc__
sf.FeatureGeometry.geom_wkb, sf.FeatureGeometry.geom_wkt
# 4/25/2016: Don't know why the shape is listed 4 times ...
type(sf.shape()), sf.shape().wkt
```
## Back to the rest of the demo
```
read.getResults()
firstResult = read.getResults()[0]
firstResult.FeatureActionObj.ActionObj
```
### Foreign Key Example
Drill down and get objects linked by foreign keys
```
try:
# Call getResults, but return only the first result
firstResult = read.getResults()[0]
action_firstResult = firstResult.FeatureActionObj.ActionObj
print "The FeatureAction object for the Result is: ", firstResult.FeatureActionObj
print "The Action object for the Result is: ", action_firstResult
print ("\nThe following are some of the attributes for the Action that created the Result: \n" +
"ActionTypeCV: " + action_firstResult.ActionTypeCV + "\n" +
"ActionDescription: " + action_firstResult.ActionDescription + "\n" +
"BeginDateTime: " + str(action_firstResult.BeginDateTime) + "\n" +
"EndDateTime: " + str(action_firstResult.EndDateTime) + "\n" +
"MethodName: " + action_firstResult.MethodObj.MethodName + "\n" +
"MethodDescription: " + action_firstResult.MethodObj.MethodDescription)
except Exception as e:
print "Unable to demo Foreign Key Example: ", e
```
### Example of Retrieving Attributes of a Time Series Result using a ResultID
```
tsResult = read.getResults(ids=[1])[0]
type(tsResult), vars(tsResult)
```
**Why are `ProcessingLevelObj`, `VariableObj` and `UnitsObj` objects not shown in the above `vars()` listing!?** They **are** actually available, as demonstrated in much of the code below.
```
try:
tsResult = read.getResults(ids=[1])[0]
# Get the site information by drilling down
sf_tsResult = tsResult.FeatureActionObj.SamplingFeatureObj
print(
"Some of the attributes for the TimeSeriesResult retrieved using getResults(ids=[]): \n" +
"ResultTypeCV: " + tsResult.ResultTypeCV + "\n" +
# Get the ProcessingLevel from the TimeSeriesResult's ProcessingLevel object
"ProcessingLevel: " + tsResult.ProcessingLevelObj.Definition + "\n" +
"SampledMedium: " + tsResult.SampledMediumCV + "\n" +
# Get the variable information from the TimeSeriesResult's Variable object
"Variable: " + tsResult.VariableObj.VariableCode + ": " + tsResult.VariableObj.VariableNameCV + "\n" +
"AggregationStatistic: " + tsResult.AggregationStatisticCV + "\n" +
# Get the site information by drilling down
"Elevation_m: " + str(sf_tsResult.Elevation_m) + "\n" +
"SamplingFeature: " + sf_tsResult.SamplingFeatureCode + " - " +
sf_tsResult.SamplingFeatureName)
except Exception as e:
print "Unable to demo Example of retrieving Attributes of a time Series Result: ", e
```
### Example of Retrieving Time Series Result Values, then plotting them
```
# Get the values for a particular TimeSeriesResult
tsValues = read.getResultValues(resultid=1) # Return type is a pandas dataframe
# Print a few Time Series Values to the console
# tsValues.set_index('ValueDateTime', inplace=True)
tsValues.head()
# Plot the time series
try:
fig = plt.figure()
ax = fig.add_subplot(111)
tsValues.plot(x='ValueDateTime', y='DataValue', kind='line',
title=tsResult.VariableObj.VariableNameCV + " at " +
tsResult.FeatureActionObj.SamplingFeatureObj.SamplingFeatureName,
ax=ax)
ax.set_ylabel(tsResult.VariableObj.VariableNameCV + " (" +
tsResult.UnitsObj.UnitsAbbreviation + ")")
ax.set_xlabel("Date/Time")
ax.xaxis.set_minor_locator(dates.MonthLocator())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
ax.xaxis.set_major_locator(dates.YearLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('\n%Y'))
ax.grid(True)
except Exception as e:
print "Unable to demo plotting of tsValues: ", e
```
| github_jupyter |
# Part 5: Competing Journals Analysis
In this notebook we are going to
* Load the researchers impact metrics data previously extracted (see parts 1-2-3)
* Get the full publications history for these researchers
* Use this new publications dataset to determine which are the most frequent journals the researchers have also published in
* Build some visualizations in order to have a quick overview of the results
## Prerequisites: Installing the Dimensions Library and Logging in
```
# @markdown # Get the API library and login
# @markdown Click the 'play' button on the left (or shift+enter) after entering your API credentials
username = "" #@param {type: "string"}
password = "" #@param {type: "string"}
endpoint = "https://app.dimensions.ai" #@param {type: "string"}
!pip install dimcli plotly tqdm -U --quiet
import dimcli
from dimcli.shortcuts import *
dimcli.login(username, password, endpoint)
dsl = dimcli.Dsl()
#
# load common libraries
import time
import sys
import os
import json
import pandas as pd
from pandas.io.json import json_normalize
from tqdm.notebook import tqdm as progress
#
# charts libs
# import plotly_express as px
import plotly.express as px
if not 'google.colab' in sys.modules:
# make js dependecies local / needed by html exports
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
#
# create output data folder
if not(os.path.exists("data")):
os.mkdir("data")
```
## Competing Journals
From our researchers master list, we now want to extract the following:
* full list of publications for a 5 year period
* full list of journals with counts of how many publications per journal
This new dataset will let us draw some conclusions about which journals compete with the one we selected at the beginning.
### First let's reload the data obtained in previous steps
```
#
researchers = pd.read_csv("data/2.researchers_impact_metrics.csv")
#
print("Total researchers:", len(researchers))
researchers.head(5)
```
### What the query looks like
The approach we're taking consists in pulling all publications data, so that we can count journals as a second step.
This approach may take some time (as we're potentially retrieving a lot of publications data), but it will lead to precise results.
The query template to use looks like this (for a couple of researchers only):
```
%%dsldf
search publications where researchers.id in ["ur.01277776417.51", "ur.0637651205.48"]
and year >= 2015 and journal is not empty
and journal.id != "jour.1103138"
return publications[id+journal] limit 10
```
## Extracting all publications/journals information
This part may take some time to run (depending on how many years back one wants to go) so you may want to get a coffee while you wait..
```
#
journal_id = "jour.1103138" # Nature genetics
start_year = 2018
# our list of researchers
llist = list(researchers['researcher_id'])
#
# the query
q2 = """search publications
where researchers.id in {}
and year >= {} and journal is not empty and journal.id != "{}"
return publications[id+journal+year]"""

VERBOSE = False
RESEARCHER_ITERATOR_NO = 400

# BUGFIX: the original did `pubs = pd.DataFrame` (binding the *class*, not an
# instance — missing parentheses) and then `pubs.append(res.as_dataframe())`,
# whose return value was discarded (DataFrame.append returns a new frame and
# was removed in pandas 2.0). Every chunk after the first was silently lost.
# Collect the per-chunk frames and concatenate once at the end instead.
chunk_frames = []
for chunk in progress(list(chunks_of(llist, RESEARCHER_ITERATOR_NO))):
    # get all pubs for this chunk of researcher IDs
    query = q2.format(json.dumps(chunk), start_year, journal_id)
    res = dsl.query_iterative(query, verbose=VERBOSE)
    chunk_frames.append(res.as_dataframe())
pubs = pd.concat(chunk_frames, ignore_index=True) if chunk_frames else pd.DataFrame()

# remove duplicate publications, if they have the same PUB_ID
pubs = pubs.drop_duplicates(subset="id")
# save
pubs.to_csv("data/5.journals-via-publications-RAW.csv", index=False)
# preview the data
pubs
```
Now we can create a journals-only dataset that includes counts per year and a grand total.
```
journals = pubs.copy()
# drop pub_id column
journals = journals.drop(['id'], axis=1)
#
# add total column
journals['total'] = journals.groupby('journal.id')['journal.id'].transform('count')
journals['total_year'] = journals.groupby(['journal.id', 'year'])['journal.id'].transform('count')
#
# remove multiple counts for same journal
journals = journals.drop_duplicates()
journals.reset_index(drop=True)
#
# sort by total count
journals = journals.sort_values('total', ascending=False)
# #
# # save
journals.to_csv("data/5.journals-via-publications.csv", index=False)
print("======\nDone")
# download the data
if COLAB_ENV:
files.download("data/5.journals-via-publications.csv")
#preview the data
journals.head(10)
```
# Visualizations
```
threshold = 100
temp = journals.sort_values("total", ascending=False)[:threshold]
px.bar(journals[:threshold],
x="journal.title", y="total_year",
color="year",
hover_name="journal.title",
hover_data=['journal.id', 'journal.title', 'total' ],
title=f"Top {threshold} competitors for {journal_id} (based on publications data from {start_year})")
threshold = 200
temp = journals.sort_values("year", ascending=True).groupby("year").head(threshold)
px.bar(journals[:threshold],
x="journal.title", y="total_year",
color="year",
facet_row="year",
height=900,
hover_name="journal.title",
hover_data=['journal.id', 'journal.title', 'total' ],
title=f"Top {threshold} competitors for {journal_id} - segmented by year")
```
NOTE the European Neuropsychopharmacology journal has a massive jump in 2019 cause they [published a lot of conference proceedings](https://www.sciencedirect.com/journal/european-neuropsychopharmacology/issues)! See also the journal [Dimensions page](https://app.dimensions.ai/analytics/publication/overview/timeline?and_facet_source_title=jour.1101548) for comparison..
| github_jupyter |
# Optimizer Notebook
## Julia needs to compile once 🤷
```
#Force Notebook to work on the parent Directory
import os
if ("Optimizer" in os.getcwd()):
os.chdir("..")
from julia.api import Julia
jl = Julia(compiled_modules=False)
from julia import Main
Main.include("./Optimizer/eval_NN.jl")
NN_path = "/home/freshstart/DiplomaThesisData/NeuralNetSaves_050/"
#----- TEST ------
RESup,RESdwn = Main.NN_eval(NN_path,"./Optimizer/RAE_var.png")
```
## Geometry and Image Processing
```
import Airfoil_Generation.Airfoil_Range_Creator as arg
import Airfoil_Generation.Images_Generator as ig
import numpy as np
from scipy.integrate import simps
import matplotlib.pyplot as plt
#--- Read Geometry ---
geom = arg.readfile("./Misc/data/RAE_2822.geom")
geom = (geom[0],geom[1][1:-1,:])
plt.plot(geom[1][:,0],geom[1][:,1])
plt.plot(geom[0][:,0],geom[0][:,1])
```
### Normals Calculation
```
def normals2D(geom, flip_n=False):
    """Unit normals of the consecutive segments of a 2-D polyline.

    Parameters
    ----------
    geom : (N, 2) array_like
        x/y coordinates of the polyline vertices.
    flip_n : bool, optional
        Flip the normal direction (e.g. for the lower airfoil surface).

    Returns
    -------
    (N-1, 2) ndarray
        One unit normal per segment: (dy, -dx) / |segment|.
    """
    # Segment direction vectors (dx, dy); negating them flips the normals.
    # Vectorized replacement of the original per-segment Python loop — same
    # values, computed in one pass at C speed.
    d = np.diff(np.asarray(geom, dtype=float), axis=0)
    if flip_n:
        d = -d
    nrm = np.hypot(d[:, 0], d[:, 1])
    # Rotate (dx, dy) by -90 degrees -> (dy, -dx), then normalize.
    return np.column_stack((d[:, 1], -d[:, 0])) / nrm[:, None]
def partials2D(Cp, geom, flip_norm=True, show_norms=False):
    """Project Cp samples onto segment normals of a surface polyline.

    Parameters
    ----------
    Cp : (M, 2) ndarray
        Columns (x, Cp) of pressure-coefficient samples.
    geom : (N, 2) ndarray
        Surface polyline vertices; segment j spans geom[j] .. geom[j+1].
    flip_norm : bool, optional
        Flip the segment normals (lower surface).
    show_norms : bool, optional
        If True, plot the surface with its normal vectors (debug aid).

    Returns
    -------
    (M, 3) ndarray
        Rows (x, eta_y * Cp, eta_x * Cp); rows whose x falls on no segment
        stay zero.
    """
    eta = normals2D(geom, flip_n=flip_norm)
    if show_norms:
        fig, ax = plt.subplots()
        ax.plot(geom[0:-2, 0], geom[0:-2, 1])
        ax.quiver(geom[0:-2, 0], geom[0:-2, 1], eta[:, 0], eta[:, 1])
    # BUGFIX: np.ndarray((M, 3)) returns an *uninitialized* array (arbitrary
    # memory); Cp points matching no segment previously kept garbage values.
    # np.zeros guarantees unmatched rows are exactly zero.
    xClCd = np.zeros((len(Cp), 3))
    for i in range(len(Cp)):
        for j in range(len(eta)):
            # Assign the sample to the segment whose x-interval contains it.
            if (Cp[i, 0] <= geom[j + 1, 0]) and (Cp[i, 0] > geom[j, 0]):
                xClCd[i, 0] = Cp[i, 0]
                xClCd[i, 1] = eta[j, 1] * Cp[i, 1]
                xClCd[i, 2] = eta[j, 0] * Cp[i, 1]
                break
    return xClCd
def calc2D(Cpup, Cpdwn, geom, show_norms=False):
    """Integrate upper/lower Cp distributions into (Cl, Cd) coefficients.

    Cpup/Cpdwn are (x, Cp) arrays for the upper and lower surfaces;
    geom is the (upper, lower) pair of surface polylines.

    NOTE(review): scipy's simps signature is simps(y, x) — here the x column
    (Up[:,0]) is passed as the first argument and the projected Cp as the
    second, i.e. the integrand/abscissa order looks swapped relative to the
    scipy convention. Confirm against the intended formulation before relying
    on the absolute values.
    """
    Up = partials2D(Cpup, geom[0], flip_norm=False, show_norms=show_norms)
    Dn = partials2D(Cpdwn, geom[1], flip_norm=True, show_norms=show_norms)
    Cl = -simps(Up[:, 0], Up[:, 1]) + simps(Dn[:, 0], Dn[:, 1])  # invert y-axis to match the global axis
    Cd = simps(Up[:, 0], Up[:, 2]) + simps(Dn[:, 0], Dn[:, 2])
    return Cl, Cd
Cl,Cd = calc2D(RESup,RESdwn,geom)
print("Cl = ", Cl)
print("Cd = ", Cd)
phi = np.linspace(0,2*3.14159)
x = np.array(np.cos(phi),ndmin = 2).reshape((len(phi),1))
y = np.array(np.sin(phi),ndmin = 2).reshape((len(phi),1))
plt.plot(x,y)
plt.axis("equal")
f = np.concatenate((x,y),axis = 1)
plt.plot(f[:,0],f[:,1])
a=partials2D(RESup,f,show_norms=True)
```
## Optimizer
```
import openmdao.api as om
```
### Class Definition and problem set-up
```
#---- Preparing the X coordinates for use in the optimizer -------
X_UP = np.array(geom[0][:,0],ndmin=2)
X_DN = np.array(geom[1][:,0],ndmin=2)
X_UP = X_UP.reshape((X_UP.shape[1],X_UP.shape[0]))
X_DN = X_DN.reshape((X_DN.shape[1],X_DN.shape[0]))
Y_UP = np.array(geom[0][:,1],ndmin=2)
Y_DN = np.array(geom[1][:,1],ndmin=2)
Y_UP = Y_UP.reshape((Y_UP.shape[1],Y_UP.shape[0]))
Y_DN = Y_DN.reshape((Y_DN.shape[1],Y_DN.shape[0]))
##################################################################
class Airfoil(om.ExplicitComponent):
    """OpenMDAO component evaluating Cl/Cd of a deformed airfoil via the NN.

    Creates the most efficient airfoil for specific Mach and Reynolds
    numbers. Each surface's y-coordinates are scaled by a deformation
    *rate*, which is easier to bound in the optimizer than the raw flat
    y-coordinates.
    """

    def setup(self):
        # Scalar deformation rates applied to the upper/lower baseline
        # y-coordinates (Y_UP / Y_DN module-level arrays).
        self.add_input("y_up_rate", val=0.0)  # np.zeros((len(geom[0]),1)) )
        self.add_input("y_dwn_rate", val=0.0)  # np.zeros((len(geom[1]),1)) )
        self.add_output("Cl", val=0.0)
        self.add_output("Cd", val=0.0)

    def setup_partials(self):
        # Finite-difference derivatives for every input/output pair — the NN
        # evaluation provides no analytic gradients.
        self.declare_partials("*", "*", method="fd")

    def compute(self, inputs, outputs):
        r1 = inputs["y_up_rate"]
        r2 = inputs["y_dwn_rate"]
        # Scale the baseline surfaces by (1 + rate).
        y1 = (1 + r1) * Y_UP
        y2 = (1 + r2) * Y_DN
        temp_geom = (np.concatenate((X_UP, y1), axis=1), np.concatenate((X_DN, y2), axis=1))
        # Rasterize the candidate geometry to a 32x32 image, query the Julia
        # neural-net evaluator for the Cp distributions, then integrate them
        # into Cl/Cd with calc2D.
        ig.image_generator(np.concatenate((temp_geom[0], temp_geom[1]), axis=0), "./Optimizer/temp.png", 32, 32)
        Cpup, Cpdwn = Main.NN_eval(NN_path, "./Optimizer/temp.png")
        res = calc2D(Cpup, Cpdwn, temp_geom)
        outputs["Cl"] = res[0]
        outputs["Cd"] = res[1]
#--------- Testing --------
model = om.Group()
model.add_subsystem("airfoil",Airfoil(),promotes_inputs=["y_up_rate","y_dwn_rate"])
prob = om.Problem(model)
prob.setup()
prob.run_model()
print(prob.get_val("airfoil.Cl"))
prob.get_val("airfoil.Cd")
```
### Optimization
```
model = om.Group()
model.add_subsystem("airfoil",Airfoil(),promotes_inputs=["y_up_rate","y_dwn_rate"])
prob = om.Problem(model)
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options["optimizer"] = "COBYLA"
# prob.driver.options["optimizer"] = "SLSQP"
L_BOUND = -0.2
U_BOUND = 0.2
prob.model.add_design_var("y_up_rate",lower = L_BOUND,upper= U_BOUND)
prob.model.add_design_var("y_dwn_rate",lower = L_BOUND,upper= U_BOUND)
prob.model.add_objective("airfoil.Cl",scaler=-1)
# prob.model.add_objective("airfoil.Cd",scaler=1)
prob.setup()
prob.run_driver();
#---------- SLSQP Optimizer ------------
print("Cl = ", prob.get_val("airfoil.Cl"))
print("Cd = ", prob.get_val("airfoil.Cd"))
print("Rate up = ", prob.get_val("y_up_rate"))
print("Rate dwn = ", prob.get_val("y_dwn_rate"))
#----- Maximize Cl COBYLA -----
print("Cl = ", prob.get_val("airfoil.Cl"))
print("Cd = ", prob.get_val("airfoil.Cd"))
print("Rate up = ", prob.get_val("y_up_rate"))
print("Rate dwn = ", prob.get_val("y_dwn_rate"))
Cpup,Cpdwn = Main.NN_eval(NN_path,"./Optimizer/temp.png")
Cl,Cd = calc2D(Cpup,Cpdwn,geom)
print("Cl = ", Cl)
print("Cd = ", Cd)
```
| github_jupyter |
This notebook is a sample analysis of the HAL QCD potential,
the effective mass fitting, and the effective energy shifts of a two-baryon system,
computed from the compressed NBS wavefunction in sample_data.
In order to decompress the wave function, hal_pot_single_ch.py requires
the binary "PH1.compress48" from the "yukawa" library.
```
# Notebook setup: pylab namespace, seaborn styling, retina figures.
%pylab inline
import seaborn as sns
sns.set_style('ticks', {'axes.grid': True})
sns.set_context('poster', font_scale=2.0)
%config InlineBackend.figure_format = 'retina'
plt.rcParams['figure.figsize'] = (12.8, 9.6)
plt.rcParams['figure.facecolor'] = 'white'
# List the sample-data directory (IPython shell shortcut).
ls ../data/sample_data/
# import library
from hal_pot_single_ch import HAL_pot
from corr_baryon import Corr_2pt_Baryon, Corr_2pt_2Baryons, Delta_Eeff
```
lattice spacing
```
# Inverse lattice spacing and the conversion factor from lattice units to fm.
ainv = 2.194e3 # MeV
hbarc = 0.197e3 # MeV fm
lat_unit = hbarc/ainv
```
analyze baryon correlator and mass
\begin{equation}
m_\mathrm{eff}(t) = \log \frac{C_\mathrm{B}(t)}{C_\mathrm{B}(t+1)}
\end{equation}
```
# Xi-baryon 2pt correlator: effective-mass plot and a plateau fit on t in [15, 20].
cb = Corr_2pt_Baryon('Xi_CG05_CG05', bin_size=1, result_dir='../data/sample_data/')
cb.plot_meff()
fig, ax = plt.subplots()
cb.fit_meff(fit_min=15, fit_max=20, ax=ax)
ax.set_ylim(0.65, 0.68)
leg = ax.legend(frameon=True)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(2.0)
ax.set_title('$\Xi$-baryon effective mass');
# Reduced mass of the XiXi system: half the fitted single-baryon mass
# (lattice units).
m_red = 0.5 * 0.665
```
initialize HAL_pot object with induced mass as an input parameter
```
# HAL potential object for the XiXi channel: 48^3 lattice, reference time
# slice it0=10, jackknife bin size 2, using the fitted reduced mass.
hal = HAL_pot(m_red=m_red, result_dir='../data/sample_data/',
              it0 = 10, Ns=48, channel='xixi', bin_size=2)
```
The calc_pot method evaluates the (effective) central potential of the 1S0 channel, and the central and tensor parts of the 3S1 channel.
```
# potential
# Effective central potential in the 1S0 spin channel.
result = hal.calc_pot(pot_type='cen', spin='1s0')
```
The text data of the potential is stored in the results.pot directory.
```
# Load the potential text file written by calc_pot (t0=10, 4 configs, bin size 2).
pot = np.loadtxt('results.pot/pot_1s0_cen_xixi_t010_004conf_002bin.dat')
pot.shape
```
columns of potential data
0, 1, 2, 3, 4, 5, 6, 7, 8
r, H0-term (av, err) , d/dt-term (av, err), d2/dt2-term (av, err), total (av, err)
```
# Plot each term of the potential (H0, d/dt, d2/dt2, total) in physical
# units: r converted to fm via lat_unit, V(r) to MeV via ainv.
fig, ax = plt.subplots()
ax.errorbar(pot[:,0]*lat_unit, pot[:,1]*ainv, pot[:,2]*ainv,
            fmt='bs', mfc='none', mew=2, capsize=10, capthick=2, label=r'$H_0$-term')
ax.errorbar(pot[:,0]*lat_unit, pot[:,3]*ainv, pot[:,4]*ainv,
            fmt='g^', mfc='none', mew=2, capsize=10, capthick=2, label=r'$\partial/\partial t$-term')
ax.errorbar(pot[:,0]*lat_unit, pot[:,5]*ainv, pot[:,6]*ainv,
            fmt='kd', mfc='none', mew=2, capsize=10, capthick=2, label=r'$\partial^2/\partial t^2$-term')
ax.errorbar(pot[:,0]*lat_unit, pot[:,7]*ainv, pot[:,8]*ainv,
            fmt='ro', mfc='none', mew=2, capsize=10, capthick=2, label='total')
ax.set_ylim(-50, 50)
ax.axhline(0, color='black')
ax.set_xlabel(r'$r$ [fm]', size=48)
ax.set_ylabel(r'$V(r)$ [MeV]', size=48)
leg = ax.legend(frameon=True)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(2.0)
```
# baryon mass and energy shift
if lattice unit is given, observables are plotted in physical scale
```
# Same single-baryon fit, but with lat_unit supplied so the mass is plotted
# in MeV instead of lattice units.
cb = Corr_2pt_Baryon('Xi_CG05_CG05', bin_size=1, result_dir='../data/sample_data/', lat_unit=lat_unit)
fig, ax = plt.subplots()
cb.fit_meff(fit_min=10, fit_max=20, ax=ax)
leg = ax.legend(frameon=True)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(2.0)
ax.set_ylim(1400, 1500)
```
Two-baryon correlator and the effective energy
\begin{equation}
E_\mathrm{eff}(t) = \log \frac{C_\mathrm{BB}(t)}{C_\mathrm{BB}(t+1)}
\end{equation}
```
# BB two baryon correlator
# Effective energy of the two-baryon system in physical units.
cbb = Corr_2pt_2Baryons('xixi', bin_size=1, result_dir='../data/sample_data/', lat_unit=lat_unit)
fig, ax = plt.subplots()
cbb.fit_Eeff(ax=ax)
ax.set_ylim(2800, 3000)
```
effective energy shift
\begin{equation}
\Delta E_\mathrm{eff}(t) = \log \frac{R(t)}{R(t+1)}
\end{equation}
with
\begin{equation}
R(t) = \frac{C_\mathrm{BB}(t)}{\{C_\mathrm{B}(t)\}^2}
\end{equation}
```
# effective energy shifts
# Delta E_eff from the ratio R(t) = C_BB(t) / C_B(t)^2 (see formulas above);
# fit window t in [7, 10] for the 1S0 channel.
dEeff = Delta_Eeff('xixi', bin_size=1, result_dir='../data/sample_data', lat_unit=lat_unit)
fig, ax = plt.subplots()
dEeff.fit_dEeff(fit_min=7, fit_max=10, spin='1s0', ax=ax)
ax.set_ylim(-10, 10)
leg = ax.legend(frameon=True)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(2.0)
```
| github_jupyter |
# CAMS functions
```
def get_ADS_API_key():
    """ Get ADS API key to download CAMS datasets

        The key is read from the first line of
        <repo-root>/adc-toolbox/data/keys.txt, where the repo root is derived
        from the first two components of the current working directory.

        Returns:
            ADS_key (str): ADS API key

        Raises:
            KeyboardInterrupt: if the keys file is missing, unreadable or empty
    """
    keys_path = os.path.join('/', '/'.join(
        os.getcwd().split('/')[1:3]), 'adc-toolbox',
        os.path.relpath('data/keys.txt'))
    try:
        # 'with' guarantees the file handle is closed even if reading fails
        # (the original left the file open on every call).
        with open(keys_path, 'r') as keys_file:
            keys = keys_file.readlines()
        environ_keys = [key.rstrip() for key in keys]
        ADS_key = environ_keys[0]
    except (OSError, IndexError):
        # Catch only the expected failures (missing/unreadable file, empty
        # file) instead of a bare except that would hide real bugs.
        print('ERROR: You need to create a keys.txt file in the data folder with the ADS API key.')
        print('Get your ADS API key by registering at https://ads.atmosphere.copernicus.eu/api-how-to.')
        # KeyboardInterrupt is kept for backward compatibility: it aborts the
        # notebook cell without a long traceback.
        raise KeyboardInterrupt
    return ADS_key
def CAMS_download(dates, start_date, end_date, component, component_nom, lat_min, lat_max, lon_min, lon_max,
                  area_name, model_full_name, model_level, CAMS_UID = None, CAMS_key = None):
    """ Query and download the CAMS levels dataset from CDS API

        Files already present on disk are not downloaded again. Forecast data
        are retrieved in a single request; multi-level reanalysis data are
        retrieved month by month to keep the requests small.

        Args:
            dates (arr): Query dates
            start_date (str): Query start date
            end_date (str): Query end date
            component (str): Component name
            component_nom (str): Component chemical nomenclature
            lat_min (int): Minimum latitude
            lat_max (int): Maximum latitude
            lon_min (int): Minimum longitude
            lon_max (int): Maximum longitude
            area_name (str): User defined area name
            model_full_name (str): Full name of the CAMS model among:
                - 'cams-global-atmospheric-composition-forecasts'
                - 'cams-global-reanalysis-eac4-monthly'
            model_level (str): Model levels:
                - 'Single' for total columns
                - 'Multiple' for levels
            CAMS_UID (str): ADS user ID
            CAMS_key (str): ADS key
        Returns:
            CAMS_product_name (str or list): Product name(s) of CAMS product;
                a list for multi-level reanalysis (one file per month)
            CAMS_type (str): Model type:
                - 'Forecast'
                - 'Reanalysis'
    """
    # Get API key (explicit credentials take precedence over keys.txt)
    if CAMS_UID != None and CAMS_key != None:
        ADS_key = CAMS_UID + ':' + CAMS_key
    else:
        ADS_key = get_ADS_API_key()
    # Connect to the server
    c = cdsapi.Client(url = 'https://ads.atmosphere.copernicus.eu/api/v2', key = ADS_key)
    # Download component concentration dataset
    if model_full_name == 'cams-global-atmospheric-composition-forecasts':
        CAMS_type = 'Forecast'
        if model_level == 'Multiple':
            # 137-level forecast, daily at 00:00 with 3-hourly lead times
            CAMS_product_name = ('CAMS_FORECAST_' + component_nom + '_137_LEVELS_' + start_date + '_' + end_date +
                                 '_' + area_name + '.grib')
            CAMS_product_path = os.path.join('/', '/'.join(
                os.getcwd().split('/')[1:3]), 'adc-toolbox',
                os.path.relpath('data/cams/' + component_nom + '/' + CAMS_product_name))
            if os.path.isfile(CAMS_product_path):
                print('The file exists, it will not be downloaded again.')
            else:
                print('The file does not exist, it will be downloaded.')
                c.retrieve(
                    model_full_name,
                    {
                        'date': start_date + '/' + end_date,
                        'type': 'forecast',
                        'format': 'grib',
                        'variable': component,
                        'model_level': [str(x + 1) for x in range(137)],
                        'time': '00:00',
                        'leadtime_hour': [str(x) for x in range(0, 24, 3)],
                        'area': [lat_max, lon_min, lat_min, lon_max],
                    },
                    CAMS_product_path)
        elif model_level == 'Single':
            # Total-column (single level) forecast product
            CAMS_product_name = ('CAMS_FORECAST_' + component_nom + '_TC_' + start_date + '_' + end_date +
                                 '_' + area_name + '.grib')
            CAMS_product_path = os.path.join('/', '/'.join(
                os.getcwd().split('/')[1:3]), 'adc-toolbox',
                os.path.relpath('data/cams/' + component_nom + '/' + CAMS_product_name))
            if os.path.isfile(CAMS_product_path):
                print('The file exists, it will not be downloaded again.')
            else:
                print('The file does not exist, it will be downloaded.')
                # NOTE(review): this re-creates the API client already built
                # above — harmless but redundant.
                c = cdsapi.Client(url = 'https://ads.atmosphere.copernicus.eu/api/v2', key = ADS_key)
                c.retrieve(
                    'cams-global-atmospheric-composition-forecasts',
                    {
                        'date': start_date + '/' + end_date,
                        'type': 'forecast',
                        'format': 'grib',
                        'variable': 'total_column_' + component,
                        'time': '00:00',
                        'leadtime_hour': [str(x) for x in range(0, 24, 3)],
                        'area': [lat_max, lon_min, lat_min, lon_max],
                    },
                    CAMS_product_path)
    elif model_full_name == 'cams-global-reanalysis-eac4-monthly':
        CAMS_type = 'Reanalysis'
        if model_level == 'Single':
            # Monthly-mean total-column reanalysis product
            CAMS_product_name = ('CAMS_REANALYSIS_' + component_nom + '_TC_' + start_date + '_' + end_date +
                                 '_' + area_name + '.grib')
            CAMS_product_path = os.path.join('/', '/'.join(
                os.getcwd().split('/')[1:3]), 'adc-toolbox',
                os.path.relpath('data/cams/' + component_nom + '/' + CAMS_product_name))
            if os.path.isfile(CAMS_product_path):
                print('The file exists, it will not be downloaded again.')
            else:
                print('The file does not exist, it will be downloaded.')
                # Collect the unique years and months covered by the query
                months = []
                years = []
                for date in dates:
                    year = date.split('-')[0]
                    month = date.split('-')[1]
                    if year not in years:
                        years.append(year)
                    if month not in months:
                        months.append(month)
                c.retrieve(
                    model_full_name,
                    {
                        'format': 'grib',
                        'variable': 'total_column_' + component,
                        'year': years,
                        'month': months,
                        'product_type': 'monthly_mean',
                        'area': [lat_max, lon_min, lat_min, lon_max],
                    },
                    CAMS_product_path)
        elif model_level == 'Multiple':
            # Build the first and last day of every month in the query range
            start_dates = pd.date_range(np.datetime64(start_date), np.datetime64(end_date), freq='MS')
            start_dates = tuple(np.unique([date.strftime('%Y-%m-%d') for date in start_dates]))
            end_dates = pd.date_range(np.datetime64(start_date), np.datetime64(end_date), freq='M')
            end_dates = tuple(np.unique([date.strftime('%Y-%m-%d') for date in end_dates]))
            # Download month by month (to avoid crashing the server)
            CAMS_product_name = []
            for start_date, end_date in zip(start_dates, end_dates):
                CAMS_product_name_month = ('CAMS_REANALYSIS_' + component_nom + '_60_LEVELS_' + start_date + '_' + end_date +
                                           '_' + area_name + '.grib')
                CAMS_product_path = os.path.join('/', '/'.join(
                    os.getcwd().split('/')[1:3]), 'adc-toolbox',
                    os.path.relpath('data/cams/' + component_nom + '/' + CAMS_product_name_month))
                if os.path.isfile(CAMS_product_path):
                    print('The file exists, it will not be downloaded again.')
                else:
                    print('The file does not exist, it will be downloaded.')
                    c.retrieve(
                        'cams-global-reanalysis-eac4',
                        {
                            'date': start_date + '/' + end_date,
                            'format': 'grib',
                            'variable': component,
                            'model_level': [str(x + 1) for x in range(60)],
                            'time': ['00:00', '03:00', '06:00', '09:00', '12:00', '15:00', '18:00', '21:00',],
                            'area': [lat_max, lon_min, lat_min, lon_max],
                        },
                        CAMS_product_path)
                CAMS_product_name.append(CAMS_product_name_month)
    return CAMS_product_name, CAMS_type
def CAMS_read(CAMS_product_name, component, component_nom, dates):
    """ Read CAMS levels dataset as xarray dataset object

        Args:
            CAMS_product_name (str or list): Product name(s) of CAMS product;
                a list when the multi-level reanalysis was downloaded month by month
            component (str): Component name
            component_nom (str): Component chemical nomenclature
            dates (arr): Query dates

        Returns:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
            dates (arr or tuple): Dates actually available in the dataset
            CAMS_levels_df (dataframe): Table with CAMS model levels data
    """
    # Read as xarray dataset object (a list means monthly reanalysis files,
    # merged with open_mfdataset along time)
    if isinstance(CAMS_product_name, list):
        CAMS_ds = xr.open_mfdataset(os.path.join('/', '/'.join(
            os.getcwd().split('/')[1:3]), 'adc-toolbox',
            os.path.relpath('data/cams/' + component_nom + '/CAMS_REANALYSIS_' + component_nom + '_60_LEVELS_*')),
            concat_dim = 'time')
    else:
        CAMS_ds = xr.open_dataset(os.path.join('/', '/'.join(
            os.getcwd().split('/')[1:3]), 'adc-toolbox',
            os.path.relpath('data/cams/' + component_nom + '/' + CAMS_product_name)))
    # Rename the component variable to the generic name 'component'.
    # Multi-level files carry a 'hybrid' variable; total-column files use the
    # 'tc'/'gtco3' naming — presumably; confirm against the GRIB contents.
    if 'hybrid' in CAMS_ds.keys():
        if component == 'ozone':
            CAMS_ds = CAMS_ds.rename({'go3': 'component'})
        else:
            CAMS_ds = CAMS_ds.rename({component_nom.lower(): 'component'})
    else:
        if component == 'ozone':
            CAMS_ds = CAMS_ds.rename({'gtco3': 'component'})
        else:
            CAMS_ds = CAMS_ds.rename({'tc' + component_nom.lower(): 'component'})
    if 'REANALYSIS_' + component_nom + '_TC_' in CAMS_product_name:
        # Remove data for dates that have been downloaded but not asked for (error of the CAMS API!)
        all_datetimes = []
        for date in dates:
            year = int(date.split('-')[0])
            month = int(date.split('-')[1])
            time_str = np.datetime64(dt.datetime(year, month, 1, 0, 0, 0, 0))
            all_datetimes.append(time_str)
        # Drop datetimes
        datetimes_to_delete = np.setdiff1d(CAMS_ds.time.values, np.array(all_datetimes))
        if datetimes_to_delete.size != 0:
            CAMS_ds = CAMS_ds.drop_sel(time = datetimes_to_delete)
        # Available dates (returned so callers can adapt to what is on disk)
        dates_to_keep = np.intersect1d(CAMS_ds.time.values, np.array(all_datetimes))
        dates = tuple(dates_to_keep.astype('datetime64[M]').astype(str))
        # Remove step since there is only one
        CAMS_ds = CAMS_ds.drop('step')
    # Arrange coordinates: longitudes wrapped to [-180, 180], both axes ascending
    CAMS_ds = CAMS_ds.assign_coords(longitude = (((CAMS_ds.longitude + 180) % 360) - 180)).sortby('longitude')
    CAMS_ds = CAMS_ds.sortby('latitude')
    # Assign time as dimension (when there is only one time)
    if CAMS_ds.time.values.size == 1:
        CAMS_ds = CAMS_ds.expand_dims(dim = ['time'])
    # Get model levels
    CAMS_levels_df = CAMS_levels(CAMS_ds, CAMS_product_name)
    return CAMS_ds, dates, CAMS_levels_df
def CAMS_levels(CAMS_ds, CAMS_product_name):
    """ Create table with information about the CAMS model levels

        Args:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
            CAMS_product_name (str or list): Product name(s) of CAMS product;
                a list when the multi-level reanalysis was downloaded month by month

        Returns:
            CAMS_levels_df (dataframe): Table with CAMS levels data, indexed by
                model level ('hybrid'), with half-level pressure, geopotential
                altitude, density and layer-depth columns
    """
    # CAMS_product_name is a list for the 60-level reanalysis (one file per
    # month). The original substring test ran against the list itself, which
    # tests element equality and always picked the 137-level table; normalize
    # to a single name first.
    if isinstance(CAMS_product_name, list):
        product_name = CAMS_product_name[0]
    else:
        product_name = CAMS_product_name
    # Read CSV table with information about the model levels
    if '60_LEVELS' in product_name:
        CAMS_levels_df = pd.read_csv(os.path.join('/', '/'.join(
            os.getcwd().split('/')[1:3]), 'adc-toolbox',
            os.path.relpath('data/cams/60-levels-definition.csv')))
    else:
        CAMS_levels_df = pd.read_csv(os.path.join('/', '/'.join(
            os.getcwd().split('/')[1:3]), 'adc-toolbox',
            os.path.relpath('data/cams/137-levels-definition.csv')))
    # Drop first row and set n as index hybrid
    CAMS_levels_df = CAMS_levels_df.drop(0).reset_index(drop = True)
    CAMS_levels_df = CAMS_levels_df.set_index('n')
    CAMS_levels_df.index.names = ['hybrid']
    # Change important columns to numeric
    CAMS_levels_df['ph [Pa]'] = pd.to_numeric(CAMS_levels_df['ph [hPa]']) * 100
    CAMS_levels_df['Geopotential Altitude [m]'] = pd.to_numeric(CAMS_levels_df['Geopotential Altitude [m]'])
    CAMS_levels_df['Density [kg/m^3]'] = pd.to_numeric(CAMS_levels_df['Density [kg/m^3]'])
    # Layer depth = difference between consecutive geopotential altitudes;
    # the lowest level keeps its full altitude (distance to the ground).
    CAMS_levels_df['Depth [m]'] = CAMS_levels_df['Geopotential Altitude [m]'].diff(-1)
    # Use label-based .loc instead of chained indexing
    # (df[col].iloc[-1] = ...), which can silently assign to a temporary
    # copy of the column (pandas SettingWithCopy).
    CAMS_levels_df.loc[CAMS_levels_df.index[-1], 'Depth [m]'] = CAMS_levels_df['Geopotential Altitude [m]'].iloc[-1]
    return CAMS_levels_df
def CAMS_pressure(CAMS_ds, CAMS_product_name, CAMS_levels_df, start_date, end_date, component_nom,
                  lat_min, lat_max, lon_min, lon_max, area_name, CAMS_UID = None, CAMS_key = None):
    """ Download surface pressure and calculate levels pressure following the instructions given at:
        https://confluence.ecmwf.int/display/OIFS/4.4+OpenIFS%3A+Vertical+Resolution+and+Configurations

        The half-level pressure is p_{k+1/2} = a_k + b_k * p_surface; the
        full-level pressure stored in 'pressure' is the mean of the two
        adjacent half levels (0 at the top of the atmosphere).

        Args:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
            CAMS_product_name (str): Product name of CAMS product
            CAMS_levels_df (dataframe): Table with 137 CAMS levels data
            start_date (str): Query start date
            end_date (str): Query end date
            component_nom (str): Component chemical nomenclature
            lat_min (int): Minimum latitude
            lat_max (int): Maximum latitude
            lon_min (int): Minimum longitude
            lon_max (int): Maximum longitude
            area_name (str): User defined area name
            CAMS_UID (str): ADS user ID
            CAMS_key (str): ADS key

        Returns:
            CAMS_ds (xarray): CAMS levels dataset in xarray format, with a
                'pressure' variable added
    """
    CAMS_pressure_product_name = ('_SURFACE_PRESSURE_' + start_date + '_' + end_date +
                                  '_' + area_name + '.grib')
    # Get API key (explicit credentials take precedence over keys.txt)
    if CAMS_UID != None and CAMS_key != None:
        ADS_key = CAMS_UID + ':' + CAMS_key
    else:
        ADS_key = get_ADS_API_key()
    # Connect to the server
    c = cdsapi.Client(url = 'https://ads.atmosphere.copernicus.eu/api/v2', key = ADS_key)
    # Download surface pressure data
    # NOTE(review): unlike CAMS_download, there is no file-exists check here,
    # so the surface-pressure file is re-downloaded on every call.
    if 'FORECAST' in CAMS_product_name:
        CAMS_surface_pressure_path = os.path.join('/', '/'.join(
            os.getcwd().split('/')[1:3]), 'adc-toolbox',
            os.path.relpath('data/cams/' + component_nom + '/CAMS_FORECAST' + CAMS_pressure_product_name))
        c.retrieve(
            'cams-global-atmospheric-composition-forecasts',
            {
                'date': start_date + '/' + end_date,
                'type': 'forecast',
                'format': 'grib',
                'variable': 'surface_pressure',
                'leadtime_hour': [str(x) for x in range(0, 24, 3)],
                'time': '00:00',
                'area': [lat_max, lon_min, lat_min, lon_max],
            },
            CAMS_surface_pressure_path)
    elif 'REANALYSIS' in CAMS_product_name:
        CAMS_surface_pressure_path = os.path.join('/', '/'.join(
            os.getcwd().split('/')[1:3]), 'adc-toolbox',
            os.path.relpath('data/cams/' + component_nom + '/CAMS_REANALYSIS' + CAMS_pressure_product_name))
        c.retrieve(
            'cams-global-reanalysis-eac4',
            {
                'date': start_date + '/' + end_date,
                'format': 'grib',
                'variable': 'surface_pressure',
                'time': ['00:00', '03:00', '06:00',
                         '09:00', '12:00', '15:00',
                         '18:00', '21:00',],
                'area': [lat_max, lon_min, lat_min, lon_max],
            },
            CAMS_surface_pressure_path)
    # Coordinates of the component dataset, reused below for the new arrays
    hybrid = CAMS_ds['hybrid'].data
    time = CAMS_ds['time'].data
    step = CAMS_ds['step'].data
    latitude = CAMS_ds['latitude'].data
    longitude = CAMS_ds['longitude'].data
    # Read surface pressure
    model_pressure_ds = xr.open_dataarray(CAMS_surface_pressure_path)
    # Arrange coordinates: longitudes to [-180, 180], both axes ascending
    model_pressure_ds = model_pressure_ds.assign_coords(longitude = (((model_pressure_ds.longitude + 180) % 360) - 180)).sortby('longitude')
    model_pressure_ds = model_pressure_ds.sortby('latitude')
    # Assign time as dimension (when there is only one time)
    if model_pressure_ds.time.values.size == 1:
        model_pressure_ds = model_pressure_ds.expand_dims(dim = ['time'])
    # Transpose dimensions
    model_pressure_ds = model_pressure_ds.transpose('time', 'step', 'latitude', 'longitude')
    # Subset surface pressure dataset
    # NOTE(review): bbox, sensor and sensor_type are not parameters of this
    # function — they must exist as notebook globals; confirm.
    model_pressure_ds = subset(model_pressure_ds, bbox, sensor, component_nom, sensor_type, subset_type = 'model_subset')
    sp_array = xr.DataArray(
        model_pressure_ds.values,
        dims = ('time', 'step', 'latitude', 'longitude'),
        coords = {
            'time': ('time', time),
            'step': ('step', step),
            'latitude': ('latitude', latitude),
            'longitude': ('longitude', longitude),
        },
        name = 'surface_pressure'
    )
    # Hybrid-level coefficients a and b from the levels table
    a_array = xr.DataArray(
        CAMS_levels_df['a [Pa]'],
        dims = ('hybrid'),
        coords = {'hybrid': ('hybrid', hybrid),},
        name = 'a'
    )
    b_array = xr.DataArray(
        CAMS_levels_df['b'],
        dims = ('hybrid'),
        coords = {'hybrid': ('hybrid', hybrid),},
        name = 'b'
    )
    CAMS_ds['surface_pressure'] = sp_array
    CAMS_ds['a'] = a_array
    CAMS_ds['b'] = b_array
    # Half-level pressures; the shifted copy is the level above, with NaN at
    # the top replaced by 0 (top-of-atmosphere pressure)
    CAMS_ds['pressure_1/2'] = CAMS_ds['a'] + CAMS_ds['surface_pressure'] * CAMS_ds['b']
    CAMS_ds['pressure_-1/2'] = CAMS_ds['pressure_1/2'].shift(hybrid = 1)
    CAMS_ds['pressure_-1/2'] = CAMS_ds['pressure_-1/2'].where(~np.isnan(CAMS_ds['pressure_-1/2']), 0, drop = False)
    CAMS_ds['pressure'] = 0.5 * (CAMS_ds['pressure_-1/2'] + CAMS_ds['pressure_1/2'])
    # Drop the helper variables, keeping only the derived 'pressure'
    CAMS_ds = CAMS_ds.drop_vars(['a', 'b', 'surface_pressure', 'pressure_1/2', 'pressure_-1/2'])
    return CAMS_ds
def CAMS_get_levels_data(CAMS_ds, CAMS_product_name, CAMS_levels_df, column_type,
                         lat_min, lat_max, lon_min, lon_max):
    """ Get the tropospheric or column model data, depending on the nature of the sensor data

        Args:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
            CAMS_product_name (str): Product name of CAMS product
            CAMS_levels_df (dataframe): Table with 137 CAMS levels data
            column_type (str): Tropospheric or total column
            lat_min (int): Minimum latitude
            lat_max (int): Maximum latitude
            lon_min (int): Minimum longitude
            lon_max (int): Maximum longitude

        Returns:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
    """
    # Get units and calculate tropospheric columns if needed
    units = CAMS_ds.component.attrs['units']
    if 'REANALYSIS' in CAMS_product_name:
        # Reanalysis provides total columns only; they are used as-is in both cases.
        if column_type == 'tropospheric':
            print('The model total columns will be directly compared to the tropospheric sensor columns.')
        elif column_type == 'total':
            print('The model total columns will be compared to the total sensor columns.')
    elif 'FORECAST' in CAMS_product_name:
        if column_type == 'tropospheric':
            print('The model tropospheric columns will be compared to the tropospheric sensor columns.')
            print('The model tropospheric columns will be estimated (pressures above or equal to 300 hPa).')
            # Calculate levels pressure
            # NOTE(review): start_date, end_date, component_nom, area_name and
            # apply_kernels are not parameters of this function — they must be
            # defined as notebook globals; confirm.
            CAMS_ds = CAMS_pressure(CAMS_ds, CAMS_product_name, CAMS_levels_df, start_date, end_date, component_nom,
                                    lat_min, lat_max, lon_min, lon_max, area_name, CAMS_UID = None, CAMS_key = None)
            if apply_kernels == False:
                # Troposphere taken as levels with pressure >= 300 hPa (30000 Pa),
                # then summed over the vertical dimension
                CAMS_ds = CAMS_ds.where(CAMS_ds.pressure >= 30000, drop = True)
                CAMS_ds = CAMS_ds.sum(dim = 'hybrid')
                # Summing drops attrs; restore the units attribute
                CAMS_ds['component'] = CAMS_ds.component.assign_attrs({'units': units})
        if column_type == 'total':
            print('The model total columns will be compared to the total sensor columns.')
    return CAMS_ds
def CAMS_kg_kg_to_kg_m2(CAMS_ds, CAMS_levels_df, sensor, start_date, end_date,
                        component_nom, apply_kernels = False, CAMS_UID = None, CAMS_key = None):
    """ Convert the units of the CAMS partial columns for any component from kg/kg to kg/m2. To do this,
        calculate columns above each CAMS half level assuming it is 0 at the top of the atmosphere

        Args:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
            CAMS_levels_df (dataframe): Table with 137 CAMS levels data
            sensor (str): Name of the sensor
            start_date (str): Query start date
            end_date (str): Query end date
            component_nom (str): Component chemical nomenclature
            apply_kernels (bool): Apply (True) or not (False) the averaging kernels
            CAMS_UID (str): ADS user ID
            CAMS_key (str): ADS key

        Returns:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
    """
    # Calculate columns above each CAMS half level (cumulative from the top),
    # needed when TROPOMI averaging kernels are applied
    if sensor == 'tropomi' and apply_kernels == True:
        print('The columns above each CAMS half level will be calculated.')
        # Initialize new array
        CAMS_ds_all = []
        for time in CAMS_ds.time:
            # Select data for each timestep
            CAMS_ds_time_old = CAMS_ds.sel(time = time)
            # Initialize partial columns at the top of the atmosphere (hybrid = 1) as 0
            # (where(cond, 0) keeps values <= 0 and replaces the rest with 0)
            PC_hybrid_0 = CAMS_ds_time_old.sel(hybrid = 1)
            PC_hybrid_0['component'] = PC_hybrid_0['component'].where(PC_hybrid_0['component'] <= 0, 0, drop = False)
            PC_hybrid_0 = PC_hybrid_0.expand_dims(dim = ['hybrid'])
            # Create new model dataset
            PC_above_all = []
            PC_above_all.append(PC_hybrid_0)
            CAMS_ds_time_new = PC_hybrid_0
            # NOTE(review): the 137-level loop assumes the forecast grid; a
            # 60-level reanalysis dataset would need range(1, 60) — confirm.
            for hybrid in range(1, 137):
                # Get current and previous partial columns and level pressures
                PC_last = CAMS_ds_time_new.component.sel(hybrid = hybrid)
                PC_current = CAMS_ds_time_old.component.sel(hybrid = hybrid + 1)
                pressure_last = CAMS_ds_time_old.pressure.sel(hybrid = hybrid)
                pressure_current = CAMS_ds_time_old.pressure.sel(hybrid = hybrid + 1)
                # Calculate pressure difference
                pressure_diff = pressure_current - pressure_last
                # Calculate partial columns above each model level
                # (hydrostatic relation: mixing ratio * dp / g gives mass per area)
                # Units: (kg/kg * kg/m*s2) * s2/m -> kg/m2
                PC_above = CAMS_ds_time_old.sel(hybrid = hybrid + 1)
                PC_above['component'] = PC_last + PC_current * pressure_diff * (1/9.81)
                # Append result; re-concatenating every iteration keeps
                # CAMS_ds_time_new up to date for the next PC_last lookup
                PC_above_all.append(PC_above)
                CAMS_ds_time_new = xr.concat(PC_above_all, pd.Index(range(1, hybrid + 2), name = 'hybrid'))
            CAMS_ds_all.append(CAMS_ds_time_new)
        CAMS_ds = xr.concat(CAMS_ds_all, dim = 'time')
    else:
        # Create xarray object from CAMS model levels information
        CAMS_levels_ds = CAMS_levels_df.to_xarray()
        # Convert units from kg/kg to kg/m3 (multiply by layer density)
        CAMS_ds = CAMS_ds * CAMS_levels_ds['Density [kg/m^3]']
        # Convert units from kg/m3 to kg/m2 (multiply by layer depth)
        CAMS_ds = CAMS_ds * CAMS_levels_ds['Depth [m]']
    return CAMS_ds
def CAMS_kg_m2_to_molecules_cm2(CAMS_ds, component_mol_weight):
    """ Convert the units of the CAMS dataset for any component from kg/m2 to molecules/cm2

        kg/m2 -> molecules/cm2: multiply by Avogadro's number, convert
        kg -> g (x1000) and m2 -> cm2 (/10000), and divide by the molar mass.

        Args:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
            component_mol_weight (float): Component molecular weight (g/mol)

        Returns:
            CAMS_ds (xarray): CAMS levels dataset in xarray format (modified in place)
    """
    # Avogadro's number (molecules per mole)
    avogadro = 6.022*10**23
    molecules = CAMS_ds['component'] * avogadro * 1000
    CAMS_ds['component'] = molecules / (10000 * component_mol_weight)
    return CAMS_ds
def CAMS_molecules_cm2_to_DU(CAMS_ds):
    """ Convert the units of the CAMS dataset for any component from molecules/cm2 to DU for ozone

        One Dobson Unit corresponds to 2.69e16 molecules/cm2.

        Args:
            CAMS_ds (xarray): CAMS levels dataset in xarray format

        Returns:
            CAMS_ds (xarray): CAMS levels dataset in xarray format
    """
    # 1 DU = 2.69e16 molecules/cm2
    molecules_per_DU = 2.69*10**16
    return CAMS_ds / molecules_per_DU
```
| github_jupyter |
# SARK-110 Time Domain and Gating Example
Example adapted from: https://scikit-rf.readthedocs.io/en/latest/examples/networktheory/Time%20Domain.html
- Measurements with a 2.8m section of rg58 coax cable not terminated at the end
This notebook demonstrates how to use scikit-rf for time-domain analysis and gating. A quick example is given first, followed by a more detailed explanation.
S-parameters are measured in the frequency domain, but can be analyzed in the time domain if you like. In many cases, measurements are not made down to DC. This implies that the time-domain transform is not complete, but it can be very useful nonetheless. A major application of time-domain analysis is to use gating to isolate a single response in space. More information about the details of time-domain analysis can be found in the scikit-rf documentation.
Please ensure that the analyzer is connected to the computer using the USB cable and in Computer Control mode.
```
from sark110 import *
import skrf as rf
rf.stylely()
from pylab import *
```
Enter frequency limits:
```
# Sweep configuration: 100 kHz to 230 MHz in 401 points.
fr_start = 100000 # Frequency start in Hz
fr_stop = 230000000 # Frequency stop in Hz
points = 401 # Number of points
```
## Utility functions
```
def z2vswr(rs: float, xs: float, z0=50 + 0j) -> float:
    """Return the VSWR of impedance Z = rs + j*xs relative to z0.

    Only the real part of z0 is used. Reflection magnitudes above the clamp
    threshold (SWR ~ 100) are reported as 99.999.
    """
    num = math.sqrt((rs - z0.real) ** 2 + xs ** 2)
    den = math.sqrt((rs + z0.real) ** 2 + xs ** 2)
    gamma = num / den
    # Clamp near-total reflection to avoid a blow-up of (1+g)/(1-g).
    if gamma > 0.980197824:
        return 99.999
    return (1 + gamma) / (1 - gamma)
def z2mag(r: float, x: float) -> float:
    """Return the magnitude |Z| of the complex impedance Z = r + j*x."""
    mag_squared = r ** 2 + x ** 2
    return math.sqrt(mag_squared)
def z2gamma(rs: float, xs: float, z0=50 + 0j) -> complex:
    """Return the complex reflection coefficient of Z = rs + j*xs w.r.t. z0."""
    impedance = complex(rs, xs)
    return (impedance - z0) / (impedance + z0)
```
## Connect to the device
```
# Open the USB connection to the SARK-110 and verify the link; beep and
# print the firmware version on success.
sark110 = Sark110()
sark110.open()
sark110.connect()
if not sark110.is_connected:
    print("Device not connected")
    exit(-1)
else:
    print("Device connected")
sark110.buzzer()
print(sark110.fw_protocol, sark110.fw_version)
```
## Acquire and plot the data
```
# Sweep the analyzer over [fr_start, fr_stop], convert each measured
# impedance to a reflection coefficient, and build a scikit-rf Network.
y = []
x = []
# measure() fills these single-element lists in place
rs = [0]
xs = [0]
for i in range(points):
    fr = int(fr_start + i * (fr_stop - fr_start) / (points - 1))
    sark110.measure(fr, rs, xs)
    x.append(fr / 1e9) # Units in GHz
    y.append(z2gamma(rs[0][0], xs[0][0]))
# NOTE(review): frequencies are scaled to GHz above, but the unit below is
# set to 'mhz' — looks inconsistent; confirm the intended frequency scaling.
probe = rf.Network(frequency=x, s=y, z0=50)
probe.frequency.unit = 'mhz'
print (probe)
```
# Quick example
```
# we will focus on s11
s11 = probe.s11
# time-gate the first largest reflection
# (center/span in ns; the gate isolates the response around t = 0)
s11_gated = s11.time_gate(center=0, span=50)
s11_gated.name='gated probe'
# plot frequency and time-domain s-parameters
figure(figsize=(8,4))
subplot(121)
s11.plot_s_db()
s11_gated.plot_s_db()
title('Frequency Domain')
subplot(122)
s11.plot_s_db_time()
s11_gated.plot_s_db_time()
title('Time Domain')
tight_layout()
```
# Interpreting Time Domain
Note there are two time-domain plotting functions in scikit-rf:
- Network.plot_s_db_time()
- Network.plot_s_time_db()
The difference is that the former, plot_s_db_time(), employs windowing before plotting to enhance impulse resolution. Windowing will be discussed in a bit, but for now we just use plot_s_db_time().
Plotting all four s-parameters of the probe in both frequency and time-domain.
```
# plot frequency and time-domain s-parameters
# Side-by-side comparison of the raw measurement in both domains.
figure(figsize=(8,4))
subplot(121)
probe.plot_s_db()
title('Frequency Domain')
subplot(122)
probe.plot_s_db_time()
title('Time Domain')
tight_layout()
```
Focusing on the reflection coefficient from the waveguide port (s11), you can see there is an interference pattern present.
```
# Ripple in |s11| vs frequency indicates several discrete reflections.
probe.plot_s_db(0,0)
title('Reflection Coefficient From \nWaveguide Port')
```
This ripple is evidence of several discrete reflections. Plotting s11 in the time-domain allows us to see where, or when, these reflections occur.
```
# Time-domain view of s11: each peak locates one reflection along the line.
probe_s11 = probe.s11
probe_s11.plot_s_db_time(0,0)
title('Reflection Coefficient From \nWaveguide Port, Time Domain')
ylim(-100,0)
```
# Gating The Reflection of Interest
To isolate the reflection from the waveguide port, we can use time-gating. This can be done by using the method Network.time_gate(), and providing it an appropriate center and span (in ns). To see the effects of the gate, both the original and gated response are compared.
```
# Gate the reflection of interest and compare gated vs ungated responses.
probe_s11_gated = probe_s11.time_gate(center=0, span=50)
probe_s11_gated.name='gated probe'
# NOTE(review): the plots below reuse s11/s11_gated from the quick example
# instead of probe_s11/probe_s11_gated computed here — presumably a
# copy-paste leftover; confirm which networks should be plotted.
s11.plot_s_db_time()
s11_gated.plot_s_db_time()
```
Next, compare both responses in frequency domain to see the effect of the gate.
```
# Effect of the gate in the frequency domain.
s11.plot_s_db()
s11_gated.plot_s_db()
```
# Auto-gate
The time-gating method in skrf has an auto-gating feature which can also be used to gate the largest reflection. When no gate parameters are provided, time_gate() does the following:
find the two largest peaks
center the gate on the tallest peak
set span to distance between two tallest peaks
You may want to plot the gated network in time-domain to see what the determined gate shape looks like.
```
title('Waveguide Interface of Probe')
s11.plot_s_db(label='original')
# With no parameters, time_gate() determines the gate automatically from the
# two largest time-domain peaks (see the description above).
s11.time_gate().plot_s_db(label='autogated') #autogate on the fly
```
# Determining Distance
To make time-domain useful as a diagnostic tool, one would like to convert the x-axis to distance. This requires knowledge of the propagation velocity in the device. skrf provides some transmission-line models in the module skrf.media, which can be used for this.
However...
For dispersive media, such as rectangular waveguide, the phase velocity is a function of frequency, and transforming time to distance is not straightforward. As an approximation, you can normalize the x-axis to the speed of light.
Alternatively, you can simulate a known device and compare the two time-domain responses. This allows you to attribute quantitative meaning to the axes. For example, you could create an ideal delayed load as shown below. Note: the magnitude of a response behind a large impulse does not have meaningful units.
```
from skrf.media import DistributedCircuit
# create a Media object for RG-58, based on distributed ckt values
rg58 = DistributedCircuit(
    frequency = probe.frequency,
    C =93.5e-12,#F/m
    L =273e-9, #H/m
    R =0, #53e-3, #Ohm/m
    G =0, #S/m
    )
# create an ideal delayed load, parameters are adjusted until the
# theoretical response agrees with the measurement
theory = rg58.delay_load(Gamma0=rf.db_2_mag(-20),
                         d=280, unit='cm')
# Overlay measurement and theory so the time axis gains physical meaning.
probe.plot_s_db_time(0,0, label = 'Measurement')
theory.plot_s_db_time(label='-20dB @ 280cm from test-port')
ylim(-100,0)
xlim(-500,500)
```
This plot demonstrates a few important points:
the theoretical delayed load is not a perfect impulse in time. This is due to the dispersion in the waveguide.
the peak of the magnitude in the time domain is not identical to that specified, also due to dispersion (and windowing).
# What the hell is Windowing?
The 'plot_s_db_time()' function does a few things.
windows the s-parameters.
converts to time domain
takes magnitude component, convert to dB
calculates time-axis s
plots
A word about step 1: windowing. An FFT represents a signal with a basis of periodic signals (sinusoids). If your frequency response is not periodic, which in general it isn't, taking an FFT will introduce artifacts in the time-domain results. To minimize these effects, the frequency response is windowed. This makes the frequency response more periodic by tapering off the band edges.
Windowing is only applied to improve the plot appearance; it does not affect the original network.
In skrf this can be done explicitly using the 'windowed()' function. By default this function uses the hamming window, but can be adjusted through arguments. The result of windowing is show below.
```
# windowed() returns a new network with the band edges tapered; compare the
# windowed and original frequency responses.
probe_w = probe.windowed()
probe.plot_s_db(0,0, label = 'Original')
probe_w.plot_s_db(0,0, label = 'Windowed')
```
Comparing the two time-domain plotting functions, we can see the difference between windowed and not.
```
# plot_s_time_db does not window internally, so the two traces differ.
probe.plot_s_time_db(0,0, label = 'Original')
probe_w.plot_s_time_db(0,0, label = 'Windowed')
```
# The end!
```
sark110.close()
```
| github_jupyter |
# Mouse Bone Marrow - merging annotated samples from MCA
```
import scanpy as sc
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import colors
import seaborn as sb
import glob
import rpy2.rinterface_lib.callbacks
import logging
from rpy2.robjects import pandas2ri
import anndata2ri
# Ignore R warning messages
#Note: this can be commented out to get more verbose R output
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR)
# Automatically convert rpy2 outputs to pandas dataframes
pandas2ri.activate()
anndata2ri.activate()
%load_ext rpy2.ipython
plt.rcParams['figure.figsize']=(8,8) #rescale figures
sc.settings.verbosity = 3
#sc.set_figure_params(dpi=200, dpi_save=300)
sc.logging.print_versions()
# Output path for the merged/pre-processed AnnData object
results_file = './write/MCA_mou_BM_pp.h5ad'
%%R
# Load all the R libraries we will be using in the notebook
library(scran)
```
## Load
Here we load the pre-processed datasets (which has been annotated), and the raw matrices (which won't be filtered on the gene level).
### Raw data
```
# Directory with the raw MCA bone-marrow (c-kit) count matrices.
file_paths = '../../Munich/datasets/mouse/MCA_boneMarrow/ckit/'
# Read every compressed text matrix into an AnnData object.
adatas_raw = []
for i in glob.glob(file_paths+'*.txt.gz'):
    print(i)
    adatas_raw.append(sc.read(i, cache=True))
# Sample labels, positionally matched to the glob order of the files above.
# NOTE(review): glob order is filesystem-dependent -- confirm that it really
# maps BM_1, BM_3, BM_2 to the files in that order.
samples = ['BM_1', 'BM_3', 'BM_2']
# Loop to annotate data
for i in range(len(adatas_raw)):
    adata_tmp = adatas_raw[i]
    # Raw matrices come in genes x cells; transpose to the AnnData
    # convention of cells (obs) x genes (var).
    adata_tmp = adata_tmp.transpose()
    #Annotate data
    adata_tmp.obs.index.rename('barcode', inplace=True)
    adata_tmp.obs['batch'] = ['MCA_'+samples[i]]*adata_tmp.n_obs
    adata_tmp.obs['study'] = ['MCA_BM']*adata_tmp.n_obs
    adata_tmp.obs['chemistry'] = ['microwell-seq']*adata_tmp.n_obs
    adata_tmp.obs['tissue'] = ['Bone_Marrow']*adata_tmp.n_obs
    adata_tmp.obs['species'] = ['Mouse']*adata_tmp.n_obs
    adata_tmp.obs['data_type'] = ['UMI']*adata_tmp.n_obs
    adata_tmp.var.index.names = ['gene_symbol']
    adata_tmp.var_names_make_unique()
    adatas_raw[i] = adata_tmp
adatas_raw[0].obs.head()
# Concatenate to unique adata object
adata_raw = adatas_raw[0].concatenate(adatas_raw[1:], batch_key='sample_ID', index_unique=None)
adata_raw.obs.head()
# sample_ID duplicates the information already stored in 'batch'; drop it.
adata_raw.obs.drop(columns=['sample_ID'], inplace=True)
adata_raw.obs.head()
adata_raw.shape
```
### Pre-processed data
```
# Directory with the annotated, QC-filtered AnnData files.
file_paths = '../../Bone_Marrow_mouse/write/'
adatas_pp = []
for i in glob.glob(file_paths+'*.h5ad'):
    print(i)
    adatas_pp.append(sc.read(i, cache=True))
# Keep only the obs columns needed downstream, indexed by barcode.
for i in range(len(adatas_pp)):
    adata_tmp = adatas_pp[i]
    adata_obs = adata_tmp.obs.reset_index()
    adata_obs = adata_obs[['index', 'final_annotation', 'dpt_pseudotime_y', 'n_counts', 'n_genes', 'mt_frac']].rename(columns = {'index':'barcode'})
    adata_obs.set_index('barcode', inplace = True)
    adatas_pp[i].obs = adata_obs
# Concatenate to unique adata object
adata_pp = adatas_pp[0].concatenate(adatas_pp[1:], batch_key='sample_ID',
                                    index_unique=None)
adata_pp.obs.drop(columns=['sample_ID'], inplace = True)
adata_pp.obs.head()
adata_raw.shape
adata_pp.shape
# Restrict to cells that passed QC and were annotated:
# left-join the annotations onto the raw obs table by barcode; raw cells
# without a match get NaN in 'final_annotation' and are dropped below.
adata_obs_raw = adata_raw.obs.reset_index()
adata_obs_pp = adata_pp.obs.reset_index()
adata_merged = adata_obs_raw.merge(adata_obs_pp, on='barcode', how='left')
adata_merged.set_index('barcode', inplace = True)
adata_raw.obs = adata_merged
adata_raw.obs.head()
adata_raw = adata_raw[~pd.isnull(adata_raw.obs['final_annotation'])]
adata_raw.shape
```
### Normalization
```
# Exclude genes that are = 0 in all cells
#Filter genes:
print('Total number of genes: {:d}'.format(adata_raw.n_vars))
# min_cells=1 keeps any gene detected in at least one cell, i.e. it only
# removes genes with zero counts everywhere (the original comment said
# "min 20 cells", which does not match the code).
sc.pp.filter_genes(adata_raw, min_cells=1)
print('Number of genes after cell filter: {:d}'.format(adata_raw.n_vars))
#Perform a clustering for scran normalization in clusters
# scran's computeSumFactors pools cells within clusters, so a rough Louvain
# clustering is computed first on a temporary, CPM-normalized copy.
adata_pp = adata_raw.copy()
sc.pp.normalize_per_cell(adata_pp, counts_per_cell_after=1e6)
sc.pp.log1p(adata_pp)
sc.pp.pca(adata_pp, n_comps=15, svd_solver='arpack')
sc.pp.neighbors(adata_pp)
sc.tl.louvain(adata_pp, key_added='groups', resolution=0.5)
# Check if the minimum number of cells per cluster is < 21: in that case,
# sizes should also be passed as input to the normalization.
adata_pp.obs['groups'].value_counts()
#Preprocess variables for scran normalization
input_groups = adata_pp.obs['groups']
# Transpose to genes x cells, the orientation scran expects.
data_mat = adata_raw.X.T
%%R -i data_mat -i input_groups -o size_factors
size_factors = computeSumFactors(data_mat, clusters=input_groups, min.mean=0.1)
#Delete adata_pp (no longer needed once size factors are computed)
del adata_pp
# Visualize the estimated size factors against library size and gene counts
adata_raw.obs['size_factors'] = size_factors
sc.pl.scatter(adata_raw, 'size_factors', 'n_counts')
sc.pl.scatter(adata_raw, 'size_factors', 'n_genes')
sb.distplot(size_factors, bins=50, kde=False)
plt.show()
#Keep the count data in a counts layer
adata_raw.layers["counts"] = adata_raw.X.copy()
#Normalize adata: divide each cell's counts by its size factor, then log1p
adata_raw.X /= adata_raw.obs['size_factors'].values[:,None]
sc.pp.log1p(adata_raw)
adata_raw.write(results_file)
```
| github_jupyter |
```
# importing the required libraries
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# function for reading the image
# this image is taken from a video
# and the video is taken from a thermal camera
# converting image from BGR to RGB
def read_image(image_path):
    """Load the image at *image_path* and return it in RGB channel order.

    OpenCV reads images as BGR, so the channels are swapped before
    returning.

    Raises:
        FileNotFoundError: if the file does not exist or cannot be decoded.
            (cv2.imread returns None instead of raising, which would
            otherwise surface later as a cryptic cv2.cvtColor error.)
    """
    image = cv2.imread(image_path)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# thermal camera takes the heat/thermal energy
# more heat means the pixel value is closer to 255
# if it is cool then pixel value is closer to 0
# displaying the image, where white portion means that part is having more temprature
# and vice versa
# Load the thermal frame (converted to RGB by read_image above).
# Brighter pixels correspond to hotter regions in the thermal image.
image = read_image("thermal_scr_img.png")
plt.imshow(image)
# converting the image into grayscale
# changing and applying the ColorMap, black and white to black and red
heatmap_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
heatmap = cv2.applyColorMap(heatmap_gray, cv2.COLORMAP_HOT)
# applyColorMap returns BGR; convert back to RGB for matplotlib display.
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
plt.imshow(heatmap)
# Threshold the grayscale heatmap: pixels brighter than 200 become white
# (hot regions), everything else becomes black.
heatmap_gray = cv2.cvtColor(heatmap, cv2.COLOR_RGB2GRAY)
ret, binary_thresh = cv2.threshold(heatmap_gray, 200, 255, cv2.THRESH_BINARY)
plt.imshow(binary_thresh, cmap='gray')
# Morphological opening (erode then dilate) with a 5x5 kernel removes small
# isolated white specks so only the larger hot blobs remain for temperature
# measurement.
kernel = np.ones((5,5), np.uint8)
image_erosion = cv2.erode(binary_thresh, kernel, iterations=1)
image_opening = cv2.dilate(image_erosion, kernel, iterations=1)
plt.imshow(image_opening, cmap='gray')
# Find the outlines of the remaining hot blobs, then demonstrate masking a
# single one. NOTE(review): index 11 is hand-picked for this particular
# frame -- it raises IndexError on images with fewer contours.
contours, _ = cv2.findContours(image_opening, 1, 2)
contour = contours[11]
mask = np.zeros_like(heatmap_gray)
x, y, w, h = cv2.boundingRect(contour)
# Copy only the blob's bounding-box region into the mask, then print the
# mean gray value inside it as a proxy for temperature.
mask[y:y+h, x:x+w] = image_opening[y:y+h, x:x+w]
print(cv2.mean(heatmap_gray, mask= mask))
plt.imshow(mask, cmap='gray')
# Show the heatmap with the selected blob blacked out: bitwise AND of the
# heatmap with the inverted (NOT) mask.
masked = cv2.bitwise_and(heatmap, heatmap, mask=~mask)
plt.imshow(masked)
# displaying the heatmap_gray
plt.imshow(heatmap_gray)
image_with_rectangles = np.copy(heatmap)
for contour in contours:
    # rectangle over each contour
    x, y, w, h = cv2.boundingRect(contour)
    # fresh mask covering only this blob's bounding box
    mask = np.zeros_like(heatmap_gray)
    mask[y:y+h, x:x+w] = image_opening[y:y+h, x:x+w]
    # temperature estimate: mean gray level divided by 2.25
    # NOTE(review): 2.25 looks like an ad-hoc calibration constant for this
    # camera -- confirm the scale and the "F" unit below.
    temp = round(cv2.mean(heatmap_gray, mask=mask)[0] / 2.25, 2)
    # draw rectangles for visualisation
    image_with_rectangles = cv2.rectangle(
        image_with_rectangles, (x,y), (x+w, y+h), (0, 255, 0), 2)
    # write temperature for each rectangle
    cv2.putText(image_with_rectangles, f"{temp} F", (x,y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
plt.imshow(image_with_rectangles)
```
| github_jupyter |
## Histograms of Oriented Gradients (HOG)
As we saw with the ORB algorithm, we can use keypoints in images to do keypoint-based matching to detect objects in images. These type of algorithms work great when you want to detect objects that have a lot of consistent internal features that are not affected by the background. For example, these algorithms work well for facial detection because faces have a lot of consistent internal features that don’t get affected by the image background, such as the eyes, nose, and mouth. However, these type of algorithms don’t work so well when attempting to do more general object recognition, say for example, pedestrian detection in images. The reason is that people don’t have consistent internal features, like faces do, because the body shape and style of every person is different (see Fig. 1). This means that every person is going to have a different set of internal features, and so we need something that can more generally describe a person.
<br>
<figure>
<img src = "./in_cell_images/pedestrians.jpeg" width = "100%" style = "border: thin silver solid; padding: 10px">
<figcaption style = "text-align:left; font-style:italic">Fig. 1. - Pedestrians.</figcaption>
</figure>
<br>
One option is to try to detect pedestrians by their contours instead. Detecting objects in images by their contours (boundaries) is very challenging because we have to deal with the difficulties brought about by the contrast between the background and the foreground. For example, suppose you wanted to detect a pedestrian in an image that is walking in front of a white building and she is wearing a white coat and black pants (see Fig. 2). We can see in Fig. 2, that since the background of the image is mostly white, the black pants are going to have a very high contrast, but the coat, since it is white as well, is going to have very low contrast. In this case, detecting the edges of pants is going to be easy but detecting the edges of the coat is going to be very difficult. This is where **HOG** comes in. HOG stands for **Histograms of Oriented Gradients** and it was first introduced by Navneet Dalal and Bill Triggs in 2005.
<br>
<figure>
<img src = "./in_cell_images/woman.jpg" width = "100%" style = "border: thin silver solid; padding: 10px">
<figcaption style = "text-align:left; font-style:italic">Fig. 2. - High and Low Contrast.</figcaption>
</figure>
<br>
The HOG algorithm works by creating histograms of the distribution of gradient orientations in an image and then normalizing them in a very special way. This special normalization is what makes HOG so effective at detecting the edges of objects even in cases where the contrast is very low. These normalized histograms are put together into a feature vector, known as the HOG descriptor, that can be used to train a machine learning algorithm, such as a Support Vector Machine (SVM), to detect objects in images based on their boundaries (edges). Due to its great success and reliability, HOG has become one of the most widely used algorithms in computer vision for object detection.
In this notebook, you will learn:
* How the HOG algorithm works
* How to use OpenCV to create a HOG descriptor
* How to visualize the HOG descriptor.
# The HOG Algorithm
As its name suggests, the HOG algorithm, is based on creating histograms from the orientation of image gradients. The HOG algorithm is implemented in a series of steps:
1. Given the image of particular object, set a detection window (region of interest) that covers the entire object in the image (see Fig. 3).
2. Calculate the magnitude and direction of the gradient for each individual pixel in the detection window.
3. Divide the detection window into connected *cells* of pixels, with all cells being of the same size (see Fig. 3). The size of the cells is a free parameter and it is usually chosen so as to match the scale of the features that want to be detected. For example, in a 64 x 128 pixel detection window, square cells 6 to 8 pixels wide are suitable for detecting human limbs.
4. Create a Histogram for each cell, by first grouping the gradient directions of all pixels in each cell into a particular number of orientation (angular) bins; and then adding up the gradient magnitudes of the gradients in each angular bin (see Fig. 3). The number of bins in the histogram is a free parameter and it is usually set to 9 angular bins.
5. Group adjacent cells into *blocks* (see Fig. 3). The number of cells in each block is a free parameter and all blocks must be of the same size. The distance between each block (known as the stride) is a free parameter but it is usually set to half the block size, in which case you will get overlapping blocks (*see video below*). The HOG algorithm has been shown empirically to work better with overlapping blocks.
6. Use the cells contained within each block to normalize the cell histograms in that block (see Fig. 3). If you have overlapping blocks this means that most cells will be normalized with respect to different blocks (*see video below*). Therefore, the same cell may have several different normalizations.
7. Collect all the normalized histograms from all the blocks into a single feature vector called the HOG descriptor.
8. Use the resulting HOG descriptors from many images of the same type of object to train a machine learning algorithm, such as an SVM, to detect those type of objects in images. For example, you could use the HOG descriptors from many images of pedestrians to train an SVM to detect pedestrians in images. The training is done with both positive and negative examples of the object you want to detect in the image.
9. Once the SVM has been trained, a sliding window approach is used to try to detect and locate objects in images. Detecting an object in the image entails finding the part of the image that looks similar to the HOG pattern learned by the SVM.
<br>
<figure>
<img src = "./in_cell_images/HOG Diagram2.png" width = "100%" style = "border: thin silver solid; padding: 1px">
<figcaption style = "text-align:left; font-style:italic">Fig. 3. - HOG Diagram.</figcaption>
</figure>
<br>
<figure>
<video src = "./in_cell_images/HOG Animation - Medium.mp4" width="100%" controls autoplay loop> </video>
<figcaption style = "text-align:left; font-style:italic">Vid. 1. - HOG Animation.</figcaption>
</figure>
# Why The HOG Algorithm Works
As we learned above, HOG creates histograms by adding the magnitude of the gradients in particular orientations in localized portions of the image called *cells*. By doing this we guarantee that stronger gradients will contribute more to the magnitude of their respective angular bin, while the effects of weak and randomly oriented gradients resulting from noise are minimized. In this manner the histograms tell us the dominant gradient orientation of each cell.
### Dealing with contrast
Now, the magnitude of the dominant orientation can vary widely due to variations in local illumination and the contrast between the background and the foreground.
To account for the background-foreground contrast differences, the HOG algorithm tries to detect edges locally. In order to do this, it defines groups of cells, called **blocks**, and normalizes the histograms using this local group of cells. By normalizing locally, the HOG algorithm can detect the edges in each block very reliably; this is called **block normalization**.
In addition to using block normalization, the HOG algorithm also uses overlapping blocks to increase its performance. By using overlapping blocks, each cell contributes several independent components to the final HOG descriptor, where each component corresponds to a cell being normalized with respect to a different block. This may seem redundant but, it has been shown empirically that by normalizing each cell several times with respect to different local blocks, the performance of the HOG algorithm increases dramatically.
### Loading Images and Importing Resources
The first step in building our HOG descriptor is to load the required packages into Python and to load our image.
We start by using OpenCV to load an image of a triangle tile. Since, the `cv2.imread()` function loads images as BGR we will convert our image to RGB so we can display it with the correct colors. As usual we will convert our BGR image to Gray Scale for analysis.
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Set the default figure size
plt.rcParams['figure.figsize'] = [17.0, 7.0]
# Load the image
image = cv2.imread('./images/triangle_tile.jpeg')
# Convert the original image to RGB (cv2.imread returns BGR order)
original_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Convert the original image to gray scale for the HOG analysis
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Print the shape of the original and gray scale images
print('The original image has shape: ', original_image.shape)
print('The gray scale image has shape: ', gray_image.shape)
# Display the two images side by side
plt.subplot(121)
plt.imshow(original_image)
plt.title('Original Image')
plt.subplot(122)
plt.imshow(gray_image, cmap='gray')
plt.title('Gray Scale Image')
plt.show()
```
# Creating The HOG Descriptor
We will be using OpenCV’s `HOGDescriptor` class to create the HOG descriptor. The parameters of the HOG descriptor are setup using the `HOGDescriptor()` function. The parameters of the `HOGDescriptor()` function and their default values are given below:
`cv2.HOGDescriptor(win_size = (64, 128),
block_size = (16, 16),
block_stride = (8, 8),
cell_size = (8, 8),
nbins = 9,
win_sigma = DEFAULT_WIN_SIGMA,
threshold_L2hys = 0.2,
gamma_correction = true,
nlevels = DEFAULT_NLEVELS)`
Parameters:
* **win_size** – *Size*
Size of detection window in pixels (*width, height*). Defines the region of interest. Must be an integer multiple of cell size.
* **block_size** – *Size*
Block size in pixels (*width, height*). Defines how many cells are in each block. Must be an integer multiple of cell size and it must be smaller than the detection window. The smaller the block the finer detail you will get.
* **block_stride** – *Size*
Block stride in pixels (*horizontal, vertical*). It must be an integer multiple of cell size. The `block_stride` defines the distance between adjecent blocks, for example, 8 pixels horizontally and 8 pixels vertically. Longer `block_strides` makes the algorithm run faster (because less blocks are evaluated) but the algorithm may not perform as well.
* **cell_size** – *Size*
Cell size in pixels (*width, height*). Determines the size of your cell. The smaller the cell the finer detail you will get.
* **nbins** – *int*
Number of bins for the histograms. Determines the number of angular bins used to make the histograms. With more bins you capture more gradient directions. HOG uses unsigned gradients, so the angular bins will have values between 0 and 180 degrees.
* **win_sigma** – *double*
Gaussian smoothing window parameter. The performance of the HOG algorithm can be improved by smoothing the pixels near the edges of the blocks by applying a Gaussian spatial window to each pixel before computing the histograms.
* **threshold_L2hys** – *double*
L2-Hys (Lowe-style clipped L2 norm) normalization method shrinkage. The L2-Hys method is used to normalize the blocks and it consists of an L2-norm followed by clipping and a renormalization. The clipping limits the maximum value of the descriptor vector for each block to have the value of the given threshold (0.2 by default). After the clipping the descriptor vector is renormalized as described in *IJCV*, 60(2):91-110, 2004.
* **gamma_correction** – *bool*
Flag to specify whether the gamma correction preprocessing is required or not. Performing gamma correction slightly increases the performance of the HOG algorithm.
* **nlevels** – *int*
Maximum number of detection window increases.
As we can see, the `cv2.HOGDescriptor()`function supports a wide range of parameters. The first few arguments (`block_size, block_stride, cell_size`, and `nbins`) are probably the ones you are most likely to change. The other parameters can be safely left at their default values and you will get good results.
In the code below, we will use the `cv2.HOGDescriptor()`function to set the cell size, block size, block stride, and the number of bins for the histograms of the HOG descriptor. We will then use `.compute(image)`method to compute the HOG descriptor (feature vector) for the given `image`.
```
# Specify the parameters for our HOG descriptor
# Cell Size in pixels (width, height). Must be smaller than the size of the detection window
# and must be chosen so that the resulting Block Size is smaller than the detection window.
cell_size = (6, 6)
# Number of cells per block in each direction (x, y). Must be chosen so that the resulting
# Block Size is smaller than the detection window
num_cells_per_block = (2, 2)
# Block Size in pixels (width, height). Must be an integer multiple of Cell Size.
# The Block Size must be smaller than the detection window
block_size = (num_cells_per_block[0] * cell_size[0],
              num_cells_per_block[1] * cell_size[1])
# Calculate the number of cells that fit in our image in the x and y directions
# (integer division: any leftover pixels at the edges are ignored).
x_cells = gray_image.shape[1] // cell_size[0]
y_cells = gray_image.shape[0] // cell_size[1]
# Horizontal distance between blocks in units of Cell Size. Must be an integer and it must
# be set such that (x_cells - num_cells_per_block[0]) / h_stride = integer.
h_stride = 1
# Vertical distance between blocks in units of Cell Size. Must be an integer and it must
# be set such that (y_cells - num_cells_per_block[1]) / v_stride = integer.
v_stride = 1
# Block Stride in pixels (horizontal, vertical). Must be an integer multiple of Cell Size
block_stride = (cell_size[0] * h_stride, cell_size[1] * v_stride)
# Number of gradient orientation bins for each cell histogram
num_bins = 9
# Specify the size of the detection window (Region of Interest) in pixels (width, height).
# It must be an integer multiple of Cell Size and it must cover the entire image. Because
# the detection window must be an integer multiple of cell size, depending on the size of
# your cells, the resulting detection window might be slightly smaller than the image.
# This is perfectly ok.
win_size = (x_cells * cell_size[0] , y_cells * cell_size[1])
# Print the shape of the gray scale image for reference
print('\nThe gray scale image has shape: ', gray_image.shape)
print()
# Print the parameters of our HOG descriptor
print('HOG Descriptor Parameters:\n')
print('Window Size:', win_size)
print('Cell Size:', cell_size)
print('Block Size:', block_size)
print('Block Stride:', block_stride)
print('Number of Bins:', num_bins)
print()
# Set the parameters of the HOG descriptor using the variables defined above
hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, num_bins)
# Compute the HOG Descriptor (feature vector) for the gray scale image
hog_descriptor = hog.compute(gray_image)
```
# Number of Elements In The HOG Descriptor
The resulting HOG Descriptor (feature vector), contains the normalized histograms from all cells from all blocks in the detection window concatenated in one long vector. Therefore, the size of the HOG feature vector will be given by the total number of blocks in the detection window, multiplied by the number of cells per block, times the number of orientation bins:
<span class="mathquill">
\begin{equation}
\mbox{total_elements} = (\mbox{total_number_of_blocks})\mbox{ } \times \mbox{ } (\mbox{number_cells_per_block})\mbox{ } \times \mbox{ } (\mbox{number_of_bins})
\end{equation}
</span>
If we don’t have overlapping blocks (*i.e.* the `block_stride`equals the `block_size`), the total number of blocks can be easily calculated by dividing the size of the detection window by the block size. However, in the general case we have to take into account the fact that we have overlapping blocks. To find the total number of blocks in the general case (*i.e.* for any `block_stride` and `block_size`), we can use the formula given below:
<span class="mathquill">
\begin{equation}
\mbox{Total}_i = \left( \frac{\mbox{block_size}_i}{\mbox{block_stride}_i} \right)\left( \frac{\mbox{window_size}_i}{\mbox{block_size}_i} \right) - \left [\left( \frac{\mbox{block_size}_i}{\mbox{block_stride}_i} \right) - 1 \right]; \mbox{ for } i = x,y
\end{equation}
</span>
Where <span class="mathquill">Total$_x$</span>, is the total number of blocks along the width of the detection window, and <span class="mathquill">Total$_y$</span>, is the total number of blocks along the height of the detection window. This formula for <span class="mathquill">Total$_x$</span> and <span class="mathquill">Total$_y$</span>, takes into account the extra blocks that result from overlapping. After calculating <span class="mathquill">Total$_x$</span> and <span class="mathquill">Total$_y$</span>, we can get the total number of blocks in the detection window by multiplying <span class="mathquill">Total$_x$ $\times$ Total$_y$</span>. The above formula can be simplified considerably because the `block_size`, `block_stride`, and `window_size`are all defined in terms of the `cell_size`. By making all the appropriate substitutions and cancelations the above formula reduces to:
<span class="mathquill">
\begin{equation}
\mbox{Total}_i = \left(\frac{\mbox{cells}_i - \mbox{num_cells_per_block}_i}{N_i}\right) + 1\mbox{ }; \mbox{ for } i = x,y
\end{equation}
</span>
Where <span class="mathquill">cells$_x$</span> is the total number of cells along the width of the detection window, and <span class="mathquill">cells$_y$</span>, is the total number of cells along the height of the detection window. And <span class="mathquill">$N_x$</span> is the horizontal block stride in units of `cell_size` and <span class="mathquill">$N_y$</span> is the vertical block stride in units of `cell_size`.
Let's calculate what the number of elements for the HOG feature vector should be and check that it matches the shape of the HOG Descriptor calculated above.
```
# Calculate the total number of blocks along the width of the detection window
tot_bx = np.uint32(((x_cells - num_cells_per_block[0]) / h_stride) + 1)
# Calculate the total number of blocks along the height of the detection window
tot_by = np.uint32(((y_cells - num_cells_per_block[1]) / v_stride) + 1)
# Calculate the total number of elements in the feature vector
tot_els = (tot_bx) * (tot_by) * num_cells_per_block[0] * num_cells_per_block[1] * num_bins
# Print the total number of elements the HOG feature vector should have
print('\nThe total number of elements in the HOG Feature Vector should be: ',
tot_bx, 'x',
tot_by, 'x',
num_cells_per_block[0], 'x',
num_cells_per_block[1], 'x',
num_bins, '=',
tot_els)
# Print the shape of the HOG Descriptor to see that it matches the above
print('\nThe HOG Descriptor has shape:', hog_descriptor.shape)
print()
```
# Visualizing The HOG Descriptor
We can visualize the HOG Descriptor by plotting the histogram associated with each cell as a collection of vectors. To do this, we will plot each bin in the histogram as a single vector whose magnitude is given by the height of the bin and its orientation is given by the angular bin that its associated with. Since any given cell might have multiple histograms associated with it, due to the overlapping blocks, we will choose to average all the histograms for each cell to produce a single histogram for each cell.
OpenCV has no easy way to visualize the HOG Descriptor, so we have to do some manipulation first in order to visualize it. We will start by reshaping the HOG Descriptor in order to make our calculations easier. We will then compute the average histogram of each cell and finally we will convert the histogram bins into vectors. Once we have the vectors, we plot the corresponding vectors for each cell in an image.
The code below produces an interactive plot so that you can interact with the figure. The figure contains:
* the grayscale image,
* the HOG Descriptor (feature vector),
* a zoomed-in portion of the HOG Descriptor, and
* the histogram of the selected cell.
**You can click anywhere on the gray scale image or the HOG Descriptor image to select a particular cell**. Once you click on either image a *magenta* rectangle will appear showing the cell you selected. The Zoom Window will show you a zoomed in version of the HOG descriptor around the selected cell; and the histogram plot will show you the corresponding histogram for the selected cell. The interactive window also has buttons at the bottom that allow for other functionality, such as panning, and giving you the option to save the figure if desired. The home button returns the figure to its default value.
**NOTE**: If you are running this notebook in the Udacity workspace, there is around a 2 second lag in the interactive plot. This means that if you click in the image to zoom in, it will take about 2 seconds for the plot to refresh.
```
%matplotlib notebook
import copy
import matplotlib.patches as patches
# Set the default figure size
plt.rcParams['figure.figsize'] = [9.8, 9]
# Reshape the feature vector to [blocks_y, blocks_x, num_cells_per_block_x, num_cells_per_block_y, num_bins].
# The blocks_x and blocks_y will be transposed so that the first index (blocks_y) refers to the row number
# and the second index to the column number. This will be useful later when we plot the feature vector, so
# that the feature vector indexing matches the image indexing.
hog_descriptor_reshaped = hog_descriptor.reshape(tot_bx,
                                                 tot_by,
                                                 num_cells_per_block[0],
                                                 num_cells_per_block[1],
                                                 num_bins).transpose((1, 0, 2, 3, 4))
# Print the shape of the feature vector for reference
print('The feature vector has shape:', hog_descriptor.shape)
# Print the reshaped feature vector
print('The reshaped feature vector has shape:', hog_descriptor_reshaped.shape)
# Create an array that will hold the average gradients for each cell
ave_grad = np.zeros((y_cells, x_cells, num_bins))
# Print the shape of the ave_grad array for reference
print('The average gradient array has shape: ', ave_grad.shape)
# Create an array that will count the number of histograms per cell
hist_counter = np.zeros((y_cells, x_cells, 1))
# Add up all the histograms for each cell and count the number of histograms per cell.
# Because blocks overlap, a cell can appear in several blocks; summing the shifted block
# slices accumulates every histogram that touches a given cell.
for i in range (num_cells_per_block[0]):
    for j in range(num_cells_per_block[1]):
        ave_grad[i:tot_by + i,
                 j:tot_bx + j] += hog_descriptor_reshaped[:, :, i, j, :]
        hist_counter[i:tot_by + i,
                     j:tot_bx + j] += 1
# Calculate the average gradient for each cell (element-wise, broadcasting the counter)
ave_grad /= hist_counter
# Calculate the total number of vectors we have in all the cells.
len_vecs = ave_grad.shape[0] * ave_grad.shape[1] * ave_grad.shape[2]
# Create an array that has num_bins angles equally spaced between 0 and 180 degrees, in radians.
deg = np.linspace(0, np.pi, num_bins, endpoint = False)
# Each cell will have a histogram with num_bins. For each cell, plot each bin as a vector (with its magnitude
# equal to the height of the bin in the histogram, and its angle corresponding to the bin in the histogram).
# To do this, create rank 1 arrays that will hold the (x,y)-coordinate of all the vectors in all the cells in the
# image. Also, create the rank 1 arrays that will hold all the (U,V)-components of all the vectors in all the
# cells in the image. Create the arrays that will hold all the vector positions and components.
U = np.zeros((len_vecs))
V = np.zeros((len_vecs))
X = np.zeros((len_vecs))
Y = np.zeros((len_vecs))
# Set the counter to zero
counter = 0
# Use the cosine and sine functions to calculate the vector components (U,V) from their magnitudes. Remember the
# cosine and sine functions take angles in radians. Calculate the vector positions and magnitudes from the
# average gradient array.
for i in range(ave_grad.shape[0]):
    for j in range(ave_grad.shape[1]):
        for k in range(ave_grad.shape[2]):
            U[counter] = ave_grad[i,j,k] * np.cos(deg[k])
            V[counter] = ave_grad[i,j,k] * np.sin(deg[k])
            X[counter] = (cell_size[0] / 2) + (cell_size[0] * i)
            Y[counter] = (cell_size[1] / 2) + (cell_size[1] * j)
            counter = counter + 1
# Create the bin centers in degrees to plot our histogram (shift by half a bin width).
angle_axis = np.linspace(0, 180, num_bins, endpoint = False)
angle_axis += ((angle_axis[1] - angle_axis[0]) / 2)
angle_axis += ((angle_axis[1] - angle_axis[0]) / 2)
# Create a figure with 4 subplots arranged in 2 x 2:
# (a) gray scale image, (b) full HOG descriptor, (c) zoom window,
# (d) histogram of the currently selected cell.
fig, ((a,b),(c,d)) = plt.subplots(2,2)
# Set the title of each subplot
a.set(title = 'Gray Scale Image\n(Click to Zoom)')
b.set(title = 'HOG Descriptor\n(Click to Zoom)')
c.set(title = 'Zoom Window', xlim = (0, 18), ylim = (0, 18), autoscale_on = False)
d.set(title = 'Histogram of Gradients')
# Plot the gray scale image
a.imshow(gray_image, cmap = 'gray')
a.set_aspect(aspect = 1)
# Plot the feature vector (HOG Descriptor) as a white quiver field on a
# black background. The y-axis is inverted so the plot indexing matches
# image (row, col) indexing.
b.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 5)
b.invert_yaxis()
b.set_aspect(aspect = 1)
# Axes.set_axis_bgcolor was deprecated in matplotlib 2.0 and removed in 2.2;
# set_facecolor is the supported replacement.
b.set_facecolor('black')
# Define function for interactive zoom
def onpress(event):
#Unless the left mouse button is pressed do nothing
if event.button != 1:
return
# Only accept clicks for subplots a and b
if event.inaxes in [a, b]:
# Get mouse click coordinates
x, y = event.xdata, event.ydata
# Select the cell closest to the mouse click coordinates
cell_num_x = np.uint32(x / cell_size[0])
cell_num_y = np.uint32(y / cell_size[1])
# Set the edge coordinates of the rectangle patch
edgex = x - (x % cell_size[0])
edgey = y - (y % cell_size[1])
# Create a rectangle patch that matches the the cell selected above
rect = patches.Rectangle((edgex, edgey),
cell_size[0], cell_size[1],
linewidth = 1,
edgecolor = 'magenta',
facecolor='none')
# A single patch can only be used in a single plot. Create copies
# of the patch to use in the other subplots
rect2 = copy.copy(rect)
rect3 = copy.copy(rect)
# Update all subplots
a.clear()
a.set(title = 'Gray Scale Image\n(Click to Zoom)')
a.imshow(gray_image, cmap = 'gray')
a.set_aspect(aspect = 1)
a.add_patch(rect)
b.clear()
b.set(title = 'HOG Descriptor\n(Click to Zoom)')
b.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 5)
b.invert_yaxis()
b.set_aspect(aspect = 1)
#b.set_facecolor('black')
b.set_axis_bgcolor('black')
b.add_patch(rect2)
c.clear()
c.set(title = 'Zoom Window')
c.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 1)
c.set_xlim(edgex - cell_size[0], edgex + (2 * cell_size[0]))
c.set_ylim(edgey - cell_size[1], edgey + (2 * cell_size[1]))
c.invert_yaxis()
c.set_aspect(aspect = 1)
#c.set_facecolor('black')
c.set_axis_bgcolor('black')
c.add_patch(rect3)
d.clear()
d.set(title = 'Histogram of Gradients')
d.grid()
d.set_xlim(0, 180)
d.set_xticks(angle_axis)
d.set_xlabel('Angle')
d.bar(angle_axis,
ave_grad[cell_num_y, cell_num_x, :],
180 // num_bins,
align = 'center',
alpha = 0.5,
linewidth = 1.2,
edgecolor = 'k')
fig.canvas.draw()
# Create a connection between the figure and the mouse click
fig.canvas.mpl_connect('button_press_event', onpress)
plt.show()
```
# Understanding The Histograms
Let's take a look at a couple of snapshots of the above figure to see if the histograms for the selected cell make sense. Let's start looking at a cell that is inside a triangle and not near an edge:
<br>
<figure>
<img src = "./in_cell_images/snapshot1.png" width = "70%" style = "border: thin silver solid; padding: 1px">
<figcaption style = "text-align:center; font-style:italic">Fig. 4. - Histograms Inside a Triangle.</figcaption>
</figure>
<br>
In this case, since the triangle is nearly all of the same color there shouldn't be any dominant gradient in the selected cell. As we can clearly see in the Zoom Window and the histogram, this is indeed the case. We have many gradients but none of them clearly dominates over the other.
Now let’s take a look at a cell that is near a horizontal edge:
<br>
<figure>
<img src = "./in_cell_images/snapshot2.png" width = "70%" style = "border: thin silver solid; padding: 1px">
<figcaption style = "text-align:center; font-style:italic">Fig. 5. - Histograms Near a Horizontal Edge.</figcaption>
</figure>
<br>
Remember that edges are areas of an image where the intensity changes abruptly. In these cases, we will have a high intensity gradient in some particular direction. This is exactly what we see in the corresponding histogram and Zoom Window for the selected cell. In the Zoom Window, we can see that the dominant gradient is pointing up, almost at 90 degrees, since that’s the direction in which there is a sharp change in intensity. Therefore, we should expect to see the 90-degree bin in the histogram to dominate strongly over the others. This is in fact what we see.
Now let’s take a look at a cell that is near a vertical edge:
<br>
<figure>
<img src = "./in_cell_images/snapshot3.png" width = "70%" style = "border: thin silver solid; padding: 1px">
<figcaption style = "text-align:center; font-style:italic">Fig. 6. - Histograms Near a Vertical Edge.</figcaption>
</figure>
<br>
In this case we expect the dominant gradient in the cell to be horizontal, close to 180 degrees, since that’s the direction in which there is a sharp change in intensity. Therefore, we should expect to see the 170-degree bin in the histogram to dominate strongly over the others. This is what we see in the histogram but we also see that there is another dominant gradient in the cell, namely the one in the 10-degree bin. The reason for this, is because the HOG algorithm is using unsigned gradients, which means 0 degrees and 180 degrees are considered the same. Therefore, when the histograms are being created, angles between 160 and 180 degrees, contribute proportionally to both the 10-degree bin and the 170-degree bin. This results in there being two dominant gradients in the cell near the vertical edge instead of just one.
To conclude let’s take a look at a cell that is near a diagonal edge.
<br>
<figure>
<img src = "./in_cell_images/snapshot4.png" width = "70%" style = "border: thin silver solid; padding: 1px">
<figcaption style = "text-align:center; font-style:italic">Fig. 7. - Histograms Near a Diagonal Edge.</figcaption>
</figure>
<br>
To understand what we are seeing, let’s first remember that gradients have an *x*-component, and a *y*-component, just like vectors. Therefore, the resulting orientation of a gradient is going to be given by the vector sum of its components. For this reason, on vertical edges the gradients are horizontal, because they only have an x-component, as we saw in Figure 6. While on horizontal edges the gradients are vertical, because they only have a y-component, as we saw in Figure 5. Consequently, on diagonal edges, the gradients are also going to be diagonal because both the *x* and *y* components are non-zero. Since the diagonal edges in the image are close to 45 degrees, we should expect to see a dominant gradient orientation in the 50-degree bin. This is in fact what we see in the histogram but, just like in Figure 6, we see there are two dominant gradients instead of just one. The reason for this is that when the histograms are being created, angles that are near the boundaries of bins contribute proportionally to the adjacent bins. For example, a gradient with an angle of 40 degrees is right in the middle of the 30-degree and 50-degree bins. Therefore, the magnitude of the gradient is split evenly into the 30-degree and 50-degree bins. This results in there being two dominant gradients in the cell near the diagonal edge instead of just one.
Now that you know how HOG is implemented, in the workspace you will find a notebook named *Examples*. In there, you will be able to set your own parameters for the HOG descriptor for various images. Have fun!
| github_jupyter |
### Topic Modelling Demo Code
#### Things I want to do -
- Identify a package to build / train LDA model
- Use visualization to explore Documents -> Topics Distribution -> Word distribution
```
!pip install pyLDAvis, gensim
import numpy as np
import pandas as pd
# Visualization
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
import pyLDAvis.gensim
# Text Preprocessing and model building
from gensim.corpora import Dictionary
import nltk
from nltk.stem import WordNetLemmatizer
import re
# Iteratively read files
import glob
import os
# For displaying images in ipython
from IPython.display import HTML, display
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (14.0, 8.7)
#warnings.filterwarnings('ignore')
pd.options.display.float_format = '{:,.2f}'.format
```
<h2>Latent Dirichlet Allocation</h2>
<h3>From Documents -- DTM -- LDA Model</h3>
Topic modeling aims to automatically summarize large collections of documents to facilitate organization and management, as well as search and recommendations. At the same time, it can enable the understanding of documents to the extent that humans can interpret the descriptions of topics
<img src="images/lda2.png" alt="lda" style="width:60%">
<img src="images/docs_to_lda.png" alt="ldaflow" style="width:100%">
### Load Data
```
# User defined function to read and store bbc data from multiple folders
def load_data(folder_names, root_path):
    """Load BBC articles stored as ``root_path/bbc/<folder>/*.txt``.

    Args:
        folder_names: list of category folder names (e.g. ['business', 'sport']).
        root_path: directory that contains the 'bbc' folder.

    Returns:
        A list of ``[topic, heading, body]`` triples, one per article, where
        the first line of each file is the heading and the rest is the body.
    """
    doc_list = []
    for folder in folder_names:
        # os.path.join keeps the paths portable; the original hard-coded '/'
        # separators and recovered the topic with docs.split('/')[-2], which
        # breaks on Windows-style paths.
        pattern = os.path.join(root_path, 'bbc', folder, '*.txt')
        # glob iterates through all the text documents in a folder
        for text in glob.glob(pattern):
            with open(text, encoding='latin1') as f:
                lines = f.readlines()
            if not lines:
                # Skip empty files instead of raising IndexError on lines[0].
                continue
            heading = lines[0].strip()
            body = ' '.join(l.strip() for l in lines[1:])
            doc_list.append([folder, heading, body])
        print("Completed loading data from folder: %s" % folder)
    print("Completed Loading entire text")
    return doc_list
folder_names = ['business','entertainment','politics','sport','tech']
docs = load_data(folder_names = folder_names, root_path = os.getcwd())
docs = pd.DataFrame(docs, columns=['Category', 'Heading', 'Article'])
print(docs.head())
print('\nShape of data is {}\n'.format(docs.shape))
```
### Extract Raw Corpus
```
articles = docs.Article.tolist()
print(type(articles))
print(articles[0:2])
wordnet_lemmatizer = WordNetLemmatizer()
```
### Preprocessing of Raw Text
```
from nltk.corpus import stopwords
import nltk
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('stopwords')
# The corpus reader method is `words`, not `word` -- the original call raised
# AttributeError. This rebinds the name `stopwords` to the English word list.
stopwords = stopwords.words('english')
# Method to preprocess my raw data
def preprocessText(x):
temp = x.lower()
temp = re.sub(r'[^\w]', ' ', temp)
temp = nltk.word_tokenize(temp)
temp = [wordnet_lemmatizer.lemmatize(w) for w in temp]
temp = [word for word in temp if word not in stopwords ]
return temp
articles_final = [preprocessText(article) for article in articles]
articles_final[0:2]
```
### Transformation of Preprocessed text into Vector form using Gensim
```
# Create a dictionary representation of the documents.
dictionary = Dictionary(articles_final)
# Filter out words that occur less than 20 documents, or more than 50% of the documents.
dictionary.filter_extremes(no_below=20, no_above=0.5)
print(dictionary)
# Bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(doc) for doc in articles_final]
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
```
### Train LDA model using Gensim
```
dictionary
# Train LDA model.
from gensim.models import LdaModel
# Set training parameters.
num_topics = 5
chunksize = 2000
passes = 10
# iterations = 400
eval_every = None # Don't evaluate model perplexity, takes too much time.
# Make a index to word dictionary.
temp = dictionary[0] # This is only to "load" the dictionary.
id2word = dictionary.id2token
model = LdaModel(
corpus=corpus,
id2word=id2word,
chunksize=chunksize,
alpha='auto',
eta='auto',
# iterations=iterations,
num_topics=num_topics,
passes=passes,
eval_every=eval_every
)
```
### Model exploration: Top K words in each topic
```
import pprint  # needed for pprint.pprint below; it was never imported in this notebook

# Print the top-20 keywords for each of the topics
pprint.pprint(model.print_topics(num_words= 20))
doc_lda = model[corpus]
```
### Model Visualization using PyLDAvis
```
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(model, corpus, dictionary=dictionary)
vis
```
### Assign Topic Model Numbers to original Data Frame as Column
```
# Assign the most probable topic id to each document in the corpus.
lda_corpus = model[corpus]
# Each entry of lda_corpus is a list of (topic_id, probability) pairs; keep
# the id with the highest probability (ties resolve to the first maximum,
# exactly as np.argmax would).
topics = [max(doc, key=lambda pair: pair[1])[0] for doc in lda_corpus]
docs["Topic_num"] = topics
docs.tail(n=40)
```
| github_jupyter |
### Introduction
The `Lines` object provides the following features:
1. Ability to plot a single set or multiple sets of y-values as a function of a set or multiple sets of x-values
2. Ability to style the line object in different ways, by setting different attributes such as the `colors`, `line_style`, `stroke_width` etc.
3. Ability to specify a marker at each point passed to the line. The marker can be a shape which is at the data points between which the line is interpolated and can be set through the `markers` attribute
The `Lines` object has the following attributes
| Attribute | Description | Default Value |
|:-:|---|:-:|
| `colors` | Sets the color of each line, takes as input a list of any RGB, HEX, or HTML color name | `CATEGORY10` |
| `opacities` | Controls the opacity of each line, takes as input a real number between 0 and 1 | `1.0` |
| `stroke_width` | Real number which sets the width of all paths | `2.0` |
| `line_style` | Specifies whether a line is solid, dashed, dotted or both dashed and dotted | `'solid'` |
| `interpolation` | Sets the type of interpolation between two points | `'linear'` |
| `marker` | Specifies the shape of the marker inserted at each data point | `None` |
| `marker_size` | Controls the size of the marker, takes as input a non-negative integer | `64` |
|`close_path`| Controls whether to close the paths or not | `False` |
|`fill`| Specifies in which way the paths are filled. Can be set to one of `{'none', 'bottom', 'top', 'inside'}`| `None` |
|`fill_colors`| `List` that specifies the `fill` colors of each path | `[]` |
| **Data Attribute** | **Description** | **Default Value** |
|`x` |abscissas of the data points | `array([])` |
|`y` |ordinates of the data points | `array([])` |
|`color` | Data according to which the `Lines` will be colored. Setting it to `None` defaults the choice of colors to the `colors` attribute | `None` |
## pyplot's plot method can be used to plot lines with meaningful defaults
```
import numpy as np
from pandas import date_range
import bqplot.pyplot as plt
from bqplot import *
security_1 = np.cumsum(np.random.randn(150)) + 100.
security_2 = np.cumsum(np.random.randn(150)) + 100.
```
## Basic Line Chart
```
fig = plt.figure(title='Security 1')
axes_options = {'x': {'label': 'Index'}, 'y': {'label': 'Price'}}
# x values default to range of values when not specified
line = plt.plot(security_1, axes_options=axes_options)
fig
```
**We can explore the different attributes by changing each of them for the plot above:**
```
line.colors = ['DarkOrange']
```
In a similar way, we can also change any attribute after the plot has been displayed to change the plot. Run each of the cells below, and try changing the attributes to explore the different features and how they affect the plot.
```
# The opacity allows us to display the Line while featuring other Marks that may be on the Figure
line.opacities = [.5]
line.stroke_width = 2.5
```
To switch to an area chart, set the `fill` attribute, and control the look with `fill_opacities` and `fill_colors`.
```
line.fill = 'bottom'
line.fill_opacities = [0.2]
line.line_style = 'dashed'
line.interpolation = 'basis'
```
While a `Lines` plot allows the user to extract the general shape of the data being plotted, there may be a need to visualize discrete data points along with this shape. This is where the `markers` attribute comes in.
```
line.marker = 'triangle-down'
```
The `marker` attribute accepts the values `square`, `circle`, `cross`, `diamond`, `triangle-down`, `triangle-up`, `arrow`, `rectangle`, `ellipse`. Try changing the string above and re-running the cell to see how each `marker` type looks.
## Plotting a Time-Series
The `DateScale` allows us to plot time series as a `Lines` plot conveniently with most `date` formats.
```
# Here we define the dates we would like to use
dates = date_range(start='01-01-2007', periods=150)
fig = plt.figure(title='Time Series')
axes_options = {'x': {'label': 'Date'}, 'y': {'label': 'Security 1'}}
time_series = plt.plot(dates, security_1,
axes_options=axes_options)
fig
```
## Plotting multiples sets of data
The `Lines` mark allows the user to plot multiple `y`-values for a single `x`-value. This can be done by passing an `ndarray` or a list of the different `y`-values as the y-attribute of the `Lines` as shown below.
```
dates_new = date_range(start='06-01-2007', periods=150)
```
We pass each data set as an element of a `list`
```
fig = plt.figure()
axes_options = {'x': {'label': 'Date'}, 'y': {'label': 'Price'}}
line = plt.plot(dates, [security_1, security_2],
labels=['Security 1', 'Security 2'],
axes_options=axes_options,
display_legend=True)
fig
```
Similarly, we can also pass multiple `x`-values for multiple sets of `y`-values
```
line.x, line.y = [dates, dates_new], [security_1, security_2]
```
### Coloring Lines according to data
The `color` attribute of a `Lines` mark can also be used to encode one more dimension of data. Suppose we have a portfolio of securities and we would like to color them based on whether we have bought or sold them. We can use the `color` attribute to encode this information.
```
fig = plt.figure()
axes_options = {'x': {'label': 'Date'},
'y': {'label': 'Security 1'},
'color' : {'visible': False}}
# add a custom color scale to color the lines
plt.scales(scales={'color': ColorScale(colors=['Red', 'Green'])})
dates_color = date_range(start='06-01-2007', periods=150)
securities = 100. + np.cumsum(np.random.randn(150, 10), axis=0)
# we generate 10 random price series and 10 random positions
positions = np.random.randint(0, 2, size=10)
# We pass the color scale and the color data to the plot method
line = plt.plot(dates_color, securities.T, color=positions,
axes_options=axes_options)
fig
```
We can also reset the colors of the Line to their defaults by setting the `color` attribute to `None`.
```
line.color = None
```
## Patches
The `fill` attribute of the `Lines` mark allows us to fill a path in different ways, while the `fill_colors` attribute lets us control the color of the `fill`
```
fig = plt.figure(animation_duration=1000)
# Three closed paths drawn as filled patches; NaNs pad the shorter paths so
# all three coordinate lists have the same length.
patch = plt.plot([], [],
                 fill_colors=['orange', 'blue', 'red'],
                 fill='inside',
                 axes_options={'x': {'visible': False}, 'y': {'visible': False}},
                 stroke_width=10,
                 close_path=True,
                 display_legend=True)
# BUG FIX: the original assignment ended with a stray trailing comma, which
# wrapped the list of paths in a 1-tuple and corrupted patch.x.
patch.x = [[0, 2, 1.2, np.nan, np.nan, np.nan, np.nan], [0.5, 2.5, 1.7, np.nan, np.nan, np.nan, np.nan], [4, 5, 6, 6, 5, 4, 3]]
patch.y = [[0, 0, 1, np.nan, np.nan, np.nan, np.nan], [0.5, 0.5, -0.5, np.nan, np.nan, np.nan, np.nan], [1, 1.1, 1.2, 2.3, 2.2, 2.7, 1.0]]
fig
patch.opacities = [0.1, 0.2]
# Animate: move the first patch and reopen the paths.
patch.x = [[2, 3, 3.2, np.nan, np.nan, np.nan, np.nan], [0.5, 2.5, 1.7, np.nan, np.nan, np.nan, np.nan], [4, 5, 6, 6, 5, 4, 3]]
patch.close_path = False
```
| github_jupyter |
# Clean-Label Feature Collision Attacks on a Keras Classifier
In this notebook, we will learn how to use ART to run a clean-label feature collision poisoning attack on a neural network trained with Keras. We will be training our data on a subset of the CIFAR-10 dataset. The methods described are derived from [this paper](https://arxiv.org/abs/1804.00792) by Shafahi, Huang, et. al. 2018.
```
import os, sys
from os.path import abspath
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import warnings
warnings.filterwarnings('ignore')
from keras.models import load_model
from art import config
from art.utils import load_dataset, get_file
from art.estimators.classification import KerasClassifier
from art.attacks.poisoning import FeatureCollisionAttack
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
np.random.seed(301)
(x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('cifar10')
num_samples_train = 1000
num_samples_test = 1000
x_train = x_train[0:num_samples_train]
y_train = y_train[0:num_samples_train]
x_test = x_test[0:num_samples_test]
y_test = y_test[0:num_samples_test]
class_descr = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
```
## Load Model to be Attacked
In this example, we are using an AlexNet-style model (`cifar_alexnet.h5`) pretrained on the CIFAR dataset.
```
path = get_file('cifar_alexnet.h5',extract=False, path=config.ART_DATA_PATH,
url='https://www.dropbox.com/s/ta75pl4krya5djj/cifar_alexnet.h5?dl=1')
classifier_model = load_model(path)
classifier = KerasClassifier(clip_values=(min_, max_), model=classifier_model, use_logits=False,
preprocessing=(0.5, 1))
```
## Choose Target Image from Test Set
```
# Pick the class whose instance the attacker wants misclassified.
target_class = "bird" # one of ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# One-hot label for the target class.
target_label = np.zeros(len(class_descr))
target_label[class_descr.index(target_class)] = 1
# Take the 4th test image of the target class as the single target instance
# (batch dimension added so the classifier can consume it directly).
target_instance = np.expand_dims(x_test[np.argmax(y_test, axis=1) == class_descr.index(target_class)][3], axis=0)
fig = plt.imshow(target_instance[0])
print('true_class: ' + target_class)
print('predicted_class: ' + class_descr[np.argmax(classifier.predict(target_instance), axis=1)[0]])
# Use the penultimate layer's activations as the feature space for the
# feature-collision attack.
feature_layer = classifier.layer_names[-2]
```
## Poison Training Images to Misclassify Test
The attacker wants to make it such that whenever a prediction is made on this particular bird, the output will be the base class (frog).
```
base_class = "frog" # one of ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
base_idxs = np.argmax(y_test, axis=1) == class_descr.index(base_class)
base_instances = np.copy(x_test[base_idxs][:10])
base_labels = y_test[base_idxs][:10]
x_test_pred = np.argmax(classifier.predict(base_instances), axis=1)
nb_correct_pred = np.sum(x_test_pred == np.argmax(base_labels, axis=1))
print("New test data to be poisoned (10 images):")
print("Correctly classified: {}".format(nb_correct_pred))
print("Incorrectly classified: {}".format(10-nb_correct_pred))
plt.figure(figsize=(10,10))
for i in range(0, 9):
pred_label, true_label = class_descr[x_test_pred[i]], class_descr[np.argmax(base_labels[i])]
plt.subplot(330 + 1 + i)
fig=plt.imshow(base_instances[i])
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
fig.axes.text(0.5, -0.1, pred_label + " (" + true_label + ")", fontsize=12, transform=fig.axes.transAxes,
horizontalalignment='center')
```
The captions on the images can be read: `predicted label (true label)`
## Creating Poison Frogs
```
# Build the feature-collision attack: craft poisons that stay close to the
# base (frog) images in input space while colliding with the target instance
# in the chosen feature layer. watermark=0.3 blends a faint copy of the
# target into each poison.
attack = FeatureCollisionAttack(classifier, target_instance, feature_layer, max_iter=10, similarity_coeff=256, watermark=0.3)
poison, poison_labels = attack.poison(base_instances)
# Predicted classes for the poisoned images.
poison_pred = np.argmax(classifier.predict(poison), axis=1)
plt.figure(figsize=(10,10))
# Show the first nine poisons, captioned "predicted (true)".
for i in range(0, 9):
    pred_label, true_label = class_descr[poison_pred[i]], class_descr[np.argmax(poison_labels[i])]
    plt.subplot(330 + 1 + i)
    fig=plt.imshow(poison[i])
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    fig.axes.text(0.5, -0.1, pred_label + " (" + true_label + ")", fontsize=12, transform=fig.axes.transAxes,
                  horizontalalignment='center')
```
Notice how the network classifies most of theses poison examples as frogs, and it's not incorrect to do so. The examples look mostly froggy. A slight watermark of the target instance is also added to push the poisons closer to the target class in feature space.
## Training with Poison Images
```
# Switch the Keras backend to training mode before fine-tuning.
classifier.set_learning_phase(True)
print(x_train.shape)
print(base_instances.shape)
# Append the poisoned examples (with their clean-looking labels) to the
# training set; the victim trains on the combined data.
adv_train = np.vstack([x_train, poison])
adv_labels = np.vstack([y_train, poison_labels])
classifier.fit(adv_train, adv_labels, nb_epochs=5, batch_size=4)
```
## Fooled Network Misclassifies Bird
```
fig = plt.imshow(target_instance[0])
print('true_class: ' + target_class)
print('predicted_class: ' + class_descr[np.argmax(classifier.predict(target_instance), axis=1)[0]])
```
These attacks allow adversaries who can poison your dataset the ability to mislabel any particular target instance of their choosing without manipulating labels.
| github_jupyter |
```
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
test_data = mnist.test
train_data = mnist.train
valid_data = mnist.validation
epsilon = 1e-3
class FC(object):
    """Fully-connected MNIST classifier (TF1 graph mode) with batch normalization on the input."""
    def __init__(self, learning_rate=0.01):
        # Build the graph (network, accuracy, loss, SGD step) and initialize
        # all variables in a private session.
        self.lr = learning_rate
        self.sess = tf.Session()
        # 784 = flattened 28x28 MNIST image; 10 = one-hot digit label.
        self.x = tf.placeholder(tf.float32,[None, 784], 'x')
        self.y_ = tf.placeholder(tf.float32, [None, 10], 'y_')
        # Controls batch-norm behavior at run time (batch stats vs moving averages).
        self.training = tf.placeholder(tf.bool, name='training')
        self._build_net(self.x,'FC')
        with tf.variable_scope('Accuracy'):
            self.correct_prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_,1))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
        with tf.variable_scope('Train'):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y))
            # UPDATE_OPS holds the batch-norm moving-average updates; they must
            # run together with every training step.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                self.train_opt = tf.train.GradientDescentOptimizer(self.lr).minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())

    def batch_norm_wrapper(self, inputs, is_training, decay = 0.999):
        # Hand-rolled batch normalization (currently unused; _build_net uses
        # tf.layers.batch_normalization instead -- see the commented line there).
        scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))
        beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))
        pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)
        pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)
        # NOTE(review): `is_training==tf.constant(True)` compares graph objects at
        # graph-construction time, not the placeholder's runtime value; branch
        # selection here looks suspect -- confirm (tf.cond would be the runtime
        # construct) before re-enabling this wrapper.
        if is_training==tf.constant(True):
            batch_mean, batch_var = tf.nn.moments(inputs,[0])
            train_mean = tf.assign(pop_mean,
                                   pop_mean * decay + batch_mean * (1 - decay))
            train_var = tf.assign(pop_var,
                                  pop_var * decay + batch_var * (1 - decay))
            with tf.control_dependencies([train_mean, train_var]):
                return tf.nn.batch_normalization(inputs,
                    batch_mean, batch_var, beta, scale, epsilon)
        else:
            return tf.nn.batch_normalization(inputs,
                pop_mean, pop_var, beta, scale, epsilon)

    def _build_net(self, x, scope):
        # bn -> 50-unit ReLU hidden layer -> 10-way linear output (logits in self.y).
        with tf.variable_scope(scope):
            bn = tf.layers.batch_normalization(x, axis=1, training=self.training, name = 'bn')
            #bn = self.batch_norm_wrapper(x, self.training)
            hidden = tf.layers.dense(bn, 50, activation=tf.nn.relu, name='l1')
            self.y = tf.layers.dense(hidden, 10, name='o')

    def learn(self, x, y):
        # One SGD step on a batch; returns the batch loss.
        loss,_ = self.sess.run([self.loss,self.train_opt],{self.x:x, self.y_:y, self.training:True})
        return loss

    def inference(self, x, y=None):
        # Forward pass only (training=False so batch norm uses moving statistics);
        # the `y` parameter is accepted but unused.
        y = self.sess.run(self.y,{self.x:x, self.training:False})
        #loss,_ = self.sess.run(self.loss,{self.x:x, self.y_:y, self.training:False})
        return y
fc = FC()

OUTPUT_GRAPH = True
if OUTPUT_GRAPH:
    # Dump the graph so it can be inspected in TensorBoard.
    tf.summary.FileWriter("logs/", fc.sess.graph)

# Train for 1000 mini-batches of 100 images, reporting loss every 200 steps.
for i in range(1000):
    batch = train_data.next_batch(100)
    loss = fc.learn(batch[0], batch[1])
    if i % 200 == 0:
        print(loss)

batch = valid_data.next_batch(5000)
# NOTE(review): validation accuracy is evaluated with training=True, so batch
# norm uses batch statistics here -- confirm whether that is intended.
print("validation accuracy: %f" % fc.sess.run(fc.accuracy, {fc.x: batch[0], fc.y_: batch[1], fc.training: True}))

# Measure test accuracy one example at a time.
nm = 1000
count = 0
for _ in range(nm):
    t = test_data.next_batch(1)
    x = t[0]
    y = fc.inference(x)
    a = np.argmax(y, axis=1)
    b = np.argmax(t[1], axis=1)
    if a == b:
        count += 1
# BUG FIX: the original used a Python 2 print statement (`print count/float(nm)`),
# which is a SyntaxError under Python 3; the rest of the file uses print().
print(count / float(nm))
```
| github_jupyter |
# Strings
Lesson goals:
1. Examine the string class in greater detail.
2. Use `open()` to open, read, and write to files.
To start understanding the string type, let's use the built-in help system.
```
help(str)
```
The help page for string is very long, and it may be easier to keep it open
in a browser window by going to the [online Python
documentation](http://docs.python.org/library/stdtypes.html#sequence-types-str-unicode-list-tuple-bytearray-buffer-xrange)
while we talk about its properties.
At its heart, a string is just a sequence of characters. Basic strings are
defined using single or double quotes.
```
s = "This is a string."
s2 = 'This is another string that uses single quotes'
```
The reason for having two types of quotes to define a string is
emphasized in these examples:
```
s = "Bob's mom called to say hello."
s = 'Bob's mom called to say hello.'
```
The second one should be an error: Python interprets it as `s = 'Bob'` then the
rest of the line breaks the language standard.
Characters in literal strings must come from the ASCII character set,
which is a set of 127 character codes that is used by all modern
programming languages and computers. Unfortunately, ASCII does not have
room for non-Roman characters like accents or Eastern scripts. Unicode
strings in Python are specified with a leading u:
```
u = u'abcdé'
```
For the rest of this lecture, we will deal with ASCII strings, because
most scientific data that is stored as text is stored with ASCII.
## Escape Characters
How can you have multiline line strings in python? We can represent an "enter" using the escape character '\n'. An [escape character](https://en.wikipedia.org/wiki/Escape_character) starts with a '\\' and is followed by another character. This invokes an alternative interpretation in the string. Try running the example below to see how \n changes the string:
```
s = "Hello\n World"
print(s)
```
Notice how it didn't print \n, but replaced it with a newline. There are more characters like this, such as \t which is replaced with a tab or \b that is equal to a backspace. Use [this guide](https://linuxconfig.org/list-of-python-escape-sequence-characters-with-examples) to print the following output as one string:
"If you think you can do a thing or think you can't do a thing, you're right."
- Henry Ford
/\
/ \
```
s = "Your string here"
print(s) # do not modify the print statement
```
## Working with Strings
Strings are iterables, which means many of the ideas from lists can also
be applied directly to string manipulation. For instance, characters can
be accessed individually or in sequences:
```
s = 'abcdefghijklmnopqrstuvwxyz'
s[0]
s[-1]
s[1:4] #this include char at index 1, but excludes char at index 4
```
They can also be compared using sort and equals.
```
'str1' == 'str2'
'str1' == 'str1'
'str1' < 'str2'
```
In the help screen, which we looked at above, there are lots of
functions that look like this:
| __add__(...)
| x.__add__(y) <==> x+y
| __le__(...)
| x.__le__(y) <==> x<y
These are special Python functions that interpret operations like < and \+.
We'll talk more about these in the next lecture on Classes.
Some special functions introduce handy text functions.
**Hands on example**
Try each of the following functions on a few strings. What do these
functions do?
```
s = "This is a string "
s.startswith("This")
s.split(" ")
s.strip() # This won't change every string!
s.capitalize()
s.lower()
s.upper()
```
## Formatting
Try printing "Dave was traveling at 50 mph for 4.5 hours" using these given variables:
```
name = "Dave"
v = 50
t = 4.5
```
Here is an easy way to do this is using string formatting:
```
print("%s was traveling at %d mph for %f hours" % (name, v, t))
```
**%s** is used to represent a string (name), **%d** is used to represent an integer (v), and **%f** is replaced with a float (t). Now, try printing this data as "Dave drove 10 miles faster than Sally for 4.5 hours."
```
name = "Sally"
v1 = 40
print("Your print here" % ())
```
## File I/O
Python has a built-in function called "open()" that can be used to
manipulate files. The help information for open is below:
```
help(open)
```
The main two parameters we'll need to worry about are 'file', the name of the
file, and 'mode', which determines whether we can read from or write to the file. `open(...)` returns a file object, which acts like a pointer into the file, similarly to how an assigned variable can 'point' to a list/array.
An example will make this clear. In the code below, I've opened a file
that contains one line:
$ cat ./OtherFiles/testfile.txt
abcde
fghij
Now let's open this file in Python:
```
f = open('./OtherFiles/testfile.txt','r')
```
The second input, 'r' means I want to open the file for reading only. I
cannot write to this handle. The read() command will read a specified
number of bytes:
```
s = f.read(3)
print(s)
```
We read the first three characters, where each character is a byte long.
We can see that the file handle points to the 4th byte (index number 3)
in the file:
```
f.tell() # which index we are pointing at
f.read(1) # read the 1st byte, starting from where the file handle is pointing
f.close() # close the old handle
f.read() # can't read anymore because the file is closed.
```
The file we are using is a long series of characters, but two of the
characters are new line characters. If we looked at the file in
sequence, it would look like "abcde\nfghij\n". Separating a file into
lines is popular enough that there are two ways to read whole lines in a
file. The first is to use the `readlines()` method:
```
f = open('OtherFiles/testfile.txt','r')
lines = f.readlines()
print(lines)
f.close() # Always close the file when you are done with it
```
A very important point about the `readlines()` method is that it *keeps* the
newline character at the end of each line. You can use the `strip()`
method to get rid of the whitespace and escape characters at the beginning and end of the string.
File handles are also iterable, which means we can use them in for loops
or list extensions. You will learn more about this iteration later:
```
f = open('OtherFiles/testfile.txt','r')
lines = [line.strip() for line in f]
f.close()
print(lines)
lines = []
f = open('OtherFiles/testfile.txt','r')
for line in f:
lines.append(line.strip())
f.close()
print(lines)
```
These are equivalent operations. It's often best to handle a file one
line at a time, particularly when the file is so large it might not fit
in memory.
The other half of the story is writing output to files. We'll talk about
two techniques: writing to the shell and writing to files directly.
If your program only creates one stream of output, it's often a good
idea to write to the shell using the print function. There are several
advantages to this strategy, including the fact that it allows the user
to select where they want to store the output without worrying about any
command line flags. You can use "\>" to direct the output of your
program to a file or use "|" to pipe it to another program (this was covered in the 01-shell notebook).
Sometimes, you need to direct your output directly to a file handle. For
instance, if your program produces two output streams, you may want to
assign two open file handles. Opening a file for writing simply requires
changing the second option from 'r' to 'w' or 'a'.
*Caution!* Opening a file with the 'w' option means start writing *at
the beginning*, which may overwrite old material. If you want to append
to the file without losing what is already there, open it with 'a'.
Writing to a file uses the `write()` command, which accepts a string. Check outfile.txt before and after running the following code.
```
outfile = open('OtherFiles/outfile.txt','w')
outfile.write('This is the first line!')
outfile.close()
```
Another way to write to a file is to use `writelines()`, which accepts a
list of strings and writes them in order.
*Caution!* `writelines()` does not
append newlines. If you really want to write a newline at the end of
each string in the list, add it yourself.
### Aside About File Editing
You might wonder whether it is possible to edit a file in place. You can use `f.seek()`
and `f.tell()` to verify that even if your file handle is pointing to the
middle of a file, write commands go to the end of the file in append
mode. The best way to change a file is to open a temporary file in
/tmp/, fill it, and then move it to overwrite the original. On large
clusters, /tmp/ is often local to each node, which means it reduces I/O
bottlenecks associated with writing large amounts of data.
## Exercise
Find the index of the string 'needle' in the file OtherFiles/haystack.txt using the `.find()` method of a String. Recall that `file.read()` will return the file as a string.
```
f = None; # open the file here
n = 0 # find the index here
print("Needle at Index %d" % n) # n should be 185
f.close()
```
Create a new file OtherFiles/haystack1.txt by opening in "w+" mode. "a+" and "r+" also create a file if it does not exist. Write the contents of haystack.txt into this new file and add an extra 'needle' at the end.
```
# Your code here
f = None;
f.close()
# Prints the file to check your answer; don't write anything here
f = open("OtherFiles/haystack1.txt","r")
print(f.read())
f.close()
```
| github_jupyter |
# Lecture 12: Canonical Economic Models
[Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2022)
[<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2022/master?urlpath=lab/tree/12/Canonical_economic_models.ipynb)
1. [OverLapping Generations (OLG) model](#OverLapping-Generations-(OLG)-model)
2. [Ramsey model](#Ramsey-model)
3. [Further perspectives](#Further-perspectives)
You will learn how to solve **two canonical economic models**:
1. The **overlapping generations (OLG) model**
2. The **Ramsey model**
**Main take-away:** Hopefully inspiration to analyze such models on your own.
```
%load_ext autoreload
%autoreload 2
import numpy as np
from scipy import optimize
# plotting
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
plt.rcParams.update({'font.size': 12})
# models
from OLGModel import OLGModelClass
from RamseyModel import RamseyModelClass
```
<a id="OverLapping-Generations-(OLG)-model"></a>
# 1. OverLapping Generations (OLG) model
## 1.1 Model description
**Time:** Discrete and indexed by $t\in\{0,1,\dots\}$.
**Demographics:** Population is constant. A life consists of
two periods, *young* and *old*.
**Households:** As young a household supplies labor exogenously, $L_{t}=1$, and earns an after-tax wage $(1-\tau_w)w_{t}$. Consumption as young and old
are denoted by $C_{1t}$ and $C_{2t+1}$. The after-tax return on saving is $(1-\tau_{r})r_{t+1}$. Utility is
$$
\begin{aligned}
U & =\max_{s_{t}\in[0,1]}\frac{C_{1t}^{1-\sigma}}{1-\sigma}+\beta\frac{C_{2t+1}^{1-\sigma}}{1-\sigma},\,\,\,\beta > -1, \sigma > 0\\
& \text{s.t.}\\
& S_{t}=s_{t}(1-\tau_{w})w_{t}\\
& C_{1t}=(1-s_{t})(1-\tau_{w})w_{t}\\
& C_{2t+1}=(1+(1-\tau_{r})r_{t+1})S_{t}
\end{aligned}
$$
The problem is formulated in terms of the saving rate $s_t\in[0,1]$.
**Firms:** Firms rent capital $K_{t-1}$ at the rental rate $r_{t}^{K}$,
and hires labor $E_{t}$ at the wage rate $w_{t}$. Firms have access
to the production function
$$
\begin{aligned}
Y_{t}=F(K_{t-1},E_{t})=(\alpha K_{t-1}^{-\theta}+(1-\alpha)E_{t}^{-\theta})^{\frac{1}{-\theta}},\,\,\,\theta>-1,\alpha\in(0,1)
\end{aligned}
$$
Profits are
$$
\begin{aligned}
\Pi_{t}=Y_{t}-w_{t}E_{t}-r_{t}^{K}K_{t-1}
\end{aligned}
$$
**Government:** Choose public consumption, $G_{t}$, and tax rates $\tau_w \in [0,1]$ and $\tau_r \in [0,1]$. Total tax revenue is
$$
\begin{aligned}
T_{t} &=\tau_r r_{t} (K_{t-1}+B_{t-1})+\tau_w w_{t}
\end{aligned}
$$
Government debt accumulates according to
$$
\begin{aligned}
B_{t} &=(1+r^b_{t})B_{t-1}-T_{t}+G_{t}
\end{aligned}
$$
A *balanced budget* implies $G_{t}=T_{t}-r_{t}B_{t-1}$.
**Capital:** Depreciates with a rate of $\delta \in [0,1]$.
**Equilibrium:**
1. Households maximize utility
2. Firms maximize profits
3. No-arbitrage between bonds and capital
$$
r_{t}=r_{t}^{K}-\delta=r_{t}^{b}
$$
4. Labor market clears: $E_{t}=L_{t}=1$
5. Goods market clears: $Y_{t}=C_{1t}+C_{2t}+G_{t}+I_{t}$
6. Asset market clears: $S_{t}=K_{t}+B_{t}$
7. Capital follows its law of motion: $K_{t}=(1-\delta)K_{t-1}+I_{t}$
**For more details on the OLG model:** See chapter 3-4 [here](https://web.econ.ku.dk/okocg/VM/VM-general/Material/Chapters-VM.htm).
## 1.2 Solution and simulation
**Implication of profit maximization:** From FOCs
$$
\begin{aligned}
r_{t}^{k} & =F_{K}(K_{t-1},E_{t})=\alpha K_{t-1}^{-\theta-1}Y_{t}^{1+\theta}\\
w_{t} & =F_{E}(K_{t-1},E_{t})=(1-\alpha)E_{t}^{-\theta-1}Y_{t}^{1+\theta}
\end{aligned}
$$
**Implication of utility maximization:** From FOC
$$
\begin{aligned}
C_{1t}^{-\sigma}=\beta (1+(1-\tau_r)r_{t+1})C_{2t+1}^{-\sigma}
\end{aligned}
$$
**Simulation algorithm:** At the beginning of period $t$, the
economy can be summarized in the state variables $K_{t-1}$ and $B_{t-1}$. *Before* $s_t$ is known, we can calculate:
$$
\begin{aligned}
Y_{t} & =F(K_{t-1},1)\\
r_{t}^{k} & =F_{K}(K_{t-1},1)\\
w_{t} & =F_{E}(K_{t-1},1)\\
r_{t} & =r^k_{t}-\delta\\
r_{t}^{b} & =r_{t}\\
\tilde{r}_{t} & =(1-\tau_{r})r_{t}\\
C_{2t} & =(1+\tilde{r}_{t})(K_{t-1}+B_{t-1})\\
T_{t} & =\tau_{r}r_{t}(K_{t-1}+B_{t-1})+\tau_{w}w_{t}\\
B_{t} & =(1+r^b_{t})B_{t-1}-T_{t}+G_{t}\\
\end{aligned}
$$
*After* $s_t$ is known we can calculate:
$$
\begin{aligned}
C_{1t} & = (1-s_{t})(1-\tau_{w})w_{t}\\
I_{t} & =Y_{t}-C_{1t}-C_{2t}-G_{t}\\
K_{t} & =(1-\delta)K_{t-1} + I_t
\end{aligned}
$$
**Solution algorithm:** Simulate forward choosing $s_{t}$ so
that we always have
$$
\begin{aligned}
C_{1t}^{-\sigma}=\beta(1+\tilde{r}_{t+1})C_{2t+1}^{-\sigma}
\end{aligned}
$$
**Implementation:**
1. Use a bisection root-finder to determine $s_t$
2. Low $s_t$: A lot of consumption today. Low marginal utility. LHS < RHS.
3. High $s_t$: Little consumption today. High marginal utility. LHS > RHS.
4. Problem: Too low $s_t$ might not be feasible if $B_t > 0$.
**Note:** Never errors in the Euler-equation due to *perfect foresight*.
**Question:** Are all the requirements for the equilibrium satisfied?
## 1.3 Test case
1. Production is Cobb-Douglas ($\theta = 0$)
2. Utility is logarithmic ($\sigma = 1$)
3. The government is not doing anything ($\tau_w=\tau_r=0$, $T_t = G_t = 0$ and $B_t = 0$)
**Analytical steady state:** It can be proven
$$ \lim_{t\rightarrow\infty} K_t = \left(\frac{1-\alpha}{1+1/\beta}\right)^{\frac{1}{1-\alpha}} $$
**Setup:**
```
model = OLGModelClass()
par = model.par # SimpeNamespace
sim = model.sim # SimpeNamespace
# a. production
par.production_function = 'cobb-douglas'
par.theta = 0.0
# b. households
par.sigma = 1.0
# c. government
par.tau_w = 0.0
par.tau_r = 0.0
sim.balanced_budget[:] = True # G changes to achieve this
# d. initial values
K_ss = ((1-par.alpha)/((1+1.0/par.beta)))**(1/(1-par.alpha))
par.K_lag_ini = 0.1*K_ss
```
### Simulate first period manually
```
from OLGModel import simulate_before_s, simulate_after_s, find_s_bracket, calc_euler_error
```
**Make a guess:**
```
s_guess = 0.41
```
**Evaluate first period:**
```
# a. initialize
sim.K_lag[0] = par.K_lag_ini
sim.B_lag[0] = par.B_lag_ini
simulate_before_s(par,sim,t=0)
print(f'{sim.C2[0] = : .4f}')
simulate_after_s(par,sim,s=s_guess,t=0)
print(f'{sim.C1[0] = : .4f}')
simulate_before_s(par,sim,t=1)
print(f'{sim.C2[1] = : .4f}')
print(f'{sim.rt[1] = : .4f}')
LHS_Euler = sim.C1[0]**(-par.sigma)
RHS_Euler = (1+sim.rt[1])*par.beta * sim.C2[1]**(-par.sigma)
print(f'euler-error = {LHS_Euler-RHS_Euler:.8f}')
```
**Implemented as function:**
```
euler_error = calc_euler_error(s_guess,par,sim,t=0)
print(f'euler-error = {euler_error:.8f}')
```
**Find bracket to search in:**
```
s_min,s_max = find_s_bracket(par,sim,t=0,do_print=True);
```
**Call root-finder:**
```
obj = lambda s: calc_euler_error(s,par,sim,t=0)
result = optimize.root_scalar(obj,bracket=(s_min,s_max),method='bisect')
print(result)
```
**Check result:**
```
euler_error = calc_euler_error(result.root,par,sim,t=0)
print(f'euler-error = {euler_error:.8f}')
```
### Full simulation
```
model.simulate()
```
**Check euler-errors:**
```
for t in range(5):
LHS_Euler = sim.C1[t]**(-par.sigma)
RHS_Euler = (1+sim.rt[t+1])*par.beta * sim.C2[t+1]**(-par.sigma)
print(f't = {t:2d}: euler-error = {LHS_Euler-RHS_Euler:.8f}')
```
**Plot and check with analytical solution:**
```
fig = plt.figure(figsize=(6,6/1.5))
ax = fig.add_subplot(1,1,1)
ax.plot(model.sim.K_lag,label=r'$K_{t-1}$')
ax.axhline(K_ss,ls='--',color='black',label='analytical steady state')
ax.legend(frameon=True)
fig.tight_layout()
K_lag_old = model.sim.K_lag.copy()
```
**Task:** Test if the starting point matters?
**Additional check:** Not much should change with only small parameter changes.
```
# a. production (close to cobb-douglas)
par.production_function = 'ces'
par.theta = 0.001
# b. household (close to logarithmic)
par.sigma = 1.1
# c. goverment (weakly active)
par.tau_w = 0.001
par.tau_r = 0.001
# d. simulate
model.simulate()
fig = plt.figure(figsize=(6,6/1.5))
ax = fig.add_subplot(1,1,1)
ax.plot(model.sim.K_lag,label=r'$K_{t-1}$')
ax.plot(K_lag_old,label=r'$K_{t-1}$ ($\theta = 0.0, \sigma = 1.0$, inactive government)')
ax.axhline(K_ss,ls='--',color='black',label='analytical steady state (wrong)')
ax.legend(frameon=True)
fig.tight_layout()
```
## 1.4 Active government
```
model = OLGModelClass()
par = model.par
sim = model.sim
```
**Baseline:**
```
model.simulate()
fig = plt.figure(figsize=(6,6/1.5))
ax = fig.add_subplot(1,1,1)
ax.plot(sim.K_lag/(sim.Y),label=r'$\frac{K_{t-1}}{Y_t}$')
ax.plot(sim.B_lag/(sim.Y),label=r'$\frac{B_{t-1}}{Y_t}$')
ax.legend(frameon=True)
fig.tight_layout()
```
**Remember steady state:**
```
K_ss = sim.K_lag[-1]
B_ss = sim.B_lag[-1]
G_ss = sim.G[-1]
```
**Spending spree of 5% in $T=3$ periods:**
```
# a. start from steady state
par.K_lag_ini = K_ss
par.B_lag_ini = B_ss
# b. spending spree
T0 = 0
dT = 3
sim.G[T0:T0+dT] = 1.05*G_ss
sim.balanced_budget[:T0] = True #G adjusts
sim.balanced_budget[T0:T0+dT] = False # B adjusts
sim.balanced_budget[T0+dT:] = True # G adjusts
```
**Simulate:**
```
model.simulate()
```
**Crowding-out of capital:**
```
fig = plt.figure(figsize=(6,6/1.5))
ax = fig.add_subplot(1,1,1)
ax.plot(sim.K/(sim.Y),label=r'$\frac{K_{t-1}}{Y_t}$')
ax.plot(sim.B/(sim.Y),label=r'$\frac{B_{t-1}}{Y_t}$')
ax.legend(frameon=True)
fig.tight_layout()
```
**Question:** Would the households react today if the spending spree is say 10 periods in the future?
## 1.5 Getting an overview
1. Spend 3 minutes looking at `OLGModel.py`
2. Write one question at [https://b.socrative.com/login/student/](https://b.socrative.com/login/student/) with `ROOM=NUMECON`
## 1.6 Potential analysis and extension
**Potential analysis:**
1. Over-accumulation of capital relative to golden rule?
2. Calibration to actual data
3. Generational inequality
4. Multiple equilibria
**Extensions:**
1. Add population and technology growth
2. More detailed tax and transfer system
3. Utility and productive effect of government consumption/investment
4. Endogenous labor supply
5. Bequest motive
6. Uncertain returns on capital
7. Additional assets (e.g. housing)
8. More than two periods in the life-cycle (life-cycle)
9. More than one dynasty (cross-sectional inequality dynamics)
<a id="Ramsey-model"></a>
# 2. Ramsey model
... also called the Ramsey-Cass-Koopmans model.
## 2.1 Model descripton
**Time:** Discrete and indexed by $t\in\{0,1,\dots\}$.
**Demographics::** Population is constant. Everybody lives forever.
**Household:** Households supply labor exogenously, $L_{t}=1$, and earns a wage $w_{t}$. The return on saving is $r_{t+1}$. Utility is
$$
\begin{aligned}
U & =\max_{\{C_{t}\}_{t=0}^{\infty}}\sum_{t=0}^{\infty}\beta^{t}\frac{C_{t}^{1-\sigma}}{1-\sigma},\beta\in(0,1),\sigma>0\\
& \text{s.t.}\\
& M_{t}=(1+r_{t})N_{t-1}+w_{t}\\
& N_{t}=M_{t}-C_{t}
\end{aligned}
$$
where $M_{t}$ is cash-on-hand and $N_{t}$ is end-of-period assets.
**Firms:** Firms rent capital $K_{t-1}$ at the rental rate $r_{t}^{K}$
and hires labor $E_{t}$ at the wage rate $w_{t}$. Firms have access
to the production function
$$
\begin{aligned}
Y_{t}= F(K_{t-1},E_{t})=A_t(\alpha K_{t-1}^{-\theta}+(1-\alpha)E_{t}^{-\theta})^{\frac{1}{-\theta}},\,\,\,\theta>-1,\alpha\in(0,1),A_t>0
\end{aligned}
$$
Profits are
$$
\begin{aligned}
\Pi_{t}=Y_{t}-w_{t}E_{t}-r_{t}^{K}K_{t-1}
\end{aligned}
$$
**Equilibrium:**
1. Households maximize utility
2. Firms maximize profits
3. Labor market clear: $E_{t}=L_{t}=1$
4. Goods market clear: $Y_{t}=C_{t}+I_{t}$
5. Asset market clear: $N_{t}=K_{t}$ and $r_{t}=r_{t}^{k}-\delta$
6. Capital follows its law of motion: $K_{t}=(1-\delta)K_{t-1}+I_{t}$
**Implication of profit maximization:** From FOCs
$$
\begin{aligned}
r_{t}^{k} & = F_{K}(K_{t-1},E_{t})=\alpha A_t^{-\theta} K_{t-1}^{-\theta-1}Y_{t}^{1+\theta}\\
w_{t} & = F_{E}(K_{t-1},E_{t})=(1-\alpha)A_t^{-\theta} E_{t}^{-\theta-1}Y_{t}^{1+\theta}
\end{aligned}
$$
**Implication of utility maximization:** From FOCs
$$
\begin{aligned}
C_{t}^{-\sigma}=\beta(1+r_{t+1})C_{t+1}^{-\sigma}
\end{aligned}
$$
**Solution algorithm:**
We can summarize the model in the **non-linear equation system**
$$
\begin{aligned}
\boldsymbol{H}(\boldsymbol{K},\boldsymbol{C},K_{-1})=\left[\begin{array}{c}
H_{0}\\
H_{1}\\
\begin{array}{c}
\vdots\end{array}
\end{array}\right]=\left[\begin{array}{c}
0\\
0\\
\begin{array}{c}
\vdots\end{array}
\end{array}\right]
\end{aligned}
$$
where $\boldsymbol{K} = [K_0,K_1\dots]$, $\boldsymbol{C} = [C_0,C_1\dots]$, and
$$
\begin{aligned}
H_{t}
=\left[\begin{array}{c}
C_{t}^{-\sigma}-\beta(1+r_{t+1})C_{t+1}^{-\sigma}\\
K_{t}-[(1-\delta)K_{t-1}+Y_t-C_{t}]
\end{array}\right]
=\left[\begin{array}{c}
C_{t}^{-\sigma}-\beta(1+F_{K}(K_{t},1))C_{t+1}^{-\sigma}\\
K_{t}-[(1-\delta)K_{t-1} + F(K_{t-1},1)-C_{t}])
\end{array}\right]
\end{aligned}
$$
**Path:** We refer to $\boldsymbol{K}$ and $\boldsymbol{C}$ as *transition paths*.
**Implementation:** We solve this equation system in **two steps**:
1. Assume all variables are in steady state after some **truncation horizon**.
1. Calculate the numerical **jacobian** of $\boldsymbol{H}$ wrt. $\boldsymbol{K}$
and $\boldsymbol{C}$ around the steady state
2. Solve the equation system using a **hand-written Broyden-solver**
**Note:** The equation system can also be solved directly using `scipy.optimize.root`.
**Remember:** The jacobian is just a gradient. I.e. the matrix of what the implied errors are in $\boldsymbol{H}$ when a *single* $K_t$ or $C_t$ change.
## 2.2 Solution
```
model = RamseyModelClass()
par = model.par
ss = model.ss
path = model.path
```
**Find steady state:**
1. Target steady-state capital-output ratio, $K_{ss}/Y_{ss}$ of 4.0.
2. Force steady-state output $Y_{ss} = 1$.
3. Adjust $\beta$ and $A_{ss}$ to achieve this.
```
model.find_steady_state(KY_ss=4.0)
```
**Test that errors and the path are 0:**
```
# a. set initial value
par.K_lag_ini = ss.K
# b. set path
path.A[:] = ss.A
path.C[:] = ss.C
path.K[:] = ss.K
# c. check errors
errors_ss = model.evaluate_path_errors()
assert np.allclose(errors_ss,0.0)
model.calculate_jacobian()
```
**Solve:**
```
par.K_lag_ini = 0.50*ss.K # start away from steady state
model.solve() # find transition path
fig = plt.figure(figsize=(6,6/1.5))
ax = fig.add_subplot(1,1,1)
ax.plot(path.K_lag,label=r'$K_{t-1}$')
ax.legend(frameon=True)
fig.tight_layout()
```
## 2.3 Comparison with scipy solution
**Note:** scipy computes the jacobian internally
```
model_scipy = RamseyModelClass()
model_scipy.par.solver = 'scipy'
model_scipy.find_steady_state(KY_ss=4.0)
model_scipy.par.K_lag_ini = 0.50*model_scipy.ss.K
model_scipy.path.A[:] = model_scipy.ss.A
model_scipy.solve()
fig = plt.figure(figsize=(6,6/1.5))
ax = fig.add_subplot(1,1,1)
ax.plot(path.K_lag,label=r'$K_{t-1}$, broyden')
ax.plot(model_scipy.path.K_lag,ls='--',label=r'$K_{t-1}$, scipy')
ax.legend(frameon=True)
fig.tight_layout()
```
## 2.4 Persistent technology shock
**Shock:**
```
par.K_lag_ini = ss.K # start from steady state
path.A[:] = 0.95**np.arange(par.Tpath)*0.1*ss.A + ss.A # shock path
```
**Terminology:** This is called an MIT-shock. Households do not expect shocks. Know the full path of the shock when it arrives. Continue to believe no future shocks will happen.
**Solve:**
```
model.solve()
fig = plt.figure(figsize=(2*6,6/1.5))
ax = fig.add_subplot(1,2,1)
ax.set_title('Capital, $K_{t-1}$')
ax.plot(path.K_lag)
ax = fig.add_subplot(1,2,2)
ax.plot(path.A)
ax.set_title('Technology, $A_t$')
fig.tight_layout()
```
**Question:** Could a much more persistent shock be problematic?
## 2.5 Future persistent technology shock
**Shock happening after period $H$:**
```
par.K_lag_ini = ss.K # start from steady state
# shock
H = 50
path.A[:] = ss.A
path.A[H:] = 0.95**np.arange(par.Tpath-H)*0.1*ss.A + ss.A
```
**Solve:**
```
model.solve()
fig = plt.figure(figsize=(2*6,6/1.5))
ax = fig.add_subplot(1,2,1)
ax.set_title('Capital, $K_{t-1}$')
ax.plot(path.K_lag)
ax = fig.add_subplot(1,2,2)
ax.plot(path.A)
ax.set_title('Technology, $A_t$')
fig.tight_layout()
par.K_lag_ini = path.K[30]
path.A[:] = ss.A
model.solve()
```
**Take-away:** Households are forward looking and responds before the shock hits.
## 2.6 Getting an overview
1. Spend 3 minutes looking at `RamseyModel.py`
2. Write one question at [https://b.socrative.com/login/student/](https://b.socrative.com/login/student/) with `ROOM=NUMECON`
## 2.7 Potential analysis and extension
**Potential analysis:**
1. Different shocks (e.g. discount factor)
2. Multiple shocks
3. Permanent shocks ($\rightarrow$ convergence to new steady state)
4. Transition speed
**Extensions:**
1. Add a government and taxation
2. Endogenous labor supply
3. Additional assets (e.g. housing)
4. Add nominal rigidities (New Keynesian)
<a id="Further-perspectives"></a>
# 3. Further perspectives
**The next steps beyond this course:**
1. The **Bewley-Huggett-Aiyagari** model. A multi-period OLG model or Ramsey model with households making decisions *under uncertainty and borrowing constraints* as in lecture 11 under "dynamic optimization". Such heterogenous agent models are used in state-of-the-art research, see [Quantitative Macroeconomics with Heterogeneous Households](https://www.annualreviews.org/doi/abs/10.1146/annurev.economics.050708.142922).
2. Further adding nominal rigidities this is called a **Heterogenous Agent New Keynesian (HANK)** model. See [Macroeconomics with HANK models](https://drive.google.com/file/d/16Qq7NJ_AZh5NmjPFSrLI42mfT7EsCUeH/view).
3. This extends the **Representative Agent New Keynesian (RANK)** model, which itself is a Ramsey model extended with nominal rigidities.
4. The final frontier is including **aggregate risk**, which either requires linearization or using a **Krussell-Smith method**. Solving the model in *sequence-space* as we did with the Ramsey model is a frontier method (see [here](https://github.com/shade-econ/sequence-jacobian/#sequence-space-jacobian)).
**Next lecture:** Agent Based Models
| github_jupyter |
```
##### Copyright 2020 Google LLC.
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# We are using NitroML on Kubeflow:
This notebook allows users to analyze NitroML benchmark results.
```
# This notebook assumes you have followed the following steps to setup port-forwarding:
# Step 1: Configure your cluster with gcloud
# `gcloud container clusters get-credentials <cluster_name> --zone <cluster-zone> --project <project-id>
# Step 2: Get the port where the gRPC service is running on the cluster
# `kubectl get configmap metadata-grpc-configmap -o jsonpath={.data}`
# Use `METADATA_GRPC_SERVICE_PORT` in the next step. The default port used is 8080.
# Step 3: Port forwarding
# `kubectl port-forward deployment/metadata-grpc-deployment 9898:<METADATA_GRPC_SERVICE_PORT>`
# Troubleshooting
# If getting error related to Metadata (For examples, Transaction already open). Try restarting the metadata-grpc-service using:
# `kubectl rollout restart deployment metadata-grpc-deployment`
import sys, os
PROJECT_DIR=os.path.join(sys.path[0], '..')
%cd {PROJECT_DIR}
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.metadata_store import metadata_store
from nitroml.benchmark import results
```
## Connect to the ML Metadata (MLMD) database
First we need to connect to our MLMD database which stores the results of our
benchmark runs.
```
connection_config = metadata_store_pb2.MetadataStoreClientConfig()
connection_config.host = 'localhost'
connection_config.port = 9898
store = metadata_store.MetadataStore(connection_config)
```
## Display benchmark results
Next we load and visualize `pd.DataFrame` containing our benchmark results.
These results contain contextual features such as the pipeline ID, and
benchmark metrics as computed by the downstream Evaluators. If your
benchmark included an `EstimatorTrainer` component, its hyperparameters may also
display in the table below.
```
#@markdown ### Choose how to aggregate metrics:
mean = False #@param { type: "boolean" }
stdev = False #@param { type: "boolean" }
min_and_max = False #@param { type: "boolean" }
agg = []
if mean:
agg.append("mean")
if stdev:
agg.append("std")
if min_and_max:
agg += ["min", "max"]
df = results.overview(store, metric_aggregators=agg)
df.head()
```
### We can display an interactive table using qgrid
Please follow the latest instructions on downloading the qgrid package from here: https://github.com/quantopian/qgrid
```
import qgrid
qgid_wdget = qgrid.show_grid(df, show_toolbar=True)
qgid_wdget
```
| github_jupyter |
# Group Metrics
The `fairlearn` package contains algorithms which enable machine learning models to minimise disparity between groups. The `metrics` portion of the package provides the means required to verify that the mitigation algorithms are succeeding.
```
import numpy as np
import pandas as pd
import sklearn.metrics as skm
```
## Ungrouped Metrics
At their simplest, metrics take a set of 'true' values $Y_{true}$ (from the input data) and predicted values $Y_{pred}$ (by applying the model to the input data), and use these to compute a measure. For example, the _recall_ or _true positive rate_ is given by
\begin{equation}
P( Y_{pred}=1 | Y_{true}=1 )
\end{equation}
That is, a measure of whether the model finds all the positive cases in the input data. The `scikit-learn` package implements this in [sklearn.metrics.recall_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html).
Suppose we have the following data:
```
Y_true = [0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1]
Y_pred = [0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1]
```
we can see that the prediction is 1 in five of the ten cases where the true value is 1, so we expect the recall to be 0.5:
```
skm.recall_score(Y_true, Y_pred)
```
## Metrics with Grouping
When considering fairness, each row of input data will have an associated group label $g \in G$, and we will want to know how the metric behaves for each $g$. To help with this, Fairlearn provides wrappers, which take an existing (ungrouped) metric function, and apply it to each group within a set of data.
Suppose in addition to the $Y_{true}$ and $Y_{pred}$ above, we had the following set of labels:
```
group_membership_data = ['d', 'a', 'c', 'b', 'b', 'c', 'c', 'c', 'b', 'd', 'c', 'a', 'b', 'd', 'c', 'c']
df = pd.DataFrame({ 'Y_true': Y_true, 'Y_pred': Y_pred, 'group_membership_data': group_membership_data})
df
```
```
import fairlearn.metrics as flm
group_metrics = flm.group_summary(skm.recall_score, Y_true, Y_pred, sensitive_features=group_membership_data, sample_weight=None)
print("Overall recall = ", group_metrics.overall)
print("recall by groups = ", group_metrics.by_group)
```
Note that the overall recall is the same as that calculated above in the Ungrouped Metric section, while the `by_group` dictionary matches the values we calculated by inspection from the table above.
In addition to these basic scores, `fairlearn.metrics` also provides convenience functions to recover the maximum and minimum values of the metric across groups and also the difference and ratio between the maximum and minimum:
```
print("min recall over groups = ", flm.group_min_from_summary(group_metrics))
print("max recall over groups = ", flm.group_max_from_summary(group_metrics))
print("difference in recall = ", flm.difference_from_summary(group_metrics))
print("ratio in recall = ", flm.ratio_from_summary(group_metrics))
```
## Supported Ungrouped Metrics
To be used by `group_summary`, the supplied Python function must take arguments of `y_true` and `y_pred`:
```python
my_metric_func(y_true, y_pred)
```
An additional argument of `sample_weight` is also supported:
```python
my_metric_with_weight(y_true, y_pred, sample_weight=None)
```
The `sample_weight` argument is always invoked by name, and _only_ if the user supplies a `sample_weight` argument.
## Convenience Wrapper
Rather than require a call to `group_summary` each time, we also provide a function which turns an ungrouped metric into a grouped one. This is called `make_metric_group_summary`:
```
recall_score_group_summary = flm.make_metric_group_summary(skm.recall_score)
results = recall_score_group_summary(Y_true, Y_pred, sensitive_features=group_membership_data)
print("Overall recall = ", results.overall)
print("recall by groups = ", results.by_group)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/thomle295/CartPole_RL/blob/main/main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
%cd '/content/gdrive/My Drive/Research/Reinforcement_Learning/CartPole/'
import urllib.request
urllib.request.urlretrieve('http://www.atarimania.com/roms/Roms.rar','Roms.rar')
!pip install tensorflow==2.3.0
!pip install gym
!pip install keras
!pip install keras-rl2
!pip install unrar
!unrar x Roms.rar
!mkdir rars
!mv HC\ ROMS.zip rars
!mv ROMS.zip rars
!python -m atari_py.import_roms rars
!pip install gym pyvirtualdisplay > /dev/null 2>&1
!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
!apt-get update > /dev/null 2>&1
!apt-get install cmake > /dev/null 2>&1
!pip install --upgrade setuptools 2>&1
!pip install ez_setup > /dev/null 2>&1
!pip install gym[atari] > /dev/null 2>&1
```
#Helper Functions
```
import gym
def query_environment(name):
env = gym.make(name)
spec = gym.spec(name)
print(f"Action Space: {env.action_space}")
print(f"Observation Space: {env.observation_space}")
print(f"Max Episode Steps: {spec.max_episode_steps}")
print(f"Nondeterministic: {spec.nondeterministic}")
print(f"Reward Range: {env.reward_range}")
print(f"Reward Threshold: {spec.reward_threshold}")
import gym
from gym.wrappers import Monitor
import glob
import io
import base64
from IPython.display import HTML
from pyvirtualdisplay import Display
from IPython import display as ipythondisplay
display = Display(visible=0, size=(1400, 900))
display.start()
"""
Utility functions to enable video recording of gym environment
and displaying it.
To enable video, just do "env = wrap_env(env)""
"""
def show_video():
mp4list = glob.glob('video/*.mp4')
if len(mp4list) > 0:
mp4 = mp4list[0]
video = io.open(mp4, 'r+b').read()
encoded = base64.b64encode(video)
ipythondisplay.display(HTML(data='''<video alt="test" autoplay
loop controls style="height: 400px;">
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii'))))
else:
print("Could not find video")
def wrap_env(env):
env = Monitor(env, './video', force=True)
return env
```
## Check Env OpenAI Gym
```
query_environment('CartPole-v0')
env = wrap_env(gym.make('CartPole-v0'))
observation = env.reset()
while True:
env.render()
#your agent goes here
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
break;
env.close()
show_video()
import random
env = wrap_env(gym.make("CartPole-v0"))
episodes = 10
for episode in range(1, episodes+1):
state = env.reset()
done = False
score = 0
while not done:
env.render()
action = random.choice([0,1])
n_state, reward, done, info = env.step(action)
score+=reward
print('Episode:{} Score:{}'.format(episode, score))
```
#Create a Deep Learning Model
```
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
def build_model(states, actions):
    """Return an uncompiled MLP that maps a (1, states) observation to
    one linear Q-value per action.

    Architecture: Flatten -> Dense(24, relu) -> Dense(24, relu)
    -> Dense(actions, linear).
    """
    layers = [
        Flatten(input_shape=(1, states)),
        Dense(24, activation='relu'),
        Dense(24, activation='relu'),
        Dense(actions, activation='linear'),
    ]
    network = Sequential()
    for layer in layers:
        network.add(layer)
    return network
# Build the Q-network for CartPole (4 state variables, 2 actions)
# and print its layer summary.
env = gym.make('CartPole-v0')
states = env.observation_space.shape[0]
actions = env.action_space.n
model = build_model(states, actions)
model.summary()
```
# Build Agent
```
from rl.agents import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
def build_agent(model, actions):
    """Assemble a DQN agent around *model* with a Boltzmann exploration
    policy and a sliding replay memory of 50k transitions."""
    dqn = DQNAgent(
        model=model,
        memory=SequentialMemory(limit=50000, window_length=1),
        policy=BoltzmannQPolicy(),
        nb_actions=actions,
        nb_steps_warmup=10,
        target_model_update=1e-2,
    )
    return dqn
# Train the DQN for 50k steps, report the mean reward over 100 test
# episodes, then save the learned weights.
env = gym.make('CartPole-v0')
actions = env.action_space.n
states = env.observation_space.shape[0]
model = build_model(states, actions)
dqn = build_agent(model, actions)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)
scores = dqn.test(env, nb_episodes=100, visualize=False)
print(np.mean(scores.history['episode_reward']))
_ = dqn.test(env, nb_episodes=15, visualize=True)
dqn.save_weights('dqn_weights.h5f', overwrite=True)
```
#Reloading Agent
```
# Rebuild the same architecture, load the saved weights, and record a
# few test episodes to video.
env = wrap_env(gym.make('CartPole-v0'))
actions = env.action_space.n
states = env.observation_space.shape[0]
model = build_model(states, actions)
dqn = build_agent(model, actions)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
dqn.load_weights('dqn_weights.h5f')
_ = dqn.test(env, nb_episodes=5, visualize=True)
show_video()
```
| github_jupyter |
# The importance of space
Agent based models are useful when the aggregate system behavior emerges out of local interactions amongst the agents. In the model of the evolution of cooperation, we created a set of agents and let all agents play against all other agents. Basically, we pretended as if all our agents were perfectly mixed. In practice, however, it is much more common that agents only interact with some, but not all, other agents. For example, in models of epidemiology, social interactions are a key factor. Thus, interactions are dependent on your social network. In other situations, our behavior might be based on what we see around us. Phenomena like fashion are at least partly driven by seeing what others are doing and mimicking this behavior. The same is true for many animals. Flocking dynamics as exhibited by starlings, or shoaling behavior in fish, can be explained by the animal looking at its nearest neighbors and staying within a given distance of them. In agent based models, anything that structures the interaction amongst agents is typically called a space. This space can be a 2d or 3d space with Euclidean distances (as in models of flocking and shoaling), it can also be a grid structure (as we will show below), or it can be a network structure.
MESA comes with several spaces that we can readily use. These are
* **SingleGrid;** an 'excel-like' space with each agent occupying a single grid cell
* **MultiGrid;** like grid, but with more than one agent per grid cell
* **HexGrid;** like grid, but on a hexagonal grid (*e.g.*, the board game Catan) thus changing who your neighbours are
* **ContinuousSpace;** a 2d continuous space where agents can occupy any coordinate
* **NetworkGrid;** a network structure where one or more agents occupy a given node.
A key concern when using a non-networked space is to think carefully about what happens at the edges of the space. In a basic implementation, an agent in, for example, the top left corner has only 2 neighbors, while an agent in the middle has four neighbors. This can give rise to artifacts in the results. Basically, the dynamics at the edges are different from the behavior further away from the edges. It is therefore quite common to use a torus, or donut, shape for the space. In this way, there is no longer any edge and artifacts are thus removed.
# The emergence of cooperation in space
The documentation of MESA on the different spaces is quite limited. Therefore, this assignment is largely a tutorial continuing on the evolution of cooperation.
We make the following changes to the model
* Each agent gets a position, which is an x,y coordinate indicating the grid cell the agent occupies.
* The model has a grid, with an agent of random class. We initialize the model with equal probabilities for each type of class.
* All agents play against their neighbors. On a grid, neighborhood can be defined in various ways. For example, a Von Neumann neighborhood contains the four cells that share a border with the central cell. A Moore neighborhood with distance one contains 8 cells by also considering the diagonal. Below, we use a neighborhood distance of 1, and we do include diagonal neighbors. So we set Moore to True. Feel free to experiment with this model by setting it to False,
* The evolutionary dynamic, after all agents having played, is that each agent compares its scores to its neighbors. It will adopt whichever strategy within its neighborhood performed best.
* Next to using a SingleGrid from MESA, we also use a DataCollector to handle collecting statistics.
Below, I discuss in more detail the code containing the most important modifications.
```
from collections import deque, Counter, defaultdict
from enum import Enum
from itertools import combinations
from math import floor
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mesa import Model, Agent
from mesa.space import SingleGrid
from mesa.datacollection import DataCollector
class Move(Enum):
# The two possible actions in one round of the prisoner's dilemma.
COOPERATE = 1
DEFECT = 2
class AxelrodAgent(Agent):
    """An Abstract class from which all strategies have to be derived

    Attributes
    ----------
    points : int
        total payoff accumulated so far
    pos : tuple
        (x, y) grid coordinate of the agent

    """

    def __init__(self, unique_id, pos, model):
        super().__init__(unique_id, model)
        self.points = 0
        self.pos = pos

    def step(self):
        '''
        This function defines the move and any logic for deciding
        on the move goes here.

        Returns
        -------
        Move.COOPERATE or Move.DEFECT

        '''
        # FIX: the original raised the misspelled name "NotImplemetedError",
        # which produced a NameError at runtime instead of the intended
        # NotImplementedError.
        raise NotImplementedError

    def receive_payoff(self, payoff, my_move, opponent_move):
        '''receive payoff and the two moves resulting in this payoff

        Parameters
        ----------
        payoff : int
        my_move : {Move.COOPERATE, Move.DEFECT}
        opponent_move : {Move.COOPERATE, Move.DEFECT}

        '''
        self.points += payoff

    def reset(self):
        '''
        This function is called after playing N iterations against
        another player.
        '''
        raise NotImplementedError
class TitForTat(AxelrodAgent):
    """Cooperate by default; defect only when the opponent defected in
    the immediately preceding round."""

    def __init__(self, unique_id, pos, model):
        super().__init__(unique_id, pos, model)
        # Assume goodwill before the first round.
        self.opponent_last_move = Move.COOPERATE

    def step(self):
        # Mirror whatever the opponent did last round.
        return self.opponent_last_move

    def receive_payoff(self, payoff, my_move, opponent_move):
        super().receive_payoff(payoff, my_move, opponent_move)
        # Remember the opponent's move for the next decision.
        self.opponent_last_move = opponent_move

    def reset(self):
        # New pairing: forget the previous opponent.
        self.opponent_last_move = Move.COOPERATE
class ContriteTitForTat(AxelrodAgent):
    """More forgiving tit-for-tat: defect only after the opponent
    defected in both of the last two rounds."""

    def __init__(self, unique_id, pos, model):
        super().__init__(unique_id, pos, model)
        # Rolling window of the opponent's last two moves, oldest first.
        self.opponent_last_two_moves = deque([Move.COOPERATE, Move.COOPERATE], maxlen=2)

    def step(self):
        # all() over the two-element window replaces the original
        # two-clause 'and'; the deque always holds exactly two moves.
        if all(move == Move.DEFECT for move in self.opponent_last_two_moves):
            return Move.DEFECT
        return Move.COOPERATE

    def receive_payoff(self, payoff, my_move, opponent_move):
        super().receive_payoff(payoff, my_move, opponent_move)
        self.opponent_last_two_moves.append(opponent_move)

    def reset(self):
        self.opponent_last_two_moves = deque([Move.COOPERATE, Move.COOPERATE], maxlen=2)
class NoisySpatialEvolutionaryAxelrodModel(Model):
# Spatial, evolutionary Axelrod tournament: agents occupy a torus grid,
# play N noisy iterated prisoner's dilemma rounds against each Moore
# neighbor, then copy the best-scoring strategy in their neighborhood.
def __init__(self, N, noise_level=0.01, seed=None,
height=20, width=20,):
super().__init__(seed=seed)
# probability that a single move is flipped (noise in game play)
self.noise_level = noise_level
# rounds played against each neighbor per model step
self.num_iterations = N
# NOTE(review): newer Mesa versions reserve `Model.agents`; this code
# assumes an older Mesa where assigning it is safe -- confirm against
# the pinned Mesa version.
self.agents = set()
self.payoff_matrix = {}
self.payoff_matrix[(Move.COOPERATE, Move.COOPERATE)] = (2, 2)
self.payoff_matrix[(Move.COOPERATE, Move.DEFECT)] = (0, 3)
self.payoff_matrix[(Move.DEFECT, Move.COOPERATE)] = (3, 0)
self.payoff_matrix[(Move.DEFECT, Move.DEFECT)] = (1, 1)
# torus=True removes edge effects: every cell has the same neighborhood size
self.grid = SingleGrid(width, height, torus=True)
# every direct subclass of AxelrodAgent is a candidate strategy
strategies = AxelrodAgent.__subclasses__()
num_strategies = len(strategies)
self.agent_id = 0
# fill every grid cell with an agent of a uniformly random strategy
for cell in self.grid.coord_iter():
_, x, y = cell
pos = (x, y)
self.agent_id += 1
strategy_index = int(floor(self.random.random()*num_strategies))
agent = strategies[strategy_index](self.agent_id, pos, self)
self.grid.position_agent(agent, (x, y))
self.agents.add(agent)
# collect, each step, the model attribute named after each strategy class
# (those attributes are written by count_agent_types via setattr)
self.datacollector = DataCollector(model_reporters={klass.__name__:klass.__name__
for klass in strategies})
def count_agent_types(self):
# Store the current count of each strategy class as a model attribute,
# which the DataCollector reads back by name via getattr.
counter = Counter()
for agent in self.agents:
counter[agent.__class__.__name__] += 1
for k,v in counter.items():
setattr(self, k, v)
def step(self):
'''Advance the model by one step.'''
self.count_agent_types()
self.datacollector.collect(self)
# Game play: each agent plays num_iterations rounds against every Moore
# neighbor; each pair is therefore played twice (once from each side).
for (agent_a, x, y) in self.grid.coord_iter():
for agent_b in self.grid.neighbor_iter((x,y), moore=True):
for _ in range(self.num_iterations):
move_a = agent_a.step()
move_b = agent_b.step()
#insert noise in movement
if self.random.random() < self.noise_level:
if move_a == Move.COOPERATE:
move_a = Move.DEFECT
else:
move_a = Move.COOPERATE
if self.random.random() < self.noise_level:
if move_b == Move.COOPERATE:
move_b = Move.DEFECT
else:
move_b = Move.COOPERATE
payoff_a, payoff_b = self.payoff_matrix[(move_a, move_b)]
agent_a.receive_payoff(payoff_a, move_a, move_b)
agent_b.receive_payoff(payoff_b, move_b, move_a)
# clear per-pairing memory before the next opponent
agent_a.reset()
agent_b.reset()
# evolution
# tricky, we need to determine for each grid cell
# is a change needed, if so, log position, agent, and type to change to
agents_to_change = []
for agent_a in self.agents:
neighborhood = self.grid.iter_neighbors(agent_a.pos, moore=True,
include_center=True)
neighborhood = ([n for n in neighborhood])
neighborhood.sort(key=lambda x:x.points, reverse=True)
best_strategy = neighborhood[0].__class__
# if best type of strategy in neighborhood is
# different from strategy type of agent, we need
# to change our strategy
if not isinstance(agent_a, best_strategy):
agents_to_change.append((agent_a, best_strategy))
# apply replacements only after all comparisons are done, so earlier
# replacements cannot influence later comparisons within this step
for entry in agents_to_change:
agent, klass = entry
self.agents.remove(agent)
self.grid.remove_agent(agent)
pos = agent.pos
self.agent_id += 1
new_agent = klass(self.agent_id, pos, self)
self.grid.position_agent(new_agent, pos)
self.agents.add(new_agent)
```
In the `__init__`, we now instantiate a SingleGrid, with a specified width and height. We set the kwarg torus to True indicating we are using a donut-shaped grid to avoid edge effects. Next, we fill this grid with random agents of the different types. This can be implemented in various ways. What I do here is use a list with the different classes (*i.e.*, types of strategies). By drawing a random number from a unit interval, multiplying it with the length of the list of classes and flooring the resulting number to an integer, I now have a random index into this list with the different classes. Next, I can get the class from the list and instantiate the agent object.
Some minor points with instantiating the agents. First, we give the agent a position, called pos, this is a default attribute assumed by MESA. We also still need a unique ID for the agent, we do this with a simple counter (`self.agent_id`). `self.grid.coord_iter` is a method on the grid. It returns an iterator over the cells in the grid. This iterator returns the agent occupying the cell and the x and y coordinate. Since the first item is `null` because we are filling the grid, we can ignore this. We do this by using the underscore variable name (`_`). This is a python convention.
Once we have instantiated the agent, we place the agent in the grid and add it to our collection of agents. If you look in more detail at the model class, you will see that I use a set for agents, rather than a list. The reason for this is that we are going to remove agents in the evolutionary phase. Removing agents from a list is memory and compute intensive, while it is computationally easy and cheap when we have a set.
```python
self.grid = SingleGrid(width, height, torus=True)
strategies = AxelrodAgent.__subclasses__()
num_strategies = len(strategies)
self.agent_id = 0
for cell in self.grid.coord_iter():
_, x, y = cell
pos = (x, y)
self.agent_id += 1
strategy_index = int(floor(self.random.random()*num_strategies))
agent = strategies[strategy_index](self.agent_id, pos, self)
self.grid.position_agent(agent, (x, y))
self.agents.add(agent)
```
We also use a DataCollector. This is a default class provided by MESA that can be used for keeping track of relevant statistics. It can store both model level variables as well as agent level variables. Here we are only using model level variables (i.e. attributes of the model). Specifically, we are going to have an attribute on the model for each type of agent strategy (i.e. classes). This attribute is the current count of agents in the grid of the specific type. To implement this, we need to do several things.
1. initialize a data collector instance
2. at every step update the current count of agents of each strategy
3. collect the current counts with the data collector.
For step 1, we set a DataCollector as an attribute. This datacollector needs to know the names of the attributes on the model it needs to collect. So we pass a dict as kwarg to model_reporters. This dict has as key the name by which the variable will be known in the DataCollector. As value, I pass the name of the attribute on the model, but it can also be a function or method which returns a number. Note that the ``klass`` misspelling is deliberate. The word ``class`` is reserved in Python, so you cannot use it as a variable name. It is common practice to use ``klass`` instead in the rare cases where you have a variable referring to a specific class.
```python
self.datacollector = DataCollector(model_reporters={klass.__name__:klass.__name__
for klass in strategies})
```
For step 2, we need to count at every step the number of agents per strategy type. To help keep track of this, we define a new method, `count_agent_types`. The main magic is the use of `setattr` which is a standard python function for setting attributes to a particular value on a given object. This reason for writing our code this way is that we automatically adapt our attributes to the classes of agents we have, rather than hardcoding the agent classes as attributes on our model. If we now add new classes of agents, we don't need to change the model code itself. There is also a ``getattr`` function, which is used by for example the DataCollector to get the values for the specified attribute names.
```python
def count_agent_types(self):
counter = Counter()
for agent in self.agents:
counter[agent.__class__.__name__] += 1
for k,v in counter.items():
setattr(self, k, v)
```
For step 3, we modify the first part of the ``step`` method. We first count the types of agents and next collect this data with the datacollector.
```python
self.count_agent_types()
self.datacollector.collect(self)
```
The remainder of the ``step`` method has also been changed quite substantially. First, We have to change against whom each agent is playing. We do this by iterating over all agents in the model. Next, we use the grid to give us the neighbors of a given agent. By setting the kwarg ``moore`` to ``True``, we indicate that we include also our diagonal neighbors. Next, we play as we did before in the noisy version of the Axelrod model.
```python
for agent_a in self.agents:
for agent_b in self.grid.neighbor_iter(agent_a.pos, moore=True):
for _ in range(self.num_iterations):
```
Second, we have to add the evolutionary dynamic. This is a bit tricky. First, we loop again over all agents in the model. We check its neighbors and see which strategy performed best. If this is of a different type (``not isinstance(agent_a, best_strategy)``, we add it to a list of agents that needs to be changed and the type of agent to which it needs to be changed. Once we know all agents that need to be changed, we can make this change.
Making the change is quite straightforward. We remove the agent from the set of agents (`self.agents`) and from the grid. Next we get the position of the agent, we increment our unique ID counter, and create a new agent. This new agent is then added to the grid and to the set of agents.
```python
# evolution
agents_to_change = []
for agent_a in self.agents:
neighborhood = self.grid.iter_neighbors(agent_a.pos, moore=True,
include_center=True)
neighborhood = ([n for n in neighborhood])
neighborhood.sort(key=lambda x:x.points, reverse=True)
best_strategy = neighborhood[0].__class__
# if best type of strategy in neighborhood is
# different from strategy type of agent, we need
# to change our strategy
if not isinstance(agent_a, best_strategy):
agents_to_change.append((agent_a, best_strategy))
for entry in agents_to_change:
agent, klass = entry
self.agents.remove(agent)
self.grid.remove_agent(agent)
pos = agent.pos
self.agent_id += 1
new_agent = klass(self.agent_id, pos, self)
self.grid.position_agent(new_agent, pos)
self.agents.add(new_agent)
```
## Assignment 1
Can you explain why we need to first loop over all agents before we are changing a given agent to a different strategy?
## Assignment 2
Add all agent classes (i.e., strategies) from the previous assignment to this model. Note that you might have to update the ``__init__`` method to reflect the new pos keyword argument and attribute.
## Assignment 3
Run the model for 50 steps, and with 200 rounds of the iterated game. Use the defaults for all other keyword arguments.
Plot the results.
*hint: the DataCollector can return the statistics it has collected as a dataframe, which in turn you can plot directly.*
This new model is quite a bit noisier than previously. We have a random initialization of the grid and depending on the initial neighborhood, different evolutionary dynamics can happen. On top, we have the noise in game play, and the random agent.
## Assignment 4
Let's explore the model for 10 replications. Run the model 10 times, with 200 rounds of the iterated prisoners dilemma. Run each model for fifty steps. Plot the results for each run.
1. Can you say anything generalizable about the behavioral dynamics of the model?
2. What do you find striking in the results and why?
3. If you compare the results for this spatially explicit version of the Emergence of Cooperation with the non-spatially-explicit version, what are the most important differences in dynamics? Can you explain why adding local interactions results in these changes?
| github_jupyter |
Code adapted from https://github.com/patrickcgray/open-geo-tutorial
```
from IPython.display import Audio, display
from timeit import default_timer as timer
start = timer()
def color_stretch(image, index):
    """Return the bands of *image* selected by *index*, contrast-stretched.

    The selected bands are cast to float64 and each one is rescaled with
    rasterio.plot.adjust_band, band by band.
    """
    stretched = image[:, :, index].astype(np.float64)
    n_bands = stretched.shape[2]
    for band_idx in range(n_bands):
        stretched[:, :, band_idx] = rasterio.plot.adjust_band(stretched[:, :, band_idx])
    return stretched
def alert():
    """Print minutes elapsed since the global *start* and play a sound."""
    global start
    elapsed_minutes = (timer() - start) / 60
    print("\n\nDuration in minutes : " + str(elapsed_minutes) + " minutes." )
    # Audible notification that the long-running cell has finished.
    display(Audio(url='https://sound.peal.io/ps/audios/000/000/537/original/woo_vu_luvub_dub_dub.wav', autoplay=True))
```
# Preparing Dataset
#### Cutting images for faster process
```
# Pixel windows (row/column ranges) cut from the full raster: one for
# training, one for testing.
# 0 to 10980 for this img_data
#training
#xmin = 6700
#xmax = 6900
#ymin = 6700
#ymax = 6900
xmin = 6700
xmax = 7000
ymin = 6600
ymax = 6900
xmin_test = 6500
xmax_test = 6700
ymin_test = 6750
ymax_test = 6950
# Bounding square that covers both windows, used to clip the shapefile.
shapemin = min(xmin,ymin,xmin_test,ymin_test)
shapemax = max(xmax,ymax,xmax_test,ymax_test)
print(shapemin)
shapemax
import rasterio
from rasterio.mask import mask
import geopandas as gp
import numpy as np
import shapely
from shapely import geometry
from shapely.geometry import shape, Point, LineString, Polygon , mapping
import matplotlib.pyplot as plt
from rasterio.plot import show
import pyproj
from pyproj import CRS
import fiona
import os # we need os to do some basic file operations
import re
from rasterio.plot import adjust_band
from rasterio.plot import reshape_as_raster, reshape_as_image
import matplotlib.pyplot as plt
from rasterio.plot import show
from pyproj import transform
from pyproj import Proj
import ast
# Load the Sentinel multi-band raster, cut the training and test windows,
# and display both as RGB previews.
# create a products directory within the data dir which won't be uploaded to Github
img_dir = './data/'
# check to see if the dir it exists, if not, create it
if not os.path.exists(img_dir):
os.makedirs(img_dir)
# filepath for image we're writing out
img_fp = img_dir + 'sentinel_bands_img_data_1.tif'
full_dataset = rasterio.open(img_fp)
img_rows, img_cols = full_dataset.shape
img_bands = full_dataset.count
print(full_dataset.shape) # dimensions
print(full_dataset.count) # bands
with rasterio.open(img_fp) as src:
# may need to reduce this image size if your kernel crashes, takes a lot of memory
img = src.read()[:, : , : ]
# Take our full image and reshape into long 2d array (nrow * ncol, nband) for classification
img_train = img[:, xmin : xmax , ymin : ymax ]
img_test = img[:, xmin_test : xmax_test , ymin_test : ymax_test ]
reshaped_img_train = reshape_as_image(img_train)
reshaped_img_test = reshape_as_image(img_test)
fig, axs = plt.subplots(2,1,figsize=(15,15))
# bands [2, 1, 0] are displayed as RGB
img_stretched_train = color_stretch(reshaped_img_train, [2, 1, 0])
axs[0].imshow(img_stretched_train)
img_stretched_test = color_stretch(reshaped_img_test, [2, 1, 0])
axs[1].imshow(img_stretched_test)
```
# Intercepting label data with cutting image
```
# Load the CLC2012 label shapefile, keep only the label columns,
# and declare its CRS as WGS84.
# Open the dataset from the file
shapefile = gp.read_file('./data/shapefiles/shapefile_compressed/shp_2012_IMG_DATA_1.shp')
shapefile = shapefile.filter(['Legenda','geometry','CLC2012'])
shapefile.crs = {'init': 'epsg:4326'}
shapefile
shapefile.bounds
from pyproj import transform
from pyproj import Proj
def cut_bounds( xmin , xmax , ymin , ymax ):
    """Return the four corner points of the pixel window projected into
    the raster's CRS.

    Uses the affine transform of the module-level *full_dataset* to map
    the (row, col) window corners to map coordinates, and prints the
    current projection for inspection.
    """
    # FIX: dropped the four no-op self-assignments (xmin = xmin, ...)
    # from the original -- they were dead code.
    # this will get our four corner points
    raster_gps_points = full_dataset.transform * (xmin, ymin),full_dataset.transform * (xmax, ymin),full_dataset.transform * (xmax, ymax), full_dataset.transform * (xmin, ymax),
    # Project all longitudes, latitudes using the pyproj package
    p1 = Proj(full_dataset.crs) # our current crs
    print(p1)
    print('raster bounds in current crs :\n', raster_gps_points)
    return raster_gps_points
raster_gps_points = cut_bounds( shapemin , shapemax , shapemin , shapemax )
# Clip the label shapefile to the raster window: build a polygon from the
# four corner points, bring both layers to EPSG:4326, and keep only the
# shapefile rows that intersect the window.
def shapefile_from_rasterbounds(raster_gps_points,shapefile) :
polygon = Polygon( list(raster_gps_points))
print("Polygon raster gps points",polygon)
# NOTE(review): the polygon coordinates come from the raster CRS, which is
# assumed here to be EPSG:32629 -- confirm against full_dataset.crs.
imageBounds = gp.GeoDataFrame(crs=CRS.from_epsg(32629))
imageBounds.geometry = [polygon]
imageBounds = imageBounds.to_crs(fiona.crs.from_epsg(4326))
print("Imaged cutted bounds",imageBounds.bounds)
shapefile = shapefile.to_crs(fiona.crs.from_epsg(4326))
print("Shapefile bounds",shapefile.bounds)
# NOTE(review): the 'op' keyword of gp.sjoin is deprecated in newer
# geopandas (renamed to 'predicate') -- verify the pinned version.
intersected = gp.sjoin( shapefile, imageBounds, how='inner', op='intersects', lsuffix='left', rsuffix='right')
intersected = intersected.reset_index()
print("Intersected df",intersected)
print("Intersected crs",intersected.crs)
print("Intersected bounds",intersected.bounds)
return intersected
shapefile = shapefile_from_rasterbounds(raster_gps_points,shapefile)
import re
def class_from_CLCcode(clc):
    """Map a CLC2012 land-cover code to one of the coarse mega-classes.

    The regex patterns are tried in insertion order and the first match
    wins; 'Not Defined' is returned when nothing matches.
    """
    # NOTE(review): in patterns like '^1.*' and '^3.4' the '.' matches ANY
    # character, not a literal dot, so '^3.4' also matches e.g. '314';
    # earlier patterns win, making behaviour order-dependent -- confirm
    # this is intended.
    mega_classes = {
        '^1.*' : 'Territórios artificializados',
        '^2.*' : 'Agricultura',
        '^31.*' : 'Floresta',
        '^3.4' : 'Floresta',
        '^32[123]' : 'Vegetação natural',
        '^33[123]' : 'Espaços descobertos ou com vegetação esparsa',
        '^41.*' : 'Zonas húmidas',
        '^42[13]' : 'Zonas húmidas',
        '^5.*' : 'Corpos de água',
        '^422' : 'Corpos de água'
    }
    code = str(clc)
    for pattern, mega_class in mega_classes.items():
        if re.search(pattern, code) is not None:
            return mega_class
    return 'Not Defined'
# Sanity-check the mapping on the first row, then write a coarse
# 'Mega_Legenda' label for every shapefile row.
x = shapefile['CLC2012'][0]
print (x)
class_from_CLCcode (x)
for i in shapefile.index:
clc = shapefile.at[i, 'CLC2012']
new_class = class_from_CLCcode(clc)
shapefile.at[i, 'Mega_Legenda'] = new_class
shapefile.head()
```
# Setting up label data with bands
```
def setup_shafile_to_data(shapefile,full_dataset) :
    """Keep only the label columns of *shapefile* and reproject it to the
    CRS of *full_dataset* when the two differ."""
    wanted_columns = ['Legenda','Mega_Legenda', 'geometry','CLC2012']
    shapefile = shapefile.filter(wanted_columns)
    print("Data crs: " + str(full_dataset.crs))
    print("Before crs: " + str(shapefile.crs))
    if shapefile.crs != full_dataset.crs :
        # Reproject only when needed; to_crs returns a new frame.
        shapefile = shapefile.to_crs(full_dataset.crs)
    print("After crs: " + str(shapefile.crs))
    return shapefile
shapefile = setup_shafile_to_data(shapefile,full_dataset)
print("SIZE:",shapefile.size)
shapefile.head()
```
Choose target column = Legenda or Mega_Legenda
```
Class = 'Mega_Legenda'
#Class = 'Legenda'
unique= np.unique(shapefile[Class])
print('List of Land Cover Classes:')
unique
import matplotlib.pyplot as plt
unique, counts = np.unique(shapefile[Class], return_counts=True)
plt.bar(unique, counts, 1)
plt.title('Class Frequency')
plt.xlabel('Class')
plt.ylabel('Frequency')
plt.show()
print('Number of different classes :' + str(len(unique)))
print(unique)
band_count = full_dataset.count
transf = full_dataset.transform
full_dataset_array = img
cutted =full_dataset_array[:, xmin : xmax , ymin : ymax ]
cutted_test = full_dataset_array [:, xmin_test : xmax_test , ymin_test : ymax_test ]
print(cutted.shape)
cutted_test.shape
import numba
from numba import jit,prange
#@jit(parallel=True)
def setup_cell_noglobals(i,j,xmin,xmax, ymin, ymax , cutted ,transf , band_count,shapefile):
    """Return the shapefile row index of the polygon containing pixel (i, j)
    of the cut window, or -1 when no polygon contains it.

    The window offset (i, j) is translated back to map coordinates through
    the affine transform *transf* before the point-in-polygon test.
    """
    world_point = Point(transf * (xmin + i, ymin + j))
    containing = shapefile['geometry'][shapefile['geometry'].contains(world_point)]
    if containing.size > 0 :
        return containing.index[0]
    print("Not found in shapefile")
    return -1
#@jit(parallel=True)
def setup_X_and_y_noglobals(xmin,xmax, ymin, ymax , cutted ,transf , band_count,shapefile):
    """Build the feature matrix X (pixels x bands) and label vector y for
    every pixel of the cut window that falls inside a labelled polygon.

    Labels are read from the module-level column name *Class*; pixels
    outside every polygon are skipped.
    """
    feature_rows = []  # one band-vector per labelled pixel
    labels = []        # the matching class label per pixel
    for i in range(xmax-xmin):
        print(i)
        for j in range(ymax-ymin):
            result = setup_cell_noglobals(i,j,xmin,xmax, ymin, ymax , cutted ,transf , band_count,shapefile)
            if (result!=-1) :
                labels.append(shapefile.at[ result ,Class])
                feature_rows.append([cutted[band][i][j] for band in range(band_count)])
    # FIX: accumulate python lists and convert once. The original called
    # np.vstack / np.append inside the loop, copying the whole array on
    # every labelled pixel (O(n^2) overall).
    if feature_rows:
        rX = np.array(feature_rows)
        ry = np.array(labels)
    else:
        # preserve the original empty shapes/dtypes when nothing matched
        rX = np.array([], dtype=np.int8).reshape(0,band_count)
        ry = np.array([], dtype=np.string_)
    return rX,ry
X,y = setup_X_and_y_noglobals(xmin,xmax, ymin, ymax, cutted ,transf , band_count,shapefile)
X_test,y_test = setup_X_and_y_noglobals(xmin_test,xmax_test, ymin_test, ymax_test, cutted_test ,transf , band_count,shapefile)
print(y.shape)
print(y_test.shape)
# What are our classification labels?
labels = np.unique( y )
print('The training data include {n} classes: {classes}\n'.format(n=labels.size, classes=labels))
labels_test = np.unique( y_test )
print('The testing data include {n} classes: {classes}\n'.format(n=labels_test.size, classes=labels_test))
# We will need a "X" matrix containing our features, and a "y" array containing our labels
print('Our X matrix is sized: {sz}'.format(sz=X.shape))
print('Our y array is sized: {sz}'.format(sz=y.shape))
print('Our X_test matrix is sized: {sz}'.format(sz=X_test.shape))
print('Our y_test array is sized: {sz}'.format(sz=y_test.shape))
fig, ax = plt.subplots(1,3, figsize=[20,8])
# numbers 1-4
band_count = np.arange(1,5)
classes = np.unique(y)
for class_type in classes:
band_intensity = np.mean(X[y==class_type, :], axis=0)
ax[0].plot(band_count, band_intensity, label=class_type)
ax[1].plot(band_count, band_intensity, label=class_type)
ax[2].plot(band_count, band_intensity, label=class_type)
# plot them as lines
# Add some axis labels
ax[0].set_xlabel('Band #')
ax[0].set_ylabel('Reflectance Value')
ax[1].set_ylabel('Reflectance Value')
ax[1].set_xlabel('Band #')
ax[2].set_ylabel('Reflectance Value')
ax[2].set_xlabel('Band #')
#ax[0].set_ylim(32,38)
ax[1].set_ylim(32,38)
ax[2].set_ylim(70,140)
#ax.set
ax[1].legend(loc="upper right")
# Add a title
ax[0].set_title('Band Intensities Full Overview')
ax[1].set_title('Band Intensities Lower Ref Subset')
ax[2].set_title('Band Intensities Higher Ref Subset')
def str_class_to_int(class_array):
    """Replace the mega-class name strings in *class_array* by their fixed
    integer codes (in place) and return the array cast to int."""
    label_codes = {
        'Territórios artificializados': 0,
        'Agricultura': 1,
        'Floresta': 2,
        'Vegetação natural': 3,
        'Espaços descobertos ou com vegetação esparsa': 4,
        'Zonas húmidas': 5,
        'Corpos de água': 6,
    }
    for class_name, code in label_codes.items():
        class_array[class_array == class_name] = code
    return(class_array.astype(int))
def str_class_to_int_v2(class_array,class_test):
    """Encode class names as integers consistently across train and test.

    Codes follow the sorted order of the unique names in *class_array*;
    both arrays are modified in place and returned cast to int.
    """
    for code, class_name in enumerate(np.unique(class_array)):
        class_array[class_array == class_name] = code
        class_test[class_test == class_name] = code
    return(class_array.astype(int) , class_test.astype(int) )
print(np.unique(y))
print(np.unique(y_test))
y = str_class_to_int(y)
y_test = str_class_to_int(y_test)
print(np.unique(y))
print(np.unique(y_test))
```
# Supervised Classification Algorithm
```
from sklearn.svm import LinearSVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
#model = LinearSVC(C=1,verbose=1)
model = KNeighborsClassifier()
model.fit(X, y)
score = model.score(X_test, y_test)
score
class_prediction = model.predict(reshaped_img_test.reshape(-1, 4))
before = class_prediction.shape
# Reshape our classification map back into a 2D matrix so we can visualize it
class_prediction = class_prediction.reshape(reshaped_img_test[:, :, 0].shape)
class_prediction.shape
real_img = y_test
print(before)
print(real_img.shape)
print(y_test.shape)
# Reshape our classification map back into a 2D matrix so we can visualize it
real_class = real_img.reshape(reshaped_img_test[:, :, 0].shape) #fixme
real_class.shape
real_class = real_class.astype(int)
print(real_class)
class_prediction = class_prediction.astype(int)
class_prediction
```
# Visualizing results
```
# find the highest pixel value in the prediction image
n = int(np.max(class_prediction))
# next setup a colormap for our map
colors = dict((
(0, (60, 60, 60, 255)), # Grey - Territórios artificializados
(1, (20,230,20, 255)), # Light Green - Agricultura
(2, (5, 80, 5, 255)), # Dark Green - Floresta
(3, (170, 200, 20, 255)), # Yellow - Vegetação natural
(4, (50, 20, 0, 255)), # Brown - Espaços descobertos ou com vegetação esparsa
(5, (0, 250, 250, 255)), # Light Blue - Zonas húmidas
(6, (0, 0, 200, 255)), # Dark Blue - Corpos de água
))
# Put 0 - 255 as float 0 - 1
for k in colors:
v = colors[k]
_v = [_v / 255.0 for _v in v]
colors[k] = _v
index_colors = [colors[key] if key in colors else
(255, 255, 255, 0) for key in range(0, n+1)]
cmap = plt.matplotlib.colors.ListedColormap(index_colors, 'Classification', n+1)
def discrete_matshow(data):
    """Show *data* with a discrete RdBu colormap and integer colorbar ticks."""
    low, high = np.min(data), np.max(data)
    #get discrete colormap
    cmap = plt.get_cmap('RdBu', high - low + 1)
    # limits extended by .5 so each integer value gets a full color band
    mat = plt.matshow(data, cmap=cmap, vmin=low - .5, vmax=high + .5)
    #tell the colorbar to tick at integers
    cax = plt.colorbar(mat, ticks=np.arange(low, high + 1))
fig, axs = plt.subplots(1,2,figsize=(15,15))
axs[0].imshow(img_stretched_test)
prediction_plot= axs[1].imshow(class_prediction, cmap=cmap, interpolation='none')
fig.colorbar(prediction_plot, ax=axs[1],ticks=range(0,7),fraction=0.046, pad=0.04)
#real_plot = axs[2].imshow(real_class, cmap=cmap, interpolation='none')
#fig.colorbar(real_plot, ax=axs[2],ticks=range(0,6),fraction=0.046, pad=0.04)
fig.show()
fig, axs = plt.subplots(1,2,figsize=(15,15))
axs[0].imshow(img_stretched_test[50:150, 50:150])
prediction_plot = axs[1].imshow(class_prediction[50:150, 50:150], cmap=cmap, interpolation='none')
fig.colorbar(prediction_plot, ax=axs[1],ticks=range(0,7),fraction=0.046, pad=0.04)
fig.show()
alert() # audio alert
```
| github_jupyter |
## Getting Data
```
#import os
#import requests
#DATASET = (
# "https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data",
# "https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names"
#)
#def download_data(path='data', urls=DATASET):
# if not os.path.exists(path):
# os.mkdir(path)
#
# for url in urls:
# response = requests.get(url)
# name = os.path.basename(url)
# with open(os.path.join(path, name), 'wb') as f:
# f.write(response.content)
#download_data()
#DOWNLOAD AND LOAD IN DATA FROM URL!!!!!!!!!!!!!!!!!!!
#import requests
#import io
#data = io.BytesIO(requests.get('URL HERE'))
#whitedata = pd.read_csv(data.content)
#whitedata.head()
```
## Load Data
```
import pandas as pd
import yellowbrick
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import io
import sklearn
columns = [
"fixed acidity",
"volatile acidity",
"citric acid",
"residual sugar",
"chlorides",
"free sulfur dioxide",
"total sulfur dioxide",
"density",
"pH",
"sulphates",
"alcohol",
"quality"
]
reddata = pd.read_csv('data/winequality-red.csv', sep=";")
whitedata = pd.read_csv('data/winequality-white.csv', sep=";")
```
## Check it out
```
whitedata.head(10)
whitedata.describe()
whitedata.info()
whitedata.pH.describe()
whitedata['poor'] = np.where(whitedata['quality'] < 5, 1, 0)
whitedata['poor'].value_counts()
whitedata['expected'] = np.where(whitedata['quality'] > 4, 1, 0)
whitedata['expected'].value_counts()
whitedata['expected'].describe()
whitedata.head()
#set up the figure size
%matplotlib inline
plt.rcParams['figure.figsize'] = (30, 30)
#make the subplot
fig, axes = plt.subplots(nrows = 6, ncols = 2)
#specify the features of intersest
num_features = ['pH', 'alcohol', 'citric acid', 'chlorides', 'residual sugar', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'sulphates']
xaxes = num_features
yaxes = ['Counts', 'Counts', 'Counts', 'Counts', 'Counts', 'Counts', 'Counts', 'Counts', 'Counts']
#draw the histogram
axes = axes.ravel()
for idx, ax in enumerate(axes):
ax.hist(whitedata[num_features[idx]].dropna(), bins = 30)
ax.set_xlabel(xaxes[idx], fontsize = 20)
ax.set_ylabel(yaxes[idx], fontsize = 20)
ax.tick_params(axis = 'both', labelsize = 20)
features = ['pH', 'alcohol', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'sulphates']
classes = ['unexpected', 'expected']
# Feature matrix / target vector as NumPy arrays.
# DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.
X = whitedata[features].to_numpy()
y = whitedata['quality'].to_numpy()
# Quick baseline: fit three Naive Bayes variants and report accuracy.
# NOTE(review): these are scores on the *training* data, so they overestimate
# generalization performance; use a train/test split for a fair comparison.
from sklearn.naive_bayes import GaussianNB
viz = GaussianNB()
viz.fit(X, y)
viz.score(X, y)
from sklearn.naive_bayes import MultinomialNB
viz = MultinomialNB()
viz.fit(X, y)
viz.score(X, y)
from sklearn.naive_bayes import BernoulliNB
viz = BernoulliNB()
viz.fit(X, y)
viz.score(X, y)
```
## Features and Targets
```
y = whitedata["quality"]
y
X = whitedata.iloc[:,1:-1]
X
from sklearn.ensemble import RandomForestClassifier as rfc
estimator = rfc(n_estimators=7)
estimator.fit(X,y)
y_hat = estimator.predict(X)
print(y_hat)
%matplotlib notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
data = pd.read_csv(".../data/occupency.csv")  # TODO(review): placeholder path -- point at the real occupancy CSV
data.head()
# Column selection needs bracket indexing; the original *called* the
# DataFrame (data(...)), which raises TypeError.
X = data[['temperature', 'relative humidity', 'light', 'CO2', 'humidity']]
y = data['occupency']
def n_estimators_tuning(X, y, min_estimator=1, max_estimator=50, ax=None, save=None, cv=5):
    """Plot the cross-validated score of a random forest vs. its size.

    Fits a RandomForestClassifier for every n_estimators in
    [min_estimator, max_estimator] and plots the mean CV score with a
    +/- one-std band.

    Args:
        X, y: feature matrix and target vector.
        min_estimator, max_estimator: inclusive range of forest sizes.
        ax: matplotlib axes to draw on (a new figure is created if None).
        save: optional path; if given, the figure is saved there.
        cv: number of cross-validation folds (the original referenced an
            undefined global ``cv``; it is now an explicit parameter).

    Returns:
        The matplotlib axes the curve was drawn on.
    """
    if ax is None:
        _, ax = plt.subplots()
    means = []
    stds = []
    # original had np.arrange / min_estimators / mx_estimators typos
    n_estimators = np.arange(min_estimator, max_estimator + 1)
    for n in n_estimators:
        model = RandomForestClassifier(n_estimators=n)
        scores = cross_val_score(model, X, y, cv=cv)
        means.append(scores.mean())
        stds.append(scores.std())
    means = np.array(means)
    stds = np.array(stds)
    # plot the mean CV score (the original plotted the last fold's raw scores)
    ax.plot(n_estimators, means, label='CV={} scores'.format(cv))
    ax.fill_between(n_estimators, means - stds, means + stds, alpha=0.3)
    # mark the best mean score with a horizontal reference line
    max_score = means.max()
    ax.axhline(max_score, color='k', linestyle='--', alpha=0.5)
    ax.set_xlim(min_estimator, max_estimator)
    ax.set_xlabel("n_estimators")
    # NOTE(review): cross_val_score's default scoring for a classifier is
    # accuracy, not F1 -- pass scoring='f1' if the label is meant literally
    ax.set_ylabel("F1 Score")
    ax.set_title("Random Forest Hyperparameter Tuning")
    ax.legend(loc='best')
    if save:
        plt.savefig(save)
    return ax
#print(scores)
n_estimators_tuning(X, y)
# Tag each frame with its wine color before concatenating.
# The column label must be a string; the bare name `winecolor` was an
# undefined variable and raised NameError.
whitedata['winecolor'] = 0
reddata['winecolor'] = 1
df3 = [whitedata, reddata]
df = pd.concat(df3)
df.reset_index(drop = True, inplace = True)
df.isnull().sum()
whitedata.head()
reddata.head()
df = df.drop(columns=['poor', 'expected'])
df.isnull().sum()
df.head()
df.describe()
df['recommended'] = np.where(df['quality'] < 6, 0, 1)
df.head(50)
df["quality"].value_counts().sort_values(ascending = False)
df["recommended"].value_counts().sort_values(ascending = False)
df['recommended'] = np.where(df['quality'] < 6, 0, 1)
df["recommended"].value_counts().sort_values(ascending = False)
from pandas.plotting import radviz
plt.figure(figsize=(8,8))
radviz(df, 'recommended', color=['blue', 'red'])
plt.show()
features = ['pH', 'alcohol', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'sulphates']
#classes = ['unexpected', 'expected']
# DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy() instead.
X = df[features].to_numpy()
y = df['recommended'].to_numpy()
viz = GaussianNB()
viz.fit(X, y)
viz.score(X, y)
viz = MultinomialNB()
viz.fit(X, y)
viz.score(X, y)
viz = BernoulliNB()
viz.fit(X, y)
viz.score(X, y)
from sklearn.ensemble import RandomForestClassifier
viz = RandomForestClassifier()
viz.fit(X, y)
viz.score(X, y)
from sklearn.metrics import (auc, roc_curve, recall_score, accuracy_score, confusion_matrix, classification_report, f1_score, precision_score)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=11)
predicted = viz.predict(X_test)
cm = confusion_matrix(y_test, predicted)
fig = plt.figure(figsize=(7, 5))
ax = plt.subplot()
cm1 = (cm.astype(np.float64) / cm.sum(axis=1, keepdims=1))
cmap = sns.cubehelix_palette(light=1, as_cmap=True)
sns.heatmap(cm1, annot=True, ax = ax, cmap=cmap); #annot=True to annotate cells
# labels, title and ticks
ax.set_xlabel('Features');
ax.set_ylabel('Recommended');
ax.set_title('Normalized confusion matrix');
ax.xaxis.set_ticklabels(['Good', 'Bad']);
ax.yaxis.set_ticklabels(['Good', 'Bad']);
print(cm)
# Recursive Feature Elimination (RFE)
from sklearn.feature_selection import (chi2, RFE)
model = RandomForestClassifier()
rfe = RFE(model, 38)
fit = rfe.fit(X, y)
print("Num Features: ", fit.n_features_)
print("Selected Features: ", fit.support_)
print("Feature Ranking: ", fit.ranking_)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import make_classification
from yellowbrick.features import RFECV
sns.set(font_scale=3)
cv = StratifiedKFold(5)
oz = RFECV(RandomForestClassifier(), cv=cv, scoring='f1')
oz.fit(X, y)
oz.poof()
# Ridge
# Create a new figure
#mpl.rcParams['axes.prop_cycle'] = cycler('color', ['red'])
from yellowbrick.features.importances import FeatureImportances
from sklearn.linear_model import (LogisticRegression, LogisticRegressionCV, RidgeClassifier, Ridge, Lasso, ElasticNet)
fig = plt.gcf()
fig.set_size_inches(10,10)
ax = plt.subplot(311)
labels = features
viz = FeatureImportances(Ridge(alpha=0.5), ax=ax, labels=labels, relative=False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.grid(False) # Fit and display
viz.fit(X, y)
viz.poof()
estimator = RandomForestClassifier(class_weight='balanced')
estimator.fit(X_train, y_train)
# Probabilities must come from a *fitted* model's predict_proba; the
# original called the RandomForestClassifier constructor on X_test,
# which returns an unfitted estimator, not predictions.
y_pred_proba = estimator.predict_proba(X_test)
#y_pred_proba[:5]
def plot_roc_curve(y_test, y_pred_proba):
    """Draw a ROC curve, with the AUC shown in the legend.

    *y_pred_proba* is a 2-column probability array; column 1 is taken as
    the positive-class probability.
    """
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba[:, 1])
    area = auc(fpr, tpr)
    plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % area, color='darkblue')
    # chance-level diagonal for reference
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('False Positive Rate or (1 - Specifity)')
    plt.ylabel('True Positive Rate or (Sensitivity)')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.grid(False)
plot_roc_curve(y_test, y_pred_proba)
```
| github_jupyter |
```
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import matplotlib.pyplot as plt
from keras.callbacks import TensorBoard
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from time import time
batch_size = 128
num_classes = 10
epochs = 10
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
#tbCallBack = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=2, write_graph=True, write_images=True)
# NOTE(review): "/tmp/mnist/2".format(time()) is a no-op -- the string has no
# {} placeholder, so the log dir is literally /tmp/mnist/2 (which the later
# LOG_DIR constant happens to match). Use "/tmp/mnist/{}".format(time()) if a
# unique per-run directory was intended.
callback=[keras.callbacks.TensorBoard(log_dir="/tmp/mnist/2".format(time()), histogram_freq=1, write_graph=True, write_images=True)]
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
X = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=callback)
plt.plot(X.history['loss'])
plt.plot(X.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
weights, biases = model.layers[0].get_weights()
print("Weights\n")
print(weights)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
LOG_DIR = '/tmp/mnist/2'
get_ipython().system_raw(
'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
.format(LOG_DIR)
)
# Install
! npm install -g localtunnel
# Tunnel port 6006 (TensorBoard assumed running)
get_ipython().system_raw('lt --port 6006 >> url.txt 2>&1 &')
# Get url
! cat url.txt
weights, biases = model.layers[0].get_weights()
print(biases)
def plot_weights(layers, x, y):
    """Plot each slice of a Keras layer's weight tensor on an x-by-y grid.

    Args:
        layers: a Keras layer (e.g. model.layers[0]) whose weights to plot.
        x, y: number of subplot columns and rows, respectively.

    Returns:
        The matplotlib.pyplot module, so the caller can customize/show.
    """
    # Use the layer passed in; the original ignored this argument and
    # always read the global model's first layer.
    weights, biases = layers.get_weights()
    fig = plt.figure(figsize=(106, 108), dpi=80)
    for j in range(len(weights)):
        ax = fig.add_subplot(y, x, j + 1)
        # weights[j][0]: 2-D slice of the j-th kernel entry -- assumes the
        # layer's kernel is at least 3-D; TODO confirm intended view
        ax.matshow(weights[j][0], cmap=matplotlib.cm.binary)
        plt.xticks(np.array([]))  # hide tick marks on each tile
        plt.yticks(np.array([]))
    plt.rcParams['figure.figsize'] = 4.5, 4.
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    return plt
plot_weights(model.layers[0],8,4)
```
| github_jupyter |
# Generic DKRZ national archive ingest form
This form is intended to request data to be made locally available in the DKRZ national data archive, in addition to the data ingested as part of the CMIP6 replication. For replication requests, a separate form is available.
Please provide information on the following aspects of your data ingest request:
* scientific context of data
* specific data access rights
* technical details, like
* amount of data
* source of data
```
from dkrz_forms import form_widgets
form_widgets.show_status('form-submission')
```
## Please provide information to unlock your form
- last name
- password
```
from dkrz_forms import form_handler, form_widgets
#please provide your last name - replacing ... below
MY_LAST_NAME = "ki"
form_info = form_widgets.check_pwd(MY_LAST_NAME)
sf = form_handler.init_form(form_info)
form = sf.sub.entity_out.form_info
import pprint
from dkrz_forms import form_handler
pprint.pprint(form_handler.form_to_dict(sf))
```
## Please provide the following information
Please provide some generic context information about the data, which should be availabe as part of the DKRZ CMIP Data Pool (CDP)
```
# (informal) type of data
form.data_type = "...." # e.g. model data, observational data, ..
# # free text describing scientific context of data
form.scientific_context ="..."
# free text describing the expected usage as part of the DKRZ CMIP Data pool
form.usage = "...."
# free text describing access rights (who is allowed to read the data)
form.access_rights = "...."
# generic terms of policy information
form.terms_of_use = "...." # e.g. unrestricted, restricted
# any additional comment on context
form.access_group = "...."
form.context_comment = "...."
```
## technical information concerning your request
```
# information on where the data is stored and can be accessed
# e.g. file system path if on DKRZ storage, url etc. if on web accessible resources (cloud,thredds server,..)
form.data_path = "...."
# timing constraints, when the data ingest should be completed
# (e.g. because the data source is only accessible in specific time frame)
form.best_ingest_before = "...."
# directory structure information, especially
form.directory_structure = "..." # e.g. institute/experiment/file.nc
form.directory_structure_convention = "..." # e.g. CMIP5, CMIP6, CORDEX, your_convention_name
form.directory_structure_comment = "..." # free text, e.g. with link describing the directory structure convention you used
# metadata information
form.metadata_convention_name = "..." # e.g. CF1.6 etc. None if not applicable
form.metadata_comment = "..." # information about metadata, e.g. links to metadata info etc.
```
## Check your submission form
Please evaluate the following cell to check your submission form.
In case of errors, please go back to the corresponding information cells and update your information accordingly...
```
# to be completed ..
```
# Save your form
Your form will be stored (the form name consists of your last name plus your keyword).
```
form_handler.save_form(sf,"..my comment..") # edit my comment info
```
# officially submit your form
the form will be submitted to the DKRZ team to process
you also receive a confirmation email with a reference to your online form for future modifications
```
form_handler.email_form_info(sf)
form_handler.form_submission(sf)
```
| github_jupyter |
# What is torch.nn ?
## MNIST data setup
We will use the classic MNIST dataset, which consists of black-and-white images of hand-drawn digits (between 0 and 9).
We will use pathlib for dealing with paths (part of the Python 3 standard library), and will download the dataset using requests. We will only import modules when we use them, so you can see exactly what’s being used at each point.
```
from pathlib import Path
import requests
DATA_PATH = Path('data')
PATH = DATA_PATH / 'mnist'
PATH.mkdir(parents=True, exist_ok=True)
URL = 'https://github.com/pytorch/tutorials/raw/master/_static/'
FILENAME = 'mnist.pkl.gz'
if not (PATH / FILENAME).exists():
content = requests.get(URL + FILENAME).content
(PATH / FILENAME).open('wb').write(content)
import pickle
import gzip
with gzip.open((PATH / FILENAME).as_posix(), 'rb') as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
```
Each image is 28 x 28, and is being stored as a flattened row of length 784 (=28x28). Let’s take a look at one; we need to reshape it to 2d first.
```
from matplotlib import pyplot
import numpy as np
print(x_train.shape)
pyplot.imshow(x_train[0].reshape((28, 28)), cmap='gray')
```
PyTorch uses torch.tensor, rather than numpy arrays, so we need to convert our data.
```
import torch
x_train, y_train, x_valid, y_valid = map(
torch.tensor, (x_train, y_train, x_valid, y_valid)
)
n, c = x_train.shape
x_train, x_train.shape, y_train.min(), y_train.max()
print(x_train, y_train)
print(x_train.shape)
print(y_train.min(), y_train.max())
```
## neural net from scratch (no torch.nn)
Let’s first create a model using nothing but PyTorch tensor operations. We’re assuming you’re already familiar with the basics of neural networks. (If you’re not, you can learn them at [course.fast.ai](https://course.fast.ai/)).
PyTorch provides methods to create random or zero-filled tensors, which we will use to create our weights and bias for a simple linear model. These are just regular tensors, with one very special addition: we tell PyTorch that they require a gradient. This causes PyTorch to record all of the operations done on the tensor, so that it can calculate the gradient during back-propagation _automatically_!
For the weights, we set `requires_grad` **after** the initialization, since we don’t want that step included in the gradient. (Note that a trailing `_` in PyTorch signifies that the operation is performed in-place.)
We are initializing the weights here with [Xavier initialisation](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf) (by multiplying with 1/sqrt(n)).
```
import math
# 10 neurons in the output layer
weights = torch.randn(784, 10) / math.sqrt(784)
weights.requires_grad_() # set requires_grad in-place
bias = torch.zeros(10, requires_grad=True)
print(weights.shape, bias.shape)
```
Thanks to PyTorch’s ability to calculate gradients automatically, we can use any standard Python function (or callable object) as a model!
So let’s just write a plain **matrix multiplication** and **broadcasted addition** to create a simple **linear model**.
We also need an **activation function**, so we’ll write _log_softmax_ and use it.
Remember: although PyTorch provides lots of pre-written loss functions, activation functions, and so forth, you can easily write your own using plain python. PyTorch will even create fast GPU or vectorized CPU code for your function automatically.
```
def log_softmax(x):
    """Log-softmax over the last dimension of *x*.

    Computed directly from the identity log(softmax(x)) = x - log(sum(exp(x))).
    """
    normalizer = x.exp().sum(-1).log().unsqueeze(-1)
    return x - normalizer
def model(xb):
    """Linear layer (global weights/bias) followed by log-softmax."""
    logits = xb @ weights + bias
    return log_softmax(logits)
```
In the above, the `@` stands for the **dot product** operation.
We will call our function on **one batch** of data (in this case, 64 images).
This is one $\color{yellow}{\textbf{forward}}$ pass.
Note that our predictions won’t be any better than random at this stage, since we start with random weights.
```
bs = 64 # batch size
xb = x_train[0 : bs] # one mini-batch from x
preds = model(xb)
print(preds[0], preds.shape)
```
As you see, the preds tensor contains not only the tensor values, but also a gradient function. We’ll use this later to do backprop.
Let’s implement **negative log-likelihood** to use as the **loss function** (again, we can just use standard Python):
```
def nll(input, target):
    """Negative log-likelihood: mean of -log p(target) over the batch.

    *input* holds per-class log-probabilities (one row per example);
    *target* holds the true class index for each row.
    """
    # pick the log-probability of the true class in every row
    picked = input[range(target.shape[0]), target]
    return -picked.mean()
loss_func = nll
```
Let’s check our loss with our random model, so we can see if we improve after a backprop pass later.
```
yb = y_train[0:bs]
print(loss_func(preds, yb))
```
Let’s also implement a function to calculate the **accuracy** of our model.
For each prediction, if the index with the largest value matches the target value, then the prediction was correct.
```
def accuracy(out, yb):
    """Fraction of rows of *out* whose argmax matches the label in *yb*."""
    predicted = torch.argmax(out, dim=1)
    hits = (predicted == yb).float()
    return hits.mean()
print(accuracy(preds, yb))
```
We can now run a training loop. For each iteration, we will:
- select a mini-batch of data (of size bs)
- use the model to make predictions
- calculate the loss
- loss.backward() updates the gradients of the model, in this case, weights and bias.
We now use these gradients to update the weights and bias. We do this within the `torch.no_grad()` context manager, because we do not want these actions to be recorded for our next calculation of the gradient. You can read more about how PyTorch’s Autograd records operations [here](https://pytorch.org/docs/stable/notes/autograd.html).
We then set the gradients to zero, so that we are ready for the next loop. Otherwise, our gradients would record a running tally of all the operations that had happened (i.e. `loss.backward()` **_adds_** the gradients to whatever is already stored, rather than replacing them).
You can use the standard python debugger to step through PyTorch code, allowing you to check the various variable values at each step. Uncomment `set_trace()` below to try it out.
```
from IPython.core.debugger import set_trace
lr = 0.5
epochs = 2
for epoch in range(epochs):
for i in range( (n-1) // bs + 1 ):
# set_trace()
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i : end_i]
yb = y_train[start_i : end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
with torch.no_grad():
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
```
That’s it: we’ve created and trained a minimal neural network (in this case, a logistic regression, since we have no hidden layers) entirely from scratch!
Let’s check the loss and accuracy and compare those to what we got earlier. We expect that the loss will have decreased and accuracy to have increased, and they have.
```
print(
loss_func(model(xb), yb),
accuracy(model(xb), yb)
)
```
## Using torch.nn.functional
We will now refactor our code, so that it does the same thing as before, only we’ll start taking advantage of PyTorch’s `nn` classes to make it more concise and flexible. At each step from here, we should be making our code one or more of: **shorter, more understandable, and/or more flexible**.
The first and easiest step is to make our code shorter by replacing our hand-written activation and loss functions with those from `torch.nn.functional` (which is generally imported into the namespace `F` by convention). This module contains all the **functions** in the `torch.nn` library (whereas other parts of the library contain **classes**). As well as a wide range of loss and activation functions, you’ll also find here some convenient functions for creating neural nets, such as pooling functions. (There are also functions for doing convolutions, linear layers, etc, but as we’ll see, these are usually better handled using other parts of the library.)
If you’re using negative log likelihood loss and log softmax activation, then Pytorch provides a single function `F.cross_entropy` that combines the two. So we can even remove the activation function from our model.
```
import torch.nn.functional as F
loss_func = F.cross_entropy
def model(xb):
return xb @ weights + bias
```
Note that we no longer call `log_softmax` in the model function.
Let’s confirm that our loss and accuracy are the same as before:
```
print(
loss_func(model(xb), yb),
accuracy(model(xb), yb)
)
```
## Refactor using nn.Module
Next up, we’ll use `nn.Module` and `nn.Parameter`, for a clearer and more concise training loop.
We subclass `nn.Module` (which itself is a class and able to keep track of state).
In this case, we want to create a class that holds our weights, bias, and method for the forward step.
`nn.Module` has a number of **attributes** and **methods** (such as `.parameters()` and `.zero_grad()`) which we will be using.
```
from torch import nn
class Mnist_Logistic(nn.Module):
    """Logistic-regression MNIST model with hand-managed weight/bias tensors."""

    def __init__(self):
        super().__init__()
        # Xavier-style init: scale random weights by 1/sqrt(fan_in)
        init = torch.randn(784, 10) / math.sqrt(784)
        self.weights = nn.Parameter(init)
        self.bias = nn.Parameter(torch.zeros(10))

    def forward(self, xb):
        # plain affine map; the activation lives in the loss function
        return xb @ self.weights + self.bias
```
Since we’re now using an **object** instead of just using a function, we first have to **instantiate** our model:
```
model = Mnist_Logistic()
```
Now we can calculate the loss in the same way as before.
Note that `nn.Module` objects are used as if they are **functions** (i.e they are callable), but behind the scenes Pytorch will call our forward method automatically.
```
print(loss_func(model(xb), yb))
```
Previously for our training loop we had to update the values for each parameter by name, and manually zero out the grads for each parameter separately, like this:
```
with torch.no_grad():
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
```
Now we can take advantage of `model.parameters()` and `model.zero_grad()` (which are both defined by PyTorch for nn.Module) to make those steps more concise and less prone to the error of forgetting some of our parameters, particularly if we had a more complicated model:
```
with torch.no_grad():
for p in model.parameters(): p -= p.grad * lr
model.zero_grad()
```
We’ll wrap our little training loop in a fit function so we can run it again later.
```
def fit():
    """Run the full minibatch-SGD training loop.

    Uses module-level globals: epochs, n, bs, x_train, y_train, model,
    loss_func, lr. Updates the model's parameters in place.
    """
    for epoch in range(epochs):
        # ceil(n / bs) minibatches per epoch
        for i in range( (n-1) // bs + 1 ):
            # set_trace()
            start_i = i * bs
            end_i = start_i + bs
            xb = x_train[start_i : end_i]
            yb = y_train[start_i : end_i]
            pred = model(xb)
            loss = loss_func(pred, yb)
            loss.backward()
            # manual SGD step, excluded from autograd recording
            with torch.no_grad():
                for p in model.parameters():
                    p -= p.grad * lr
                # reset gradients -- .backward() accumulates otherwise
                model.zero_grad()
fit()
```
Let’s double-check that our loss has gone down:
```
print(loss_func(model(xb), yb))
```
## Refactor using nn.Linear
We continue to refactor our code.
Instead of manually defining and initializing `self.weights` and `self.bias`, and calculating `xb @ self.weights + self.bias`, we will instead use the Pytorch class `nn.Linear` for a **linear** layer, which does all that for us.
Pytorch has many types of predefined layers that can greatly simplify our code, and often makes it faster too.
```
class Mnist_Logistic(nn.Module):
    """MNIST logistic regression built on a predefined nn.Linear layer."""

    def __init__(self):
        super().__init__()
        # nn.Linear owns and initializes the 784x10 weight matrix and the bias
        self.lin = nn.Linear(784, 10)

    def forward(self, xb):
        return self.lin(xb)
```
We instantiate our model and calculate the loss in the same way as before:
```
model = Mnist_Logistic()
print(loss_func(model(xb), yb))
```
We are still able to use our same fit method as before.
```
fit()
print(loss_func(model(xb), yb))
```
## Refactor using optim
Pytorch also has a package with various optimization algorithms, `torch.optim`.
We can use the `step` method from our optimizer to take a forward step, instead of manually updating each parameter.
This will let us replace our previous manually coded optimization step:
```
with torch.no_grad():
for p in model.parameters(): p -= p.grad * lr
model.zero_grad()
```
and instead use just:
```
opt.step()
opt.zero_grad()
```
`optim.zero_grad()` resets the gradient to 0 and we need to call it before computing the gradient for the next minibatch.
We’ll define a little function to create our model and optimizer so we can reuse it in the future.
```
from torch import optim
def get_model():
    """Return a fresh Mnist_Logistic model and an SGD optimizer over its parameters.

    Relies on the module-level learning rate `lr`.
    """
    model = Mnist_Logistic()
    return model, optim.SGD(model.parameters(), lr=lr)
model, opt = get_model()
print('before training:', loss_func(model(xb), yb))
for epoch in range(epochs):
for i in range( (n-1) // bs + 1 ):
# set_trace()
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i : end_i]
yb = y_train[start_i : end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print('after training:', loss_func(model(xb), yb))
```
## Refactor using Dataset
PyTorch has an abstract `Dataset` class. A Dataset can be anything that has a `__len__` function (called by Python’s standard len function) and a `__getitem__` function as a way of indexing into it.
[This tutorial](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html) walks through a nice example of creating a custom `FacialLandmarkDataset` class as a subclass of `Dataset`.
PyTorch’s `TensorDataset` is a Dataset wrapping tensors. By defining a length and way of indexing, this also gives us a way to iterate, index, and slice along the first dimension of a tensor. This will make it easier to access both the **independent** and **dependent** variables in the same line as we train.
```
from torch.utils.data import TensorDataset
```
Both `x_train` and `y_train` can be combined in a single `TensorDataset`, which will be easier to iterate over and slice.
```
train_ds = TensorDataset(x_train, y_train)
```
Previously, we had to iterate through minibatches of x and y values separately:
```
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
```
Now, we can do these two steps together:
```
xb,yb = train_ds[i*bs : i*bs+bs]
```
```
model, opt = get_model()
for epoch in range(epochs):
for i in range( (n-1) // bs + 1 ):
xb, yb = train_ds[i * bs : i * bs + bs]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print(loss_func(model(xb), yb))
```
## Refactor using DataLoader
Pytorch’s `DataLoader` is responsible for managing **batches**.
You can create a `DataLoader` from any Dataset. `DataLoader` makes it easier to **iterate** over batches.
Rather than having to use `train_ds[i*bs : i*bs+bs]`, the DataLoader gives us each **minibatch** automatically.
```
from torch.utils.data import DataLoader
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs)
```
Previously, our loop iterated over batches (xb, yb) like this:
```
for i in range((n-1)//bs + 1):
xb,yb = train_ds[i*bs : i*bs+bs]
pred = model(xb)
```
Now, our loop is much cleaner, as (xb, yb) are loaded automatically from the data loader:
```
for xb,yb in train_dl:
pred = model(xb)
```
```
model, opt = get_model()
for epoch in range(epochs):
for xb, yb in train_dl:
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print(loss_func(model(xb), yb))
```
Thanks to Pytorch’s `nn.Module`, `nn.Parameter`, `Dataset`, and `DataLoader`, our training loop is now dramatically smaller and easier to understand.
Let’s now try to add the basic features necessary to create effective models in practice.
## Add validation
In section 1, we were just trying to get a reasonable training loop set up for use on our training data.
In reality, you always should also have a **validation set**, in order to identify if you are overfitting.
**Shuffling** the training data is important to prevent correlation between batches and overfitting.
On the other hand, the validation loss will be identical whether we shuffle the validation set or not. Since shuffling takes extra time, it makes no sense to shuffle the validation data.
We’ll use a batch size for the validation set that is twice as large as that for the training set. This is because the **validation set does not need _backpropagation_** and thus takes less memory (it doesn’t need to store the gradients). We take advantage of this to use a larger batch size and compute the loss more quickly.
```
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=bs*2)
```
We will calculate and print the **validation loss** at the end of each epoch.
(Note that we always call **`model.train()`** before training, and **`model.eval()`** before inference, because these are used by layers such as `nn.BatchNorm2d` and `nn.Dropout` to ensure appropriate behaviour for these different phases.)
```
model, opt = get_model()
for epoch in range(epochs):
model.train()
for xb, yb in train_dl:
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
model.eval()
with torch.no_grad():
valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)
print(epoch, valid_loss / len(valid_dl))
```
## Create fit() and get_data()
We’ll now do a little refactoring of our own.
Since we go through a similar process twice of calculating the loss for both the training set and the validation set, let’s make that into its own function, `loss_batch`, which computes the loss for one batch.
We pass an optimizer in for the training set, and use it to perform backprop.
For the validation set, we don’t pass an optimizer, so the method doesn’t perform backprop.
```
def loss_batch(model, loss_func, xb, yb, opt=None):
    """Compute the loss for one batch; if an optimizer is given, also backprop and step.

    Returns (loss_value, batch_size) so callers can form a weighted average.
    """
    batch_loss = loss_func(model(xb), yb)
    # Evaluation mode: no optimizer, so just report the loss.
    if opt is None:
        return batch_loss.item(), len(xb)
    # Training mode: backprop, update the weights, and clear the gradients.
    batch_loss.backward()
    opt.step()
    opt.zero_grad()
    return batch_loss.item(), len(xb)
```
`fit` runs the necessary operations to train our model and compute the training and validation losses for each epoch.
```
import numpy as np
def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    """Train `model` for `epochs` epochs, printing the validation loss after each.

    Bug fix: the original signature named this parameter `epoch`, immediately
    shadowed it with the loop variable, and silently read the *global*
    `epochs` instead — so the argument passed by the caller was ignored.
    """
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt)
        model.eval()
        with torch.no_grad():
            losses, nums = zip(
                *[loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]
            )
        # Weighted average of per-batch losses (the last batch may be smaller).
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)
        print(epoch, ':\t', val_loss)
```
`get_data` returns dataloaders for the training and validation sets.
```
def get_data(train_dl, valid_dl, bs):
    """Wrap the datasets in DataLoaders: shuffled batches of size `bs` for
    training, unshuffled batches of size 2*bs for validation.

    NOTE(review): despite their names, the first two arguments are datasets,
    not loaders — names kept for compatibility with existing callers.
    """
    train_loader = DataLoader(train_dl, batch_size=bs, shuffle=True)
    valid_loader = DataLoader(valid_dl, batch_size=bs * 2)
    return train_loader, valid_loader
```
Now, our whole process of obtaining the data loaders and fitting the model can be run in 3 lines of code:
```
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
model, opt = get_model()
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
```
You can use these basic 3 lines of code to train a wide variety of models.
Let’s see if we can use them to train a convolutional neural network (CNN)!
## Switch to CNN
We are now going to build our neural network with three convolutional layers.
Because none of the functions in the previous section assume anything about the model form, we’ll be able to use them to train a CNN without any modification.
We will use Pytorch’s predefined `Conv2d` class as our convolutional layer.
We define a CNN with 3 convolutional layers. Each convolution is followed by a ReLU. At the end, we perform an average pooling. (Note that **`view`** is PyTorch’s version of numpy’s reshape)
```
import torch.nn.functional as F
class Mnist_CNN(nn.Module):
    """Three-layer CNN for MNIST.

    Each stride-2 convolution halves the spatial size (28 -> 14 -> 7 -> 4);
    a final 4x4 average pool reduces each of the 10 class channels to a
    single score.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)
    def forward(self, xb):
        # Flat 784-long vectors -> single-channel 28x28 images.
        out = xb.view(-1, 1, 28, 28)
        # ReLU after each strided convolution.
        for conv in (self.conv1, self.conv2, self.conv3):
            out = F.relu(conv(out))
        # Average over the remaining 4x4 grid, then drop the spatial dims.
        out = F.avg_pool2d(out, 4)
        return out.view(-1, out.size(1))
```
`Momentum` is a variation on stochastic gradient descent that takes previous updates into account as well and generally leads to faster training.
```
lr = 0.1
model = Mnist_CNN()
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
```
## nn.Sequential
torch.nn has another handy class we can use to simplify our code: `Sequential`.
A `Sequential` object runs each of the modules contained within it, in a sequential manner. This is a simpler way of writing our neural network.
To take advantage of this, we need to be able to easily define a custom layer from a given function.
For instance, PyTorch doesn’t have a **view** layer, and we need to create one for our network.
The following class `Lambda` will create a layer that we can then use when defining a network with Sequential.
```
class Lambda(nn.Module):
    """Wrap an arbitrary function `func` as an nn.Module, so plain functions
    (e.g. reshaping) can be used as layers inside nn.Sequential."""
    def __init__(self, func):
        super().__init__()
        self.func = func
    def forward(self, x):
        # Simply delegate to the wrapped function.
        return self.func(x)
def preprocess(x):
    """Reshape a batch of flat 784-long vectors into (N, 1, 28, 28) images."""
    return x.view(-1, 1, 28, 28)
```
The model created with `Sequential` is simply:
```
model = nn.Sequential(
Lambda(preprocess),
nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.AvgPool2d(4),
Lambda(lambda x: x.view(x.size(0), -1)),
)
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
```
## Wrapping DataLoader
Our CNN is fairly concise, but it only works with MNIST, because:
- It assumes the input is a 28*28 long vector
- It assumes that the final CNN grid size is 4*4 (since that’s the average pooling kernel size we used)
Let’s get rid of these two assumptions, so our model works with any 2d single channel image. First, we can remove the initial Lambda layer by moving the data preprocessing into a generator:
```
def preprocess(x, y):
    """Reshape the input batch to (N, 1, 28, 28) images; labels pass through."""
    return x.view(-1, 1, 28, 28), y
class WrappedDataLoader:
    """Wrap a dataloader so that `func` is applied to every batch it yields."""

    def __init__(self, dl, func):
        self.dl = dl
        self.func = func

    def __len__(self):
        # Same number of batches as the underlying loader.
        return len(self.dl)

    def __iter__(self):
        # Unpack each batch into func's arguments and yield the transformed batch.
        for batch in self.dl:
            yield self.func(*batch)
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
```
Next, we can replace `nn.AvgPool2d` with `nn.AdaptiveAvgPool2d`, which allows us to define the size of the output tensor we want, rather than the input tensor we have. As a result, our model will work with any size input.
```
model = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
Lambda(lambda x: x.view(x.size(0), -1)),
)
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
```
## Using your GPU
If you’re lucky enough to have access to a CUDA-capable GPU (you can rent one for about $0.50/hour from most cloud providers) you can use it to speed up your code. First check that your GPU is working in Pytorch:
```
print(torch.cuda.is_available())
```
And then create a device object for it:
```
dev = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
```
Let’s update preprocess to move batches to the GPU:
```
def preprocess(x, y):
    """Reshape the batch to (N, 1, 28, 28) and move both tensors to the
    module-level device `dev` chosen above (GPU if available, else CPU)."""
    return x.view(-1, 1, 28, 28).to(dev), y.to(dev)
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
```
Finally, we can move our model to the GPU.
```
model.to(dev)
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
```
## Closing thoughts
We now have a general data pipeline and training loop which you can use for training many types of models using Pytorch. To see how simple training a model can now be, take a look at the mnist_sample sample notebook.
Of course, there are many things you’ll want to add, such as data augmentation, hyperparameter tuning, monitoring training, transfer learning, and so forth. These features are available in the fastai library, which has been developed using the same design approach shown in this tutorial, providing a natural next step for practitioners looking to take their models further.
We promised at the start of this tutorial we’d explain through example each of torch.nn, torch.optim, Dataset, and DataLoader. So let’s summarize what we’ve seen:
- torch.nn
- Module: creates a callable which behaves like a function, but can also contain state(such as neural net layer weights). It knows what Parameter (s) it contains and can zero all their gradients, loop through them for weight updates, etc.
- Parameter: a wrapper for a tensor that tells a Module that it has weights that need updating during backprop. Only tensors with the requires_grad attribute set are updated
- functional: a module(usually imported into the F namespace by convention) which contains activation functions, loss functions, etc, as well as non-stateful versions of layers such as convolutional and linear layers.
- torch.optim: Contains optimizers such as SGD, which update the weights of Parameter during the backward step
- Dataset: An abstract interface of objects with a __len__ and a __getitem__, including classes provided with Pytorch such as TensorDataset
- DataLoader: Takes any Dataset and creates an iterator which returns batches of data.
| github_jupyter |
# Draw an isochrone map with OSMnx
How far can you travel on foot in 15 minutes?
- [Overview of OSMnx](http://geoffboeing.com/2016/11/osmnx-python-street-networks/)
- [GitHub repo](https://github.com/gboeing/osmnx)
- [Examples, demos, tutorials](https://github.com/gboeing/osmnx-examples)
- [Documentation](https://osmnx.readthedocs.io/en/stable/)
- [Journal article/citation](http://geoffboeing.com/publications/osmnx-complex-street-networks/)
```
import geopandas as gpd
import matplotlib.pyplot as plt
import networkx as nx
import osmnx as ox
from descartes import PolygonPatch
from shapely.geometry import Point, LineString, Polygon
ox.config(log_console=True, use_cache=True)
ox.__version__
# configure the place, network type, trip times, and travel speed
place = 'Berkeley, CA, USA'
network_type = 'walk'
trip_times = [5, 10, 15, 20, 25] #in minutes
travel_speed = 4.5 #walking speed in km/hour
```
## Download and prep the street network
```
# download the street network
G = ox.graph_from_place(place, network_type=network_type)
# find the centermost node and then project the graph to UTM
gdf_nodes = ox.graph_to_gdfs(G, edges=False)
x, y = gdf_nodes['geometry'].unary_union.centroid.xy
center_node = ox.get_nearest_node(G, (y[0], x[0]))
G = ox.project_graph(G)
# add an edge attribute for time in minutes required to traverse each edge
meters_per_minute = travel_speed * 1000 / 60 #km per hour to m per minute
for u, v, k, data in G.edges(data=True, keys=True):
data['time'] = data['length'] / meters_per_minute
```
## Plot the nodes you can reach on foot within each travel time
How far can you walk in 5, 10, 15, 20, and 25 minutes from the origin node? We'll use NetworkX to induce a subgraph of G within each distance, based on trip time and travel speed.
```
# get one color for each isochrone
iso_colors = ox.get_colors(n=len(trip_times), cmap='Reds', start=0.3, return_hex=True)
# color the nodes according to isochrone then plot the street network
node_colors = {}
for trip_time, color in zip(sorted(trip_times, reverse=True), iso_colors):
subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
for node in subgraph.nodes():
node_colors[node] = color
nc = [node_colors[node] if node in node_colors else 'none' for node in G.nodes()]
ns = [20 if node in node_colors else 0 for node in G.nodes()]
fig, ax = ox.plot_graph(G, fig_height=8, node_color=nc, node_size=ns, node_alpha=0.8, node_zorder=2)
```
## Plot the time-distances as isochrones
How far can you walk in 5, 10, 15, 20, and 25 minutes from the origin node? We'll use a convex hull, which isn't perfectly accurate. A concave hull would be better, but shapely doesn't offer that.
```
# make the isochrone polygons
isochrone_polys = []
for trip_time in sorted(trip_times, reverse=True):
subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
node_points = [Point((data['x'], data['y'])) for node, data in subgraph.nodes(data=True)]
bounding_poly = gpd.GeoSeries(node_points).unary_union.convex_hull
isochrone_polys.append(bounding_poly)
# plot the network then add isochrones as colored descartes polygon patches
fig, ax = ox.plot_graph(G, fig_height=8, show=False, close=False, edge_color='k', edge_alpha=0.2, node_color='none')
for polygon, fc in zip(isochrone_polys, iso_colors):
patch = PolygonPatch(polygon, fc=fc, ec='none', alpha=0.6, zorder=-1)
ax.add_patch(patch)
plt.show()
```
## Or, plot isochrones as buffers to get more faithful isochrones than convex hulls can offer
in the style of http://kuanbutts.com/2017/12/16/osmnx-isochrones/
```
def make_iso_polys(G, edge_buff=25, node_buff=50, infill=False):
    """Build one isochrone polygon per trip time by buffering the reachable
    subgraph's nodes and edges, giving tighter shapes than a convex hull.

    Relies on the module-level globals `trip_times` (minutes) and
    `center_node` (the origin node), and on the 'time' edge attribute added
    earlier. Buffer distances are in the projected graph's units (meters
    after the UTM projection above).
    """
    isochrone_polys = []
    for trip_time in sorted(trip_times, reverse=True):
        # Subgraph of everything reachable within trip_time minutes of travel.
        subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
        node_points = [Point((data['x'], data['y'])) for node, data in subgraph.nodes(data=True)]
        nodes_gdf = gpd.GeoDataFrame({'id': subgraph.nodes()}, geometry=node_points)
        nodes_gdf = nodes_gdf.set_index('id')
        # Represent each edge as a straight line between its endpoint nodes.
        edge_lines = []
        for n_fr, n_to in subgraph.edges():
            f = nodes_gdf.loc[n_fr].geometry
            t = nodes_gdf.loc[n_to].geometry
            edge_lines.append(LineString([f,t]))
        # Buffer nodes and edges, then dissolve everything into one shape.
        n = nodes_gdf.buffer(node_buff).geometry
        e = gpd.GeoSeries(edge_lines).buffer(edge_buff).geometry
        all_gs = list(n) + list(e)
        new_iso = gpd.GeoSeries(all_gs).unary_union
        # try to fill in surrounded areas so shapes will appear solid and blocks without white space inside them
        if infill:
            new_iso = Polygon(new_iso.exterior)
        isochrone_polys.append(new_iso)
    return isochrone_polys
isochrone_polys = make_iso_polys(G, edge_buff=25, node_buff=0, infill=True)
fig, ax = ox.plot_graph(G, fig_height=8, show=False, close=False, edge_color='k', edge_alpha=0.2, node_color='none')
for polygon, fc in zip(isochrone_polys, iso_colors):
patch = PolygonPatch(polygon, fc=fc, ec='none', alpha=0.6, zorder=-1)
ax.add_patch(patch)
plt.show()
```
| github_jupyter |
***
***
# 15. 파이썬 함수
***
***
***
## 1 함수의 정의와 호출
***
- 함수: 여러 개의 Statement들을 하나로 묶은 단위
- 함수 사용의 장점
- 반복적인 수행이 가능하다
- 코드를 논리적으로 이해하는 데 도움을 준다
- 코드의 일정 부분을 별도의 논리적 개념으로 독립화할 수 있음
- 수학에서 복잡한 개념을 하나의 단순한 기호로 대치하는 것과 비슷
### 1-1 간단한 함수의 정의
- 함수 정의시 사용하는 키워드: def
```
def add(a, b):
return a + b
print(add(1, 2))
print()
def myabs(x):
if x < 0 :
x = -x
return x
print(abs(-4))
print(myabs(-4))
```
### 1-2 함수 객체와 함수 호출
- 함수의 이름 자체는 함수 객체의 레퍼런스(Reference)를 지니고 있다.
```
def add(a, b):
return a + b
print(add)
c = add(10, 30)
print(c)
```
- 함수 이름에 저장된 레퍼런스를 다른 변수에 할당하여 그 변수를 이용한 함수 호출 가능
```
f = add
print(f(4, 5))
print(f)
print(f is add)
```
- 함수의 몸체에는 최소한 한개 이상의 statement가 존재해야 함
- 아무런 내용이 없는 몸체를 지닌 함수를 만들 때에는 pass 라는 키워드를 몸체에 적어주어야 함
```
def simpleFunction():
pass
simpleFunction()
```
- 함수 사용 예
```
def addmember(members, newmember):
if newmember not in members: # 기존 멤버가 아니면
members.append(newmember) # 추가
members = ['hwang', 'lee', 'park', 'youn'] # 리스트에 초기 멤버 설정
addmember(members, 'jo') # 새로운 멤버 추가
addmember(members, 'hwang') # (이미 존재하는) 새로운 멤버 추가
print(members)
```
### 1-3 함수 인수값 전달방법
- 파이썬에서의 인수값 전달 방법
- 기본적으로 값에 의한 호출(Call-by-Value)
- 하지만 변수에 저장된 값이 참조값(Reference)이므로 실제로는 참조에 의한 호출(Call-by-Reference)로 실행됨
- 함수 인자에 변경불가능(Immutable) 객체인 숫자값을 전달
- 함수 내에서 다른 숫자값으로 치환 --> 의미 없는 인자 전달
```
def f1(b):
b = 100
a = 200
f1(a)
print(a)
```

- 함수 인자에 변경불가능(Immutable) 객체인 문자열을 전달
- 함수 내에서 다른 문자열로 치환 --> 의미 없는 인자 전달
```
def f2(b):
b = "abc"
a = "def"
f2(a)
print(a)
```

- 함수 인자에 변경불가능(Immutable) 객체인 튜플을 전달
- 함수 내에서 다른 튜플로 치환 --> 의미 없는 인자 전달
```
def f3(b):
b = (1,2,3)
a = (4,5,6)
f3(a)
print(a)
```

- 함수 인자에 변경가능한(Mutable)한 객체인 리스트 전달 및 내용 수정
- 전형적인 함수 인자 전달법 및 활용법
```
def f4(b):
b[1] = 10
a = [4,5,6]
f4(a)
print(a)
```

- 함수 인자에 변경가능한(Mutable)한 객체인 사전 전달 및 내용 수정
- 전형적인 함수 인자 전달법 및 활용법
```
def f5(b):
b['a'] = 10
a = {"a":1, "b":2}
f5(a)
print(a)
```

### 1-4 반환(return)문
- 인수 없이 return 문을 사용하면 실제로는 None 객체가 전달된다.
- None 객체: 파이썬 내장 객체로서 아무 값도 없음을 나타내기 위한 객체
```
def nothing():
return
print(nothing())
```
- return문 없이 리턴하기
```
def print_menu():
print('1. Snack')
print('2. Snake')
print('3. Snick')
print_menu()
```
- return문이 없는 함수라 해도, 실제로는 None 객체가 리턴됨
```
a = print_menu()
print(a)
```
- 한 개의 값을 리턴할 때
```
def abs_function(x):
if x < 0 : return -x
return x
print(abs_function(-10))
```
- 두 개 이상의 값을 리턴할 때
```
def swap(x, y):
return y, x # 튜플로 리턴된다.
a = 10
b = 20
print(a, b)
print()
a, b = swap(a, b) # 결과적으로 a, b = b, a와 동일
print(a, b)
print()
a = 10
b = 20
x = swap(a, b)
print(x[0], x[1]) # 하나의 이름으로 튜플을 받아서 처리할 수 도있다.
```
- 새로운 리스트를 리턴하는 함수의 예
- 문자열 리스트를 받아서 각 문자열의 길이 정보를 지닌 리스트를 리턴
```
def length_list(l):
res = []
for el in l:
res.append(len(el))
return res
l = ['python', 'pyson', 'pythong', 'pydon']
print(length_list(l))
l = ['python', 'pyson', 'pythong', 'pydon']
print([len(s) for s in l])
```
### 1-5 함수 인자에 대한 동적인 자료형 결정
- 파이썬에서는 모든 객체는 동적으로 (실행 시간에) 그 타입이 결정된다.
- 그러므로, 함수 인자는 함수가 호출되는 순간 해당 인자에 전달되는 객체에 따라 그 타입이 결정된다.
- 함수 몸체 내에서 사용되는 여러가지 연산자들은 함수 호출시에 결정된 객체 타입에 맞게 실행된다.
```
def add(a, b):
return a + b
c = add(1, 3.4)
d = add('dynamic', 'typing')
e = add(['list'], ['and', 'list'])
print(c)
print(d)
print(e)
```
***
## 2 함수 인수 처리
***
### 2-1 기본 인수 값
- 기본 인수 값
- 함수를 호출할 때 인수를 넘겨주지 않아도 인수가 기본적으로 가지는 값
```
def incr(a, step=1):
return a + step
b = 1
b = incr(b) # 1 증가
print(b)
b = incr(b, 10) # 10 증가
print(b)
```
- [주의] 함수 정의를 할 때 기본 값을 지닌 인수 뒤에 일반적인 인수가 올 수 없음
```
def incr(step=1, a):
return a + step
```
- 함수 정의 시에 여러 개의 기본 인수 값 정의 가능
```
def incr(a, step=1, step2=10):
return a + step + step2
print(incr(10))
```
### 2-2 키워드 인수
- 키워드 인수
- 인수 값 전달 시에 인수 이름과 함께 값을 전달하는 방식을 일컫는다.
```
def area(height, width):
return height * width
#순서가 아닌 이름으로 값이 전달
a = area(height='height string ', width=3)
print(a)
b = area(width=20, height=10)
print(b)
```
- 함수를 호출 할 때에 키워드 인수는 마지막에 놓여져야 한다.
```
print(area(20, width=5))
```
- [주의] 함수 호출시에 키워드 인수 뒤에 일반 인수 값이 올 수 없다.
```
area(width=5, 20)
# 기존: SyntaxError: non-keyword arg after keyword arg
```
- 기본 인수값 및 키워드 인수의 혼용
```
def incr(a, step=1, step2=10, step3=100):
return a + step + step2 + step3
print(incr(10, 2, step2=100))
```
- 함수 호출 시에 키워드 인수 뒤에 일반 인수 값이 오면 에러
```
def incr(a, step=1, step2=10, step3=100):
return a + step + step2 + step3
print(incr(10, 2, step2=100, 200))
# 기존: SyntaxError: non-keyword arg after keyword arg
def incr(a, step=1, step2=10, step3=100):
return a + step + step2 + step3
print(incr(10, 2, step2=100, step3=200))
```
### 2-3 가변 인수 리스트
- 함수 정의시에 일반적인 인수 선언 뒤에 <code>*args</code> 형식의 인수로 가변 인수를 선언할 수 있음
- The special syntax <code>*args</code> in function definitions in python is used to pass a variable number of arguments to a function.
```
def myFun(*args):
print(type(args), args)
print()
for arg in args:
print(arg)
myFun('Hello', 'Welcome', 'to', 'GeeksforGeeks')
```
- 함수 호출시 넣어주는 인수 값들 중 일반 인수에 할당되는 값을 제외한 나머지 값들만을 지닌 튜플 객체가 할당된다.
```
def varg(a, *args):
print(a, args)
varg(1)
varg(2, 3)
varg(2, 3, 4, 5, 6)
```
- C언어의 printf문과 유사한 형태의 printf 정의 방법
```
def printf(format, *args):
print(format % args)
printf("I've spent %d days and %d night to do this", 6, 5)
```
- The special syntax <code>**kwargs</code> in function definitions in Python is used to pass a keyworded, variable-length argument list.
```
def myFun(**kwargs):
print(type(kwargs), kwargs)
print()
for key, value in kwargs.items():
print("{0}: {1}".format(key, value))
myFun(first ='Geeks', mid ='for', last='Geeks')
def myFun(arg1, **kwargs):
for key, value in kwargs.items():
print ("{0} - {1}: {2}".format(arg1, key, value))
myFun("Hi", first ='Geeks', mid ='for', last='Geeks')
```
### 2-5 튜플 인수와 사전 인수로 함수 호출하기
- 함수 호출에 사용될 인수값들이 튜플에 있다면 "<code>*튜플변수</code>"를 이용하여 함수 호출이 가능
```
def h(a, b, c):
print(a, b, c)
args = (1, 2, 3)
h(*args)
```
- 함수 호출에 사용될 인수값들이 사전에 있다면 "<code>**사전변수</code>"를 이용하여 함수 호출이 가능
```
def h(a, b, c):
print(a, b, c)
dargs = {'aa':1, 'bb':2, 'cc':3}
h(*dargs)
dargs = {'a':1, 'b':2, 'c':3}
h(**dargs)
def myFun(arg1, arg2, arg3):
print("arg1:", arg1)
print("arg2:", arg2)
print("arg3:", arg3)
args = ("Geeks", "for", "Geeks")
myFun(*args)
kwargs = {"arg1" : "Geeks", "arg2" : "for", "arg3" : "Geeks"}
myFun(**kwargs)
def function(**arg):
for i in arg:
print (i, arg[i])
function(a=1, b=2, c=3, d=4)
```
- Asterisks in Python: what they are and how to use them
- https://treyhunner.com/2018/10/asterisks-in-python-what-they-are-and-how-to-use-them/
```
a, *b = [1, 2, 3, 4]
print(a)
print(b)
def func(a, *b):
print(a, b)
func(1, 2, 3, 4)
numbers = [2, 1, 3, 4, 7]
more_numbers = [*numbers, 11, 18]
print(more_numbers)
print(*more_numbers)
fruits = ['lemon', 'pear', 'watermelon', 'tomato']
print(fruits[0], fruits[1], fruits[2], fruits[3])
print(*fruits)
def transpose_list(list_of_lists):
return [list(row) for row in zip(*list_of_lists)]
t_l = transpose_list([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
print(t_l)
date_info = {'year': "2020", 'month': "01", 'day': "01"}
filename = "{year}-{month}-{day}.txt".format(**date_info)
print(filename)
fruits = ['lemon', 'pear', 'watermelon', 'tomato']
numbers = [2, 1, 3, 4, 7]
print(*numbers, *fruits)
date_info = {'year': "2020", 'month': "01", 'day': "01"}
track_info = {'artist': "Beethoven", 'title': 'Symphony No 5'}
filename = "{year}-{month}-{day}-{artist}-{title}.txt".format(
**date_info,
**track_info,
)
print(filename)
from random import randint
def roll(*dice):
print(type(dice), dice)
return sum([randint(1, die) for die in dice])
print(roll(20))
print(roll(6, 6))
print(roll(6, 6, 6))
def tag(tag_name, **attributes):
attribute_list = ["{0}='{1}'".format(name, value) for name, value in attributes.items()]
return "<{0} ".format(tag_name) + ' '.join(attribute_list) + ">"
print(tag('a', href="http://treyhunner.com"))
print(tag('img', height=20, width=40, src="face.jpg"))
```
<p style='text-align: right;'>참고 문헌: 파이썬(열혈강의)(개정판 VER.2), 이강성, FreeLec, 2005년 8월 29일</p>
| github_jupyter |
```
# Imports
import pandas as pd
import numpy as np
# machine learning
from sklearn import svm
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
# xgboost
import xgboost as xgb
# matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# 自定義的function
# 算 accuracy, precision, recall
def performance(clf, X_train, Y_train, cv_num = 4):
    """Print the mean cross-validated precision, recall and accuracy of clf.
    `cv_num` is the number of CV folds. (Python 2 print statements; relies
    on the sklearn cross_val_score imported at the top of the notebook.)"""
    scores = cross_val_score(clf, X_train, Y_train, cv=cv_num , scoring='precision')
    print "precision is {}".format(scores.mean())
    scores = cross_val_score(clf, X_train, Y_train, cv=cv_num , scoring='recall')
    print "recall is {}".format(scores.mean())
    scores = cross_val_score(clf, X_train, Y_train, cv=cv_num , scoring='accuracy')
    print "accuracy is {}".format(scores.mean())
# get titanic & test csv files as a DataFrame
train = pd.read_csv("/Users/wy/notebook/kaggle_competitions/titanic/train.csv")
test = pd.read_csv("/Users/wy/notebook/kaggle_competitions/titanic/test.csv")
test_passengerId = test['PassengerId']
train.info()
print "--------------"
test.info()
#Combine into data:
train['source']= 'train'
test['source'] = 'test'
data=pd.concat([train, test],ignore_index=True)
data.shape
# 稍微看一下 data長怎樣
data.head()
```
##Check missing:
```
data.apply(lambda x: sum(x.isnull()))
```
##Look at categories of all object variables:
```
var = ['Sex','Ticket','Cabin','Embarked']
for v in var:
print '\nFrequency count for variable %s'%v
print data[v].value_counts()
```
##Missing values on Embarked
```
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax = data.boxplot(column='Fare', by=['Embarked','Pclass'], ax=ax)
plt.axhline(y=80, color='green')
ax.set_title('', y=1.1)
data[data.Embarked.isnull()][['Fare', 'Pclass', 'Embarked']]
```
#### From the above boxplot, we should replace the missing value with 'C', because most passengers with Pclass 1 and a Fare around 80 embarked at C
```
data['Embarked'].fillna('C', inplace=True)
```
##Missing values on Fare
```
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
data[(data.Pclass==3)&(data.Embarked=='S')].Fare.hist(bins=100, ax=ax)
data[data.Fare.isnull()][['Pclass', 'Fare', 'Embarked']]
plt.xlabel('Fare')
plt.ylabel('Frequency')
plt.title('Histogram of Fare, Plcass 3 and Embarked S')
data[data.Fare.isnull()][['Pclass', 'Fare', 'Embarked']]
print ("The top 5 most common value of Fare")
data[(data.Pclass==3)&(data.Embarked=='S')].Fare.value_counts().head()
data['Fare'].fillna(8.05, inplace=True)
```
###Replace the missing value of Cabin with U0
```
data['Cabin_Missing'] = data['Cabin'].apply(lambda x: 1 if pd.isnull(x) else 0)
data['Cabin'].fillna('U0', inplace=True)
```
##Feature Engineering
###Create a feature, Names, to store the length of words in name.
```
import re
data['Names'] = data['Name'].map(lambda x: len(re.split(' ', x)))
```
###Create a feature, Title.
```
title = data['Name'].map(lambda x: re.compile(', (.*?)\.').findall(x)[0])
title[title=='Mme'] = 'Mrs'
title[title.isin(['Ms','Mlle'])] = 'Miss'
title[title.isin(['Don', 'Jonkheer'])] = 'Sir'
title[title.isin(['Dona', 'Lady', 'the Countess'])] = 'Lady'
title[title.isin(['Capt', 'Col', 'Major', 'Dr', 'Officer', 'Rev'])] = 'Officer'
data['Title'] = title
del title
```
###Create a feature, Deck. It may represents the socioeconomic status.
```
deck = data['Cabin'].map( lambda x : re.compile("([a-zA-Z]+)").search(x).group())
data['Deck'] = deck
del deck
data.head()
```
###Create a feature, Room. It may represents the geo lacation.
```
checker = re.compile("([0-9]+)")
def roomNum(x):
    """Extract the first room number from a Cabin string and add 1, so
    cabins without digits (e.g. the 'U0' placeholder group) map to 1.
    Relies on the module-level compiled regex `checker`."""
    nums = checker.search(x)
    if nums:
        return int(nums.group())+1
    else:
        return 1
rooms = data['Cabin'].map(lambda x: roomNum(x))
data['Cabin_Room'] = rooms / rooms.sum()
del checker, roomNum
data.head()
```
###Create a feature, Group_num. It may represents the size of family.
```
data['Group_num'] = data['Parch'] + data['SibSp'] + 1
```
###Create a feature, Group_size. When the size is between 2 and 4, more people are survived.
```
def groupSize(x):
    """Bucket a family size into 'S' (alone), 'M' (2-4), or 'L' (>4)."""
    if x == 1:
        return 'S'
    if x > 4:
        return 'L'
    return 'M'
group_size = data['Group_num'].map(lambda x: groupSize(x))
data['Group_size'] = group_size
data.head()
data.dtypes
```
###Normalized the fare.
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data['Nor_Fare'] = pd.Series(scaler.fit_transform(data['Fare'].values.reshape(-1,1)).reshape(-1), index=data.index)
```
###Numerical Coding:
```
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
var_to_encode = ['Embarked','Sex','Deck','Group_size','Title']
for col in var_to_encode:
data[col] = le.fit_transform(data[col])
```
###One-Hot Coding
```
data = pd.get_dummies(data, columns=var_to_encode)
data.columns
```
###Predict Age
```
label_y = data[data['source'] == 'train']['Survived']
from sklearn.model_selection import train_test_split
data.drop(labels=['PassengerId', 'Name', 'Cabin', 'Survived', 'Ticket', 'Fare'], axis=1, inplace=True)
X = data[data['Age'].notnull()].drop(['Age','source'], axis=1)
y = data[data['Age'].notnull()].Age
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
def get_model(estimator, parameters, X_train, y_train, scoring):
    """Grid-search `estimator` over `parameters` (scored by `scoring`) on the
    training data and return the best refitted estimator."""
    search = GridSearchCV(estimator, param_grid=parameters, scoring=scoring)
    search.fit(X_train, y_train)
    return search.best_estimator_
import xgboost as xgb
XGB = xgb.XGBRegressor(max_depth=4, seed= 42)
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
parameters = {'reg_alpha':np.linspace(0.1,1.0,5), 'reg_lambda': np.linspace(1.0,3.0,5)}
reg_xgb = get_model(XGB, parameters, X_train, y_train, scoring)
print (reg_xgb)
print ("Mean absolute error of test data: {}".format(mean_absolute_error(y_test, reg_xgb.predict(X_test))))
fig = plt.figure(figsize=(15, 6))
alpha = 0.5
data['Age'].value_counts().plot(kind='density', color='#FA2379', label='Before', alpha=alpha)
pred = reg_xgb.predict(data[data['Age'].isnull()].drop(['Age','source'], axis=1))
data.set_value(data['Age'].isnull(), 'Age', pred)
data['Age'].value_counts().plot(kind='density', label='After', alpha=alpha)
plt.xlabel('Age')
plt.title("What's the distribution of Age after predicting?" )
plt.legend(loc='best')
plt.grid()
```
###Separate train & test:
```
# label_y
train = data.loc[data['source']=='train']
test = data.loc[data['source']=='test']
train.drop('source',axis=1,inplace=True)
test.drop('source',axis=1,inplace=True)
```
##Build Model
```
def modelfit(alg, train, label_y,useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Fit an XGBoost classifier and report its training metrics.

    If useTrainCV, first runs xgb.cv to pick the boosting-round count and
    fixes n_estimators to it. Then fits on the full training data, prints
    train accuracy and AUC, and plots feature importances.
    (Python 2 print statements; relies on module-level xgb, metrics, pd, plt.)
    """
    if useTrainCV:
        # Use xgboost's built-in cross-validation with early stopping to
        # choose the number of boosting rounds before the final fit.
        xgb_param = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(train, label=label_y)
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
            metrics='auc', early_stopping_rounds=early_stopping_rounds)
        alg.set_params(n_estimators=cvresult.shape[0])
    #Fit the algorithm on the data
    alg.fit(train, label_y,eval_metric='auc')
    #Predict training set:
    dtrain_predictions = alg.predict(train)
    dtrain_predprob = alg.predict_proba(train)[:,1]
    #Print model report:
    print "\nModel Report"
    print "Accuracy : %.4g" % metrics.accuracy_score(label_y, dtrain_predictions)
    print "AUC Score (Train): %f" % metrics.roc_auc_score(label_y, dtrain_predprob)
    # NOTE(review): alg.booster() is from older xgboost releases; newer
    # versions renamed it to alg.get_booster() — confirm the installed version.
    feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
    feat_imp.plot(kind='bar', title='Feature Importances')
    plt.ylabel('Feature Importance Score')
from xgboost.sklearn import XGBClassifier
from sklearn import cross_validation, metrics
xgb1 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
# scale_pos_weight=1,
seed=27)
modelfit(xgb1, train, label_y)
#Grid seach on subsample and max_features
#Choose all predictors except target & IDcols
param_test1 = {
'max_depth':range(3,10,2),
'min_child_weight':range(1,6,2)
}
gsearch1 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, seed=27),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(train,label_y)
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
#Grid seach on subsample and max_features
#Choose all predictors except target & IDcols
param_test2 = {
'max_depth':[8,9,10,11,12],
'min_child_weight':[4,5,6]
}
gsearch2 = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=5,
min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4,seed=27),
param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch2.fit(train,label_y)
gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_
#Grid seach on subsample and max_features
#Choose all predictors except target & IDcols
param_test3 = {
'gamma':[i/10.0 for i in range(0,15)]
}
gsearch3 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=10,
min_child_weight=5, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4,seed=27),
param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch3.fit(train,label_y)
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
xgb2 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=10,
min_child_weight=5,
gamma=0.9,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
seed=27)
modelfit(xgb2, train, label_y)
#Grid seach on subsample and max_features
#Choose all predictors except target & IDcols
param_test4 = {
'subsample':[i/10.0 for i in range(6,10)],
'colsample_bytree':[i/10.0 for i in range(6,10)]
}
gsearch4 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch4.fit(train,label_y)
gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_
#Grid seach on subsample and max_features
#Choose all predictors except target & IDcols
param_test5 = {
'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
}
gsearch5 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch5.fit(train,label_y)
gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_
# Finer grid search on reg_alpha around the best coarse value (0.1).
param_test6 = {
    'reg_alpha':[0, 0.01, 0.05, 0.1, 0.16, 0.19]
}
gsearch6 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
                                                   min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
                                                   objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
                        param_grid = param_test6, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch6.fit(train,label_y)
gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_
# Refit with the tuned regularisation (reg_alpha) and sampling parameters added.
xgb3 = XGBClassifier(
    learning_rate =0.1,
    n_estimators=1000,
    max_depth=10,
    min_child_weight=5,
    gamma=0.9,
    subsample=0.7,
    colsample_bytree=0.7,
    reg_alpha=0.1,
    objective= 'binary:logistic',
    nthread=4,
    seed=27)
modelfit(xgb3, train, label_y)
# Final model: lower learning rate (0.01) with proportionally more trees,
# the standard last step of this tuning recipe.
xgb4 = XGBClassifier(
    learning_rate =0.01,
    n_estimators=5000,
    max_depth=10,
    min_child_weight=5,
    gamma=0.9,
    subsample=0.7,
    colsample_bytree=0.7,
    reg_alpha=0.1,
    objective= 'binary:logistic',
    nthread=4,
    scale_pos_weight=1,
    seed=27)
modelfit(xgb4, train, label_y)
```
##Make submission
```
# Predict on the held-out test set and write a Kaggle-style submission file.
test_predict = xgb4.predict(test)
submission = pd.DataFrame({
    "PassengerId": test_passengerId,
    "Survived": test_predict
})
# Kaggle expects integer 0/1 labels
submission['Survived'] = submission['Survived'].astype('int64')
submission.to_csv('/Users/wy/Desktop/titanic_xgboost2.csv', index=False)
```
| github_jupyter |
# Kmer frequency Bacillus
Generate code to embed Bacillus sequences by calculating kmer frequency
Important to note: this requires biopython version 1.77, because Bio.Alphabet was deprecated in 1.78 (September 2020). Alternatively, we could skip reducing the alphabet, but the resulting kmer frequency table would be very sparse and could be a computational nightmare.
Using the murphy10 reduced alphabet. There are other amino acid reduced alphabets could be tried as well https://biopython.org/docs/1.75/api/Bio.Alphabet.Reduced.html
Sequences containing 'X' have been excluded. The murphy10 alphabet has been used, with options to try alphabets with a different number of amino acids. Sequences longer than 1024 amino acids are also excluded.
```
#imports
import numpy as np
import pandas as pd
from Bio.Seq import Seq
from Bio import Alphabet
from Bio.Alphabet import Reduced
import itertools
from Bio import SeqIO
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
from collections import Counter
import seaborn as sns
from sklearn.utils.extmath import randomized_svd
from scipy.spatial import distance
import random
def seq_3mers(sequence):
    """Split *sequence* into its overlapping 3-mers.

    Args:
        sequence: string of residues.

    Returns:
        list of all ``len(sequence) - 2`` overlapping 3-character substrings,
        in order; an empty list when the sequence is shorter than 3.
    """
    # Slicing is clearer and cheaper than concatenating three indexed chars,
    # and avoids shadowing the function name with a local variable (the
    # original bound a local list also called `seq_3mers`).
    return [sequence[i:i + 3] for i in range(len(sequence) - 2)]
def murphy10(seq_str):
    """Reduce a standard 20-letter amino acid string to the Murphy 10 alphabet.

    Requires biopython <= 1.77 (Bio.Alphabet was removed in 1.78).

    Args:
        seq_str: amino acid sequence as a plain string.

    Returns:
        str: the sequence rewritten in the murphy_10 reduced alphabet.

    Raises:
        KeyError: if seq_str contains a residue not in murphy_10_tab
            (e.g. the ambiguity code 'X', which callers filter out upstream).
    """
    # Translate residue-by-residue via the lookup table and join once.
    # This replaces the original's repeated `Seq += str` concatenation,
    # which was quadratic in sequence length, while producing the same string.
    table = Alphabet.Reduced.murphy_10_tab
    return ''.join(table[aa] for aa in seq_str)
def seq_vector(seq, embedding):
    """Embed one sequence as the mean of its kmer vectors.

    Args:
        seq: sequence string (already in the reduced alphabet).
        embedding: pandas DataFrame whose columns are per-kmer vectors.

    Returns:
        numpy array of length len(embedding): the kmer vectors of *seq*
        averaged over the number of kmers it contains.
        (Raises ZeroDivisionError for sequences shorter than 3.)
    """
    kmers_of_seq = seq_3mers(seq)
    # Accumulate in place so the running total stays a numpy array even
    # though each embedding[kmer] lookup yields a pandas Series.
    total = np.zeros(len(embedding))
    for kmer in kmers_of_seq:
        total += embedding[kmer]
    # Normalise by the kmer count to obtain a frequency / mean vector.
    return total / len(kmers_of_seq)
def embedkmers_seqs(seqs, embedding):
    """Embed every sequence in *seqs* using the kmer-frequency *embedding*.

    Args:
        seqs: list of sequence strings.
        embedding: pandas DataFrame of per-kmer vectors (see seq_vector).

    Returns:
        numpy array of shape (len(seqs), len(embedding)), one row per sequence.
    """
    rows = np.zeros((len(seqs), len(embedding)))
    # Fill row i with the embedding of sequence i.
    for i, seq in enumerate(seqs):
        rows[i] = seq_vector(seq, embedding)
    return rows
# Load the sequences to embed, indexed by md5 hash.
embed_seqs_dict = SeqIO.index("../../sequences/bacillus_embeddingset.fa", 'fasta')
embed_seqs_keys = list(embed_seqs_dict.keys()) #gives md5 hashes of the sequences
embed_seqs = [str(embed_seqs_dict.get(key).seq) for key in embed_seqs_keys]
#get a random subset of 16763 sequences to embed (this was the number of sequences embedded for bacteroides)
# NOTE(review): `randint` is computed but never used, and the next line is a
# no-op self-assignment -- the random subsetting described above never happens.
# The intent was presumably: embed_seqs_keys = [embed_seqs_keys[i] for i in randint]
randint = random.sample(range(len(embed_seqs)), 16763)
embed_seqs_keys = embed_seqs_keys
#determine which sequences contain the invalid character 'X' and remove them from the set of sequences to embed
embed_seqs_containsX = ['X' not in seqs for seqs in embed_seqs]  # True == keep (no 'X')
keys_containsX = [embed_seqs_keys[i] for i in range(len(embed_seqs_keys)) if embed_seqs_containsX[i] == True]
embed_seqs = [str(embed_seqs_dict.get(key).seq) for key in keys_containsX]
embed_seqs_keys = keys_containsX
#remove sequences which contain more than 1024 amino acids
embed_seqs_1024 = [len(seqs)<= 1024 for seqs in embed_seqs]  # True == keep
keys_1024 = [embed_seqs_keys[i] for i in range(len(embed_seqs)) if embed_seqs_1024[i] == True]
embed_seqs = [str(embed_seqs_dict.get(key).seq) for key in keys_1024]
embed_seqs_keys = keys_1024
#generate a list of all possible kmers for the murphy10 alphabet
murphy10_sub = Alphabet.Reduced.murphy_10_tab
murphy10_l = set([d[1] for d in list(murphy10_sub.items())]) #set of letters in the murphy10 alphabet
k = 3 #initialise the length of the kmer
# All 10^3 possible 3-mers over the reduced alphabet
kmers = [''.join(kmer) for kmer in list(itertools.product(murphy10_l, repeat = k))]
#initialise an identity matrix the size of kmers to represent the kmer embedding (each 1 denotes a different kmer)
kmerfreq = np.identity(len(kmers))
#represent as a dataframe with rows and columns labelled by kmer
kmerfreq_df = pd.DataFrame(kmerfreq)
kmerfreq_df.columns = kmers
kmerfreq_df.index = kmers
#convert the sequences to the murphy 10 alphabet
embed_seqs_murphy10 = [murphy10(seq) for seq in embed_seqs]
#embed the sequences (rows = sequences, columns = kmer frequencies)
embed_kmerfreq = embedkmers_seqs(embed_seqs_murphy10, kmerfreq_df)
#read in the ontology info and filter out the sequences we choose to ignore
#need to read in some file which will map the sequences to the known hierarchical classification (KEGG, Subsystems)
sub_sys = pd.read_csv('../../subsystems_labels/bacillus.ids.tsv', sep = '\t', header = None)
sub_sys.columns = ['fig_ID', 'species', 'superclass', 'class', 'subclass', 'subsystem', 'product role_name']
#get the file which takes the md5 hashes to the fig IDs
md5_fig = pd.read_csv('../../subsystems_labels/bacillus.md5.ids', sep = '\t', header = None)
md5_fig.columns = ['md5', 'fig_ID']
#assemble a dataframe mapping each embedded sequence key (md5) to its fig_ID rows
seqs_keys_figID = pd.concat([md5_fig[md5_fig['md5'] == key] for key in embed_seqs_keys])
# NOTE(review): the two lines below recompute embed_seqs_murphy10 and
# embed_kmerfreq exactly as in the previous cell -- redundant and slow.
embed_seqs_murphy10 = [murphy10(seq) for seq in embed_seqs]
embed_kmerfreq = embedkmers_seqs(embed_seqs_murphy10, kmerfreq_df)
#make fig_ID -> label tables for subclass/subsystem, collapsing multi-label IDs to 'dual'
fig2subclass = sub_sys[['fig_ID', 'subclass']].drop_duplicates()
fig2subsystem = sub_sys[['fig_ID', 'subsystem']].drop_duplicates()
#change fig_IDs which have more than one subclass to 'dual'
duplicate_subclasses = pd.DataFrame(fig2subclass['fig_ID'].value_counts())
duplicate_subclasses = duplicate_subclasses[duplicate_subclasses['fig_ID'] > 1].index.values
duplicate_removed_subclasses = fig2subclass[~fig2subclass['fig_ID'].isin(duplicate_subclasses)]
dual_vec = ['dual' for i in range(0,len(duplicate_subclasses))]
dual_subclasses = pd.DataFrame({'fig_ID': duplicate_subclasses, 'subclass': dual_vec})
fig2subclass = pd.concat([duplicate_removed_subclasses, dual_subclasses], axis = 0)
#change fig_IDs which have more than one subsystem to 'dual'
duplicate_subsystems = pd.DataFrame(fig2subsystem['fig_ID'].value_counts())
duplicate_subsystems = duplicate_subsystems[duplicate_subsystems['fig_ID'] > 1].index.values
duplicate_removed_subsystems = fig2subsystem[~fig2subsystem['fig_ID'].isin(duplicate_subsystems)]
dual_vec = ['dual' for i in range(0,len(duplicate_subsystems))]
dual_subsystems = pd.DataFrame({'fig_ID': duplicate_subsystems, 'subsystem': dual_vec})
fig2subsystem = pd.concat([duplicate_removed_subsystems, dual_subsystems], axis = 0)
#make these dataframes into dictionaries for fast lookup
subclass_dict = dict(zip(fig2subclass['fig_ID'].values, fig2subclass['subclass'].values))
subsystem_dict = dict(zip(fig2subsystem['fig_ID'].values, fig2subsystem['subsystem'].values))
#add per-sequence subsystem and subclass label columns via the dictionaries above
seqs_keys_figID['Subsystem'] = [subsystem_dict.get(fig_id) for fig_id in seqs_keys_figID['fig_ID']]
seqs_keys_figID['Subclass'] = [subclass_dict.get(fig_id) for fig_id in seqs_keys_figID['fig_ID']]
#collapse by subclass and subsystem
seqs_subclass = seqs_keys_figID[['md5', 'Subclass']].drop_duplicates()
seqs_subsystem = seqs_keys_figID[['md5', 'Subsystem']].drop_duplicates()
# NOTE(review): the line below repeats the previous line verbatim -- redundant.
seqs_subsystem = seqs_keys_figID[['md5', 'Subsystem']].drop_duplicates()
#find md5 hashes that still map to more than one subsystem
seqs_subsystem_count = Counter(seqs_subsystem['md5'].values)
count_df = pd.DataFrame.from_dict(seqs_subsystem_count, orient='index').reset_index()
duplicates = count_df[count_df[0]>1] #this gives the 2 sequences with duplicates
duplicates_md5 = duplicates['index'].values
duplicates_df = seqs_subsystem[seqs_subsystem['md5'].isin(duplicates_md5)]
#for duplicated md5s keep only the 'dual' row
duplicates_idx = duplicates_df[duplicates_df['Subsystem'] != 'dual'].index.values
seqs_subsystem = seqs_subsystem.drop(duplicates_idx)
# NOTE(review): this relabels every 'dual' subclass as one specific subclass --
# confirm this is intended rather than keeping a separate 'dual' category.
seqs_subclass = seqs_subclass.replace('dual', 'CO2 fixation and C-1 compound metabolism')
cmap = ('#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a' ) #colour map for the plots
#save the embedding - useful to save as can take a long time to run
embed_kmerfreqDf = pd.DataFrame(embed_kmerfreq, index = embed_seqs_keys)
embed_kmerfreqDf.columns = kmers
embed_kmerfreqDf.to_csv('kmer_frequency_bacillus.csv')
#do the PCA on the standardised kmer-frequency embedding
embedding_scaled = StandardScaler().fit_transform(embed_kmerfreq)
pca = PCA()
embedding_pca = pca.fit_transform(embedding_scaled)
#scree plot - percentage of variance explained by each principal component
per_var = np.round(pca.explained_variance_ratio_* 100, decimals=1)
labels = ['PC' + str(x) for x in range(1, len(per_var)+1)]
plt.bar(x=range(1,len(per_var)+1), height=per_var, tick_label=labels)
plt.ylabel('Percentage of Explained Variance')
plt.xlabel('Principal Component')
plt.title('Scree Plot')
plt.show()
#plot the first two principal components, coloured by subclass
labels = ['PC' + str(x) for x in range(1, len(per_var)+1)]
pca_df = pd.DataFrame(embedding_pca, columns=labels)
pca_df.index = embed_seqs_keys
# NOTE(review): assigning with .values assumes seqs_subclass rows are in the
# same order as embed_seqs_keys -- verify, otherwise labels are misaligned.
pca_df['Subclass'] = seqs_subclass['Subclass'].values
#sort the pca df by the subclass labels - allows to keep colours consistent between models
pca_df = pca_df.sort_values('Subclass')
#axis labels include the % variance explained by each component
x_label = 'PC1 ('+str(np.round(per_var[0],2))+"%)"
y_label = 'PC2 ('+str(np.round(per_var[1],2))+"%)"
sns.set(font_scale = 1.5)
fig, ax = plt.subplots(1, 1, figsize = (12,8))
ax.set_facecolor('white')
plt.xlabel(x_label, fontsize = 18)
plt.ylabel(y_label, fontsize = 18)
sns.scatterplot(x = 'PC1', y = 'PC2', hue = 'Subclass',data = pca_df, legend = 'full', s = 8,linewidth=0, alpha = 0.7, palette = cmap)
ax.spines['left'].set_color('black')
ax.spines['bottom'].set_color('black')
plt.legend([],[], frameon=False)
#do the tSNE on the same standardised embedding
tsne = TSNE(perplexity = 50, learning_rate = 100)
embedding_tsne = tsne.fit_transform(embedding_scaled)
#scatter plot of the two tSNE dimensions
tsne_df = pd.DataFrame(embedding_tsne, columns = ['Dimension 1', 'Dimension 2'])
tsne_df.index = embed_seqs_keys
#colour by subclass
tsne_df['Subclass'] = seqs_subclass['Subclass'].values
#sort so that the colouring is consistent
tsne_df = tsne_df.sort_values('Subclass')
sns.set(font_scale = 1.5)
fig, ax = plt.subplots(1, 1, figsize = (12,8))
ax.set_facecolor('white')
plt.xlabel('Dimension 1', fontsize = 18)
plt.ylabel('Dimension 2', fontsize = 18)
sns.scatterplot(x = 'Dimension 1', y = 'Dimension 2', hue = 'Subclass',data = tsne_df, s = 8,linewidth=0, alpha = 0.7, palette = cmap)
ax.spines['left'].set_color('black')
ax.spines['bottom'].set_color('black')
plt.legend([],[], frameon=False)
# NOTE(review): the block below repeats the plot above almost verbatim (only
# the order of the index/Subclass assignments differs); it produces an
# identical figure and appears redundant.
tsne_df = pd.DataFrame(embedding_tsne, columns = ['Dimension 1', 'Dimension 2'])
#colour by subclass
tsne_df['Subclass'] = seqs_subclass['Subclass'].values
tsne_df.index = embed_seqs_keys
#sort so that the colouring is consistent
tsne_df = tsne_df.sort_values('Subclass')
sns.set(font_scale = 1.5)
fig, ax = plt.subplots(1, 1, figsize = (12,8))
ax.set_facecolor('white')
plt.xlabel('Dimension 1', fontsize = 18)
plt.ylabel('Dimension 2', fontsize = 18)
sns.scatterplot(x = 'Dimension 1', y = 'Dimension 2', hue = 'Subclass',data = tsne_df, s = 8,linewidth=0, alpha = 0.7, palette = cmap)
ax.spines['left'].set_color('black')
ax.spines['bottom'].set_color('black')
plt.legend([],[], frameon=False)
```
| github_jupyter |
# Raven annotations
Raven Sound Analysis Software enables users to inspect spectrograms, draw time and frequency boxes around sounds of interest, and label these boxes with species identities. OpenSoundscape contains functionality to prepare and use these annotations for machine learning.
## Download annotated data
We published an example Raven-annotated dataset here: https://doi.org/10.1002/ecy.3329
```
from opensoundscape.commands import run_command
from pathlib import Path
```
Download the zipped data here:
```
# Download the zipped Powdermill dataset (Raven annotations + recordings).
link = "https://esajournals.onlinelibrary.wiley.com/action/downloadSupplement?doi=10.1002%2Fecy.3329&file=ecy3329-sup-0001-DataS1.zip"
name = 'powdermill_data.zip'
# NOTE(review): `name` is never used; the output filename is hard-coded below.
out = run_command(f"wget -O powdermill_data.zip {link}")
```
Unzip the files to a new directory, `powdermill_data/`
```
out = run_command("unzip powdermill_data.zip -d powdermill_data")
```
Keep track of the files we have now so we can delete them later.
```
files_to_delete = [Path("powdermill_data"), Path("powdermill_data.zip")]
```
## Preprocess Raven data
The `opensoundscape.raven` module contains preprocessing functions for Raven data, including:
* `annotation_check` - for all the selections files, make sure they all contain labels
* `lowercase_annotations` - lowercase all of the annotations
* `generate_class_corrections` - create a CSV to see whether there are any weird names
* Modify the CSV as needed. If you need to look up files you can use `query_annotations`
* Can be used in `SplitterDataset`
* `apply_class_corrections` - replace incorrect labels with correct labels
* `query_annotations` - look for files that contain a particular species or a typo
```
import pandas as pd
import opensoundscape.raven as raven
import opensoundscape.audio as audio

# Directory containing the raw Raven selection-table files
raven_files_raw = Path("./powdermill_data/Annotation_Files/")
```
### Check Raven files have labels
Check that all selections files contain labels under one column name. In this dataset the labels column is named `"species"`.
```
raven.annotation_check(directory=raven_files_raw, col='species')
```
### Create lowercase files
Convert all the text in the files to lowercase to standardize them. Save these to a new directory. They will be saved with the same filename but with ".lower" appended.
```
# Lowercase all annotations; outputs get a ".lower" suffix in a new directory.
raven_directory = Path('./powdermill_data/Annotation_Files_Standardized')
if not raven_directory.exists(): raven_directory.mkdir()
raven.lowercase_annotations(directory=raven_files_raw, out_dir=raven_directory)
```
Check that the outputs are saved as expected.
```
list(raven_directory.glob("*.lower"))[:5]
```
### Generate class corrections
This function generates a table that can be modified by hand to correct labels with typos in them. It identifies the unique labels in the provided column (here `"species"`) in all of the lowercase files in the directory `raven_directory`.
For instance, the generated table could be something like the following:
```
raw,corrected
sparrow,sparrow
sparow,sparow
goose,goose
```
```
print(raven.generate_class_corrections(directory=raven_directory, col='species'))
```
The released dataset has no need for class corrections, but if it did, we could save the return text to a CSV and use the CSV to apply corrections to future dataframes.
### Query annotations
This function can be used to print all annotations of a particular class, e.g. "amro" (American Robin)
```
output = raven.query_annotations(directory=raven_directory, cls='amro', col='species', print_out=True)
```
## Split Raven annotations and audio files
The Raven module's `raven_audio_split_and_save` function enables splitting of both audio data and associated annotations. It requires that the annotation and audio filenames are unique, and that each annotation file shares its filename with its corresponding audio file.
```
audio_directory = Path('./powdermill_data/Recordings/')
destination = Path('./powdermill_data/Split_Recordings')
# Split the audio and annotations into fixed-length clips; writes labels.csv
out = raven.raven_audio_split_and_save(
    # Where to look for Raven files
    raven_directory = raven_directory,
    # Where to look for audio files
    audio_directory = audio_directory,
    # The destination to save clips and the labels CSV to
    destination = destination,
    # The column name of the labels
    col = 'species',
    # Desired audio sample rate
    sample_rate = 22050,
    # Desired duration of clips
    clip_duration = 5,
    # Verbose (uncomment the next line to see progress--this cell takes a while to run)
    #verbose=True,
)
```
The results of the splitting are saved in the destination folder under the name `labels.csv`.
```
# Load the per-clip label table produced by the split
labels = pd.read_csv(destination.joinpath("labels.csv"), index_col='filename')
labels.head()
```
The `raven_audio_split_and_save` function contains several options. Notable options are:
* `clip_duration`: the length of the clips
* `clip_overlap`: the overlap, in seconds, between clips
* `final_clip`: what to do with the final clip if it is not exactly `clip_duration` in length (see API docs for more details)
* `labeled_clips_only`: whether to only save labeled clips
* `min_label_len`: minimum length, in seconds, of an annotation for a clip to be considered labeled. For instance, if an annotation only overlaps 0.1s with a 5s clip, you might want to exclude it with `min_label_len=0.2`.
* `species`: a subset of species to search for labels of (by default, finds all species labels in dataset)
* `dry_run`: if `True`, produces print statements and returns dataframe of labels, but does not save files.
* `verbose`: if `True`, prints more information, e.g. clip-by-clip progress.
For instance, let's extract labels for one species, American Redstart (AMRE), only saving clips that contain at least 1 second of label for that species (`min_label_len=1`). The "verbose" flag causes the function to print progress splitting each clip.
```
# Single-species split: keep only labeled clips with >= 1s of the target label.
# NOTE(review): the variable names say "btnw" but species='amre'
# (American Redstart) is extracted -- confirm which species is intended.
btnw_split_dir = Path('./powdermill_data/btnw_recordings')
out = raven.raven_audio_split_and_save(
    raven_directory = raven_directory,
    audio_directory = audio_directory,
    destination = btnw_split_dir,
    col = 'species',
    sample_rate = 22050,
    clip_duration = 5,
    clip_overlap = 0,
    verbose=True,
    species='amre',
    labeled_clips_only=True,
    min_label_len=1
)
```
The labels CSV only has a column for the species of interest:
```
# The labels CSV now only has a column for the selected species ('amre')
btnw_labels = pd.read_csv(btnw_split_dir.joinpath("labels.csv"), index_col='filename')
btnw_labels.head()
```
The split files and associated labels csv can now be used to train machine learning models (see additional tutorials).
The command below cleans up after the tutorial is done -- only run it if you want to delete all of the files.
```
from shutil import rmtree
# Remove everything downloaded/created by this tutorial.
for file in files_to_delete:
    if file.is_dir():
        rmtree(file)  # directories must be removed recursively
    else:
        file.unlink()  # plain files
```
| github_jupyter |
# CER043 - Install signed Master certificates
This notebook installs into the Big Data Cluster the certificates signed
using:
- [CER033 - Sign Master certificates with generated
CA](../cert-management/cer033-sign-master-generated-certs.ipynb)
## Steps
### Parameters
```
# Identity of the SQL Server master scaled set and certificate layout
app_name = "master"
scaledset_name = "master"
container_name = "mssql-server"
common_name = "master-svc"
# File ownership and permissions applied to the installed certificate files
user = "mssql"
group = "mssql"
mode = "550"
prefix_keyfile_name = "sql"
# Per-replica certificate/key filenames (one per master pod in an HA setup)
certificate_names = {"master-0" : "master-0-certificate.pem", "master-1" : "master-1-certificate.pem", "master-2" : "master-2-certificate.pem"}
key_names = {"master-0" : "master-0-privatekey.pem", "master-1" : "master-1-privatekey.pem", "master-2" : "master-2-privatekey.pem"}
test_cert_store_root = "/var/opt/secrets/test-certificates"
timeout = 600 # amount of time to wait before cluster is healthy: default to 10 minutes
check_interval = 10 # amount of time between health checks - default 10 seconds
min_pod_count = 10 # minimum number of healthy pods required to assert health
```
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
# Placeholders consulted by run(); populated with real content further below.
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
    """Run shell command, stream stdout, print stderr and optionally return output.

    Args:
        cmd: command line to execute (split with shlex for the platform).
        return_output: if True, capture stdout and return it instead of printing.
        no_output: if True, do not attach pipes (lets tools with progress bars run).
        retry_count: internal recursion depth used for transient-fault retries.
        base64_decode: if True (and return_output), base64-decode the captured output.
        return_as_json: accepted for interface compatibility.
            NOTE(review): this parameter is not used anywhere in this function.

    Raises:
        FileNotFoundError: if the executable cannot be located on the path.
        SystemExit: if the command returns a non-zero exit code.

    NOTES:

    1. Commands that need this kind of ' quoting on Windows e.g.:

           kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}

       Need to actually pass in as '"':

           kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}

       The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:

           `iter(p.stdout.readline, b'')`

       The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
    """
    MAX_RETRIES = 5
    output = ""
    retry = False

    # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
    #
    # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
    #
    if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
        cmd = cmd.replace("\n", " ")

    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)

    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()

    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)

        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"

    # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
    #
    if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
        cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")

    # To aid supportability, determine which binary file will actually be executed on the machine
    #
    which_binary = None

    # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
    # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break

    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])

    # Display an install HINT, so the user can click on a SOP to install the missing binary
    #
    if which_binary == None:
        print(f"The path used to search for '{cmd_actual[0]}' was:")
        print(sys.path)
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary

    start_time = datetime.datetime.now().replace(microsecond=0)
    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f" cwd: {os.getcwd()}")

    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #
    # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True

    try:
        if no_output:
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile(' "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))

                                    wait = False
                                    break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
        if wait:
            p.wait()
    except FileNotFoundError as e:
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e

    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()

    if not no_output:
        for line in iter(p.stderr.readline, b''):
            try:
                line_decoded = line.decode()
            except UnicodeDecodeError:
                # NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
                #
                # \xa0
                #
                # For example see this in the response from `az group create`:
                #
                # ERROR: Get Token request returned http error: 400 and server
                # response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
                # The refresh token has expired due to inactivity.\xa0The token was
                # issued on 2018-10-25T23:35:11.9832872Z
                #
                # which generates the exception:
                #
                # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
                #
                print("WARNING: Unable to decode stderr line, printing raw bytes:")
                print(line)
                line_decoded = ""
                pass
            else:
                # azdata emits a single empty line to stderr when doing an hdfs cp, don't
                # print this empty "ERR:" as it confuses.
                #
                if line_decoded == "":
                    continue
                print(f"STDERR: {line_decoded}", end='')
                if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                    exit_code_workaround = 1
                # inject HINTs to next TSG/SOP based on output in stderr
                #
                if user_provided_exe_name in error_hints:
                    for error_hint in error_hints[user_provided_exe_name]:
                        if line_decoded.find(error_hint[0]) != -1:
                            display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
                # Verify if a transient error, if so automatically retry (recursive)
                #
                if user_provided_exe_name in retry_hints:
                    for retry_hint in retry_hints[user_provided_exe_name]:
                        if line_decoded.find(retry_hint) != -1:
                            if retry_count < MAX_RETRIES:
                                print(f"RETRY: {retry_count} (due to: {retry_hint})")
                                retry_count = retry_count + 1
                                output = run(cmd, return_output=return_output, retry_count=retry_count)
                                if return_output:
                                    if base64_decode:
                                        import base64
                                        return base64.b64decode(output).decode('utf-8')
                                    else:
                                        return output
    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time

    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
        else:
            if exit_code_workaround !=0 :
                raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')

    print(f'\nSUCCESS: {elapsed}s elapsed.\n')

    if return_output:
        if base64_decode:
            import base64
            return base64.b64decode(output).decode('utf-8')
        else:
            return output
# Hints for tool retry (on transient fault), known errors and install guide.
# These tables are consulted by run() above, keyed by executable name.
#
retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', ], 'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond', ], 'python': [ ], }

error_hints = {'azdata': [['Please run \'azdata login\' to first authenticate', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Can\'t open lib \'ODBC Driver 17 for SQL Server', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ['NameError: name \'azdata_login_secret_name\' is not defined', 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', 'TSG124 - \'No credentials were supplied\' error from azdata login', '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', 'TSG126 - azdata fails with \'accept the license terms to use this product\'', '../repair/tsg126-accept-license-terms.ipynb'], ], 'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb'], ], 'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb'], ], }

install_hint = {'azdata': [ 'SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb' ], 'kubectl': [ 'SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb' ], }

print('Common functions defined successfully.')
```
### Get the Kubernetes namespace for the big data cluster
Get the namespace of the Big Data Cluster using the kubectl command-line
interface.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA_NAMESPACE, before starting Azure
Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable.
# Precedence: an explicit AZDATA_NAMESPACE environment variable wins;
# otherwise ask kubectl for the first namespace labelled MSSQL_CLUSTER.
if "AZDATA_NAMESPACE" in os.environ:
    namespace = os.environ["AZDATA_NAMESPACE"]
else:
    try:
        # {{...}} escapes the jsonpath braces inside the f-string
        namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
    except:
        # Lookup failed (no cluster, wrong context, bad kube config, ...);
        # print actionable hints, then re-raise so the notebook stops here.
        from IPython.display import Markdown
        print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
        display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
        raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```
### Create a temporary directory to stage files
```
# Create a temporary directory to hold configuration files
import tempfile
temp_dir = tempfile.mkdtemp()  # deleted at the end of the notebook via shutil.rmtree
print(f"Temporary directory created: {temp_dir}")
```
### Helper function to save configuration files to disk
```
# Define helper function 'save_file' to save configuration files to the temporary directory created above
import os
import io
def save_file(filename, contents):
    """Write `contents` to `filename` inside the staging directory `temp_dir`.

    Files are written UTF-8 with Unix line endings so they can be copied
    verbatim into Linux pods. `temp_dir` is a notebook-level global created
    earlier. (Improvement: the joined path is computed once instead of twice.)
    """
    path = os.path.join(temp_dir, filename)
    with io.open(path, "w", encoding='utf8', newline='\n') as text_file:
        text_file.write(contents)
    print("File saved: " + path)
print("Function `save_file` defined successfully.")
```
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable.
# If the kubernetes package is missing it is pip-installed on the fly
# (notebook-only pattern via the IPython `!` magic below).
import os
from IPython.display import Markdown
try:
    from kubernetes import client, config
    from kubernetes.stream import stream
except ImportError:
    # Install the Kubernetes module
    import sys
    !{sys.executable} -m pip install kubernetes
    try:
        from kubernetes import client, config
        from kubernetes.stream import stream
    except ImportError:
        # Still missing after install attempt: point at the SOP and stop.
        display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
        raise
# These two env vars are only set when running inside a Kubernetes pod,
# in which case in-cluster credentials are used instead of ~/.kube/config.
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
    config.load_incluster_config()
else:
    try:
        config.load_kube_config()
    except:
        display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
        raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
```
### Helper functions for waiting for the cluster to become healthy
```
import threading
import time
import sys
import os
from IPython.display import Markdown
# Shared flag polled by the background health-check thread (waitReady);
# wait_for_cluster_healthy() is expected to toggle it to start/stop polling.
isRunning = True
def all_containers_ready(pod):
    """Return True when every container in the given pod reports ready.

    Arguments:
        pod {v1Pod} -- pod metadata returned by the Kubernetes API.
    """
    return all(status.ready is True for status in pod.status.container_statuses)
def pod_is_ready(pod):
    """Return True if the pod counts as ready.

    A pod is considered ready when it is a job pod (job pods are allowed to
    have completed), or when it is Running with every container ready.

    Arguments:
        pod {v1Pod} -- pod metadata returned by the Kubernetes API.
    """
    if "job-name" in pod.metadata.labels:
        return True
    return pod.status.phase == "Running" and all_containers_ready(pod)
def waitReady():
    """Waits for all pods, and containers to become ready.

    Polls the cluster every `check_interval` seconds while the module-level
    `isRunning` flag stays True, returning True once at least `min_pod_count`
    pods exist and every pod passes pod_is_ready().
    NOTE(review): `check_interval` and `min_pod_count` are globals defined
    elsewhere in the notebook — confirm they are set before this runs.
    """
    while isRunning:
        try:
            time.sleep(check_interval)
            pods = get_pods()
            allReady = len(pods.items) >= min_pod_count and all(map(pod_is_ready, pods.items))
            if allReady:
                return True
            else:
                # Show what is still unhealthy, then loop and re-check.
                display(Markdown(get_pod_failures(pods)))
                display(Markdown(f"cluster not healthy, rechecking in {check_interval} seconds."))
        except Exception as ex:
            # Transient API errors are reported and retried rather than fatal.
            last_error_message = str(ex)
            display(Markdown(last_error_message))
            time.sleep(check_interval)
def get_pod_failures(pods=None):
    """Return an HTML fragment describing every pod/container that is not ready.

    Arguments:
        pods -- optional pod list; fetched via get_pods() when omitted.
    """
    if not pods:
        pods = get_pods()
    fragments = []
    for pod in pods.items:
        # Job pods are allowed to be complete; skip them entirely.
        if "job-name" in pod.metadata.labels:
            continue
        statuses = pod.status.container_statuses if pod.status else None
        if statuses:
            for container in statuses:
                if container.ready is False:
                    fragments.append("Container {0} in Pod {1} is not ready. Reported status: {2} <br/>".format(container.name, pod.metadata.name, container.state))
        else:
            fragments.append("Pod {0} is not ready. <br/>".format(pod.metadata.name))
    return "".join(fragments)
def get_pods():
    """Fetch pods from the BDC namespace if one is known, otherwise from all namespaces.

    Relies on the notebook-level globals `namespace` and `api`.
    """
    if namespace is None:
        display(Markdown('Checking all namespaces'))
        return api.list_pod_for_all_namespaces(_request_timeout=30)
    display(Markdown(f'Checking namespace {namespace}'))
    return api.list_namespaced_pod(namespace, _request_timeout=30)
def wait_for_cluster_healthy():
    """Block until the background waitReady() poller reports the cluster
    healthy, raising SystemExit after `timeout` seconds.

    BUGFIX: `isRunning` was previously assigned as a function local, so the
    module-level flag the waitReady thread polls was never actually changed —
    on timeout the worker thread kept polling forever. Declaring it global
    makes the flag genuinely shared, and it is now cleared *before* raising.
    NOTE(review): `timeout` is a global defined elsewhere in the notebook.
    """
    global isRunning
    isRunning = True
    mt = threading.Thread(target=waitReady)
    mt.start()
    mt.join(timeout=timeout)
    if mt.is_alive():
        isRunning = False  # signal the worker to stop before bailing out
        raise SystemExit("Timeout waiting for all cluster to be healthy.")
    isRunning = False
```
### Get name of the ‘Running’ `controller` `pod`
```
# Place the name of the 'Running' controller pod in variable `controller`.
# The field-selector filters out terminated/pending controller pods left over
# from upgrades, so exec/cp commands below target a live pod.
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
```
### Get the name of the `master` `pods`
```
# Place the name of the master pods in variable `pods`.
# jsonpath with items[*] emits the names space-separated on one line.
podNames = run(f'kubectl get pod --selector=app=master -n {namespace} -o jsonpath={{.items[*].metadata.name}}', return_output=True)
pods = podNames.split(" ")
print(f"Master pod names: {pods}")
```
### Validate certificate common name and alt names
```
import json
from urllib.parse import urlparse
# Resolve the well-known 'kubernetes.default' service from inside the
# controller pod to discover this cluster's DNS suffix (normally
# 'svc.cluster.local', but it can be customized per cluster).
kubernetes_default_record_name = 'kubernetes.default'
kubernetes_default_svc_prefix = 'kubernetes.default.svc'
default_dns_suffix = 'svc.cluster.local'
dns_suffix = ''
nslookup_output=run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "nslookup {kubernetes_default_record_name} > /tmp/nslookup.out; cat /tmp/nslookup.out; rm /tmp/nslookup.out" ', return_output=True)
# Pull the 'Name: <fqdn>' line out of the nslookup output.
name = re.findall('Name:\s+(.[^,|^\s|^\n]+)', nslookup_output)
if not name or kubernetes_default_svc_prefix not in name[0]:
    # Could not parse a usable FQDN; fall back to the conventional suffix.
    dns_suffix = default_dns_suffix
else:
    # e.g. 'kubernetes.default.svc.cluster.local' -> 'svc.cluster.local'
    dns_suffix = 'svc' + name[0].replace(kubernetes_default_svc_prefix, '')
# For every master pod, build the list of DNS names its certificate must
# carry, then verify the certificate staged on the controller matches both
# the expected common name and all required subject alternative names.
# Relies on notebook globals: common_name, app_name, test_cert_store_root,
# certificate_names, run().
pods.sort()
for pod_name in pods:
    alt_names = ""
    bdc_fqdn = ""
    alt_names += f"DNS.1 = {common_name}\n"
    alt_names += f"DNS.2 = {common_name}.{namespace}.{dns_suffix} \n"
    hdfs_vault_svc = "hdfsvault-svc"
    bdc_config = run("azdata bdc config show", return_output=True)
    bdc_config = json.loads(bdc_config)
    dns_counter = 3 # DNS.1 and DNS.2 are already in the certificate template
    # Stateful set related DNS names
    #
    if app_name == "gateway" or app_name == "master":
        alt_names += f'DNS.{str(dns_counter)} = {pod_name}.{common_name}\n'
        dns_counter = dns_counter + 1
        alt_names += f'DNS.{str(dns_counter)} = {pod_name}.{common_name}.{namespace}.{dns_suffix}\n'
        dns_counter = dns_counter + 1
    # AD related DNS names
    #
    if "security" in bdc_config["spec"] and "activeDirectory" in bdc_config["spec"]["security"]:
        domain_dns_name = bdc_config["spec"]["security"]["activeDirectory"]["domainDnsName"]
        subdomain_name = bdc_config["spec"]["security"]["activeDirectory"]["subdomain"]
        if subdomain_name:
            bdc_fqdn = f"{subdomain_name}.{domain_dns_name}"
        else:
            bdc_fqdn = f"{namespace}.{domain_dns_name}"
        alt_names += f"DNS.{str(dns_counter)} = {common_name}.{bdc_fqdn}\n"
        dns_counter = dns_counter + 1
        if app_name == "gateway" or app_name == "master":
            alt_names += f'DNS.{str(dns_counter)} = {pod_name}.{bdc_fqdn}\n'
            dns_counter = dns_counter + 1
    # Endpoint DNS names for bdc certificates
    #
    if app_name in bdc_config["spec"]["resources"]:
        app_name_endpoints = bdc_config["spec"]["resources"][app_name]["spec"]["endpoints"]
        for endpoint in app_name_endpoints:
            if "dnsName" in endpoint:
                alt_names += f'DNS.{str(dns_counter)} = {endpoint["dnsName"]}\n'
                dns_counter = dns_counter + 1
    # Endpoint DNS names for control plane certificates
    #
    if app_name == "controller" or app_name == "mgmtproxy":
        bdc_endpoint_list = run("azdata bdc endpoint list", return_output=True)
        bdc_endpoint_list = json.loads(bdc_endpoint_list)
        # Parse the DNS host name from:
        #
        #   "endpoint": "https://monitor.aris.local:30777"
        #
        for endpoint in bdc_endpoint_list:
            if endpoint["name"] == app_name:
                url = urlparse(endpoint["endpoint"])
                alt_names += f"DNS.{str(dns_counter)} = {url.hostname}\n"
                dns_counter = dns_counter + 1
    # Special case for the controller certificate
    #
    if app_name == "controller":
        alt_names += f"DNS.{str(dns_counter)} = localhost\n"
        dns_counter = dns_counter + 1
        # Add hdfsvault-svc host for key management calls.
        #
        alt_names += f"DNS.{str(dns_counter)} = {hdfs_vault_svc}\n"
        dns_counter = dns_counter + 1
        # Add hdfsvault-svc FQDN for key management calls.
        #
        if bdc_fqdn:
            alt_names += f"DNS.{str(dns_counter)} = {hdfs_vault_svc}.{bdc_fqdn}\n"
            dns_counter = dns_counter + 1
    # BUGFIX: the original pattern 'DNS\.[0-9] = ' only matched single-digit
    # indices, silently dropping DNS.10 and above from the required list and
    # weakening the validation below; '[0-9]+' matches any index.
    required_dns_names = re.findall(r'DNS\.[0-9]+ = ([^,|^\s|^\n]+)', alt_names)
    # Get certificate common name and DNS names
    # use nameopt compat, to generate CN= format on all versions of openssl
    #
    cert = run(f'kubectl exec {controller} -c controller -n {namespace} -- openssl x509 -nameopt compat -in {test_cert_store_root}/{app_name}/{certificate_names[pod_name]} -text -noout', return_output=True)
    subject = re.findall(r'Subject:(.+)', cert)[0]
    certificate_common_name = re.findall(r'CN=(.[^,|^\s|^\n]+)', subject)[0]
    certificate_dns_names = re.findall(r'DNS:(.[^,|^\s|^\n]+)', cert)
    # Validate the common name (remove the staged files on failure so a
    # re-run starts clean).
    #
    if (common_name != certificate_common_name):
        run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "rm -rf {test_cert_store_root}/{app_name}"')
        raise SystemExit(f'Certificate common name does not match the expected one: {common_name}')
    # Validate the DNS names
    #
    if not all(dns_name in certificate_dns_names for dns_name in required_dns_names):
        run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "rm -rf {test_cert_store_root}/{app_name}"')
        raise SystemExit(f'Certificate does not have all required DNS names: {required_dns_names}')
```
### Copy certificate files from `controller` to local machine
```
import os
# Pull each pod's certificate + private key PEM from the controller pod into
# the local staging directory.
cwd = os.getcwd()
os.chdir(temp_dir) # Use chdir to workaround kubectl bug on Windows, which incorrectly processes 'c:\' on kubectl cp cmd line
for pod_name in pods:
    run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{certificate_names[pod_name]} {certificate_names[pod_name]} -c controller -n {namespace}')
    run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{key_names[pod_name]} {key_names[pod_name]} -c controller -n {namespace}')
os.chdir(cwd)  # restore the original working directory
```
### Copy certificate files from local machine to `controldb`
```
import os
# Push the staged certificate + key PEMs into the controldb pod, where the
# SQL script below will read them via OPENROWSET BULK.
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
for pod_name in pods:
    run(f'kubectl cp {certificate_names[pod_name]} controldb-0:/var/opt/mssql/{certificate_names[pod_name]} -c mssql-server -n {namespace}')
    run(f'kubectl cp {key_names[pod_name]} controldb-0:/var/opt/mssql/{key_names[pod_name]} -c mssql-server -n {namespace}')
os.chdir(cwd)  # restore the original working directory
```
### Get the `controller-db-rw-secret` secret
Get the controller SQL symmetric key password for decryption.
```
import base64
# Kubernetes secrets are stored base64-encoded; decode to get the plaintext
# symmetric-key password used by the SQL script below.
controller_db_rw_secret = run(f'kubectl get secret/controller-db-rw-secret -n {namespace} -o jsonpath={{.data.encryptionPassword}}', return_output=True)
controller_db_rw_secret = base64.b64decode(controller_db_rw_secret).decode('utf-8')
print("controller_db_rw_secret retrieved")
```
### Update the files table with the certificates through opened SQL connection
```
import os
# T-SQL prologue: open the symmetric key so the file payloads can be stored
# encrypted in the controller database.
sql = f"""
OPEN SYMMETRIC KEY ControllerDbSymmetricKey DECRYPTION BY PASSWORD = '{controller_db_rw_secret}'
DECLARE @FileData VARBINARY(MAX), @Key uniqueidentifier;
SELECT @Key = KEY_GUID('ControllerDbSymmetricKey');
"""
# For each master pod, append statements that bulk-read its staged PEM files
# and upsert them (encrypted) into the controller database file table.
for pod_name in pods:
    insert = f"""
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{certificate_names[pod_name]}', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/{scaledset_name}/pods/{pod_name}/containers/{container_name}/files/{prefix_keyfile_name}-certificate.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '{user}',
@Group = '{group}',
@Mode = '{mode}';
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{key_names[pod_name]}', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/{scaledset_name}/pods/{pod_name}/containers/{container_name}/files/{prefix_keyfile_name}-privatekey.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0',
@User = '{user}',
@Group = '{group}',
@Mode = '{mode}';
"""
    sql += insert
save_file("insert_certificates.sql", sql)
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
# Copy the generated script into the controldb pod and execute it with sqlcmd
# (-b makes sqlcmd exit with an error code on SQL failure).
run(f'kubectl cp insert_certificates.sql controldb-0:/var/opt/mssql/insert_certificates.sql -c mssql-server -n {namespace}')
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "SQLCMDPASSWORD=`cat /var/run/secrets/credentials/mssql-sa-password/password` /opt/mssql-tools/bin/sqlcmd -b -U sa -d controller -i /var/opt/mssql/insert_certificates.sql" """)
# Clean up
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/insert_certificates.sql" """)
for pod_name in pods:
    run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{certificate_names[pod_name]}" """)
    run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{key_names[pod_name]}" """)
os.chdir(cwd)
```
### Clear out the controller_db_rw_secret variable
```
controller_db_rw_secret= ""
```
### Get the name of the `master` `pods`
```
# Place the name of the master pods in variable `pods` (refreshed here in
# case earlier steps changed the pod set).
podNames = run(f'kubectl get pod --selector=app=master -n {namespace} -o jsonpath={{.items[*].metadata.name}}', return_output=True)
pods = podNames.split(" ")
print(f"Master pod names: {pods}")
```
### Restart Pods
```
import threading
import time
if len(pods) == 1:
    # One master pod indicates non-HA environment, just delete it
    run(f'kubectl delete pod {pods[0]} -n {namespace}')
    wait_for_cluster_healthy()
else:
    # HA setup, delete secondaries before primary
    timeout_s = 300        # per-wait timeout, seconds
    check_interval_s = 20  # polling interval, seconds
    master_primary_svc_ip = run(f'kubectl get service master-p-svc -n {namespace} -o jsonpath={{.spec.clusterIP}}', return_output=True)
    master_password = run(f'kubectl exec master-0 -c mssql-server -n {namespace} -- cat /var/run/secrets/credentials/pool/mssql-system-password', return_output=True)
    def get_number_of_unsynchronized_replicas(result):
        # Count replicas not in SYNCHRONIZED state (synchronization_state 2);
        # writes the count into result[0] so the caller thread can read it.
        cmd = 'select count(*) from sys.dm_hadr_database_replica_states where synchronization_state <> 2'
        res = run(f"kubectl exec controldb-0 -c mssql-server -n {namespace} -- /opt/mssql-tools/bin/sqlcmd -S {master_primary_svc_ip} -U system -P {master_password} -h -1 -q \"SET NOCOUNT ON; {cmd}\" ", return_output=True)
        rows = res.strip().split("\n")
        result[0] = int(rows[0])
        return True
    def get_primary_replica():
        # Name of the current availability-group primary replica.
        cmd = 'select distinct replica_server_name from sys.dm_hadr_database_replica_states s join sys.availability_replicas r on s.replica_id = r.replica_id where is_primary_replica = 1'
        res = run(f"kubectl exec controldb-0 -c mssql-server -n {namespace} -- /opt/mssql-tools/bin/sqlcmd -S {master_primary_svc_ip} -U system -P {master_password} -h -1 -q \"SET NOCOUNT ON; {cmd}\" ", return_output=True)
        rows = res.strip().split("\n")
        return rows[0]
    def get_secondary_replicas():
        # Names of all secondary replicas, with sqlcmd noise filtered out.
        cmd = 'select distinct replica_server_name from sys.dm_hadr_database_replica_states s join sys.availability_replicas r on s.replica_id = r.replica_id where is_primary_replica = 0'
        res = run(f"kubectl exec controldb-0 -c mssql-server -n {namespace} -- /opt/mssql-tools/bin/sqlcmd -S {master_primary_svc_ip} -U system -P {master_password} -h -1 -q \"SET NOCOUNT ON; {cmd}\" ", return_output=True)
        rows = res.strip().split("\n")
        res = []
        for row in rows:
            # Drop empty lines and 'Sqlcmd: Warning' chatter.
            if (row != "" and "Sqlcmd: Warning" not in row):
                res.append(row.strip())
        return res
    def all_replicas_syncrhonized():
        # NOTE(review): function name typo ('syncrhonized') kept as-is since it
        # is referenced below. Loops until the unsynchronized count hits zero;
        # each count query runs in its own thread so a hung sqlcmd can time out.
        while True:
            unsynchronized_replicas_cnt = len(pods)
            rows = [None]
            time.sleep(check_interval_s)
            getNumberOfReplicasThread = threading.Thread(target=get_number_of_unsynchronized_replicas, args=(rows,) )
            getNumberOfReplicasThread.start()
            getNumberOfReplicasThread.join(timeout=timeout_s)
            if getNumberOfReplicasThread.is_alive():
                raise SystemExit("Timeout getting the number of unsynchronized replicas.")
            unsynchronized_replicas_cnt = rows[0]
            if (unsynchronized_replicas_cnt == 0):
                return True
    def wait_for_replicas_to_synchronize():
        # Run the synchronization wait on a worker thread with an overall timeout.
        waitForReplicasToSynchronizeThread = threading.Thread(target=all_replicas_syncrhonized)
        waitForReplicasToSynchronizeThread.start()
        waitForReplicasToSynchronizeThread.join(timeout=timeout_s)
        if waitForReplicasToSynchronizeThread.is_alive():
            raise SystemExit("Timeout waiting for all replicas to be synchronized.")
    # Delete each secondary pod, waiting for full synchronization in between.
    secondary_replicas = get_secondary_replicas()
    for replica in secondary_replicas:
        wait_for_replicas_to_synchronize()
        run(f'kubectl delete pod {replica} -n {namespace}')
    primary_replica = get_primary_replica()
    wait_for_replicas_to_synchronize()
    # Manually fail the availability group over to a synchronized secondary
    # (via the HA operator's internal REST API, authenticated with the
    # operator's client certificate) before deleting the old primary pod.
    key = "/var/run/secrets/certificates/sqlha/mssql-ha-operator-controller-client/mssql-ha-operator-controller-client-privatekey.pem"
    cert = "/var/run/secrets/certificates/sqlha/mssql-ha-operator-controller-client/mssql-ha-operator-controller-client-certificate.pem"
    content_type_header = "Content-Type: application/json"
    authorization_header = "Authorization: Certificate"
    data = f'{{"TargetReplicaName":"{secondary_replicas[0]}","ForceFailover":"false"}}'
    request_url = f'https://controller-svc:443/internal/api/v1/bdc/services/sql/resources/master/availabilitygroups/containedag/failover'
    manual_failover_api_command = f"curl -sS --key {key} --cert {cert} -X POST --header '{content_type_header}' --header '{authorization_header}' --data '{data}' {request_url}"
    operator_pod = run(f'kubectl get pod --selector=app=mssql-operator -n {namespace} -o jsonpath={{.items[0].metadata.name}}', return_output=True)
    run(f'kubectl exec {operator_pod} -c mssql-ha-operator -n {namespace} -- {manual_failover_api_command}')
    wait_for_replicas_to_synchronize()
    run(f'kubectl delete pod {primary_replica} -n {namespace}')
    wait_for_replicas_to_synchronize()
```
### Clean up certificate staging area
Remove the certificate files generated on disk (they have now been
placed in the controller database).
```
# Remove the certificate staging area inside the controller pod; the PEMs now
# live (encrypted) in the controller database.
cmd = f"rm -r {test_cert_store_root}/{app_name}"
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"')
```
### Clean up temporary directory for staging configuration files
```
# Delete the temporary directory used to hold configuration files
import shutil
shutil.rmtree(temp_dir)
print(f'Temporary directory deleted: {temp_dir}')
print("Notebook execution is complete.")
```
Related
-------
- [CER023 - Create Master certificates](../cert-management/cer023-create-master-certs.ipynb)
- [CER033 - Sign Master certificates with generated CA](../cert-management/cer033-sign-master-generated-certs.ipynb)
- [CER044 - Install signed Controller certificate](../cert-management/cer044-install-controller-cert.ipynb)
| github_jupyter |
# <span style="color:orange"> Exercise 12.2 </span>
## <span style="color:green"> Task </span>
Change the architecture of your DNN using convolutional layers. Use `Conv2D`, `MaxPooling2D`, `Dropout`, but also do not forget `Flatten`, a standard `Dense` layer and `soft-max` in the end. I have merged step 2 and 3 in the following definition of `create_CNN()` that **<span style="color:red">you should complete</span>**<br><br>
The cell below shows some information about the dataset, such as the size and a set of 10 images to reveal the appearance of the data.
```
import tensorflow as tf
tf.get_logger().setLevel('INFO')
from keras.datasets import mnist
from matplotlib import pyplot as plt
# NOTE(review): jit/njit are imported but never used in this notebook cell.
from numba import jit, njit
# load dataset (downloads MNIST on first use, then reads the local cache)
(trainX, trainy), (testX, testy) = mnist.load_data()
# summarize loaded dataset
print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape))
print('Test: X=%s, y=%s' % (testX.shape, testy.shape))
# plot first few images in a 3x3 grid
for i in range(9):
    # define subplot (330 + 1 + i selects cell i+1 of a 3x3 grid)
    plt.subplot(330 + 1 + i)
    # plot raw pixel data
    plt.imshow(trainX[i], cmap=plt.get_cmap('gray'))
# show the figure
plt.show()
```
### Preparing Data
The following functions respectively load the MNIST dataset and scale the pixels of the images. In fact, the images are made up of pixels whose values are integers describing a color between black and white, or an integer from 0 to 255. Therefore, the second function normalizes the values of the pixels to the range [0,1] after having converted them to floats.
```
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
# load train and test dataset
def load_dataset():
    """Load MNIST, add a single channel axis, and one-hot encode the labels.

    Returns:
        (trainX, trainY, testX, testY) ready for a Keras CNN.
    """
    (trainX, trainY), (testX, testY) = mnist.load_data()
    # reshape dataset to have a single channel: (n, 28, 28) -> (n, 28, 28, 1)
    trainX = trainX.reshape((-1, 28, 28, 1))
    testX = testX.reshape((-1, 28, 28, 1))
    # one hot encode target values
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    print(len(trainX),"-",len(trainY),"-",len(testX),"-",len(testY))
    return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
    """Convert image arrays to float32 and scale pixel values into [0, 1].

    Arguments:
        train, test -- integer pixel arrays in the range 0-255.
    Returns:
        (train_norm, test_norm) as float32 arrays.
    """
    normalized = [images.astype('float32') / 255.0 for images in (train, test)]
    return normalized[0], normalized[1]
```
### Defining model
The model that I used to define the convolutional neural network starts with a Convolutional Layer with a moderate amount of filters (32) and a filter size of (3,3). The layer is followed by a Max Pooling Layer, while the filter map is then flattened using a Flatten layer.
The output layer should consist of 10 nodes because the images must be classified across 10 different classes, while the same layer should follow the softmax activation function. The softmax activation function $\sigma : {\rm I\!R}^K \longrightarrow [0,1]^K$ is defined by: $$ \sigma(\vec{z})_i = \frac{e^{z_i}}{\sum_{j=1}^{K}e^{z_j}}, $$
used to normalize the output of a neural network to a probability distribution. Between the convolutional layers and the output layer, I introduced a dense layer with 100 neurons and a ReLU activation function. <br>
The model uses a Stochastic Descent Gradient (SDG) optimizer with a learning rate of 0.01 and a momentum of 0.9, a categorical cross-entropy loss function and will monitor the classification accuracy metric.
```
# define CNN model
def define_model(summarize:bool=False,save:bool=False):
    """Build and compile the baseline CNN for MNIST classification.

    Architecture: Conv2D(32, 3x3) -> MaxPooling(2x2) -> Flatten ->
    Dense(100, relu) -> Dense(10, softmax). Compiled with SGD
    (lr=0.01, momentum=0.9) and categorical cross-entropy.

    Args:
        summarize: print the model summary when True.
        save: persist the compiled model under ./models/ when True.
    Returns:
        The compiled Keras model.
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='softmax'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    if summarize:
        model.summary()
    if save:
        from pathlib import Path
        model_dir = "./models/"  # renamed from `dir` to avoid shadowing the builtin
        Path(model_dir).mkdir(parents=True, exist_ok=True)
        print("[MODEL] Saved model to directory: "+model_dir)
        model.save(model_dir)
    return model
```
#### Using a 5-fold validation
<img src="../media/images/kfold-validation.png" alt="Five-fold cross validation" width="800" height="600">
<br>The model will be evaluated using a five-fold cross validation. In general, a k-fold cross validation is used to effectively split a small part of the full dataset for testing purposes, while the rest is used for training purposes. The K-fold cross validation splits the dataset into K partitions of equal size, so that K-1 can be used for training while the last one can be used for testing. Finally, the procedure is repeated K times, with a different partition for each iteration. In the case of a five-fold cross validation, the dataset is split into 80/20, each time with a different portion used for testing. <br>
The model is trained with a standard batch size of 32 examples and with 10 epochs. The latter is lower than usual, primarily because the five-fold validation process requires five times more computational power than manual validation, while the number is still sufficient for accurate results.
```
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5, model=None):
    """Run n-fold cross-validation and collect per-fold accuracy + history.

    BUGFIX: previously, when `model` was None the model built in the first
    fold was rebound to the local `model` name and therefore *reused* (and
    kept training) in every later fold, invalidating the cross-validation.
    A fresh model is now built per fold unless the caller supplied one
    (supplied models are reused across folds, as before).

    Args:
        dataX, dataY: full training arrays (labels one-hot encoded).
        n_folds: number of cross-validation folds.
        model: optional pre-built Keras model to reuse across folds.
    Returns:
        (scores, histories): per-fold accuracy floats and Keras History objects.
    """
    scores, histories = list(), list()
    # prepare cross validation
    kfold = KFold(n_folds, shuffle=True, random_state=1)
    user_model = model
    fold_idx = 0  # renamed from `iter` to avoid shadowing the builtin
    for train_ix, test_ix in kfold.split(dataX):
        print("[EVAL] Running iteration {}".format(fold_idx))
        if user_model is None:
            # Fresh model each fold; the one trained on the last fold is saved.
            fold_model = define_model(save=(fold_idx == n_folds - 1))
        else:
            fold_model = user_model
        # select rows for train and test
        trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
        # fit model
        history = fold_model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
        # evaluate model
        _, acc = fold_model.evaluate(testX, testY, verbose=0)
        print('> %.3f' % (acc * 100.0))
        # store scores
        scores.append(acc)
        histories.append(history)
        fold_idx += 1
    return scores, histories
```
### Validation
The evaluation method defined in the previous cell returns a list of accuracy scores and training histories used to summarize and validate the model. The diagnostics function aims to plot the model performance for training and testing during each fold of the five-fold cross-validation, for both the loss and the accuracy of the model. The performance is then summarized by calculating the mean and standard deviation of the accuracy scores.
```
# plot diagnostic learning curves ---> training
def summarize_diagnostics(histories):
    """Plot loss and accuracy learning curves for every cross-validation fold.

    BUGFIX: the loss subplot called set_xlabel twice ("Epoch" then "Loss"),
    so the second call overwrote the x label and the y axis was never
    labelled; the loss label belongs on the y axis.

    Args:
        histories: list of Keras History objects, one per fold.
    """
    fig, ax = plt.subplots(2,1, gridspec_kw={'hspace': 0.6})
    for i in range(len(histories)):
        # plot loss
        ax[0].set_title('Cross Entropy Loss')
        ax[0].plot(histories[i].history['loss'], color='blue', label='train')
        ax[0].plot(histories[i].history['val_loss'], color='orange', label='test')
        ax[0].set_xlabel("Epoch")
        ax[0].set_ylabel("Loss")  # was set_xlabel("Loss"), clobbering "Epoch"
        # plot accuracy
        ax[1].set_title('Classification Accuracy')
        ax[1].plot(histories[i].history['accuracy'], color='blue', label='train')
        ax[1].plot(histories[i].history['val_accuracy'], color='orange', label='test')
        ax[1].set_xlabel("Epoch")
        ax[1].set_ylabel("Accuracy")
    pyplot.show()
# summarize model performance ---> testing
def summarize_performance(scores):
    """Print mean/std of the per-fold accuracies and show a box plot.

    Args:
        scores: list of per-fold accuracy values in [0, 1].
    """
    fold_count = len(scores)
    # print summary
    print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, fold_count))
    # box and whisker plots of results
    plt.boxplot(scores)
    plt.ylabel("Accuracy")
    plt.show()
# run the test harness for evaluating a model
def run(model=None):
    """End-to-end harness: load MNIST, scale pixels, cross-validate, report.

    Args:
        model: optional pre-built Keras model; when None, evaluate_model
            builds one internally.
    """
    # load dataset
    print("\r[RUN] Loading dataset..")
    trainX, trainY, testX, testY = load_dataset()
    # prepare pixel data
    print("\r[RUN] Preparing pixels")
    trainX, testX = prep_pixels(trainX, testX)
    # evaluate model
    print("\r[RUN] Creating and evaluating the model")
    scores, histories = evaluate_model(trainX, trainY, model=model)
    # learning curves
    print("\r[RUN] Learning curves")
    summarize_diagnostics(histories)
    # summarize estimated performance
    print("\r[RUN] Evaluating performance")
    summarize_performance(scores)
run()
```
## <span style="color:green"> Results </span>
When running the CNN, each fold of the cross-validation produces an excellent accuracy score, which means that the network is classifying the images very well. The plots show how the model is learning during each fold, where both <span style="color:blue"> training </span> and <span style="color:orange"> testing </span> curves converge, meaning that the data is not being over-fitted or under-fitted.<br>
To summarize the model performance, the model has an estimated accuracy of 98.7\%, which is a good result. This is also shown in a box and whisker plot, which exposes quantities like the maximum value, the minimum value, the median, the first and third quartile of the data.<br>
## <span style="color:green"> How to improve the model </span>
The following section explores how the CNN model could be improved to correctly predict and classify images.
### Learning rate
Probably, the most impactful variable is the learning rate of the Stochastic Descent Gradient, which is the hyperparameter that defines the rate at which the weights of the model are updated during training. It is a difficult task to find a good balance for its value, since a value that is too large can cause the model to converge too quickly to an inaccurate solution, while a value that is too low can cause the model to get stuck or take very long times to converge.
### Batch Normalization
Batch normalization is a process to standardize the inputs to a layer in a Neural Network. This can accelerate the learning pace of a model and sometimes improve its accuracy. The Batch Normalization layer is typically introduced after convolutional and fully-connected layers, to standardize inputs so that the mean is zero and the standard deviation is one. Finally, the standardized outputs can be eventually scaled to extract the new mean and standard deviation for the final output.<br>
For this specific CNN model, I've decided to introduce the Batch Normalization layer after the activation function, between the convolutional and the max pooling layers:<br>
```
from keras.layers import BatchNormalization
# define CNN model (batch-normalized variant)
def define_model(summarize:bool=False,save:bool=False):
    """Build and compile the CNN with a BatchNormalization layer.

    Identical to the baseline model except a BatchNormalization layer is
    inserted between the Conv2D activation and the MaxPooling layer.

    Args:
        summarize: print the model summary when True.
        save: persist the compiled model under ./models/ when True.
    Returns:
        The compiled Keras model.
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='softmax'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    if summarize:
        model.summary()
    if save:
        from pathlib import Path
        model_dir = "./models/"  # renamed from `dir` to avoid shadowing the builtin
        Path(model_dir).mkdir(parents=True, exist_ok=True)
        print("[MODEL] Saved model to directory: "+model_dir)
        model.save(model_dir)
    return model
run()
```
| github_jupyter |
```
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import os
import pickle
import timeit
# numpy settings
import numpy as np
np.random.seed(42) # to make this notebook's output stable across runs
# pandas settings
import pandas as pd
pd.set_option('display.max_columns', None)  # show every column when displaying frames
# matplotlib settings (`%matplotlib inline` is an IPython magic)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Datasets
DATASET_PATH = os.path.join(".", "datasets")
# Figures output directory (created if absent)
IMAGE_PATH = os.path.join(".", "images")
if not os.path.isdir(IMAGE_PATH):
    os.makedirs(IMAGE_PATH)
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure under IMAGE_PATH as <fig_id>.png."""
    target = os.path.join(IMAGE_PATH, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format='png', dpi=300)
# Models
MODEL_PATH = os.path.join(".", "models")
if not os.path.isdir(MODEL_PATH):
os.makedirs(MODEL_PATH)
# Features
FEATURE_PATH = os.path.join(".", "features")
if not os.path.isdir(FEATURE_PATH):
os.makedirs(FEATURE_PATH)
def save_features(feature_score, model_name):
    """Write (feature, score) pairs to FEATURE_PATH/<model_name>.csv, one per line.

    Args:
        feature_score: sequence of (feature_name, score) pairs.
        model_name: base name of the CSV file to create.
    """
    path = os.path.join(FEATURE_PATH, model_name + ".csv")
    with open(path, 'w') as f:
        # Iterate the pairs directly instead of indexing by position.
        for row in feature_score:
            f.write("{0},{1}\n".format(row[0], row[1]))
# Color for print
class color:
    """ANSI terminal escape codes for colored / styled console output.

    Usage: print(color.BOLD + "text" + color.END) -- always terminate
    with END to reset the terminal style.
    """
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
# Print a line
def print_lines(num, length):
    """Print `num` separator rows, each made of `length` dashes."""
    separator = '-' * length
    for _ in range(num):
        print(separator)
```
***
## Data Preprocessing
This section preprocesses the raw data.
```
# Import packages
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import SelectKBest
# Load data written by Python 2: pickle.load(..., encoding='latin1') is the
# public-API equivalent of the private pickle._Unpickler dance.
with open(os.path.join(DATASET_PATH, 'new_theorem_data.p'), 'rb') as f:
    loan_data = pickle.load(f, encoding='latin1')
```
Labels are extracted and removed from the dataset.
```
# Labels processing: the target is EnumListingStatus, recoded to binary.
loan_labels = loan_data['EnumListingStatus']
loan_labels.replace(to_replace=7, value=0, inplace=True)  # cancelled loans -> 0
loan_labels.replace(to_replace=6, value=1, inplace=True)  # originated loans -> 1
# loan_labels.shape
# NOTE(review): loan_labels references the column inside loan_data, so the
# inplace replaces presumably rewrite the source column too -- harmless here
# because EnumListingStatus is dropped from the features later; confirm.
# Correct data types for columns loaded with a generic dtype.
bool_list = ['BoolIsLender']
loan_data[bool_list] = loan_data[bool_list].astype(bool)
cat_list = ['EnumListingCategory', 'EnumLoanFractionalType']
for cat in cat_list:
    loan_data[cat] = loan_data[cat].astype('object')
```
Feature selection needs careful treatment.
Some features are identified as unnecessary, either manually or through a model-based approach, and are removed from the feature set.
Specifically, features with low scores (as given by the classifier) are removed.
```
# Drop unnecessary features
feature_drop_list = ['EnumListingStatus',
'ListingID',
'DateWholeLoanEnd',
'NumPublicRecords12',
'NumOpenTradesDelinqOrPastDue6',
'BoolInGroup',
'BoolOwnsHome',
'BoolIsLender',
'BoolPartialFundingApproved',
'BoolEverWholeLoan',
'BoolIsFractionalLoan',
'NumTradesCurr30DPDOrDerog6',
'NumTradesDelinqOrPastDue6',
'EnumLoanFractionalType',
]
loan_data.drop(feature_drop_list, axis=1, inplace=True)
prosper_list = ['NumPriorProsperLoans61dpd',
'NumPriorProsperLoans31dpd',
'NumPriorProsperLoansEarliestPayOff',
'NumPriorProsperLoansOnTimePayments',
'DolPriorProsperLoansPrincipalBorrowed',
'DolPriorProsperLoansPrincipalOutstanding',
'DolPriorProsperLoansBalanceOutstanding',
'NumPriorProsperLoansLatePayments',
'NumPriorProsperLoansCyclesBilled',
'NumPriorProsperLoansLateCycles',
'DolMaxPriorProsperLoan',
'DolMinPriorProsperLoan']
```
The following code defines a class that returns a sub-dataframe that consists of specified data types from a dataframe.
```
# Data selection
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that keeps only the columns matching the given dtypes."""

    def __init__(self, dtypes):
        self.dtypes = dtypes

    def fit(self, X, y=None):
        # Nothing to learn; selection is purely dtype-driven.
        return self

    def transform(self, X):
        # Copy so downstream mutation never touches the caller's frame.
        return X.select_dtypes(include=self.dtypes).copy()
# Test
# DataFrameSelector(["int64"]).transform(loan_data).head()
```
The following code handles numerical features. `NaN` values are filled with 0, except for `FracDebtToIncomeRatio` and `ProsperScore`, which are filled with their `mean` values.
I tried to add some new features such as `IncomePaymentRatio` and `IncomeAmountRatio`. However, it turns out these new features do not contribute to the classification accuracy and thus are not adopted.
In fact, when time permits, it is recommended to generate various combinations of new features using both heuristics and pure techniques such as `PolynomialFeatures()` from `sklearn`. A good understanding of the loan market will definitely help.
```
# Numerical Feature Handling
class NumericalTransformer(BaseEstimator, TransformerMixin):
    """Impute numerical features.

    'FracDebtToIncomeRatio' and 'ProsperScore' are mean-imputed (zero would
    be misleading for a ratio/score); every other NaN becomes 0.
    Earlier SelectKBest / derived-ratio-feature experiments were removed
    because they did not improve classification accuracy.

    NOTE(review): transform() mutates the frame it receives; the upstream
    DataFrameSelector hands it a copy, so this is safe in the pipeline.
    """
    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Stateless: imputation statistics are computed on the fly.
        return self

    def transform(self, X, y=None):
        # Plain column assignment instead of chained
        # `X[col].fillna(..., inplace=True)`, which is unreliable under
        # pandas copy-on-write.
        for col in ['FracDebtToIncomeRatio', 'ProsperScore']:
            X[col] = X[col].fillna(X[col].mean())
        # Remaining numerical NaNs are treated as "absent" -> 0.
        X.fillna(0, inplace=True)
        return X
# Test
# loan_data_num = NumericalTransformer().transform(DataFrameSelector(['float64', 'int64']).transform(loan_data))
# loan_data_num.head()
```
The following code handles categorical features. `NaN` values are filled with `-`.
Categorical features are encoded as integers. Another popular encoding, `One-hot Encoding`, was also tested.
Yet, the benefit of using `One-hot Encoding` is not obvious here and thus it is not used.
```
# Categorical features handling
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
class CategoricalTransformer(BaseEstimator, TransformerMixin):
    """Impute and encode categorical (object-dtype) features.

    Args:
        encoding: 'integer' (default) label-encodes each column;
            'onehot' additionally one-hot encodes the integer codes.

    NOTE(review): a fresh LabelEncoder is fit per column on every call, so
    integer codes are only consistent within a single transform() call --
    fine for a one-shot dataset, not for separate train/test transforms.
    """
    def __init__(self, encoding='integer'):
        self.encoding = encoding

    def fit(self, X, y=None):
        # Stateless; encoders are (re)fit inside transform().
        return self

    def transform(self, X):
        # Replace NaN values with a sentinel category.
        X.fillna('-', inplace=True)
        # Encode every column to integer codes.
        le = LabelEncoder()
        X = X.apply(le.fit_transform)
        if self.encoding == 'onehot':
            ohe = OneHotEncoder()
            X = ohe.fit_transform(X.values)
        return X
# Test
# loan_data_cat = CategoricalTransformer().transform(DataFrameSelector(['object']).transform(loan_data))
# loan_data_cat
```
The following code handles Datetime features. NAN values are filled with `2017-12-31 23:59:59`.
All Datetime values are converted to days from `1900-01-01 00:00:00`.
Two new features are added, i.e., `CreditLength` and `ListingTime`.
```
# Datetime features handling
class DatetimeTransformer(BaseEstimator, TransformerMixin):
    """Convert datetime columns to integer day counts since 1900-01-01 and
    derive the CreditLength / ListingTime duration features."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        # Missing timestamps are pinned to the end of the observation window.
        X.fillna(pd.Timestamp('2017-12-31 23:59:59'), inplace=True)
        # Express every timestamp as whole days since a fixed epoch
        # (no temporary helper column needed).
        epoch = pd.Timestamp('1900-01-01 00:00:00')
        for col in X.columns:
            X[col] = (X[col] - epoch).dt.days
        # Durations derived from the converted day counts.
        X['CreditLength'] = X['DateCreditPulled'] - X['DateFirstCredit']
        X['ListingTime'] = X['DateListingStart'] - X['DateListingCreation']
        return X
# Test
# loan_data_dt = DatetimeTransformer().transform(DataFrameSelector(['datetime64']).transform(loan_data))
# loan_data_dt.head()
```
The following code handles bool features. All values are converted to 0 or 1.
```
# Bool feature handling
class BoolTransformer(BaseEstimator, TransformerMixin):
    """Cast boolean columns to 0/1 integers."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Nothing to learn.
        return self

    def transform(self, X):
        # True/False -> 1/0.
        return X.astype(int)
# Test
# loan_data_bool = BoolTransformer().transform(DataFrameSelector(['bool']).transform(loan_data))
# loan_data_bool.head()
# Scaler
from sklearn.preprocessing import StandardScaler
class Scaler(BaseEstimator, TransformerMixin):
    """Standardize every column to zero mean / unit variance, keeping the
    pandas index and column labels.

    NOTE(review): a fresh StandardScaler is fit inside transform(), so
    scaling parameters are re-estimated on whatever data is passed in
    (train and test would be scaled independently) -- confirm intended.
    """
    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Stateless by design; see NOTE above about where fitting happens.
        return self

    def transform(self, X):
        # Re-wrap the numpy output so downstream steps keep the labels.
        X = pd.DataFrame(StandardScaler().fit_transform(X.values),
                         index=X.index, columns=X.columns)
        return X
# Test
# loan_data_scaled = Scaler().fit_transform(DataFrameSelector(['int64']).transform(loan_data))
# loan_data_scaled.head()
# Transformation pipelines
from sklearn.pipeline import Pipeline
num_pipeline = Pipeline([
('selector', DataFrameSelector(['float64', 'int64'])),
('transformer', NumericalTransformer()),
('scaler', Scaler()),
])
cat_pipeline = Pipeline([
('selector', DataFrameSelector(['object'])),
('transformer', CategoricalTransformer()),
])
dt_pipeline = Pipeline([
('selector', DataFrameSelector(['datetime64'])),
('transformer', DatetimeTransformer()),
('scaler', Scaler()),
])
bool_pipeline = Pipeline([
('selector', DataFrameSelector(['bool'])),
('transformer', BoolTransformer()),
])
# Pandas dataframe returned
loan_data_clean = num_pipeline.fit_transform(loan_data)
loan_data_clean = loan_data_clean.join(cat_pipeline.fit_transform(loan_data))
loan_data_clean = loan_data_clean.join(dt_pipeline.fit_transform(loan_data))
loan_data_clean = loan_data_clean.join(bool_pipeline.fit_transform(loan_data))
# loan_data_clean.head()
# # Prepare data using FeatureUnion
# from sklearn.pipeline import FeatureUnion
# data_pipeline = FeatureUnion(transformer_list=[
# ("num_pipeline", num_pipeline),
# ("cat_pipeline", cat_pipeline),
# ("dt_pipeline", dt_pipeline),
# ("bool_pipeline", bool_pipeline)
# ])
# # Numpy array returned
# loan_data_clean = data_pipeline.fit_transform(loan_data)
# Drop unnecessary features
null_cols = [
'DolPriorProsperLoansPrincipalBorrowed',
'DolPriorProsperLoansPrincipalOutstanding',
'DolPriorProsperLoansBalanceOutstanding',
'NumPriorProsperLoansCyclesBilled',
'NumPriorProsperLoansOnTimePayments',
'NumPriorProsperLoansLateCycles',
'NumPriorProsperLoansLatePayments',
'DolMaxPriorProsperLoan',
'DolMinPriorProsperLoan',
'NumPriorProsperLoansEarliestPayOff',
'NumPriorProsperLoans31dpd',
'NumPriorProsperLoans61dpd',
'DolMonthlyIncome'
]
loan_data_clean.drop(null_cols, axis=1, inplace=True)
def drop_date_columns(data, col):
    """Remove column `col` from `data` in place; returns None."""
    data.drop(columns=[col], inplace=True)
    return None
drop_date_columns(loan_data_clean, 'DateCreditPulled')
drop_date_columns(loan_data_clean, 'DateListingStart')
drop_date_columns(loan_data_clean, 'DateListingCreation')
drop_date_columns(loan_data_clean, 'DateFirstCredit')
drop_date_columns(loan_data_clean, 'DateWholeLoanStart')
# Split the data into a training set and a test set
# Regular sampling
from sklearn.model_selection import train_test_split
# Full set for mode tunning
X_train, X_test, y_train, y_test = train_test_split(loan_data_clean, loan_labels, test_size=0.2, random_state=42)
print([X_train.shape, y_train.shape, X_test.shape, y_test.shape])
loan_data_clean.head()
```
## `Deep Learning`
#### Split the data into an 80k-sample training set and a 2k-sample test set
```
# Take an 80k / 2k subset for the deep-learning experiment.
train_size = 80000
test_size = 2000
X_train_s, y_train_s = X_train[:train_size], y_train[:train_size]
X_test_s, y_test_s = X_test[:test_size], y_test[:test_size]
print([X_train_s.shape, y_train_s.shape, X_test_s.shape, y_test_s.shape])
# Plain numpy arrays for TensorFlow feeding.
DL_X_train, DL_X_test = X_train_s.values, X_test_s.values
DL_Y_train, DL_Y_test = y_train_s.values, y_test_s.values
# One-hot encode the binary labels: column 0 = label, column 1 = 1 - label.
DL_Y_train = np.array([DL_Y_train, -(DL_Y_train-1)]).T
DL_Y_test = np.array([DL_Y_test, -(DL_Y_test-1)]).T
import tensorflow as tf
# Parameters
lr_0 = 0.001  # initial learning rate
decay_rate = 9/1500  # inverse-time learning-rate decay per epoch
training_epochs = 1000
batch_size = 256
display_step = 100  # log the cost every `display_step` epochs
drop_rate_1 = 0.25  # dropout after each conv/pool block
drop_rate_2 = 0.5  # dropout in the dense layer
# Network Parameters
n_classes = 2  # Number of classes to predict
n_inputs = X_train_s.shape[1]  # Number of features
n_hidden_1 = 1024  # 1st layer number of units
n_hidden_2 = 10  # 2nd layer number of units
n_hidden_3 = 128  # 3rd layer number of units
n_hidden_4 = 128  # 4th layer number of units
n_hidden_5 = 64  # 5th layer number of units
n_hidden_6 = 64  # 6th layer number of units
n_hidden_7 = 64  # 7th layer number of units
# NOTE(review): only n_hidden_1 is used by the graph below; n_hidden_2..7
# are presumably leftovers from an earlier fully-connected architecture.
```
#### Construct 7-layer CNN with regularization, drop-out, batch normalization and relu as activation function
```
def dense(inputs, num_outputs, activation_fn):
    """L2-regularized fully-connected layer with NO activation applied.

    NOTE(review): `activation_fn` is ignored -- the layer is built with
    activation_fn=None and callers apply ReLU themselves (some callers
    even pass a scope string here). Confirm before relying on it.
    """
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.2)
    return tf.contrib.layers.fully_connected(inputs, num_outputs, activation_fn=None, weights_regularizer=regularizer)
def convnn(inputs, filt, knsize, activation_fn):
    """1-D convolution ('same' padding) with NO activation applied.

    NOTE(review): `activation_fn` is ignored (activation=None); callers
    apply ReLU themselves.
    """
    return tf.layers.conv1d(inputs=inputs, filters=filt, kernel_size=knsize, padding="same", activation=None)
def pooling(inputs):
    # 1-D max pooling, window 2 / stride 2 (halves the sequence length).
    return tf.layers.max_pooling1d(inputs=inputs, pool_size=2, strides=2)
def dense_relu(inputs, num_outputs, is_training, scope):
    """Dense layer followed by ReLU; `is_training` is unused here.

    NOTE(review): `scope` is also forwarded as dense()'s (ignored)
    activation_fn argument.
    """
    with tf.variable_scope(scope):
        h1 = dense(inputs, num_outputs, scope)
        return tf.nn.relu(h1, 'relu')
def dense_batch_relu(inputs, num_outputs, is_training, scope):
    """Dense layer -> batch norm (mode switched by is_training) -> ReLU.

    NOTE(review): `scope` is forwarded as dense()'s (ignored) activation_fn.
    """
    with tf.variable_scope(scope):
        h1 = dense(inputs, num_outputs, scope)
        h2 = tf.contrib.layers.batch_norm(h1, center=True, scale=True, is_training=is_training, scope='bn')
        return tf.nn.relu(h2, 'relu')
def cnn_batch_relu(inputs, filt, knsize, is_training, scope):
    """1-D conv -> ReLU. Despite the name, batch norm is commented out,
    so `is_training` is currently unused.

    NOTE(review): `scope` is forwarded as convnn()'s (ignored) activation_fn.
    """
    with tf.variable_scope(scope):
        h1 = convnn(inputs, filt, knsize, scope)
        # h2 = tf.contrib.layers.batch_norm(h1, center=True, scale=True, is_training=is_training, scope='bn')
        return tf.nn.relu(h1, 'relu')
def dense_dropout_relu(inputs, num_outputs, dropout_rate, is_training, scope):
    """Dense layer -> dropout (active only when is_training) -> ReLU.

    NOTE(review): `scope` is forwarded as dense()'s (ignored) activation_fn.
    """
    with tf.variable_scope(scope):
        h1 = dense(inputs, num_outputs, scope)
        h2 = tf.layers.dropout(inputs=h1, rate=dropout_rate, training=is_training)
        return tf.nn.relu(h2, 'relu')
```
#### Implement CNN and output accuracy and loss
```
tf.reset_default_graph()
# Placeholders: X is a (batch, n_inputs, 1) "sequence" of features,
# Y the one-hot labels; is_training toggles dropout.
X = tf.placeholder('float32', [None, n_inputs, 1], name='X')
Y = tf.placeholder('float32', (None, n_classes), name='Y')
is_training = tf.placeholder(tf.bool, name='is_training')
learning_rate = tf.placeholder('float32', name='lr')
# NOTE(review): input_layer is computed but unused -- the conv stack is
# fed X directly (X already has the [-1, n_inputs, 1] shape).
input_layer = tf.reshape(X, [-1, n_inputs, 1])
# Conv block 1: conv(32 filters, kernel 5) + ReLU -> pool -> dropout.
h1 = cnn_batch_relu(X, 32, 5, is_training=is_training, scope='cnn1')
h2 = pooling(h1)
h2_dr = tf.layers.dropout(inputs=h2, rate=drop_rate_1, training=is_training)
# Conv block 2: conv(64 filters, kernel 5) + ReLU -> pool -> dropout.
h3 = cnn_batch_relu(h2_dr, 64, 5, is_training=is_training, scope='cnn2')
h4 = pooling(h3)
h4_dr = tf.layers.dropout(inputs=h4, rate=drop_rate_1, training=is_training)
# Flatten and classify: dense(1024)+dropout+ReLU, then a linear logits layer.
h4_flatten = tf.contrib.layers.flatten(h4_dr)
h5 = dense_dropout_relu(inputs=h4_flatten, dropout_rate=drop_rate_2, num_outputs=n_hidden_1, is_training=is_training, scope='layer5')
logits = dense(inputs=h5, num_outputs=2, activation_fn='logits')
with tf.name_scope('accuracy'):
    # Fraction of examples whose arg-max prediction matches the label.
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(Y, 1), tf.argmax(logits, 1)), 'float32'))
with tf.name_scope('loss'):
    # Softmax cross-entropy computed from the raw logits.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # Ensures that we execute the update_ops (e.g. batch-norm moving
    # averages, were batch norm enabled) before performing the train step.
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    # Alternative with gradient clipping (kept for reference):
    # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    # gvs = optimizer.compute_gradients(loss)
    # capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gvs]
    # train_op = optimizer.apply_gradients(capped_gvs)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        n_batches = int(len(DL_X_train)/batch_size)
        X_batches = np.array_split(DL_X_train, n_batches)
        Y_batches = np.array_split(DL_Y_train, n_batches)
        # Inverse-time decay of the learning rate.
        lr_value = 1/(1 + decay_rate * epoch) * lr_0
        # Loop over all batches
        for i in range(n_batches):
            batch_X, batch_Y = X_batches[i], Y_batches[i]
            # Add the trailing channel dimension expected by the conv stack.
            batch_X = np.reshape(batch_X, (np.shape(batch_X)[0], n_inputs, 1))
            # Run optimization op (backprop) and cost op (to get loss value)
            # _, c = sess.run([optimizer, loss], feed_dict={X: batch_X, Y: batch_Y, is_training:1, learning_rate:lr_value})
            _, c = sess.run([optimizer, loss], feed_dict={X: batch_X, Y: batch_Y, is_training:1, learning_rate:lr_value})
            # Compute average loss
            avg_cost += c / n_batches
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    # Test model
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    DL_X_train_tr = np.reshape(DL_X_train, (np.shape(DL_X_train)[0], n_inputs, 1))
    DL_X_test_tr = np.reshape(DL_X_test, (np.shape(DL_X_test)[0], n_inputs, 1))
    # NOTE(review): is_training is fed as 1 for the training-set evaluation,
    # so dropout stays active there; the test evaluation uses 0 as expected.
    print("Training Accuracy:", accuracy.eval({X: DL_X_train_tr, Y: DL_Y_train, is_training:1}))
    print("Test Accuracy:", accuracy.eval({X: DL_X_test_tr, Y: DL_Y_test, is_training:0}))
    # global result
    # result = tf.argmax(pred, 1).eval({X: DL_X_test, Y: DL_Y_test})
    # # plot the cost
    # plt.plot(np.squeeze(loss))
    # plt.ylabel('cost')
    # plt.xlabel('iterations (per tens)')
    # plt.title("Learning rate =" + str(learning_rate))
    # plt.show()
```
| github_jupyter |
# Exploring and Processing Data - Part 1
```
# imports
import pandas as pd
import numpy as np
import os
```
# Import Data
```
# set the path of the raw data
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
# read the data with all default parameters
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')
# get the type
type(train_df)
```
# Basic Structure
```
# information about dataframe
train_df.info()
test_df.info()
# add survived column in test dataframe
test_df['Survived'] = -888
df = pd.concat((train_df, test_df), axis=0)
df.info()
# to get top 5 rows
df.head()
# use to get n-rows
df.head(10)
# use last 5 rows
df.tail()
# column selection using dot
df.Name
# selecting column using square brackets
df['Name']
# multiple selection
df[['Name', 'Age']]
# Indexing : use loc for label based indexing
# all columns
df.loc[5:10,]
# selecting column range
df.loc[5:10, 'Age' : 'Pclass']
# discrete columns
df.loc[5:10, ['Age', 'Fare', 'Embarked']]
# indexing using iloc for postion based indexing
df.iloc[5:10, 3:8]
# filter rows based on the condition
male_passengers = df.loc[df.Sex == 'male',:]
print('Number of male passengers in first class: {0}'.format(len(male_passengers)))
# use & or | operators to build complex logic
male_passengers_first_class = df.loc[((df.Sex == 'male') & (df.Pclass == 1)),:]
print('Number of male passengers in first class: {0}'.format(len(male_passengers_first_class)))
```
# Summary Statistics
```
# to get summary statistics for all numeric columns
df.describe()
# numerical feature
# centrality measures
print('Mean Fare : {0}'.format(df.Fare.mean()))
print('Median Fare : {0}'.format(df.Fare.median()))
# dispersion measures
print('Min fare : {0}'.format(df.Fare.min()))
print('Max fare : {0}'.format(df.Fare.max()))
print('Fare Range : {0}'.format(df.Fare.max() - df.Fare.min()))
print('25 Percentile : {0}'.format(df.Fare.quantile(.25)))
print('50 Percentile : {0}'.format(df.Fare.quantile(.5)))
print('75 Percentile : {0}'.format(df.Fare.quantile(.75)))
print('Variance fare : {0}'.format(df.Fare.var()))
print('Standard Deviation fare : {0}'.format(df.Fare.std()))
# magic function to view plot
%matplotlib inline
# box-whisker plot
df.Fare.plot(kind='box')
# get statistics for all columns including non-numeric ones
df.describe(include='all')
# categorical column: counts
df.Sex.value_counts()
# categorical column: proportions
df.Sex.value_counts(normalize=True)
# apply on other columns
df[df.Survived != -888].Survived.value_counts()
# count : passenger class
df.Pclass.value_counts()
# visualize counts
df.Pclass.value_counts().plot(kind='bar')
# to set title, color, and rotate labels
df.Pclass.value_counts().plot(kind='bar', rot = 0, title='Class wise passenger count', color='c');
```
# Distributions
## Univariate distributions
```
# use hist to create histogram
df.Age.plot(kind='hist', title='Histogram for Age', color='c');
# adding or removing bins
df.Age.plot(kind='hist', title='Histogram dor Age', color='c', bins=20);
# kde for density plot
df.Age.plot(kind='kde', title='Density plot for Age', color='c');
# histogram for fare
df.Fare.plot(kind='hist', title='histogram for Fare', color='c', bins=20);
print('skewness for age : {0:.2f}'.format(df.Age.skew()))
print('skewness for fare : {0:.2f}'.format(df.Fare.skew()))
```
### Bi-variate distribution
```
# scatter plot for bi-variate distribution
df.plot.scatter(x='Age', y='Fare', color='c', title='scatter plot : Age vs Fare');
# use to set the opacity of the dots
df.plot.scatter(x='Age', y='Fare', color='c', title='scatter plot : Age vs Fare', alpha=0.1);
df.plot.scatter(x='Pclass', y='Fare', color='c', title='scatter plot : Passenger class vs Fare', alpha=0.15);
```
# Grouping and Aggregations
```
# group by
df.groupby('Sex').Age.median()
# creating groups based on pclass
df.groupby(['Pclass']).Fare.median()
df.groupby(['Pclass'])['Fare', 'Age'].median()
# extracting more than one summary statistic
df.groupby(['Pclass']).agg({'Fare' : 'mean', 'Age' : 'median'})
# more complicated aggregations
aggregations = {
'Fare': { # work on the "Fare" column
'mean_Fare': 'mean',
'median_Fare': 'median',
'max_Fare': max,
'min_Fare': np.min
},
'Age': { # work on the "Age" column
'median_Age': 'median',
'min_Age': min,
'max-Age': max,
'range_Age': lambda x: max(x) - min(x) #calculate the age range per group
}
}
df.groupby(['Pclass']).agg(aggregations)
df.groupby(['Pclass', 'Embarked']).Fare.median()
```
# Crosstabs
```
# crosstabs on Sex and Pclass
pd.crosstab(df.Sex, df.Pclass)
pd.crosstab(df.Sex, df.Pclass).plot(kind='bar');
```
# Pivot table
```
# Pivot table
df.pivot_table(index='Sex', columns='Pclass', values='Age', aggfunc='mean')
df.groupby(['Sex', 'Pclass']).Age.mean()
df.groupby(['Sex', 'Pclass']).Age.mean().unstack()
```
# Data Munging: Working with missing values
```
# detect missing values
df.info()
```
### Feature: Embarked
```
# extract rows with Embarked as Null
df[df.Embarked.isnull()]
# find most common embarkment point
df.Embarked.value_counts()
# which embarkment point has higher survival count
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Embarked)
# input the missing value with 'S'
# df.loc[df.Embarked.isnull(), 'Embarked'] = 'S'
# df.Embarked.fillna('S', inplace=True)
# Option 2 : explore the fare of each class for each embarkment point
df.groupby(['Pclass', 'Embarked']).Fare.median()
# replace the missing value with 'C'
df.Embarked.fillna('C', inplace=True)
# check if any null value is remaining
df[df.Embarked.isnull()]
# check info again
df.info()
```
### Feature: Fare
```
df[df.Fare.isnull()]
median_fare = df.loc[(df.Pclass == 3) & (df.Embarked == 'S'), 'Fare'].median()
print(median_fare)
df.Fare.fillna(median_fare, inplace=True)
df.info()
```
### Feature : Age
```
# set maximum number of rows to be displayed
pd.options.display.max_rows = 15
# return null rows
df[df.Age.isnull()]
```
### option 1: Replace all missing age with mean value
```
df.Age.plot(kind='hist', bins=20, color='c');
# get mean
df.Age.mean()
```
#### issue: due to few high values of 70's and 80's pushing the overall mean
```
# replace the missing values
# df.Age.fillna(df.Age.mean(), inplace=True)
```
### option 2: replace with median age of gender
```
# median values
df.groupby('Sex').Age.median()
# boxplot to visualize
df[df.Age.notnull()].boxplot('Age', 'Sex');
# replace:
# age_sex_median = df.groupby('Sex').Age.transform('median')
# df.Age.fillna(age_sex_mediaan, inplace=True)
```
### option 3: replace with median age of Pclass
```
df[df.Age.notnull()].boxplot('Age', 'Pclass');
# replace:
# pclass_age_median = df.groupby('Pclass').Age.transform('median')
# df.Age.fillna(age_sex_mediaan, inplace=True)
```
### option 4: replace with median age of title
```
df.Name
# Function to extract the title from the name
def getTitle(name):
    """Return the lower-cased title token from a 'Last, Title. First' name."""
    after_comma = name.split(',')[1]
    return after_comma.split('.')[0].strip().lower()
# use map function to apply the function on each Name value row i
df.Name.map(lambda x : getTitle(x)) # alternatively you can use : df.Name.map(getTitle)
df.Name.map(lambda x : getTitle(x)).unique()
# function to extract the title from the name
def GetTitle(name):
    """Map the raw title in a 'Last, Title. First' name to a title group."""
    title_group = {
        'mr': 'Mr', 'mrs': 'Mrs', 'miss': 'Miss', 'master': 'Master',
        'don': 'Sir', 'rev': 'Sir', 'dr': 'Officer', 'mme': 'Mrs',
        'ms': 'Mrs', 'major': 'Officer', 'lady': 'Lady', 'sir': 'Sir',
        'mlle': 'Miss', 'col': 'Officer', 'capt': 'Officer',
        'the countess': 'Lady', 'jonkheer': 'Sir', 'dona': 'Lady',
    }
    raw_title = name.split(',')[1].split('.')[0].strip().lower()
    # Unknown titles raise KeyError, matching the original lookup behavior.
    return title_group[raw_title]
# create Title feature
df['Title'] = df.Name.map(lambda x : GetTitle(x))
# head
df.head()
# Box plot of Age with title
df[df.Age.notnull()].boxplot('Age', 'Title');
# replace missing values
title_age_median = df.groupby('Title').Age.transform('median')
df.Age.fillna(title_age_median, inplace=True)
# check info
df.info()
```
# Working with outliers
# Age
```
# use histogram to understand distribution
df.Age.plot(kind='hist', bins=20, color='c');
df.loc[df.Age > 70]
```
# Fare
```
# histogram for fare
df.Fare.plot(kind='hist', title='histogram for Fare', bins=20, color='c');
# box plot to indentiify outliers
df.Fare.plot(kind='box');
# look into the outliers
df.loc[df.Fare == df.Fare.max()]
# Try some transformation to reduce the skewness
LogFare = np.log(df.Fare + 1.0) # Adding 1 to accomodate zero fares : log(0) is not defined
# Histogram of Logfare
LogFare.plot(kind='hist', color='c', bins=20);
# binning
pd.qcut(df.Fare, 4)
pd.qcut(df.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']) #discretization
pd.qcut(df.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']).value_counts().plot(kind='bar', color='c', rot=0);
# create fare bin feature
df['Fare_Bin'] = pd.qcut(df.Fare, 4, labels=['very_low', 'low', 'high', 'very_high'])
```
# Feature Engineering
### Feature: Age State (Adult or Child)
```
# AgeState based on Age
df['AgeState'] = np.where(df['Age'] >= 18, 'Adult', 'Child')
# AgeState counts
df['AgeState'].value_counts()
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].AgeState)
```
### Feature : FamilySize
```
# Family : Adding Parents with Siblings
df['FamilySize'] = df.Parch + df.SibSp + 1 # 1 for self
# explore the family feature
df['FamilySize'].plot(kind='hist', color='c');
# Explore families with max family members
df.loc[df.FamilySize == df.FamilySize.max(), ['Name', 'Survived', 'FamilySize', 'Ticket']]
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].FamilySize)
```
### Feature : IsMother
```
# a Lady who is aged more than 18 who has at least a child and is married
df['IsMother'] = np.where(((df.Sex == 'female') & (df.Parch > 0) & (df.Age > 18) & (df.Title != 'Miss')), 1, 0)
# crosstab with IsMother
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].IsMother)
```
### Feature : Deck
```
# explore Cabin values
df.Cabin
# use unique to get unqiue values for cabin feature
df.Cabin.unique()
# look at the cabin = T
df.loc[df.Cabin == 'T']
# set the value to NaN
df.loc[df.Cabin == 'T', 'Cabin'] = np.NaN
df.Cabin.unique()
# Extract the first character of the cabin string as the deck letter.
def get_deck(cabin):
    """Return the deck letter for a cabin value, or 'Z' when cabin is missing.

    Returns a plain str; the original wrapped the result in a 0-d numpy
    array via np.where, which is unhashable and needlessly boxed.
    """
    if pd.notnull(cabin):
        return str(cabin)[0].upper()
    return 'Z'
df['Deck'] = df['Cabin'].map(lambda x : get_deck(x))
# check counts
df.Deck.value_counts()
# use crosstab to look into survived feature cabin wise
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Deck)
# info command
df.info()
```
# Categorical Feature Encoding
```
# Sex or Gender
df['IsMale'] = np.where(df.Sex == 'male', 1, 0)
# columns Deck, Pclass, Title, AgeState
df = pd.get_dummies(df, columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
print(df.info())
```
# Drop and Reorder Columns
```
# drop columns
df.drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1, inplace=True)
# reorder columns
columns = [column for column in df.columns if column != 'Survived']
columns = ['Survived'] + columns
df = df[columns]
# check data frame status
df.info()
```
# Save Processed Dataset
```
processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
write_train_path = os.path.join(processed_data_path, 'train.csv')
write_test_path = os.path.join(processed_data_path, 'test.csv')
# train data
df.loc[df.Survived != -888].to_csv(write_train_path)
# test data
columns = [column for column in df.columns if column != 'Survived']
df.loc[df.Survived == -888, columns].to_csv(write_test_path)
```
# Building the data processing script
```
get_processed_data_script_file = os.path.join(os.path.pardir, 'src','data','get_processed_data.py')
%%writefile $get_processed_data_script_file
import numpy as np
import pandas as pd
import os
def read_data():
    """Read the raw Titanic train/test CSVs and return one combined frame.

    Test rows are tagged with the sentinel Survived = -888 so the two
    subsets can be separated again after joint processing.
    """
    # set the path of the raw data
    raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
    train_file_path = os.path.join(raw_data_path, 'train.csv')
    test_file_path = os.path.join(raw_data_path, 'test.csv')
    # read the data with all default parameters
    train_df = pd.read_csv(train_file_path, index_col='PassengerId')
    test_df = pd.read_csv(test_file_path, index_col='PassengerId')
    test_df['Survived'] = -888
    df = pd.concat((train_df, test_df), axis=0)
    return df
def process_data(df):
    """Run the full feature-engineering chain and return the processed frame.

    Steps: title extraction -> imputation -> binned/derived features ->
    categorical encoding -> column cleanup and reordering.
    """
    # using the method chaining concept
    return (df
            # create title attribute first - fill_missing_values imputes Age per title
            .assign(Title = lambda x: x.Name.map(get_title))
            # fill missing Embarked / Fare / Age values
            .pipe(fill_missing_values)
            # discretize Fare into quartile bins
            .assign(Fare_Bin = lambda x: pd.qcut(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']))
            # adult/child indicator
            .assign(AgeState = lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
            .assign(FamilySize = lambda x: x.Parch + x.SibSp + 1)
            .assign(IsMother = lambda x : np.where(((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss')), 1, 0))
            # deck letter from the cabin (the lone 'T' cabin is treated as missing)
            .assign(Cabin = lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
            .assign(Deck = lambda x: x.Cabin.map(get_deck))
            # binary / one-hot encoding of the categoricals
            .assign(IsMale = lambda x : np.where(x.Sex == 'male', 1, 0))
            .pipe(pd.get_dummies, columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
            # drop raw columns that are now encoded or unused
            .drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1)
            # put Survived first
            .pipe(reorder_columns)
            )
def get_title(name):
    """Normalize the title embedded in a 'Last, Title. First' name."""
    title_group = {
        'mr': 'Mr', 'mrs': 'Mrs', 'miss': 'Miss', 'master': 'Master',
        'don': 'Sir', 'rev': 'Sir', 'dr': 'Officer', 'mme': 'Mrs',
        'ms': 'Mrs', 'major': 'Officer', 'lady': 'Lady', 'sir': 'Sir',
        'mlle': 'Miss', 'col': 'Officer', 'capt': 'Officer',
        'the countess': 'Lady', 'jonkheer': 'Sir', 'dona': 'Lady',
    }
    raw = name.split(',')[1].split('.')[0].strip().lower()
    # KeyError on unknown titles, matching the original dict lookup.
    return title_group[raw]
def get_deck(cabin):
    """Return the deck letter ('Z' when cabin is missing).

    Returns a plain str; the original returned a 0-d numpy array from
    np.where, which is unhashable and needlessly boxed.
    """
    return str(cabin)[0].upper() if pd.notnull(cabin) else 'Z'
def fill_missing_values(df):
    """Impute Embarked, Fare and Age in place and return the same frame."""
    # embarked: 'C' chosen from the fare-by-embarkment analysis in the notebook
    df.Embarked.fillna('C', inplace=True)
    # fare: median of 3rd-class Southampton fares (the profile of the missing row)
    median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median()
    df.Fare.fillna(median_fare, inplace=True)
    # age: median age of passengers sharing the same title
    title_age_median = df.groupby('Title').Age.transform('median')
    df.Age.fillna(title_age_median, inplace=True)
    return df
def reorder_columns(df):
    """Return df with 'Survived' first; the other columns keep their order."""
    rest = [c for c in df.columns if c != 'Survived']
    return df[['Survived'] + rest]
def write_data(df):
    """Split the combined frame back into train/test and write processed CSVs."""
    processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
    write_train_path = os.path.join(processed_data_path, 'train.csv')
    write_test_path = os.path.join(processed_data_path, 'test.csv')
    # train data: rows with real labels
    df[df.Survived != -888].to_csv(write_train_path)
    # test data: sentinel-labelled rows, with the Survived column dropped
    columns = [column for column in df.columns if column != 'Survived']
    df[df.Survived == -888][columns].to_csv(write_test_path)
if __name__ == '__main__':
df = read_data()
df = process_data(df)
write_data(df)
!python $get_processed_data_script_file
```
# Advanced visualization using MatPlotlib
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(df.Age)
plt.hist(df.Age, bins=20, color='c')
plt.show()
plt.hist(df.Age, bins=20, color='c')
plt.title('Histogram : Age')
plt.xlabel('Bins')
plt.ylabel('Counts')
plt.show()
f , ax = plt.subplots()
ax.hist(df.Age, bins=20, color='c')
ax.set_title('Histogram : Age')
ax.set_xlabel('Bins')
ax.set_ylabel('Counts')
plt.show()
# Add subplots
f , (ax1, ax2) =plt.subplots(1,2, figsize=(14,3))
ax1.hist(df.Fare, bins=20, color='c')
ax1.set_title('Histogram : Fare')
ax1.set_xlabel('Bins')
ax1.set_ylabel('Counts')
ax2.hist(df.Age, bins=20, color='tomato')
ax2.set_title('Histogram : Age')
ax2.set_xlabel('Bins')
ax2.set_ylabel('Counts')
plt.show()
# Adding subplots
f, ax_arr = plt.subplots(3,2, figsize=(14, 7))
# plot 1
ax_arr[0,0].hist(df.Fare, bins=20, color='c')
ax_arr[0,0].set_title('Histogram : Fare')
ax_arr[0,0].set_xlabel('Bins')
ax_arr[0,0].set_ylabel('Counts')
# plot 2
ax_arr[0,1].hist(df.Age, bins=20, color='c')
ax_arr[0,1].set_title('Histogram : Age')
ax_arr[0,1].set_xlabel('Bins')
ax_arr[0,1].set_ylabel('Counts')
# plot 3
ax_arr[1,0].boxplot(df.Fare.values)
ax_arr[1,0].set_title('Boxplot : Fare')
ax_arr[1,0].set_xlabel('Fare')
ax_arr[1,0].set_ylabel('Fare')
# plot 4
ax_arr[1,1].hist(df.Age.values)
ax_arr[1,1].set_title('Boxplot : Age')
ax_arr[1,1].set_xlabel('Age')
ax_arr[1,1].set_ylabel('Age')
# plot 5
ax_arr[2,0].scatter(df.Age, df.Fare, color='c', alpha=0.15)
ax_arr[2,0].set_title('Scatter Plot : Age vs Fare')
ax_arr[2,0].set_xlabel('Age')
ax_arr[2,0].set_ylabel('Age')
# remove empty subplot
ax_arr[2,1].axis('off')
plt.tight_layout()
plt.show()
```
| github_jupyter |
# Electiva Técnica I - Introducción *Robot Operating System* (ROS1)
### David Rozo Osorio, I.M, M.Sc.
## Introducción a Linux System
- Objetivo: comprender el funcionamiento de un sistema operativo tipo Linux.
- Procedimiento:
1. Características de la máquina virtual.
2. Introducción.
3. Características del S.O. - Gráfico.
4. Operaciones básicas con la Terminal.
### Características para una máquina virtual
El proceso de instalación puede ser implementado de 2 formas:
- **Instalación con arranque paralelo**. En el siguiente [link](https://www.youtube.com/watch?v=x1ykDpSzpKU) puede visualizar el proceso. Nota: este proceso debe ser desarrollado con cuidado porque en caso de que se presente algún incidentes puede perder toda la información que tenga en computador.
- **Máquina virtual** [link descarga](https://www.virtualbox.org/). En el siguiente [link](https://www.youtube.com/watch?v=x5MhydijWmc) puede visualizar el proceso (recomendado). Configuración máquina virtual (óptimo) [mínimo]:
- Disco virtual 25GB o más. [15 GB].
- Memoria RAM 4 GB o más. [2 GB].
- CPU 2 o más. [1].
- Se requiere de una instalación completa (no de prueba).
- Se recomienda instalar la versión en Inglés de Ubuntu.
- Ubuntu 20.04 https://ubuntu.com/download/desktop
<p style="page-break-after:always;"></p>
### Introducción Ubuntu
Sistema Operativo basado en Unix, desarrollados como parte del Proyecto GNU.
- Multitasking.
- Multiusuario.
- Software de uso libre y código abierto.
- Distribuciones populares:
- Ubuntu
- Debian
- Fedora
Ej. Android, utiliza un núcleo tipo Linux.
<img src="imag/ubuntuGUI.png" width=400 height=400 />
<img src="imag/ubuntuFileSystem.png" width=400 height=400 />
<img src="imag/ubuntuGUISoftware.png" width=400 height=400 />
### CommandLine
- `ls`: List Directory
- `ls -a`: Extra parameter that shows all hidden elements in a folder.
- `ls --help`: Display a help message listing all available options.
- `cd`: Change Directory.
- `cd ..`: Change directory to the previous.
- `cd --`: Change to HOME directory.
- `clear`: erase all content of the terminal window.
- `pwd`: Current terminal path.
- `mkdir`: Create a folder.
- `rm`: Delete file.
- `rmdir`: Delete folder.
- `mv`: Move file.
- `cp`: Copy file.
- `wget`: Command to download files from the internet.
- `sudo`: Run a command Admin Mode.
- `apt-get`: Install Package Ubuntu.
- `nano`: Text editor in terminal.
- `sudo apt update`: refresh the package index (the list of available packages and versions).
- `sudo apt upgrade`: upgrade all installed packages to their newest available versions.
Nota: `man`: Open a manual of a command.
## Introducción a ROS1
- Objetivo: comprender el funcionamiento y las características preliminares de ROS1
- Procedimiento:
1. ROS - Master
2. ROS - Node operations
3. ROS - Topic operations
4. ROS - Launch
5. ROS - Demo
<img src="imag/ROSArq.png" width=400 height=400 />
### CommandLine
- `roscore`: inicia el ROS-Master, el servidor de parámetros y el nodo de logging.
```bash
$ roscore
```
- `rosnode`: comando que explora características de los nodos. Cada comando tiene una serie de modificadores o atributos que permiten realizar operaciones. Para identificar todos los atributos disponibles, se utiliza el modificador `-h` signficado "*help*"
- **Ej**. Listar los nodos activos.
```bash
$ rosnode list
```
- `rostopic`: comando que provee información de los *topics* activos.
- **Ej.** Listar *topics* activos.
```bash
$ rostopic list
```
- **Ej**. Imprimir información del *topic*.
```bash
$ rostopic echo /<topic_name>
```
- **Ej.** Publicar información en un *topic*.
```bash
$ rostopic pub <topic_name> <msg_type> data
$ rostopic pub /hello std_msgs/String "Hello"
```
```bash
$ rostopic pub -1 /hello std_msgs/String "Hello"
$ rostopic pub /hello std_msgs/String -r1 -- "Hello"
```
- `rosmsg`: comando que provee información sobre los tipos de mensajes.
- **Ej.** mensaje tipo *string*.
```bash
$ rosmsg show std_msgs/String
```
- **Ej.** mensaje tipo *Twist*.
```bash
$ rosmsg show geometry_msgs/Twist
```
- `rosrun`: comando que permite ejecutar nodos
```bash
$ rosrun <ros_pkg_name> <ros_node_name>
$ rosrun roscpp_tutorials talker
```
- `rqt_graph`: nodo que construye un gráfico de los *topic* y *node* activos.
```bash
$ rqt_graph
```
### ROS Demo (Talker and Listener)
- Iniciar ROS Master.
```bash
$ roscore
```
- Ejecutar nodos.
- Iniciar el nodo "talker"
```bash
$ rosrun roscpp_tutorials talker
```
- Ejecutar el nodo "listener"
```bash
$ rosrun roscpp_tutorials listener
```
- Ejecutar comandos para *topics*
```bash
$ rostopic list
```
### ROS Demo (Turtlesim)
- Iniciar ROS Master.
```bash
$ roscore
```
- Ejecutar nodos.
- Iniciar el nodo "turtlesim_node"
```bash
$ rosrun turtlesim turtlesim_node
```
- Ejecutar el nodo "teleop"
```bash
$ rosrun turtlesim turtle_teleop_key
```
- Ejecutar el nodo "movimiento autónomo"
```bash
$ rosrun turtlesim draw_square
```
- Ejecutar comandos para *topics*
```bash
$ rostopic list
```
```bash
$ rostopic pub -1 /turtle1/cmd_vel geometry_msgs/Twist '[2.0,0.0,0.0]' '[0.0,0.0,1.0]'
$ rostopic pub /turtle1/cmd_vel geometry_msgs/Twist -r1 -- '[2.0,0.0,0.0]' '[0.0,0.0,1.0]'
```
- Ejecutar el nodo "*Graph*"
```bash
$ rosrun rqt_graph rqt_graph
```
## Referencias
- Understanding ROS Nodes: http://wiki.ros.org/ROS/Tutorials/UnderstandingNodes
- Understanding ROS Topics: http://wiki.ros.org/ROS/Tutorials/UnderstandingTopics
- Lentin Joseph, Robot Operating System (ROS) for Absolute Beginners, Apress. 2018.
- Ramkumar Gandhinathan, Lentin Joseph, ROS Robotics Projects, 2ed, Packt, 2019.
- Morgan Quigley, Brian Gerkey, and William D. Smart., Programming Robots with ROS, O’ Reilly Media, Inc., 2016.
| github_jupyter |
```
# Project-local modules providing effective-information (EI) and causal-emergence
# (CE) network routines; wildcard imports bring nx/np helpers into scope as well.
from ei_net import *
from ce_net import *
import matplotlib.pyplot as plt
import datetime as dt
%matplotlib inline
##########################################
############ PLOTTING SETUP ##############
EI_cmap = "Greys"                       # default colormap for EI heatmaps
where_to_save_pngs = "../figs/pngs/"    # figure output directories
where_to_save_pdfs = "../figs/pdfs/"
save = True                             # toggle writing figures to disk
plt.rc('axes', axisbelow=True)          # draw gridlines beneath plot elements
##########################################
##########################################
```
# The emergence of informative higher scales in complex networks
# Chapter 06 - Causal Emergence and the Emergence of Scale
## Network Macroscales
First we must introduce how to recast a network, $G$, at a higher scale. This is represented by a new network, $G_M$. Within $G_M$, a micro-node is a node that was present in the original $G$, whereas a macro-node is defined as a node, $v_M$, that represents a subgraph, $S_i$, from the original $G$ (replacing the subgraph within the network). Since the original network has been dimensionally reduced by grouping nodes together, $G_M$ will always have fewer nodes than $G$.
A macro-node $\mu$ is defined by some $W^{out}_{\mu}$, derived from the edge weights of the various nodes within the subgraph it represents. One can think of a macro-node as being a summary statistic of the underlying subgraph's behavior, a statistic that takes the form of a single node. Ultimately there are many ways of representing a subgraph, that is, building a macro-node, and some ways are more accurate than others in capturing the subgraph's behavior, depending on the connectivity. To decide whether or not a macro-node is an accurate summary of its underlying subgraph, we check whether random walkers behave identically on $G$ and $G_M$. We do this because many important analyses and algorithms---such as using PageRank for determining a node's centrality or InfoMap for community discovery---are based on random walking.
Specifically, we define the *inaccuracy* of a macroscale as the Kullback-Leibler divergence between the expected distribution of random walkers on $G$ vs. $G_M$, given some identical initial distribution on each. The expected distribution over $G$ at some future time $t$ is $P_m(t)$, while the distribution over $G_M$ at some future time $t$ is $P_M(t)$. To compare the two, the distribution $P_m(t)$ is summed over the same nodes in the macroscale $G_M$, resulting in the distribution $P_{M|m}(t)$ (the microscale given the macroscale). We can then define the macroscale inaccuracy over some series of time steps $T$ as:
$$ \text{inaccuracy} = \sum_{t=0}^T \text{D}_{_{KL}}[P_{M}(t) || P_{M|m}(t)] $$
This measure addresses the extent to which a random dynamical process on the microscale topology will be recapitulated on a dimensionally-reduced topology.
What constitutes an accurate macroscale depends on the connectivity of the subgraph that gets grouped into a macro-node. The $W^{out}_{\mu}$ can be constructed based on the collective $W^{out}$ of the subgraph. For instance, in some cases one could just coarse-grain a subgraph by using its average $W^{out}$ as the $W^{out}_{\mu}$ of some new macro-node $\mu$. However, it may be that the subgraph has dependencies not captured by such a coarse-grain. Indeed, this is similar to the recent discovery that when constructing networks from data it is often necessary to explicitly model higher-order dependencies by using higher-order nodes so that the dynamics of random walks to stay true to the original data. We therefore introduce *higher-order macro-nodes* (HOMs), which draw on similar techniques to accurately represent subgraphs as single nodes.
____________
<img src="../figs/pngs/CoarseGraining.png" width=800>
- Top: The original network, $G$ along with its adjacency matrix (left). The shaded oval indicates that subgraph $S$ member nodes $v_B$ and $v_C$ will be grouped together, forming a macro-node, ${\mu}$. All macro-nodes are some transformation of the original adjacency matrix via recasting it as a new adjacency matrix (right). The manner of this recasting depends on the type of macro-node.
- Bottom left: The simplest form of a macro-node is when $W^{out}_{\mu}$ is an average of the $W^{out}_{i}$ of each node in the subgraph.
- Bottom center left: A macro-node that represents some path-dependency, such as input from $A$. Here, in averaging to create the $W^{out}_{\mu}$ the out-weights of nodes $v_B$ and $v_C$ are weighted by their input from $v_A$.
- Bottom center right: A macro-node that represents the subgraph's output over the network's stationary dynamics. Each node has some associated ${\pi}_{i}$, which is the probability of ${v}_{i}$ in the stationary distribution of the network. The $W^{out}_{\mu}$ of a $\mu | \pi$ macro-node is created by weighting each $W^{out}_{i}$ of the micro-nodes in the subgraph $S$ by $\frac{{\pi}_{i}}{\sum_{k \in S} {\pi}_{k}}$.
- Bottom right: A macro-node with a single timestep delay between input $\mu | j$ and its output $\mu | \pi$, each constructed using the same techniques as its components. However, $\mu | j$ always deterministically outputs to $\mu | \pi$.
Different subgraph connectivities require different types of HOMs to accurately represent. For instance, HOMs can be based on the input weights to the macro-node, which take the form $\mu | j$. In these cases the $W^{out}_{\mu|j}$ is a weighted average of each node's $W^{out}$ in the subgraph, where the weight is based on the input weight to each node in the subgraph. Another type of HOM that generally leads to accurate macro-nodes over time is when the $W^{out}_{\mu}$ is based on the stationary output from the subgraph to the rest of the network, which we represent as $\mu | \pi$. These types of HOMs may sometimes have minor inaccuracies given some initial state, but will almost always trend toward perfect accuracy as the network approaches its stationary dynamics.
Subgraphs with complex internal dynamics can require a more complex type of HOM in order to preserve the network's accuracy. For instance, in cases where subgraphs have a delay between their inputs and outputs, this can be represented by a combination of $\mu | j$ and $\mu | \pi$, which when combined captures that delay. In these cases the macro-node $\mu$ has two components, one of which acts as a buffer over a timestep. This means that macro-nodes can possess memory even when constructed from networks that are at the microscale memoryless, and in fact this type of HOM is sometimes necessary to accurately capture the microscale dynamics.
We present these types of macro-nodes not as an exhaustive list of all possible HOMs, but rather as examples of how to construct higher scales in a network by representing subgraphs as nodes, and also sometimes using higher-order dependencies to ensure those nodes are accurate. This approach offers a complete generalization of previous work on coarse-grains and also black boxes, while simultaneously solving the previously unresolved issue of macroscale accuracy by using higher-order dependencies. The types of macro-nodes formed by subgraphs also provides substantive information about the network, such as whether the macroscale of a network possesses memory or path-dependency.
## Causal emergence reveals the scale of networks
Causal emergence occurs when a recast network, $G_M$ (a macroscale), has more $EI$ than the original network, $G$ (the microscale). In general, networks with lower effectiveness (low $EI$ given their size) have a higher potential for causal emergence, since they can be recast to reduce their uncertainty. Searching across groupings allows the identification or approximation of a macroscale that maximizes the $EI$.
Checking all possible groupings is computationally intractable for all but the smallest networks. Therefore, in order to find macro-nodes which increase the $EI$, we use a greedy algorithm that groups nodes together and checks if the $EI$ increases. By choosing a node and then pairing it iteratively with its surrounding nodes we can grow macro-nodes until pairings no longer increase the $EI$, and then move on to a new node.
By generating undirected preferential attachment networks and varying the degree of preferential attachment, $\alpha$, we observe a crucial relationship between preferential attachment and causal emergence. One of the central results in network science has been the identification of "scale-free" networks. Our results show that networks that are not "scale-free" can be further separated into micro-, meso-, and macroscales depending on their connectivity. This scale can be identified based on their degree of causal emergence. In cases of sublinear preferential attachment ($\alpha < 1.0$) networks lack higher scales. Linear preferential attachment ($\alpha=1.0$) produces networks that are scale-free, which is the zone of preferential attachment right before the network develops higher scales. Such higher scales only exist in cases of superlinear preferential attachment ($\alpha > 1.0$). And past $\alpha > 3.0$ the network begins to converge to a macroscale where almost all the nodes are grouped into a single macro-node. The greatest degree of causal emergence is found in mesoscale networks, which is when $\alpha$ is between 1.5 and 3.0, when networks possess a rich array of macro-nodes.
Correspondingly the size of $G_M$ decreases as $\alpha$ increases and the network develops an informative higher scale, which can be seen in the ratio of macroscale network size, $N_M$, to the original network size, $N$. As discussed in previous sections, on the upper end of the spectrum of $\alpha$ the resulting network will approximate a hub-and-spoke, star-like network. Star-like networks have higher degeneracy and thus less $EI$, and because of this, we expect that there are more opportunities to increase the network's $EI$ through grouping nodes into macro-nodes. Indeed, the ideal grouping of a star network is when $N_M=2$ and $EI$ is 1 bit. This result is similar to recent advances in spectral coarse-graining that also observe that the ideal coarse-graining of a star network is to collapse it into a two-node network, grouping all the spokes into a single macro-node, which is what happens to star networks that are recast as macroscales.
Our results offer a principled and general approach to such community detection by asking when there is an informational gain from replacing a subgraph with a single node. Therefore we can define *causal communities* as being when a cluster of nodes, or some subgraph, forms a viable macro-node. Fundamentally causal communities represent noise at the microscale. The closer a subgraph is to complete noise, the greater the gain in $EI$ by replacing it with a macro-node. Minimizing the noise in a given network also identifies the optimal scale to represent that network. However, there must be some structure that can be revealed by noise minimization in the first place. In cases of random networks that form a single large component which lacks any such structure, causal emergence does not occur.
____________
## 6.1 Causal Emergence in Preferential Attachment Networks
```
def preferential_attachment_network(N, alpha=1.0, m=1):
    """
    Generates a network based off of a preferential attachment
    growth rule. Under this growth rule, new nodes place their
    $m$ edges to nodes already present in the graph, G, with
    a probability proportional to $k^\alpha$.

    Params
    ------
    N (int): the desired number of nodes in the final network
    alpha (float): the exponent of preferential attachment.
        When alpha is less than 1.0, we describe it as sublinear
        preferential attachment. At alpha > 1.0, it is superlinear
        preferential attachment. And at alpha=1.0, the network was
        grown under linear preferential attachment, as in the case
        of Barabasi-Albert networks.
    m (int): the number of new links that each new node joins
        the network with.

    Returns
    -------
    G (nx.Graph): a graph grown under preferential attachment.
    """
    # Seed with a complete graph on m+1 nodes so every newcomer can place
    # m distinct edges. (The original also created an empty nx.Graph() first,
    # which was immediately overwritten -- dead code, removed.)
    G = nx.complete_graph(m + 1)
    for node_i in range(m + 1, N):
        degrees = np.array(list(dict(G.degree()).values()))
        # Attachment probability proportional to k**alpha, normalized.
        probs = (degrees ** alpha) / sum(degrees ** alpha)
        # Nodes are labeled 0..n-1, so sampling node indices directly is safe.
        eijs = np.random.choice(G.number_of_nodes(),
                                size=(m,), replace=False, p=probs)
        for node_j in eijs:
            G.add_edge(node_i, node_j)
    return G
# Sweep configuration: network sizes, preferential-attachment exponents,
# and repetitions per (N, alpha) pair.
Nvals = sorted([30,60,90,120,150])
alphas= np.linspace(-1,5,25)
Niter = 2
m = 1
# Flat results container; each key accumulates one entry per run.
pa_ce = {'alpha' :[],
         'N_micro':[],
         'N_macro':[],
         'EI_micro':[],
         'EI_macro':[],
         'CE' :[],
         'N_frac' :[],
         'runtime' :[]}
```
### Note: the following cell was run on a super-computing cluster. It is included as an example computation.
```
# Sweep causal emergence over network size and attachment exponent.
# NOTE(review): causal_emergence comes from the project's ce_net module;
# its return appears to be a dict with 'G_macro', 'EI_micro', 'EI_macro'
# keys -- confirm against ce_net.
for N in Nvals:
    for alpha in alphas:
        for _ in range(Niter):
            G = preferential_attachment_network(N,alpha,m)
            startT = dt.datetime.now()
            CE = causal_emergence(G, printt=False)
            finisH = dt.datetime.now()
            diff = finisH-startT
            diff = diff.total_seconds()  # wall-clock runtime in seconds
            pa_ce['alpha'].append(alpha)
            pa_ce['N_micro'].append(N)
            pa_ce['N_macro'].append(CE['G_macro'].number_of_nodes())
            pa_ce['EI_micro'].append(CE['EI_micro'])
            pa_ce['EI_macro'].append(CE['EI_macro'])
            # Causal emergence = EI gain of the macroscale over the microscale.
            pa_ce['CE'].append(CE['EI_macro']-CE['EI_micro'])
            pa_ce['N_frac'].append(CE['G_macro'].number_of_nodes()/N)
            pa_ce['runtime'].append(diff)
# Shallow copy used by the plotting cells below.
NCE = pa_ce.copy()
# Figure: causal emergence vs. attachment exponent alpha, one curve per N,
# with shaded micro/scale-free/meso/macro regime bands.
# NOTE(review): this cell indexes NCE[Nn][alpha] as a dict keyed by N then
# alpha -- the cluster run presumably produced that nested form, not the flat
# NCE built in the example cell above; confirm before re-running locally.
# import cmocean as cmo
# colorz = cmo.cm.amp(np.linspace(0.2,0.9,len(Nvals)))
colorz = plt.cm.viridis(np.linspace(0,0.9,len(Nvals)))
mult=0.95
fig,ax=plt.subplots(1,1,figsize=(5.0*mult,4.5*mult))
plt.subplots_adjust(wspace=0.24, hspace=0.11)
ymax_so_far = 0
xmin_so_far = 0
xmax_so_far = 0
for i,Nn in enumerate(Nvals):
    col = colorz[i]
    means = [np.mean(NCE[Nn][i]['CE']) for i in NCE[Nn].keys()]
    stdvs = [np.std(NCE[Nn][i]['CE']) for i in NCE[Nn].keys()]
    alphs = list(NCE[Nn].keys())
    # Smooth by averaging consecutive pairs of alpha samples.
    alphs = np.array([(alphs[i]+alphs[i+1])/2
                      for i in range(0,len(alphs)-1,2)])
    means = np.array([(means[i]+means[i+1])/2
                      for i in range(0,len(means)-1,2)])
    stdvs = np.array([(stdvs[i]+stdvs[i+1])/2
                      for i in range(0,len(stdvs)-1,2)])
    xmin_so_far = min([xmin_so_far, min(alphs)])
    xmax_so_far = max([xmax_so_far, max(alphs)])
    ymax_so_far = max([ymax_so_far, max(means+stdvs)])
    ax.plot(alphs, means,
            markeredgecolor=col, color=col,
            markerfacecolor='w',
            markeredgewidth=1.5,markersize=5.0,
            linestyle='-',marker='o',linewidth=2.2,label='N = %i'%Nn)
    # +/- 1 std band around the mean curve.
    ax.fill_between(alphs, means-stdvs, means+stdvs,
                    facecolor=col, alpha=0.2,
                    edgecolors='w', linewidth=1)
# Shaded background bands mark the regimes of alpha.
cols = ["#a7d6ca","#dbb9d1","#d6cdae","#a5c9e3"]
ax.fill_between([-2,0.90],[-1,-1],[3,3],
                facecolor=cols[0],alpha=0.3,edgecolors='w',linewidth=0)
ax.fill_between([0.90,1.1],[-1,-1],[3,3],
                facecolor=cols[1],alpha=0.7,edgecolors='w',linewidth=0)
ax.fill_between([1.1,3.0],[-1,-1],[3,3],
                facecolor=cols[2],alpha=0.3,edgecolors='w',linewidth=0)
ax.fill_between([3.0,6],[-1,-1],[3,3],
                facecolor=cols[3],alpha=0.3,edgecolors='w',linewidth=0)
# Tick marks and labels annotating each regime band.
ax.text(-0.500, 2.65, '|', fontsize=14)
ax.text(0.9425, 2.65, '|', fontsize=14)
ax.text(0.9425, 2.72, '|', fontsize=14)
ax.text(0.9425, 2.79, '|', fontsize=14)
ax.text(2.4000, 2.65, '|', fontsize=14)
ax.text(4.2500, 2.65, '|', fontsize=14)
ax.text(-1.1, 2.81,'microscale',fontsize=12)
ax.text(0.35, 2.95,'scale-free',fontsize=12)
ax.text(1.70, 2.81,'mesoscale',fontsize=12)
ax.text(3.45, 2.81,'macroscale',fontsize=12)
ax.set_ylim(-0.025*ymax_so_far,ymax_so_far*1.05)
ax.set_xlim(-1.075,5*1.01)
ax.set_xlabel(r'$\alpha$',fontsize=14)
ax.set_ylabel('Causal emergence',fontsize=14, labelpad=10)
ax.legend(loc=6,framealpha=0.99)
ax.set_xticks(np.linspace(-1,5,7))
ax.set_xticklabels(["%i"%i for i in np.linspace(-1,5,7)])
ax.grid(linestyle='-', linewidth=2.0, color='#999999', alpha=0.3)
if save:
    plt.savefig(
        where_to_save_pngs+\
        'CE_pa_alpha_labs.png',
        dpi=425, bbox_inches='tight')
    plt.savefig(
        where_to_save_pdfs+\
        'CE_pa_alpha_labs.pdf',
        dpi=425, bbox_inches='tight')
plt.show()
# Figure: macro-to-micro size ratio (N_macro / N) vs. alpha on a log y-axis,
# with the same regime bands as the causal-emergence figure.
# Reuses colorz and the nested NCE structure from the previous cells.
mult=0.95
fig,ax=plt.subplots(1,1,figsize=(5.0*mult,4.5*mult))
plt.subplots_adjust(wspace=0.24, hspace=0.11)
ymax_so_far = 0
xmin_so_far = 0
xmax_so_far = 0
for i,Nn in enumerate(Nvals):
    col = colorz[i]
    means = [np.mean(NCE[Nn][i]['N_frac']) for i in NCE[Nn].keys()]
    stdvs = [np.std(NCE[Nn][i]['N_frac']) for i in NCE[Nn].keys()]
    alphs = list(NCE[Nn].keys())
    # Smooth by averaging consecutive pairs of alpha samples.
    alphs = np.array([(alphs[i]+alphs[i+1])/2
                      for i in range(0,len(alphs)-1,2)])
    means = np.array([(means[i]+means[i+1])/2
                      for i in range(0,len(means)-1,2)])
    stdvs = np.array([(stdvs[i]+stdvs[i+1])/2
                      for i in range(0,len(stdvs)-1,2)])
    xmin_so_far = min([xmin_so_far, min(alphs)])
    xmax_so_far = max([xmax_so_far, max(alphs)])
    ymax_so_far = max([ymax_so_far, max(means+stdvs)])
    ax.semilogy(alphs, means, markeredgecolor=col,
                color=col,markerfacecolor='w',
                markeredgewidth=1.5, markersize=5.0,
                linestyle='-',marker='o',linewidth=2.0,
                alpha=0.99,label='N = %i'%Nn)
    ax.fill_between(alphs, means-stdvs, means+stdvs,
                    facecolor=col,alpha=0.2,
                    edgecolors='w',linewidth=1)
# Shaded background bands mark the regimes of alpha.
cols = ["#a7d6ca","#dbb9d1","#d6cdae","#a5c9e3"]
ax.fill_between([-2,0.9],[-1,-1],[3,3],
                facecolor=cols[0],alpha=0.3,edgecolors='w',linewidth=0)
ax.fill_between([0.9,1.1],[-1,-1],[3,3],
                facecolor=cols[1],alpha=0.7,edgecolors='w',linewidth=0)
ax.fill_between([1.1,3.0],[-1,-1],[3,3],
                facecolor=cols[2],alpha=0.3,edgecolors='w',linewidth=0)
ax.fill_between([3.0,6],[-1,-1],[3,3],
                facecolor=cols[3],alpha=0.3,edgecolors='w',linewidth=0)
# Tick marks and labels annotating each regime band.
ax.text(-0.50, 1.036,'|', fontsize=14)
ax.text(0.935, 1.036,'|', fontsize=14)
ax.text(0.935, 1.170,'|', fontsize=14)
ax.text(0.935, 1.320,'|', fontsize=14)
ax.text(2.400, 1.036,'|', fontsize=14)
ax.text(4.250, 1.036,'|', fontsize=14)
ax.text(-1.1, 1.368, 'microscale', fontsize=12)
ax.text(0.35, 1.750, 'scale-free', fontsize=12)
ax.text(1.70, 1.368, 'mesoscale', fontsize=12)
ax.text(3.45, 1.368, 'macroscale', fontsize=12)
ax.set_ylim(0.009*ymax_so_far,ymax_so_far*1.075)
ax.set_xlim(-1.075,5*1.01)
ax.set_xlabel(r'$\alpha$',fontsize=14)
ax.set_ylabel('Size ratio: macro to micro',
              fontsize=14, labelpad=2)
ax.legend(loc=6,framealpha=0.99)
ax.set_xticks(np.linspace(-1,5,7))
ax.set_xticklabels(["%i"%i for i in np.linspace(-1,5,7)])
ax.grid(linestyle='-', linewidth=2.0, color='#999999', alpha=0.3)
if save:
    plt.savefig(
        where_to_save_pngs+\
        'Nfrac_pa_alpha_labs.png',
        dpi=425, bbox_inches='tight')
    plt.savefig(
        where_to_save_pdfs+\
        'Nfrac_pa_alpha_labs.pdf',
        dpi=425, bbox_inches='tight')
plt.show()
_______________
## 6.2 Causal Emergence of Random Networks
```
# Sweep configuration for the Erdos-Renyi experiment: network sizes,
# log-spaced edge probabilities, and repetitions per (N, p) pair.
Ns = [20,30,40,50]
ps = np.round(np.logspace(-3.25,-0.4,31),5)
Niter = 40

def _empty_er_record():
    """Return a fresh, independent results container for one network size."""
    return {'p'       :[],
            'N_micro' :[],
            'N_macro' :[],
            'EI_micro':[],
            'EI_macro':[],
            'CE_mean' :[],
            'CE_stdv' :[],
            'N_frac'  :[],
            'runtime' :[]}

er_ce = _empty_er_record()
# FIX: the original wrote {N: er_ce for N in Ns}, which stored the SAME dict
# object under every key -- appending results for one N would mutate all of
# them. Each size now gets its own independent record.
ER_CE = {N: _empty_er_record() for N in Ns}
```
### Note: the following cell was run on a super-computing cluster. It is included as an example computation.
```
# Sweep causal emergence over ER network size N and edge probability p,
# averaging the CE gain over Niter random graphs per (N, p) pair.
# NOTE(review): indentation reconstructed from the extraction -- the per-p
# appends sit after the inner repetition loop (one summary row per p),
# consistent with CE_mean/CE_stdv aggregating `cee`; the N_macro/EI values
# and runtime then come from the last repetition only. Confirm upstream.
for N in Ns:
    print(N, dt.datetime.now())
    # Fresh results container for this network size.
    er_ce = {'p' :[],
             'N_micro':[],
             'N_macro':[],
             'EI_micro':[],
             'EI_macro':[],
             'CE_mean' :[],
             'CE_stdv' :[],
             'N_frac' :[],
             'runtime' :[]}
    for p in ps:
        print('\t',p)
        cee = []
        for rr in range(Niter):
            G = nx.erdos_renyi_graph(N,p)
            startT = dt.datetime.now()
            CE = causal_emergence(G,printt=False)
            finisH = dt.datetime.now()
            diff = finisH-startT
            diff = diff.total_seconds()
            # Causal emergence = EI gain of the macroscale over the microscale.
            ce = CE['EI_macro']-CE['EI_micro']
            cee.append(ce)
        er_ce['p'].append(p)
        er_ce['N_micro'].append(N)
        er_ce['N_macro'].append(CE['G_macro'].number_of_nodes())
        er_ce['EI_micro'].append(CE['EI_micro'])
        er_ce['EI_macro'].append(CE['EI_macro'])
        er_ce['CE_mean'].append(np.mean(cee))
        er_ce['CE_stdv'].append(np.std( cee))
        er_ce['runtime'].append(diff)
    ER_CE[N] = er_ce.copy()
# Figure: mean causal emergence vs. ER edge probability p (log x-axis),
# averaged across six independent sweep runs ER_CE1..ER_CE6.
# NOTE(review): ER_CE1..ER_CE6 are loaded elsewhere (presumably pickled
# cluster outputs); they are not defined in this notebook view.
# import cmocean as cmo
# colors = cmo.cm.thermal(np.linspace(0.1,0.95,len(Ns)))
colors = plt.cm.viridis(np.linspace(0.0,1,len(Ns)))
i = 0
ymax = 0
# Off-screen vline at x=100 exists only to create the <k>=1 legend entry.
plt.vlines(100, -1, 1,
           label=r'$\langle k \rangle=1$', linestyle='--',
           color="#333333", linewidth=3.5, alpha=0.99)
for N in Ns:
    # Average the CE curves over the six runs.
    CE1 = np.array(ER_CE1[N]['CE_mean'].copy())
    CE2 = np.array(ER_CE2[N]['CE_mean'].copy())
    CE3 = np.array(ER_CE3[N]['CE_mean'].copy())
    CE4 = np.array(ER_CE4[N]['CE_mean'].copy())
    CE5 = np.array(ER_CE5[N]['CE_mean'].copy())
    CE6 = np.array(ER_CE6[N]['CE_mean'].copy())
    CEs = (CE1 + CE2 + CE3 + CE4 + CE5 + CE6)/6
    CEs = list(CEs)
    # Smooth with a sliding pairwise mean, then pin both ends to 0.
    CEs = [(CEs[i] + CEs[i+1])/2 for i in range(0,len(CEs)-1)]
    CEs = [0] + CEs
    CEs.append(0)
    x1 = np.array(ER_CE1[N]['p'].copy())
    x2 = np.array(ER_CE2[N]['p'].copy())
    x3 = np.array(ER_CE3[N]['p'].copy())
    x4 = np.array(ER_CE4[N]['p'].copy())
    x5 = np.array(ER_CE5[N]['p'].copy())
    x6 = np.array(ER_CE6[N]['p'].copy())
    xx = (x1 + x2 + x3 + x4 + x5 + x6)/6
    xx = list(xx)
    xx = [(xx[i] + xx[i+1])/2 for i in range(0,len(xx)-1)]
    # Extend the x-range to cover the pinned endpoints above.
    xx = [1e-4] + xx
    xx.append(1)
    std1 = np.array(ER_CE1[N]['CE_stdv'].copy())
    std2 = np.array(ER_CE2[N]['CE_stdv'].copy())
    std3 = np.array(ER_CE3[N]['CE_stdv'].copy())
    std4 = np.array(ER_CE4[N]['CE_stdv'].copy())
    std5 = np.array(ER_CE5[N]['CE_stdv'].copy())
    std6 = np.array(ER_CE6[N]['CE_stdv'].copy())
    stds = (std1 + std2 + std3 + std4 + std5 + std6)/6
    stds = list(stds)
    stds = [(stds[i] + stds[i+1])/2 for i in range(0,len(stds)-1)]
    stds = [0] + stds
    stds.append(0)
    ytop = np.array(CEs) + np.array(stds)
    ybot = np.array(CEs) - np.array(stds)
    ybot[ybot<0] = 0  # clamp the lower band at zero
    ymax = max([ymax, max(ytop)])
    plt.semilogx(xx, CEs, label='N=%i'%N,
                 color=colors[i], linewidth=4.0, alpha=0.95)
    # Dashed line at p = 1/(N-1), i.e. mean degree <k> = 1.
    plt.vlines(1/(N-1), -1, 1, linestyle='--',
               color=colors[i], linewidth=3.5, alpha=0.95)
    i += 1
plt.xlim(2.5e-4,max(xx))
plt.ylim(-0.0015, ymax*0.6)
plt.grid(linestyle='-', linewidth=2.5, alpha=0.3, color='#999999')
plt.ylabel('Causal emergence', fontsize=14)
plt.xlabel(r'$p$', fontsize=14)
plt.legend(fontsize=12)
if save:
    plt.savefig(
        where_to_save_pngs+\
        'CE_ER_p_N.png', dpi=425, bbox_inches='tight')
    plt.savefig(
        where_to_save_pdfs+\
        'CE_ER_p_N.pdf', dpi=425, bbox_inches='tight')
plt.show()
# Variant of the previous figure with +/- std shaded bands and the full
# (unscaled) y-range; output files carry the 'N0' suffix.
# import cmocean as cmo
# colors = cmo.cm.thermal(np.linspace(0.1,0.95,len(Ns)))
colors = plt.cm.viridis(np.linspace(0.0,1,len(Ns)))
i = 0
ymax = 0
# Off-screen vline at x=100 exists only to create the <k>=1 legend entry.
plt.vlines(100, -1, 1, label=r'$\langle k \rangle=1$', linestyle='--',
           color="#333333", linewidth=3.5, alpha=0.99)
for N in Ns:
    # Average the CE curves over the six runs.
    CE1 = np.array(ER_CE1[N]['CE_mean'].copy())
    CE2 = np.array(ER_CE2[N]['CE_mean'].copy())
    CE3 = np.array(ER_CE3[N]['CE_mean'].copy())
    CE4 = np.array(ER_CE4[N]['CE_mean'].copy())
    CE5 = np.array(ER_CE5[N]['CE_mean'].copy())
    CE6 = np.array(ER_CE6[N]['CE_mean'].copy())
    CEs = (CE1 + CE2 + CE3 + CE4 + CE5 + CE6)/6
    CEs = list(CEs)
    # Smooth with a sliding pairwise mean, then pin both ends to 0.
    CEs = [(CEs[i] + CEs[i+1])/2 for i in range(0,len(CEs)-1)]
    CEs = [0] + CEs
    CEs.append(0)
    x1 = np.array(ER_CE1[N]['p'].copy())
    x2 = np.array(ER_CE2[N]['p'].copy())
    x3 = np.array(ER_CE3[N]['p'].copy())
    x4 = np.array(ER_CE4[N]['p'].copy())
    x5 = np.array(ER_CE5[N]['p'].copy())
    x6 = np.array(ER_CE6[N]['p'].copy())
    xx = (x1 + x2 + x3 + x4 + x5 + x6)/6
    xx = list(xx)
    xx = [(xx[i] + xx[i+1])/2 for i in range(0,len(xx)-1)]
    xx = [1e-4] + xx
    xx.append(1)
    std1 = np.array(ER_CE1[N]['CE_stdv'].copy())
    std2 = np.array(ER_CE2[N]['CE_stdv'].copy())
    std3 = np.array(ER_CE3[N]['CE_stdv'].copy())
    std4 = np.array(ER_CE4[N]['CE_stdv'].copy())
    std5 = np.array(ER_CE5[N]['CE_stdv'].copy())
    std6 = np.array(ER_CE6[N]['CE_stdv'].copy())
    stds = (std1 + std2 + std3 + std4 + std5 + std6)/6
    stds = list(stds)
    stds = [(stds[i] + stds[i+1])/2 for i in range(0,len(stds)-1)]
    stds = [0] + stds
    stds.append(0)
    ytop = np.array(CEs) + np.array(stds)
    ybot = np.array(CEs) - np.array(stds)
    ybot[ybot<0] = 0  # clamp the lower band at zero
    ymax = max([ymax, max(ytop)])
    plt.semilogx(xx, CEs, label='N=%i'%N, color=colors[i],
                 linewidth=4.0, alpha=0.95)
    # +/- 1 std band around the averaged curve.
    plt.fill_between(xx, ytop, ybot, facecolor=colors[i],
                     linewidth=2.0, alpha=0.35, edgecolor='w')
    # Dashed line at p = 1/(N-1), i.e. mean degree <k> = 1.
    plt.vlines(1/(N-1), -1, 1, linestyle='--',
               color=colors[i], linewidth=3.5, alpha=0.95)
    i += 1
plt.xlim(2.5e-4,max(xx))
plt.ylim(-0.0015, ymax)
plt.grid(linestyle='-', linewidth=2.5,
         alpha=0.3, color='#999999')
plt.ylabel('Causal emergence', fontsize=14)
plt.xlabel(r'$p$', fontsize=14)
plt.legend(fontsize=12)
if save:
    plt.savefig(
        where_to_save_pngs+\
        'CE_ER_p_N0.png', dpi=425, bbox_inches='tight')
    plt.savefig(
        where_to_save_pdfs+\
        'CE_ER_p_N0.pdf', dpi=425, bbox_inches='tight')
plt.show()
# Derive the approximate mean degree <k> = p * n for every stored p value,
# in each of the six independent ER sweep result sets.
# (NOTE(review): exact ER mean degree is p*(n-1); p*n matches the original.)
for _results in (ER_CE1, ER_CE2, ER_CE3, ER_CE4, ER_CE5, ER_CE6):
    for n in _results.keys():
        _results[n]['k'] = np.array(_results[n]['p'])*n
# Ensemble-averaged causal emergence vs. mean degree <k>, one curve per
# network size N, with a shaded band of +/- one averaged standard deviation.
colors = plt.cm.viridis(np.linspace(0.0, 1, len(Ns)))
i = 0
ymax = 0
for N in Ns:
    # Average the curve, abscissa and std-dev over the six independent runs.
    CEs = (np.array(ER_CE1[N]['CE_mean']) + np.array(ER_CE2[N]['CE_mean'])
           + np.array(ER_CE3[N]['CE_mean']) + np.array(ER_CE4[N]['CE_mean'])
           + np.array(ER_CE5[N]['CE_mean']) + np.array(ER_CE6[N]['CE_mean'])) / 6
    xx = (np.array(ER_CE1[N]['k']) + np.array(ER_CE2[N]['k'])
          + np.array(ER_CE3[N]['k']) + np.array(ER_CE4[N]['k'])
          + np.array(ER_CE5[N]['k']) + np.array(ER_CE6[N]['k'])) / 6
    stds = (np.array(ER_CE1[N]['CE_stdv']) + np.array(ER_CE2[N]['CE_stdv'])
            + np.array(ER_CE3[N]['CE_stdv']) + np.array(ER_CE4[N]['CE_stdv'])
            + np.array(ER_CE5[N]['CE_stdv']) + np.array(ER_CE6[N]['CE_stdv'])) / 6
    # Midpoint-smooth each series and anchor the left end of the curve.
    CEs = [0] + [(a + b) / 2 for a, b in zip(CEs, CEs[1:])]
    xx = [1e-4] + [(a + b) / 2 for a, b in zip(xx, xx[1:])]
    stds = [0] + [(a + b) / 2 for a, b in zip(stds, stds[1:])]
    ytop = np.array(CEs) + np.array(stds)
    ybot = np.array(CEs) - np.array(stds)
    ybot[ybot < 0] = 0  # causal emergence cannot be negative
    ymax = max(ymax, ytop.max())
    plt.semilogx(xx, CEs, label='N=%i' % N,
                 color=colors[i], linewidth=4.0, alpha=0.95)
    plt.fill_between(xx, ytop, ybot,
                     facecolor=colors[i],
                     linewidth=2.0, alpha=0.3, edgecolor='w')
    i += 1
# Reference line at <k> = 1.
plt.vlines(1, -1, 1, linestyle='--', label=r'$\langle k \rangle=1$',
           color='k', linewidth=3.0, alpha=0.95)
plt.xlim(1.0e-2, max(xx))
plt.ylim(-0.0015, ymax*1.01)
plt.grid(linestyle='-', linewidth=2.5, alpha=0.3, color='#999999')
plt.ylabel('Causal emergence', fontsize=14)
plt.xlabel(r'$\langle k \rangle$', fontsize=14)
plt.legend(fontsize=12)
if save:
    plt.savefig(where_to_save_pngs + 'CE_ER_k_N0.png',
                dpi=425, bbox_inches='tight')
    plt.savefig(where_to_save_pdfs + 'CE_ER_k_N0.pdf',
                dpi=425, bbox_inches='tight')
plt.show()
# Same ensemble-averaged CE-vs-<k> figure, but without the shaded band and
# with a tighter vertical range (ymax * 0.6).
colors = plt.cm.viridis(np.linspace(0.0, 1, len(Ns)))
i = 0
ymax = 0
for N in Ns:
    # Average the curve, abscissa and std-dev over the six independent runs.
    CEs = (np.array(ER_CE1[N]['CE_mean']) + np.array(ER_CE2[N]['CE_mean'])
           + np.array(ER_CE3[N]['CE_mean']) + np.array(ER_CE4[N]['CE_mean'])
           + np.array(ER_CE5[N]['CE_mean']) + np.array(ER_CE6[N]['CE_mean'])) / 6
    xx = (np.array(ER_CE1[N]['k']) + np.array(ER_CE2[N]['k'])
          + np.array(ER_CE3[N]['k']) + np.array(ER_CE4[N]['k'])
          + np.array(ER_CE5[N]['k']) + np.array(ER_CE6[N]['k'])) / 6
    stds = (np.array(ER_CE1[N]['CE_stdv']) + np.array(ER_CE2[N]['CE_stdv'])
            + np.array(ER_CE3[N]['CE_stdv']) + np.array(ER_CE4[N]['CE_stdv'])
            + np.array(ER_CE5[N]['CE_stdv']) + np.array(ER_CE6[N]['CE_stdv'])) / 6
    # Midpoint-smooth each series and anchor the left end of the curve.
    CEs = [0] + [(a + b) / 2 for a, b in zip(CEs, CEs[1:])]
    xx = [1e-4] + [(a + b) / 2 for a, b in zip(xx, xx[1:])]
    stds = [0] + [(a + b) / 2 for a, b in zip(stds, stds[1:])]
    # The band is not drawn here; ytop is still needed to size the y-axis.
    ytop = np.array(CEs) + np.array(stds)
    ybot = np.array(CEs) - np.array(stds)
    ybot[ybot < 0] = 0
    ymax = max(ymax, ytop.max())
    plt.semilogx(xx, CEs, label='N=%i' % N,
                 color=colors[i], linewidth=4.0, alpha=0.95)
    i += 1
# Reference line at <k> = 1.
plt.vlines(1, -1, 1, linestyle='--',
           label=r'$\langle k \rangle=1$',
           color='k', linewidth=3.0, alpha=0.95)
plt.xlim(1.0e-2, max(xx))
plt.ylim(-0.0015, ymax*0.6)
plt.grid(linestyle='-', linewidth=2.5, alpha=0.3, color='#999999')
plt.ylabel('Causal emergence', fontsize=14)
plt.xlabel(r'$\langle k \rangle$', fontsize=14)
plt.legend(fontsize=12)
if save:
    plt.savefig(where_to_save_pngs + 'CE_ER_k.png',
                dpi=425, bbox_inches='tight')
    plt.savefig(where_to_save_pdfs + 'CE_ER_k.pdf',
                dpi=425, bbox_inches='tight')
plt.show()
```
## End of Chapter 06. In [Chapter 07](https://nbviewer.jupyter.org/github/jkbren/einet/blob/master/code/Chapter%2007%20-%20Estimating%20Causal%20Emergence%20in%20Real%20Networks.ipynb) we'll estimate causal emergence in real networks.
_______________
| github_jupyter |
<a href="https://colab.research.google.com/github/Lord-Kanzler/DS-Unit-2-Linear-Models/blob/master/module3-ridge-regression/LS_DS_213_assignment_ALEX_KAISER.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science
*Unit 2, Sprint 1, Module 3*
---
# Ridge Regression
## Assignment
We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
But not just for condos in Tribeca...
- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.
- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
- [ ] Do one-hot encoding of categorical features.
- [ ] Do feature selection with `SelectKBest`.
- [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set)
- [ ] Get mean absolute error for the test set.
- [ ] As always, commit your notebook to your fork of the GitHub repo.
The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
## Stretch Goals
Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.
- [ ] Add your own stretch goal(s) !
- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥
- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).
- [ ] Learn more about feature selection:
- ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
- [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
- [mlxtend](http://rasbt.github.io/mlxtend/) library
- scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
- [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.
- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
```
%%capture
import sys

# Point DATA_PATH at the hosted copy of the dataset on Colab (and install
# category_encoders there), or at the local repo copy otherwise.
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'

# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import pandas as pd
import pandas_profiling

# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')

# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# SALE_PRICE was read as strings: strip '$', '-' and ',' then cast to int.
# NOTE(review): before pandas 1.4, str.replace treated the pattern as a
# regex by default, where '$' is an end-of-string anchor (a no-op replace);
# confirm the pandas version, or pass regex=False explicitly.
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$','')
    .str.replace('-','')
    .str.replace(',','')
    .astype(int)
)

# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)

# Reduce cardinality for NEIGHBORHOOD feature:
# get a list of the top 10 neighborhoods ...
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# ... and at locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'

import numpy as np

# Quick exploratory look at the cleaned data.
print(df.shape)
df.head()
df.describe(include='all')
df.dtypes
df['SALE_PRICE'].describe()
df['SALE_PRICE'][23035]
# Subset to single-family dwellings within the assignment's price range:
# "more than 100 thousand and less than 2 million".
df = df.copy()
# BUG FIX: the upper bound was 20000000 ($20M) instead of the specified
# $2M, and the lower bound was inclusive (>=) rather than "more than".
df_subset = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') &
               (df['SALE_PRICE'] > 100000) &
               (df['SALE_PRICE'] < 2000000)]
df_subset.head()
df_subset['SALE_PRICE'].describe()
df_subset['SALE_PRICE'].shape
# One-hot encoding prep: inspect dtypes before transforming.
df_subset.dtypes
df_subset.head()

# Parse SALE_DATE strings into datetimes so we can split by month later.
df_subset['SALE_DATE'] = pd.to_datetime(df_subset['SALE_DATE'])
df_subset.dtypes

# Drop identifier-like / high-cardinality columns that won't help the model.
df_subset.drop(['EASE-MENT','ADDRESS', 'APARTMENT_NUMBER','BUILDING_CLASS_AT_TIME_OF_SALE'], axis= 1, inplace=True)

# LAND_SQUARE_FEET: strip thousands separators and cast to int
# (the original comment said "float", but the cast here is to int).
df_subset['LAND_SQUARE_FEET'] = df_subset['LAND_SQUARE_FEET'].str.replace(',', '').astype(int)
df_subset['LAND_SQUARE_FEET'].head()
print(df_subset.shape)
df_subset.dtypes
# Chronological split: train on sales before April 2019, test on April 2019
# and later, per the assignment.
import datetime as dt

cutoff = dt.datetime(2019, 4, 1)
train = df_subset[df_subset['SALE_DATE'] < cutoff]
test = df_subset[df_subset['SALE_DATE'] >= cutoff]

# Separate the target from the feature matrix in both halves.
target = 'SALE_PRICE'
features = train.columns.drop([target])
X_train, y_train = train[features], train[target]
X_test, y_test = test[features], test[target]
# One-hot encode categorical features; fit on train only to avoid leakage.
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train = encoder.fit_transform(X_train)
X_train.head()

# Apply the already-fitted encoder to the test set (no refitting).
X_test = encoder.transform(X_test)
X_test.head()

# SALE_DATE was only needed for the chronological split; drop it before modeling.
X_train.drop(['SALE_DATE'], axis= 1, inplace=True)
X_test.drop(['SALE_DATE'], axis= 1, inplace=True)
df_subset.shape, X_train.shape, X_test.shape
# Univariate feature selection with SelectKBest (F-test regression scores).
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)

from sklearn.feature_selection import SelectKBest, f_regression

selector = SelectKBest(score_func=f_regression, k=35)
X_train_selected = selector.fit_transform(X_train, y_train)
X_train_selected.shape

# Report which features the selector kept and which it discarded.
selected_mask = selector.get_support()
all_names = X_train.columns
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask]
print('Features selected:')
for name in selected_names:
    print(name)
print('\n')
print('Features not selected:')
for name in unselected_names:
    print(name)

# Sweep k from 1 to all features: fit a plain linear regression on each
# selected subset and report the test-set MAE to choose k.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error

for k in range(1, len(X_train.columns)+1):
    print(f'{k} features')
    selector = SelectKBest(score_func=f_regression, k=k)
    X_train_selected = selector.fit_transform(X_train, y_train)
    X_test_selected = selector.transform(X_test)
    model = LinearRegression()
    model.fit(X_train_selected, y_train)
    y_pred = model.predict(X_test_selected)
    mae = mean_absolute_error(y_test, y_pred)
    print(f'Test Mean Absolute Error: ${mae:,.0f} \n')
```
35 features
Test Mean Absolute Error: $218,793
```
# imports
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Ridge
from IPython.display import display, HTML
from sklearn.linear_model import RidgeCV
# Ridge regression over a log-spaced grid of regularization strengths.
# BUG FIX: the original grid [0.001, 0.01, 0.1, 1.0, 1, 100.0, 1000.0]
# contained 1.0 twice; the stray "1" was clearly meant to be 10.0 to
# complete the log scale.
for alpha in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]:
    # Fit Ridge Regression model for this alpha.
    display(HTML(f'Ridge Regression, with alpha={alpha}'))
    # NOTE(review): Ridge(normalize=True) was deprecated in scikit-learn 1.0
    # and removed in 1.2; on modern versions scale features with
    # StandardScaler in a pipeline instead. Kept as-is to match the
    # notebook's sklearn version — confirm before upgrading.
    model = Ridge(alpha=alpha, normalize=True)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    # Report test-set mean absolute error for this alpha.
    mae = mean_absolute_error(y_test, y_pred)
    display(HTML(f'Test Mean Absolute Error: ${mae:,.0f}'))

    # Plot the sorted coefficients on a shared x-scale across alphas so the
    # shrinkage effect of increasing alpha is visible.
    coefficients = pd.Series(model.coef_, X_train.columns)
    plt.figure(figsize=(16,8))
    coefficients.sort_values().plot.barh(color='grey')
    plt.xlim(-700000,7000000)
    plt.show()

# Inspect the coefficients of the last (largest-alpha) fit.
coefficients = pd.Series(model.coef_, X_train.columns)
coefficients
```
| github_jupyter |
```
# HIDDEN
from datascience import *
from prob140 import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
%matplotlib inline
import math
from scipy import stats
```
## Moment Generating Functions ##
The probability mass function and probability density, cdf, and survival functions are all ways of specifying the probability distribution of a random variable. They are all defined as probabilities or as probability per unit length, and thus have natural interpretations and visualizations.
But there are also more abstract ways of describing distributions. One that you have encountered is the probability generating function (pgf), which we defined for random variables with finitely many non-negative integer values.
We now define another such *transform* of a distribution. More general than the pgf, it is a powerful tool for studying distributions.
Let $X$ be a random variable. The *moment generating function* (mgf) of $X$ is a function defined on the real numbers by the formula
$$
M_X(t) ~ = ~ E(e^{tX})
$$
for all $t$ for which the expectation is finite. It is a fact (which we will not prove) that the domain of the mgf has to be an interval, not necessarily finite but necessarily including 0 because $M_X(0) = 1$.
For $X$ with finitely many non-negative integer values, we had defined the pgf by $G_X(s) = E(s^X)$. Notice that this is a special case of the mgf with $s = e^t$ and hence positive. For a random variable $X$ that has both a pgf $G_X$ and an mgf $M_X$, the two functions are related by $M_X(\log(s)) = G_X(s)$. Therefore the properties of $M_X$ near 0 reflect the properties of $G_X$ near 1.
This section presents three ways in which the mgf is useful. Other ways are demonstrated in the subsequent sections of this chapter. Much of what we say about mgf's will not be accompanied by complete proofs as the math required is beyond the scope of this class. But the results should seem reasonable, even without formal proofs.
We will list the three ways first, and then use them all in examples.
### Generating Moments ###
For non-negative integers $k$, the expectation $E(X^k)$ is called *$k$th moment* of $X$. You saw in Data 8 and again in this course that the mean $E(X)$ is the center of gravity of the probability histogram of $X$. In physics, the center of mass is called the *first moment*. The terminology of moments is used in probability theory as well.
In this course we are only going to work with mgf's that are finite in some interval around 0. The interval could be the entire real line. It is a fact that if the mgf is finite around 0 (not just to one side of 0), then all the moments exist.
Expand $e^{tX}$ to see that
$$
\begin{align*}
M_X(t) ~ &= ~ E \big{(} 1 + t \frac{X}{1!} + t^2 \frac{X^2}{2!} + t^3 \frac{X^3}{3!} + \cdots \big{)} \\ \\
&= ~ 1 + t \frac{E(X)}{1!} + t^2 \frac{E(X^2)}{2!} + t^3 \frac{E(X^3)}{3!} + \cdots
\end{align*}
$$
by blithely switching the expectation and the infinite sum. This requires justification, which we won't go into.
Continue to set aside questions about whether we can switch infinite sums with other operations. Just go ahead and differentiate $M_X$ term by term. Let $M_X^{(n)}$ denote the $n$th derivative. Then
$$
M_X^{(1)} (t) ~ = ~ \frac{d}{dt} M_X(t) ~ = \frac{E(X)}{1!} + 2t \frac{E(X^2)}{2!} + 3t^2 \frac{E(X^3)}{3!} + \cdots
$$
and hence
$$
M_X^{(1)} (0) ~ = ~ E(X)
$$
Now differentiate $M_X^{(1)}$ to see that $M_X^{(2)}(0) = E(X^2)$, and, by induction,
$$
M_X^{(n)} (0) ~ = ~ E(X^n), ~~~~ n = 1, 2, 3, \ldots
$$
Hence we can *generate the moments of $X$* by evaluating successive derivatives of $M_X$ at $t=0$. This is one way in which mgf's are helpful.
### Identifying the Distribution ###
In this class we have made heavy use of the first and second moments, and no use at all of the higher moments. That will continue to be the case. But mgf's do involve all the moments, and this results in a property that is very useful for proving facts about distributions. This property is valid if the mgf exists in an interval around 0, which we assumed earlier in this section.
**If two distributions have the same mgf, then they must be the same distribution.** For example, if you recognize the mgf of a random variable as the mgf of a normal distribution, then the random variable must be normal.
By contrast, if you know the expectation of a random variable you can't identify the distribution of the random variable; even if you know both the mean and the SD (equivalently, the first and second moments), you can't identify the distribution. But if you know the moment generating function, and hence all the moments, then you can.
### Working Well with Sums ###
The third reason mgf's are useful is that like the pgf, the mgf of the sum of independent random variables is easily computed as a product.
Let $X$ and $Y$ be independent. Then
$$
M_{X+Y} (t) ~ = ~ E(e^{t(X+Y)}) ~ = ~ E(e^{tX} \cdot e^{tY})
$$
So if $X$ and $Y$ are independent,
$$
M_{X+Y}(t) ~ = ~ M_X(t) M_Y(t)
$$
It's time for some examples. Remember that the mgf of $X$ is the expectation of a function of $X$. In some cases we will calculate it using the non-linear function rule for expectations. In other cases we will use the multiplicative property of the mgf of the sum of independent random variables.
### MGFs of Some Discrete Random Variables ###
#### Bernoulli $(p)$ ####
$P(X = 1) = p$ and $P(X = 0) = 1 - p = q$. So
$$
M_X(t) ~ = ~ qe^{t \cdot 0} + pe^{t \cdot 1} ~ = ~ q + pe^t ~ = ~ 1 + p(e^t - 1) ~~~ \text{for all } t
$$
#### Binomial $(n, p)$ ####
A binomial random variable is the sum of $n$ i.i.d. indicators. So
$$
M_X(t) ~ = ~ (q + pe^t)^n ~~~ \text{for all } t
$$
#### Poisson $(\mu)$ ####
This one is an exercise.
$$
M_X(t) ~ = ~ e^{\mu(e^t - 1)} ~~~ \text{for all } t
$$
You can also use this to show that the sum of independent Poisson variables is Poisson.
### MGF of a Gamma $(r, \lambda )$ Random Variable ###
Let $X$ have the gamma $(r, \lambda)$ distribution. Then
$$
\begin{align*}
M_X(t) ~ &= ~ \int_0^\infty e^{tx} \frac{\lambda^r}{\Gamma(r)} x^{r-1} e^{-\lambda x} dx \\ \\
&= ~ \frac{\lambda^r}{\Gamma(r)} \int_0^\infty x^{r-1} e^{-(\lambda - t)x} dx \\ \\
&= ~ \frac{\lambda^r}{\Gamma(r)} \cdot \frac{\Gamma(r)}{(\lambda - t)^r} ~~~~ t < \lambda \\ \\
&= \big{(} \frac{\lambda}{\lambda - t} \big{)}^r ~~~~ t < \lambda
\end{align*}
$$
#### Sums of Independent Gamma Variables with the Same Rate ####
If $X$ has gamma $(r, \lambda)$ distribution and $Y$ independent of $X$ has gamma $(s, \lambda)$ distribution, then
$$
\begin{align*}
M_{X+Y} (t) ~ &= ~ \big{(} \frac{\lambda}{\lambda - t} \big{)}^r \cdot \big{(} \frac{\lambda}{\lambda - t} \big{)}^s ~~~~ t < \lambda \\ \\
&= ~ \big{(} \frac{\lambda}{\lambda - t} \big{)}^{r+s} ~~~~ t < \lambda
\end{align*}
$$
That's the mgf of the gamma $(r+s, \lambda)$ distribution. Because the mgf identifies the distribution, $X+Y$ must have the gamma $(r+s, \lambda)$ distribution.
| github_jupyter |
# Test differentation
Test differentiation of distance functions, by implementing gradient descent.
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections, lines, markers, path, patches
%matplotlib inline
from geometry import *
```
## Set up 1-D hyperboloid manifold
```
# Sample the 1-D hyperboloid x1^2 - x0^2 = 1 (upper sheet), parametrized by
# the hyperbolic angle theta: (sinh(theta), cosh(theta)).
theta = np.linspace(-0.323*np.pi, 0.323*np.pi, 1000)
x0 = np.sinh(theta)
x1 = np.cosh(theta)
x = np.array([np.array([x0i, x1i]) for x0i, x1i in zip(x0, x1)])

# The optimization target is the apex of the hyperboloid; descent starts at
# hyperbolic angle 0.9.
target = np.array([0., 1.])
initial_point = np.array([np.sinh(0.9), np.cosh(0.9)])
def gradient_descent(pt_i, target, differential_fn, geometry="hyperbolic", learning_rate=1.):
    '''
    Take one Riemannian gradient-descent step from pt_i towards target.

    Inputs:
        pt_i: (d+1)-dimensional vector in ambient space co-ordinates,
            the point to evaluate the gradient at.
        target: (d+1)-dimensional vector in ambient space co-ordinates,
            the target point.
        differential_fn: function (pt, target, geometry) -> ambient-space
            gradient of the distance to target.
        geometry: which manifold to work on, e.g. "hyperbolic" or "spherical"
            (forwarded to differential_fn and the geometry helpers).
        learning_rate: dictates how far to step in the gradient direction.

    Returns:
        The updated point on the manifold (ambient co-ordinates): the
        gradient is projected onto the tangent space at pt_i and the
        exponential map carries the scaled step back onto the manifold.
    '''
    # Calculate gradient in ambient space co-ordinates.
    step = differential_fn(pt_i, target, geometry)
    print("step =",step)
    # Project this gradient onto the tangent space at pt_i.
    projection = project_to_tangent(pt_i, step, geometry)
    print("projection on tangent space = ",projection)
    # Map the (negative, scaled) tangent step back onto the manifold.
    return exponential_map(-learning_rate*projection, pt_i, geometry)
    # return exponential_map(-projection, pt_i, geometry)
def error_differential_eucl(u, v, geometry="hyperbolic"):
    '''
    Calculate the differential of the distance between points u and v,
    **with respect to u**, for the given geometry.

    Inputs:
        u: (d+1)-dimensional vector, expressed in ambient space coordinates
        v: (d+1)-dimensional vector, expressed in ambient space coordinates
        geometry: "spherical" or "hyperbolic"; selects the inner product
            used by `dot` and the form of the distance derivative.

    Outputs:
        gradient of the distance in (d+1)-dimensional ambient space coordinates

    Raises:
        ValueError: for an unsupported `geometry` (previously this fell
        through both `if` branches and crashed with a NameError on `coeff`).
    '''
    # Coincident points: the distance is at its minimum, gradient is zero.
    if np.array_equal(u, v):
        return np.zeros(u.shape)

    print("u = {}, v = {}, u.v = {}".format(u, v, dot(u, v, geometry)))
    # Scalar prefactor of d/du arccos(u.v) (spherical) resp.
    # d/du arccosh(u.v) (hyperbolic).
    if geometry == "spherical":
        coeff = -1./np.sqrt(1.-dot(u, v, geometry)**2)
    elif geometry == "hyperbolic":
        coeff = -1./np.sqrt(dot(u, v, geometry)**2-1.)
    else:
        raise ValueError("unsupported geometry: {}".format(geometry))
    # NOTE(review): a metric-weighted form `get_metric(...).dot(v)` was
    # present but commented out in the original; the plain Euclidean
    # gradient coeff*v is what the caller projects onto the tangent space.
    return coeff*v
# Run 9 gradient-descent steps on the hyperboloid from the initial point,
# printing each iterate.
updated_pts = [initial_point]
for i in range(1,10):
    updated_pts.append(
        gradient_descent(
            updated_pts[i-1],
            target,
            error_differential_eucl,
            geometry="hyperbolic",
            learning_rate=0.1
        )
    )
    print("updated_pt = ", updated_pts[i])
    print("-"*80)
print(updated_pts)
# Plot the hyperboloid, the target, and every descent iterate.
fig = plt.figure(figsize=(12,12))
# FIX: passing keyword arguments to plt.gca() was deprecated in matplotlib
# 3.4 and removed in 3.6; configure the axes explicitly instead.
ax = plt.gca()
ax.set(xlim=[-1.2, 1.2], ylim=[0.9, 1.6])
ax.scatter(x0, x1, s=1)
ax.scatter(target[0], target[1], color='r', marker='*',s=200,label = "Target")
# Mark each iterate with an open circle.
for i, pt in enumerate(updated_pts):
    ax.scatter(
        pt[0], pt[1],
        marker=markers.MarkerStyle('o',fillstyle="none"),
        s=100, label="Update {}".format(i)
    )
ax.legend()
# Repeat the descent, but starting exactly at the target: every step should
# be the zero step, so the iterates should all stay at the apex.
updated_pts = [np.array([0., 1.])]
for i in range(1,10):
    updated_pts.append(
        gradient_descent(
            updated_pts[i-1],
            target,
            error_differential_eucl,
            geometry="hyperbolic",
            learning_rate=0.1
        )
    )
    # print("updated_pt = ", updated_pts[i])
    # print("-"*80)
print(updated_pts)
# Plot the degenerate run (started at the target) on the hyperboloid.
fig = plt.figure(figsize=(12,12))
# FIX: plt.gca(**kwargs) was removed in matplotlib 3.6; set limits explicitly.
ax = plt.gca()
ax.set(xlim=[-1.2, 1.2], ylim=[0.9, 1.6])
ax.scatter(x0, x1, s=1)
ax.scatter(target[0], target[1], color='r', marker='*',s=200,label = "Target")
for i, pt in enumerate(updated_pts):
    ax.scatter(
        pt[0], pt[1],
        marker=markers.MarkerStyle('o',fillstyle="none"),
        s=100, label="Update {}".format(i)
    )
ax.legend()
# Larger step size (0.18): also print the hyperbolic angle chi = arcsinh(x0)
# of each iterate to watch the convergence/oscillation behaviour.
updated_pts = [initial_point]
for i in range(1,10):
    updated_pts.append(
        gradient_descent(
            updated_pts[i-1],
            target,
            error_differential_eucl,
            geometry="hyperbolic",
            learning_rate=0.18
        )
    )
    print("****xhi =", np.arcsinh(updated_pts[i][0]))
    print("updated_pt = ", updated_pts[i])
    print("-"*80)
print(updated_pts)

# Repeat with a slightly smaller step size (0.17) for comparison.
updated_pts = [initial_point]
for i in range(1,10):
    updated_pts.append(
        gradient_descent(
            updated_pts[i-1],
            target,
            error_differential_eucl,
            geometry="hyperbolic",
            learning_rate=0.17
        )
    )
    print("****xhi =", np.arcsinh(updated_pts[i][0]))
    print("updated_pt = ", updated_pts[i])
    print("-"*80)
print(updated_pts)
# Plot the lr=0.17 run, annotating each iterate with its hyperbolic angle.
fig = plt.figure(figsize=(12,12))
# FIX: plt.gca(**kwargs) was removed in matplotlib 3.6; set limits explicitly.
ax = plt.gca()
ax.set(xlim=[-1.2, 1.2], ylim=[0.9, 1.6])
ax.scatter(x0, x1, s=1)
ax.scatter(target[0], target[1], color='r', marker='*',s=200,label = "Target")
for i, pt in enumerate(updated_pts):
    ax.scatter(
        pt[0], pt[1],
        marker=markers.MarkerStyle('o',fillstyle="none"),
        s=100, label="Update {}".format(i)
    )
    # Label each iterate with chi = arccosh(x1).
    # FIX: use a raw string — "\c" is an invalid escape sequence
    # (a SyntaxWarning on Python 3.12+).
    ax.annotate(r"$\chi$ = {:.3g}".format(np.arccosh(pt[1])), xy=pt,
                xytext=pt-(0.05,0.05), fontsize=12)
ax.legend()
```
Oscillates. Why does $\chi$ step down in units of learning rate?
## Test with spherical coordinates
```
# Spherical test: descend on the unit circle from (1, 0) towards (0, -1).
target2 = np.array([0., -1.])
initial_pt2 = np.array([1., 0.])
updated_pts = [initial_pt2]
for i in range(1,10):
    updated_pts.append(
        gradient_descent(updated_pts[i-1],
            target2,
            error_differential_eucl,
            geometry="spherical",
            learning_rate=0.2
        )
    )
print(updated_pts)
# Plot the spherical run: the unit circle, the target, and every iterate.
fig = plt.figure(figsize=(12,12))
# FIX: plt.gca(**kwargs) was removed in matplotlib 3.6; configure explicitly.
ax = plt.gca()
ax.set(xlim=[-1.2, 1.2], ylim=[-1.2, 1.2], xlabel="$x^0$", ylabel="$x^1$")
# Draw the unit circle the iterates should stay on.
circle = patches.Circle((0,0), 1., edgecolor="k", fill=False)
ax.add_artist(circle)
ax.scatter(target2[0], target2[1], color='r', marker='*',s=200,label = "Target")
for i, pt in enumerate(updated_pts):
    ax.scatter(
        pt[0], pt[1],
        marker=markers.MarkerStyle('o',fillstyle="none"),
        s=100, label="Update {}".format(i)
    )
ax.legend()
```
Solution seems to oscillate, but why? pt_N = pt_{N-2}, although pt_{N-1} was closer.
```
# Start just shy of the top of the circle (theta = 0.49*pi) with a large
# step size; print the polar angle of each iterate as we go.
target2 = np.array([0., -1.])
initial_pt2 = np.array([np.cos(0.49*np.pi), np.sin(0.49*np.pi)])
updated_pts = [initial_pt2]
for i in range(1,8):
    updated_pts.append(
        gradient_descent(updated_pts[i-1],
            target2,
            error_differential_eucl,
            geometry="spherical",
            learning_rate=.8
        )
    )
    print("****theta =", np.arctan2(updated_pts[i][0], updated_pts[i][1]))
print(updated_pts)
# Plot the large-step spherical run on the unit circle.
fig = plt.figure(figsize=(12,12))
# FIX: plt.gca(**kwargs) was removed in matplotlib 3.6; configure explicitly.
ax = plt.gca()
ax.set(xlim=[-1.2, 1.2], ylim=[-1.2, 1.2], xlabel="$x^0$", ylabel="$x^1$")
circle = patches.Circle((0,0), 1., edgecolor="k", fill=False)
ax.add_artist(circle)
ax.scatter(target2[0], target2[1], color='r', marker='*',s=200,label = "Target")
for i, pt in enumerate(updated_pts):
    ax.scatter(
        pt[0], pt[1],
        marker=markers.MarkerStyle('o',fillstyle="none"),
        s=100, label="Update {}".format(i)
    )
ax.legend()
```
Same oscillatory behaviour.
| github_jupyter |
```
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
from glob import glob
%matplotlib inline
```
# Instructions for Use
The "Main Functions" section contains functions which return the success rate to be plotted as well as lower and upper bounds for uncertainty plotting.
To plot a given log, simply copy the code below two cells and replace `log_dir` with the path of the logs you wish to plot.
```
# Moving-average window (in actions) and optional cap on plotted iterations.
window = 1000
max_iter = None # Can set to an integer
# Path to one run's logs; replace 'LOG_DIR' with an actual log directory.
log_dir = 'LOG_DIR'
title = 'Stack 4 Blocks, Training'
plot_it(log_dir, title, window=window, max_iter=max_iter)
```
# Main Functions
```
def get_grasp_success_rate(actions, rewards=None, window=200, reward_threshold=0.5):
    """Evaluate a moving-window grasp success rate.

    actions: Nx4 array of actions giving [id, rotation, i, j], where id=0 is
        a push, id=1 is a grasp, and id=2 is a place.
    rewards: optional length-N array of rewards; a grasp counts as a success
        when its reward exceeds reward_threshold. If None, a grasp counts as
        a success when the *next* action is a place (a place can only follow
        a successful grasp).
    window: number of trailing actions included in the moving average.
    reward_threshold: reward value above which a grasp is a success.

    Returns (success_rate, lower, upper): length N-1 arrays with the running
    success rate and a +/-3-sigma binomial band clipped to [0, 1].
    """
    grasps = actions[:, 0] == 1
    if rewards is None:
        places = actions[:, 0] == 2
    success_rate = np.zeros(actions.shape[0] - 1)
    lower = np.zeros_like(success_rate)
    upper = np.zeros_like(success_rate)
    for i in range(success_rate.shape[0]):
        start = max(i - window, 0)
        if rewards is None:
            # Success indicator: the action following each grasp is a place.
            successes = places[start+1: i+2][grasps[start:i+1]]
        else:
            successes = (rewards[start: i+1] > reward_threshold)[grasps[start:i+1]]
        success_rate[i] = successes.mean()
        # Standard error of a binomial proportion.
        sigma = np.sqrt(success_rate[i] * (1 - success_rate[i]) / successes.shape[0])
        # BUG FIX: lower/upper were swapped (lower was mean + 3*sigma).
        lower[i] = success_rate[i] - 3*sigma
        upper[i] = success_rate[i] + 3*sigma
    lower = np.clip(lower, 0, 1)
    upper = np.clip(upper, 0, 1)
    return success_rate, lower, upper
def get_place_success_rate(stack_height, actions, include_push=False, window=200, hot_fix=False, max_height=4):
    """Evaluate a moving-window place/stacking success rate.

    stack_height: length-N array of stack heights, one entry per action.
    actions: Nx4 array of actions giving [id, rotation, i, j], where id=0 is
        a push, id=1 is a grasp, and id=2 is a place.
    include_push: if True, only places (id=2) count as attempts; otherwise
        both pushes and places count.
        NOTE(review): this looks inverted relative to the parameter name —
        confirm the intended semantics against the callers.
    window: number of trailing actions included in the moving average.
    hot_fix: drop log entries recorded after a full stack was already
        reached (works around trials that did not end on a successful push).
    max_height: stack height that marks a completed stack.

    Returns (success_rate, lower, upper): running success rate and a
    +/-3-sigma binomial band clipped to [0, 1].
    """
    if hot_fix:
        # Keep an entry while the stack is not yet full, or when the
        # previous entry was not full (so the completing action survives).
        # FIX: use max_height instead of the hard-coded literal 4.
        indices = np.logical_or(stack_height < max_height,
                                np.array([True] + list(stack_height[:-1] < max_height)))
        actions = actions[:stack_height.shape[0]][indices]
        stack_height = stack_height[indices]
    if include_push:
        success_possible = actions[:, 0] == 2
    else:
        success_possible = np.logical_or(actions[:, 0] == 0, actions[:, 0] == 2)
    # FIX: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
    stack_height_increased = np.zeros_like(stack_height, bool)
    stack_height_increased[0] = False
    stack_height_increased[1:] = stack_height[1:] > stack_height[:-1]
    # FIX: force float accumulators — np.zeros_like(stack_height) would
    # silently truncate rates if an integer height array were passed.
    success_rate = np.zeros(stack_height.shape[0])
    lower = np.zeros_like(success_rate)
    upper = np.zeros_like(success_rate)
    for i in range(stack_height.shape[0]):
        start = max(i - window, 0)
        successes = stack_height_increased[start:i+1][success_possible[start:i+1]]
        success_rate[i] = successes.mean()
        # Windows with no attempts produce NaN means; treat them as 0.
        success_rate[np.isnan(success_rate)] = 0
        sigma = np.sqrt(success_rate[i] * (1 - success_rate[i]) / successes.shape[0])
        # BUG FIX: lower/upper were swapped (lower was mean + 3*sigma).
        lower[i] = success_rate[i] - 3*sigma
        upper[i] = success_rate[i] + 3*sigma
    lower = np.clip(lower, 0, 1)
    upper = np.clip(upper, 0, 1)
    return success_rate, lower, upper
def get_action_efficiency(stack_height, window=200, ideal_actions_per_trial=6, max_height=4):
    """Calculate the running action efficiency over successful trials.

    stack_height: length-N array of stack heights; an entry equal to
        max_height marks the end of a successful trial.
    window: trailing window size (in actions) for the running estimate.
    ideal_actions_per_trial: minimum number of actions a perfect policy
        needs to complete one trial.
    max_height: stack height that counts as a completed trial.

    Returns (efficiency, lower, upper): the running efficiency
    (successful_trials * ideal_actions_per_trial / window_size) and a
    +/-3-sigma band clipped to [0, 1]. efficiency[0] is always 0.
    """
    success = stack_height == max_height
    efficiency = np.zeros_like(stack_height, np.float64)
    lower = np.zeros_like(efficiency)
    upper = np.zeros_like(efficiency)
    for i in range(1, efficiency.shape[0]):
        start = max(i - window, 1)
        window_size = min(i, window)
        num_trials = success[start:i+1].sum()
        efficiency[i] = num_trials * ideal_actions_per_trial / window_size
        # Poisson-style error estimate for a count-based rate.
        sigma = efficiency[i] / np.sqrt(window_size)
        # BUG FIX: lower/upper were swapped (lower was mean + 3*sigma).
        lower[i] = efficiency[i] - 3*sigma
        upper[i] = efficiency[i] + 3*sigma
    lower = np.clip(lower, 0, 1)
    upper = np.clip(upper, 0, 1)
    return efficiency, lower, upper
def get_grasp_action_efficiency(actions, rewards, reward_threshold=0.5, window=200, ideal_actions_per_trial=3):
    """Calculate the running fraction of window actions that were
    successful grasps.

    actions: Nx4 array of actions giving [id, rotation, i, j]; id=1 is a grasp.
    rewards: length-N array of rewards; a grasp succeeds when its reward
        exceeds reward_threshold.
    window: trailing window size (in actions) for the running estimate.
    ideal_actions_per_trial: accepted for signature symmetry with
        get_action_efficiency, but NOT used by this computation — the
        efficiency here is simply successful_grasps / window_size.

    Returns (efficiency, lower, upper): the running efficiency and a
    +/-3-sigma band clipped to [0, 1].
    """
    grasps = actions[:, 0] == 1
    efficiency = np.zeros_like(rewards, np.float64)
    lower = np.zeros_like(efficiency)
    upper = np.zeros_like(efficiency)
    for i in range(efficiency.shape[0]):
        start = max(i - window, 0)
        window_size = min(i+1, window)
        successful = rewards[start: i+1] > reward_threshold
        successful_grasps = successful[grasps[start:start+successful.shape[0]]].sum()
        efficiency[i] = successful_grasps / window_size
        # Poisson-style error estimate for a count-based rate.
        sigma = efficiency[i] / np.sqrt(window_size)
        # BUG FIX: lower/upper were swapped (lower was mean + 3*sigma).
        lower[i] = efficiency[i] - 3*sigma
        upper[i] = efficiency[i] + 3*sigma
    lower = np.clip(lower, 0, 1)
    upper = np.clip(upper, 0, 1)
    return efficiency, lower, upper
def plot_it(log_dir, title, window=1000, colors=('tab:blue', 'tab:green', 'tab:orange'), alpha=0.35, mult=100, max_iter=None, place=False):
    """Plot running success-rate and efficiency curves for one training log.

    Args:
        log_dir: directory containing a 'transitions' subfolder with the
            per-iteration .log.txt files written during training.
        title: figure title.
        window: running-mean window length (in iterations).
        colors: one color per curve (grasp, place, efficiency).
            Fix: the default was a mutable list; now an immutable tuple.
        alpha: opacity of the +/- 3-sigma bands.
        mult: y-axis multiplier (100 -> percent).
        max_iter: if given, truncate all logs to the first max_iter entries.
        place: True for stacking/row tasks (reads stack-height.log.txt);
            False for push+grasp tasks (reads reward-value.log.txt).

    Side effect: saves the figure to '<log_dir>_success_plot.pdf'.
    """
    if place:
        heights = np.loadtxt(os.path.join(log_dir, 'transitions', 'stack-height.log.txt'))
        rewards = None
    else:
        rewards = np.loadtxt(os.path.join(log_dir, 'transitions', 'reward-value.log.txt'))
    actions = np.loadtxt(os.path.join(log_dir, 'transitions', 'executed-action.log.txt'))
    trials = np.loadtxt(os.path.join(log_dir, 'transitions', 'trial.log.txt'))
    if max_iter is not None:
        if place:
            heights = heights[:max_iter]
        else:
            rewards = rewards[:max_iter]
        actions = actions[:max_iter]
        trials = trials[:max_iter]
    grasp_rate, grasp_lower, grasp_upper = get_grasp_success_rate(actions, rewards=rewards, window=window)
    if place:
        # Row-arrangement runs count pushes toward placement and need the hot fix.
        if 'row' in log_dir or 'row' in title.lower():
            place_rate, place_lower, place_upper = get_place_success_rate(heights, actions, include_push=True, hot_fix=True, window=window)
        else:
            place_rate, place_lower, place_upper = get_place_success_rate(heights, actions, window=window)
        eff, eff_lower, eff_upper = get_action_efficiency(heights, window=window)
    else:
        eff, eff_lower, eff_upper = get_grasp_action_efficiency(actions, rewards, window=window)
    # Curves first, then translucent confidence bands in matching colors.
    plt.plot(mult*grasp_rate, color=colors[0], label='Grasp Success Rate')
    if place:
        plt.plot(mult*place_rate, color=colors[1], label='Place Success Rate')
    plt.plot(mult*eff, color=colors[2], label='Action Efficiency')
    plt.fill_between(np.arange(1, grasp_rate.shape[0]+1),
                     mult*grasp_lower, mult*grasp_upper,
                     color=colors[0], alpha=alpha)
    if place:
        plt.fill_between(np.arange(1, place_rate.shape[0]+1),
                         mult*place_lower, mult*place_upper,
                         color=colors[1], alpha=alpha)
    plt.fill_between(np.arange(1, eff.shape[0]+1),
                     mult*eff_lower, mult*eff_upper,
                     color=colors[2], alpha=alpha)
    ax = plt.gca()
    plt.xlabel('Iteration')
    plt.ylabel('Running Mean')
    plt.title(title)
    plt.legend()
    ax.yaxis.set_major_formatter(PercentFormatter())
    plt.savefig(log_dir + '_success_plot.pdf')
```
# Any-object Stacking
```
# Any-object stacking training run: running success/efficiency plot.
window = 1000
max_iter = None  # use the full log
log_dir = 'any-stack-v2-steps-37k'
title = 'Stack 4 Blocks, Training'
# NOTE(review): plot_it defaults to place=False (push+grasp logs); a stacking
# run normally needs place=True — confirm which log files this run produced.
plot_it(log_dir, title, window=window, max_iter=max_iter)
```
# Arranging Rows
```
# Row-arrangement training run.
window = 1000
max_iter = None  # use the full log
log_dir = '../logs/2019-09-13.19-55-21-train-rows-no-images-16.5k'
title = 'Arrange 4 Blocks in Rows'
# NOTE(review): place is left at its False default, but row runs are handled
# by the place=True branch of plot_it — verify before regenerating figures.
plot_it(log_dir, title, window=window, max_iter=max_iter)
```
# Push + Grasp
```
# Push + grasp training run, truncated to the first 5000 iterations.
window = 200
max_iter = 5000
log_dir = 'train-grasp-place-split-efficientnet-21k-acc'
title = 'Push + Grasp Training'
plot_it(log_dir, title, window=window, max_iter=max_iter, place=False)
```
| github_jupyter |
```
# import necessary packages
import json
import requests
import pandas as pd
import polyline
import geopandas as gpd
from shapely.geometry import LineString, Point
import numpy as np
from itertools import product
from haversine import haversine, Unit
from shapely.ops import nearest_points
import os
from matplotlib import pyplot as plt
%matplotlib inline
def create_pt_grid(minx, miny, maxx, maxy):
    """creates a grid of points (lat/longs) in the range specified. lat/longs
    are scaled by 1000 and truncated to integers, i.e. a resolution of one
    thousandth of a degree (the original docstring said "hundredth").
    Args:
        minx: minimum longitude
        miny: minimum latitude
        maxx: maximum longitude
        maxy: maximum latitude
    Returns: DataFrame of all lat/long combinations in the region, with
        integer columns 'lat1000'/'long1000' and a shapely Point 'geometry'
        built in the same scaled (x1000) units
    """
    # NOTE(review): int() truncates toward zero, so negative (western/southern)
    # bounds are effectively rounded up here — confirm that is intended.
    lats = range(int(miny*1000), int(maxy*1000 +1))
    longs = range(int(minx*1000), int(maxx*1000 +1))
    ll_df = pd.DataFrame(product(lats, longs),
                         columns=['lat1000', 'long1000'])
    # Points are (x=long, y=lat) in scaled units, matching the integer columns.
    ll_df['geometry'] = [Point(x, y) for x, y in zip(ll_df['long1000'],
                                                     ll_df['lat1000'])]
    return ll_df
def get_pts_near_path(line, distance):
    """returns all lat/longs within specified distance of line that are in
    manhattan

    Args:
        line: shapely linestring of route
        distance: maximum distance from path for returned points

    Returns:
        pandas dataframe of all points within distance from line
    """
    # get line bounds
    (minx, miny, maxx, maxy) = line.bounds
    # extract max/min values with buffer area
    minx = round(minx, 3) - 0.002
    miny = round(miny, 3) - 0.002
    maxx = round(maxx, 3) + 0.002
    maxy = round(maxy, 3) + 0.002
    # load manhattan lat_longs
    manhattan_pts = pd.read_csv('models/man_lat_longs.csv')
    # Fix: the original derived 'latitude'/'longitdue' (typo) columns and then
    # merged on 'latitude'/'longitude', keys the grid frame does not have.
    # The grid from create_pt_grid carries integer lat1000/long1000 columns
    # (see the exploration cells below), so merge directly on those.
    manhattan_pts = manhattan_pts.loc[:, ['lat1000', 'long1000']]
    # create a df of all lat, longs w/in bounds
    all_pts = create_pt_grid(minx, miny, maxx, maxy)
    # remove pts not in manhattan (inner join keeps only shared grid cells)
    all_pts = pd.merge(all_pts, manhattan_pts,
                       on=['lat1000', 'long1000'],
                       how='inner')
    # flag points in the grid in manhattan as on/within distance of path
    all_pts['on_path'] = get_on_path(all_pts['geometry'], distance, line)
    return pd.DataFrame(all_pts.loc[(all_pts['on_path']==True)])
# --- Exploratory cells: sanity-check the point grid against Manhattan ---
practice_grid = create_pt_grid(-74.000, 40.750, -73.960183, 40.7800)
practice_grid_gdf = gpd.GeoDataFrame(practice_grid)
fig, ax = plt.subplots(figsize=(20,20))
practice_grid_gdf.plot(ax=ax)
# NOTE(review): absolute user-specific path; breaks on other machines.
manhattan_pts = pd.read_csv('/Users/allisonhonold/ds0805/walk_proj/walk_risk_engine/data/csv/man_lat_longs.csv')
manhattan_pts.head()
fig5, ax5 = plt.subplots(figsize=(20,20))
ax5.scatter(manhattan_pts['long1000'], manhattan_pts['lat1000'], alpha=.3)
# Inner-join on the scaled integer coordinates keeps only grid points that
# fall inside Manhattan — the same key choice get_pts_near_path needs.
all_pts = pd.merge(practice_grid_gdf, manhattan_pts.loc[:,['lat1000', 'long1000']],
                   on=['lat1000', 'long1000'],
                   how='inner')
all_pts.head()
fig2, ax2 = plt.subplots(figsize=(20,20))
all_pts.plot(ax=ax2)
manhattan_pts.shape
practice_grid_gdf.shape
practice_grid_gdf.head()
# Rebuild point geometries in scaled (x1000) units for plotting.
man_gdf = gpd.GeoDataFrame(manhattan_pts, geometry=[Point(x, y) for x, y in zip(manhattan_pts['long1000'],
                                                                                manhattan_pts['lat1000'])])
fig3, ax3 = plt.subplots(figsize=(20,20))
man_gdf.plot(ax=ax3, markersize=2, alpha=.5)
```
| github_jupyter |
## 1. Regression discontinuity: banking recovery
<p>After a debt has been legally declared "uncollectable" by a bank, the account is considered "charged-off." But that doesn't mean the bank <strong><em>walks away</em></strong> from the debt. They still want to collect some of the money they are owed. The bank will score the account to assess the expected recovery amount, that is, the expected amount that the bank may be able to receive from the customer in the future. This amount is a function of the probability of the customer paying, the total debt, and other factors that impact the ability and willingness to pay.</p>
<p>The bank has implemented different recovery strategies at different thresholds (\$1000, \$2000, etc.) where the greater the expected recovery amount, the more effort the bank puts into contacting the customer. For low recovery amounts (Level 0), the bank just adds the customer's contact information to their automatic dialer and emailing system. For higher recovery strategies, the bank incurs more costs as they leverage human resources in more efforts to obtain payments. Each additional level of recovery strategy requires an additional \$50 per customer so that customers in the Recovery Strategy Level 1 cost the company \$50 more than those in Level 0. Customers in Level 2 cost \$50 more than those in Level 1, etc. </p>
<p><strong>The big question</strong>: does the extra amount that is recovered at the higher strategy level exceed the extra \$50 in costs? In other words, was there a jump (also called a "discontinuity") of more than \$50 in the amount recovered at the higher strategy level? We'll find out in this notebook.</p>
<p></p>
<p>First, we'll load the banking dataset and look at the first few rows of data. This lets us understand the dataset itself and begin thinking about how to analyze the data.</p>
```
# Import modules
import pandas as pd
import numpy as np

# Read in dataset
df = pd.read_csv("datasets/bank_data.csv")

# Print the first few rows of the DataFrame
# (bare expression — only renders in a notebook cell)
df.head()
```
## 2. Graphical exploratory data analysis
<p>The bank has implemented different recovery strategies at different thresholds (\$1000, \$2000, \$3000 and \$5000) where the greater the Expected Recovery Amount, the more effort the bank puts into contacting the customer. Zeroing in on the first transition (between Level 0 and Level 1) means we are focused on the population with Expected Recovery Amounts between \$0 and \$2000 where the transition between Levels occurred at \$1000. We know that the customers in Level 1 (expected recovery amounts between \$1001 and \$2000) received more attention from the bank and, by definition, they had higher Expected Recovery Amounts than the customers in Level 0 (between \$1 and \$1000).</p>
<p>Here's a quick summary of the Levels and thresholds again:</p>
<ul>
<li>Level 0: Expected recovery amounts >\$0 and <=\$1000</li>
<li>Level 1: Expected recovery amounts >\$1000 and <=\$2000</li>
<li>The threshold of \$1000 separates Level 0 from Level 1</li>
</ul>
<p>A key question is whether there are other factors besides Expected Recovery Amount that also varied systematically across the \$1000 threshold. For example, does the customer age show a jump (discontinuity) at the \$1000 threshold or does that age vary smoothly? We can examine this by first making a scatter plot of the age as a function of Expected Recovery Amount for a small window of Expected Recovery Amount, \$0 to \$2000. This range covers Levels 0 and 1.</p>
```
# Scatter plot of Age vs. Expected Recovery Amount, zoomed to Levels 0-1
# ($0-$2000) to eyeball any discontinuity in age at the $1000 threshold.
from matplotlib import pyplot as plt
%matplotlib inline
plt.scatter(x=df['expected_recovery_amount'], y=df['age'], c="g", s=2)
plt.xlim(0, 2000)
plt.ylim(0, 60)
plt.xlabel("Expected Recovery Amount")
plt.ylabel("Age")
# NOTE(review): no artist carries a label, so legend() emits a "no handles"
# warning and draws nothing — safe to drop.
plt.legend(loc=2)
plt.show()
```
## 3. Statistical test: age vs. expected recovery amount
<p>We want to convince ourselves that variables such as age and sex are similar above and below the \$1000 Expected Recovery Amount threshold. This is important because we want to be able to conclude that differences in the actual recovery amount are due to the higher Recovery Strategy and not due to some other difference like age or sex.</p>
<p>The scatter plot of age versus Expected Recovery Amount did not show an obvious jump around \$1000. We will now do statistical analysis examining the average age of the customers just above and just below the threshold. We can start by exploring the range from \$900 to \$1100.</p>
<p>For determining if there is a difference in the ages just above and just below the threshold, we will use the Kruskal-Wallis test, a statistical test that makes no distributional assumptions.</p>
```
# Import stats module
from scipy import stats

# Window: expected recovery amounts in [900, 1100), i.e. roughly +/- $100
# around the $1000 strategy threshold.
era_900_1100 = df.loc[(df['expected_recovery_amount']<1100) &
                      (df['expected_recovery_amount']>=900)]

# Compute average age just below and above the threshold
by_recovery_strategy = era_900_1100.groupby(['recovery_strategy'])
by_recovery_strategy['age'].describe().unstack()

# Perform Kruskal-Wallis test (non-parametric; no distributional assumptions)
Level_0_age = era_900_1100.loc[df['recovery_strategy']=="Level 0 Recovery"]['age']
Level_1_age = era_900_1100.loc[df['recovery_strategy']=="Level 1 Recovery"]['age']
stats.kruskal(Level_0_age,Level_1_age)
```
## 4. Statistical test: sex vs. expected recovery amount
<p>We have seen that there is no major jump in the average customer age just above and just
below the \$1000 threshold by doing a statistical test as well as exploring it graphically with a scatter plot. </p>
<p>We want to also test that the percentage of customers that are male does not jump across the \$1000 threshold. We can start by exploring the range of \$900 to \$1100 and later adjust this range.</p>
<p>We can examine this question statistically by developing cross-tabs as well as doing chi-square tests of the percentage of customers that are male vs. female.</p>
```
# Number of customers in each category (recovery strategy x sex),
# restricted to the $900-$1100 window around the threshold.
crosstab = pd.crosstab(df.loc[(df['expected_recovery_amount']<1100) &
                              (df['expected_recovery_amount']>=900)]['recovery_strategy'],
                       df['sex'])
print(crosstab)

# Chi-square test of independence between strategy level and sex.
chi2_stat, p_val, dof, ex = stats.chi2_contingency(crosstab)
print(p_val)
```
## 5. Exploratory graphical analysis: recovery amount
<p>We are now reasonably confident that customers just above and just below the \$1000 threshold are, on average, similar in their average age and the percentage that are male. </p>
<p>It is now time to focus on the key outcome of interest, the actual recovery amount.</p>
<p>A first step in examining the relationship between the actual recovery amount and the expected recovery amount is to develop a scatter plot where we want to focus our attention at the range just below and just above the threshold. Specifically, we will develop a scatter plot of Expected Recovery Amount (X) versus Actual Recovery Amount (Y) for Expected Recovery Amounts between \$900 to \$1100. This range covers Levels 0 and 1. A key question is whether or not we see a discontinuity (jump) around the \$1000 threshold.</p>
```
# Scatter plot of Actual Recovery Amount vs. Expected Recovery Amount,
# zoomed to the $900-$1100 window to look for a jump at $1000.
plt.scatter(x=df['expected_recovery_amount'], y=df['actual_recovery_amount'], c="g", s=2)
plt.xlim(900, 1100)
plt.ylim(0, 2000)
plt.xlabel("Expected Recovery Amount")
plt.ylabel("Actual Recovery Amount")
# NOTE(review): nothing is labeled, so legend() warns and draws nothing.
plt.legend(loc=2)
# ... YOUR CODE FOR TASK 5 ...
```
## 6. Statistical analysis: recovery amount
<p>As we did with age, we can perform statistical tests to see if the actual recovery amount has a discontinuity above the \$1000 threshold. We are going to do this for two different windows of the expected recovery amount \$900 to \$1100 and for a narrow range of \$950 to \$1050 to see if our results are consistent.</p>
<p>Again, we will use the Kruskal-Wallis test.</p>
<p>We will first compute the average actual recovery amount for those customers just below and just above the threshold using a range from \$900 to \$1100. Then we will perform a Kruskal-Wallis test to see if the actual recovery amounts are different just above and just below the threshold. Once we do that, we will repeat these steps for a smaller window of \$950 to \$1050.</p>
```
# Average actual recovery amount just below/above the threshold
# (reuses the $900-$1100 groupby built in the age analysis above).
by_recovery_strategy['actual_recovery_amount'].describe().unstack()

# Kruskal-Wallis test on the $900-$1100 window
Level_0_actual = era_900_1100.loc[df['recovery_strategy']=='Level 0 Recovery']['actual_recovery_amount']
Level_1_actual = era_900_1100.loc[df['recovery_strategy']=='Level 1 Recovery']['actual_recovery_amount']
stats.kruskal(Level_0_actual,Level_1_actual)

# Repeat for a smaller range of $950 to $1050 as a robustness check
era_950_1050 = df.loc[(df['expected_recovery_amount']<1050) &
                      (df['expected_recovery_amount']>=950)]
Level_0_actual = era_950_1050.loc[df['recovery_strategy']=='Level 0 Recovery']['actual_recovery_amount']
Level_1_actual = era_950_1050.loc[df['recovery_strategy']=='Level 1 Recovery']['actual_recovery_amount']
stats.kruskal(Level_0_actual,Level_1_actual)
```
## 7. Regression modeling: no threshold
<p>We now want to take a regression-based approach to estimate the program impact at the \$1000 threshold using data that is just above and below the threshold. </p>
<p>We will build two models. The first model does not have a threshold while the second will include a threshold.</p>
<p>The first model predicts the actual recovery amount (dependent variable) as a function of the expected recovery amount (independent variable). We expect that there will be a strong positive relationship between these two variables. </p>
<p>We will examine the adjusted R-squared to see the percent of variance explained by the model. In this model, we are not representing the threshold but simply seeing how the variable used for assigning the customers (expected recovery amount) relates to the outcome variable (actual recovery amount).</p>
```
# Import statsmodels
import statsmodels.api as sm

# Define X and y: predict actual recovery from expected recovery alone
X = era_900_1100['expected_recovery_amount']
y = era_900_1100['actual_recovery_amount']
X = sm.add_constant(X)  # add the intercept column

# Build linear regression model
model = sm.OLS(y, X).fit()
predictions = model.predict(X)

# Print out the model summary statistics
model.summary()
```
## 8. Regression modeling: adding true threshold
<p>From the first model, we see that the expected recovery amount's regression coefficient is statistically significant. </p>
<p>The second model adds an indicator of the true threshold to the model (in this case at \$1000). </p>
<p>We will create an indicator variable (either a 0 or a 1) that represents whether or not the expected recovery amount was greater than \$1000. When we add the true threshold to the model, the regression coefficient for the true threshold represents the additional amount recovered due to the higher recovery strategy. That is to say, the regression coefficient for the true threshold measures the size of the discontinuity for customers just above and just below the threshold.</p>
<p>If the higher recovery strategy helped recover more money, then the regression coefficient of the true threshold will be greater than zero. If the higher recovery strategy did not help recover more money, then the regression coefficient will not be statistically significant.</p>
```
# Create indicator (0 or 1) for expected recovery amount >= $1000
df['indicator_1000'] = np.where(df['expected_recovery_amount']<1000, 0, 1)
# Rebuild the window so it carries the new indicator column.
era_900_1100 = df.loc[(df['expected_recovery_amount']<1100) &
                      (df['expected_recovery_amount']>=900)]

# Define X and y; the indicator's coefficient estimates the discontinuity
# (extra money recovered by the higher strategy level).
X = era_900_1100[['expected_recovery_amount','indicator_1000']]
y = era_900_1100['actual_recovery_amount']
X = sm.add_constant(X)

# Build linear regression model
model = sm.OLS(y,X).fit()

# Print the model summary
model.summary()
```
## 9. Regression modeling: adjusting the window
<p>The regression coefficient for the true threshold was statistically significant with an estimated impact of around \$278. This is much larger than the \$50 per customer needed to run this higher recovery strategy. </p>
<p>Before showing this to our manager, we want to convince ourselves that this result wasn't due to choosing an expected recovery amount window of \$900 to \$1100. Let's repeat this analysis for the window from \$950 to \$1050 to see if we get similar results.</p>
<p>The answer? Whether we use a wide (\$900 to \$1100) or narrower window (\$950 to \$1050), the incremental recovery amount at the higher recovery strategy is much greater than the \$50 per customer it costs for the higher recovery strategy. So we conclude that the higher recovery strategy is worth the extra cost of \$50 per customer.</p>
```
# Redefine era_950_1050 so the indicator variable is included
era_950_1050 = df.loc[(df['expected_recovery_amount']<1050) &
                      (df['expected_recovery_amount']>=950)]

# Define X and y for the narrower robustness-check window
X = era_950_1050[['expected_recovery_amount','indicator_1000']]
y = era_950_1050['actual_recovery_amount']
X = sm.add_constant(X)

# Build linear regression model
model = sm.OLS(y,X).fit()

# Print the model summary
model.summary()
```
| github_jupyter |
```
from scipy.ndimage.measurements import label
import numpy as np
import json

# Load the CCC level-5 input: a grid of building heights per cell.
# NOTE(review): absolute Windows path; breaks on other machines.
with open(r"C:\data\Dropbox\Projekte\Code\CCC_Linz18Fall\data\level5\level5_2.json", "r") as f:
    input = json.load(f)  # NOTE(review): shadows the builtin `input`
grid = np.array(input["rows"])

plt.figure(figsize=(10, 10))
plt.imshow(grid)

# Show only the cells with height 24.
plt.figure(figsize=(10, 10))
#plt.imshow(grid[grid == 24])
plt.imshow(np.where(grid == 24, grid, 0))

# Label connected components of the non-zero cells.
mask, ncomponents = label(grid)
print(ncomponents)
plt.figure(figsize=(10, 10))
plt.imshow(mask)

# The floor (height 0) must be at the upper-left corner.
assert grid[0, 0] == 0
def get_building(grid, mask, building_index):
    """Return (r1, c1, r2, c2): the first-match corner and last-match corner
    of the connected component labeled `building_index` in `mask`.

    `grid` is unused; the parameter is kept for interface compatibility.

    Fix: the original guarded the first-match assignment with
    `if not r1 and not c1`, which is also true when r1 == 0 or c1 == 0,
    so a component touching row/column 0 had its corner reset on every
    subsequent matching row. Compare against None explicitly instead.
    """
    r1, c1 = None, None
    r2, c2 = None, None
    for i, row in enumerate(mask):
        if any(row == building_index):
            # First / last matching column within this row.
            fc_start = np.argmax(row == building_index)
            fc_end = len(row) - 1 - np.argmax(row[::-1] == building_index)
            # set upper left corner point (first match only)
            if r1 is None:
                r1, c1 = i, fc_start
            # lower right corner point (updated through the last match)
            r2, c2 = i, fc_end
    return r1, c1, r2, c2
def is_hotspot(size, r1, c1, r2, c2):
    """True when the inclusive box (r1, c1)-(r2, c2) is at least size x size."""
    height = r2 - r1 + 1
    width = c2 - c1 + 1
    return height >= size and width >= size
def get_center_point(r1, c1, r2, c2):
    """Center cell of the inclusive box, rounding toward the top-left."""
    return (r1 + r2) // 2, (c1 + c2) // 2
# Module-level accumulators mutated inside get_hotspots: big_mask records
# every cell covered by a fitting hotspot square, tagged with the running
# per-building counter iii.
big_mask = np.zeros_like(mask)
iii = 1
def get_hotspots(grid, mask, building, ncomponent, size):
    """Find center points of all size x size squares that fit entirely on
    connected component `ncomponent`, searching within `building`'s box.

    Side effect: stamps each fitting square into the module-level `big_mask`
    using the module-level counter `iii`.
    """
    r1, c1, r2, c2 = building
    hotspots_grid = np.zeros_like(mask)

    def _does_fit(row_, col_):
        # extract possible hotspot anchored at (row_, col_)
        submatrix = mask[row_:row_ + size, col_:col_ + size]
        # reject squares clipped by the grid border
        if submatrix.shape[0] != size or submatrix.shape[1] != size:
            return False
        # check if all cells are on the building
        return np.all(submatrix == ncomponent)

    for row in range(r1, r2 + 1):
        for col in range(c1, c2 + 1):
            if _does_fit(row, col):
                hotspots_grid[row:row + size, col:col + size] = 1
                big_mask[row:row + size, col:col + size] = iii
    hotspots_mask, nhotspots = label(hotspots_grid)
    #if np.any(hotspots_mask):
    #    plt.figure(figsize=(10,10))
    #    plt.imshow(hotspots_mask)
    #    plt.show()
    # use the building algorithm again ...
    hotspots = []
    # NOTE(review): the loop variable shadows `nhotspots`; harmless because
    # range() already captured its value, but confusing to read.
    for nhotspots in range(1, nhotspots + 1):
        hotspot = get_building(hotspots_grid, hotspots_mask, nhotspots)
        hotspots.append(hotspot)
    # get center points of hotspots
    hotspots = [get_center_point(*a) for a in hotspots]
    # hotspot center must be in on the building
    hotspots = [e for e in hotspots if hotspots_grid[e[0], e[1]] == 1]
    return hotspots
# Main pass: collect hotspot centers for every building, level by level.
buildings = []
heights = sorted(np.unique(grid))
for height in heights[1:]:  # skip height 0 (the floor)
    grid_on_height = np.where(grid == height, grid, 0)
    mask_on_height, ncomponents = label(grid_on_height)
    #plt.figure(figsize=(10,10))
    #plt.imshow(mask_on_height)
    #plt.show()
    # is the floor in the upper left corner?
    assert grid_on_height[0, 0] == 0
    for ncomponent in range(1, ncomponents + 1):
        building = get_building(grid_on_height, mask_on_height, ncomponent)
        # advance the module-level tag used by get_hotspots for big_mask
        iii += 1
        hotspots = get_hotspots(grid_on_height, mask_on_height, building, ncomponent, input["s"])
        buildings.extend(hotspots)

# sort by row and by col
buildings = sorted(buildings, key=lambda x: (x[0], x[1]))
# prepend id and only output upper left corner
buildings = [(i, *a) for i, a in enumerate(buildings)]
print(buildings)
# [' '.join([' '.join(f) for f in e]) for e in buildings]
# Flatten to the contest's space-separated output format.
result = ' '.join([' '.join(map(str, e)) for e in buildings])
print(result)

plt.figure(figsize=(10,10))
plt.imshow(big_mask)
# --- Scratch / exploration cells left over from debugging ---
for row in mask:
    if any(row == 2):
        print(1)

(6 - 2 + 1) // 2

plt.imshow(grid)

# NOTE(review): `labeled` is never defined in this notebook — probably meant
# `mask` from the earlier label() call.
np.unique(labeled)

structure = np.ones((3, 3), dtype=np.int)  # NOTE(review): np.int is removed in modern NumPy

# Fix: the loop body contained only a commented-out print, which is a
# SyntaxError in Python; add `pass` so the cell at least parses.
for x in range(grid.shape[0]):
    for y in range(grid.shape[1]):
        #print((x, y))
        pass
```
| github_jupyter |
# Retrieve Poetry
## Poetry Retriever using the Poly-encoder Transformer architecture (Humeau et al., 2019) for retrieval
```
# This notebook is based on :
# https://aritter.github.io/CS-7650/
# This Project was developed at the Georgia Institute of Technology by Ashutosh Baheti (ashutosh.baheti@cc.gatech.edu),
# borrowing from the Neural Machine Translation Project (Project 2)
# of the UC Berkeley NLP course https://cal-cs288.github.io/sp20/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
import pickle
import statistics
import sys
from functools import partial
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import tqdm
import nltk
#from google.colab import files
# General util functions
def make_dir_if_not_exists(directory):
    """Create `directory` (including parents) if it does not already exist.

    Fix: the original called `logging.info` but `logging` was never imported
    in this notebook, so the first real directory creation raised NameError.
    The import is kept local to avoid touching the notebook's import cell.
    """
    import logging
    if not os.path.exists(directory):
        logging.info("Creating new directory: {}".format(directory))
        os.makedirs(directory)
def print_list(l, K=None):
    """Print the elements of `l`, one per line, followed by a blank line.

    If K is given, stop before printing the element at index K.
    """
    for index, element in enumerate(l):
        if index == K:
            break
        print(element)
    print()
def remove_multiple_spaces(string):
    """Collapse runs of whitespace into single spaces and trim both ends."""
    collapsed = re.sub(r'\s+', ' ', string)
    return collapsed.strip()
def save_in_pickle(save_object, save_file):
    """Serialize `save_object` to the path `save_file` using pickle."""
    with open(save_file, "wb") as handle:
        pickle.dump(save_object, handle)
def load_from_pickle(pickle_file):
    """Deserialize and return the object stored at `pickle_file`."""
    with open(pickle_file, "rb") as handle:
        return pickle.load(handle)
def save_in_txt(list_of_strings, save_file):
    """Write each string to `save_file` on its own line, stripped of
    surrounding whitespace."""
    with open(save_file, "w") as writer:
        for entry in list_of_strings:
            writer.write(f"{entry.strip()}\n")
def load_from_txt(txt_file):
    """Read `txt_file` and return its lines as a list of stripped strings."""
    with open(txt_file, "r") as reader:
        return [line.strip() for line in reader]
import pandas as pd

# Select GPU when available, otherwise fall back to CPU.
print(torch.cuda.is_available())
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print("Using device:", device)

# HuggingFace model identifier shared by the tokenizer and the encoders.
bert_model_name = 'distilbert-base-uncased'

# Bert Imports
from transformers import DistilBertTokenizer, DistilBertModel
#bert_model = DistilBertModel.from_pretrained(bert_model_name)
tokenizer = DistilBertTokenizer.from_pretrained(bert_model_name)
```
## Load Data
### Poetry Database
```
# Load the poetry corpus (one poem per row, with epoch metadata).
data_file = '../data/with_epoque.csv'
data = pd.read_csv(data_file)
print(len(data))
print(data.head())
```
## Dataset Preparation
```
def make_data_training(df, char_max_line = 20):
    """Explode poem contents into one training row per poem line.

    Keeps non-empty lines with at most `char_max_line` space-separated
    tokens (despite the name, this is a word limit, not a character limit).
    Returns a DataFrame with columns 'text', 'context', 'target' where
    context is the poem name and target duplicates the line text.
    """
    texts, contexts, targets = [], [], []
    for _, record in df.iterrows():
        for line in record['content'].split('\r\n'):
            if not line.strip():
                continue
            if len(line.split(' ')) > char_max_line:
                continue
            texts.append(line)
            targets.append(line)
            contexts.append(' '.join([str(record['poem name'])]))
    return pd.DataFrame(list(zip(texts, contexts, targets)),
                        columns=['text', 'context', 'target'])
# Torch dataset wrapping the line-level poem DataFrame.
class PoemDataset(Dataset):
    """Thin Dataset adapter: index i yields row i of the wrapped DataFrame."""

    def __init__(self, df):
        # Keep a reference to the frame; no copying or preprocessing here.
        self.df = df

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        return self.df.iloc[idx]
# Build the line-level training frame (allowing up to 30 words per line).
df = make_data_training(data, char_max_line = 30)

# Special-token strings and their fixed vocabulary ids (ids 0-4 are
# reserved before any corpus word is added).
pad_word = "<pad>"
bos_word = "<bos>"
eos_word = "<eos>"
unk_word = "<unk>"
sep_word = "sep"
pad_id = 0
bos_id = 1
eos_id = 2
unk_id = 3
sep_id = 4
def normalize_sentence(s):
    """Lightly normalize text: pad sentence punctuation (.!?) with a leading
    space, replace every other non-letter run with a space, then collapse
    whitespace and trim."""
    padded = re.sub(r"([.!?])", r" \1", s)
    letters_only = re.sub(r"[^a-zA-Z.!?]+", r" ", padded)
    return re.sub(r"\s+", r" ", letters_only).strip()
class Vocabulary:
    """Bidirectional word<->id mapping with the five special tokens
    (<pad>, <bos>, <eos>, <unk>, sep) pre-registered at ids 0-4."""

    def __init__(self):
        # Special tokens claim the first five ids in both directions.
        self.word_to_id = {pad_word: pad_id, bos_word: bos_id, eos_word: eos_id,
                           unk_word: unk_id, sep_word: sep_id}
        self.word_count = {}
        self.id_to_word = {pad_id: pad_word, bos_id: bos_word, eos_id: eos_word,
                           unk_id: unk_word, sep_id: sep_word}
        self.num_words = 5

    def get_ids_from_sentence(self, sentence):
        """Normalize, lowercase and map each word to its id (unknowns map to
        <unk>), framed by <bos>/<eos>."""
        normalized = normalize_sentence(sentence)
        ids = [bos_id]
        for word in normalized.split():
            ids.append(self.word_to_id.get(word.lower(), unk_id))
        ids.append(eos_id)
        return ids

    def tokenized_sentence(self, sentence):
        """Return the token strings corresponding to get_ids_from_sentence."""
        return [self.id_to_word[token_id]
                for token_id in self.get_ids_from_sentence(sentence)]

    def decode_sentence_from_ids(self, sent_ids):
        """Join ids back into text, dropping <bos>/<eos>/<pad> markers."""
        skip = {bos_id, eos_id, pad_id}
        return ' '.join(self.id_to_word[token_id]
                        for token_id in sent_ids if token_id not in skip)

    def add_words_from_sentence(self, sentence):
        """Register every word of the (normalized) sentence, updating counts."""
        for word in normalize_sentence(sentence).split():
            if word in self.word_to_id:
                # known word: bump its frequency
                self.word_count[word] += 1
            else:
                # new word: assign the next free id
                self.word_to_id[word] = self.num_words
                self.id_to_word[self.num_words] = word
                self.word_count[word] = 1
                self.num_words += 1
# Populate the vocabulary from every (lowercased) training line.
vocab = Vocabulary()
for src in df['text']:
    vocab.add_words_from_sentence(src.lower())
print(f"Total words in the vocabulary = {vocab.num_words}")
class Poem_dataset(Dataset):
    """Torch Dataset of (context + ' sep ' + poem_line, poem_line) pairs,
    pre-tokenized into vocabulary id lists.

    (The original docstring claimed this was the Cornell Movie Dialog
    Corpus — it is the poetry data; the notebook was adapted from a
    dialogue project.)
    """

    def __init__(self, poems, context, vocab, device):
        """
        Args:
            poems: list of poem-line strings (the targets)
            context: list of context strings (poem names), same length
            vocab: Vocabulary object that contains the mapping of
                words to indices
            device: cpu or cuda
        """
        # Source = context + separator token + line; target = the line itself.
        l = []
        for i in range(len(poems)):
            l.append((context[i] + ' sep ' + poems[i], poems[i]))
        self.conversations = l.copy()
        self.vocab = vocab
        self.device = device

        def encode(src, tgt):
            # Closure over __init__'s `self`: maps both strings to id lists.
            src_ids = self.vocab.get_ids_from_sentence(src)
            tgt_ids = self.vocab.get_ids_from_sentence(tgt)
            return (src_ids, tgt_ids)

        # We will pre-tokenize the conversations and save in id lists for later use
        self.tokenized_conversations = [encode(src, tgt) for src, tgt in self.conversations]

    def __len__(self):
        return len(self.conversations)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        return {"conv_ids": self.tokenized_conversations[idx], "conv": self.conversations[idx]}
def collate_fn(data):
    """Creates mini-batch tensors from the list of tuples (src_seq, tgt_seq).

    A custom collate_fn is needed because merging variable-length sequences
    (including padding) is not supported by the default one. Sequences are
    padded to the maximum length within the mini-batch (dynamic padding).

    Args:
        data: list of dicts {"conv_ids":(src_ids, tgt_ids), "conv":(src_str, trg_str)}.
            - src_ids: list of src piece ids; variable length.
            - tgt_ids: list of tgt piece ids; variable length.
            - src_str: String of src
            - tgt_str: String of tgt
    Returns: dict { "conv_ids": (src_ids, tgt_ids),
                    "conv": (src_str, tgt_str),
                    "conv_tensors": (src_seqs, tgt_seqs)}
        src_seqs: torch tensor of shape (src_padded_length, batch_size).
        tgt_seqs: torch tensor of shape (tgt_padded_length, batch_size).
    """
    # Sort by decreasing src length — required for efficient packed-RNN use.
    src_ids = [torch.LongTensor(e["conv_ids"][0]) for e in data]
    tgt_ids = [torch.LongTensor(e["conv_ids"][1]) for e in data]
    src_str = [e["conv"][0] for e in data]
    tgt_str = [e["conv"][1] for e in data]
    entries = list(zip(src_ids, tgt_ids, src_str, tgt_str))
    entries.sort(key=lambda x: len(x[0]), reverse=True)
    src_ids, tgt_ids, src_str, tgt_str = zip(*entries)
    # Pad with pad_id to the longest sequence (time-major: batch_first=False).
    src_seqs = nn.utils.rnn.pad_sequence(src_ids, padding_value=pad_id,
                                         batch_first=False)
    tgt_seqs = nn.utils.rnn.pad_sequence(tgt_ids, padding_value=pad_id,
                                         batch_first=False)
    # Fix: removed the unused src_padded_length/tgt_padded_length locals; they
    # were computed as len(seqs[0]) — the batch size, not the padded length.
    return {"conv_ids": (src_ids, tgt_ids),
            "conv": (src_str, tgt_str),
            "conv_tensors": (src_seqs.to(device), tgt_seqs.to(device))}
# Create the DataLoader for all_conversations
all_poems = df['text'].tolist()
context = df['context'].tolist()
dataset = Poem_dataset(all_poems, context, vocab, device)
batch_size = 5
data_loader = DataLoader(dataset=dataset, batch_size=batch_size,
                         shuffle=True, collate_fn=collate_fn)

# Smoke-test the vocabulary round trip on a few examples.
for src, tgt in dataset.conversations[:3]:
    sentence = src
    word_tokens = vocab.tokenized_sentence(sentence)
    # Automatically adds bos_id and eos_id before and after sentence ids respectively
    word_ids = vocab.get_ids_from_sentence(sentence)
    print(sentence)
    print(word_tokens)
    print(word_ids)
    print(vocab.decode_sentence_from_ids(word_ids))
    print()

# Single-word lookup round trip.
word = "the"
word_id = vocab.word_to_id[word.lower()]
print(f"Word = {word}")
print(f"Word ID = {word_id}")
print(f"Word decoded from ID = {vocab.decode_sentence_from_ids([word_id])}")

# Test one batch of training data
first_batch = next(iter(data_loader))
print(f"Testing first training batch of size {len(first_batch['conv'][0])}")
print(f"List of source strings:")
print_list(first_batch["conv"][0])
print(f"Tokenized source ids:")
print_list(first_batch["conv_ids"][0])
print(f"Padded source ids as tensor (shape {first_batch['conv_tensors'][0].size()}):")
print(first_batch["conv_tensors"][0])
def transformer_collate_fn(batch, tokenizer):
    """Collate a batch of {'text': ..., 'target': ...} dicts into padded tensors.

    Args:
        batch: list of dicts with raw 'text' (context) and 'target' (response) strings.
        tokenizer: a HuggingFace-style tokenizer; must provide get_vocab() and
            return {'input_ids', 'attention_mask'} when called on a list of strings.

    Returns:
        (sentences, masks_sentences, targets, masks_targets) — four (batch, max_len)
        tensors: token ids padded with the tokenizer's [PAD] id and their 0/1
        attention masks padded with 0.

    BUG FIX vs. original: the original appended `mask_targets`/`mask_sentences`
    (undefined names), returned an undefined `masks` variable, and returned only
    3 values while every call site unpacks/indexes 4 (context, context_mask,
    response, response_mask).
    """
    bert_pad_token = tokenizer.get_vocab()['[PAD]']
    sentences, masks_sentences, targets, masks_targets = [], [], [], []
    for data in batch:
        tokenizer_output = tokenizer([data['text']])
        tokenizer_target = tokenizer([data['target']])
        sentences.append(torch.tensor(tokenizer_output['input_ids'][0]))
        masks_sentences.append(torch.tensor(tokenizer_output['attention_mask'][0]))
        targets.append(torch.tensor(tokenizer_target['input_ids'][0]))
        masks_targets.append(torch.tensor(tokenizer_target['attention_mask'][0]))
    sentences = pad_sequence(sentences, batch_first=True, padding_value=bert_pad_token)
    targets = pad_sequence(targets, batch_first=True, padding_value=bert_pad_token)
    masks_sentences = pad_sequence(masks_sentences, batch_first=True, padding_value=0.0)
    masks_targets = pad_sequence(masks_targets, batch_first=True, padding_value=0.0)
    return sentences, masks_sentences, targets, masks_targets
#create pytorch dataloaders from train_dataset, val_dataset, and test_datset
batch_size=5
# NOTE(review): despite the comment above, only one DataLoader (over `dataset`)
# is built here; val/test loaders are never created. partial() binds the
# tokenizer so DataLoader can call the two-argument collate function.
train_dataloader = DataLoader(dataset,batch_size=batch_size,collate_fn=partial(transformer_collate_fn, tokenizer=tokenizer), shuffle = True)
#tokenizer.batch_decode(transformer_collate_fn(train_dataset,tokenizer)[0], skip_special_tokens=True)
```
## Polyencoder Model
```
#torch.cuda.empty_cache()
#bert1 = DistilBertModel.from_pretrained(bert_model_name)
#bert2 = DistilBertModel.from_pretrained(bert_model_name)
# Load a single shared DistilBERT encoder (the two-encoder variant above is
# commented out; later cells that reference bert1/bert2 will fail unless it is restored).
bert = DistilBertModel.from_pretrained(bert_model_name)
#Double Bert
class RetrieverPolyencoder(nn.Module):
    """Poly-encoder retrieval model with two separate BERT encoders (one for
    contexts, one for candidate responses).

    Context token embeddings are attended over by `max_len` learned positional
    query codes; candidates then attend over those codes and are scored by dot
    product (cf. Humeau et al., 2020, "Poly-encoders").

    NOTE(review): `vocab`, `num_layers`, `contextDropout`, `candidatesDropout`,
    `contextFc` and `candidatesFc` are accepted/defined but never used by
    score()/compute_loss() below — confirm whether projection to `out_dim`
    was intended.
    """
    def __init__(self, contextBert, candidateBert, vocab, max_len = 300, hidden_dim = 768, out_dim = 64, num_layers = 2, dropout=0.1, device=device):
        super().__init__()
        self.device = device
        self.hidden_dim = hidden_dim
        self.max_len = max_len
        self.out_dim = out_dim
        # Context layers
        self.contextBert = contextBert
        self.contextDropout = nn.Dropout(dropout)
        self.contextFc = nn.Linear(self.hidden_dim, self.out_dim)
        # Candidates layers
        self.candidatesBert = candidateBert
        # Learned positional codes used as attention queries over the context.
        self.pos_emb = nn.Embedding(self.max_len, self.hidden_dim)
        self.candidatesDropout = nn.Dropout(dropout)
        self.candidatesFc = nn.Linear(self.hidden_dim, self.out_dim)
        self.att_dropout = nn.Dropout(dropout)
    def attention(self, q, k, v, vMask=None):
        # Unscaled dot-product attention with dropout on the softmax weights.
        w = torch.matmul(q, k.transpose(-1, -2))
        if vMask is not None:
            # NOTE(review): multiplying pre-softmax scores by a 0/1 mask zeroes
            # masked logits rather than sending them to -inf, so masked
            # positions still receive softmax weight. The conventional fix is
            # masked_fill(mask == 0, -inf) — confirm intent before changing.
            w *= vMask.unsqueeze(1)
        w = F.softmax(w, -1)
        w = self.att_dropout(w)
        score = torch.matmul(w, v)
        return score
    def score(self, context, context_mask, responses, responses_mask):
        """Score `nb_cand` candidate responses against each context.

        Args:
            context: (batch, seq) token ids; context_mask: matching 0/1 mask.
            responses: (batch, nb_cand, seq) token ids; responses_mask: matching mask.
        Returns:
            (batch, nb_cand) dot-product scores.

        (The original docstring here was copy-pasted from a seq2seq loss
        function and did not describe this method.)
        """
        batch_size, nb_cand, seq_len = responses.shape
        # Context
        # NOTE(review): `[-1]` assumes the encoder's last output element holds
        # the hidden states; the single-BERT variant below indexes `[0]` —
        # confirm which output of `contextBert` is intended.
        context_encoded = self.contextBert(context,context_mask)[-1]
        pos_emb = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(pos_emb, context_encoded, context_encoded, context_mask)
        # Response: flatten candidates through BERT, keep the first-token embedding.
        responses_encoded = self.candidatesBert(responses.view(-1,responses.shape[2]), responses_mask.view(-1,responses.shape[2]))[-1][:,0,:]
        responses_encoded = responses_encoded.view(batch_size,nb_cand,-1)
        context_emb = self.attention(responses_encoded, context_att, context_att).squeeze()
        dot_product = (context_emb*responses_encoded).sum(-1)
        return dot_product
    def compute_loss(self, context, context_mask, response, response_mask):
        """In-batch negatives loss: each context's gold response is the
        same-index one in the batch; all other responses act as negatives.
        Returns a scalar mean negative log-likelihood over the batch.

        (The original docstring here was copy-pasted from a seq2seq loss
        function and did not describe this method.)
        """
        batch_size = context.shape[0]
        # Context
        context_encoded = self.contextBert(context,context_mask)[-1]
        pos_emb = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(pos_emb, context_encoded, context_encoded, context_mask)
        # Response: first-token embedding, broadcast so every context scores every response.
        response_encoded = self.candidatesBert(response, response_mask)[-1][:,0,:]
        response_encoded = response_encoded.unsqueeze(0).expand(batch_size, batch_size, response_encoded.shape[1])
        context_emb = self.attention(response_encoded, context_att, context_att).squeeze()
        dot_product = (context_emb*response_encoded).sum(-1)
        # Keep only the diagonal (gold-pair) log-probabilities.
        mask = torch.eye(batch_size).to(self.device)
        loss = F.log_softmax(dot_product, dim=-1) * mask
        loss = (-loss.sum(dim=1)).mean()
        return loss
#Single Bert
class RetrieverPolyencoder_single(nn.Module):
    """Poly-encoder retriever sharing one BERT encoder between context and candidates.

    The context's [CLS] embedding is attended over by `max_len` learned
    positional query codes; candidates are represented by their [CLS] embedding
    and scored by dot product against the candidate-attended context embedding.

    NOTE(review): `hidden_dim`, `out_dim` and `num_layers` are stored/accepted
    but unused by the methods below.
    """
    def __init__(self, bert, max_len = 300, hidden_dim = 768, out_dim = 64, num_layers = 2, dropout=0.1, device=device):
        super().__init__()
        self.device = device
        self.hidden_dim = hidden_dim
        self.max_len = max_len
        self.out_dim = out_dim
        self.bert = bert
        # Context layers
        self.contextDropout = nn.Dropout(dropout)
        # Candidates layers: learned positional codes used as attention queries.
        self.pos_emb = nn.Embedding(self.max_len, self.hidden_dim)
        self.candidatesDropout = nn.Dropout(dropout)
        self.att_dropout = nn.Dropout(dropout)
    def attention(self, q, k, v, vMask=None):
        """Unscaled dot-product attention with dropout on the softmax weights.

        NOTE(review): multiplying pre-softmax scores by a 0/1 mask zeroes masked
        logits rather than sending them to -inf; kept as-is to match the
        double-BERT variant — confirm intent before changing.
        """
        w = torch.matmul(q, k.transpose(-1, -2))
        if vMask is not None:
            w *= vMask.unsqueeze(1)
        w = F.softmax(w, -1)
        w = self.att_dropout(w)
        score = torch.matmul(w, v)
        return score
    def score(self, context, context_mask, responses, responses_mask):
        """Score `nb_cand` candidate responses against each context.

        Args:
            context: (batch, seq) token ids; context_mask: matching 0/1 mask.
            responses: (batch, nb_cand, seq) token ids; responses_mask: matching mask.
        Returns:
            (batch, nb_cand) dot-product scores.
        """
        batch_size, nb_cand, seq_len = responses.shape
        # Context
        context_encoded = self.bert(context,context_mask)[0][:,0,:]
        pos_emb = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(pos_emb, context_encoded, context_encoded, context_mask)
        # Response: encode every candidate with the shared BERT, keep [CLS].
        responses_encoded = self.bert(responses.view(-1,responses.shape[2]), responses_mask.view(-1,responses.shape[2]))[0][:,0,:]
        responses_encoded = responses_encoded.view(batch_size,nb_cand,-1)
        # BUG FIX: removed `response_encoded = self.candidatesFc(response_encoded)`,
        # which referenced an attribute this class never defines and a variable
        # that does not exist (AttributeError/NameError at runtime), and whose
        # result was never used.
        context_emb = self.attention(responses_encoded, context_att, context_att).squeeze()
        dot_product = (context_emb*responses_encoded).sum(-1)
        return dot_product
    def compute_loss(self, context, context_mask, response, response_mask):
        """In-batch negatives loss: each context's gold response is the
        same-index one in the batch; off-diagonal responses act as negatives.
        Returns a scalar mean negative log-likelihood over the batch.
        """
        batch_size = context.shape[0]
        seq_len = response.shape[1]
        # Context
        context_encoded = self.bert(context,context_mask)[0][:,0,:]
        pos_emb = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(pos_emb, context_encoded, context_encoded, context_mask)
        # Response: [CLS] embedding, broadcast so every context scores every response.
        # (Debug print statements removed from the original.)
        response_encoded = self.bert(response, response_mask)[0][:,0,:]
        response_encoded = response_encoded.view(batch_size, -1)
        response_encoded = response_encoded.unsqueeze(0).expand(batch_size, batch_size, response_encoded.shape[1])
        context_emb = self.attention(response_encoded, context_att, context_att).squeeze()
        dot_product = (context_emb*response_encoded).sum(-1)
        # Keep only the diagonal (gold-pair) log-probabilities.
        mask = torch.eye(batch_size).to(self.device)
        loss = F.log_softmax(dot_product, dim=-1) * mask
        loss = (-loss.sum(dim=1)).mean()
        return loss
#Bi-encoder
class RetrieverBiencoder(nn.Module):
    """Bi-encoder retriever: context and response are encoded independently by
    a shared BERT; the score is the dot product of their [CLS] embeddings."""
    def __init__(self, bert):
        super().__init__()
        self.bert = bert
    def score(self, context, context_mask, responses, responses_mask):
        """Score one candidate response per context.

        Args:
            context, context_mask: (batch, seq) token ids / 0-1 attention mask.
            responses, responses_mask: (batch, seq) token ids / attention mask.
        Returns:
            (batch,) dot-product scores (scalar when batch == 1).

        BUG FIX: the original read undefined names (`response`,
        `responses_input_ids`, `responses_input_masks`) and squeezed the
        candidate dimension before `permute`, which raises on 2-D tensors.
        """
        context_vec = self.bert(context, context_mask)[0][:,0,:] # [bs,dim]
        batch_size, res_length = responses.shape
        responses_vec = self.bert(responses, responses_mask)[0][:,0,:] # [bs,dim]
        responses_vec = responses_vec.view(batch_size, 1, -1)
        context_vec = context_vec.unsqueeze(1)
        # Batched (bs,1,dim) x (bs,dim,1) -> per-pair dot product.
        dot_product = torch.matmul(context_vec, responses_vec.permute(0, 2, 1)).squeeze()
        return dot_product
    def compute_loss(self, context, context_mask, response, response_mask):
        """In-batch negatives cross-entropy: gold pairs lie on the diagonal of
        the (bs, bs) context x response score matrix. Returns a scalar loss.

        BUG FIX: take the [CLS] vector ([0][:,0,:]); the original used the full
        (bs, seq, dim) hidden-state tensor for the context, which breaks the
        (bs, bs) matmul/log_softmax below. Debug prints removed.
        """
        context_vec = self.bert(context, context_mask)[0][:,0,:] # [bs,dim]
        responses_vec = self.bert(response, response_mask)[0][:,0,:] # [bs,dim]
        dot_product = torch.matmul(context_vec, responses_vec.t()) # [bs, bs]
        # Keep only the diagonal (gold-pair) log-probabilities.
        mask = torch.eye(context.size(0)).to(context_mask.device)
        loss = F.log_softmax(dot_product, dim=-1) * mask
        loss = (-loss.sum(dim=1)).mean()
        return loss
def train(model, data_loader, num_epochs, model_file, learning_rate=0.0001):
    """Train `model` for `num_epochs` epochs on `data_loader`, then save its
    state dict to `model_file`.

    Parameters whose names contain one of the encoder key substrings train at
    `learning_rate`; every other (decoder) parameter trains 5x faster.
    Gradients are clipped to norm 50 before each optimizer step.
    """
    decoder_learning_ratio = 5.0
    max_grad_norm = 50.0
    # Split parameters into encoder vs. decoder groups by name substring.
    encoder_keys = ('encode_emb', 'encode_gru', 'l1', 'l2')
    encoder_params, decoder_params = [], []
    for name, param in model.named_parameters():
        if any(key in name for key in encoder_keys):
            encoder_params.append(param)
        else:
            decoder_params.append(param)
    optimizer = torch.optim.AdamW(
        [{'params': encoder_params},
         {'params': decoder_params, 'lr': learning_rate * decoder_learning_ratio}],
        lr=learning_rate)
    for epoch in tqdm.notebook.trange(num_epochs, desc="training", unit="epoch"):
        with tqdm.notebook.tqdm(
                data_loader,
                desc="epoch {}".format(epoch + 1),
                unit="batch",
                total=len(data_loader)) as batch_iterator:
            model.train()
            running_loss = 0.0
            for step, batch in enumerate(batch_iterator, start=1):
                source, source_mask, target, target_mask = batch["conv_tensors"]
                optimizer.zero_grad()
                loss = model.compute_loss(source, source_mask, target, target_mask)
                running_loss += loss.item()
                loss.backward()
                # Clip gradients before taking the optimizer step.
                nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
                optimizer.step()
                batch_iterator.set_postfix(mean_loss=running_loss / step,
                                           current_loss=loss.item())
    # Persist the trained weights.
    torch.save(model.state_dict(), model_file)
# You are welcome to adjust these parameters based on your model implementation.
num_epochs = 10
batch_size = 32
learning_rate = 0.001
# Reloading the data_loader to increase batch_size
# Train the bi-encoder baseline and checkpoint it to disk.
baseline_model = RetrieverBiencoder(bert).to(device)
train(baseline_model, train_dataloader, num_epochs, "baseline_model.pt",learning_rate=learning_rate)
# Download the trained model to local for future use
#files.download('baseline_model.pt')
# NOTE(review): bert1/bert2 are only defined in a commented-out cell above, and
# "baseline_model3.pt" is not produced by this notebook — this reload will fail
# unless those are restored; confirm which encoders/checkpoint are intended.
baseline_model = RetrieverPolyencoder(bert1,bert2,vocab).to(device)
baseline_model.load_state_dict(torch.load("baseline_model3.pt", map_location=device))
# Qualitative check: score conversation i's context against the first 100
# candidate responses (vals = ids, masks, target ids, target masks).
vals = transformer_collate_fn(all_conversations[0:100],tokenizer)
i=3
scores = baseline_model.score(vals[0][i].unsqueeze(0).cuda(),vals[1][i].unsqueeze(0).cuda(),vals[2].unsqueeze(0).cuda(),vals[3].unsqueeze(0).cuda()).detach().cpu().numpy()
all_conversations[i][0]
all_conversations[np.argmax(scores)][1]
# Retrieval accuracy@1 over the first max_v conversations: the correct response
# should be the top-scoring candidate for its own context.
max_v = 100
vals = transformer_collate_fn(all_conversations[0:max_v],tokenizer)
correct = 0
for i in range(max_v):
    scores = baseline_model.score(vals[0][i].unsqueeze(0).cuda(),vals[1][i].unsqueeze(0).cuda(),vals[2].unsqueeze(0).cuda(),vals[3].unsqueeze(0).cuda()).detach().cpu().numpy()
    if np.argmax(scores)==i:
        correct+=1
        print(all_conversations[i][0])
        print(all_conversations[np.argmax(scores)][1]+"\n")
print(correct/max_v)
```
| github_jupyter |
# Cruise collocation with gridded data
Authors
* [Dr Chelle Gentemann](mailto:gentemann@esr.org) - Earth and Space Research, USA
* [Dr Marisol Garcia-Reyes](mailto:marisolgr@faralloninstitute.org) - Farallon Institute, USA
-------------
# Structure of this tutorial
1. Opening data
1. Collocating satellite data with a cruise dataset
-------------------
## Import python packages
You are going to want numpy, pandas, matplotlib.pyplot and xarray
```
import warnings
warnings.simplefilter('ignore') # filter some warning messages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xarray as xr
import cartopy.crs as ccrs
```
## A nice cartopy tutorial is [here](http://earthpy.org/tag/visualization.html)
# Collocating Saildrone cruise data with satellite SSTs
* read in the Saildrone data
## The NCEI trajectory format uses 'obs' as the coordinate. This is an example of an 'older' style of data formatting that doesn't really mesh well with modern software capabilities.
* So, let's change that by using [.swap_dims](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.swap_dims.html) to change the coordinate from `obs` to `time`
* Another thing, `latitude` and `longitude` are just long and annoying, lets [.rename](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.rename.html) them to `lat` and `lon`
* Finally, the first and last part of the cruise the USV is being towed, so let's only include data from `2018-04-12T02` to `2018-06-10T18`
```
#use first url if not online
#url = '../data/saildrone-gen_4-baja_2018-sd1002-20180411T180000-20180611T055959-1_minutes-v1.nc'
url = 'https://podaac-opendap.jpl.nasa.gov/opendap/hyrax/allData/insitu/L2/saildrone/Baja/saildrone-gen_4-baja_2018-sd1002-20180411T180000-20180611T055959-1_minutes-v1.nc'
# Open the Saildrone trajectory dataset (the original cell left this blank as
# a fill-in exercise, which is a SyntaxError as written).
ds_usv = xr.open_dataset(url)
# Use time as the coordinate, shorten lat/lon names, then trim the towed legs.
ds_usv2 = ds_usv.isel(trajectory=0)\
    .swap_dims({'obs':'time'})\
    .rename({'longitude':'lon','latitude':'lat'})
ds_usv_subset = ds_usv2.sel(time=slice('2018-04-12T02','2018-06-10T18'))
# BUG FIX: the original format used %m (month) where %M (minute) is intended.
start_time = pd.to_datetime(str(ds_usv_subset.time.min().data)).strftime('%Y-%m-%dT%H:%M:%SZ')
end_time = pd.to_datetime(str(ds_usv_subset.time.max().data)).strftime('%Y-%m-%dT%H:%M:%SZ')
print('start: ',start_time,'end: ',end_time)
```
## Let's open 2 months of 0.25 degree AVHRR OI SST data
`xarray`can open multiple files at once using string pattern matching. `xr.open_mfdataset()`
## Now open multiple files (lazy) using [.open_mfdataset](http://xarray.pydata.org/en/stable/generated/xarray.open_mfdataset.html#xarray.open_mfdataset)
* use the option `coords = 'minimal'`
```
files = '../data/avhrr_oi/*.nc'
ds_sst = xr.open_mfdataset(files,coords = 'minimal')
```
# Let's see what one day looks like
* add coastlines `ax.coastlines()`
* add gridlines `ax.gridlines()`
```
sst = ds_sst.sst[0,:,:]
ax = plt.axes(projection=ccrs.Orthographic(-80, 35))
sst.plot(ax=ax, transform=ccrs.PlateCarree())
```
# Change the figure
* colormap `cmap='jet'`
* colorscale `vmin=-1,vmax=34`
* add land `ax.stock_img()`
```
ax = plt.axes(projection=ccrs.Orthographic(-80, 35))
sst.plot(ax=ax, transform=ccrs.PlateCarree())
```
# Look at the sst data and notice the longitude range
## Again with the 0-360 vs -180-180. Change it up below!
* `ds_sst.coords['lon'] = np.mod(ds_sst.coords['lon'] + 180,360) - 180`
* remember to sort by lon, `.sortby(ds_sst.lon)`
* Also, look at the coordinates, there is an extra one `zlev`. Drop it using .isel
```
# The original cell left these three assignments blank as fill-in exercises
# (SyntaxError as written); filled in per the instructions above the cell.
ds_sst.coords['lon'] = np.mod(ds_sst.coords['lon'] + 180, 360) - 180  # wrap lon to -180..180
ds_sst = ds_sst.sortby(ds_sst.lon)  # sort by the re-wrapped longitudes
ds_sst = ds_sst.isel(zlev=0)  # drop the singleton depth level coordinate
ds_sst
```
`open_mfdataset` even puts them in the right order for you.
```
ds_sst.time
```
How big is all this data uncompressed? Will it fit into memory?
Use `.nbytes` / 1e9 to convert it into gigabytes
```
print('file size (GB):', ds_sst.nbytes / 1e9)
```
# Xarray interpolation won't run on chunked dimensions.
1. First let's subset the data to make it smaller to deal with by using the cruise lat/lons
* Find the max/min of the lat/lon using `.lon.min().data`
1. Now load the data into memory (de-Dask-ify) it using `.load()`
```
#Step 1 from above
lon_min,lon_max = ds_usv_subset.lon.min().data,ds_usv_subset.lon.max().data
lat_min,lat_max = ds_usv_subset.lat.min().data,ds_usv_subset.lat.max().data
subset = ds_sst.sel(lon=slice(lon_min,lon_max),
lat=slice(lat_min,lat_max))
print('file size (GB):', subset.nbytes / 1e9)
#Step 2 from above
subset.load()
```
# Collocate USV data with SST data
There are different options when you interpolate. First, let's just do a linear interpolation using [.interp()](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.interp.html#xarray.Dataset.interp)
`Dataset.interp(coords=None, method='linear', assume_sorted=False, kwargs={}, **coords_kwargs))`
```
ds_collocated = subset.interp(lat=ds_usv_subset.lat,lon=ds_usv_subset.lon,time=ds_usv_subset.time,method='linear')
```
# Collocate USV data with SST data
There are different options when you interpolate. First, let's just do a nearest point rather than interpolate the data
`method = 'nearest'`
```
ds_collocated_nearest = subset.interp(lat=ds_usv_subset.lat,lon=ds_usv_subset.lon,time=ds_usv_subset.time,method='nearest')
```
## Now, calculate the different in SSTs and print the [.mean()](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.mean.html#xarray.DataArray.mean) and [.std()](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.std.html#xarray.DataArray.std)
For the satellite data we need to use `sst` and for the USV data we need to use `TEMP_CTD_MEAN`
```
dif = ds_collocated_nearest.sst-ds_usv_subset.TEMP_CTD_MEAN
print('mean difference = ',dif.mean().data)
print('STD = ',dif.std().data)
```
# xarray can do more!
* concatenation
* open network located files with openDAP
* import and export Pandas DataFrames
* export datasets to netCDF (.nc) files
* groupby_bins
* resampling and reduction
For more details, read this blog post: http://continuum.io/blog/xray-dask
```
#ds_collocated_nearest.to_netcdf('./data/new file.nc')
```
## Where can I find more info?
### For more information about xarray
- Read the [online documentation](http://xarray.pydata.org/)
- Ask questions on [StackOverflow](http://stackoverflow.com/questions/tagged/python-xarray)
- View the source code and file bug reports on [GitHub](http://github.com/pydata/xarray/)
### For more doing data analysis with Python:
- Thomas Wiecki, [A modern guide to getting started with Data Science and Python](http://twiecki.github.io/blog/2014/11/18/python-for-data-science/)
- Wes McKinney, [Python for Data Analysis](http://shop.oreilly.com/product/0636920023784.do) (book)
### Packages building on xarray for the geophysical sciences
For analyzing GCM output:
- [xgcm](https://github.com/xgcm/xgcm) by Ryan Abernathey
- [oogcm](https://github.com/lesommer/oocgcm) by Julien Le Sommer
- [MPAS xarray](https://github.com/pwolfram/mpas_xarray) by Phil Wolfram
- [marc_analysis](https://github.com/darothen/marc_analysis) by Daniel Rothenberg
Other tools:
- [windspharm](https://github.com/ajdawson/windspharm): wind spherical harmonics by Andrew Dawson
- [eofs](https://github.com/ajdawson/eofs): empirical orthogonal functions by Andrew Dawson
- [infinite-diff](https://github.com/spencerahill/infinite-diff) by Spencer Hill
- [aospy](https://github.com/spencerahill/aospy) by Spencer Hill and Spencer Clark
- [regionmask](https://github.com/mathause/regionmask) by Mathias Hauser
- [salem](https://github.com/fmaussion/salem) by Fabien Maussion
Resources for teaching and learning xarray in geosciences:
- [Fabien's teaching repo](https://github.com/fmaussion/teaching): courses that combine teaching climatology and xarray
| github_jupyter |
```
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import stopwords
stopset = list(set(stopwords.words('english')))
import re
import csv
import nltk.classify
def replaceTwoOrMore(s):
    """Collapse any run of 2+ repeated characters to exactly two (e.g. 'coooool' -> 'cool')."""
    return re.sub(r"(.)\1{1,}", r"\1\1", s, flags=re.DOTALL)
def processTweet(tweet):
    """Normalize a raw tweet: lowercase, replace URLs with 'URL' and @mentions
    with 'AT_USER', collapse whitespace, drop the '#' from hashtags, and strip
    surrounding quotes."""
    normalized = tweet.lower()
    normalized = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', 'URL', normalized)
    normalized = re.sub(r'@[^\s]+', 'AT_USER', normalized)
    normalized = re.sub(r'[\s]+', ' ', normalized)
    normalized = re.sub(r'#([^\s]+)', r'\1', normalized)
    return normalized.strip('\'"')
def getStopWordList(stopWordListFileName):
    """Read one stop word per line from `stopWordListFileName`.

    The placeholder tokens 'AT_USER' and 'URL' produced by processTweet are
    always prepended. Blank lines are skipped.

    Fixes vs. original: the file is now closed via a context manager even on
    error, and blank lines no longer add '' to the stop-word list.
    """
    stopWords = ['AT_USER', 'URL']
    with open(stopWordListFileName, 'r') as fp:
        for line in fp:
            word = line.strip()
            if word:
                stopWords.append(word)
    return stopWords
def getFeatureVector(tweet, stopWords):
    """Extract lowercase word features from a preprocessed tweet.

    Each whitespace token has repeated characters collapsed to two and
    surrounding punctuation trimmed; tokens that are stop words or are not
    (at least two-letter) alphanumeric words are discarded.
    """
    word_re = re.compile(r"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$")
    featureVector = []
    for raw in tweet.split():
        # Collapse character runs (inlined replaceTwoOrMore) and trim punctuation.
        candidate = re.sub(r"(.)\1{1,}", r"\1\1", raw, flags=re.DOTALL)
        candidate = candidate.strip('\'"?,.')
        if candidate in stopWords or word_re.search(candidate) is None:
            continue
        featureVector.append(candidate.lower())
    return featureVector
def extract_features(tweet):
    """Map a tokenized tweet to a bag-of-words presence dict over the module-level
    `featureList` vocabulary (keys are 'contains(<word>)')."""
    present = set(tweet)
    return {'contains(%s)' % word: (word in present) for word in featureList}
# Train a Naive Bayes sentiment classifier on the labeled tweet CSV, classify a
# sample tweet, and report accuracy.
stopWords = getStopWordList('data/stopwordsID.txt')
featureList = []
tweets = []
# Rows are '|'-quoted CSV: column 0 = sentiment label, column 1 = raw tweet.
with open('data/training.csv', 'r') as training_file:  # was an unclosed open()
    inpTweets = csv.reader(training_file, delimiter=',', quotechar='|')
    for row in inpTweets:
        sentiment = row[0]
        tweet = row[1]
        processedTweet = processTweet(tweet)  # preprocessing
        featureVector = getFeatureVector(processedTweet, stopWords)  # get feature vector
        featureList.extend(featureVector)
        tweets.append((featureVector, sentiment))
# Deduplicate the global vocabulary consumed by extract_features.
featureList = list(set(featureList))
training_set = nltk.classify.util.apply_features(extract_features, tweets)
NBClassifier = nltk.NaiveBayesClassifier.train(training_set)
testTweet = 'pantai di lombok bersih bersih. pasirnya juga indah'
processedTestTweet = processTweet(testTweet)
sentiment = NBClassifier.classify(extract_features(getFeatureVector(processedTestTweet, stopWords)))
print("Test Tweets = %s, Sentiment = %s\n" % (testTweet, sentiment))
# print("Show Most Informative Features", NBClassifier.show_most_informative_features(32))
# print()
# print("Extract Features", extract_features(testTweet.split()))
# BUG FIX: nltk.classify.accuracy takes (classifier, gold_labeled_set); the
# original passed (processedTestTweet, NBClassifier), which raises. With no
# held-out split in this script, report training-set accuracy.
print("Akurasi Hasil Klasifikasi :", (nltk.classify.accuracy(NBClassifier, training_set)) * 100)
# print("Akurasi Hasil Klasifikasi :", accuracy_score(testTweet, sentiment))
# kal = getFeatureVector(processTweet(testTweet), stopWords)
# kal = " ".join(str(x) for x in kal)
# print(kal)
# d = {}
# for word in kal.split():
# word = int(word) if word.isdigit() else word
# if word in d:
# d[word] += 1
# else:
# d[word] = 1
# print(d)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.