text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# Monkey Bread
```
# Spectral coefficient-of-variation (SCV) analysis of NeuroTycho monkey ECoG.
from SpectralCV import ecog_pipe as ep
import numpy as np
import scipy as sp
import scipy.io as io
import scipy.signal as sig
import math as math
import random
from scipy import integrate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
plt.style.use('seaborn-colorblind')      # colorblind-friendly palette
plt.rcParams['image.cmap'] = 'RdBu'      # diverging default colormap
```
## Baking all 4 loaves
```
# Windows paths to four NeuroTycho anesthesia/sleep ECoG sessions (two days
# each for monkeys Chibi and George); "%d" is the session number, presumably
# filled in by ep.monkeyBread — TODO confirm.
data_path1 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120730PF_Anesthesia+and+Sleep_Chibi_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
data_path2 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120802PF_Anesthesia+and+Sleep_Chibi_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
data_path3 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120731PF_Anesthesia+and+Sleep_George_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
data_path4 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120803PF_Anesthesia+and+Sleep_George_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
chan = 129              # channel count passed to monkeyBread (dataset is 128-ch; confirm off-by-one intent)
fs = 1000               # sampling rate in Hz
nperseg = 500           # spectral segment length in samples
noverlap = nperseg/2    # 50% overlap; note: float (250.0), not int
#data1 = ep.monkeyBread(data_path1, chan, fs, nperseg, noverlap)
#create h5py path to chibi bread
import h5py
# 'a' = append mode: creates scv.h5 if missing, otherwise opens for update.
scvh5 = h5py.File('scv.h5', 'a')
# NOTE(review): assumes the 'monkey' group already exists in the file;
# h5py raises KeyError otherwise.
monkey = scvh5['monkey']
monkey.create_dataset('chibiPF0730_2', data=ep.monkeyBread(data_path1, chan, fs, nperseg, noverlap))
monkey.create_dataset('chibiPF0802_2', data=ep.monkeyBread(data_path2, chan, fs, nperseg, noverlap))
monkey.create_dataset('georgePF0731_2', data=ep.monkeyBread(data_path3, chan, fs, nperseg, noverlap))
monkey.create_dataset('georgePF0803_2', data=ep.monkeyBread(data_path4, chan, fs, nperseg, noverlap))
scvh5.close()
```
## Using the data
```
import matplotlib.pyplot as plt
import h5py
from SpectralCV import ecog_pipe as ep
#load data from h5
h5_file = '../Voytek/scv.h5'  # path to the HDF5 store written by the baking cell
def addattrs(dset, fs, nperseg, noverlap):
    """Record the spectral-analysis parameters as HDF5 attributes on `dset`."""
    for key, value in (('fs', fs), ('nperseg', nperseg), ('noverlap', noverlap)):
        dset.attrs[key] = value
# Tag each dataset with the parameters used to generate it.
# NOTE(review): fs/nperseg/noverlap are globals from the baking cell above.
with h5py.File(h5_file, 'a') as h5:
    dset = h5['monkey/chibiPF0730_05']
    addattrs(dset, fs, nperseg, noverlap)
    dset = h5['monkey/chibiPF0802_05']
    addattrs(dset, fs, nperseg, noverlap)
    dset = h5['monkey/georgePF0731_05']
    addattrs(dset, fs, nperseg, noverlap)
    dset = h5['monkey/georgePF0803_05']
    addattrs(dset, fs, nperseg, noverlap)
# Sanity check: read one attribute back.
with h5py.File(h5_file, 'a') as h5:
    print(h5['monkey/georgePF0803_2'].attrs['nperseg'])
# plotting
with h5py.File(h5_file, 'r') as h5:
    # NOTE(review): '..._05k' looks like a typo for '..._05' — verify key name.
    bread = h5['monkey/chibiPF0730_05k']
    print(bread.shape)
    print(bread[0][1][:])
#for i in range(5):
#    plt.figure(i+1)
#    xaxis = np.arange(0,501,2)
#    plt.loglog(xaxis, bread[i][:][:].T)
# List everything currently stored under the 'monkey' group.
with h5py.File(h5_file, 'a') as h5:
    print([k for k in h5['monkey'].items()])
# Rename georgePF0803_05k -> georgePF0803_05 (hard-link, then delete old key).
with h5py.File(h5_file, 'a') as h5:
    #del h5['monkey/georgePF0731_2']
    #del h5['monkey/georgePF0731_05']
    h5['monkey/georgePF0803_05'] = h5['monkey/georgePF0803_05k']
    del h5['monkey/georgePF0803_05k']
```
| github_jupyter |
```
import sklearn
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso
from sklearn import svm
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier,AdaBoostRegressor, AdaBoostClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.cluster import KMeans
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.inspection import permutation_importance
import pickle
```
# Fetching and preparing data
```
# features = pd.read_csv('../data/features.csv', index_col= 0)
features = pd.read_csv('../data/features_with_vars.csv', index_col=0)
# Uncomment desired rating mode
# ratings = pd.read_csv('../data/average_ratings.csv',index_col = 0)
ratings = pd.read_csv('../data/average_ratings_no_outliers.csv',index_col = 0)
# uncomment the line below if you are using features.csv instead of features_with_vars.csv
# features_reduced = features.iloc[:,2:-6]
# comment the line below if you are using features.csv instead of features_with_vars.csv
# Drop the two leading id columns and the trailing non-feature column.
features_reduced = features.iloc[:,2:-1]
X = features_reduced.values
y = ratings.iloc[:,1].values  # second column: average snippet rating
# Fixed seed so every model below sees the same 80/20 split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12)
# Maps model name -> test-set MSE (a dict, despite the name).
array_of_accuracies = {}
```
# Linear Regression
```
# Train an ordinary-least-squares baseline and record its test-set MSE.
linear_regression_model = LinearRegression()
reg = linear_regression_model.fit(X_train, y_train)
# Evaluating model
reg_y_pred = reg.predict(X_test)
acc_mse = mean_squared_error(reg_y_pred, y_test)
print(acc_mse)  # fix: reuse the stored value instead of recomputing the MSE
array_of_accuracies["Linear Regression"] = acc_mse
# save the model to disk (fix: close the file handle via a context manager)
filename = 'linear_model.sav'
with open(filename, 'wb') as model_file:
    pickle.dump(reg, model_file)
```
# Support Vector Machines
```
# Training svm model with rbf kernel
SVM = svm.SVR()  # NOTE(review): never used below; the tuned model is svm_reg
# SVM HyperParameter Tuning
K = 10  # number of CV folds
# specifying list of parameters to be searched over
max_iter_int = -1  # NOTE(review): defined but never used
kernel = ["linear", "poly", "rbf", "sigmoid"]
degree = [2,4,6,8,10]
tolfloat = 1e-5    # NOTE(review): defined but never used
# creating variables that will keep track of best score and parameters
best_score = 10
hp = {"kernel" : kernel[0], "degree" : 0}
counter = 1
# looping over said parameters
for j in kernel:
    count = 0
    for k in degree:
        # degree only matters for the poly kernel, so non-poly kernels are
        # evaluated once (count == 0) instead of once per degree
        if(j == "poly" or count == 0):
            svm_reg = svm.SVR(kernel = j, degree = k)
            score = cross_val_score(svm_reg,X_train,y_train,cv = K, scoring = "neg_mean_squared_error")
            # storing the param if they show improvements (scores are negative MSE)
            if(abs(np.mean(score)) < best_score):
                best_score = abs(np.mean(score))
                hp = {"kernel" : j, "degree": k}
            counter+=1
        count += 1
# Assigning the best hyperparameters to hp_svm_reg
hp_svm_reg = hp
# creating the svm_reg based on those parameters
svm_reg = svm.SVR(kernel = hp_svm_reg["kernel"], degree = hp_svm_reg["degree"])
svm_reg.fit(X_train,y_train)
# using svm_reg to evaluate the test set
svm_y_pred = svm_reg.predict(X_test)
# getting the accuracy of the prediction
acc_mse = mean_squared_error(svm_y_pred,y_test)
print(acc_mse)
array_of_accuracies["SVM"] = (acc_mse)
```
# Random Forests
```
K = 10  # number of CV folds
# specifying list of parameters to be searched over
max_d = [1,2,3,4,5]
# NOTE(review): "auto" was removed as a max_features option in sklearn 1.3;
# this grid only runs on older versions — confirm the pinned sklearn.
max_f = ["sqrt","log2","auto"]
estimators = np.linspace(10,400,5)  # 10..400 in 5 steps (floats; cast below)
# creating variables that will keep track of best score and parameters
best_score = 10
hp = {"max_depth" : max_d[0], "max_features": max_f[0], "n_estimators": int(estimators[0])}
counter = 1
# looping over said parameters (exhaustive 5*3*5 grid search)
for j in max_d:
    for k in max_f:
        for l in estimators:
            # creating rf with the parameters and then getting the cross val score for it
            rf = RandomForestRegressor(random_state=123, n_estimators = int(l),max_features = k,max_depth = j)
            score = cross_val_score(rf,X_train,y_train,cv = K, scoring = "neg_mean_squared_error")
            # storing the param if they show improvements
            if(abs(np.mean(score)) < best_score):
                best_score = abs(np.mean(score))
                hp = {"max_depth" : j, "max_features": k, "n_estimators": int(l)}
            # print(abs((np.mean(score))))
            # print(counter)
            counter+=1
# the best params found from the previous snippet
hp_rf = hp
# Evaluating model
rf = RandomForestRegressor(random_state=123,n_estimators = hp_rf["n_estimators"], max_features = hp_rf["max_features"]
                           , max_depth = hp_rf["max_depth"])
# fit the model on the training data
rf.fit(X_train,y_train)
# use the model to predict the test set
rf_y_pred = rf.predict(X_test)
acc_mse = mean_squared_error(rf_y_pred,y_test)
print(acc_mse)
array_of_accuracies["Random Forest"] = (acc_mse)
```
# Ada Boost
```
# AdaBoost regressor with a grid search over learning rate and ensemble size.
K = 10  # number of CV folds
# specifying list of parameters to be searched over
lr = np.linspace(0.01,1,10)
estimators = np.linspace(10,400,5)
# creating variables that will keep track of best score and parameters
# (fix: dropped the unused "algorithm"/SAMME entries — `algorithm` belongs to
# AdaBoostClassifier, not AdaBoostRegressor, and the key was never read)
best_score = 10
hp = {"learning_rate" : 0.01, "n_estimators": int(estimators[0])}
counter = 1
# looping over said parameters
for j in lr:
    for l in estimators:
        # cross-validate an AdaBoost regressor with these parameters
        ab = AdaBoostRegressor(random_state=123, n_estimators = int(l),learning_rate = j)
        score = cross_val_score(ab,X_train,y_train,cv = K, scoring = "neg_mean_squared_error")
        # storing the param if they show improvements
        if(abs(np.mean(score)) < best_score):
            best_score = abs(np.mean(score))
            hp = {"learning_rate" : j, "n_estimators": int(l)}
        counter+=1
# (fix: removed a leftover 10-fold cross_val_score on a default AdaBoost whose
# result was discarded — pure wasted compute)
# Assigning the best hyperparameters to hp_ab
hp_ab = hp
# Creating the final regressor from the best hyperparameters
ab = AdaBoostRegressor(random_state=123, n_estimators = hp_ab["n_estimators"],learning_rate = hp_ab["learning_rate"])
# fitting the model
ab.fit(X_train,y_train)
# using the model to make prediction on the test set
ab_y_pred = ab.predict(X_test)
acc_mse = mean_squared_error(ab_y_pred,y_test)
print(acc_mse)
array_of_accuracies["AdaBoost"] = (acc_mse)
print(array_of_accuracies)
```
# Nearest Neighbors
```
counter = 0
training_error = np.zeros(25)
# Training-set error for k = 1..25 neighbors.
for i in range(1,26):
    nn = KNeighborsRegressor(n_neighbors=i)
    nn.fit(X_train, y_train)
    training_error[counter] = (mean_squared_error(nn.predict(X_train),y_train))
    counter += 1
x = np.linspace(1,25,25)
a = sns.scatterplot(x = x,y = training_error)
a.set(xlabel = 'number of neighbors', ylabel = 'training error')
plt.show()
# NOTE(review): k=1 trivially gives ~0 *training* error, so selecting it from
# the plot above overfits — selection should use validation error instead.
nn = KNeighborsRegressor(n_neighbors = 1)
nn.fit(X_train,y_train)
# evaluating the model on the held-out test set
y_test_nn = nn.predict(X_test)
acc_mse = mean_squared_error(y_test_nn,y_test)
array_of_accuracies["KNN"] = acc_mse
```
# Neural Net
```
# Default-architecture MLP regressor; fixed seed for reproducibility.
regr = MLPRegressor(random_state=1, learning_rate_init = 0.01, max_iter=500).fit(X_train, y_train)
y_pred_nn = regr.predict(X_test)
acc_mse = mean_squared_error(y_pred_nn,y_test)
print(acc_mse)
array_of_accuracies["Neural Network"] = acc_mse
```
# Make Plot of all the Accuracies
```
# Bar chart comparing test-set MSE across all trained regression models.
print(array_of_accuracies.keys())
print(list(array_of_accuracies.values()))
plt.figure(figsize = (9,8))
plt.ylabel('Mean Squared Error')  # fix: label typo ("Squred")
sns.set_context("paper", font_scale=1)
sns.set_style('whitegrid')
sns.set_style({'font.family': 'Times New Roman'})
sns.barplot(x = list(array_of_accuracies.keys()), y = list(array_of_accuracies.values()))
plt.tight_layout()
```
# Performing the Binary Task for Comparison with the Michigan Study
```
# Binarize ratings at the scale midpoint (rating > 5 -> 1) for classification.
binary_ratings = 1*(ratings['snippet rating'].values > 5)
y = binary_ratings
# Same seed as the regression split so rows match across the two tasks.
X_train, X_test, y_train, y_test = train_test_split(X, binary_ratings, test_size=0.2, random_state=12)
```
# Logistic Regression, Random Forests, AdaBoost, SVM
```
# Classifiers to compare on the binary task, all with default hyperparameters.
supervised_model_classes = {
    "Random Forest": RandomForestClassifier,
    "AdaBoost": AdaBoostClassifier,
    "Logistic R.": LogisticRegression,
    "Naive Bayes": GaussianNB,
    "Bernoulli NB": BernoulliNB
}
for model_name, model_class in supervised_model_classes.items():
    fit_model = model_class().fit(X_train, y_train)
    y_pred = fit_model.predict(X_test)
    # accuracy_score is symmetric, so the (pred, true) argument order is harmless
    l1 = accuracy_score(y_pred, y_test)
    print(f"{model_name} \t{l1}")
```
| github_jupyter |
<div class="alert alert-block alert-info">
Section of the book chapter: <b>5.2.2 Active Learning</b>
</div>
# 4. Active learning
**Table of Contents**
* [4.1 Active Learning Setup](#4.1-Active-Learning-Setup)
* [4.2 Initial Estimation](#4.2-Initial-Estimation)
* [4.3 Including Active Learning](#4.3-Including-Active-Learning)
**Learnings:**
- how to implement basic active learning approaches,
- how active learning can improve estimations.
### Packages
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF
from modAL.models import ActiveLearner
import utils
```
### Read in Data
**Dataset:** Felix M. Riese and Sina Keller, "Hyperspectral benchmark dataset on soil moisture", Dataset, Zenodo, 2018. [DOI:10.5281/zenodo.1227836](http://doi.org/10.5281/zenodo.1227836) and [GitHub](https://github.com/felixriese/hyperspectral-soilmoisture-dataset)
**Introducing paper:** Felix M. Riese and Sina Keller, “Introducing a Framework of Self-Organizing Maps for Regression of Soil Moisture with Hyperspectral Data,” in IGARSS 2018 - 2018 IEEE International Geoscience and Remote Sensing Symposium, Valencia, Spain, 2018, pp. 6151-6154. [DOI:10.1109/IGARSS.2018.8517812](https://doi.org/10.1109/IGARSS.2018.8517812)
```
# 80% of the training labels are hidden to simulate a small labeled pool.
X_train, X_test, y_train, y_test, y_train_full = utils.get_xy_split(missing_rate=0.8)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape, y_train_full.shape)
# NOTE(review): unlabeled entries appear to be encoded as negative values —
# verify against utils.get_xy_split.
print(y_train[y_train>=0.].shape, y_test[y_test>=0.].shape, y_train_full[y_train>=0.].shape)
index_initial = np.where(y_train>=0.)[0]  # indices of initially-labeled samples
```
***
## 4.1 Active Learning Setup
Source: [modAL/active_regression.py](https://github.com/modAL-python/modAL/blob/master/examples/active_regression.py)
```
# defining the kernel for the Gaussian process: an RBF for the signal plus a
# WhiteKernel to absorb observation noise
kernel = RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
# Query strategy for regression: greedy uncertainty sampling.
def regression_std(regressor, X):
    """Return (index, sample) of the candidate with the largest predictive std."""
    prediction = regressor.predict(X, return_std=True)
    std = prediction[1]
    query_idx = np.argmax(std)
    return query_idx, X[query_idx]
# initializing the active learner: GP regressor + std-based query strategy,
# seeded with only the initially-labeled samples
regressor = ActiveLearner(
    estimator=GaussianProcessRegressor(kernel=kernel),
    query_strategy=regression_std,
    X_training=X_train[index_initial],
    y_training=y_train[index_initial])
print(X_train[index_initial].shape, y_train[index_initial].shape)
```
***
## 4.2 Initial Estimation
```
# plot initial estimation (before any active-learning queries)
plt.figure(figsize=(6,6))
pred, std = regressor.predict(X_train, return_std=True)
# plot prediction of supervised samples
plt.scatter(y_train_full[index_initial], pred[index_initial], alpha=0.5)
# plot prediction of unsupervised samples
not_initial = [i for i in range(y_train.shape[0]) if i not in index_initial]
plt.scatter(y_train_full[not_initial], pred[not_initial], alpha=0.5)
# plot std band
# NOTE(review): 339 must equal len(pred) (number of training samples) and
# 22-45 the soil-moisture range — confirm these for other splits.
plt.fill_between(np.linspace(22, 45, 339), pred-std, pred+std, alpha=0.2)
plt.xlim(22.0, 45.0)
plt.ylim(22.0, 45.0)
plt.xlabel("Soil Moisture (Ground Truth) in %")
plt.ylabel("Soil Moisture (Prediction) in %")
plt.show()
```
***
## 4.3 Including Active Learning
```
n_queries = 150
# Active-learning loop: query the most uncertain sample, then teach the
# learner its true label (the oracle is simulated by y_train_full).
for idx in range(n_queries):
    query_idx, query_instance = regressor.query(X_train)
    # print(query_idx, query_instance)
    # print(X_train[query_idx].reshape(1,125).shape)
    # print(y_train_full[query_idx].reshape(-1, ).shape)
    # NOTE(review): 125 is the hard-coded feature dimension of this dataset.
    regressor.teach(X_train[query_idx].reshape(1, 125), y_train_full[query_idx].reshape(-1, ))
# plot estimation after active learning (same layout as the initial plot)
plt.figure(figsize=(6,6))
pred, std = regressor.predict(X_train, return_std=True)
# plot prediction of supervised samples
plt.scatter(y_train_full[index_initial], pred[index_initial], alpha=0.5)
# plot prediction of unsupervised samples
not_initial = [i for i in range(y_train.shape[0]) if i not in index_initial]
plt.scatter(y_train_full[not_initial], pred[not_initial], alpha=0.5)
# plot std
plt.fill_between(np.linspace(22, 45, 339), pred-std, pred+std, alpha=0.2)
plt.xlim(22.0, 45.0)
plt.ylim(22.0, 45.0)
plt.xlabel("Soil Moisture (Ground Truth) in %")
plt.ylabel("Soil Moisture (Prediction) in %")
plt.show()
```
| github_jupyter |
```
import os
import random
import torch
import numpy as np
from torch.nn import functional as F
# Kinship ("family") knowledge-graph dataset laid out as plain text files.
dataset_dir = "./family/"
all_trip_file = os.path.join(dataset_dir, "all.txt")         # one triplet per line
relations_file = os.path.join(dataset_dir, "relations.txt")  # one relation name per line
entities_file = os.path.join(dataset_dir, "entities.txt")    # one entity name per line
def read_xxx_to_id(file_path):
    """Map each stripped line of `file_path` to its 0-based line index."""
    with open(file_path, 'r') as handle:
        return {line.strip(): index for index, line in enumerate(handle)}
def parse_triplets(triplets_file: str,
                   rel2id: dict,
                   ent2id: dict):
    """Read triplets (relation, head, tail).

    Each line of `triplets_file` is "head<TAB>relation<TAB>tail"; lines whose
    relation or entities are absent from the id maps are silently skipped.
    """
    triplets = []
    with open(triplets_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            assert(len(fields) == 3)
            head, rel, tail = fields
            if rel in rel2id and head in ent2id and tail in ent2id:
                triplets.append((rel2id[rel], ent2id[head], ent2id[tail]))
    return triplets
```
## Read file and resample
```
resample = False
# NOTE(review): placeholder — must be a positive int before enabling
# `resample`; random.sample(..., False) would raise.
num_entities = False
rel2id = read_xxx_to_id(relations_file)
id2rel = {ident: rel for rel, ident in rel2id.items()}
ent2id = read_xxx_to_id(entities_file)
# Optionally keep only a random subset of entities (triplets touching dropped
# entities are filtered out by parse_triplets' KeyError/membership skip).
if resample:
    ent2id = {ent: ident for (ent, ident) in random.sample(list(ent2id.items()), num_entities)}
id2ent = {ident: ent for ent, ident in ent2id.items()}
all_facts = parse_triplets(all_trip_file, rel2id, ent2id)
# relation to (head, tail)
rel2ht = {rel: [] for rel in id2rel.keys()}
for (r, h, t) in all_facts:
    rel2ht[r].append((h, t))
num_rel, num_ent, num_trip = len(rel2id), len(ent2id), len(all_facts)
num_rel, num_ent, num_trip  # displayed as notebook output
```
## Compute macro, micro and comprehensive saturations
```
def get_adjacency_matrices(triplets,
                           num_relations: int,
                           num_entities: int
                           ):
    """Build one sparse (num_entities x num_entities) adjacency matrix per
    relation from `triplets` of the form (relation, head, tail).

    Each matrix starts with a dummy zero entry at (0, 0) so relations with
    no triplets still produce a valid sparse tensor.
    """
    coo = {
        rel: ([[0, 0]], [0.], [num_entities, num_entities])
        for rel in range(num_relations)
    }
    for rel, head, tail in triplets:
        coo[rel][0].append([head, tail])
        coo[rel][1].append(1.)
    return {
        rel: torch.sparse.FloatTensor(
            torch.LongTensor(indices).t(),
            torch.FloatTensor(values),
            shape
        )
        for rel, (indices, values, shape) in coo.items()
    }
# `adj_matrices`: adjacency matrices, ORDER matters!!!
# `head_nodes`: head nodes list
# return: a list of `batch_size` nodes
def from_head_hops(adj_matrices: list,
                   head_nodes: list
                   ):
    """Count, for each head node, the number of paths to every reachable tail
    when following `adj_matrices` in order (one matrix per hop).

    `adj_matrices`: sparse adjacency matrices, ORDER matters!!!
    `head_nodes`: list of head-entity ids (the "batch").
    Returns {head: {tail: num_paths}} keeping only reachable tails.
    """
    # (batch_size, num_entities) one-hot rows for the query heads.
    # BUG FIX: the size must come from the `adj_matrices` argument — the
    # original read the *global* `adj_matrix`, which only worked by accident.
    v_x = F.one_hot(torch.LongTensor(head_nodes), adj_matrices[0].size(0)).float()
    # (num_entities, batch_size): column j counts, for each entity, the paths
    # reaching it from head_nodes[j]; chain one transposed matrix per hop.
    result = torch.matmul(adj_matrices[0].t(), v_x.t())
    for mat in adj_matrices[1:]:
        result = torch.mm(mat.t(), result)
    # (batch_size, num_entities)
    result = result.t().numpy()
    indices = np.argwhere(result > 0)
    # {head: {tail: num_paths}}
    ret = {head: {} for head in head_nodes}
    for row, col in indices:
        # `row` indexes into head_nodes; `col` is the tail entity id.
        ret[head_nodes[row]][col] = result[row, col]
    return ret
adj_matrix = get_adjacency_matrices(all_facts, num_rel, num_ent)
# Smoke test: 2-hop paths (relation 1 then 2) from the first two entities.
adj_matrix[0], from_head_hops([adj_matrix[1], adj_matrix[2]], list(ent2id.values())[:2])
from itertools import permutations  # NOTE(review): imported but unused below
topk_macro = 10
topk_micro = 10
topk_comp = 10
max_rule_len = 2  # NOTE(review): unused; the path length is hard-coded to 2 below
relations = list(id2rel.keys())
# All length-2 relation paths (Cartesian product — repeats allowed).
paths_permut = [(rel1, rel2) for rel1 in relations for rel2 in relations]
len(paths_permut)
from time import time
from collections import defaultdict
start = time()
macro_saturations = {rel: {path: 0. for path in paths_permut} for rel in id2rel.keys()}
tmp_micro_saturations = {rel: {path: {} for path in paths_permut} for rel in id2rel.keys()} # {path: {(head, tail): num_paths}}
micro_saturations = {rel: {path: 0. for path in paths_permut} for rel in id2rel.keys()}
total_paths_pairs = {rel: defaultdict(int) for rel in id2rel.keys()} # {(head, tail): num_total_paths}
# get number of triplets under each relation
num_rel2trip = {rel: len(rel2ht[rel]) for rel in id2rel.keys()}
# get triplets under each relation
rel_head2tails = {rel: defaultdict(list) for rel in id2rel.keys()}
for (r, h, t) in all_facts:
rel_head2tails[r][h].append(t)
for rel in rel_head2tails:
if not rel_head2tails[rel]:
continue
for path in macro_saturations[rel].keys():
matrices = [adj_matrix[r] for r in path]
heads = list(rel_head2tails[rel].keys())
num_paths_from_heads = from_head_hops(matrices, heads)
for head, tails in rel_head2tails[rel].items():
for tail in tails:
if tail in num_paths_from_heads[head]:
macro_saturations[rel][path] += 1.
tmp_micro_saturations[rel][path][(head, tail)] = num_paths_from_heads[head][tail]
total_paths_pairs[rel][(head, tail)] += num_paths_from_heads[head][tail]
macro_saturations[rel][path] /= num_rel2trip[rel]
for path, pairs in tmp_micro_saturations[rel].items():
for pair, num_path in pairs.items():
# `pair`: (head, tail)
micro_saturations[rel][path] += num_path / total_paths_pairs[rel][pair]
if len(tmp_micro_saturations[rel][path]) != 0:
micro_saturations[rel][path] /= num_rel2trip[rel]
print(f"{time() - start}s")
```
### Macro saturation
```
# Print the top-k length-2 paths by macro saturation for each relation.
for rel in macro_saturations:
    print(f"{id2rel[rel]:=^50}")
    sorted_items = sorted(macro_saturations[rel].items(), key=lambda x: x[1], reverse=True)
    for i, (path, saturation) in enumerate(sorted_items):
        if i == topk_macro:
            break
        print(f"{tuple(id2rel[r] for r in path)}: {saturation:.2f}")
    print("\n")
```
### Micro saturation
```
# Print the top-k length-2 paths by micro saturation for each relation.
for rel in micro_saturations:
    print(f"{id2rel[rel]:=^50}")
    sorted_items = sorted(micro_saturations[rel].items(), key=lambda x: x[1], reverse=True)
    for i, (path, saturation) in enumerate(sorted_items):
        if i == topk_micro:
            break
        print(f"{tuple(id2rel[r] for r in path)}: {saturation:.2f}")
    print("\n")
```
### Comprehensive saturation
```
# Comprehensive saturation = micro * macro, per relation and path.
comp_saturations = {
    rel: {} for rel in micro_saturations
}
for rel in micro_saturations:
    for path, value in micro_saturations[rel].items():
        comp_saturations[rel][path] = value * macro_saturations[rel][path]
for rel in comp_saturations:
    print(f"{id2rel[rel]:=^50}")
    sorted_items = sorted(comp_saturations[rel].items(), key=lambda x: x[1], reverse=True)
    for i, (path, saturation) in enumerate(sorted_items):
        if i == topk_comp:
            break
        # NOTE(review): the '%' suffix looks misleading — values are not x100.
        print(f"{tuple(id2rel[r] for r in path)}: {saturation:.2f}%")
    print("\n")
```
| github_jupyter |
##### Copyright 2021 The TF-Agents Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# REINFORCE agent
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/agents/tutorials/6_reinforce_tutorial">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/6_reinforce_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/6_reinforce_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/6_reinforce_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Introduction
This example shows how to train a [REINFORCE](https://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) agent on the Cartpole environment using the TF-Agents library, similar to the [DQN tutorial](1_dqn_tutorial.ipynb).

We will walk you through all the components in a Reinforcement Learning (RL) pipeline for training, evaluation and data collection.
## Setup
If you haven't installed the following dependencies, run:
```
!sudo apt-get install -y xvfb ffmpeg
!pip install 'imageio==2.4.0'
!pip install pyvirtualdisplay
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import imageio
import IPython
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import pyvirtualdisplay
import tensorflow as tf
from tf_agents.agents.reinforce import reinforce_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
# Ensure TF2 (eager) semantics even under a TF1-compat runtime.
tf.compat.v1.enable_v2_behavior()
# Set up a virtual display for rendering OpenAI gym environments.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
```
## Hyperparameters
```
env_name = "CartPole-v0" # @param {type:"string"}
num_iterations = 250 # @param {type:"integer"}  # training iterations
collect_episodes_per_iteration = 2 # @param {type:"integer"}
replay_buffer_capacity = 2000 # @param {type:"integer"}
fc_layer_params = (100,)  # one hidden layer of 100 units in the actor network
learning_rate = 1e-3 # @param {type:"number"}
log_interval = 25 # @param {type:"integer"}  # steps between loss logs
num_eval_episodes = 10 # @param {type:"integer"}
eval_interval = 50 # @param {type:"integer"}  # steps between evaluations
```
## Environment
Environments in RL represent the task or problem that we are trying to solve. Standard environments can be easily created in TF-Agents using `suites`. We have different `suites` for loading environments from sources such as the OpenAI Gym, Atari, DM Control, etc., given a string environment name.
Now let us load the CartPole environment from the OpenAI Gym suite.
```
# Load the CartPole environment from the OpenAI Gym suite.
env = suite_gym.load(env_name)
```
We can render this environment to see how it looks. A free-swinging pole is attached to a cart. The goal is to move the cart right or left in order to keep the pole pointing up.
```
#@test {"skip": true}
# Render one frame of the freshly reset environment.
env.reset()
PIL.Image.fromarray(env.render())
```
The `time_step = environment.step(action)` statement takes `action` in the environment. The `TimeStep` tuple returned contains the environment's next observation and reward for that action. The `time_step_spec()` and `action_spec()` methods in the environment return the specifications (types, shapes, bounds) of the `time_step` and `action` respectively.
```
# Inspect the environment's observation and action specifications.
print('Observation Spec:')
print(env.time_step_spec().observation)
print('Action Spec:')
print(env.action_spec())
```
So, we see that observation is an array of 4 floats: the position and velocity of the cart, and the angular position and velocity of the pole. Since only two actions are possible (move left or move right), the `action_spec` is a scalar where 0 means "move left" and 1 means "move right."
```
# Step the environment once with action 1 ("move right") and show the result.
time_step = env.reset()
print('Time step:')
print(time_step)
action = np.array(1, dtype=np.int32)
next_time_step = env.step(action)
print('Next time step:')
print(next_time_step)
```
Usually we create two environments: one for training and one for evaluation. Most environments are written in pure python, but they can be easily converted to TensorFlow using the `TFPyEnvironment` wrapper. The original environment's API uses numpy arrays, the `TFPyEnvironment` converts these to/from `Tensors` for you to more easily interact with TensorFlow policies and agents.
```
# Separate train/eval environments, wrapped so numpy <-> Tensor conversion
# is handled for us by TFPyEnvironment.
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
```
## Agent
The algorithm that we use to solve an RL problem is represented as an `Agent`. In addition to the REINFORCE agent, TF-Agents provides standard implementations of a variety of `Agents` such as [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf), [DDPG](https://arxiv.org/pdf/1509.02971.pdf), [TD3](https://arxiv.org/pdf/1802.09477.pdf), [PPO](https://arxiv.org/abs/1707.06347) and [SAC](https://arxiv.org/abs/1801.01290).
To create a REINFORCE Agent, we first need an `Actor Network` that can learn to predict the action given an observation from the environment.
We can easily create an `Actor Network` using the specs of the observations and actions. We can specify the layers in the network which, in this example, is the `fc_layer_params` argument set to a tuple of `ints` representing the sizes of each hidden layer (see the Hyperparameters section above).
```
# Actor network mapping observations to a distribution over actions; hidden
# layers are given by fc_layer_params.
actor_net = actor_distribution_network.ActorDistributionNetwork(
    train_env.observation_spec(),
    train_env.action_spec(),
    fc_layer_params=fc_layer_params)
```
We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.
```
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
# Tracks how many times the network has been updated.
train_step_counter = tf.compat.v2.Variable(0)
tf_agent = reinforce_agent.ReinforceAgent(
    train_env.time_step_spec(),
    train_env.action_spec(),
    actor_network=actor_net,
    optimizer=optimizer,
    normalize_returns=True,
    train_step_counter=train_step_counter)
tf_agent.initialize()
```
## Policies
In TF-Agents, policies represent the standard notion of policies in RL: given a `time_step` produce an action or a distribution over actions. The main method is `policy_step = policy.action(time_step)` where `policy_step` is a named tuple `PolicyStep(action, state, info)`. The `policy_step.action` is the `action` to be applied to the environment, `state` represents the state for stateful (RNN) policies and `info` may contain auxiliary information such as log probabilities of the actions.
Agents contain two policies: the main policy that is used for evaluation/deployment (agent.policy) and another policy that is used for data collection (agent.collect_policy).
```
eval_policy = tf_agent.policy             # policy used for evaluation/deployment
collect_policy = tf_agent.collect_policy  # policy used for data collection
```
## Metrics and Evaluation
The most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows.
```
#@test {"skip": true}
def compute_avg_return(environment, policy, num_episodes=10):
    """Average the per-episode return of `policy` over `num_episodes` rollouts."""
    total_return = 0.0
    for _ in range(num_episodes):
        time_step = environment.reset()
        episode_return = 0.0
        # Roll the episode out to termination, accumulating rewards.
        while not time_step.is_last():
            time_step = environment.step(policy.action(time_step).action)
            episode_return += time_step.reward
        total_return += episode_return
    return (total_return / num_episodes).numpy()[0]
# Please also see the metrics module for standard implementations of different
# metrics.
```
## Replay Buffer
In order to keep track of the data collected from the environment, we will use the TFUniformReplayBuffer. This replay buffer is constructed using specs describing the tensors that are to be stored, which can be obtained from the agent using `tf_agent.collect_data_spec`.
```
# Uniform replay buffer whose element spec comes from the agent itself.
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
    data_spec=tf_agent.collect_data_spec,
    batch_size=train_env.batch_size,
    max_length=replay_buffer_capacity)
```
For most agents, the `collect_data_spec` is a `Trajectory` named tuple containing the observation, action, reward etc.
## Data Collection
As REINFORCE learns from whole episodes, we define a function to collect an episode using the given data collection policy and save the data (observations, actions, rewards etc.) as trajectories in the replay buffer.
```
#@test {"skip": true}
def collect_episode(environment, policy, num_episodes):
    """Collect `num_episodes` full episodes into the (global) replay buffer.

    Steps `environment` with `policy`, packs every transition into a
    Trajectory, and pushes it onto the module-level `replay_buffer`.
    Episode completion is detected via `traj.is_boundary()`.
    """
    environment.reset()
    completed = 0
    while completed < num_episodes:
        current = environment.current_time_step()
        decision = policy.action(current)
        following = environment.step(decision.action)
        traj = trajectory.from_transition(current, decision, following)
        # Hand the transition to the shared replay buffer.
        replay_buffer.add_batch(traj)
        if traj.is_boundary():
            completed += 1
# This loop is so common in RL, that we provide standard implementations of
# these. For more details see the drivers module.
```
## Training the agent
The training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.
The following will take ~3 minutes to run.
```
#@test {"skip": true}
# NOTE: the original cell wrapped the `%%time` magic in a try/except block.
# `%%time` is an IPython *cell* magic, not a Python statement, so that code
# is a SyntaxError outside a notebook and has been removed here.

# (Optional) Optimize by wrapping some of the code in a graph using TF function.
tf_agent.train = common.function(tf_agent.train)

# Reset the train step so logging/eval intervals count from zero.
tf_agent.train_step_counter.assign(0)

# Evaluate the agent's policy once before training to record a baseline.
avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
returns = [avg_return]

for _ in range(num_iterations):
    # Collect a few episodes using collect_policy and save to the replay buffer.
    collect_episode(
        train_env, tf_agent.collect_policy, collect_episodes_per_iteration)

    # Use data from the buffer to update the agent's network, then clear it:
    # REINFORCE is on-policy, so stale trajectories must not be reused.
    experience = replay_buffer.gather_all()
    train_loss = tf_agent.train(experience)
    replay_buffer.clear()

    step = tf_agent.train_step_counter.numpy()
    if step % log_interval == 0:
        print('step = {0}: loss = {1}'.format(step, train_loss.loss))
    if step % eval_interval == 0:
        avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
        print('step = {0}: Average Return = {1}'.format(step, avg_return))
        returns.append(avg_return)
## Visualization
### Plots
We can plot return vs global steps to see the performance of our agent. In `CartPole-v0`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 200, the maximum possible return is also 200.
```
#@test {"skip": true}
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
plt.ylim(top=250)
```
### Videos
It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab.
```
def embed_mp4(filename):
    """Embeds an mp4 file in the notebook.

    Reads the video bytes, base64-encodes them, and wraps the result in an
    HTML5 <video> tag so it can be rendered inline in Colab/Jupyter.

    Args:
        filename: path to the .mp4 file to embed.

    Returns:
        An IPython.display.HTML object containing the inline video.
    """
    # Use a context manager so the file handle is always closed
    # (the original `open(...).read()` leaked the handle).
    with open(filename, 'rb') as f:
        video = f.read()
    b64 = base64.b64encode(video)
    tag = '''
<video width="640" height="480" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>'''.format(b64.decode())
    return IPython.display.HTML(tag)
```
The following code visualizes the agent's policy for a few episodes:
```
# Roll out the trained greedy policy for a few episodes and record each
# rendered frame into an mp4, then embed the result in the notebook.
num_episodes = 3
video_filename = 'imageio.mp4'
with imageio.get_writer(video_filename, fps=60) as video:
    for _ in range(num_episodes):
        time_step = eval_env.reset()
        # Render from the underlying py environment; the TF env only wraps it.
        video.append_data(eval_py_env.render())
        while not time_step.is_last():
            action_step = tf_agent.policy.action(time_step)
            time_step = eval_env.step(action_step.action)
            video.append_data(eval_py_env.render())
embed_mp4(video_filename)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/henrywoo/MyML/blob/master/Copy_of_nlu_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#### Copyright 2018 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Natural Language Understanding: WordNet
Please **make a copy** of this Colab notebook before starting this lab. To do so, choose **File**->**Save a copy in Drive**.
## Topics covered
1. Synsets
1. Lemmas and synonyms
1. Word hierarchies
1. Measuring similarities
One of the earliest attempts to create useful representations of meaning for language is [WordNet](https://en.wikipedia.org/wiki/WordNet) -- a lexical database of words and their relationships.
NLTK provides a [WordNet wrapper](http://www.nltk.org/howto/wordnet.html) that we'll use here.
```
import nltk
assert(nltk.download('wordnet')) # Make sure we have the wordnet data.
from nltk.corpus import wordnet as wn
```
## Synsets
The fundamental WordNet unit is a **synset**, specified by a word form, a part of speech, and an index. The synsets() function retrieves the synsets that match the given word. For example, there are 4 synsets for the word "surf", one of which is a noun (n) and three of which are verbs (v). WordNet provides a definition and sometimes glosses (examples) for each synset. **Polysemy**, by the way, means having multiple senses.
```
# Print every synset for "surf" with its definition and example glosses.
# (Converted from Python 2 `print` statements, which are a SyntaxError
# under Python 3.)
for s in wn.synsets('surf'):
    print(s)
    print('\t', s.definition())
    print('\t', s.examples())
```
## Lemmas and synonyms
Each synset includes its corresponding **lemmas** (word forms).
We can construct a set of synonyms by looking up all the lemmas for all the synsets for a word.
```
# Build the synonym set of "triumphant" by collecting the lemma names of
# all its synsets.  (Converted to Python 3 print syntax.)
synonyms = set()
for s in wn.synsets('triumphant'):
    for l in s.lemmas():
        synonyms.add(l.name())
print('synonyms:', ', '.join(synonyms))
```
## Word hierarchies
WordNet organizes nouns and verbs into hierarchies according to **hypernym** or is-a relationships.
Let's examine the path from "rutabaga" to its root in the tree, "entity".
```
# Walk up the hypernym chain from "rutabaga" until the root is reached
# (the loop ends when hypernyms() returns an empty list).
# (Converted to Python 3 print syntax.)
s = wn.synsets('rutabaga')
while s:
    print(s[0].hypernyms())
    s = s[0].hypernyms()
```
Actually, the proper way to do this is with a transitive closure, which repeatedly applies the specified function (in this case, hypernyms()).
```
# closure() repeatedly applies `hyper`, yielding the transitive closure of
# the hypernym relation.  (Converted to Python 3 print syntax.)
hyper = lambda x: x.hypernyms()
s = wn.synset('rutabaga.n.01')
for i in list(s.closure(hyper)):
    print(i)
print()
ss = wn.synset('root_vegetable.n.01')
for i in list(ss.closure(hyper)):
    print(i)
```
## Measuring similarity
WordNet's word hierarchies (for nouns and verbs) allow us to measure similarity in various ways.
Path similarity is defined as:
> $1 / (ShortestPathDistance(s_1, s_2) + 1)$
where $ShortestPathDistance(s_1, s_2)$ is computed from the hypernym/hyponym graph.
```
# Path similarity demo: 1 / (shortest hypernym-path distance + 1).
# (Converted to Python 3 print syntax.)
s1 = wn.synset('dog.n.01')
s2 = wn.synset('cat.n.01')
s3 = wn.synset('potato.n.01')
print(s1, '::', s1, s1.path_similarity(s1))  # identical synsets -> 1.0
print(s1, '::', s2, s1.path_similarity(s2))
print(s1, '::', s3, s1.path_similarity(s3))
print(s2, '::', s3, s2.path_similarity(s3))
print()
hyper = lambda x: x.hypernyms()
print(s1.hypernyms())
for i in list(s1.closure(hyper)):
    print(i)
```
## Takeaways
WordNet gives us ways to compare words and understand their relationships in a much more meaningful way than relying on the raw strings (sequences of characters). We know that 'cat' and 'dog', for example, are somewhat similar even though they have no string similarity. As a result, WordNet has been used in lots of practical applications over the years. However, WordNet has a few important shortcomings:
1. WordNet was built by people. This makes it hard to maintain as new words are added (e.g. 'iphone' isn't in WordNet) and definitions evolve. It also has limited language coverage. NLTK wraps Open Multilingual WordNet which includes 22 additional languages, but these are less extensive than the English WordNet. A fundamental question addressed by subsequent sections is: can we build WordNet-like resources automatically from text, of which there is an abundance?
1. WordNet, like any dictionary or thesaurus, represents the meaning of a word with its relationships to other words. That is, it lacks *grounding* in the real world. This is fine for people who have plenty of working knowledge of the world, who have seen and interacted with dogs and cats and potatoes, but would be much less helpful for aliens arriving on Earth for the first time. This deficiency, where language is only defined with respect to itself, and not with respect to images for example, is at the frontier of research in Natural Language Understanding.
## Quiz Questions
(1) Use the closure function to enumerate the **hyponyms** (the inverse of a hypernym) of 'root_vegetable.n.01'.
(2) We used the path_similarity function to compute the similarity between 'dog' and 'cat'. Use the hypernyms() function (see above) to find the path between these two words. Does the path similarity 0.2 make sense?
| github_jupyter |
<a href="https://colab.research.google.com/github/AryanMethil/Brain_Tumor_Detection/blob/master/constants.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Folders Details :
**brain_tumor_dataset => Input dataset; it also contains the test folder brain_tumor_test_10 => a test set of 20 images organised into yes and no folders**
**new_brain_tumor_dataset => Preprocessing applied on Input Dataset**
**Final_Tumor_Dataset => Final Dataset consisting of Train , Validation and Test folders each consisting of yes and no folder**
**models_directory => Folder containing all the .h5 models**
# Constant Details :
**yes, no => yes and no folders of brain_tumor_dataset**
**image_preprocessed_yes, image_preprocessed_no => yes and no folders of new_brain_tumor_dataset formed after preprocessing brain_tumor_dataset folder**
**image_generator_train, image_generator_validator, image_generator_test => train, validation and test subfolders of Final_Tumor_Dataset**
**image_generator_train_yes, image_generator_train_no => yes and no folders of train folder inside the Final_Tumor_Dataset**
```
# --- Raw input dataset (brain_tumor_dataset) and its preprocessed copy ---
yes='/content/drive/My Drive/Brain_Tumor_Classification/input/brain_tumor_dataset/yes'
image_preprocessed_yes='/content/drive/My Drive/Brain_Tumor_Classification/input/new_brain_tumor_dataset/yes/'
no='/content/drive/My Drive/Brain_Tumor_Classification/input/brain_tumor_dataset/no'
image_preprocessed_no='/content/drive/My Drive/Brain_Tumor_Classification/input/new_brain_tumor_dataset/no/'
# --- Final_Tumor_Dataset train/validation/test split roots ---
image_generator_train='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Train/'
image_generator_validation='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Validation/'
image_generator_test='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Test/'
# --- Per-class (yes = tumour, no = healthy) subfolders of each split ---
image_generator_train_yes='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Train/yes'
image_generator_train_no='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Train/no'
image_generator_validation_yes='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Validation/yes'
image_generator_validation_no='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Validation/no'
# --- Held-out test folder shipped inside the raw dataset ---
test_no='/content/drive/My Drive/Brain_Tumor_Classification/input/brain_tumor_dataset/brain_tumor_test_10/no'
test_yes='/content/drive/My Drive/Brain_Tumor_Classification/input/brain_tumor_dataset/brain_tumor_test_10/yes'
image_generator_test_yes='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Test/yes'
image_generator_test_no='/content/drive/My Drive/Brain_Tumor_Classification/input/Final_Tumor_Dataset/Test/no'
# --- Extra unseen test images and the directory holding saved .h5 models ---
new_test='/content/drive/My Drive/Brain_Tumor_Classification/input/Test New Images/'
new_test_yes='/content/drive/My Drive/Brain_Tumor_Classification/input/Test New Images/yes'
models_directory='/content/drive/My Drive/Brain_Tumor_Classification/models/'
```
| github_jupyter |
```
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
from scipy.misc import imread,imresize
from random import shuffle
from sklearn.preprocessing import LabelEncoder
tf.__version__
```
Make sure you download this data and extract it into the same directory:
https://drive.google.com/open?id=1V9fy_Me9ZjmMTJoTWz0L8AUdIW5k35bE
```
checkpoint_name = 'mobilenet_v2_1.0_224'
url = 'https://storage.googleapis.com/mobilenet_v2/checkpoints/' + checkpoint_name + '.tgz'
print('Downloading from ', url)
!wget {url}
print('Unpacking')
!tar -xvf {checkpoint_name}.tgz
checkpoint = checkpoint_name + '.ckpt'
# Training hyper-parameters.
batch_size = 32
epoch = 10              # number of full passes over the file list
learning_rate = 1e-3

# Images live in Crop/ and encode their label in the filename before '--',
# e.g. "happy--0001.jpg" -- TODO confirm the exact naming scheme.
data_location = 'Crop/'
img_lists = os.listdir(data_location)
shuffle(img_lists)      # shuffle once up front; mini-batches are then taken in order
img_labels = [i.split('--')[0] for i in img_lists]
img_Y = LabelEncoder().fit_transform(img_labels)   # string labels -> integer ids
img_lists = [data_location+i for i in img_lists]   # prepend the directory to each filename
import mobilenet_v2
# Build the TF1 fine-tuning graph: MobileNetV2 backbone + 7-way emotion head.
tf.reset_default_graph()
sess = tf.InteractiveSession()

# Grayscale 224x224 inputs; MobileNetV2 expects 3-channel images in ~[-1, 1].
X = tf.placeholder(tf.float32,[None,224,224,1])
Y = tf.placeholder(tf.int32, [None])
images = tf.image.grayscale_to_rgb(X)
images = images / 128. - 1          # map [0, 255] pixels to roughly [-1, 1]

with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope(is_training=True)):
    logits, endpoints = mobilenet_v2.mobilenet(images)
logits = tf.nn.relu6(logits)
# New 7-class head (7 emotion classes) on top of the backbone logits.
emotion_logits = slim.fully_connected(logits, 7, activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                      weights_regularizer=slim.l2_regularizer(1e-5),
                                      scope='emo/emotion_1', reuse=False)

# Cross-entropy loss plus slim's collected L2 regularization losses.
emotion_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=emotion_logits)
emotion_cross_entropy_mean = tf.reduce_mean(emotion_cross_entropy)
cost = tf.add_n([emotion_cross_entropy_mean] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
emotion_accuracy = tf.reduce_mean(tf.cast(tf.nn.in_top_k(emotion_logits, Y, 1), tf.float32))
global_step = tf.Variable(0, name="global_step", trainable=False)

# only train on our emotion layers (the backbone stays frozen)
emotion_vars = [var for var in tf.trainable_variables() if var.name.find('emotion_') >= 0]
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost,var_list=emotion_vars)

sess.run(tf.global_variables_initializer())

# Restore only the pretrained MobileNetV2 backbone weights, then switch to a
# saver over *all* variables for checkpointing the fine-tuned model.
var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'MobilenetV2')
saver = tf.train.Saver(var_list = var_lists)
saver.restore(sess, checkpoint)
saver = tf.train.Saver(tf.global_variables())
# test save (verifies the checkpoint directory is writable before training)
saver.save(sess, "new/emotion-checkpoint-mobilenet.ckpt")
from tqdm import tqdm
# Mini-batch fine-tuning loop; the ragged tail of the file list is dropped.
batching = (len(img_lists) // batch_size) * batch_size
for i in range(epoch):
    total_loss, total_acc = 0, 0
    for k in tqdm(range(0, batching, batch_size),desc='minibatch loop'):
        batch_x = np.zeros((batch_size, 224,224,1))
        for n in range(batch_size):
            # NOTE(review): scipy.misc.imread/imresize were removed in
            # SciPy >= 1.3; this cell needs an old SciPy (or imageio/PIL).
            img = imresize(imread(img_lists[k+n]), (224,224))
            batch_x[n,:,:,0] = img
        loss, acc, _ = sess.run([cost,emotion_accuracy,optimizer],
                        feed_dict={X:batch_x,Y:img_Y[k:k+batch_size]})
        total_loss += loss
        total_acc += acc
    # Average the per-batch loss/accuracy over the number of full batches.
    total_loss /= (len(img_lists) // batch_size)
    total_acc /= (len(img_lists) // batch_size)
    print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1,total_loss,total_acc))
saver.save(sess, "new/emotion-checkpoint-mobilenet.ckpt")
# Rebuild the graph in inference mode (is_training=False freezes batch-norm
# statistics) and restore the fine-tuned checkpoint for evaluation.
tf.reset_default_graph()
sess = tf.InteractiveSession()
X = tf.placeholder(tf.float32,[None,224,224,1])
Y = tf.placeholder(tf.int32, [None])
images = tf.image.grayscale_to_rgb(X)
images = images / 128. - 1
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope(is_training=False)):
    logits, endpoints = mobilenet_v2.mobilenet(images)
logits = tf.nn.relu6(logits)
emotion_logits = slim.fully_connected(logits, 7, activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                      weights_regularizer=slim.l2_regularizer(1e-5),
                                      scope='emo/emotion_1', reuse=False)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, "new/emotion-checkpoint-mobilenet.ckpt")

# Predict over the same truncated file list and report per-class metrics.
# NOTE(review): this evaluates on the training images, so the report below
# measures training fit, not generalization.
batching = (len(img_lists) // batch_size) * batch_size
results = []
for k in tqdm(range(0, batching, batch_size),desc='minibatch loop'):
    batch_x = np.zeros((batch_size, 224,224,1))
    for n in range(batch_size):
        img = imresize(imread(img_lists[k+n]), (224,224))
        batch_x[n,:,:,0] = img
    # NOTE(review): tf.argmax is created inside the loop, growing the graph
    # each iteration; hoisting it above the loop would avoid that.
    results += sess.run(tf.argmax(emotion_logits,1), feed_dict={X:batch_x}).tolist()
from sklearn import metrics
print(metrics.classification_report(img_Y[:batching], results, target_names = np.unique(img_labels)))
```
| github_jupyter |
```
import sys
sys.path.append("/remote-home/xtzhang/CTC/CTC2021/SpecialEdition")
import os
import random
import time
import logging
import argparse
from dataclasses import dataclass, field
from typing import Optional,Dict, Union, Any, Tuple, List
import numpy as np
import datasets
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import transformers
from transformers import (
BertConfig,
DataCollatorForSeq2Seq,
AutoConfig,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers import Trainer, Seq2SeqTrainer
from transformers import TrainingArguments
from transformers import trainer_utils, training_args
from transformers.trainer_pt_utils import nested_detach
from transformers import BertForMaskedLM
from transformers.file_utils import PaddingStrategy
from transformers.modeling_utils import PreTrainedModel
from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
from transformers.training_args import TrainingArguments
from core import get_dataset, get_metrics, argument_init
from lib import subTrainer
from data.DatasetLoadingHelper import load_ctc2021, load_sighan, load_lattice_sighan
#from models.bart.modeling_bart_v2 import BartForConditionalGeneration
from models.bert.modeling_bert_v2 import BertForFlat
# Sanity-check the HuggingFace fast tokenizer on two short Chinese sentences.
tokenizer_model_name_path="hfl/chinese-roberta-wwm-ext"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name_path)
data = ["今天天气不错", "今天天气还行哦"]
res = tokenizer.batch_encode_plus(data)
res.data            # underlying dict: input_ids / token_type_ids / attention_mask
res.encodings[0]    # the fast-tokenizer Encoding object for the first sentence
from tokenizers import Encoding
class myEncoding():
    """Minimal stand-in for a tokenizers Encoding carrying lattice fields.

    A plain record holding token ids, attention masks, targets and the
    start/end position arrays used by the FLAT lattice model.
    """

    def __init__(self, ids, atten_masks, target, pos_s, pos_e):
        # Store every field verbatim; no validation or transformation.
        fields = ("ids", "atten_masks", "target", "pos_s", "pos_e")
        for name, value in zip(fields, (ids, atten_masks, target, pos_s, pos_e)):
            setattr(self, name, value)
# Inspect the per-token fields of the first fast-tokenizer Encoding.
print(res[0].ids)              # token ids including the special tokens
print(res[0].type_ids)
print(res[0].tokens)
print(res[0].offsets)          # (start, end) character span for each token
print(res[0].attention_mask)
#print(res[0].sequence_tokens_mask)
#print(res[0].overflowing)
res[0].n_sequences
class mydataset(Dataset):
    """Dict-of-lists dataset supporting lookup by field name or by index.

    `data` maps field names (e.g. "input_ids") to equal-length lists of
    per-example values.  Indexing with a string returns the whole column;
    indexing with an integer returns one example as a {field: value} dict.

    Bug fixes vs. the original:
      * `data.keys()[0]` raises TypeError in Python 3 (dict_keys is not
        subscriptable) -- use `next(iter(data))` instead.
      * each example was built as a *set* `{data[key][i] ...}`, which lost
        the field names -- build a dict instead.
      * `__getitem__` referenced an undefined name `index`.
      * `__len__` returned the number of fields, not the number of examples.
    """

    def __init__(self, data):
        self.data = data
        # Number of examples = length of any one column (all must match).
        first_key = next(iter(data))
        self.data_iter = [
            {key: data[key][i] for key in data}
            for i in range(len(data[first_key]))
        ]

    def __getitem__(self, key):
        if isinstance(key, str):
            return self.data[key]      # whole column
        return self.data_iter[key]     # single example by integer index

    def __len__(self):
        return len(self.data_iter)
def get_offset(length):
    """Build character offsets for a `length`-token sequence with specials.

    The first and last tokens ([CLS]/[SEP]) get the dummy span (0, 0);
    every interior token i is mapped to the single character (i-1, i).
    """
    interior = [(i, i + 1) for i in range(length - 2)]
    return [(0, 0)] + interior + [(0, 0)]
#test
get_offset(3)
# NOTE(review): this call does not match the tokenizers.Encoding signature
# (`atten_masks` is not a parameter, and `offsets=[(0)]` is just `[0]`, not
# a list of (start, end) pairs) -- it likely raises a TypeError and appears
# to be exploratory scratch code.
test_encoding = Encoding(ids=[1], type_ids=[1], tokens=["是"], offsets=[(0)], atten_masks=[1])
test_encoding
print(res[0])
p = res.encodings[0]
son = dir(res.encodings[0])
res
from transformers.tokenization_utils_base import BatchEncoding
# NOTE(review): Encoding() with no arguments also looks unlikely to work;
# confirm against the tokenizers version in use.
tmp_encoding = Encoding()
tmp = BatchEncoding({"input_ids":[[1]], "token_types_ids":[[1]], "attention_mask":[[1]], "pos_s":[[1]]})
# Toy lattice fields used to exercise the fastNLP -> BatchEncoding path.
finals = [[1],[2]]
atten_masks = [[1], [2]]
target = [[1], [2]]
pos_s = [[1], [2]]
pos_e = [[1], [2]]
from fastNLP import DataSet
src = DataSet({ "lattice":finals, "atten_masks":atten_masks, "target": target, "pos_s":pos_s, "pos_e":pos_e})
src.field_arrays.keys()
for i in src:
    print(i)
# Manually convert the fastNLP DataSet into plain lists keyed by field name.
new = {}
for key in src.field_arrays:
    new[key] = [i for i in src.field_arrays[key]]
final = BatchEncoding(new)
print(final, type(final))
print(res, type(res), res[0])
for i in res:
    print(i)
dataset = {"train":src, "test":src}
def tmp_transform(fnlp_dataset):
    """Repackage a fastNLP DataSet's field arrays as a HF BatchEncoding."""
    columns = {}
    for name in fnlp_dataset.field_arrays:
        # Materialise each FieldArray into a plain Python list.
        columns[name] = list(fnlp_dataset.field_arrays[name])
    return BatchEncoding(columns)
# Apply the conversion to one split, then to every split while keeping
# the split names ("train"/"test") as keys.
test = tmp_transform(dataset["train"])
print(test)
dataset2 = dict(zip(dataset, map(tmp_transform, dataset.values())))
dataset2
```
| github_jupyter |
# Notebook-10: Wrapping Up (A Matter of Style)
### Lesson Content
- Style
- Why style matters
- Python style
- Why???
- Why did I enter this world of pain?
- Where am I going?
Welcome to the ninth, and currently _last_, Code Camp notebook! In this lesson we'll cover a number of things that don't fit anywhere else but which are essential to getting you on your way as a beginner programmer in Geocomputation.
## Style
As with any other type of creative writing -- make no mistake, writing code is a creative activity! -- different people have different styles of writing. Extending the language metaphor, different programming languages have different styles as well and you can often tell what programming language someone learned first (or best) by the way they write their code in _any_ programming language. While it's _nice_ to work in the style that is considered 'best' in each language, what is more important is that you are consistent in how you write: that way, another person reading your code can get used to the way that you write and make better sense of what you're trying to do.
### Variable Names
So here are some common styles for variable names:
| Variable Name | Language | Rationale |
|:--------------|:---------|:----------|
| my_variable | Python, Perl | Underscores ("\_") make it easy to read 'words' in a variable name |
| MyVariable | Java | Mixed Caps makes for shorter variable names that are easier to type |
| my.variable | R | R allows full-stops in variable names; most other languages do not |
### Comments
Commenting code that they've written is something that programmers normally hate doing, because it's not _doing_ work after all... until you have to try to understand what you've done and why two days or two months later. You'd be surprised how quickly your memory of _why_ you did something one way, and not another, will fade and, with it, the risks that you were trying to avoid. It doesn't matter how clear your variable names are (_e.g._ `url_to_get` or `do_not_do_this`) you will eventually forget what is going on. Comments help you to avoid this, and they also allow you to write more concise, efficient code because your variable and function names don't have to do all the heavy lifting of explanation as well!
How to comment well:
- There are as many ways of commenting well as there are ways of writing code well -- the point is to be clear and consistent.
- You might think of adding a _short_ comment on the end of a line of code if that line is doing something complex and not immediately obvious:
```python
import random
random.seed(123456789) # For reproducibility
```
- You might think of adding a one-line or multi-line comment at the start of a _block_ of code that is achieving something specific:
```python
# Create a new data frame to
# hold the percentage values
# and initialise it with only
# the 'mnemonic' (i.e. GeoCode)
d_pct = pd.concat(
[d[t]['mnemonic']],
axis=1,
keys=['mnemonic'])
```
- You might think of using some formatting for a long _section_ of code that forms a key stage of your analysis:
```python
##############################
# This next section deals with
# extracting data from the
# London Data Store and tidying
# up the values so that they work
# with pandas.
##############################
```
- Finally, you can also use multi-line strings as a kind of comment format:
```python
"""
=========================================================
Extract pd2-level data from the price paid medians data
=========================================================
"""
```
Many programmers will mix all of these different styles together, but they will do so in a consistent way: the more complex the formatting and layout of the comment, the larger the block of code to which the comment applies.
### Why Style Matters
As we said at the start, style matters because it makes your code _intelligible_ to you, and to others. For professional programmers that is incredibly important because almost no one works on their own any more: applications are too complex for one programmer to get very far. So programmers need others to be able to read and interpret the code that they've written. As well, you might be asked to (or want to) go back to make changes to some code that you wrote some time ago -- if you've not commented your code or used a consistent style you're much more likely to break things. You might have heard of the Facebook motto "Move fast and break things" -- that doesn't refer to Facebook's web application, it refers to _paradigms_.

## Why???
We'd like to wrap up Code Camp by revisiting the question of 'why' -- why invest a lot of time and energy in learning to code? There are a host of reasons, but we'd like to pick out a few of the ones that we think are most important for undergraduate students:
1. Coding teaches logical thinking: right from the start, coding reinforces logical thinking. A computer will try to do _exactly_ what you say, regardless of how inappropriate or illogical that statement might be. To become a better programmer you will need to develop a good idea of what you're trying to achieve, the steps involved in getting there, and you'll need an eye for detail as well since `my_variable` is not the same as `myvariable`.
2. Coding teaching abstraction: this might seem a bit of a strange point coming right after the one about an 'eye for detail', but it's true! Writing code is not just about solving for one row of data, or 1,000, it's about solving the problem in a _general_ way: mapping and analysis tools like [PySAL](http://pysal.readthedocs.io/en/latest/) or [Folium](https://github.com/python-visualization/folium) weren't create to solve one mapping or spatial analysis problem, but a _range_ of them! Well-written code is easy to adapt and re-use (and in follow-on classes we'll look at how to encapsulate useful snippets of code in functions so that it's even easier) because you aren't just thinking about "I have to process population data for 20 English cities" but about "How do I manage population data in many different formats about cities anywhere in the world?". You also need to have, in your head, a high-level _model_ of how the computer is processing the data so that you can frame your solution appropriately. Again, that's abstraction.
3. Coding is scalable: there's a lot of 'start-up cost' involved with a coding solution, but once you get over this hump it's highly scalable. One of us has been working on a Machine Learning problem to try to predict gentrification in London from 2011-2021 using the period from 2001-2011. This involves not only downloading more than 30 data sets from the [ONS/NOMIS](https://www.nomisweb.co.uk/) and the [London Data Store](https://data.london.gov.uk/) in order to build a model using 168 variables, it also involves making difficult choices about how to handle highly skewed data which makes accurate prediction much more difficult. Using Python, we can try _each_ of the options available and see which one work best, even though this involves reprocessing 168 variables, re-mapping 15 maps, and re-outputting 3 data files for use in QGIS.
4. Coding is replicable: the 'crisis of replicability' has enormous implications for medicine, psychology, policy, planning, and environmental science. Without the ability to check how data was collected, processed, and analysed we are basically 'flying blind'. You might have heard about the [Excel error used to justify austerity](http://theconversation.com/economists-an-excel-error-and-the-misguided-push-for-austerity-13584) or about the [crisis in replicability](http://www.newyorker.com/tech/elements/the-crisis-in-social-psychology-that-isnt)? Or that only [positive trials](https://www.scientificamerican.com/article/trial-sans-error-how-pharma-funded-research-cherry-picks-positive-results/) of a drug are published? These are parts of a larger debate within the sciences about how much of the scientific process should be conducted 'in the open' -- is it just data? or is it code as well? Increasingly, journals and scientists are arguing for it to be both.
5. Coding is empowering: if you stick with it you'll probably find that, one afternoon, you start coding around 3pm and suddenly realise that it's dark out, you're hungry, and you've been coding non-stop all the way to 9pm! We don't recommend this as everyday practice, but it's a sign of how engaging coding can be, how easy it is to enter '[flow](https://en.wikipedia.org/wiki/Flow_(psychology))', and how exciting it can be to solve real problems and achieve a real output. Ultimately, we think that 'bending' a computer to your will -- and, as importantly, figuring out how to frame a human problem in logical terms that a computer can handle -- is a tremendously empowering experience: that day you click 'run' or hit 'enter' and the computer chews away at a data set for 10 seconds or 10 hours **and** comes back to you with a solution to your problem is profoundly thrilling because it means that you've done work that _works_ on multiple levels simultaneously (it works at the fine-scale of syntax _and_ at the conceptual level of design).
We hope you'll get there and we hope you'll stick it out until you do! If you invest the time and effort then we're pretty confident that _any_ of you can become a competent programmer: but don't take our word for it, ask some of this year's Third Years what they think! And here is some more food for thought:
1. The National Academy's [Understanding the Changing Planet: Strategic Directions for the Geographical Sciences](https://www.nap.edu/read/12860/chapter/18#118)
2. The RGS's [Data Skills in Geography](http://www.rgs.org/OurWork/Schools/Data+skills+in+geography/Data+skills+in+geography.htm) (especially [Quantitative Skills in Geography](http://www.rgs.org/OurWork/Research+and+Higher+Education/Learning+teaching+and+research+in+higher+education/Quantitative+Teaching+and+Learning+in+Geography/Quantitative+Methods+in+Geography.htm)).
Good luck!
| github_jupyter |
<img src="imgs/dh_logo.png" align="right" width="50%">
# Aula 3.5.2 - Clustering
Fala galera! Tudo bem? Hoje continuaremos a aula de clustering/unsupervised learning. Na aula passada, vimos os conceitos básicos de clustering, bem como o algoritmo mais simples para a tarefa (simples, porém muito eficiente em vários casos!). Hoje, veremos 2 novos algoritmos e como aplicá-los na vida real.
```
# Dependencies
import numpy as np
import pandas as pd
from sklearn.datasets.samples_generator import make_blobs
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
```
## Dica de hoje:
Bom, como hoje a aula é de clustering, tentei achar alguns bons links de apoio para vocês!
- __[Esse link](https://towardsdatascience.com/the-5-clustering-algorithms-data-scientists-need-to-know-a36d136ef68)__ lista 5 dos principais tipos de algoritmos de clustering que um cientista de dados deve saber. É uma ótima leitura para saber com o que trabalhamos no nosso dia-a-dia :)
- __[K-Means vs Mean Shift](http://www.chioka.in/meanshift-algorithm-for-the-rest-of-us-python/)__
- __[Esse link](https://www.naftaliharris.com/blog/visualizing-dbscan-clustering/)__ é uma forma interativa de visualizar o DBSCAN funcionando! É sempre bom ver nossos algoritmos funcionando passo-a-passo quando estamos aprendendo sua lógica, recomendo fortemente!
## Mean Shift sobre Dados Artificiais
Vamos repetir o exercício de clusterização da aula anterior, mas com um algoritmo novo: mean shift! Uma das fraquezas que observamos no K-Means é que ele funciona de modo muito manual e repetitivo: temos que rodar um loop, extrair o cotovelo ou silhouette scores para então achar o melhor K. Contornando essa questão, existem os algoritmos de clustering hierárquico: eles conseguem decidir sozinhos como agrupar dados em quantas seções. Vamos observar como aplicar o Mean-Shift em dados artificiais. Na célula abaixo, crie clusters com o `make_blobs` (o mesmo da aula passada), utilizando os clusters abaixo como `centers`, 800 samples e 3 features.
```
clusters = [[1,1,1],[5,5,5],[3,10,10]]
# seu código aqui
# %load solutions/solution_05.py
X, _ = make_blobs(n_samples = 800, centers = clusters, n_features=3)
```
Abaixo, vamos aplicar o `MeanShift`. Sua classe se encontra no módulo *cluster* do sci-kit, e sua documentação está __[aqui](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html)__. Também será necessário criar uma variável `cluster_centers` que recebe o atributo `cluster_centers_` do modelo.
```
# seu código aqui
# %load solutions/solution_06.py
from sklearn.cluster import MeanShift
ms = MeanShift()
ms.fit(X)
cluster_centers = ms.cluster_centers_
cluster_centers
```
Agora, vamos visualizar os resultados do Mean Shift:
```
fig = plt.figure(figsize=(24,12))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], marker='o')
ax.scatter(cluster_centers[:,0], cluster_centers[:,1], cluster_centers[:,2], marker='x', color='red', s=300, linewidth=5, zorder=10)
```
Nas células abaixo, tente criar novos clusters com uma quantidade massiva de datapoints (>10k) e quantos centros você quiser. Tente aplicar K-Means e Mean-Shift. Quais diferenças você percebe, tanto de performance quanto de facilidade de implementação?
```
# crie os novos blobs aqui
clusters = [[1,1,20],[10,10,10],[20,10,1],[1,20,20],[20,20,20]]
X, y = make_blobs(n_samples = 1000, centers = clusters, n_features=3)
# implemente K-Means
# %load solutions/solution_01.py
from sklearn.cluster import KMeans
# Initializing KMeans
kmeans = KMeans(n_clusters=5)
# Fitting with inputs
kmeans = kmeans.fit(X)
# Predicting the clusters
labels = kmeans.predict(X)
# Getting the cluster centers
C = kmeans.cluster_centers_
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)
ax.scatter(C[:, 0], C[:, 1], C[:, 2], marker='*', c='#050505', s=1000)
counter = 1
for cluster in C:
print('Cluster ',counter)
print(cluster)
counter += 1
# implemente Mean Shift
ms = MeanShift(bandwidth=5)
ms.fit(X)
cluster_centers = ms.cluster_centers_
cluster_centers
cluster_centers[:, 0]
# faça o plotting dos resultados
fig = plt.figure(figsize=(24,12))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], marker='o')
ax.scatter(cluster_centers[:,0],
cluster_centers[:,1],
cluster_centers[:,2],
# cluster_centers[:,3],
# cluster_centers[:,4],
marker='x', color='red', s=500, linewidth=5, zorder=10)
```
## DBSCAN - Clustering by Density
Até agora vimos algoritmos de clustering com base em centroides. Embora úteis, eles podem cair em armadilhas comuns de acordo com a forma dos dados (por exemplo, deixar um outlier influenciar sua média e calcular centroides errados). Há um outro tipo de algoritmos de clustering que se baseiam no conceito de densidade! Eles observam os arredores dos dados e tomam decisões a partir disso. A premissa deles é forte, pois é de se pensar que dados semelhantes estão próximos uns dos outros, independente de sua forma. <br>
O mais famoso algoritmo de clustering por densidade é o DBSCAN (Density-Based Spatial Clustering of Applications with Noise). O DBSCAN possui 2 parâmetros:
- ɛ: o raio da vizinhança
- minPts: o número mínimo de datapoints de uma vizinhança para que ela seja considerada um cluster. <br>
Dessa forma, temos 3 tipos de dados a partir desse algoritmo:
- Core points: os pontos que estão diretamente influenciando na densidade de nossa vizinhança
- Border Points: os pontos que são alcançáveis por vizinhanças de vizinhanças
- Outliers: pontos fora de qualquer vizinhança
Os passos do DBSCAN são os seguintes:
- Escolha um ponto que ainda não foi dito como outlier ou assimilado a um cluster. Calcule sua vizinhança e determine se é um Core Point. Se sim, inicie uma nova vizinhança a partir dele.
- Adicione todos os directly-reachable points desse novo cluster ao seu cluster.
- Repita esses 2 passos até todos os clusters serem encontrados
- Identifique os outliers
Vamos ver o DBSCAN na prática! Lidaremos com o __[wholesale customers data](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers)__: um dataset de 440 consumidores com 8 atributos cada. Seria possível descobrir segmentos de consumidores a partir desses atributos? Nas células abaixo, importe o dataset *wholesale_customers_data.csv* que está na pasta *data*. Depois, chame os métodos exploratórios básicos. Caso queira fazer algum plot, sinta-se à vontade.
```
# seu codigo aqui
df = pd.read_csv(r'Wholesale customers data.csv', sep=',')
df.head()
import seaborn as sns
sns.pairplot(df)
```
Vamos dropar as variáveis categóricas pois não as usaremos agora
```
df.head(1)
df.drop(["Channel", "Region"], axis = 1, inplace = True)
# Let's plot the data now
x = df['Grocery']
y = df['Milk']
plt.scatter(x,y)
plt.xlabel("Groceries")
plt.ylabel("Milk")
```
Na célula abaixo, mude df para que ele receba somente as colunas desejadas. No caso, eu usei *Grocery* e *Milk*. Depois, faça um casting como matriz utilizando `to_numpy()` (o antigo `as_matrix()` foi removido do pandas) e então um casting de datatype de modo concatenado para float32 utilizando `astype()`.
```
# %load solutions/solution_08.py
df = df[["Grocery", "Milk"]]
# DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# to_numpy() is the supported replacement with identical semantics here.
df = df.to_numpy().astype("float32", copy = False)
```
Abaixo, precisaremos realizar o scaling de nossos dados utilizando o `Standard Scaler`. Faça o `fit_transform()` nos nossos dados!
```
# %load solutions/solution_09.py
from sklearn.preprocessing import StandardScaler
stscaler = StandardScaler().fit(df)
df_t = stscaler.transform(df)
```
Finalmente, podemos utilizar o DBSCAN! Ele se encontra no módulo de cluster do scikit! Abaixo, vamos implementar o algoritmo de clustering com eps=.5 e min_samples=15
```
# %load solutions/solution_10.py
from sklearn.cluster import DBSCAN
dbsc = DBSCAN(eps = .5, min_samples = 15).fit(df)
labels = dbsc.labels_
# Boolean mask flagging core samples (True = core point of some cluster).
core_samples = np.zeros_like(labels, dtype = bool)
core_samples[dbsc.core_sample_indices_] = True
# df0 = pd.read_csv("data/wholesale_customers_data.csv")
import seaborn as sns
# `df` is a NumPy array at this point (it was cast for scaling), so it does
# not support column assignment; rebuild a DataFrame just for plotting.
plot_df = pd.DataFrame(df, columns=["Grocery", "Milk"])
plot_df["filtro"] = list(core_samples)
# seaborn >= 0.12 requires keyword x/y, and `size` was renamed to `height`.
sns.lmplot(x="Grocery", y="Milk", data=plot_df, fit_reg=False, hue="filtro", height=10)
```
Desafio: tente realizar um pairplot e observar alguns padrões no nosso dataset. Podemos escolher mais features para segmentar melhor nossos consumidores. Como o dataset ficará n-dimensional (n>2), precisaremos rodar algo como um PCA ou antes para selecionar features, ou depois (um t-SNE também vale) para o plotting. Arregace as mangas aí que esse desafio é real-worlds DS! Use as aulas passadas para isso ;)
Para o desafio:
- PCA-3 e depois um DBSCAN
Aplicações:
- Segmentação de clientes
- Motor de recomendação
Para processo de entrevista, em qual parte do processo vão ser feitas as perguntas pelo entrevistador. Podemos encarar isso como um processo.
[Cross Industry Standard Process for Data Mining](https://pt.wikipedia.org/wiki/Cross_Industry_Standard_Process_for_Data_Mining)
<br>
<img src="imgs/crisp.png" align="center" width=400>
<br>
CRISP-DM é a abreviação de Cross Industry Standard Process for Data Mining, que pode ser traduzido como Processo Padrão Inter-Indústrias para Mineração de Dados. É um modelo de processo de mineração de dados que descreve abordagens comumente usadas por especialistas em mineração de dados para atacar problemas.
Em Julho de 2006 o consórcio responsável pela criação do processo anunciou que iria iniciar os trabalhos na direção da segunda versão do CRISP-DM. Em Setembro de 2006, o CRISP-DM SIG reuniu-se para discutir possíveis melhorias a serem implementadas no CRISP-DM 2.0, e traçar o curso do projeto.
Fases
- Entender o Negócio: foca em entender o objetivo do projeto a partir de uma perspectiva de negócios, definindo um plano preliminar para atingir os objetivos.
- Entender os Dados: recolhimento de dados e inicio de atividades para familiarização com os dados, identificando problemas ou conjuntos interessantes.
- Preparação dos Dados: construção do conjunto de dados final a partir dos dados iniciais. Normalmente ocorre várias vezes no processo.
- Modelagem: várias técnicas de modelagem são aplicadas, e seus parâmetros calibrados para otimização. Assim, é comum retornar à Preparação dos Dados durante essa fase.
- Avaliação: é construído um modelo que parece ter grande qualidade de uma perspectiva de análise de dados. No entanto, é necessário verificar se o modelo atinge os objetivos do negócio.
- Implantação: o conhecimento adquirido pelo modelo é organizado e apresentado de uma maneira que o cliente possa utilizar.
| github_jupyter |
# Data Processing - Overview
## Pre-requisites and Module Introduction
Let us understand prerequisites before getting into the module.
* Good understanding of Data Processing using Python.
* Data Processing Life Cycle
* Reading Data from files
* Processing Data using APIs
* Writing Processed Data back to files
* We can also use Databases as sources and sinks. It will be covered at a later point in time.
* We can also read data in streaming fashion which is out of the scope of this course.
We will get an overview of the Data Processing Life Cycle by the end of the module.
* Read airlines data from the file.
* Preview the schema and data to understand the characteristics of the data.
* Get an overview of Data Frame APIs as well as functions used to process the data.
* Check if there are any duplicates in the data.
* Get an overview of how to write data in Data Frames to Files using File Formats such as Parquet using Compression.
* Reorganize the data by month with different file format and using partitioning strategy.
* We will deep dive into Data Frame APIs to process the data in subsequent modules.
## Starting Spark Context
Let us start Spark Context using SparkSession.
* `SparkSession` is a class that is part of `pyspark.sql` package.
* It is a wrapper on top of Spark Context.
* When Spark application is submitted using `spark-submit` or `spark-shell` or `pyspark`, a web service called as Spark Context will be started.
* Spark Context maintains the context of all the jobs that are submitted until it is killed.
* `SparkSession` is nothing but wrapper on top of Spark Context.
* We need to first create SparkSession object with any name. But typically we use `spark`. Once it is created, several APIs will be exposed including `read`.
* We need to at least set Application Name and also specify the execution mode in which Spark Context should run while creating `SparkSession` object.
* We can use `appName` to specify name for the application and `master` to specify the execution mode.
* Below is the sample code snippet which will start the Spark Session object for us.
```
import org.apache.spark.sql.SparkSession
val spark = SparkSession.
builder.
config("spark.ui.port", "12903").
appName("Data Processing - Overview").
master("yarn").
getOrCreate
spark
```
## Overview of Spark read APIs
Let us get the overview of Spark read APIs to read files of different formats.
* `spark` has a bunch of APIs to read data from files of different formats.
* All APIs are exposed under `spark.read`
* `text` - to read single column data from text files as well as reading each of the whole text file as one record.
* `csv`- to read text files with delimiters. Default is a comma, but we can use other delimiters as well.
* `json` - to read data from JSON files
* `orc` - to read data from ORC files
* `parquet` - to read data from Parquet files.
* We can also read data from other file formats by plugging in and by using `spark.read.format`
* We can also pass options based on the file formats.
* `inferSchema` - to infer the data types of the columns based on the data.
* `header` - to use header to get the column names in case of text files.
* `schema` - to explicitly specify the schema.
* We can get the help on APIs like `spark.read.csv` using `help(spark.read.csv)`.
* Reading delimited data from text files.
```
spark.
read.
schema("""order_id INT,
order_date STRING,
order_customer_id INT,
order_status STRING
"""
).
csv("/public/retail_db/orders").
show
```
* Reading JSON data from text files. We can infer schema from the data as each JSON object contain both column name and value.
* Example for JSON
```
{ "order_id": 1, "order_date": "2013-07-25 00:00:00.0", "order_customer_id": 12345, "order_status": "COMPLETE" }
```
```
spark.
read.
json("/public/retail_db_json/orders").
show
```
## Understand airlines data
Let us read one of the files and understand more about the data to determine right API with right options to process data later.
* Our airlines data is in text file format.
* We can use `spark.read.text` on one of the files to preview the data and understand the following
* Whether header is present in files or not.
* Field Delimiter that is being used.
* Once we determine details about header and field delimiter we can use `spark.read.csv` with appropriate options to read the data.
```
val airlines = spark.read.
text("/public/airlines_all/airlines/part-00000")
airlines.show(false)
```
* Data have header and each field is delimited by a comma.
## Inferring Schema
Let us understand how we can quickly get schema using one file and apply on other files.
* We can pass the file name pattern to `spark.read.csv` and read all the data in files under **hdfs://public/airlines_all/airlines** into Data Frame.
* We can use options such as `header` and `inferSchema` to assign names and data types.
* However `inferSchema` will end up going through the entire data to assign schema. We can use samplingRatio to process fraction of data and then infer the schema.
* In case if the data in all the files have similar structure, we should be able to get the schema using one file and then apply it on others.
* In our airlines data schema is consistent across all the files and hence we should be able to get the schema by going through one file and apply on the entire dataset.
```
val airlines_part_00000 = spark.
read.
option("header", "true").
option("inferSchema", "true").
csv("/public/airlines_all/airlines/part-00000")
airlines_part_00000.show(false)
airlines_part_00000.printSchema
val airlines_schema = spark.
read.
option("header", "true").
option("inferSchema", "true").
csv("/public/airlines_all/airlines/part-00000").
schema
val airlines = spark.
read.
option("header", "true").
schema(airlines_schema).
csv("/public/airlines_all/airlines/part*")
airlines.count
```
## Previewing airlines data
Let us preview the airlines data to understand more about it.
* As we have too many files, we will just process one file and preview the data.
* File Name: **hdfs://public/airlines_all/airlines/part-00000**
* `spark.read.csv` will create a variable of type Data Frame.
```
val airlines_schema = spark.
read.
option("header", "true").
option("inferSchema", "true").
csv("/public/airlines_all/airlines/part-00000").
schema
val airlines = spark.
read.
option("header", "true").
schema(airlines_schema).
csv("/public/airlines_all/airlines/part*")
```
A Data Frame will have structure or schema.
* We can print the schema using `airlines.printSchema()`
* We can preview the data using `airlines.show()`. By default it shows 20 records and some of the column values might be truncated for readability purpose.
* We can review the details of show by using `help(airlines.show)`
* We can pass custom number of records and say `truncate=False` to show complete information of all the records requested. It will facilitate us to preview all columns with desired number of records.
```
airlines.show(100, false)
```
* We can get the number of records or rows in a Data Frame using `airlines.count()`
* In Databricks Notebook, we can use `display` to preview the data using Visualization feature
* We can perform all kinds of standard transformations on our data. We need to have good knowledge of functions on Data Frames as well as functions on columns to apply all standard transformations.
* Let us also validate if there are duplicates in our data, if yes we will remove duplicates while reorganizing the data later.
```
val airlines_schema = spark.
read.
option("header", "true").
option("inferSchema", "true").
csv("/public/airlines_all/airlines/part-00000").
schema
val airlines = spark.
read.
option("header", "true").
schema(airlines_schema).
csv("/public/airlines_all/airlines/part*")
airlines.printSchema
airlines.show
airlines.show(100, false)
airlines.count
airlines.distinct.count
```
## Overview of Data Frame APIs
Let us get an overview of Data Frame APIs to process data in Data Frames.
* Row Level Transformations or Projection of Data can be done using `select`, `selectExpr`, `withColumn`, `drop` on Data Frame.
* We typically apply functions from `pyspark.sql.functions` on columns using `select` and `withColumn`
* Filtering is typically done either by using `filter` or `where` on Data Frame.
* We can pass the condition to `filter` or `where` either by using SQL Style or Programming Language Style.
* Global Aggregations can be performed directly on the Data Frame.
* By Key or Grouping Aggregations are typically performed using `groupBy` and then aggregate functions using `agg`
* We can sort the data in Data Frame using `sort` or `orderBy`
* We will talk about Window Functions later. We can use use Window Functions for some advanced Aggregations and Ranking.
### Tasks
Let us understand how to project the data using different options such as `select`, `selectExpr`, `withColumn`, `drop.`
* Create Dataframe **employees** using Collection
```
val employees = List((1, "Scott", "Tiger", 1000.0, "united states"),
(2, "Henry", "Ford", 1250.0, "India"),
(3, "Nick", "Junior", 750.0, "united KINGDOM"),
(4, "Bill", "Gomes", 1500.0, "AUSTRALIA")
)
val employeesDF = employees.
toDF("employee_id",
"first_name",
"last_name",
"salary",
"nationality"
)
employeesDF.printSchema
employeesDF.show
```
* Project employee first name and last name.
```
employeesDF.
select("first_name", "last_name").
show
```
* Project all the fields except for Nationality
```
employeesDF.
drop("nationality").
show
```
**We will explore most of the APIs to process data in Data Frames as we get into the data processing at a later point in time**
## Overview of Functions
Let us get an overview of different functions that are available to process data in columns.
* While Data Frame APIs work on the Data Frame, at times we might want to apply functions on column values.
* Functions to process column values are available under `pyspark.sql.functions`. These are typically used in select or withColumn on top of Data Frame.
* There are approximately 300 pre-defined functions available for us.
* Some of the important functions can be broadly categorized into String Manipulation, Date Manipulation, Numeric Functions and Aggregate Functions.
* String Manipulation Functions
* Concatenating Strings - `concat`
* Getting Length - `length`
* Trimming Strings - `trim`,` rtrim`, `ltrim`
* Padding Strings - `lpad`, `rpad`
* Extracting Strings - `split`, `substring`
* Date Manipulation Functions
* Date Arithmetic - `date_add`, `date_sub`, `datediff`, `add_months`
* Date Extraction - `dayofmonth`, `month`, `year`
* Get beginning period - `trunc`, `date_trunc`
* Numeric Functions - `abs`, `greatest`
* Aggregate Functions - `sum`, `min`, `max`
### Tasks
Let us perform a task to understand how functions are typically used.
* Project full name by concatenating first name and last name along with other fields excluding first name and last name.
```
import org.apache.spark.sql.functions.{lit, concat}
employeesDF.
withColumn("full_name", concat($"first_name", lit(", "), $"last_name")).
drop("first_name", "last_name").
show
employeesDF.
select($"employee_id",
concat($"first_name", lit(", "), $"last_name").alias("full_name"),
$"salary",
$"nationality"
).
show
employeesDF.
selectExpr("employee_id",
"concat(first_name, ', ', last_name) AS full_name",
"salary",
"nationality"
).
show
```
**We will explore most of the functions as we get into the data processing at a later point in time**
## Overview of Spark Write APIs
Let us understand how we can write Data Frames to different file formats.
* All the batch write APIs are grouped under write which is exposed to Data Frame objects.
* All APIs are exposed under spark.read
* `text` - to write single column data to text files.
* `csv` - to write to text files with delimiters. Default is a comma, but we can use other delimiters as well.
* `json` - to write data to JSON files
* `orc` - to write data to ORC files
* `parquet` - to write data to Parquet files.
* We can also write data to other file formats by plugging in and by using `write.format`, for example **avro**
* We can use options based on the type using which we are writing the Data Frame to.
* `compression` - Compression codec (`gzip`, `snappy` etc)
* `sep` - to specify delimiters while writing into text files using **csv**
* We can `overwrite` the directories or `append` to existing directories using `mode`
* Create copy of orders data in **parquet** file format with no compression. If the folder already exists overwrite it. Target Location: **/user/[YOUR_USER_NAME]/retail_db/orders**
* When you pass options, if there are typos then options will be ignored rather than failing. Be careful and make sure that output is validated.
* By default the number of files in the output directory is equal to number of tasks that are used to process the data in the last stage. However, we might want to control number of files so that we don't run into too many small files issue.
* We can control number of files by using `coalesce`. It has to be invoked on top of Data Frame before invoking `write`.
```
val orders = spark.
read.
schema("""order_id INT,
order_date STRING,
order_customer_id INT,
order_status STRING
"""
).
csv("/public/retail_db/orders")
val username = System.getProperty("user.name")
orders.
write.
mode("overwrite").
option("compression", "none").
parquet(s"/user/${username}/retail_db/orders")
// Alternative approach - using format
val username = System.getProperty("user.name")
orders.
write.
mode("overwrite").
option("compression", "none").
format("parquet").
save(s"/user/${username}/retail_db/orders")
import sys.process._
val username = System.getProperty("user.name")
s"hdfs dfs -ls /user/${username}/retail_db/orders" !
// File extension should not contain compression algorithms such as snappy.
```
* Read order_items data from **/public/retail_db_json/order_items** and write it to pipe delimited files with gzip compression. Target Location: **/user/[YOUR_USER_NAME]/retail_db/order_items**. Make sure to validate.
* Ignore the error if the target location already exists. Also make sure to write into only one file. We can use `coalesce` for it.
**`coalesce` will be covered in detail at a later point in time**
```
val order_items = spark.
read.
json("/public/retail_db_json/order_items")
// Using format
val username = System.getProperty("user.name")
order_items.
coalesce(1).
write.
mode("ignore").
option("compression", "gzip").
option("sep", "|").
format("csv").
save(s"/user/${username}/retail_db/order_items")
import sys.process._
val username = System.getProperty("user.name")
s"hdfs dfs -ls /user/${username}/retail_db/order_items" !
```
## Reorganizing airlines data
Let us reorganize our airlines data to fewer files where data is compressed and also partitioned by Month.
* We have ~1920 files of ~64MB Size.
* Data is in the range of 1987 October and 2008 December (255 months)
* By default it uses ~1920 threads to process the data and it might end up with too many small files. We can avoid that by using repartition and then partition by the month.
* Here are the steps we are going to follow to partition by flight month and save the data to /user/[YOUR_USER_NAME]/airlines.
* Read one file first and get the schema.
* Read the entire data by applying the schema from the previous step.
* Add additional column flightmonth using withColumn by using lpad on month column and concat functions. We need to do this as the month in our data set is of type integer and we want to pad with 0 for months till september to format it into YYYYMM.
* Repartition the data into 255 based on the number of months using flightmonth
* Partition the data by partitionBy while writing the data to the target location.
* We will use parquet file format which will automatically compresses data using Snappy algorithm.
**This process will take time, once it is done we will review the target location to which data is copied by partitioning using month**
```
spark.stop
import org.apache.spark.sql.SparkSession
val spark = SparkSession.
builder.
config("spark.dynamicAllocation.enabled", "false").
config("spark.executor.instances", 40).
appName("Data Processing - Overview").
master("yarn").
getOrCreate
spark
import org.apache.spark.sql.functions. {concat, lpad}
val airlines_schema = spark.read.
option("header", "true").
option("inferSchema", "true").
csv("/public/airlines_all/airlines/part-00000").
schema
val airlines = spark.
read.
option("header", "true").
schema(airlines_schema).
csv("/public/airlines_all/airlines/part*")
airlines.printSchema
airlines.show
spark.conf.set("spark.sql.shuffle.partitions", "255")
val username = System.getProperty("user.name")
airlines.
distinct.
withColumn("flightmonth", concat($"year", lpad($"month", 2, "0"))).
repartition(255, $"flightmonth").
write.
mode("overwrite").
partitionBy("flightmonth").
format("parquet").
save(s"/user/${username}/airlines-part")
```
## Previewing reorganized data
Let us preview the data using reorganized data.
* We will use new location going forward - **/public/airlines_all/airlines-part**. Data is already copied into that location.
* We have partitioned data by month and stored in that location.
* Instead of using complete data set we will read the data from one partition **/public/airlines_all/airlines-part/flightmonth=200801**
* First let us create a DataFrame object by using `spark.read.parquet("/public/airlines_all/airlines-part/flightmonth=200801")` - let's say airlines.
* We can get the schema of the DataFrame using `airlines.printSchema()`
* Use `airlines.show()` or `airlines.show(100, truncate=False)` to preview the data.
* We can also use `display(airlines)` to get airlines data in tabular format as part of Databricks Notebook.
* We can also use `airlines.describe().show()` to get some statistics about the Data Frame and `airlines.count()` to get the number of records in the DataFrame.
## Analyze and Understand Data
Let us analyze and understand more about the data in detail using data of 2008 January.
* First let us read the data for the month of 2008 January.
```
val airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
val airlines = spark.
read.
parquet(airlines_path)
airlines.count
airlines.printSchema
```
* Get number of records - `airlines.count()`
* Go through the list of columns and understand the purpose of them.
* Year
* Month
* DayOfMonth
* CRSDepTime - Scheduled Departure Time
* DepTime - Actual Departure Time.
* DepDelay - Departure Delay in Minutes
* CRSArrTime - Scheduled Arrival Time
* ArrTime - Actual Arrival Time.
* ArrDelay - Arrival Delay in Minutes.
* UniqueCarrier - Carrier or Airlines
* FlightNum - Flight Number
* Distance - Distance between Origin and Destination
* IsDepDelayed - this is set to yes for those flights where departure is delayed.
* IsArrDelayed -- this is set to yes for those flights where arrival is delayed.
* Get number of unique origins
```
airlines.
select("Origin").
distinct.
count
```
* Get number of unique destinations
```
airlines.
select("Dest").
distinct.
count
```
* Get all unique carriers
```
airlines.
select("UniqueCarrier").
distinct.
show
```
## Conclusion
Let us recap about key takeaways from this module.
* APIs to read the data from files into Data Frame.
* Previewing Schema and the data in Data Frame.
* Overview of Data Frame APIs and Functions
* Writing data from Data Frame into Files
* Reorganizing the airlines data by month
* Simple APIs to analyze the data.
Now it is time for us to deep dive into APIs to perform all the standard transformations as part of Data Processing.
| github_jupyter |
# Python (EPAM, 2020), lecture 11
# Section 0. Metaclasses one more time
```python
class DisallowPublicClassAttributes(type):  # is a metaclass
    """Metaclass that forbids public attributes in a class body.

    Creating a class whose body declares any name that does not start
    with an underscore raises TypeError. Dunder entries injected by
    Python (``__module__``, ``__qualname__``, ...) start with "_" and
    are therefore allowed.
    """

    def __new__(cls, name, bases, dct):
        cls_instance = super().__new__(cls, name, bases, dct)
        # Generator instead of a throwaway list; any() short-circuits.
        if any(not key.startswith("_") for key in dct):
            # TypeError (a subclass of Exception) with a message, instead
            # of a bare Exception() that gives the user no hint at all.
            raise TypeError(
                f"class {name!r} declares public attributes; "
                "only names starting with '_' are allowed"
            )
        return cls_instance
class NoErrorClass(metaclass=DisallowPublicClassAttributes):
__private = ""
class ErrorClass(metaclass=DisallowPublicClassAttributes):
public = ""
```
# Section 1. OOP Patterns
Three main categories:
- Creational
These patterns provide various object creation mechanisms,
which increase flexibility and reuse of existing code.
- Structural
These patterns explain how to assemble objects and classes
into larger structures while keeping these structures flexible and efficient.
- Behavioral
These patterns are concerned with algorithms and the assignment of
responsibilities between objects.
## Creational patterns
- Singleton
Ensures that a class has just a single instance
- Factory method
Provides an interface for creating objects in
a superclass, but allows subclasses to alter the
type and behaviour of objects that will be created.
and others https://refactoring.guru/design-patterns/creational-patterns
### 3 most popular ways to implement singleton
Decorator
```python
def singleton(cls):
    """Class decorator: every call returns the one cached instance of *cls*.

    The first call constructs the instance with the given arguments;
    subsequent calls ignore their arguments and return the cached object.
    """
    _instances = {}

    def get_instance(*args, **kwargs):
        # EAFP: the cache hit is the common case after the first call.
        try:
            return _instances[cls]
        except KeyError:
            _instances[cls] = cls(*args, **kwargs)
            return _instances[cls]

    return get_instance
@singleton
class SingletonClass:
pass
```
A base class
```python
class Singleton(object):
    """Base class implementing the singleton pattern via ``__new__``.

    Each subclass gets at most one instance; repeated construction
    returns the cached object. Note that ``__init__`` still runs on
    every call, as is usual with this implementation style.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Bug fix: the original tested isinstance(cls._instance, type(cls)),
        # i.e. against the *metaclass* -- never true for plain instances --
        # so a brand-new object was created on every call. Checking against
        # ``cls`` itself restores singleton behaviour and also keeps a
        # subclass from reusing its parent's instance.
        if not isinstance(cls._instance, cls):
            # Bug fix: object.__new__ rejects extra arguments in Python 3;
            # constructor arguments are consumed by __init__ instead.
            cls._instance = super().__new__(cls)
        return cls._instance
class SingletonClass(Singleton):
pass
```
A metaclass
```python
class Singleton(type):
    """Metaclass version of the singleton pattern.

    Intercepts the class call and hands back a cached instance,
    constructing it only on the first invocation.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        cached = cls._instances.get(cls)
        if cached is None:
            cached = super().__call__(*args, **kwargs)
            cls._instances[cls] = cached
        return cached
class SingletonClass(metaclass=Singleton):
pass
```
### Factory method simple example
```python
import abc
class Shape(abc.ABC):
    """Abstract interface for 2-D shapes.

    Concrete subclasses must implement both the area and the
    perimeter computation; instantiating Shape itself fails.
    """

    @abc.abstractmethod
    def calculate_area(self):
        """Return the area of the shape."""

    @abc.abstractmethod
    def calculate_perimeter(self):
        """Return the perimeter of the shape."""
class Rectangle(Shape):
    """Axis-aligned rectangle defined by its height and width."""

    def __init__(self, height, width):
        self.height = height
        self.width = width

    def calculate_area(self):
        """Area = height * width."""
        return self.width * self.height

    def calculate_perimeter(self):
        """Perimeter = twice the sum of the two side lengths."""
        return (self.height + self.width) * 2
class Square(Shape):
def __init__(self, width):
self.width = width
def calculate_area(self):
return self.width ** 2
def calculate_perimeter(self):
return 4 * self.width
```
## Structural patterns
- Decorator
Lets you attach new behaviors to objects by placing these
objects inside special wrapper objects that contain the behaviors.
- Proxy
Lets you provide a substitute or placeholder for another object.
A proxy controls access to the original object, allowing you to perform
something either before or after the request gets through to the
original object.
and others https://refactoring.guru/design-patterns/structural-patterns
### Proxy pattern simple example
```python
class Product:
    """Real subject: performs the actual request handling."""
    def request(self) -> None:
        print("RealSubject: Handling request.")


class Proxy:
    """Protection proxy: gates access to the real product and logs usage."""

    def __init__(self, real_product: Product) -> None:
        self._real_product = real_product

    def request(self) -> None:
        # Deny early when the access check fails; otherwise delegate and log.
        if not self.check_access():
            print("Forbidden")
            return
        self._real_product.request()
        self.log_access()

    def check_access(self) -> bool:
        print("Proxy: Checking access prior to firing a real request.")
        return True

    def log_access(self) -> None:
        print("Proxy: Logging the time of request.", end="")
```
Python example:
https://docs.python.org/3/library/types.html#types.MappingProxyType
## Behavioral patterns
- Iterator
Lets you traverse elements of a collection.
- Observer
Lets you define a subscription mechanism to notify multiple
objects about any events that happen to the object they’re observing.
and others https://refactoring.guru/design-patterns/behavioral-patterns
### Observer simple example
```python
class Product:
    """Subject: keeps a list of subscribers and notifies them on changes."""

    def __init__(self):
        # Per-instance subscriber list. (The original used a mutable CLASS
        # attribute, so every Product instance shared the same observers.)
        self._observers = []

    def attach(self, observer):
        """Subscribe *observer*; it must expose an `update(product)` method."""
        self._observers.append(observer)

    def notify(self) -> None:
        # Push-style notification: each observer receives the subject itself.
        for observer in self._observers:
            observer.update(self)

    def do_some_logic(self) -> None:
        """Mutate state and broadcast the change to all subscribers."""
        self.value = 2
        self.notify()


class ObserverA:
    """Concrete observer: reacts when the product's value drops below 3."""

    def update(self, product):
        if product.value < 3:
            print("ObserverA: Reacted to the event")
product = Product()
observer_a = ObserverA()
product.attach(observer_a)
product.do_some_logic()
```
# Section 2. Garbage collection
There are two aspects to memory management and garbage collection in CPython:
- Reference counting
- Generational garbage collection
## Reference counting
The main garbage collection mechanism in CPython is through reference counts.
Whenever you create an object in Python, the underlying C object has both a Python type
(such as list, dict, or function) and a reference count.
At a very basic level, a Python object’s reference count is incremented whenever
the object is referenced, and it’s decremented when an object is dereferenced. If an
object’s reference count is 0, the memory for the object is deallocated.
Your program’s code can’t disable Python’s reference counting.
```python
import sys
a = 'test string'  # fixed typo: was 'test stirng', inconsistent with the next cell
# NOTE(review): getrefcount's exact result is CPython-version-specific --
# confirm on the interpreter used for this tutorial.
assert sys.getrefcount(a) == 2
```
```python
import sys
a = 'test string'
b = [a] # Make a list with a as an element.
c = {'key': a} # Create a dictionary with a as one of the values.
assert sys.getrefcount(a) == 4
```
## Generational garbage collection
```python
class MyClass:
pass
a = MyClass() # refcount: 1
a.obj = a # refcount: 2
del a # refcount: 1
```
That's why we need a generational garbage collector.
### Generation
The garbage collector is keeping track of all objects in memory. A new object
starts its life in the first generation of the garbage collector. If Python executes
a garbage collection process on a generation and an object survives, it moves up into
a second, older generation. The Python garbage collector has three generations in
total, and an object moves into an older generation whenever it survives a garbage
collection process on its current generation.
### Threshold
For each generation, the garbage collector module has a threshold number of objects.
If the number of objects exceeds that threshold, the garbage collector will trigger
a collection process. For any objects that survive that process, they’re moved into an
older generation.
Unlike the reference counting mechanism, you may change the behavior of the
generational garbage collector in your Python program. This includes changing the
thresholds for triggering a garbage collection process in your code, manually
triggering a garbage collection process, or disabling the garbage collection process
altogether.
### Threshold
```python
import gc
gc.get_threshold()
(700, 10, 10)
gc.set_threshold(1000, 15, 15)
gc.get_threshold()
(1000, 15, 15)
```
```python
import gc
gc.get_count()
(596, 2, 1)
```
```python
import gc
gc.get_count()
(595, 2, 1)
gc.collect()
57
gc.get_count()
(18, 0, 0)
```
# Section 3. Weak references
Unlike the references we discussed above, a weak reference is a reference that does
not protect the object from getting garbage collected.
Why?
There are two main applications of weak references:
- implement caches for large objects (weak dictionaries)
- reduction of pain from circular references
To create weak references, Python has provided us with a module named weakref.
A point to keep in mind before using weakref is that some builtins, such as tuple and int,
do not support this. list and dict do not support it directly either, but we can add
support through subclassing.
## Weakref module
`class weakref.ref(object[, callback])`
This returns a weak reference to the object.
`weakref.proxy(object[, callback])`
This returns a proxy to object which uses a weak reference.
`weakref.getweakrefcount(object)`
Return the number of weak references and proxies which refer to object.
`weakref.getweakrefs(object)`
Return a list of all weak reference and proxy objects which refer to object.
## Usage of weakref
```python
import weakref
class MyClass(list):
pass
obj = MyClass("TEST")
normal_list = obj
print(f"This is a normal list object: {normal_list}")
weak_list = weakref.ref(obj)
weak_list_obj = weak_list()
print(f"This is a object created using weak reference: {weak_list_obj}")
proxy_list = weakref.proxy(obj)
print(f"This is a proxy object: {proxy_list}")
for objects in [normal_list, weak_list_obj, proxy_list]:
print(f"Number of weak references: {weakref.getweakrefcount(objects)}")
```
```
This is a normal list object: [‘T’, ‘E’, ‘S’, ‘T’]
This is a object created using weak reference: [‘T’, ‘E’, ‘S’, ‘T’]
This is a proxy object: [‘T’, ‘E’, ‘S’, ‘T’]
Number of weak references: 2
Number of weak references: 2
Number of weak references: 0
```
# Section 4. Lazy objects
Lazy evaluation is a programming implementation paradigm that
defers evaluating necessary operations until it’s requested to do so.
Why?
```python
class User:
    """Eager variant: profile data is fetched at construction time."""

    def __init__(self, username):
        self.username = username
        # Expensive fetch happens for EVERY instance, needed or not.
        self.profile_data = self._get_profile_data()
        print(f"{self.__class__.__name__} instance created")

    def _get_profile_data(self):
        print("Run the expensive operation")
        return "The mock data of a large size"


def get_followers(username):
    """Return User objects for the followers of *username* (mocked)."""
    fetched = ["David", "Aaron", "Zack"]
    return [User(name) for name in fetched]
...
users = get_followers("user0")
```
```
Run the expensive operation
User instance created
Run the expensive operation
User instance created
Run the expensive operation
User instance created
```
## __getattr__
```python
class User2:
    """Lazy variant: profile data is fetched on first attribute access."""

    def __init__(self, username):
        self.username = username
        print(f"{self.__class__.__name__} instance created")

    def __str__(self):
        return f"user {self.username}"

    def __getattr__(self, name):
        # Only reached when `name` is absent from the instance dict.
        print(f"__getattr__ called for {name}")
        if name != "profile_data":
            raise AttributeError(f"{self} has no attribute called {name}.")
        value = self._get_profile_data()
        # Cache on the instance so later accesses bypass __getattr__ entirely.
        setattr(self, name, value)
        return value

    def _get_profile_data(self):
        print("Run the expensive operation")
        return "The mock data of a large size"
```
`__getattr__` method doesn’t get called when a particular attribute is in
the instance dictionary.
| github_jupyter |
# Example Data
```
import os
def mkfile(filename, body=None):
    """Write *body* into *filename*; a falsy body writes the filename itself."""
    with open(filename, 'w') as handle:
        handle.write(body or filename)
    return
def make_example_dir(top):
    """Build the example tree used by the filecmp demos below.

    Creates `dir1` and `dir2` under *top* containing: entries unique to one
    side, identical files, same-named files with different content, and one
    name that is a file in dir1 but a directory in dir2.
    """
    if not os.path.exists(top):
        os.mkdir(top)
    # Work inside `top`, restoring the original cwd afterwards.
    curdir = os.getcwd()
    os.chdir(top)
    os.mkdir('dir1')
    os.mkdir('dir2')
    mkfile('dir1/file_only_in_dir1')
    mkfile('dir2/file_only_in_dir2')
    os.mkdir('dir1/dir_only_in_dir1')
    os.mkdir('dir2/dir_only_in_dir2')
    os.mkdir('dir1/common_dir')
    os.mkdir('dir2/common_dir')
    # Same name, same content on both sides.
    mkfile('dir1/common_file', 'this file is the same')
    mkfile('dir2/common_file', 'this file is the same')
    # Same name, different content (mkfile defaults to writing its own path).
    mkfile('dir1/not_the_same')
    mkfile('dir2/not_the_same')
    # Same name, different kind: file in dir1 vs directory in dir2 --
    # this becomes a "funny" entry for filecmp.dircmp.
    mkfile('dir1/file_in_dir1', 'This is a file in dir1')
    os.mkdir('dir2/file_in_dir1')
    os.chdir(curdir)
    return
os.chdir(os.path.dirname('filecomp.ipynb') or os.getcwd())
make_example_dir('example')
make_example_dir('example/dir1/common_dir')
make_example_dir('example/dir2/common_dir')
```
# Comparing Files
```
import filecmp
print('common_file :', end=' ')
print(filecmp.cmp('example/dir1/common_file',
'example/dir2/common_file'),
end=' ')
print(filecmp.cmp('example/dir1/common_file',
'example/dir2/common_file',
shallow=False))
print('not_the_same:', end=' ')
print(filecmp.cmp('example/dir1/not_the_same',
'example/dir2/not_the_same'),
end=' ')
print(filecmp.cmp('example/dir1/not_the_same',
'example/dir2/not_the_same',
shallow=False))
print('identical :', end=' ')
print(filecmp.cmp('example/dir1/file_only_in_dir1',
'example/dir1/file_only_in_dir1'),
end=' ')
print(filecmp.cmp('example/dir1/file_only_in_dir1',
'example/dir1/file_only_in_dir1',
shallow=False))
import filecmp
import os
# Determine the items that exist in both directories
d1_contents = set(os.listdir('example/dir1'))
d2_contents = set(os.listdir('example/dir2'))
common = list(d1_contents & d2_contents)
common_files = [
f
for f in common
if os.path.isfile(os.path.join('example/dir1', f))
]
print('Common files:', common_files)
# Compare the directories
match, mismatch, errors = filecmp.cmpfiles(
'example/dir1',
'example/dir2',
common_files,
)
print('Match :', match)
print('Mismatch :', mismatch)
print('Errors :', errors)
```
# Comparing Directories
```
import filecmp
dc = filecmp.dircmp('example/dir1', 'example/dir2')
dc.report()
import filecmp
dc = filecmp.dircmp('example/dir1', 'example/dir2')
dc.report_full_closure()
```
# Using Different
```
import filecmp
import pprint
dc = filecmp.dircmp('example/dir1', 'example/dir2')
print('Left:')
pprint.pprint(dc.left_list)
print('\nRight:')
pprint.pprint(dc.right_list)
import filecmp
import pprint
dc = filecmp.dircmp('example/dir1', 'example/dir2',
ignore=['common_file'])
print('Left:')
pprint.pprint(dc.left_list)
print('\nRight:')
pprint.pprint(dc.right_list)
import filecmp
import pprint
dc = filecmp.dircmp('example/dir1', 'example/dir2')
print('Common:')
pprint.pprint(dc.common)
print('\nLeft:')
pprint.pprint(dc.left_only)
print('\nRight:')
pprint.pprint(dc.right_only)
import filecmp
import pprint
dc = filecmp.dircmp('example/dir1', 'example/dir2')
print('Common:')
pprint.pprint(dc.common)
print('\nDirectories:')
pprint.pprint(dc.common_dirs)
print('\nFiles:')
pprint.pprint(dc.common_files)
print('\nFunny:')
pprint.pprint(dc.common_funny)
```
| github_jupyter |
# Plots
> Plotting is everything!
Here we provide the code to process the results as they come from the examples in the benchmarking and transfer learning notebooks. The plots have the same format as the ones in the paper.
```
from bounce.hamiltonian import XXHamiltonian
from bounce.utils import save_benchmark, load_benchmark
from bounce.utils import best_so_far, arrange_shape, state2str
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
from tqdm.auto import tqdm
from pathlib import Path
import pickle
%matplotlib inline
```
## Benchmark plots
In the paper we compare the performance of different methods. Here we provide the source code to reproduce the plots (Figure 5).
```
def load_benchmark_plot_data(suffix, **kwargs):
    "Loads the data required for the plotting."
    bench_path = Path("../benchmarks/") / f"bench_plot_data_{suffix}.pkl"
    # No cached pickle yet: recompute (and persist) the plot summary.
    if not bench_path.exists():
        return process_benchmark_data(suffix, **kwargs)
    with open(bench_path, 'rb') as f:
        return pickle.load(f)
def process_benchmark_data(suffix, max_n=16, max_trains=50, algorithms=('DQN', 'MC', 'BFS'), save_path=None):
    """Process raw benchmark data into per-algorithm convergence summaries.

    For every size N in [5, max_n] and algorithm, computes the mean/std of the
    best-so-far reward across runs and the first step at which the mean reaches
    95% optimality (np.inf when never reached). The result dict is pickled to
    *save_path* (default ../benchmarks/bench_plot_data_{suffix}.pkl) and returned.
    """
    # Note: the default is a tuple now (was a mutable list default).
    results = {algorithm: {} for algorithm in algorithms}
    for N in range(5, max_n+1):
        # XX chain with uniform field and couplings cycling through 0,1,2.
        B, J = [1]*N, [i%3 for i in range(N)]
        H = XXHamiltonian(N, B, J)
        for algorithm in algorithms:
            try:
                benchmark = load_benchmark(N, H, algorithm, suffix=suffix)
                if algorithm == 'DQN':
                    rewards = best_so_far(arrange_shape(benchmark['exploration']['oracle_rewards'][:max_trains]))
                else:
                    rewards = best_so_far(arrange_shape(benchmark['oracle_rewards'][:max_trains]))
                best_mean = np.mean(rewards, axis=0)
                best_std = np.std(rewards, axis=0)
                try:
                    over95 = np.where(best_mean >= 0.95)[0][0]
                except IndexError:
                    # Mean never crossed the 95% threshold.
                    over95 = np.inf
                results[algorithm][N] = (best_mean, best_std, over95)
            except Exception:
                # Benchmark missing or malformed for this (N, algorithm); skip.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                pass
    if save_path is None:
        save_path = Path(f"../benchmarks/bench_plot_data_{suffix}.pkl")
    with open(save_path, 'wb') as f:
        pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)
    return results
def plot_benchmark(results, suffix, ylim=(0, 2300), lw=3.5, ms=10, fs=18, ts=16, n_max=17, algorithms=["DQN", "MC", "BFS"]):
    """Plot, per algorithm, the states needed to reach 95% optimality vs
    system size, and save the figure to ../figures/benchmark_sizes_{suffix}.pdf."""
    linestyles = ['-', ':', '--']
    markers = ['s', 'o', 'h']
    fig = plt.figure(figsize=(12, 5))
    for a, alg in enumerate(results.keys()):
        if alg not in algorithms: continue
        plot_vline = False
        ns, times = [], []
        # Collect (size, time-to-95%) pairs; np.inf marks "never reached" and
        # is dropped, triggering the "Not found" marker instead.
        for n, (_, _, over95) in results[alg].items():
            if n <= n_max: ns.append(n); times.append(over95)
            # NOTE(review): if the very first size is np.inf, `ns` ends up
            # empty and ns[-1] below raises IndexError -- confirm inputs.
            if times[-1] == np.inf: ns.pop(-1); times.pop(-1); plot_vline = True
            elif ns[-1] < n_max: plot_vline = True
        label = alg if alg != 'DQN' else 'RL'
        plt.plot(ns, times, linestyle=linestyles[a], linewidth=lw, marker=markers[a], ms=ms, label=label)
        if plot_vline:
            # Dashed vertical line at the largest size that still converged.
            plt.vlines(ns[-1], 0, ylim[1], linestyles='dashed', alpha=0.7)
            plt.text(ns[-1], 0.93*ylim[1], r"$\rightarrow$ Not found", fontsize=fs)
    plt.grid(alpha=0.5)
    plt.legend(fontsize=ts, loc="upper left")
    plt.xlabel("System size", fontsize=fs)
    plt.ylabel("States to 95% optimality", fontsize=ts)
    plt.tick_params(labelsize=ts)
    plt.savefig(Path(f"../figures/benchmark_sizes_{suffix}.pdf"), bbox_inches='tight')
suffix = 'half_3'
results_half = load_benchmark_plot_data(suffix)
plot_benchmark(results_half, suffix, n_max=16)
suffix = 'all_3'
results_all = load_benchmark_plot_data(suffix)
plot_benchmark(results_all, suffix, ylim=(0, 3500), n_max=16)
# NOTE(review): `algorithms` and `results` are not defined in this cell;
# presumably one of `results_half` / `results_all` (and the algorithm list)
# from the cells above was intended -- confirm before running.
n = 11
linestyles = ['-.', ':', '--']
linewidth = 3.5
fs = 18
ticksize = 15
plt.figure(figsize=(10, 5))
for a, algorithm in enumerate(algorithms):
    # results[algorithm][n][0] is the mean best-so-far reward curve for size n.
    plt.plot(results[algorithm][n][0], linestyle=linestyles[a], linewidth=linewidth, label=algorithm)
plt.grid()
plt.legend(fontsize=ticksize)
plt.tick_params(labelsize=ticksize)
plt.xlabel("New visited states", fontsize=fs)
plt.ylabel("Proximity to optimal solution", fontsize=fs);
```
## Transfer learning across phases
We analyse the effect of transfer learning across different phases of the same Hamiltonian. Here we provide the source code to reproduce the plots (Figure 6).
```
N = 6
max_limit = 185
B0 = 5
path = Path(f"../benchmarks/TL_N{N}_{max_limit}_from_B{B0}.pkl")
with open(path, 'rb') as f:
TL_evaluation = pickle.load(f)
def convergence_time(results, tol=5e-4, T=50, t_avg=20, return_diffs=False):
    """Returns the convergence with criterion of not changing result by `tol` for `T` epochs.

    Rewards are smoothed with a moving average of width `t_avg`; convergence is
    the first epoch where the `T`-epoch average absolute change drops to `tol`.
    """
    if 'training' in results:
        results = results['training']
    rewards = arrange_shape(results['rewards'])
    mean_rewards = np.mean(rewards, axis=0)
    # Smooth the mean reward, then measure its epoch-to-epoch variation.
    moving_avg = np.convolve(np.array([1/t_avg]*t_avg), mean_rewards, mode='valid')
    diffs = np.abs(moving_avg[1:] - moving_avg[:-1])
    diff_variation = np.convolve(np.array([1/T]*T), diffs, mode='valid')
    try:
        t = np.where(diff_variation <= tol)[0][0]
    except IndexError:
        # Criterion never met: fall back to the full training length.
        # (Was a bare `except:`.)
        t = len(mean_rewards)
    if return_diffs:
        return t + 2*T, moving_avg, diff_variation
    return t + T
def indiv_convergence_time(results, max_epochs=800, **kwargs):
    "Similar to `convergence_time` but with each agent."
    per_agent = results['rewards']
    # Evaluate the convergence criterion on every agent's own reward trace.
    times = []
    for trace in per_agent:
        times.append(convergence_time({'rewards': [trace[:max_epochs]]}, **kwargs))
    return np.array(times)
def get_indiv_times(tl_eval, convergence_crit=None):
    """Provides convergence-time ratios t_TL/t_0 (and standard errors) per field value.

    *tl_eval* maps each magnetic field B to {'tl': ..., 'vanilla': ...} training
    results; *convergence_crit* overrides the default convergence criterion.
    """
    default_crit = {'T': 50, 't_avg': 100, 'tol': 2e-4}
    convergence_crit = {**default_crit, **(convergence_crit or {})}
    # Bug fix: iterate over the argument, not the global `TL_evaluation`.
    Bs = sorted(tl_eval.keys())
    time_ratios, time_err = [], []
    for b in Bs:
        ts_tl = indiv_convergence_time(tl_eval[b]['tl'], **convergence_crit)
        ts_0 = indiv_convergence_time(tl_eval[b]['vanilla'], **convergence_crit)
        t0, ttl = ts_0.mean(), ts_tl.mean()
        ratio = ttl/t0
        # First-order error propagation for the ratio ttl/t0.
        ratio_std = np.sqrt((1/t0)**2*ts_tl.var() + (ttl/t0**2)**2*ts_0.var())
        time_ratios.append(ratio)
        time_err.append(ratio_std/np.sqrt(len(ts_0)))
    return Bs, np.array(time_ratios), np.array(time_err)
inset_Bs = [0., 1.5, 2., 4.]
ax_width, ax_length = 0.25, 0.2
max_epochs = 700
refs = ["(a)", "(b)", "(c)", "(d)"]
plot_rewards_tl = [np.mean(TL_evaluation[b]['tl']['rewards'], axis=0)[:max_epochs] for b in inset_Bs]
plot_rewards_cs = [np.mean(TL_evaluation[b]['vanilla']['rewards'], axis=0)[:max_epochs] for b in inset_Bs]
def plot_subplot(ax, tl_rewards, cs_rewards, xlabel=False, legend=False, ref="(a)", ylim=[-0.05, 1.05]):
    """Draw one inset panel comparing cold-start (CS) and transfer-learning (TL)
    reward curves on axis *ax*; *ref* is the panel label, e.g. "(a)"."""
    ax.plot(cs_rewards, linewidth=3, label="CS")
    ax.plot(tl_rewards, linewidth=3, label="TL")
    # Hide the x tick labels on all panels except the bottom one.
    ax.tick_params(labelbottom=xlabel)
    ax.text(0.1, 0.7, ref, fontsize=16)
    if xlabel: ax.set_xlabel("Training Episode", fontsize=20)
    if legend: ax.legend(fontsize=14, loc='lower right')
    ax.tick_params(labelsize=16)
    ax.set_ylim(ylim)
    ax.grid()
fig = plt.figure(figsize=(14, 5))
gs0 = gs.GridSpec(1, 3, figure=fig)
ax1 = fig.add_subplot(gs0[:-1])
# Times
Bs, time_ratios, time_errs = get_indiv_times(TL_evaluation)
ax1.fill_between(Bs, time_ratios-time_errs, time_ratios+time_errs, alpha=0.25)
ax1.plot(Bs, time_ratios, 's-', ms=7, lw=2)
ax1.tick_params(labelsize=16)
ax1.set_xlabel("B/J", fontsize=20)
ax1.set_ylabel(r"$t_{TL}/t_0$", fontsize=20);
ax1.grid(alpha=0.5)
for b, ref in zip(inset_Bs, refs):
if b != 2: dx, dy = 0.1, 0.05
else: dx, dy = 0.23, 0.
x, y = b-dx, time_ratios[np.where(np.array(Bs) == b)[0][0]]+dy
ax1.text(x, y, ref, fontsize=16)
ymin, ymax = ax1.get_ylim()
ax1.vlines(0.75, ymin*1.2, ymax*0.95, linestyles='dashed', alpha=0.5)
ax1.vlines(1, ymin*1.2, ymax*0.95, linestyles='dashed', alpha=0.5)
ax1.vlines(2, ymin*1.2, ymax*0.95, linestyles='dashed', alpha=0.5)
# Trainings
gs1 = gs0[-1].subgridspec(4, 1)
axes2 = [fig.add_subplot(gs1[i]) for i in range(4)]
for i, ax in enumerate(axes2):
plot_subplot(ax, plot_rewards_tl[i], plot_rewards_cs[i], xlabel=i==3, legend=i==0, ref=refs[i])
fig.text(0.635, 0.5, "Reward", va='center', rotation='vertical', fontsize=20);
plt.savefig(f"../figures/TL_bench_N{N}_{max_limit}_from_B{B0}.pdf", bbox_inches="tight")
```
## Energy bounds at different system sizes
We also study the energy bounds provided by the same qualitative solutions at different system sizes in Appendix C. With the following code you can obtain the energy bounds along one phase of the XX Hamiltonian for several sets of constraints.
```
from bounce.environment import SDPEnvironment
from bounce.budget_profiles import FlatProfile
def test_configs(confs, N, Bs):
    "Tests a list of configurations for the XX Hamiltonian in the range of Bs (J=1)."
    # Uniform couplings J=1; the external field B is swept over `Bs`.
    J = [1]*N
    energies = []
    for b in tqdm(Bs):
        B = [b]*N
        H = XXHamiltonian(N, np.array(B), np.array(J))
        e = SDPEnvironment(N, H, FlatProfile())
        c_energies, c_params, c_layouts = [], [], []
        for c in confs:
            e.reset()
            # NOTE(review): the semantics of these state writes depend on
            # SDPEnvironment internals -- presumably activating the layout
            # components indexed by `c`; confirm against the environment API.
            e.state[:N] = 1
            e.state[c] = 1
            es, ps, _ = e.get_values()
            c_energies.append(es)
            c_params.append(ps)
            c_layouts.append(e.layout)
        energies.append(c_energies)
    # NOTE(review): c_params/c_layouts are rebuilt on every outer iteration, so
    # the returned params and layouts correspond to the LAST value of Bs only,
    # while `energies` covers the whole sweep -- confirm this is intentional.
    return np.array(energies), np.array(c_params), c_layouts
def plot_tests(energies, params, labels, Bs, norm=False, figsize=(8, 4), fontsize=16, labelsize=14):
    """Plot each layout's energy bound against B, shading the B/J regions."""
    plt.figure(figsize=figsize)
    # NOTE(review): the loop variable `params` shadows the parameter of the
    # same name; this works because zip() evaluated the original object first.
    for energy, params, label in zip(energies.T, params, labels):
        plt.plot(Bs, energy, label=label+f" ({params})")
    plt.grid()
    plt.legend(fontsize=labelsize)
    plt.xticks(Bs[::2])
    plt.xlabel("B/J", fontsize=fontsize)
    plt.ylabel("Energy bound per spin" if norm else "Energy bound", fontsize=fontsize)
    plt.tick_params(labelsize=labelsize)
    # Shaded background bands mark the different regions of the B/J axis.
    plt.axvspan(0, 0.75, facecolor='C0', alpha=0.1)
    plt.axvspan(0.75, 1., facecolor='C1', alpha=0.1)
    plt.axvspan(1., 2., facecolor='C2', alpha=0.1)
    plt.axvspan(2., 2.1, facecolor='C3', alpha=0.1);
def save_tests(energies, params, layouts, N):
    """Pickle the layout test results for system size *N*."""
    target = Path(f"../benchmarks/test_{N}.pkl")
    payload = (energies, params, layouts)
    with open(target, 'wb') as handle:
        pickle.dump(payload, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_tests(N):
    """Unpickle previously saved layout test results for system size *N*."""
    source = Path(f"../benchmarks/test_{N}.pkl")
    with open(source, 'rb') as handle:
        energies, params, layouts = pickle.load(handle)
    return energies, params, layouts
```
In every system size, we compute the bounds obtained with different layouts that we store in the list `confs`. In the code below, all the configurations that are never optimal are commented out. This way, we can call `test_configs` to compute the bounds for each layout given several values of the external magnetic field $B$ (assuming $J=1$).
We perform the entire analysis for sizes $n=6, 12, 24$ and $36$.
```
N = 6
confs = [np.arange(0, N, 2)+N,
np.concatenate((np.array([0]), np.arange(1, N-1, 2)))+N,
# np.arange(0, N//2, 1)+N, # double overlap
np.arange(0, N, 3)+N,
np.arange(0, 1)]
Bs = np.arange(0, 2.2, 0.1)
energies_6, params_6, layouts_6 = test_configs(confs, N, Bs)
save_tests(energies_6, params_6, layouts_6, N)
# Load previous results
Bs = np.arange(0, 2.2, 0.1)
energies_6, params_6, layouts_6 = load_tests(6)
labels = ['(a) 3 T, 0 P', '(b) 3 T, 1 P', '(c) 2 T, 2 P', '(d) 0 T, 6 P']
# Absolute bounds
plot_tests(energies_6, params_6, labels, Bs)
plt.title("N=6", fontsize=16);
plt.savefig("../figures/pattern_test_N6.pdf", bbox_inches="tight")
# Normalized bounds
plot_tests(energies_6/6, params_6, labels, Bs, norm=True)
plt.title("N=6", fontsize=16);
plt.savefig("../figures/pattern_test_N6_norm.pdf", bbox_inches="tight")
N = 12
confs = [np.arange(0, N, 2)+N,
np.concatenate((np.array([0]), np.arange(1, N//2-1, 2), np.array([N//2]), np.arange(N//2+1, N-1, 2)))+N,
np.arange(0, N, 3)+N,
np.arange(0, 1),
np.concatenate((np.array([0]), np.arange(1, N-1, 2)))+N,
# np.sort(np.concatenate((np.arange(0, N, 4), np.arange(1, N, 4))))+N, # tiplet-triplet-pair
# np.arange(0, N, 4)+N, # triplet-pair-pair
]
Bs = np.arange(0, 2.2, 0.1)
energies_12, params_12, layouts_12 = test_configs(confs, N, Bs)
save_tests(energies_12, params_12, layouts_12, N)
# Load previous results
Bs = np.arange(0, 2.2, 0.1)
energies_12, params_12, layouts_12 = load_tests(12)
labels = ['(a) 6 T, 0 P', '(b) 6 T, 2 P', '(c) 4 T, 4 P', '(d) 0 T, 12 P']
# Absolute bounds
plot_tests(energies_12[:, :-1], params_12[:-1], labels, Bs)
plt.title("N=12", fontsize=16);
plt.savefig("../figures/pattern_test_N12.pdf", bbox_inches="tight")
# Normalized bounds
plot_tests(energies_12[:, :-1]/12, params_12[:-1], labels, Bs, norm=True)
plt.title("N=12", fontsize=16);
plt.savefig("../figures/pattern_test_N12_norm.pdf", bbox_inches="tight")
N = 24
confs = [np.arange(0, N, 2)+N,
np.concatenate([np.arange(p*(N//4)+1, (p+1)*N//4-1, 2) for p in range(4)] + [np.array([p*N//4]) for p in range(4)])+N,
np.arange(0, N, 3)+N,
np.arange(0, 1),
np.concatenate([np.arange(p*(N//3)+1, (p+1)*N//3-1, 2) for p in range(3)] + [np.array([p*N//3]) for p in range(3)])+N,
np.concatenate((np.array([0]), np.arange(1, N//2-1, 2), np.array([N//2]), np.arange(N//2+1, N-1, 2)))+N,
np.concatenate((np.array([0]), np.arange(1, N-1, 2)))+N,
# np.sort(np.concatenate((np.arange(0, N, 4), np.arange(1, N, 4))))+N, # tiplet-triplet-pair
# np.arange(0, N, 4)+N, # triplet-pair-pair
]
Bs = np.arange(0, 2.2, 0.1)
energies_24, params_24, layouts_24 = test_configs(confs, N, Bs)
save_tests(energies_24, params_24, layouts_24, N)
# Load previous results
Bs = np.arange(0, 2.2, 0.1)
energies_24, params_24, layouts_24 = load_tests(24)
labels = ['(a) 12 T, 0 P', '(b) 12 T, 4 P', '(c) 8 T, 8 P', '(d) 0 T, 24 P']
# Absolute bounds
plot_tests(energies_24[:, :-3], params_24[:-3], labels, Bs)
plt.title("N=24", fontsize=16);
plt.savefig("../figures/pattern_test_N24.pdf", bbox_inches="tight")
# Normalized bounds
plot_tests(energies_24[:, :-3]/24, params_24[:-3], labels, Bs, norm=True)
plt.title("N=24", fontsize=16);
plt.savefig("../figures/pattern_test_N24_norm.pdf", bbox_inches="tight")
N = 36
confs = [np.arange(0, N, 2)+N,
np.concatenate([np.arange(p*(N//6)+1, (p+1)*N//6-1, 2) for p in range(6)] + [np.array([p*N//6]) for p in range(6)])+N,
np.arange(0, N, 3)+N,
np.arange(0, 1),
np.concatenate([np.arange(p*(N//4)+1, (p+1)*N//4-1, 2) for p in range(4)] + [np.array([p*N//4]) for p in range(4)])+N,
np.concatenate([np.arange(p*(N//3)+1, (p+1)*N//3-1, 2) for p in range(3)] + [np.array([p*N//3]) for p in range(3)])+N,
np.concatenate((np.array([0]), np.arange(1, N//2-1, 2), np.array([N//2]), np.arange(N//2+1, N-1, 2)))+N,
np.concatenate((np.array([0]), np.arange(1, N-1, 2)))+N,
# np.sort(np.concatenate((np.arange(0, N, 4), np.arange(1, N, 4))))+N, # tiplet-triplet-pair
# np.arange(0, N, 4)+N, # triplet-pair-pair
]
Bs = np.arange(0, 2.2, 0.1)
energies_36, params_36, layouts_36 = test_configs(confs, N, Bs)
save_tests(energies_36, params_36, layouts_36, N)
# Load previous results
Bs = np.arange(0, 2.2, 0.1)
energies_36, params_36, layouts_36 = load_tests(36)
labels = ['(a) 18 T, 0 P', '(b) 18 T, 6 P', '(c) 12 T, 12 P', '(d) 0 T, 36 P']
# Absolute bounds
plot_tests(energies_36[:, :-3], params_36[:-3], labels, Bs)
plt.title("N=36", fontsize=16);
plt.savefig("../figures/pattern_test_N36.pdf", bbox_inches="tight")
# Normalized bounds
plot_tests(energies_36[:, :-3]/36, params_36[:-3], labels, Bs, norm=True)
plt.title("N=36", fontsize=16);
plt.savefig("../figures/pattern_test_N36_norm.pdf", bbox_inches="tight")
```
## Comparison between different lower bound methods
We compare the performance of our SdP-based approach with other techniques developed to lower bound the ground state energy of many-body Hamiltonians. See the [SdP docs](https://borjarequena.github.io/BOUNCE/sdp.html) for more details about the methods.
Executing the following cells you will reproduce Figure 10 from Appendix D.
```
from bounce.sdp import solve_sdp, anderson_bound, uskov_lichkovskiy_bound
def compare_bounds(N, B_values, rdm_size=5):
    "Returns the energy bounds obtained with Anderson, Uskov-Lichkovskiy and our methods."
    # One layout family per overlap s = 1..rdm_size-1: clusters of rdm_size
    # consecutive sites (wrapping around the ring via %N), shifted by
    # rdm_size - s so that neighbouring clusters share s sites.
    layouts = [[np.sort(np.arange(i, i + rdm_size)%N) for i in np.arange(0, N, rdm_size - s)]
               for s in range(1, rdm_size)]
    anderson, uskov, our_bounds = [], [], [[] for _ in range(len(layouts))]
    J = [1]*N
    for b in tqdm(B_values):
        B = [b]*N
        H = XXHamiltonian(N, np.array(B), np.array(J))
        anderson.append(anderson_bound(H, cluster_size=rdm_size))
        uskov.append(uskov_lichkovskiy_bound(H, cluster_size=rdm_size))
        # One SdP solve per layout family for this field value.
        for layout, bounds in zip(layouts, our_bounds):
            bounds.append(solve_sdp(layout, H))
    return (anderson, uskov, *our_bounds)
N = 8
rdm_size = 5
Bs = np.arange(0, 3.1, 0.1) # Main plot with large spacing
Bs_inset = np.arange(0, 0.11, 0.01) # Finer grid for the inset
# Data for main plot
compare_data = compare_bounds(N, Bs, rdm_size=rdm_size)
comparison_path = Path(f"../benchmarks/anderson_uskov_comparison_N{N}_cs{rdm_size}.pkl")
# Bug fix: create the benchmarks *directory*. Calling mkdir() on the pickle
# path itself created a directory named like the file, so open(..., 'wb') failed.
comparison_path.parent.mkdir(parents=True, exist_ok=True)
with open(comparison_path, 'wb') as f:
    pickle.dump(compare_data, f, protocol=pickle.HIGHEST_PROTOCOL)
# Data for inset
inset_data = compare_bounds(N, Bs_inset, rdm_size=rdm_size)
inset_path = Path(f"../benchmarks/anderson_uskov_comparison_N{N}_cs{rdm_size}_inset.pkl")
inset_path.parent.mkdir(parents=True, exist_ok=True)
with open(inset_path, 'wb') as f:
    pickle.dump(inset_data, f, protocol=pickle.HIGHEST_PROTOCOL)
# Load previous results
N = 8
Bs = np.arange(0, 3.1, 0.1)
Bs_inset = np.arange(0, 0.11, 0.01)
comparison_path = Path(f"../benchmarks/anderson_uskov_comparison_N{N}_cs{rdm_size}.pkl")
with open(comparison_path, 'rb') as f:
comparison = pickle.load(f)
inset_path = Path(f"../benchmarks/anderson_uskov_comparison_N{N}_cs{rdm_size}_inset.pkl")
with open(inset_path, 'rb') as f:
inset = pickle.load(f)
figsize = (8, 5)
fontsize, labelsize = 16, 14
plot_data = comparison[:-1]
inset_data = inset[:-1]
labels = ['Anderson bound', 'TI bound', '1-body overlap', '2-body overlap', '3-body overlap']
fig = plt.figure(figsize=figsize)
ax1 = fig.add_axes([0.1, 0.1, 0.9, 0.8])
ax2 = fig.add_axes([0.19, 0.17, 0.42, 0.45])
for bounds, ins, label in zip(plot_data, inset_data, labels):
ax1.plot(Bs, np.array(bounds)/N, linewidth=2.2, label=label)
ax2.plot(Bs_inset, np.array(ins)/N, linewidth=2)
ax1.grid()
ax1.legend(fontsize=labelsize, loc="upper right")
ax1.set_xticks(Bs[::5])
ax1.set_xlabel("B/J", fontsize=fontsize)
ax1.set_ylabel("Energy bound per spin", fontsize=fontsize)
ax1.tick_params(labelsize=labelsize);
ax2.grid()
ax2.set_ylim([-1.375, -1.33])
ax2.set_xlim([-0.002, 0.075])
plt.savefig("../figures/comparison_anderson_uskov.pdf", bbox_inches="tight")
```
| github_jupyter |
```
from math import *

# Work queue of (midpoint, length, orientation) items; the H-tree is built
# breadth-first, one segment per makelines() call.
seed = (0,0)
length = 100
minlength = 10   # stop subdividing below this segment length
ratio = sqrt(2)  # each generation shrinks by sqrt(2)
vertical = True
queue = [(seed,length,vertical)]

def makelines(lines, queue, ratio):
    """Pop one work item, record its segment and enqueue its two children."""
    pt, seg_len, is_vertical = queue.pop(0)
    print("length:", seg_len)
    if seg_len > minlength:
        half = seg_len / 2.0
        if is_vertical:
            end_a = (pt[0], pt[1] + half)
            end_b = (pt[0], pt[1] - half)
        else:
            end_a = (pt[0] + half, pt[1])
            end_b = (pt[0] - half, pt[1])
        lines.append([end_a, end_b])
        # Children rotate 90 degrees and shrink by `ratio`.
        queue.append((end_a, seg_len / ratio, not is_vertical))
        queue.append((end_b, seg_len / ratio, not is_vertical))
    return lines, queue
lines = []
# Drain the work queue; each call emits at most one segment and enqueues two
# shorter children, so the loop ends once lengths fall below minlength.
while (len(queue)>0):
    lines, queue = makelines(lines,queue,ratio)
    print ("queue: ",len(queue),'\tlines: ',len(lines))
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def plotLines(lines):
    """Render every [A, B] segment in *lines* as a black line and show the figure."""
    for line in lines:
        A,B = line
        xs = [A[0],B[0]]
        ys = [A[1],B[1]]
        plt.plot(xs,ys,color='black',linewidth=2)
    # Equal scaling on both axes so the H-tree is not distorted.
    plt.axis('square')
    plt.show()
plotLines(lines)
10/sqrt(2)
```
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
\
# Recursion solution
```
from math import *

lines = []

def Hlines(pt, length):
    """Recursively append the three segments of an H centred at *pt*,
    stopping once the bar length reaches minlength (50 here)."""
    minlength = 50
    if length <= minlength:
        return
    r = sqrt(2)
    x, y = pt
    half = length / 2
    x0, x1 = x - half, x + half
    y0, y1 = y - half, y + half
    lines.append([(x0, y0), (x0, y1)])  # left segment
    lines.append([(x1, y0), (x1, y1)])  # right segment
    lines.append([(x0, y), (x1, y)])    # connecting segment
    child_length = length / r
    # Recurse into the four corner H-trees (LL, UL, LR, UR).
    for corner in ((x0, y0), (x0, y1), (x1, y0), (x1, y1)):
        Hlines(corner, child_length)
from math import *

lines = []

def Hlines(pt, length):
    """H-tree segments: middle bar of full *length*, side bars of
    length/sqrt(2); recursion stops when bars drop to minlength (10)."""
    minlength = 10
    if length <= minlength:
        return
    r = sqrt(2)
    x, y = pt
    xp, xn = x + length / 2, x - length / 2
    lines.append([(xp, y), (xn, y)])    # middle bar, length `length`
    yn = y - length / (2 * r)
    yp = y + length / (2 * r)
    lines.append([(xn, yn), (xn, yp)])  # left segment, length/sqrt(2)
    lines.append([(xp, yn), (xp, yp)])  # right segment, length/sqrt(2)
    child_length = length / (r * r)
    # Recurse into the four corner H-trees (LL, UL, LR, UR).
    for corner in ((xn, yn), (xn, yp), (xp, yn), (xp, yp)):
        Hlines(corner, child_length)
import sys
# Deep recursion ahead: raise the interpreter's recursion limit first.
x=5000
sys.setrecursionlimit(x)
seed = (0,0)
length = 100
lines = []
Hlines(seed,length)
plotLines(lines)
```
| github_jupyter |
```
// #r ".\binaries\bossspad.dll"
// #r ".\binaries\XNSEC.dll"
#r "C:\BoSSS\experimental\public\src\L4-application\BoSSSpad\bin\Release\net5.0\bossspad.dll"
#r "C:\BoSSS\experimental\public\src\L4-application\BoSSSpad\bin\Release\net5.0\XNSEC.dll"
// #r "C:\BoSSS\experimental\public\src\L4-application\BoSSSpad\bin\Release\net5.0\bossspad.dll"
//#r "C:\BoSSS\experimental\public\src\L4-application\BoSSSpad\bin\Release\net5.0\XNSEC.dll"
using System;
using System.Collections.Generic;
using System.Linq;
using System.IO;
using System.Data;
using System.Globalization;
using System.Threading;
using ilPSP;
using ilPSP.Utils;
using BoSSS.Platform;
using BoSSS.Foundation;
using BoSSS.Foundation.Grid;
using BoSSS.Foundation.Grid.Classic;
using BoSSS.Foundation.IO;
using BoSSS.Solution;
using BoSSS.Solution.Control;
using BoSSS.Solution.GridImport;
using BoSSS.Solution.Statistic;
using BoSSS.Solution.Utils;
using BoSSS.Solution.Gnuplot;
using BoSSS.Application.BoSSSpad;
using BoSSS.Application.XNSE_Solver;
using static BoSSS.Application.BoSSSpad.BoSSSshell;
using BoSSS.Foundation.Grid.RefElements;
using BoSSS.Platform.LinAlg;
using BoSSS.Solution.NSECommon;
Init();
```
## Open Database
```
// static var myDb = OpenOrCreateDatabase(@"C:\Databases\BoSSS_DB");
// Open (or create) the project database on the HPC-cluster scratch share.
static var myDb = OpenOrCreateDatabase(@"\\hpccluster\hpccluster-scratch\gutierrez\Database_HeatedCavity1e5_finer___223");
// Quick sanity check on the first stored session, then register the workflow.
myDb.Sessions[0].KeysAndQueries["Grid:NoOfCells"]
BoSSSshell.WorkflowMgm.Init("HeatedSquareCavity");
var sess = myDb.Sessions[0];
// Export plot files for the first session (runs the default export pipeline).
sess.Export().Do()
// var rightvalue = sess.KeysAndQueries["NusseltNumber1"];
// var residualValue = sess.KeysAndQueries["NusseltNumber2"];
```
## Create grid
```
// int[] Resolutions = new int[]{ 2,3,4,5,6,7 }; //new int[]{ 10, 20 };
// int[] DGdegree = new int[]{ 1,2,3,4 };
// Parameter study: one resolution level and one DG degree.
// A resolution level r corresponds to 2^(r+1) cells per direction.
int[] Resolutions = new int[]{5 }; //new int[]{ 10, 20 };
int[] DGdegree = new int[]{ 5 };
int[] nCells = Resolutions.Select(r => (int)(Math.Pow(2,r+1))).ToArray();
// Builds square [-0.5, 0.5]^2 Cartesian grids with sin-spaced nodes and
// assigns the boundary edge tags used by the heated-cavity setup.
public static class GridFactory {
    // Node positions in x: sin-spacing clusters nodes toward the walls; Res+1 nodes.
    public static double[] GetXNodes(int Res) {
        var xNodes = GenericBlas.SinLinSpacing(-0.5, 0.5, 0, Res + 1);
        return xNodes;
    }
    // Node positions in y (same spacing as x).
    static double[] GetYNodes(int Res) {
        double[] yNodes = GenericBlas.SinLinSpacing(-0.5, 0.5,0, Res + 1);
        return yNodes;
    }
    // Creates the 2D grid, tags its four walls and stores it in myDb.
    public static Grid2D GenerateGrid(int Res) {
        var xNodes = GetXNodes(Res);
        var yNodes = GetYNodes(Res);
        var grd = Grid2D.Cartesian2DGrid(xNodes, yNodes);
        // Edge tags:
        //  1: adiabatic no-slip wall (top and bottom)
        //  2: fixed-temperature wall, hot (left)
        //  3: fixed-temperature wall, cold (right)
        grd.EdgeTagNames.Add(1, "NoSlipNeumann");
        grd.EdgeTagNames.Add(2, "wall_tempfixed_left");
        grd.EdgeTagNames.Add(3, "wall_tempfixed_right");
        grd.DefineEdgeTags( delegate (double[] X) {
            double x = X[0];
            double y = X[1];
            // right cold wall (x = +0.5)
            if (Math.Abs(x - 0.5) < 1e-8)
                return 3;
            // top adiabatic wall (y = +0.5) — original comment said "bottom", which is wrong
            if (Math.Abs(y - 0.5 ) < 1e-8)
                return 1;
            // left hot wall (x = -0.5)
            if (Math.Abs(x + 0.5) < 1e-8)
                return 2;
            // bottom adiabatic wall (y = -0.5) — original comment said "top", which is wrong
            if (Math.Abs(y + 0.5 ) < 1e-8)
                return 1;
            // interior points should never reach the edge-tag delegate
            else throw new ArgumentOutOfRangeException();
        });
        // force = true: overwrite an equivalent grid already stored in the database
        bool force = true;
        myDb.SaveGrid(ref grd, force );
        return grd;
    }
}
// Produces boundary/initial-value Formula objects whose code is compiled
// from the generated "BoundaryValues" helper-class source below.
// NOTE(review): Th and Tc are accepted by every method, but the generated
// code hard-codes 1.6 / 0.4 — only Froude is actually interpolated into
// the source text. Confirm whether the parameters were meant to be used.
public static class BoundaryValueFactory {
    // Builds the C# source of a static helper class; each Get_* method below
    // references one of its functions by name.
    public static string GetPrefixCode(double Th, double Tc, double Froude) {
        using(var stw = new System.IO.StringWriter()) {
            stw.WriteLine("static class BoundaryValues {");
            stw.WriteLine(" static public double VelX(double[] X) {");
            stw.WriteLine(" return 0.0;");
            stw.WriteLine(" }");
            stw.WriteLine(" static public double VelY(double[] X) {");
            stw.WriteLine(" return 0.0;");
            stw.WriteLine(" }");
            stw.WriteLine(" static public double TemperatureHot(double[] X) {");
            stw.WriteLine(" return 1.6;");
            stw.WriteLine(" }");
            stw.WriteLine(" static public double TemperatureCold(double[] X) {");
            stw.WriteLine(" return 0.4;");
            stw.WriteLine(" }");
            stw.WriteLine(" static public double One(double[] X) {");
            stw.WriteLine(" return 1.0;");
            stw.WriteLine(" }");
            stw.WriteLine(" static public double Zero(double[] X) {");
            stw.WriteLine(" return 0.0;");
            stw.WriteLine(" }");
            // Hydrostatic pressure profile: p = -y / Fr^2 (Froude is baked into the source text)
            stw.WriteLine(" static public double InitialPressure(double[] X) { ");
            stw.WriteLine(" return (-1)* X[1] / ("+Froude * Froude +") ;");
            stw.WriteLine(" }");
            stw.WriteLine("}");
            return stw.ToString();
        }
    }
    // One Formula factory per generated helper function.
    static public Formula Get_VelX(double Th, double Tc , double Froude) {
        return new Formula("BoundaryValues.VelX", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
    static public Formula Get_VelY(double Th, double Tc, double Froude){
        return new Formula("BoundaryValues.VelY", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
    static public Formula Get_TemperatureHot(double Th, double Tc, double Froude){
        return new Formula("BoundaryValues.TemperatureHot", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
    static public Formula Get_TemperatureCold(double Th, double Tc, double Froude){
        return new Formula("BoundaryValues.TemperatureCold", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
    static public Formula Get_One(double Th, double Tc, double Froude){
        return new Formula("BoundaryValues.One", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
    static public Formula Get_Zero(double Th, double Tc, double Froude){
        return new Formula("BoundaryValues.Zero", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
    static public Formula Get_InitialPressure(double Th, double Tc, double Froude){
        return new Formula("BoundaryValues.InitialPressure", AdditionalPrefixCode:GetPrefixCode(Th,Tc,Froude));
    }
}
```
## Send and run jobs
```
// Build one XNSEC control object per (DG degree, resolution) combination.
var controls = new List<BoSSS.Application.XNSEC.XNSEC_Control>();
myDb
// NOTE(review): `controls` is declared a second time here — these lines came
// from separate notebook cells; in a single script this would be a redeclaration.
var controls = new List<BoSSS.Application.XNSEC.XNSEC_Control>();
// Hot/cold wall temperatures (non-dimensional) and Rayleigh number.
double Th = 1.6; double Tc = 0.4;
double Ra = 1e4;
foreach(int dg in DGdegree){
    foreach(int Res in Resolutions) {
        var C = new BoSSS.Application.XNSEC.XNSEC_Control();
        C.SetDGdegree(dg);
        // NOTE(review): this local shadows the outer `nCells` array, and passes the
        // cell count to GenerateGrid, whose parameter is named `Res` — confirm intended.
        var nCells = (int) Math.Pow(2,Res+1);
        C.SetGrid(GridFactory.GenerateGrid(nCells));
        // Tag the case so post-processing can identify it.
        C.Paramstudy_CaseIdentification.Add(new Tuple<string, object>("Res", Res));
        C.Paramstudy_CaseIdentification.Add(new Tuple<string, object>("Dgdegree", dg));
        // Single-species, non-reacting low-Mach configuration.
        C.EnableMassFractions = false;
        C.NumberOfChemicalSpecies = 1;
        C.ChemicalReactionActive = false;
        C.MatParamsMode = MaterialParamsMode.Sutherland;
        C.physicsMode = PhysicsMode.Combustion;
        C.TimesteppingMode = AppControl._TimesteppingMode.Steady;
        // NOTE(review): this SolverCode is overridden by classic_pardiso below.
        C.LinearSolver.SolverCode = LinearSolverCode.exp_Kcycle_schwarz;
        C.LinearSolver.NoOfMultigridLevels = 5;
        // C.NonLinearSolver.SolverCode = NonLinearSolverCode.Newton;
        C.LinearSolver.SolverCode = LinearSolverCode.classic_pardiso;
        C.NonLinearSolver.ConvergenceCriterion = 1e-11;
        C.LinearSolver.ConvergenceCriterion = 1e-12;
        C.NonLinearSolver.verbose = true;
        C.LinearSolver.verbose = false;
        C.NonLinearSolver.MaxSolverIterations = 50;
        C.PenaltyViscMomentum = 1.0 * 1;
        C.PenaltyHeatConduction = 1.0 * 1;
        C.PhysicalParameters.IncludeConvection = true;
        C.UseSelfMadeTemporalOperator = false;
        C.timeDerivativeEnergyp0_OK = false;
        C.timeDerivativeConti_OK = false;
        // Walls over which the Nusselt number is evaluated.
        C.EdgeTagsNusselt = new string[] { "wall_tempfixed_left", "wall_tempfixed_right", "NoSlipNeumann" };
        // Non-dimensional groups.
        C.Rayleigh = Ra;
        C.Reynolds = Math.Sqrt(Ra);
        C.Prandtl = 0.71;
        double Fr =Math.Sqrt(2 * C.Prandtl * (1.6 - 0.4) / (1.6 + 0.4));
        C.Froude = Fr;
        C.HeatCapacityRatio = 1.4;
        C.T_ref_Sutherland = 600*1+273*0;
        C.ThermodynamicPressureMode = ThermodynamicPressureMode.MassDetermined; // Because its a closed system, i.e. p0 = p0(time)
        C.PhysicalParameters.IncludeConvection = true;
        C.Timestepper_LevelSetHandling = BoSSS.Solution.XdgTimestepping.LevelSetHandling.None;
        C.SessionName = "NaturalConvection_k" + Res + "_DG" + dg;
        // Boundary conditions: no-slip velocities everywhere, fixed temperatures on left/right.
        C.AddBoundaryValue("NoSlipNeumann", VariableNames.VelocityX,BoundaryValueFactory.Get_VelX(Th, Tc,Fr));
        C.AddBoundaryValue("NoSlipNeumann", VariableNames.VelocityY,BoundaryValueFactory.Get_VelY(Th, Tc,Fr));
        C.AddBoundaryValue("wall_tempfixed_left", VariableNames.Temperature,BoundaryValueFactory.Get_TemperatureHot(Th, Tc,Fr));
        C.AddBoundaryValue("wall_tempfixed_right", VariableNames.Temperature, BoundaryValueFactory.Get_TemperatureCold(Th, Tc,Fr));
        C.AddBoundaryValue("wall_tempfixed_left", VariableNames.MassFraction0, BoundaryValueFactory.Get_One(Th, Tc,Fr));
        C.AddBoundaryValue("wall_tempfixed_right", VariableNames.MassFraction0, BoundaryValueFactory.Get_One(Th, Tc,Fr));
        // Initial conditions: fluid at rest, hydrostatic pressure, uniform temperature.
        C.AddInitialValue(VariableNames.VelocityX, BoundaryValueFactory.Get_Zero(Th, Tc,Fr));
        C.AddInitialValue(VariableNames.VelocityY,BoundaryValueFactory.Get_Zero(Th, Tc,Fr));
        C.AddInitialValue(VariableNames.Pressure,BoundaryValueFactory.Get_InitialPressure(Th, Tc,Fr));
        C.AddInitialValue(VariableNames.Temperature,BoundaryValueFactory.Get_One(Th, Tc,Fr));
        C.AddInitialValue(VariableNames.MassFraction0, BoundaryValueFactory.Get_One(Th, Tc,Fr));
        //C.AddInitialValu(VariableNames.Temperature, X => (Tc - Th) / 1 * X[0] + Th);
        //C.AddInitialValue.Add(VariableNames.Temperature, X => X[0] * X[0] + X[1] * X[1] + 1);
        C.AddInitialValue(VariableNames.ThermodynamicPressure,BoundaryValueFactory.Get_One(Th, Tc,Fr));
        controls.Add(C);
    }
}
Console.WriteLine(controls.Count);
```
## Run Simulations
```
// List the configured execution queues, then pick the fourth one as the batch system.
BoSSSshell.ExecutionQueues.ForEach(q => Console.WriteLine(q))
var myBatch = BoSSSshell.ExecutionQueues[3];
// Allow jobs in this queue to write into our database.
myBatch.AllowedDatabasesPaths.Add(new AllowedDatabasesPair(myDb.Path,""));
Type solver = typeof(BoSSS.Application.XNSEC.XNSEC);
// Submit one job per control object.
foreach(var c in controls) {
    string jobName = c.SessionName;
    var oneJob = new Job(jobName, solver);
    // More MPI ranks for finer grids: Res > 2 -> 4 procs, Res > 4 -> 8 procs, else 1.
    int noOfProcs = Convert.ToInt32(c.Paramstudy_CaseIdentification[0].Item2) > 2 ? 4:1;
    noOfProcs = Convert.ToInt32(c.Paramstudy_CaseIdentification[0].Item2) > 4 ? 8:noOfProcs;
    oneJob.NumberOfMPIProcs = noOfProcs;
    oneJob.SetControlObject(c);
    oneJob.Activate(myBatch);
}
// Wait until every submitted job has finished before continuing.
BoSSSshell.WorkflowMgm.BlockUntilAllJobsTerminate();
```
## Post Processing: h-convergence study
```
// h-convergence post-processing.
// NOTE(review): `Tab` is declared three times below — these lines came from
// separate notebook cells; in one script this would be a redeclaration error.
var Tab = BoSSSshell.WorkflowMgm.SessionTable;
BoSSSshell.WorkflowMgm.hConvergence.Update();
var Tab = BoSSSshell.WorkflowMgm.SessionTable; // The session table has to be created again in order to see the results from hconvergence
Tab.GetColumnNames().Skip(350)
var Tab = BoSSSshell.WorkflowMgm.SessionTable;
// Keep only the columns needed for the temperature error plot.
var Tab2 = Tab.ExtractColumns("DGdegree:Temperature","Grid:hMin","Grid:NoOfCells" ,"L2Error_Temperature");
//Extract last point, the one against we are comparing our solutions with
Tab2 = Tab2.ExtractRows((iRow,RowEntries)=> Convert.ToInt32(RowEntries["Grid:NoOfCells"]) !=65536 );
Tab2 = Tab2.ExtractRows((iRow,RowEntries)=> Convert.ToInt32(RowEntries["Grid:NoOfCells"]) !=64 );
// log-log plot of L2 temperature error over mesh size, grouped by DG degree
var ErrorPlot = Tab2.ToPlot("Grid:hMin","L2Error_Temperature", "DGdegree:Temperature");
ErrorPlot.LogX = true;
ErrorPlot.LogY = true;
ErrorPlot.PlotNow() // No semicolon!!
ErrorPlot.Regression()
// string dataPathResultsBase = @"C:\Users\Gutierrez\Documents\data\convergenceProblemData\" ;
string dataPathResultsBase = @"C:\Users\gutierrez\Dropbox\CombustionDGPaper\Paper\data\HSC_convstudy_XNSEC";
string folderpath = dataPathResultsBase ;// + ending +"LowTemp"+ @"\";
System.IO.Directory.CreateDirectory(folderpath); // Create folder for storing data
// string[] varnames = new string[]{"VelocityX","VelocityY","Temperature", "Pressure", "ThermodynamicPressure" };//
string[] varnames = new string[]{"Pressure", "ThermodynamicPressure" };//
// Export estimated grid-convergence data per variable and DG degree.
foreach (var varname in varnames) {
    foreach (int pDeg in DGdegree) {
        var pDegSessions = myDb.Sessions.Where(
            Si => (Convert.ToInt32(Si.KeysAndQueries["DGdegree:Velocity*"]) == pDeg)
        ).ToArray();
        Plot2Ddata pDegPlot =
            pDegSessions.ToEstimatedGridConvergenceData(varname,
                xAxis_Is_hOrDof: true, // false selects DOFs for x-axis
                normType: NormType.L2_embedded );
        //Saving to a txt file
        pDegPlot.SaveTextFileToPublish(folderpath+"\\"+ varname + "DG" + pDeg, false);
        Console.WriteLine(folderpath+"\\"+ varname + "DG" + pDeg);
    }
}
myDb.Sessions
// Also open the local database for comparison.
var db2 = OpenOrCreateDatabase(@"C:\Databases\BoSSS_DB");
db2.Sessions[0]
```
| github_jupyter |
<a href="https://colab.research.google.com/github/simecek/dspracticum2020/blob/master/lecture_02/01_one_neuron_and_MPG_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
print(tf.__version__)
```
## Data
```
# Download the Auto-MPG dataset from the UCI repository.
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']
# '?' marks missing values; '\t' begins trailing comment text on each row
raw_dataset = pd.read_csv(url, names=column_names,
                          na_values='?', comment='\t',
                          sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
# because of missing values in the Horsepower column
dataset = dataset.dropna()
dataset.tail()
# split the dataset into two parts (train & test), 80/20, with a fixed seed
train_dataset = dataset.sample(frac=0.8, random_state=42)
test_dataset = dataset.drop(train_dataset.index)
train_dataset.shape, test_dataset.shape
# separate label column (MPG) from the feature columns
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
train_features.shape, test_features.shape, train_labels.shape, test_labels.shape
```
## Predict MPG (miles per gallon) from Horsepower
```
sns.scatterplot(data=dataset, x="Horsepower", y="MPG");
horsepower = np.array(train_features['Horsepower'])
# we will use train data to estimate average and SD of horsepower and
# get transformation to zero mean and unit variance
horsepower_normalizer = preprocessing.Normalization(input_shape=[1,])
horsepower_normalizer.adapt(horsepower)
normalized_horsepower = np.array(horsepower_normalizer(horsepower))
normalized_horsepower.mean(), normalized_horsepower.std()
# model
horsepower_model = tf.keras.Sequential([
horsepower_normalizer,
layers.Dense(units=1)
])
horsepower_model.summary()
# model compilation
horsepower_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
%%time
history = horsepower_model.fit(
train_features['Horsepower'], train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
print(history.history['val_loss'][-1:])
def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    for key, lbl in (('loss', 'loss'), ('val_loss', 'val_loss')):
        plt.plot(history.history[key], label=lbl)
    plt.ylim([0, 10])
    plt.xlabel('Epoch')
    plt.ylabel('Error [MPG]')
    plt.legend()
    plt.grid(True)
plot_loss(history)
# prediction on test data
test_data_predictions = horsepower_model.predict(test_features['Horsepower'])
def plot_horsepower(preds):
    """Scatter the test data and overlay the model's predictions (black line)."""
    hp = test_features['Horsepower']
    plt.scatter(hp, test_labels, label='Data')
    plt.plot(hp, preds, color='k', label='Predictions')
    plt.xlabel('Horsepower')
    plt.ylabel('MPG')
    plt.legend()
plot_horsepower(test_data_predictions)
# evaluation on test data
test_evaluation = {}
test_evaluation['horsepower_model'] = horsepower_model.evaluate(
test_features['Horsepower'],
test_labels, verbose=0)
test_evaluation
```
## Predict MPG (miles per gallon) from Other Features
```
sns.pairplot(train_dataset[['MPG', 'Cylinders', 'Displacement', 'Weight']], diag_kind='kde');
train_dataset.describe().transpose()
# normalizer for all feature columns
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))
# model definition
linear_model = tf.keras.Sequential([
normalizer,
layers.Dense(units=1)
])
# you can even do prediction from (untrained) model or look what is his weights
print(linear_model.predict(train_features[:10]))
# parameters (weights and bias)
linear_model.layers[1].kernel, linear_model.layers[1].bias
linear_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
%%time
history = linear_model.fit(
train_features, train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
plot_loss(history)
```
| github_jupyter |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
<!--NAVIGATION-->
< [Feature Engineering](05.04-Feature-Engineering.ipynb) | [Contents](Index.ipynb) | [In Depth: Linear Regression](05.06-Linear-Regression.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.05-Naive-Bayes.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
# In Depth: Naive Bayes Classification
The previous four sections have given a general overview of the concepts of machine learning.
In this section and the ones that follow, we will be taking a closer look at several specific algorithms for supervised and unsupervised learning, starting here with naive Bayes classification.
Naive Bayes models are a group of extremely fast and simple classification algorithms that are often suitable for very high-dimensional datasets.
Because they are so fast and have so few tunable parameters, they end up being very useful as a quick-and-dirty baseline for a classification problem.
This section will focus on an intuitive explanation of how naive Bayes classifiers work, followed by a couple examples of them in action on some datasets.
## Bayesian Classification
Naive Bayes classifiers are built on Bayesian classification methods.
These rely on Bayes's theorem, which is an equation describing the relationship of conditional probabilities of statistical quantities.
In Bayesian classification, we're interested in finding the probability of a label given some observed features, which we can write as $P(L~|~{\rm features})$.
Bayes's theorem tells us how to express this in terms of quantities we can compute more directly:
$$
P(L~|~{\rm features}) = \frac{P({\rm features}~|~L)P(L)}{P({\rm features})}
$$
If we are trying to decide between two labels—let's call them $L_1$ and $L_2$—then one way to make this decision is to compute the ratio of the posterior probabilities for each label:
$$
\frac{P(L_1~|~{\rm features})}{P(L_2~|~{\rm features})} = \frac{P({\rm features}~|~L_1)}{P({\rm features}~|~L_2)}\frac{P(L_1)}{P(L_2)}
$$
All we need now is some model by which we can compute $P({\rm features}~|~L_i)$ for each label.
Such a model is called a *generative model* because it specifies the hypothetical random process that generates the data.
Specifying this generative model for each label is the main piece of the training of such a Bayesian classifier.
The general version of such a training step is a very difficult task, but we can make it simpler through the use of some simplifying assumptions about the form of this model.
This is where the "naive" in "naive Bayes" comes in: if we make very naive assumptions about the generative model for each label, we can find a rough approximation of the generative model for each class, and then proceed with the Bayesian classification.
Different types of naive Bayes classifiers rest on different naive assumptions about the data, and we will examine a few of these in the following sections.
We begin with the standard imports:
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
```
## Gaussian Naive Bayes
Perhaps the easiest naive Bayes classifier to understand is Gaussian naive Bayes.
In this classifier, the assumption is that *data from each label is drawn from a simple Gaussian distribution*.
Imagine that you have the following data:
```
from sklearn.datasets import make_blobs
X, y = make_blobs(100, 2, centers=2, random_state=2, cluster_std=1.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu');
```
One extremely fast way to create a simple model is to assume that the data is described by a Gaussian distribution with no covariance between dimensions.
This model can be fit by simply finding the mean and standard deviation of the points within each label, which is all you need to define such a distribution.
The result of this naive Gaussian assumption is shown in the following figure:

[figure source in Appendix](06.00-Figure-Code.ipynb#Gaussian-Naive-Bayes)
The ellipses here represent the Gaussian generative model for each label, with larger probability toward the center of the ellipses.
With this generative model in place for each class, we have a simple recipe to compute the likelihood $P({\rm features}~|~L_1)$ for any data point, and thus we can quickly compute the posterior ratio and determine which label is the most probable for a given point.
This procedure is implemented in Scikit-Learn's ``sklearn.naive_bayes.GaussianNB`` estimator:
```
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X, y);
```
Now let's generate some new data and predict the label:
```
rng = np.random.RandomState(0)
Xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)
ynew = model.predict(Xnew)
```
Now we can plot this new data to get an idea of where the decision boundary is:
```
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')
lim = plt.axis()
plt.scatter(Xnew[:, 0], Xnew[:, 1], c=ynew, s=20, cmap='RdBu', alpha=0.1)
plt.axis(lim);
```
We see a slightly curved boundary in the classifications—in general, the boundary in Gaussian naive Bayes is quadratic.
A nice piece of this Bayesian formalism is that it naturally allows for probabilistic classification, which we can compute using the ``predict_proba`` method:
```
yprob = model.predict_proba(Xnew)
yprob[-8:].round(2)
```
The columns give the posterior probabilities of the first and second label, respectively.
If you are looking for estimates of uncertainty in your classification, Bayesian approaches like this can be a useful approach.
Of course, the final classification will only be as good as the model assumptions that lead to it, which is why Gaussian naive Bayes often does not produce very good results.
Still, in many cases—especially as the number of features becomes large—this assumption is not detrimental enough to prevent Gaussian naive Bayes from being a useful method.
## Multinomial Naive Bayes
The Gaussian assumption just described is by no means the only simple assumption that could be used to specify the generative distribution for each label.
Another useful example is multinomial naive Bayes, where the features are assumed to be generated from a simple multinomial distribution.
The multinomial distribution describes the probability of observing counts among a number of categories, and thus multinomial naive Bayes is most appropriate for features that represent counts or count rates.
The idea is precisely the same as before, except that instead of modeling the data distribution with the best-fit Gaussian, we model the data distribution with a best-fit multinomial distribution.
### Example: Classifying Text
One place where multinomial naive Bayes is often used is in text classification, where the features are related to word counts or frequencies within the documents to be classified.
We discussed the extraction of such features from text in [Feature Engineering](05.04-Feature-Engineering.ipynb); here we will use the sparse word count features from the 20 Newsgroups corpus to show how we might classify these short documents into categories.
Let's download the data and take a look at the target names:
```
from sklearn.datasets import fetch_20newsgroups
data = fetch_20newsgroups()
data.target_names
```
For simplicity here, we will select just a few of these categories, and download the training and testing set:
```
categories = ['talk.religion.misc', 'soc.religion.christian',
'sci.space', 'comp.graphics']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)
```
Here is a representative entry from the data:
```
print(train.data[5])
```
In order to use this data for machine learning, we need to be able to convert the content of each string into a vector of numbers.
For this we will use the TF-IDF vectorizer (discussed in [Feature Engineering](05.04-Feature-Engineering.ipynb)), and create a pipeline that attaches it to a multinomial naive Bayes classifier:
```
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
```
With this pipeline, we can apply the model to the training data, and predict labels for the test data:
```
model.fit(train.data, train.target)
labels = model.predict(test.data)
```
Now that we have predicted the labels for the test data, we can evaluate them to learn about the performance of the estimator.
For example, here is the confusion matrix between the true and predicted labels for the test data:
```
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(test.target, labels)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=train.target_names, yticklabels=train.target_names)
plt.xlabel('true label')
plt.ylabel('predicted label');
```
Evidently, even this very simple classifier can successfully separate space talk from computer talk, but it gets confused between talk about religion and talk about Christianity.
This is perhaps an expected area of confusion!
The very cool thing here is that we now have the tools to determine the category for *any* string, using the ``predict()`` method of this pipeline.
Here's a quick utility function that will return the prediction for a single string:
```
def predict_category(s, train=train, model=model):
    """Return the predicted newsgroup name for a single text string *s*."""
    label_index = model.predict([s])[0]
    return train.target_names[label_index]
```
Let's try it out:
```
predict_category('sending a payload to the ISS')
predict_category('discussing islam vs atheism')
predict_category('determining the screen resolution')
```
Remember that this is nothing more sophisticated than a simple probability model for the (weighted) frequency of each word in the string; nevertheless, the result is striking.
Even a very naive algorithm, when used carefully and trained on a large set of high-dimensional data, can be surprisingly effective.
## When to Use Naive Bayes
Because naive Bayesian classifiers make such stringent assumptions about data, they will generally not perform as well as a more complicated model.
That said, they have several advantages:
- They are extremely fast for both training and prediction
- They provide straightforward probabilistic prediction
- They are often very easily interpretable
- They have very few (if any) tunable parameters
These advantages mean a naive Bayesian classifier is often a good choice as an initial baseline classification.
If it performs suitably, then congratulations: you have a very fast, very interpretable classifier for your problem.
If it does not perform well, then you can begin exploring more sophisticated models, with some baseline knowledge of how well they should perform.
Naive Bayes classifiers tend to perform especially well in one of the following situations:
- When the naive assumptions actually match the data (very rare in practice)
- For very well-separated categories, when model complexity is less important
- For very high-dimensional data, when model complexity is less important
The last two points seem distinct, but they actually are related: as the dimension of a dataset grows, it is much less likely for any two points to be found close together (after all, they must be close in *every single dimension* to be close overall).
This means that clusters in high dimensions tend to be more separated, on average, than clusters in low dimensions, assuming the new dimensions actually add information.
For this reason, simplistic classifiers like naive Bayes tend to work as well or better than more complicated classifiers as the dimensionality grows: once you have enough data, even a simple model can be very powerful.
<!--NAVIGATION-->
< [Feature Engineering](05.04-Feature-Engineering.ipynb) | [Contents](Index.ipynb) | [In Depth: Linear Regression](05.06-Linear-Regression.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.05-Naive-Bayes.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
| github_jupyter |
📍 **Project Title** : Digit Recognizer Project
📍 **Aim of the Project** : This project will classify different digits and predict accordingly.
📍 **Dataset** : https://www.kaggle.com/c/digit-recognizer/data
📍 **Libraries used :** ```Numpy, Pandas, Matplotlib, Seaborn, Tensorflow, Keras```
*********************************************************************
## Importing Libraries
```
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from keras import Sequential
from keras.layers import Dense
from keras.utils import np_utils
import random
```
## Dataset
- **Train Dataset:**
> X_train -> (images)
> y_train -> (labels)
- **Test Dataset:**
> X_test -> (images)
> y_test -> (labels)
```
(X_train, y_train) , (X_test, y_test) = keras.datasets.mnist.load_data()
X_train.shape
y_train
X_test.shape
y_test
# view of actual dataset
X_train[0]
```
## Data Preprocessing and Visualisation
## Data Preprocessing
#### Scaling the Data
**We know that RGB values ranges from ```0 to 255``` so, dividing the images(values) with 255 will help us to scale the values between ```0 to 1```.**
```
X_train = X_train / 255
X_test = X_test / 255
print('Dataset Data Type: ', X_train.dtype)
print('Maximum Value: ' ,X_train.max())
print('Minimum Value: ', X_train.min())
```
### Data Visualisation
#### y_train (labels) - Count
```
sns.countplot(y_train)
plt.tight_layout()
plt.show()
```
#### Displaying the Data (random 9 images)
```
# show 9 images (drawn randomly from the first 100 training samples) in a 3x3 grid
for i in range(9):
    plt.subplot(330 + 1 + i)
    # hide axis ticks so only the digit image is visible
    plt.xticks([])
    plt.yticks([])
    plt.imshow(X_train[random.randint(0, 100)])
plt.subplots_adjust(left=0.1,
                    right=0.9,
                    top=1.5,
                    wspace=0.4,
                    hspace=0.4)
```
## Model Building
### Model - 1
```
print(y_test.shape)
# one-hot encode the integer labels (10 classes -> vectors of length 10)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
print(y_test.shape)
# simple fully-connected net: flatten 28x28 -> Dense 128 (ReLU) -> softmax over 10 digits
model_1 = keras.Sequential([
    keras.layers.Flatten(input_shape=(28,28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
model_1.summary()
# categorical_crossentropy matches the one-hot labels created above
model_1.compile(
    loss = 'categorical_crossentropy',
    optimizer = 'adam',
    metrics = ['accuracy']
)
X_train = X_train.reshape(-1,28,28)
X_test = X_test.reshape(-1, 28,28)
model_1.fit(X_train, y_train, epochs= 10, batch_size = 100)
# evaluate on the held-out test set and report accuracy in percent
scores_basic = model_1.evaluate(X_test, y_test)
print('Accuracy: ',scores_basic[1] * 100)
```
### Model - 2
```
(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()
train_images = train_images.reshape(-1,28,28,1)
test_images = test_images.reshape(-1,28,28,1)
train_images.shape
plt.imshow(np.squeeze(train_images[0]))
plt.show()
model_2 = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation=tf.nn.relu, input_shape=(28,28,1)),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation=tf.nn.relu),
tf.keras.layers.MaxPool2D(pool_size=(2,2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model_2.summary()
model_2.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model_2.fit(train_images, train_labels, epochs=4)
scores_basic = model_2.evaluate(test_images, test_labels)
print('Accuracy: ',scores_basic[1] * 100)
cnn_prediction = model_2.predict(test_images)
model_2_predict_labels = [np.argmax(i) for i in cnn_prediction]
for i in range(5):
plt.subplot(3, 3, 1+i)
plt.xticks([])
plt.yticks([])
plt.title("predicted: " + str(model_2_predict_labels[i]))
plt.imshow(np.squeeze(test_images[i]))
plt.subplots_adjust(left=0.1,
right=0.9,
top=1.5,
wspace=0.4,
hspace=0.4)
```
| github_jupyter |
# Experiment: Presidential Campaigns Ads Dataset - Feature Extraction -
This notebook shows how to use cloud services via REST APIs to convert audio to text and to analyze the extracted text and frame contents. Using the files previously collected (see Experiment: Presidential Campaigns Ads Dataset - Data Collection -), you are going to use cognitive services — text analytics, speech recognition and optical character recognition — which are among the most powerful tools for caption generation, audio transcription and text analytics offered by the Microsoft Azure public cloud. By using these tools you will be able to extract features from a widely available source: YouTube videos. We are going to use the audio files (.WAV format) and the frames extracted from them (.JPG format) stored in the data folder in the main repository folder.

# Table of Contents
* [Experiment: Predict Elections Outcomes Using Presidential Commercial Campaign](#Experiment:-Predict-Elections-Outcomes-Using-Presidential-Commercial-Campaign)
* [Feature engineering: extract data using Microsoft Azure public cloud services](#Paragraph-3)
* [Set up containers and upload files: audio & image](#Set-up-containers-and-upload-files-audio-&-image)
* [Extract speech from audio using Speech Recognition API](#Extract-speech-from-audio-using-Bing-Speech-Recognition-API)
* [Extract sentiment and key phrases from text using Text Analytics API](#Extract-sentiment-and-key-phrases-from-text-using-Text-Analytics-API)
* [Extract image contents and text using Vision API](#Extract-image-contents-and-text-using-Vision-API)
* [Presidential Campaigns Ads Dataset](#Presidential-Campaigns-Ads-Dataset)
* [Combine dataframes](#Combine-dataframes)
* [Data type and data description](#Data-type-and-data-description)
* [Recap](#Recap)
* [What you have learnt](#What-you-have-learnt)
* [What are you going to learn](#What-are-you-going-to-learn)
## Feature engineering: extract data using Microsoft Azure public cloud services
### Set up containers and upload files: audio and image (video frames)
To set up containers, follow these steps:
- access Azure Portal using your account [[Link here](https://portal.azure.com)]
- import libraries and run functions we will use to accomplish tasks quickly and without hardcoding
- set directories to import videos and images
- retrieve storage account service credentials from your azure_keys (public_cloud_computing\guides\keys)
- create a container, retrieve the files from the download path and upload them. Repeat the task twice using **`upload_files_to_container()`**: first to upload the audio files and then to upload the image files. The function calls these functions at once:
 - retrieve file names, paths and extensions using **`get_files()`**
 - set the two container names and create the containers (audio and image) using **`make_public_container()`**
 - upload the files to the containers using **`upload_file()`**
#### _Import libraries and functions_
```
#import library
import sys
#import functions from utilities
sys.path.insert(0, "../../guides/utilities/")
try:
from utils import *
except ImportError:
print('No Import')
#import libraries
import os
import time
import pickle
from azure.storage.blob import BlockBlobService, PublicAccess
from azure.storage.blob import ContentSettings
```
#### _Set directories_
```
#Resolve and remember the directories used by the rest of the notebook.
#NOTE: os.chdir() is order-dependent — each relative path below is resolved
#against the *previous* working directory, so do not reorder these lines.
#set notebook current directory
cur_dir = os.getcwd()
#set directory to the folder to import azure keys
os.chdir('../../guides/keys/')
dir_azure_keys = os.getcwd()
#set directory to the folder to import audio files
os.chdir('../../data/video/audio/')
dir_audio_files = os.getcwd()
#set directory to the folder to import image files
os.chdir(cur_dir)
os.chdir('../../data/image/frames/')
dir_image_files = os.getcwd()
#NOTE(review): the working directory is left at the frames folder here —
#later relative paths (e.g. '../../dataset/...') resolve from it; confirm intended.
#print directories where files are going to be read from
print('---------------------------------------------------------')
print('Your documents directories are:')
print('- notebook:\t', cur_dir)
print('- azure keys:\t', dir_azure_keys)
print('- audio files:\t', dir_audio_files)
print('- image files:\t', dir_image_files)
print('---------------------------------------------------------')
```
#### _Retrieve storage account credentials_
```
#Load the storage-account name and API key used by every upload/download cell below.
#ERASE MY PATH BEFORE RELEASING THE WORKSHOP MATERIALS
my_path_to_keys = 'C:/Users/popor/Desktop/keys/'
#set service name, path to the keys and keys file name
SERVICE_NAME = 'STORAGE' #options: STORAGE, FACE, COMPUTER_VISION, SPEECH_RECOGNITION, TEXT_ANALYTICS, ML_STUDIO
PATH_TO_KEYS = my_path_to_keys #workshop attendees: use dir_azure_keys instead
KEYS_FILE_NAME = 'azure_services_keys_v1.1.json' #add file name (eg 'azure_services_keys.json')
#call function to retrieve the credentials for the selected service
#NOTE(review): the helper reads this file with pickle even though the name
#ends in .json — confirm the actual file format.
storage_keys = retrieve_keys(SERVICE_NAME, PATH_TO_KEYS, KEYS_FILE_NAME)
#set storage name and keys
STORAGE_NAME = storage_keys['NAME']
STORAGE_KEY = storage_keys['API_KEY']
```
#### _Create container, get files and upload audio files_
```
#Create the audio container and upload every .wav file in DIR_FILES.
#set a name for a new container
NEW_CONTAINER_NAME ='myaudio'
#set the audio file directory
DIR_FILES = dir_audio_files
#MIME prefix; the file extension is appended by upload_file() ('audio/x-' + 'wav')
CONTENT_TYPE = 'audio/x-'
#fix: upload_files_to_container() requires the content type as its fifth argument
upload_files_to_container(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME, DIR_FILES, CONTENT_TYPE)
```
#### _Create container, get files and upload image files_
```
#Create the image container and upload every video frame in DIR_FILES.
#set a name for a new container
NEW_CONTAINER_NAME ='myimage'
#set the image file directory
DIR_FILES = dir_image_files
#MIME prefix; the file extension is appended by upload_file() ('image/' + 'jpg')
CONTENT_TYPE = 'image/'
#fix: upload_files_to_container() requires the content type as its fifth argument
upload_files_to_container(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME, DIR_FILES, CONTENT_TYPE)
def get_list_blob(STORAGE_NAME, STORAGE_KEY, CONTAINER_NAME):
    """Return the names of all blobs stored in the given container."""
    service = BlockBlobService(account_name=STORAGE_NAME, account_key=STORAGE_KEY)
    return [blob.name for blob in service.list_blobs(CONTAINER_NAME)]
```
## Extract audio script using Speech Recognition
To extract text from the audio files uploaded to cloud storage previously, follow these steps:
- access Azure Portal using your account [[Link here](https://portal.azure.com)]
- retrieve speech recognition service credentials and configure API to access cloud service
- request speech recognition services to the public cloud for each audio file
- extract text from each response
- recompose the script of each video, collect results into a dataframe and save it as tabular dataset
#### _Retrieve speech recognition service credentials and configure API to access cloud service_
```
#Configure credentials, endpoints, headers and parameters for the
#Bing Speech Recognition REST API.
# import libraries
import requests
import urllib
import uuid
import json
#set service name
SERVICE_NAME = 'SPEECH_RECOGNITION' #options: STORAGE, FACE, COMPUTER_VISION, SPEECH_RECOGNITION, TEXT_ANALYTICS, ML_STUDIO
#call function to retrieve keys
storage_keys = retrieve_keys(SERVICE_NAME, PATH_TO_KEYS, KEYS_FILE_NAME)
#set speech recognition keys
SPEECH_RECOGNITION_KEY = storage_keys['API_KEY']
#configure API access to request speech recognition service
URI_TOKEN_SPEECH = 'https://api.cognitive.microsoft.com/sts/v1.0/issueToken'
URL_SPEECH = 'https://speech.platform.bing.com/recognize'
#fix: request an access token *before* building the Authorization header —
#the original referenced `access_token` here before it was ever assigned
headers_token = {}
headers_token['Ocp-Apim-Subscription-Key'] = SPEECH_RECOGNITION_KEY
headers_token['Content-Length'] = '0'
api_response = requests.post(URI_TOKEN_SPEECH, headers=headers_token)
access_token = str(api_response.content.decode('utf-8'))
#set api request REST headers
headers_api = {}
headers_api['Authorization'] = 'Bearer {0}'.format(access_token)
headers_api['Content-type'] = 'audio/wav'
headers_api['codec'] = 'audio/pcm'
headers_api['samplerate'] = '16000'
#set api request parameters
params_set = {}
params_set['scenarios'] = 'ulm'
params_set['appid'] = 'D4D52672-91D7-4C74-8AD8-42B1D98141A5'
params_set['locale'] = 'en-US'
params_set['device.os'] = 'PC'
params_set['version'] = '3.0'
params_set['format'] = 'json'
params_set['instanceid'] = str(uuid.uuid1())
params_set['requestid'] = str(uuid.uuid1())
```
#### _Request speech recognition service to the public cloud for each audio file_
```
#Send every uploaded .wav blob to the speech recognition service and collect
#both the raw HTTP responses and the parsed JSON payloads.
#set container to retrieve files from
CONTAINER_NAME = 'myaudio'
#get list of blob names and urls
blob_name_list, blob_url_list = retrieve_blob_list(STORAGE_NAME, STORAGE_KEY, CONTAINER_NAME)
#store http response and json payload per audio chunk
responses = []
http_responses = []
#set procedure starting time
print('---------------------------------------------------------')
print("Start speech to text conversion")
print('---------------------------------------------------------')
start = time.time()
#run speech recognition on uploaded audio files (i.e. extension .wav)
for blob_name in blob_name_list:
    if blob_name.split('.')[-1] == 'wav':
        #request a fresh access token per file (tokens expire)
        headers_token = {}
        headers_token['Ocp-Apim-Subscription-Key'] = SPEECH_RECOGNITION_KEY
        headers_token['Content-Length'] = '0'
        api_response = requests.post(URI_TOKEN_SPEECH, headers=headers_token)
        access_token = str(api_response.content.decode('utf-8'))
        #fix: propagate the fresh token into the request headers — the
        #original fetched a token here but never updated headers_api
        headers_api['Authorization'] = 'Bearer {0}'.format(access_token)
        #download the blob as bytes
        blob_service = BlockBlobService(STORAGE_NAME, STORAGE_KEY)
        blob = blob_service.get_blob_to_bytes(CONTAINER_NAME, blob_name)
        #request the speech recognition service
        params = urllib.parse.urlencode(params_set)
        api_response = requests.post(URL_SPEECH, headers=headers_api, params=params, data=blob.content)
        print('{} had a {} response'.format(blob_name, api_response))
        #extract data from response
        res_json = json.loads(api_response.content.decode('utf-8'))
        http_responses.append(api_response)
        responses.append(res_json)
#set procedure ending time
end = time.time()
print('---------------------------------------------------------')
print('Conversion completed')
print('---------------------------------------------------------')
#fix: the original message was truncated ('It took {} seconds to ')
print('It took {} seconds to convert speech to text'.format(round(end - start, 2)))
```
#### _Extract text from each response_
```
#Flatten the speech-service responses into parallel columns, one entry per
#audio chunk, then assemble them into a log dataframe.
status = []
name = []
lexical = []
request_id = []
confidence = []
#pull the fields we keep out of each response
for response in responses:
    header = response['header']
    if header['status'] == 'success':
        status.append(header['status'])
        name.append(header['name'])
        lexical.append(header['lexical'])
        request_id.append(header['properties']['requestid'])
        confidence.append(response['results'][0]['confidence'])
    else:
        #failed recognition: keep the lists aligned with placeholders
        status.append('Error')
        name.append('Nan')
        lexical.append('Nan')
        request_id.append('Nan')
        confidence.append('Nan')
#combine the columns into a dataframe
df_log_response = pd.DataFrame({'file_name' : blob_name_list,
                                'stt_http_response' : http_responses,
                                'stt_id' : request_id,
                                'stt_status' : status,
                                'stt_name' : name,
                                'stt_text' : lexical,
                                'stt_confidence' : confidence})
#display df
df_log_response.head()
```
#### _Recompose the script of each video, collect results into a dataframe_
```
#Recompose the per-chunk speech-to-text output into one full script per video.
#NOTE(review): `pd` (pandas) is used below but not imported in this cell —
#presumably imported earlier in the notebook session; confirm.
dict_speech_recognition = dict()
video_list = ['eisenhower_for_president_1952',
              '1988_george_bush_sr_revolving_door_attack_ad_campaign',
              'high_quality_famous_daisy_attack_ad_from_1964_presidential_election',
              'humphrey_laughing_at_spiro_agnew_1968_political_ad',
              'kennedy_for_me_campaign_jingle_jfk_1960',
              'mcgovern_defense_plan_ad_nixon_1972_presidential_campaign_commercial',
              'ronald_reagan_tv_ad_its_morning_in_america_again',
              'bill_clinton_hope_ad_1992',
              'historical_campaign_ad_windsurfing_bushcheney_04',
              'yes_we_can__barack_obama_music_video']
#extract the recognized text for each video and join the chunks
for name in video_list:
    dict_name = dict()
    audio_text = []
    #chunks are matched to a video by substring of the blob file name
    for i, entry in enumerate(df_log_response.loc[:,'file_name']):
        if name in entry:
            if df_log_response.loc[i, 'stt_text'] != 'Nan':
                audio_text.append(df_log_response.loc[i, 'stt_text'])
            #uncomment the lines below to keep track of empty audio chunks
            #else:
                #audio_text.append('Nan')
    #count words across all recognized chunks
    n_words = []
    for words in audio_text:
        n_words.append(int(len(words.split(' '))))
    words_count = sum(n_words)
    joined_audio = " ".join(audio_text)
    dict_name['stt_text'] = joined_audio
    dict_name['stt_words_count'] = words_count
    dict_speech_recognition[name] = dict_name
#convert dictionary to df (one row per video)
df_speech_to_text = pd.DataFrame.from_dict(dict_speech_recognition , orient='index').reset_index()
df_speech_to_text.columns = 'video_title', 'stt_text', 'stt_words_count'
#display dataframe
df_speech_to_text
#print the full extracted script of a selected ad
#NOTE(review): replace(' ', ' ') is a no-op — probably meant to collapse
#double spaces and was mangled by the export; confirm against the original.
print('---------------------------------------------------------')
print('The entire script of the video {} is:'.format(df_speech_to_text.loc[1,'video_title']))
print('---------------------------------------------------------')
print('{}"'.format(df_speech_to_text.loc[1,'stt_text'].replace('Nan', '').replace(' ', ' ')))
```
### Extract sentiment and key phrases from text using Text Analytics API
To extract sentiment and key phrases from the text, follows these steps:
- retrieve text analytics service credentials and configure API to access service
- use text from the audio files
- request sentiment analysis and extract key phrases services to the public cloud for each script
- collect results into a dataframe and save it as tabular dataset
#### _Retrieve text analytics service credentials and configure API to access service_
```
#Configure credentials, endpoints and headers for the Text Analytics REST API.
SERVICE_NAME = 'TEXT_ANALYTICS' #options: STORAGE, FACE, COMPUTER_VISION, SPEECH_RECOGNITION, TEXT_ANALYTICS, ML_STUDIO
#retrieve the subscription key for the text analytics service
storage_keys = retrieve_keys(SERVICE_NAME, PATH_TO_KEYS, KEYS_FILE_NAME)
TEXT_ANALYTICS_KEY = storage_keys['API_KEY']
#endpoints for sentiment scoring and key-phrase extraction
URI_SENTIMENT = 'https://eastus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'
URI_KEY_PHRASES = 'https://eastus.api.cognitive.microsoft.com/text/analytics/v2.0/keyPhrases'
#REST headers shared by both requests
headers = {
    'Ocp-Apim-Subscription-Key': TEXT_ANALYTICS_KEY,
    'Content-Type': 'application/json',
    'Accept': 'application/json',
}
```
#### _Request sentiment analysis and extract key phrases services to the public cloud for each script_
```
#Run sentiment scoring and key-phrase extraction on each video script and
#attach the results as new columns of df_speech_to_text.
print('--------------------------------------')
print("Start text analysis")
print('--------------------------------------')
start = time.time()
#store text analysis results, one entry per video
sentiment_text = []
key_phrases = []
sentiment_mean_key_phrases = []
for i, entry in enumerate(df_speech_to_text.index):
    #NOTE(review): replace(' ', ' ') is a no-op — probably meant to collapse
    #double spaces and was mangled by the export; confirm.
    text = df_speech_to_text.loc[i,'stt_text'].replace('Nan', '').replace(' ', ' ')
    #sentiment request for the full script
    data = json.dumps({"documents":[{"id":str(uuid.uuid1()), "language":"en", "text":text}]}).encode('utf-8')
    request = urllib.request.Request(URI_SENTIMENT, data, headers)
    response = urllib.request.urlopen(request)
    responsejson = json.loads(response.read().decode('utf-8'))
    try:
        sentiment = responsejson['documents'][0]['score']
    except (KeyError, IndexError):
        sentiment = 'Nan'
    sentiment_text.append(sentiment)
    #key-phrase request reuses the same payload
    request = urllib.request.Request(URI_KEY_PHRASES, data, headers)
    response = urllib.request.urlopen(request)
    responsejson = json.loads(response.read().decode('utf-8'))
    try:
        key_phrase = responsejson['documents'][0]['keyPhrases']
    except (KeyError, IndexError):
        key_phrase = 'Nan'
    key_phrases.append(key_phrase)
    #sentiment for each extracted key phrase
    sentiment_key_phrases = []
    #fix: only iterate real key-phrase lists — iterating the placeholder
    #string 'Nan' would request sentiment for the characters 'N', 'a', 'n'
    if isinstance(key_phrase, list):
        for key in key_phrase:
            data = json.dumps({"documents":[{"id":str(uuid.uuid1()), "language":"en", "text":key}]}).encode('utf-8')
            request = urllib.request.Request(URI_SENTIMENT, data, headers)
            response = urllib.request.urlopen(request)
            responsejson = json.loads(response.read().decode('utf-8'))
            sentiment = responsejson['documents'][0]['score']
            sentiment_key_phrases.append(round(sentiment, 2))
            #throttle to respect the free-tier rate limit
            time.sleep(1)
    #fix: guard against ZeroDivisionError when no key phrase was returned
    if sentiment_key_phrases:
        sentiment_mean = sum(sentiment_key_phrases)/len(sentiment_key_phrases)
    else:
        sentiment_mean = 'Nan'
    sentiment_mean_key_phrases.append(sentiment_mean)
#assign new columns to df_speech_to_text
df_speech_to_text['ta_sentiment_text'] = sentiment_text
df_speech_to_text['ta_key_phrases'] = key_phrases
df_speech_to_text['ta_sentiment_key_phrases'] = sentiment_mean_key_phrases
#set procedure ending time
end = time.time()
print('Text analysis completed')
print('--------------------------------------')
#fix: the original message omitted the unit ('It took {} to perform ...')
print('It took {} seconds to perform text analysis'.format(round(end - start, 2)))
```
#### _Collect results into a dataframe and save it as tabular dataset_
```
#Persist the speech-to-text + text-analytics results as a tabular dataset.
#make a copy of the dataset
df_text_analysis = df_speech_to_text.copy()
#save dataframe to folder (path resolves against the current working directory)
df_text_analysis.to_csv('../../dataset/data_extraction_text_analysis.csv', sep=',', encoding='utf-8')
#display dataset with text analytics
df_text_analysis
```
## Extract image contents and text using Vision API
To extract contents and text from the image frames uploaded to cloud storage previously, follow these steps:
- access Azure Portal using your account [Link here]
- retrieve computer vision service credentials and configure API to access analyze image and optical character recognition services
- request analyze image and optical character recognition services to the public cloud for each script
- extract text from each response
- recompose the script of each video, collect results into a dataframe and save it as tabular dataset
- collect results into a dataframe and save it as tabular dataset
#### _Retrieve computer vision service credentials and configure API to access analyze image and optical character recognition services_
```
#Configure credentials, endpoints, headers and parameters for the Computer
#Vision REST API (analyze-image and OCR services).
SERVICE_NAME = 'COMPUTER_VISION'
#retrieve the subscription key for the computer vision service
storage_keys = retrieve_keys(SERVICE_NAME, PATH_TO_KEYS, KEYS_FILE_NAME)
COMPUTER_VISION_KEY = storage_keys['API_KEY']
#endpoints for image analysis and optical character recognition
URI_ANALYZE = 'https://eastus.api.cognitive.microsoft.com/vision/v1.0/analyze'
URI_OCR = 'https://eastus.api.cognitive.microsoft.com/vision/v1.0/ocr'
#REST headers shared by both services
headers = {
    'Ocp-Apim-Subscription-Key': COMPUTER_VISION_KEY,
    'Content-Type': 'application/json',
    'Accept': 'application/json',
}
#parameters for the analyze-image service
params_set_vis = {'visualFeatures': 'Categories,Tags,Description,Faces,ImageType,Color,Adult'}
#parameters for the ocr service
params_set_ocr = {'language': 'unk', 'detectOrientation': 'false'}
```
#### _Request analyze image and optical character recognition services to the public cloud for each script_
```
#Send every uploaded video frame to the analyze-image and OCR services and
#collect the raw HTTP responses and parsed JSON payloads for both.
#set container to retrieve files from
CONTAINER_NAME = 'myimage'
#get list of BLOB urls and names
blob_name_list, blob_url_list = retrieve_blob_list(STORAGE_NAME, STORAGE_KEY, CONTAINER_NAME)
#store http response and json payload per frame, per service
responses_vis = []
http_responses_vis = []
responses_ocr = []
http_responses_ocr = []
#set procedure starting time
print('-------------------')
print("Start computer vision")
print('-------------------')
start = time.time()
#run both services on every video frame (i.e. extension .jpg)
for i, blob_name in enumerate(blob_url_list):
    #throttle: pause before every 10th request to respect the rate limit.
    #fix: the original *skipped* every 10th blob instead of processing it
    #after the pause, misaligning the response lists with blob_name_list.
    if i != 0 and i % 10 == 0:
        time.sleep(50)
    if blob_name.split('.')[-1] == 'jpg':
        #analyze-image request
        params = urllib.parse.urlencode(params_set_vis)
        url = URI_ANALYZE + '?{0}'.format(params)
        body = '{\'url\':\'' + blob_name + '\'}'
        api_response = requests.post(url, headers=headers, data=body)
        print('{} had a {} from analyze service'.format(blob_name.split('/')[-1], api_response))
        res_json = json.loads(api_response.content.decode('utf-8'))
        http_responses_vis.append(api_response)
        responses_vis.append(res_json)
        #ocr request on the same frame
        params = urllib.parse.urlencode(params_set_ocr)
        url = URI_OCR + '?{0}'.format(params)
        body = '{\'url\':\'' + blob_name + '\'}'
        api_response = requests.post(url, headers=headers, data=body)
        print('{} had a {} from ocr service'.format(blob_name.split('/')[-1], api_response))
        res_json = json.loads(api_response.content.decode('utf-8'))
        http_responses_ocr.append(api_response)
        responses_ocr.append(res_json)
#set procedure ending time
end = time.time()
print('-------------------')
print('Computer Vision completed')
print('-------------------')
#fix: the original message was truncated ('It took {} seconds to ')
print('It took {} seconds to run computer vision'.format(round(end - start, 2)))
```
#### _Extract text from each response_
```
#Flatten the analyze-image and OCR responses into parallel feature lists,
#one entry per frame, then combine them into a log dataframe.
response_status_vis = []
fr_category = []
fr_category_confidence = []
fr_detail_celebrities = []
fr_detail_celebrities_confidence = []
fr_tag_name = []
fr_tag_confidence = []
fr_tag_description = []
fr_caption = []
fr_caption_confidence = []
fr_face_age = []
fr_face_gender = []
#parse each analyze-image response and extract features
for response_vis in responses_vis:
    if next(iter(response_vis)) == 'statusCode':
        #error response: keep every list aligned with 'Nan' placeholders
        response_status_vis.append(response_vis['statusCode'])
        fr_category.append('Nan')
        fr_category_confidence.append('Nan')
        fr_detail_celebrities.append('Nan')
        fr_detail_celebrities_confidence.append('Nan')
        fr_tag_name.append('Nan')
        fr_tag_confidence.append('Nan')
        fr_tag_description.append('Nan')
        fr_caption.append('Nan')
        fr_caption_confidence.append('Nan')
        fr_face_age.append('Nan')
        fr_face_gender.append('Nan')
    else:
        response_status_vis.append('<200>')
        #pick the first category with a relatively high score.
        #fix: the original loop reset its counter inside the loop and used
        #'count =+ 1' (assignment of +1, not an increment), so it effectively
        #only examined the first category and appended *nothing* when the
        #category list was empty, misaligning the feature lists.
        best_category = None
        for category in response_vis['categories']:
            if category['score'] > 0.25:
                best_category = category
                break
        if best_category is not None:
            fr_category.append(best_category['name'].strip('_'))
            fr_category_confidence.append(best_category['score'])
            #extract the first celebrity when the category carries details
            celebrities = best_category.get('detail', {}).get('celebrities', [])
            if celebrities:
                fr_detail_celebrities.append(celebrities[0]['name'])
                fr_detail_celebrities_confidence.append(celebrities[0]['confidence'])
            else:
                fr_detail_celebrities.append('Nan')
                fr_detail_celebrities_confidence.append('Nan')
        else:
            fr_category.append('Nan')
            fr_category_confidence.append('Nan')
            fr_detail_celebrities.append('Nan')
            fr_detail_celebrities_confidence.append('Nan')
        #tags: names and confidences
        fr_tag_name.append([tag['name'] for tag in response_vis['tags']])
        fr_tag_confidence.append([tag['confidence'] for tag in response_vis['tags']])
        #description tags and generated captions
        fr_tag_description.append(list(response_vis['description']['tags']))
        fr_caption.append([cap['text'] for cap in response_vis['description']['captions']])
        fr_caption_confidence.append([cap['confidence'] for cap in response_vis['description']['captions']])
        #faces: age and gender per detected face
        fr_face_age.append([face['age'] for face in response_vis['faces']])
        fr_face_gender.append([face['gender'] for face in response_vis['faces']])
#parse each OCR response and collect the recognized words
response_status = []
fr_ocr_words = []
for response_ocr in responses_ocr:
    if next(iter(response_ocr)) == 'statusCode':
        response_status.append(response_ocr['statusCode'])
        fr_ocr_words.append('Nan')
    else:
        response_status.append('<200>')
        words = []
        for region in response_ocr['regions']:
            for line in region['lines']:
                for word in line['words']:
                    words.append(word['text'])
        fr_ocr_words.append(words)
#combine results into a dataframe (orient='index' + transpose pads unequal
#column lengths with NaN)
log_text_analysis = {'file_name' : blob_name_list,
                     'vis_http_response' : response_status,
                     'vis_fr_caption' : fr_caption,
                     'vis_fr_caption_score[%]' : fr_caption_confidence,
                     'vis_tag_description': fr_tag_description,
                     'vis_tag_name' : fr_tag_name,
                     'vis_tag_confidence' : fr_tag_confidence,
                     'vis_face_gender' : fr_face_gender,
                     'vis_face_age' : fr_face_age,
                     'vis_ocr' : fr_ocr_words,
                     'vis_fr_category' : fr_category,
                     'vis_fr_category_score[%]' : fr_category_confidence,
                     'vis_fr_celebrities' : fr_detail_celebrities,
                     'vis_fr_celebrities_score[%]' : fr_detail_celebrities_confidence}
df_log_text_analysis = pd.DataFrame.from_dict(log_text_analysis, orient='index')
df_log_text_analysis = df_log_text_analysis.transpose()
df_log_text_analysis[df_log_text_analysis['vis_http_response'] == '<200>'].head(10)
```
#### _Recompose output of computer vision of each video, collect results into a dataframe and save it as tabular dataset_
```
#Recompose the per-frame computer-vision features into one record per video.
dict_computer_vision = dict()
#titles used to group frames back to their source video
video_list = ['eisenhower_for_president_1952',
              '1988_george_bush_sr_revolving_door_attack_ad_campaign',
              'high_quality_famous_daisy_attack_ad_from_1964_presidential_election',
              'humphrey_laughing_at_spiro_agnew_1968_political_ad',
              'kennedy_for_me_campaign_jingle_jfk_1960',
              'mcgovern_defense_plan_ad_nixon_1972_presidential_campaign_commercial',
              'ronald_reagan_tv_ad_its_morning_in_america_again',
              'bill_clinton_hope_ad_1992',
              'historical_campaign_ad_windsurfing_bushcheney_04',
              'yes_we_can__barack_obama_music_video']
#collect the per-frame features belonging to each video
for name in video_list:
    dict_name = dict()
    caption = []
    caption_score = []
    caption_tag = []
    caption_tag_description = []
    caption_tag_score = []
    caption_people_gender = []
    caption_people_age = []
    caption_tag_celebrities = []
    caption_text = []
    caption_category = []
    caption_category_score = []
    #frames are matched to a video by substring of the blob file name
    for i, entry in enumerate(df_log_text_analysis.loc[:,'file_name']):
        if name in entry:
            caption.append(df_log_text_analysis.loc[i,'vis_fr_caption'])
            caption_score.append(df_log_text_analysis.loc[i, 'vis_fr_caption_score[%]'])
            caption_tag.append(df_log_text_analysis.loc[i, 'vis_tag_name'])
            caption_tag_description.append(df_log_text_analysis.loc[i, 'vis_tag_description'])
            caption_tag_score.append(df_log_text_analysis.loc[i, 'vis_tag_confidence'])
            caption_people_gender.append(df_log_text_analysis.loc[i, 'vis_face_gender'])
            caption_people_age.append(df_log_text_analysis.loc[i, 'vis_face_age'])
            caption_tag_celebrities.append(df_log_text_analysis.loc[i, 'vis_fr_celebrities'])
            caption_text.append(df_log_text_analysis.loc[i, 'vis_ocr'])
            caption_category.append(df_log_text_analysis.loc[i, 'vis_fr_category'])
            caption_category_score.append(df_log_text_analysis.loc[i, 'vis_fr_category_score[%]'])
    dict_name['caption'] = caption
    dict_name['caption_score'] = caption_score
    dict_name['tag'] = caption_tag
    dict_name['tag_description'] = caption_tag_description
    dict_name['tag_score'] = caption_tag_score
    dict_name['people_gender'] = caption_people_gender
    dict_name['people_age'] = caption_people_age
    dict_name['people_celebrities'] = caption_tag_celebrities
    dict_name['image_text'] = caption_text
    dict_name['category'] = caption_category
    dict_name['category_score'] = caption_category_score
    dict_computer_vision[name] = dict_name
#convert dictionary to df (one row per video)
df_computer_vision = pd.DataFrame.from_dict(dict_computer_vision , orient='index').reset_index()
#save dataframe to folder (path resolves against the current working directory)
df_computer_vision.to_csv('../../dataset/data_extraction_computer_vision.csv', sep=',', encoding='utf-8')
#display dataframe
df_computer_vision
```
## Presidential Campaigns Ads Dataset
### Combine dataframes
```
#Combine the collection, text-analysis and computer-vision datasets into the
#final Presidential Campaigns Ads dataset and save it.
#read data from data collection dataset
df1 = pd.read_csv('../../dataset/data_collection_presidential_campaign.csv')
df1 = df1.set_index('video_title').drop('Unnamed: 0', axis=1)
#read data from text analysis dataset
df2 = pd.read_csv('../../dataset/data_extraction_text_analysis.csv')
df2 = df2.set_index('video_title').drop('Unnamed: 0', axis=1)
#read data from computer vision dataset
df3 = pd.read_csv('../../dataset/data_extraction_computer_vision.csv')
df3 = df3.set_index('index').drop('Unnamed: 0', axis=1)
#concatenate dataframes side by side on the video-title index
df_presidential_campaigns = pd.concat([df1, df2, df3], axis=1,sort=False)
df_presidential_campaigns.reset_index(level=0, inplace=True)
#map the long video titles to the short campaign names used elsewhere
df_presidential_campaigns['index'] = df_presidential_campaigns['index'].apply(
    lambda value: 'eisenhower_for_president' if value == 'eisenhower_for_president_1952'
    else 'kennedy_for_me' if value == 'kennedy_for_me_campaign_jingle_jfk_1960'
    else 'daisy_attack' if value == 'high_quality_famous_daisy_attack_ad_from_1964_presidential_election'
    else 'humphrey_laughing_at_spiro_agnew' if value == 'humphrey_laughing_at_spiro_agnew_1968_political_ad'
    else 'mcgovern_defense_plan' if value == 'mcgovern_defense_plan_ad_nixon_1972_presidential_campaign_commercial'
    else 'its_morning_in_america_again' if value == 'ronald_reagan_tv_ad_its_morning_in_america_again'
    else 'revolving_door_attack' if value == '1988_george_bush_sr_revolving_door_attack_ad_campaign'
    else 'hope' if value == 'bill_clinton_hope_ad_1992'
    else 'windsurfing' if value == 'historical_campaign_ad_windsurfing_bushcheney_04'
    else 'yes_we_can' if value == 'yes_we_can__barack_obama_music_video'
    else 'unknown')
#save dataframe to folder
df_presidential_campaigns.to_csv('../../dataset/presidential_campaigns_ads_dataset.csv', sep=',', encoding='utf-8')
#display the dataset
df_presidential_campaigns
```
### Data type and data description
Below is a complete list of the data available in the _**Presidential Campaigns Ads Dataset**_:
|field|data type|data description|
|:---|:---|:---|
|index|**string**|presidential campaign title|
|video_url|**string**|YouTube video url|
|video_length[sec]|**float**|YouTube video length|
|video_frames[n]|**integer**|YouTube video number of frames|
|frame_sec[n/sec]|**float**|ratio of the number of frames in the video to the video length|
|frame_extracted[n]|**integer**|number of frames extracted from the original video (i.e. every 100th frame)|
|year|**integer**|presidential campaign year|
|candidate_name|**string**|presidential candidate name|
|party|**string**|candidate party|
|stt_text|**string**|text transcription of the video (i.e. script)|
|stt_words_count|**integer**|number of words in the script|
|ta_sentiment_text|**float**|scores close to 1 indicate positive sentiment in the script of the video, while scores close to 0 indicate negative sentiment|
|ta_key_phrases|**string**|list of strings denoting the key talking points in the script of the video|
|ta_sentiment_key_phrases|**float**|scores close to 1 indicate positive sentiment in the key talking point, while scores close to 0 indicate negative sentiment|
|caption|**string**|sentence describing image contents|
|caption_score|**float**|probability of the captions being accurate|
|tag|**string**|key points in the image|
|tag_description|**string**|list of subjects the image|
|tag_score|**float**|probability of the tag being accurate|
|people_gender|**string**|gender of people in the image|
|people_age|**integer**|age of people in the image|
|people_celebrities|**string**|name of celebrities in the image|
|image_text|**string**|character recognition in the image|
|category|**string**|main subject in the image|
|category_score|**float**|probability of the category being accurate|
## Recap
### What you have learnt
- How to convert audio to text and recombine it to transcribe a video
- How to extract key phrases and sentiment from the text
- How to extract image contents and text from frames extracted from a video
- How to organize the dataset and save it
### What you will learn next guide
- This is the last guide of the workshop. In the future we hope to provide you with tutorials that use this dataset to make prediction as well as to build other interesting models. Stay tuned!

### Questions for you
- Does the dataset look interesting for your research? And, in general, to social scientists?
- What was your idea about public cloud computing before and after the workshop?
- What do you think about captions generation (before/after the workshop)?
```
#Render the notebook with the workshop's custom stylesheet.
#import library to display notebook as HTML
import os
from IPython.core.display import HTML
#path to the .css style sheet (Windows-style separators — authored on Windows)
cur_path = os.path.dirname(os.path.abspath("__file__"))
new_path = os.path.relpath('..\\..\\..\\styles\\custom_styles_public_cloud_computing.css', cur_path)
def css():
    """Load the custom stylesheet and return it as renderable HTML."""
    #fix: close the file handle (the original open() was never closed)
    with open(new_path, "r") as handle:
        style = handle.read()
    return HTML(style)
css()
def get_files(dir_files):
    """Walk *dir_files* recursively and collect file metadata.

    Returns three parallel lists: file names, full paths, and extensions
    (the text after the last '.').
    """
    files_name = []
    files_path = []
    files_extension = []
    for root, directories, files in os.walk(dir_files):
        for file in files:
            files_name.append(file)
            files_path.append(os.path.join(root, file))
            files_extension.append(file.split('.')[-1])
    #fix: stray ')' removed from the progress message
    print('Data stored from directory:\t {}'.format(dir_files))
    return files_name, files_path, files_extension
def retrieve_keys(service_name, PATH_TO_KEYS, KEYS_FILE_NAME):
    """Return the credentials dict (NAME/API_KEY) for the selected cloud service.

    fix: the function was defined as `retrive_keys` but every call site in
    this notebook spells it `retrieve_keys` (NameError at runtime); the
    misspelled name is kept below as a backward-compatible alias.
    NOTE(review): the keys file is read with pickle even though the file
    name used elsewhere ends in .json — confirm the file really is a pickle.
    """
    path_to_keys = os.path.join(PATH_TO_KEYS, KEYS_FILE_NAME)
    with open(path_to_keys, 'rb') as handle:
        azure_keys = pickle.load(handle)
    service_key = azure_keys[service_name]
    return service_key
#backward-compatible alias for the original (misspelled) name
retrive_keys = retrieve_keys
def make_public_container(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME):
    """Create a blob container and open it to public (anonymous) read access."""
    blob_service = BlockBlobService(account_name=STORAGE_NAME, account_key=STORAGE_KEY)
    new_container_status = blob_service.create_container(NEW_CONTAINER_NAME)
    blob_service.set_container_acl(NEW_CONTAINER_NAME, public_access=PublicAccess.Container)
    #fix: the original tested the undefined name `new_container` (NameError)
    if new_container_status == True:
        print('{} BLOB container has been successfully created: {}'.format(NEW_CONTAINER_NAME, new_container_status))
    else:
        #fix: '{]' was a malformed format placeholder
        print('{} something went wrong: check parameters and subscription'.format(NEW_CONTAINER_NAME))
def upload_file(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME, file, path, extension, content_type):
    """Upload one local file to the container.

    The blob's MIME type is content_type + extension (e.g. 'audio/x-' + 'wav').
    """
    blob_service = BlockBlobService(account_name=STORAGE_NAME, account_key=STORAGE_KEY)
    try:
        blob_service.create_blob_from_path(NEW_CONTAINER_NAME, file, path, content_settings=ContentSettings(content_type=content_type + extension))
        print("{} // BLOB upload status: successful".format(file))
    #fix: bare except narrowed — a KeyboardInterrupt should not be swallowed
    except Exception:
        print("{} // BLOB upload status: failed".format(file))
def upload_files_to_container(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME, DIR_FILES, CONTENT_TYPE=''):
    """Create a public container and upload every file found under DIR_FILES.

    CONTENT_TYPE is the MIME prefix each file's extension is appended to
    (e.g. 'audio/x-' -> 'audio/x-wav'). fix: it now defaults to '' so the
    four-argument call sites earlier in this notebook no longer raise a
    TypeError.
    """
    #create the target container (public read access)
    make_public_container(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME)
    print('---------------------------------------------------------')
    #find names, paths and extensions of the files stored in the directory
    files_name, files_path, files_extension = get_files(DIR_FILES)
    #set uploading procedure starting time
    print('---------------------------------------------------------')
    print("Start uploading files")
    print('---------------------------------------------------------')
    start = time.time()
    #upload all files to the new container
    count = 0
    for path, file, ext in zip(files_path, files_name, files_extension):
        upload_file(STORAGE_NAME, STORAGE_KEY, NEW_CONTAINER_NAME, file, path, ext, CONTENT_TYPE)
        count += 1
    #set procedure ending time
    end = time.time()
    print('---------------------------------------------------------')
    print('Uploading completed')
    print('---------------------------------------------------------')
    print('It took {} seconds to upload {} files'.format(round(end - start, 2), count))
def delete_container(STORAGE_NAME, STORAGE_KEY, CONTAINER_NAME):
    """Delete a BLOB container and everything stored inside it.

    ##############################################################
    # RUN THIS ONLY IF YOU WANT TO DELETE A CONTAINER            #
    # REMEMBER TO DOWNLOAD YOUR DATA BEFORE DELETING IT          #
    # IMPORTANT: YOU WILL LOSE EVERY BLOB IN THE CONTAINER       #
    ##############################################################
    """
    blob_service = BlockBlobService(account_name=STORAGE_NAME, account_key=STORAGE_KEY)
    # Fix: the local variable previously shadowed this function's own name,
    # making a second call impossible in the same namespace.
    deletion_status = blob_service.delete_container(CONTAINER_NAME)
    # Fix: "delition" typo in the user-facing message.
    print("{} deletion status success: {}".format(CONTAINER_NAME, deletion_status))
```
| github_jupyter |
```
import time
from collections import OrderedDict, namedtuple
import numpy as np
from numpy import linspace
from pandas import DataFrame
from scipy.integrate import odeint, ode
import ggplot as gg
%autosave 600
# Feature-detect optional integrators.  Fix: the original used bare
# `except:` clauses, which also swallow KeyboardInterrupt/SystemExit;
# only ImportError signals "package not installed".
HAS_SOLVEIVP = False
try:
    from scipy.integrate import solve_ivp
    HAS_SOLVEIVP = True
except ImportError:
    pass
if not HAS_SOLVEIVP:
    # Fall back to the standalone scipy_ode backport package.
    try:
        from scipy_ode import solve_ivp
        HAS_SOLVEIVP = True
    except ImportError:
        pass
HAS_SOLVEIVP
HAS_ODES = False
try:
    from scikits.odes.odeint import odeint as odes_odeint
    from scikits.odes import ode as odes_ode
    HAS_ODES = True
except ImportError:
    pass
HAS_ODES
```
# Models to use in performance test
```
class egfngf_model:
    """EGF/NGF signalling-pathway ODE model (32 species, 48 rate constants).

    Exposes two right-hand-side callbacks with identical dynamics:
    ``f`` returns a fresh list (scipy-style solvers) and ``f_odes`` fills a
    caller-supplied buffer and returns a status code (scikits.odes-style).
    """

    def __init__(self):
        self.name = 'egfngf'
        # Output time grid: t = 0..120 in unit steps.
        self.ts = linspace(0, 120, 121, dtype=float)
        # Both callback flavours take the rate constants as user data.
        self.has_userdata = True
        self.has_userdata_odes = True
        # Reaction rate constants k[0..47].
        self.k = [
            2.18503E-5,
            0.0121008,
            1.38209E-7,
            0.00723811,
            694.731,
            6086070.0,
            389.428,
            2112.66,
            1611.97,
            896896.0,
            32.344,
            35954.3,
            1509.36,
            1432410.0,
            0.884096,
            62464.6,
            185.759,
            4768350.0,
            125.089,
            157948.0,
            2.83243,
            518753.0,
            9.85367,
            1007340.0,
            8.8912,
            3496490.0,
            0.0213697,
            763523.0,
            10.6737,
            184912.0,
            0.0771067,
            272056.0,
            0.0566279,
            653951.0,
            15.1212,
            119355.0,
            146.912,
            12876.2,
            1.40145,
            10965.6,
            27.265,
            295990.0,
            2.20995,
            1025460.0,
            0.126329,
            1061.71,
            441.287,
            1.08795E7
        ]
        self.userdata = self.k
        # Initial species concentrations y[0..31].
        self.y0 = [
            1000,
            4560,
            80000.0,
            0.0,
            10000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            600000.0,
            0.0,
            600000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            120000.0,
            120000.0,
            120000.0
        ]

    def f(self, t, y, k):
        """Return dy/dt at time ``t`` for state ``y`` and rate constants ``k``."""
        return [
            ((-1.0 * k[0] * y[0] * y[2])) + (1.0 * k[1] * y[3]),
            ((-1.0 * k[2] * y[1] * y[4])) + (1.0 * k[3] * y[5]),
            ((-1.0 * k[0] * y[0] * y[2])) + (1.0 * k[1] * y[3]),
            ((1.0 * k[0] * y[0] * y[2]) + (-1.0 * k[1] * y[3])),
            ((-1.0 * k[2] * y[1] * y[4]) + (1.0 * k[3] * y[5])),
            ((1.0 * k[2] * y[1] * y[4]) + (-1.0 * k[3] * y[5])),
            ((-1.0 * k[4] * y[3] * y[6] / (y[6] + k[5])) + (-1.0 * k[6] * y[5] * y[6] / (y[6] + k[7])) + (
                1.0 * k[8] * y[9] * y[7] / (y[7] + k[9]))),
            ((1.0 * k[4] * y[3] * y[6] / (y[6] + k[5])) + (1.0 * k[6] * y[5] * y[6] / (y[6] + k[7])) + (
                -1.0 * k[8] * y[9] * y[7] / (y[7] + k[9]))),
            ((-1.0 * k[26] * y[19] * y[8] / (y[8] + k[27]))),
            ((1.0 * k[26] * y[19] * y[8] / (y[8] + k[27]))),
            ((-1.0 * k[10] * y[7] * y[10] / (y[10] + k[11])) + (1.0 * k[12] * y[28] * y[11] / (y[11] + k[13]))),
            ((1.0 * k[10] * y[7] * y[10] / (y[10] + k[11])) + (-1.0 * k[12] * y[28] * y[11] / (y[11] + k[13]))),
            ((-1.0 * k[14] * y[11] * y[12] / (y[12] + k[15])) + (1.0 * k[44] * y[31] * y[13] / (y[13] + k[45])) + (
                1.0 * k[34] * y[23] * y[13] / (y[13] + k[35]))),
            ((1.0 * k[14] * y[11] * y[12] / (y[12] + k[15])) + (-1.0 * k[44] * y[31] * y[13] / (y[13] + k[45])) + (
                -1.0 * k[34] * y[23] * y[13] / (y[13] + k[35]))),
            ((-1.0 * k[42] * y[27] * y[14] / (y[14] + k[43])) + (1.0 * k[46] * y[31] * y[15] / (y[15] + k[47]))),
            ((1.0 * k[42] * y[27] * y[14] / (y[14] + k[43])) + (-1.0 * k[46] * y[31] * y[15] / (y[15] + k[47]))),
            ((-1.0 * k[16] * y[13] * y[16] / (y[16] + k[17])) + (-1.0 * k[18] * y[15] * y[16] / (y[16] + k[19])) + (
                1.0 * k[20] * y[30] * y[17] / (y[17] + k[21]))),
            ((1.0 * k[16] * y[13] * y[16] / (y[16] + k[17])) + (1.0 * k[18] * y[15] * y[16] / (y[16] + k[19])) + (
                -1.0 * k[20] * y[30] * y[17] / (y[17] + k[21]))),
            ((-1.0 * k[22] * y[17] * y[18] / (y[18] + k[23])) + (1.0 * k[24] * y[30] * y[19] / (y[19] + k[25]))),
            ((1.0 * k[22] * y[17] * y[18] / (y[18] + k[23])) + (-1.0 * k[24] * y[30] * y[19] / (y[19] + k[25]))),
            ((-1.0 * k[28] * y[3] * y[20] / (y[20] + k[29])) + (-1.0 * k[30] * y[11] * y[20] / (y[20] + k[31]))),
            ((1.0 * k[28] * y[3] * y[20] / (y[20] + k[29])) + (1.0 * k[30] * y[11] * y[20] / (y[20] + k[31]))),
            ((-1.0 * k[32] * y[21] * y[22] / (y[22] + k[33]))),
            ((1.0 * k[32] * y[21] * y[22] / (y[22] + k[33]))),
            ((-1.0 * k[36] * y[5] * y[24] / (y[24] + k[37]))),
            ((1.0 * k[36] * y[5] * y[24] / (y[24] + k[37]))),
            ((-1.0 * k[38] * y[25] * y[26] / (y[26] + k[39])) + (1.0 * k[40] * y[29] * y[27] / (y[27] + k[41]))),
            ((1.0 * k[38] * y[25] * y[26] / (y[26] + k[39])) + (-1.0 * k[40] * y[29] * y[27] / (y[27] + k[41]))),
            0,
            0,
            0,
            0
        ]

    def f_odes(self, t, y, yout, k):
        """scikits.odes-style RHS: write dy/dt into ``yout``; return 0 on success.

        Fix: the original duplicated all 32 expressions of :meth:`f`
        verbatim, a drift hazard; delegate instead so the two callbacks
        cannot diverge.
        """
        yout[:] = self.f(t, y, k)
        return 0
%load_ext Cython
%%cython -I /home/benny/git/odes/scikits/odes/sundials/ -I /usr/local/lib/python3.5/dist-packages/scikits.odes-2.3.0.dev0-py3.5-linux-x86_64.egg/scikits/odes/sundials/
## update include flag -I to point to odes/sundials directory!
import numpy as np
from cpython cimport bool
cimport numpy as np
from scikits.odes.sundials.cvode cimport CV_RhsFunction
#scikits.odes allows cython functions only if derived from correct class
# Cython port of egfngf_model: same 32-species / 48-constant system, but
# typed and derived from CV_RhsFunction so scikits.odes can call it
# without Python-call overhead.
cdef class egfngf_cython_model(CV_RhsFunction):
    cdef public ts, k, y0, userdata
    cdef public object name
    cdef public CV_RhsFunction f_odes
    cdef public bool has_userdata, has_userdata_odes
    def __cinit__(self):
        self.name = 'egfngf_cython'
        # Output time grid: t = 0..120 in unit steps.
        self.ts = np.linspace(0, 120, 121, dtype=float)
        self.has_userdata = True
        self.has_userdata_odes = True
        # Reaction rate constants k[0..47] (same values as egfngf_model).
        self.k = np.array([
            2.18503E-5,
            0.0121008,
            1.38209E-7,
            0.00723811,
            694.731,
            6086070.0,
            389.428,
            2112.66,
            1611.97,
            896896.0,
            32.344,
            35954.3,
            1509.36,
            1432410.0,
            0.884096,
            62464.6,
            185.759,
            4768350.0,
            125.089,
            157948.0,
            2.83243,
            518753.0,
            9.85367,
            1007340.0,
            8.8912,
            3496490.0,
            0.0213697,
            763523.0,
            10.6737,
            184912.0,
            0.0771067,
            272056.0,
            0.0566279,
            653951.0,
            15.1212,
            119355.0,
            146.912,
            12876.2,
            1.40145,
            10965.6,
            27.265,
            295990.0,
            2.20995,
            1025460.0,
            0.126329,
            1061.71,
            441.287,
            1.08795E7
        ], float)
        self.userdata = self.k
        # Initial species concentrations y[0..31].
        self.y0 = np.array([
            1000,
            4560,
            80000.0,
            0.0,
            10000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            600000.0,
            0.0,
            600000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            0.0,
            120000.0,
            120000.0,
            120000.0,
            120000.0
        ], float)
    # scipy-style RHS: returns a freshly allocated derivative array.
    cpdef np.ndarray[double, ndim=1] f(self, double t, np.ndarray[double, ndim=1] y,
                                       np.ndarray[double, ndim=1] k):
        return np.array([
            ((-1.0 * k[0] * y[0] * y[2])) + (1.0 * k[1] * y[3]),
            ((-1.0 * k[2] * y[1] * y[4])) + (1.0 * k[3] * y[5]),
            ((-1.0 * k[0] * y[0] * y[2])) + (1.0 * k[1] * y[3]),
            ((1.0 * k[0] * y[0] * y[2]) + (-1.0 * k[1] * y[3])),
            ((-1.0 * k[2] * y[1] * y[4]) + (1.0 * k[3] * y[5])),
            ((1.0 * k[2] * y[1] * y[4]) + (-1.0 * k[3] * y[5])),
            ((-1.0 * k[4] * y[3] * y[6] / (y[6] + k[5])) + (-1.0 * k[6] * y[5] * y[6] / (y[6] + k[7])) + (
                1.0 * k[8] * y[9] * y[7] / (y[7] + k[9]))),
            ((1.0 * k[4] * y[3] * y[6] / (y[6] + k[5])) + (1.0 * k[6] * y[5] * y[6] / (y[6] + k[7])) + (
                -1.0 * k[8] * y[9] * y[7] / (y[7] + k[9]))),
            ((-1.0 * k[26] * y[19] * y[8] / (y[8] + k[27]))),
            ((1.0 * k[26] * y[19] * y[8] / (y[8] + k[27]))),
            ((-1.0 * k[10] * y[7] * y[10] / (y[10] + k[11])) + (1.0 * k[12] * y[28] * y[11] / (y[11] + k[13]))),
            ((1.0 * k[10] * y[7] * y[10] / (y[10] + k[11])) + (-1.0 * k[12] * y[28] * y[11] / (y[11] + k[13]))),
            ((-1.0 * k[14] * y[11] * y[12] / (y[12] + k[15])) + (1.0 * k[44] * y[31] * y[13] / (y[13] + k[45])) + (
                1.0 * k[34] * y[23] * y[13] / (y[13] + k[35]))),
            ((1.0 * k[14] * y[11] * y[12] / (y[12] + k[15])) + (-1.0 * k[44] * y[31] * y[13] / (y[13] + k[45])) + (
                -1.0 * k[34] * y[23] * y[13] / (y[13] + k[35]))),
            ((-1.0 * k[42] * y[27] * y[14] / (y[14] + k[43])) + (1.0 * k[46] * y[31] * y[15] / (y[15] + k[47]))),
            ((1.0 * k[42] * y[27] * y[14] / (y[14] + k[43])) + (-1.0 * k[46] * y[31] * y[15] / (y[15] + k[47]))),
            ((-1.0 * k[16] * y[13] * y[16] / (y[16] + k[17])) + (-1.0 * k[18] * y[15] * y[16] / (y[16] + k[19])) + (
                1.0 * k[20] * y[30] * y[17] / (y[17] + k[21]))),
            ((1.0 * k[16] * y[13] * y[16] / (y[16] + k[17])) + (1.0 * k[18] * y[15] * y[16] / (y[16] + k[19])) + (
                -1.0 * k[20] * y[30] * y[17] / (y[17] + k[21]))),
            ((-1.0 * k[22] * y[17] * y[18] / (y[18] + k[23])) + (1.0 * k[24] * y[30] * y[19] / (y[19] + k[25]))),
            ((1.0 * k[22] * y[17] * y[18] / (y[18] + k[23])) + (-1.0 * k[24] * y[30] * y[19] / (y[19] + k[25]))),
            ((-1.0 * k[28] * y[3] * y[20] / (y[20] + k[29])) + (-1.0 * k[30] * y[11] * y[20] / (y[20] + k[31]))),
            ((1.0 * k[28] * y[3] * y[20] / (y[20] + k[29])) + (1.0 * k[30] * y[11] * y[20] / (y[20] + k[31]))),
            ((-1.0 * k[32] * y[21] * y[22] / (y[22] + k[33]))),
            ((1.0 * k[32] * y[21] * y[22] / (y[22] + k[33]))),
            ((-1.0 * k[36] * y[5] * y[24] / (y[24] + k[37]))),
            ((1.0 * k[36] * y[5] * y[24] / (y[24] + k[37]))),
            ((-1.0 * k[38] * y[25] * y[26] / (y[26] + k[39])) + (1.0 * k[40] * y[29] * y[27] / (y[27] + k[41]))),
            ((1.0 * k[38] * y[25] * y[26] / (y[26] + k[39])) + (-1.0 * k[40] * y[29] * y[27] / (y[27] + k[41]))),
            0,
            0,
            0,
            0], float)
    # CVODE-style RHS: fills `yout` in place, returns 0 on success.
    # Must mirror f() element-for-element; kept unrolled for speed.
    cpdef int evaluate(self, double t,
                       np.ndarray[double, ndim=1] y,
                       np.ndarray[double, ndim=1] yout,
                       object userdata = None) except? -1:
        #cdef np.ndarray[double, ndim=1] k = self.k  # avoid self.k gives quite some speedup!
        cdef np.ndarray[double, ndim=1] k = userdata
        # avoiding creation of temporary arrays gives quite some speedup!
        yout[0] = ((-1.0 * k[0] * y[0] * y[2])) + (1.0 * k[1] * y[3])
        yout[1] = ((-1.0 * k[2] * y[1] * y[4])) + (1.0 * k[3] * y[5])
        yout[2] = ((-1.0 * k[0] * y[0] * y[2])) + (1.0 * k[1] * y[3])
        yout[3] = ((1.0 * k[0] * y[0] * y[2]) + (-1.0 * k[1] * y[3]))
        yout[4] = ((-1.0 * k[2] * y[1] * y[4]) + (1.0 * k[3] * y[5]))
        yout[5] = ((1.0 * k[2] * y[1] * y[4]) + (-1.0 * k[3] * y[5]))
        yout[6] = ((-1.0 * k[4] * y[3] * y[6] / (y[6] + k[5])) + (-1.0 * k[6] * y[5] * y[6] / (y[6] + k[7])) + (
            1.0 * k[8] * y[9] * y[7] / (y[7] + k[9])))
        yout[7] = ((1.0 * k[4] * y[3] * y[6] / (y[6] + k[5])) + (1.0 * k[6] * y[5] * y[6] / (y[6] + k[7])) + (
            -1.0 * k[8] * y[9] * y[7] / (y[7] + k[9])))
        yout[8] = ((-1.0 * k[26] * y[19] * y[8] / (y[8] + k[27])))
        yout[9] = ((1.0 * k[26] * y[19] * y[8] / (y[8] + k[27])))
        yout[10] = ((-1.0 * k[10] * y[7] * y[10] / (y[10] + k[11])) + (1.0 * k[12] * y[28] * y[11] / (y[11] + k[13])))
        yout[11] = ((1.0 * k[10] * y[7] * y[10] / (y[10] + k[11])) + (-1.0 * k[12] * y[28] * y[11] / (y[11] + k[13])))
        yout[12] = ((-1.0 * k[14] * y[11] * y[12] / (y[12] + k[15])) + (1.0 * k[44] * y[31] * y[13] / (y[13] + k[45])) + (
            1.0 * k[34] * y[23] * y[13] / (y[13] + k[35])))
        yout[13] = ((1.0 * k[14] * y[11] * y[12] / (y[12] + k[15])) + (-1.0 * k[44] * y[31] * y[13] / (y[13] + k[45])) + (
            -1.0 * k[34] * y[23] * y[13] / (y[13] + k[35])))
        yout[14] = ((-1.0 * k[42] * y[27] * y[14] / (y[14] + k[43])) + (1.0 * k[46] * y[31] * y[15] / (y[15] + k[47])))
        yout[15] = ((1.0 * k[42] * y[27] * y[14] / (y[14] + k[43])) + (-1.0 * k[46] * y[31] * y[15] / (y[15] + k[47])))
        yout[16] = ((-1.0 * k[16] * y[13] * y[16] / (y[16] + k[17])) + (-1.0 * k[18] * y[15] * y[16] / (y[16] + k[19])) + (
            1.0 * k[20] * y[30] * y[17] / (y[17] + k[21])))
        yout[17] = ((1.0 * k[16] * y[13] * y[16] / (y[16] + k[17])) + (1.0 * k[18] * y[15] * y[16] / (y[16] + k[19])) + (
            -1.0 * k[20] * y[30] * y[17] / (y[17] + k[21])))
        yout[18] = ((-1.0 * k[22] * y[17] * y[18] / (y[18] + k[23])) + (1.0 * k[24] * y[30] * y[19] / (y[19] + k[25])))
        yout[19] = ((1.0 * k[22] * y[17] * y[18] / (y[18] + k[23])) + (-1.0 * k[24] * y[30] * y[19] / (y[19] + k[25])))
        yout[20] = ((-1.0 * k[28] * y[3] * y[20] / (y[20] + k[29])) + (-1.0 * k[30] * y[11] * y[20] / (y[20] + k[31])))
        yout[21] = ((1.0 * k[28] * y[3] * y[20] / (y[20] + k[29])) + (1.0 * k[30] * y[11] * y[20] / (y[20] + k[31])))
        yout[22] = ((-1.0 * k[32] * y[21] * y[22] / (y[22] + k[33])))
        yout[23] = ((1.0 * k[32] * y[21] * y[22] / (y[22] + k[33])))
        yout[24] = ((-1.0 * k[36] * y[5] * y[24] / (y[24] + k[37])))
        yout[25] = ((1.0 * k[36] * y[5] * y[24] / (y[24] + k[37])))
        yout[26] = ((-1.0 * k[38] * y[25] * y[26] / (y[26] + k[39])) + (1.0 * k[40] * y[29] * y[27] / (y[27] + k[41])))
        yout[27] = ((1.0 * k[38] * y[25] * y[26] / (y[26] + k[39])) + (-1.0 * k[40] * y[29] * y[27] / (y[27] + k[41])))
        yout[28] = 0
        yout[29] = 0
        yout[30] = 0
        yout[31] = 0
        return 0
model2 = egfngf_cython_model()
# For the performance comparator, f_odes is the right-hand side.
# For cython odes it must be a CV_RhsFunction, and the instance itself
# implements `evaluate`, so we make a circular link:
model2.f_odes = model2
# The pure-Python and Cython variants of the same system, benchmarked side by side.
models = [egfngf_model(), model2]
```
# Methods to use to solve the models
```
class scipy_ode_int:
    """Benchmark adapter around scipy.integrate.odeint.

    odeint calls its RHS as ``func(y, t)`` while the models define
    ``f(t, y[, k])``, so thin wrappers swap the argument order.
    """
    name = 'odeint'

    def __call__(self, model, rtol):
        """Integrate ``model`` over ``model.ts`` and return the solution array."""
        def rhs_with_userdata(y, t):
            # odeint hands us (y, t); the model wants (t, y, k).
            return model.f(t, y, model.userdata)

        def rhs(y, t):
            return model.f(t, y)

        rhs_fn = rhs_with_userdata if model.has_userdata else rhs
        return odeint(rhs_fn, model.y0, model.ts, rtol=rtol)
class scipy_ode_class:
    """Benchmark adapter around scipy.integrate.ode.

    The display name doubles as the configuration: ``"<solver> <method>"``
    (e.g. ``"vode bdf"``) or just ``"<solver>"`` when the integrator takes
    no sub-method (e.g. ``"lsoda"``).
    """

    def __init__(self, name):
        self.name = name
        solver, sep, method = name.partition(" ")
        self.solver = solver
        self.method = method if sep else None

    def __call__(self, model, rtol):
        """Integrate ``model`` over ``model.ts`` and return the solution array."""
        integrator = ode(model.f)
        integrator.set_integrator(self.solver, method=self.method, rtol=rtol,
                                  nsteps=10000)
        integrator.set_initial_value(model.y0, 0.0)
        if model.has_userdata:
            integrator.set_f_params(model.userdata)
        out = np.empty((len(model.ts), len(model.y0)))
        for row, t_eval in enumerate(model.ts):
            # ode cannot be asked for t == start, so copy the initial state.
            out[row, :] = model.y0 if t_eval == 0 else integrator.integrate(t_eval)
        return out
class scipy_odes_class(scipy_ode_class):
    """Benchmark adapter around scikits.odes (cvode).

    Reuses the "<solver> <method>" name parsing from scipy_ode_class.
    """

    def __call__(self, model, rtol):
        """Integrate ``model`` over ``model.ts`` and return the solution array."""
        userdata = model.userdata if model.has_userdata_odes else None
        solver = odes_ode(self.solver, model.f_odes, old_api=False,
                          lmm_type=self.method, rtol=rtol,
                          user_data=userdata)
        solution = solver.solve(model.ts, model.y0)
        # Fix: the original never initialized `result`, so the first
        # `result[i, :] = ...` raised NameError.
        result = np.empty((len(model.ts), len(model.y0)))
        for i, t in enumerate(model.ts):
            try:
                result[i, :] = solution.values.y[i]
            except IndexError:
                # Solver stopped early: no valid solution for this t,
                # pad the remaining rows with zeros.
                result[i, :] = 0
        return result
class scipy_solver_class:
    """Benchmark adapter around scipy.integrate.solve_ivp.

    The display name is also the solve_ivp ``method`` (e.g. "RK45", "BDF").
    """

    def __init__(self, name):
        self.name = name

    def __call__(self, model, rtol):
        """Integrate ``model`` over ``model.ts`` and return the solution array."""
        if model.has_userdata:
            rhs = lambda t, y: model.f(t, y, model.userdata)
        else:
            rhs = lambda t, y: model.f(t, y)
        t_span = [0.0, np.max(model.ts)]
        sol = solve_ivp(rhs, t_span, model.y0, method=self.name,
                        rtol=rtol, t_eval=model.ts)
        # solve_ivp returns (n_states, n_times); the comparator wants the transpose.
        return sol.y.transpose()
# Integrators to benchmark.  The classic scipy.integrate front-ends are
# always present; solve_ivp- and scikits.odes-based methods are appended
# only when the corresponding import succeeded above.
methods = [
    scipy_ode_int(),
    scipy_ode_class("vode bdf"),
    scipy_ode_class("vode adams"),
    scipy_ode_class("lsoda"),
    scipy_ode_class("dopri5"),
    scipy_ode_class("dop853"),
]
if HAS_SOLVEIVP:
    methods += [scipy_solver_class("RK45"),
                scipy_solver_class("RK23"),
                scipy_solver_class("Radau"),
                scipy_solver_class("BDF"),
                ]
if HAS_ODES:
    methods += [scipy_odes_class("cvode BDF"),
                scipy_odes_class("cvode ADAMS"),
                ]
```
# Compare the methods with the gold standard
```
# Relative tolerances to sweep: 1e-9 .. 1e-1.
rtols = 10 ** np.arange(-9.0, 0.0)
GoldStandard = namedtuple('GoldStandard', ['name', 'values', 'max'])
# Reference solution per model: odeint (methods[0]) at a very tight rtol.
gold_standards = []
for model in models:
    print('Gold standard for {}'.format(model.name))
    result = methods[0](model, 1e-12)
    gold_standards.append((model.name, GoldStandard(model.name, result, np.max(result))))
gold_standards = OrderedDict(gold_standards)
# Time every (method, model, rtol) combination and record its error
# against the gold standard.
data = []
for method in methods:
    for model in models:
        for rtol in rtols:
            print('method: {} model: {} rtol: {}'.format(method.name, model.name, rtol), end='')
            # Run
            tic = time.time()
            result = method(model, rtol)
            toc = time.time() - tic
            # Compare to gold standard
            standard = gold_standards[model.name]
            diff = result - standard.values
            # NOTE(review): `diff` is signed, so np.max ignores large
            # negative deviations; np.max(np.abs(diff)) would measure the
            # worst-case error — confirm intent.
            max_rel_diff = np.max(diff/standard.max)
            # Append to table
            record = (method.name, model.name, rtol, max_rel_diff, toc)
            print(' err: {} toc: {}'.format(max_rel_diff, toc))
            data.append(record)
data = DataFrame(data, columns=['method', 'model', 'rtol', 'err', 'time'])
```
# Plot the performance
```
# One log-log time-vs-error plot per model, colored by method.
for model in models:
    print(gg.ggplot(data[data.model == model.name], gg.aes(x='err', y='time', color='method'))
          + gg.geom_point(size=60.0)
          + gg.geom_line()
          + gg.scale_x_log()
          + gg.scale_y_log()
          + gg.xlim(1e-10, 1e-2)
          + gg.ggtitle('Model ' + model.name)
          )
```
In above plot, cvode (ADAMS or BDF) is the scikits.odes method. For the pure python case and the cython case, cvode BDF is the best performing integrator.
Note that lsoda and odeint don't show the smooth rate as cvode does, with timings sometimes much higher for unknown reasons.
| github_jupyter |
```
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import copy
import json
import scipy
# Detectron colors (one RGB triple per row, values in [0, 1]).
_COLORS = np.array([
    0.000, 0.447, 0.741,
    0.850, 0.325, 0.098,
    0.929, 0.694, 0.125,
    0.494, 0.184, 0.556,
    0.466, 0.674, 0.188
]).astype(np.float32).reshape((-1, 3))
# Random number generator seed
_RNG_SEED = 1
# Fix RNG seeds so sampled curves are reproducible across runs.
random.seed(_RNG_SEED)
np.random.seed(_RNG_SEED)
# Directory where sweep summaries are stored
_DATA_DIR = '../data'
# Max flops constraint (in GF) used to window the sweeps.
_MAX_F = 0.600
# Max params constraint (in millions) used to window the sweeps.
_MAX_P = 6.000
def load_sweep(sweep_name):
    """Load the JSON summary for one sweep from _DATA_DIR and return it."""
    summary_path = os.path.join(_DATA_DIR, sweep_name + '.json')
    with open(summary_path, 'r') as fp:
        return json.load(fp)
def compute_norm_ws(cs, num_bins, c_range):
    """Compute normalized EDF weights for complexities ``cs``.

    Each sample is weighted inversely to the population of its complexity
    bin, so the weights sum to 1 and every bin contributes equally.
    """
    counts, bin_edges = np.histogram(cs, bins=num_bins, range=c_range)
    bin_of = np.digitize(cs, bins=bin_edges) - 1
    # Every bin must be populated, otherwise a weight would divide by zero.
    assert np.count_nonzero(counts) == num_bins
    return 1 / counts[bin_of] / num_bins
def compute_c_range_mins(sweeps, dss):
    """Compute the lower ends of the shared complexity ranges.

    Returns (max-of-min params in millions, max-of-min flops in GF) across
    the design spaces ``dss``, i.e. the tightest lower bounds covered by
    every sweep.
    """
    max_min_ps = 0.0
    max_min_fs = 0.0
    for ds in dss:
        param_counts = np.array([job['params'] * 1e-6 for job in sweeps[ds]])
        flop_counts = np.array([job['flops'] * 1e-9 for job in sweeps[ds]])
        max_min_ps = max(param_counts.min(), max_min_ps)
        max_min_fs = max(flop_counts.min(), max_min_fs)
    return max_min_ps, max_min_fs
def is_valid_p(job, min_p, max_p):
    """Return True if the job's parameter count (millions) is strictly inside (min_p, max_p)."""
    # Chained comparison: evaluates job['params'] once instead of twice.
    return min_p < job['params'] * 1e-6 < max_p
def is_valid_f(job, min_f, max_f):
    """Return True if the job's flop count (GF) is strictly inside (min_f, max_f)."""
    # Chained comparison: evaluates job['flops'] once instead of twice.
    return min_f < job['flops'] * 1e-9 < max_f
# NAS sweeps (ImageNet): one design space per NAS cell family.
sweeps_nas = {
    'NASNet': load_sweep('NASNet_in'),
    'Amoeba': load_sweep('Amoeba_in'),
    'PNAS': load_sweep('PNAS_in'),
    'ENAS': load_sweep('ENAS_in'),
    'DARTS': load_sweep('DARTS_in')
}
# Standard DS sweeps: NAS design spaces alongside hand-designed ResNeXt ones.
sweeps_std = {
    'NASNet': load_sweep('NASNet_in'),
    'DARTS': load_sweep('DARTS_in'),
    'ResNeXt-A': load_sweep('ResNeXt-A_in'),
    'ResNeXt-B': load_sweep('ResNeXt-B_in')
}
# lr/wd sweeps: learning-rate and weight-decay grids for three models.
sweeps_lr_wd = {
    'Vanilla': load_sweep('Vanilla_lr-wd_in'),
    'ResNet': load_sweep('ResNet_lr-wd_in'),
    'DARTS': load_sweep('DARTS_lr-wd_in')
}
print('Figure 16a\n')
# Error EDFs of the NAS design spaces, complexity-normalized over the
# shared params (left panel) and flops (right panel) ranges.
num_bins = 8
dss = ['NASNet', 'Amoeba', 'PNAS', 'ENAS', 'DARTS']
cols = [0, 1, 4, 2, 3]
cms = ['params', 'flops']
r, c = 1, 2
w, h = 4, 3
fig, axes = plt.subplots(nrows=r, ncols=c, figsize=(c * w, r * h))
# Shared complexity window: tightest lower bound across design spaces,
# fixed upper bounds from the global constraints.
min_p, min_f = compute_c_range_mins(sweeps_nas, dss)
max_p, max_f = _MAX_P, _MAX_F
for i, cm in enumerate(cms):
    ax = axes[i]
    for j, ds in enumerate(dss):
        if cm == 'params':
            jobs = [job for job in sweeps_nas[ds] if is_valid_p(job, min_p, max_p)]
            errs = np.array([job['min_test_top1'] for job in jobs])
            ps = np.array([job['params'] * 1e-6 for job in jobs])
            # Sort by error so the cumulative weights form the EDF.
            inds = np.argsort(errs)
            errs, ps = errs[inds], ps[inds]
            ws = compute_norm_ws(ps, num_bins, c_range=(min_p, max_p))
        if cm == 'flops':
            jobs = [job for job in sweeps_nas[ds] if is_valid_f(job, min_f, max_f)]
            errs = np.array([job['min_test_top1'] for job in jobs])
            fs = np.array([job['flops'] * 1e-9 for job in jobs])
            inds = np.argsort(errs)
            errs, fs = errs[inds], fs[inds]
            ws = compute_norm_ws(fs, num_bins, c_range=(min_f, max_f))
        assert np.isclose(np.sum(ws), 1.0)
        ax.plot(
            errs, np.cumsum(ws),
            color=_COLORS[cols[j]], linewidth=2, alpha=0.8, label=ds
        )
    ax.set_xlabel('error | {}'.format(cm), fontsize=16)
    ax.grid(alpha=0.4)
    ax.set_ylabel('cumulative prob.', fontsize=16)
    ax.set_xlim([27, 50])
    ax.legend(loc='lower right', prop={'size': 13})
plt.tight_layout();
print('Figure 16b\n')
# Best error found vs. experiment size k: draw k models from each
# complexity-normalized EDF and keep the minimum; repeat to get mean ± 2 std.
num_bins = 8
dss = ['NASNet', 'Amoeba', 'PNAS', 'ENAS', 'DARTS']
cols = [0, 1, 4, 2, 3]
cms = ['params', 'flops']
r, c = 1, 2
w, h = 4, 3
fig, axes = plt.subplots(nrows=r, ncols=c, figsize=(c * w, r * h))
# Re-seed so the sampled experiments are reproducible.
random.seed(_RNG_SEED)
# Experiment sizes: 1, 2, 4, ..., 32.
ks = [2 ** p for p in range(6)]
min_p, min_f = compute_c_range_mins(sweeps_nas, dss)
max_p, max_f = _MAX_P, _MAX_F
for i, cm in enumerate(cms):
    ax = axes[i]
    for j, ds in enumerate(dss):
        if cm == 'params':
            jobs = [job for job in sweeps_nas[ds] if is_valid_p(job, min_p, max_p)]
            errs = np.array([job['min_test_top1'] for job in jobs])
            ps = np.array([job['params'] * 1e-6 for job in jobs])
            inds = np.argsort(errs)
            errs, ps = errs[inds], ps[inds]
            ws = compute_norm_ws(ps, num_bins, c_range=(min_p, max_p))
        if cm == 'flops':
            jobs = [job for job in sweeps_nas[ds] if is_valid_f(job, min_f, max_f)]
            errs = np.array([job['min_test_top1'] for job in jobs])
            fs = np.array([job['flops'] * 1e-9 for job in jobs])
            inds = np.argsort(errs)
            errs, fs = errs[inds], fs[inds]
            ws = compute_norm_ws(fs, num_bins, c_range=(min_f, max_f))
        assert np.isclose(np.sum(ws), 1.0)
        cum_ws = np.cumsum(ws)
        # Compute min errs for each k (n disjoint "experiments" of size k).
        k_errs = {}
        for k in ks:
            k_errs[k] = []
            n = len(errs) // k
            for s in range(n):
                # Weighted sample of k models; keep the best error.
                s_errs = random.choices(population=errs, cum_weights=cum_ws, k=k)
                k_errs[k].append(np.min(s_errs))
        # Plot means and stds
        ax.scatter(
            np.log2(ks), [np.mean(k_errs[k]) for k in ks],
            color=_COLORS[cols[j]], alpha=0.8, label=ds
        )
        mus = np.array([np.mean(k_errs[k]) for k in ks])
        stds = np.array([np.std(k_errs[k]) for k in ks])
        ax.fill_between(
            np.log2(ks), mus - 2 * stds, mus + 2 * stds,
            color=_COLORS[cols[j]], alpha=0.05
        )
    ax.set_ylabel('error | {}'.format(cm), fontsize=16)
    ax.grid(alpha=0.4)
    ax.set_xlabel('experiment size (log2)', fontsize=16)
    ax.set_ylim([27, 50])
    ax.legend(loc='upper right', prop={'size': 13})
plt.tight_layout();
print('Figure 16c\n')
# Same EDF comparison as 16a, but for the standard design spaces
# (NAS vs. hand-designed ResNeXt), with coarser binning.
num_bins = 5
dss = ['NASNet', 'DARTS', 'ResNeXt-A', 'ResNeXt-B']
cols = [0, 3, 1, 2]
cms = ['params', 'flops']
r, c = 1, 2
w, h = 4, 3
fig, axes = plt.subplots(nrows=r, ncols=c, figsize=(c * w, r * h))
min_p, min_f = compute_c_range_mins(sweeps_std, dss)
max_p, max_f = _MAX_P, _MAX_F
for i, cm in enumerate(cms):
    ax = axes[i]
    for j, ds in enumerate(dss):
        if cm == 'params':
            jobs = [job for job in sweeps_std[ds] if is_valid_p(job, min_p, max_p)]
            errs = np.array([job['min_test_top1'] for job in jobs])
            ps = np.array([job['params'] * 1e-6 for job in jobs])
            inds = np.argsort(errs)
            errs, ps = errs[inds], ps[inds]
            ws = compute_norm_ws(ps, num_bins, c_range=(min_p, max_p))
        if cm == 'flops':
            jobs = [job for job in sweeps_std[ds] if is_valid_f(job, min_f, max_f)]
            errs = np.array([job['min_test_top1'] for job in jobs])
            fs = np.array([job['flops'] * 1e-9 for job in jobs])
            inds = np.argsort(errs)
            errs, fs = errs[inds], fs[inds]
            ws = compute_norm_ws(fs, num_bins, c_range=(min_f, max_f))
        assert np.isclose(np.sum(ws), 1.0)
        ax.plot(
            errs, np.cumsum(ws),
            color=_COLORS[cols[j]], linewidth=2, alpha=0.8, label=ds
        )
    ax.set_xlabel('error | {}'.format(cm), fontsize=16)
    ax.grid(alpha=0.4)
    ax.set_ylabel('cumulative prob.', fontsize=16)
    ax.set_xlim([27, 50])
    ax.legend(loc='lower right', prop={'size': 13})
plt.tight_layout();
print('Figure 16d\n')
# Relative-rank maps over the (learning rate, weight decay) grid, one
# panel per model, with the default setting marked by dashed cross-hairs.
r, c = 1, 3
w, h = 4, 3
fig, axes = plt.subplots(nrows=r, ncols=c, figsize=(c * w, r * h))
hps = ['base_lr', 'wd']
lbs = ['learning rate (log10)', 'weight decay (log10)']
dss = ['Vanilla', 'ResNet', 'DARTS']
# Default (lr, wd) operating point.
def_pt = [5 * 1e-2, 5 * 1e-5]
def_pt_log = np.log10(def_pt)
for j, ds in enumerate(dss):
    # NOTE(review): `i` in the r != 1 branch is leftover from an earlier
    # cell; this only works because r == 1 here — confirm before reusing.
    ax = axes[j] if r == 1 else axes[i, j]
    sweep = sweeps_lr_wd[ds]
    xs = [job['optim'][hps[0]] for job in sweep]
    ys = [job['optim'][hps[1]] for job in sweep]
    # Use log10 scale
    xs_log = np.log10(xs)
    ys_log = np.log10(ys)
    # Compute relative ranks (rank of each job's error, scaled to (0, 1]).
    errs = [job['min_test_top1'] for job in sweep]
    ranks = np.argsort(np.argsort(errs))
    ranks += 1
    ranks_rel = ranks / (len(ranks))
    # Plot relative ranks
    s = ax.scatter(xs_log, ys_log, c=ranks_rel, alpha=0.4, cmap='viridis', rasterized=True)
    ax.set_xlabel(lbs[0], fontsize=16)
    if j == 0:
        ax.set_ylabel('{}'.format(lbs[1]), fontsize=16)
    xlim_log = np.log10([0.001, 1.0])
    ylim_log = np.log10([0.00001, 0.01])
    ax.set_xlim(xlim_log)
    ax.set_ylim(ylim_log)
    ax.grid(alpha=0.4)
    # Show default setting as a point plus dashed guide lines to the axes.
    def_pt_alpha = 0.8
    pr_col = _COLORS[1]
    ax.scatter(def_pt_log[0], def_pt_log[1], color=pr_col, alpha=def_pt_alpha)
    ax.plot(
        np.linspace(xlim_log[0], def_pt_log[0], 10), [def_pt_log[1] for _ in range(10)],
        color=pr_col, alpha=def_pt_alpha, linestyle='--', linewidth=2.5
    )
    ax.plot(
        [def_pt_log[0] for _ in range(10)], np.linspace(ylim_log[0], def_pt_log[1], 10),
        color=pr_col, alpha=def_pt_alpha, linestyle='--', linewidth=2.5
    )
    ax.set_title(ds, fontsize=16)
fig.colorbar(s, ax=axes.ravel().tolist());
```
| github_jupyter |
# "Statistical Thinking in Python (Part 1)"
> "Building the foundation you need to think statistically, speak the language of your data, and understand what your data is telling you."
- toc: true
- comments: true
- author: Victor Omondi
- categories: [statistical-thinking, eda, data-science]
- image: images/statistical-thinking-1.png
# Graphical exploratory data analysis
Before diving into sophisticated statistical inference techniques, we should first explore our data by plotting them and computing simple summary statistics. This process, called **exploratory data analysis**, is a crucial first step in statistical analysis of data.
## Introduction to Exploratory Data Analysis
Exploratory Data Analysis is the process of organizing, plotting, and summarizing a data set.
> “Exploratory data analysis can never be the whole story, but nothing else can serve as the foundation stone.”
>
> ~ John Tukey
### Tukey's comments on EDA
* Exploratory data analysis is detective work.
* There is no excuse for failing to plot and look.
* The greatest value of a picture is that it forces us to notice what we never expected to see.
* It is important to understand what you can do before you learn how to measure how well you seem to have done it.
> If you don't have time to do EDA, you really don't have time to do hypothesis tests. And you should always do EDA first.
### Advantages of graphical EDA
* It often involves converting tabular data into graphical form.
* If done well, graphical representations can allow for more rapid interpretation of data.
* There is no excuse for neglecting to do graphical EDA.
> While a good, informative plot can sometimes be the end point of an analysis, it is more like a beginning: it helps guide you in the quantitative statistical analyses that come next.
## Plotting a histogram
### Plotting a histogram of iris data
We will use a classic data set collected by botanist Edward Anderson and made famous by Ronald Fisher, one of the most prolific statisticians in history. Anderson carefully measured the anatomical properties of samples of three different species of iris, Iris setosa, Iris versicolor, and Iris virginica. The full data set is [available as part of scikit-learn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.html). Here, you will work with his measurements of petal length.
We will plot a histogram of the petal lengths of his 50 samples of Iris versicolor using matplotlib/seaborn's default settings.
The subset of the data set containing the Iris versicolor petal lengths in units of centimeters (cm) is stored in the NumPy array `versicolor_petal_length`.
# Libraries
```
# Import plotting modules
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
# Set default Seaborn style
sns.set()
%matplotlib inline
versicolor_petal_length = np.array([4.7, 4.5, 4.9, 4. , 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4. ,
4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4. , 4.9, 4.7, 4.3, 4.4,
4.8, 5. , 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1,
4. , 4.4, 4.6, 4. , 3.3, 4.2, 4.2, 4.2, 4.3, 3. , 4.1])
# Plot histogram of versicolor petal lengths
plt.hist(versicolor_petal_length)
plt.ylabel("count")
plt.xlabel("petal length (cm)")
plt.show()
```
### Adjusting the number of bins in a histogram
The histogram we just made had ten bins. This is the default of matplotlib.
>Tip: The "square root rule" is a commonly-used rule of thumb for choosing number of bins: choose the number of bins to be the square root of the number of samples.
We will plot the histogram of _Iris versicolor petal lengths_ again, this time using the square root rule for the number of bins. You specify the number of bins using the `bins` keyword argument of `plt.hist()`.
```
# Compute number of data points: n_data
n_data = len(versicolor_petal_length)
# Number of bins is the square root of number of data points: n_bins
n_bins = np.sqrt(n_data)
# Convert number of bins to integer: n_bins
n_bins = int(n_bins)
# Plot the histogram
_ = plt.hist(versicolor_petal_length, bins=n_bins)
# Label axes
_ = plt.xlabel('petal length (cm)')
_ = plt.ylabel('count')
# Show histogram
plt.show()
```
## Plot all data: Bee swarm plots
### Bee swarm plot
We will make a bee swarm plot of the iris petal lengths. The x-axis will contain each of the three species, and the y-axis the petal lengths.
```
iris_petal_lengths = pd.read_csv("../datasets/iris_petal_lengths.csv")
iris_petal_lengths.head()
iris_petal_lengths.shape
iris_petal_lengths.tail()
# Create bee swarm plot with Seaborn's default settings
_ = sns.swarmplot(data=iris_petal_lengths, x="species", y="petal length (cm)")
# Label the axes
_ = plt.xlabel("species")
_ = plt.ylabel("petal length (cm)")
# Show the plot
plt.show()
```
### Interpreting a bee swarm plot
* _I. virginica_ petals tend to be the longest, and _I. setosa_ petals tend to be the shortest of the three species.
> Note: Notice that we said **"tend to be."** Some individual _I. virginica_ flowers may be shorter than individual _I. versicolor_ flowers. It is also possible that an individual _I. setosa_ flower may have longer petals than an individual _I. versicolor_ flower, though this is highly unlikely, and was not observed by Anderson.
## Plot all data: ECDFs
> Note: Empirical cumulative distribution function (ECDF)
### Computing the ECDF
We will write a function that takes as input a 1D array of data and then returns the `x` and `y` values of the ECDF.
> Important: ECDFs are among the most important plots in statistical analysis.
```
def ecdf(data):
    """Compute the empirical CDF of a one-dimensional array of measurements.

    Returns the sorted data as the x-coordinates and the cumulative
    fractions 1/n, 2/n, ..., 1 as the matching y-coordinates.
    """
    sorted_vals = np.sort(data)
    n_obs = len(sorted_vals)
    fractions = np.arange(1, n_obs + 1) / n_obs
    return sorted_vals, fractions
```
### Plotting the ECDF
We will now use `ecdf()` function to compute the ECDF for the petal lengths of Anderson's _Iris versicolor_ flowers. We will then plot the ECDF.
> Warning: `ecdf()` function returns two arrays so we will need to unpack them. An example of such unpacking is `x, y = foo(data)`, for some function `foo()`.
```
# Compute ECDF for versicolor data: x_vers, y_vers
x_vers, y_vers = ecdf(versicolor_petal_length)
# Generate plot
_ = plt.plot(x_vers, y_vers, marker=".", linestyle="none")
# Label the axes
_ = plt.xlabel("versicolor petal length, (cm)")
_ = plt.ylabel("ECDF")
# Display the plot
plt.show()
```
### Comparison of ECDFs
ECDFs also allow us to compare two or more distributions ***(though plots get cluttered if you have too many)***. Here, we will plot ECDFs for the petal lengths of all three iris species.
> Important: we already wrote a function to generate ECDFs so we can put it to good use!
```
setosa_petal_length = iris_petal_lengths["petal length (cm)"][iris_petal_lengths.species == "setosa"]
versicolor_petal_length = iris_petal_lengths["petal length (cm)"][iris_petal_lengths.species == "versicolor"]
virginica_petal_length = iris_petal_lengths["petal length (cm)"][iris_petal_lengths.species == "virginica"]
setosa_petal_length.head()
# Compute ECDFs
x_set, y_set = ecdf(setosa_petal_length)
x_vers, y_vers = ecdf(versicolor_petal_length)
x_virg, y_virg = ecdf(virginica_petal_length)
# Plot all ECDFs on the same plot
_ = plt.plot(x_set, y_set, marker=".", linestyle="none")
_ = plt.plot(x_vers, y_vers, marker=".", linestyle="none")
_ = plt.plot(x_virg, y_virg, marker=".", linestyle="none")
# Annotate the plot
plt.legend(('setosa', 'versicolor', 'virginica'), loc='lower right')
_ = plt.xlabel('petal length (cm)')
_ = plt.ylabel('ECDF')
# Display the plot
plt.show()
```
> Note: The ECDFs expose clear differences among the species. Setosa is much shorter, also with less absolute variability in petal length than versicolor and virginica.
## Onward toward the whole story!
> Important: “Exploratory data analysis can never be the
whole story, but nothing else can serve as the
foundation stone.”
—John Tukey
# Quantitative exploratory data analysis
We will compute useful summary statistics, which serve to concisely describe salient features of a dataset with a few numbers.
## Introduction to summary statistics: The sample mean and median
$$
mean = \bar{x} = \frac{1}{n} \sum_{i=1}^{n} x_i
$$
> ### Outliers
● Data points whose value is far greater or less than
most of the rest of the data
> ### The median
● The middle value of a data set
> Note: An outlier can significantly affect the value of the mean, but not the median
### Computing means
The mean of all measurements gives an indication of the typical magnitude of a measurement. It is computed using `np.mean()`.
```
# Compute the mean: mean_length_vers
mean_length_vers = np.mean(versicolor_petal_length)
# Print the result with some nice formatting
print('I. versicolor:', mean_length_vers, 'cm')
```
## Percentiles, outliers, and box plots
### Computing percentiles
We will compute the percentiles of petal length of _Iris versicolor_.
```
# Specify array of percentiles: percentiles
percentiles = np.array([2.5, 25, 50, 75, 97.5])
# Compute percentiles: ptiles_vers
ptiles_vers = np.percentile(versicolor_petal_length, percentiles)
# Print the result
ptiles_vers
```
### Comparing percentiles to ECDF
To see how the percentiles relate to the ECDF, we will plot the percentiles of _Iris versicolor_ petal lengths on the ECDF plot.
```
# Plot the ECDF
_ = plt.plot(x_vers, y_vers, '.')
_ = plt.xlabel('petal length (cm)')
_ = plt.ylabel('ECDF')
# Overlay percentiles as red diamonds.
_ = plt.plot(ptiles_vers, percentiles/100, marker='D', color='red',
linestyle="none")
# Show the plot
plt.show()
```
### Box-and-whisker plot
> Warning: Making a box plot for the petal lengths is unnecessary because the iris data set is not too large and the bee swarm plot works fine.
We will Make a box plot of the iris petal lengths.
```
# Create box plot with Seaborn's default settings
_ = sns.boxplot(data=iris_petal_lengths, x="species", y="petal length (cm)")
# Label the axes
_ = plt.xlabel("species")
_ = plt.ylabel("petal length (cm)")
# Show the plot
plt.show()
```
## Variance and standard deviation
> ### Variance
● The mean squared distance of the data from their
mean
> Tip: Variance: informally, a measure of the spread of data
> $$
variance = \frac{1}{n} \sum_{i=1}^{n} (x_i - \bar{x})^2
$$
> ### Standard deviation
$$
std = \sqrt {\frac{1}{n} \sum_{i=1}^{n} (x_i - \bar{x})^2}
$$
### Computing the variance
we will explicitly compute the variance of the petal length of _Iris veriscolor_, we will then use `np.var()` to compute it.
```
# Array of differences to mean: differences
differences = versicolor_petal_length-np.mean(versicolor_petal_length)
# Square the differences: diff_sq
diff_sq = differences**2
# Compute the mean square difference: variance_explicit
variance_explicit = np.mean(diff_sq)
# Compute the variance using NumPy: variance_np
variance_np = np.var(versicolor_petal_length)
# Print the results
print(variance_explicit, variance_np)
```
### The standard deviation and the variance
the standard deviation is the square root of the variance.
```
# Compute the variance: variance
variance = np.var(versicolor_petal_length)
# Print the square root of the variance
print(np.sqrt(variance))
# Print the standard deviation
print(np.std(versicolor_petal_length))
```
## Covariance and the Pearson correlation coefficient
> ### Covariance
● A measure of how two quantities vary together
> $$
covariance = \frac{1}{n} \sum_{i=1}^{n} (x_i - \bar{x})\ (y_i - \bar{y})
$$
> ### Pearson correlation coefficient
> $$
\rho = Pearson\ correlation = \frac{covariance}{(std\ of\ x)\ (std\ of\ y)} = \frac{variability\ due\ to\ codependence}{independent\ variability}
$$
### Scatter plots
When we made bee swarm plots, box plots, and ECDF plots in previous exercises, we compared the petal lengths of different species of _iris_. But what if we want to compare two properties of a single species? This is exactly what we will do, we will make a **scatter plot** of the petal length and width measurements of Anderson's _Iris versicolor_ flowers.
> Important: If the flower scales (that is, it preserves its proportion as it grows), we would expect the length and width to be correlated.
```
versicolor_petal_width = np.array([1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1. , 1.3, 1.4, 1. , 1.5, 1. ,
1.4, 1.3, 1.4, 1.5, 1. , 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4,
1.4, 1.7, 1.5, 1. , 1.1, 1. , 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3,
1.3, 1.2, 1.4, 1.2, 1. , 1.3, 1.2, 1.3, 1.3, 1.1, 1.3])
# Make a scatter plot of petal width against petal length
_ = plt.plot(versicolor_petal_length, versicolor_petal_width, marker=".", linestyle="none")
# Label the axes (fix: the y-axis shows petal *width*, not length)
_ = plt.xlabel("petal length, (cm)")
_ = plt.ylabel("petal width, (cm)")
# Show the result
plt.show()
```
> Tip: we see some correlation. Longer petals also tend to be wider.
### Computing the covariance
The covariance may be computed using the Numpy function `np.cov()`. For example, we have two sets of data $x$ and $y$, `np.cov(x, y)` returns a 2D array where entries `[0,1]` and `[1,0]` are the covariances. Entry `[0,0]` is the variance of the data in `x`, and entry `[1,1]` is the variance of the data in `y`. This 2D output array is called the **covariance matrix**, since it organizes the self- and covariance.
```
# Compute the covariance matrix: covariance_matrix
covariance_matrix = np.cov(versicolor_petal_length, versicolor_petal_width)
# Print covariance matrix
print(covariance_matrix)
# Extract covariance of length and width of petals: petal_cov
petal_cov = covariance_matrix[0,1]
# Print the length/width covariance
print(petal_cov)
```
### Computing the Pearson correlation coefficient
The Pearson correlation coefficient, also called the **Pearson r**, is often easier to interpret than the covariance. It is computed using the `np.corrcoef()` function. Like `np.cov()`, it takes two arrays as arguments and returns a 2D array. Entries `[0,0]` and `[1,1]` are necessarily equal to `1`, and the value we are after is entry `[0,1]`.
We will write a function, `pearson_r(x, y)` that takes in two arrays and returns the Pearson correlation coefficient. We will then use this function to compute it for the petal lengths and widths of $I.\ versicolor$.
```
def pearson_r(x, y):
    """Compute the Pearson correlation coefficient between two arrays.

    The correlation matrix returned by np.corrcoef() has ones on its
    diagonal; the off-diagonal entry [0, 1] is the correlation of x
    with y, which is what we return.
    """
    return np.corrcoef(x, y)[0, 1]
# Compute Pearson correlation coefficient for I. versicolor: r
r = pearson_r(versicolor_petal_length, versicolor_petal_width)
# Print the result
print(r)
```
# Thinking probabilistically-- Discrete variables
Statistical inference rests upon probability. Because we can very rarely say anything meaningful with absolute certainty from data, we use probabilistic language to make quantitative statements about data. We will think probabilistically about discrete quantities: those that can only take certain values, like integers.
## Probabilistic logic and statistical inference
### the goal of statistical inference
* To draw probabilistic conclusions about what we might expect if we collected the same data again.
* To draw actionable conclusions from data.
* To draw more general conclusions from relatively few data or observations.
> Note: Statistical inference involves taking your data to probabilistic conclusions about what you would expect if you took even more data, and you can make decisions based on these conclusions.
### Why we use the probabilistic language in statistical inference
* Probability provides a measure of uncertainty and this is crucial because we can quantify what we might expect if the data were acquired again.
* Data are almost never exactly the same when acquired again, and probability allows us to say how much we expect them to vary. We need probability to say how data might vary if acquired again.
> Note: Probabilistic language is in fact very precise. It precisely describes uncertainty.
## Random number generators and hacker statistics
> ### Hacker statistics
- Uses simulated repeated measurements to compute
probabilities.
> ### The np.random module
- Suite of functions based on random number generation
- `np.random.random()`: draw a number between $0$ and $1$
> ### Bernoulli trial
● An experiment that has two options,
"success" (True) and "failure" (False).
> ### Random number seed
- Integer fed into random number generating algorithm
- Manually seed random number generator if you need reproducibility
- Specified using `np.random.seed()`
> ### Hacker stats probabilities
- Determine how to simulate data
- Simulate many many times
- Probability is approximately fraction of trials with the outcome of interest
### Generating random numbers using the np.random module
we'll generate lots of random numbers between zero and one, and then plot a histogram of the results. If the numbers are truly random, all bars in the histogram should be of (close to) equal height.
```
# Seed the random number generator
np.random.seed(42)
# Initialize random numbers: random_numbers
random_numbers = np.empty(100000)
# Generate random numbers by looping over range(100000)
for i in range(100000):
random_numbers[i] = np.random.random()
# Plot a histogram
_ = plt.hist(random_numbers, bins=316, histtype="step", density=True)
_ = plt.xlabel("random numbers")
_ = plt.ylabel("counts")
# Show the plot
plt.show()
```
> Note: The histogram is almost exactly flat across the top, indicating that there is equal chance that a randomly-generated number is in any of the bins of the histogram.
### The np.random module and Bernoulli trials
> Tip: You can think of a Bernoulli trial as a flip of a possibly biased coin. Each coin flip has a probability $p$ of landing heads (success) and probability $1−p$ of landing tails (failure).
We will write a function to perform `n` Bernoulli trials, `perform_bernoulli_trials(n, p)`, which returns the number of successes out of `n` Bernoulli trials, each of which has probability $p$ of success. To perform each Bernoulli trial, we will use the `np.random.random()` function, which returns a random number between zero and one.
```
def perform_bernoulli_trials(n, p):
    """Perform n Bernoulli trials with success probability p
    and return the number of successes.

    Parameters
    ----------
    n : int
        Number of trials to perform.
    p : float
        Probability of success on each trial, in [0, 1].

    Returns
    -------
    int
        Number of trials whose uniform draw fell below p.
    """
    # Fix: initialize the counter as the integer 0, not the boolean
    # False (the original relied on bool/int coercion for `+= 1`).
    n_success = 0
    for _ in range(n):
        # Uniform draw on [0, 1): a value below p counts as a success
        if np.random.random() < p:
            n_success += 1
    return n_success
```
### How many defaults might we expect?
Let's say a bank made 100 mortgage loans. It is possible that anywhere between $0$ and $100$ of the loans will be defaulted upon. We would like to know the probability of getting a given number of defaults, given that the probability of a default is $p = 0.05$. To investigate this, we will do a simulation. We will perform 100 Bernoulli trials using the `perform_bernoulli_trials()` function and record how many defaults we get. Here, a success is a default.
> Important: Remember that the word "success" just means that the Bernoulli trial evaluates to True, i.e., did the loan recipient default?
You will do this for another $100$ Bernoulli trials. And again and again until we have tried it $1000$ times. Then, we will plot a histogram describing the probability of the number of defaults.
```
# Seed random number generator
np.random.seed(42)
# Initialize the number of defaults: n_defaults
n_defaults = np.empty(1000)
# Compute the number of defaults
for i in range(1000):
n_defaults[i] = perform_bernoulli_trials(100, 0.05)
# Plot the histogram with default number of bins; label your axes
_ = plt.hist(n_defaults, density=True)
_ = plt.xlabel('number of defaults out of 100 loans')
_ = plt.ylabel('probability')
# Show the plot
plt.show()
```
> Warning: This is actually not an optimal way to plot a histogram when the results are known to be integers. We will revisit this
### Will the bank fail?
If interest rates are such that the bank will lose money if 10 or more of its loans are defaulted upon, what is the probability that the bank will lose money?
```
# Compute ECDF: x, y
x,y = ecdf(n_defaults)
# Plot the ECDF with labeled axes
_ = plt.plot(x,y, marker=".", linestyle="none")
_ = plt.xlabel("number of defaults")
_ = plt.ylabel("ECDF")
# Show the plot
plt.show()
# Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money
n_lose_money = np.sum(n_defaults >= 10)
# Compute and print probability of losing money
print('Probability of losing money =', n_lose_money / len(n_defaults))
```
> Note: we most likely get 5/100 defaults. But we still have about a 2% chance of getting 10 or more defaults out of 100 loans.
## Probability distributions and stories: The Binomial distribution
> ### Probability mass function (PMF)
- The set of probabilities of discrete outcomes
> ### Probability distribution
- A mathematical description of outcomes
> ### Discrete Uniform distribution: the story
- The outcome of rolling a single fair die is Discrete Uniformly distributed.
> ### Binomial distribution: the story
- The number $r$ of successes in $n$ Bernoulli trials with
probability $p$ of success, is Binomially distributed
- The number $r$ of heads in $4$ coin flips with probability
$0.5$ of heads, is Binomially distributed
### Sampling out of the Binomial distribution
We will compute the probability mass function for the number of defaults we would expect for $100$ loans as in the last section, but instead of simulating all of the Bernoulli trials, we will perform the sampling using `np.random.binomial()`{% fn 1 %}.
> Note: This is identical to the calculation we did in the last set of exercises using our custom-written `perform_bernoulli_trials()` function, but far more computationally efficient.
Given this extra efficiency, we will take $10,000$ samples instead of $1000$. After taking the samples, we will plot the CDF. This CDF that we are plotting is that of the Binomial distribution.
```
# Take 10,000 samples out of the binomial distribution: n_defaults
n_defaults = np.random.binomial(100, 0.05, size=10000)
# Compute CDF: x, y
x,y = ecdf(n_defaults)
# Plot the CDF with axis labels
_ = plt.plot(x,y, marker=".", linestyle="-")
_ = plt.xlabel("number of defaults out of 100 loans")
_ = plt.ylabel("CDF")
# Show the plot
plt.show()
```
> Tip: If you know the story, using built-in algorithms to directly sample out of the distribution is ***much*** faster.
### Plotting the Binomial PMF
> Warning: plotting a nice looking PMF requires a bit of matplotlib trickery that we will not go into here.
we will plot the PMF of the Binomial distribution as a histogram. The trick is setting up the edges of the `bins` to pass to `plt.hist()` via the `bins` keyword argument. We want the bins centered on the integers. So, the edges of the bins should be $-0.5, 0.5, 1.5, 2.5, ...$ up to `max(n_defaults) + 1.5`. We can generate an array like this using `np.arange() `and then subtracting `0.5` from the array.
```
# Compute bin edges: bins
bins = np.arange(0, max(n_defaults) + 1.5) - 0.5
# Generate histogram
_ = plt.hist(n_defaults, density=True, bins=bins)
# Label axes
_ = plt.xlabel("number of defaults out of 100 loans")
_ = plt.ylabel("probability")
# Show the plot
plt.show()
```
## Poisson processes and the Poisson distribution
> ### Poisson process
- The timing of the next event is completely independent of when the previous event happened
> ### Examples of Poisson processes
- Natural births in a given hospital
- Hit on a website during a given hour
- Meteor strikes
- Molecular collisions in a gas
- Aviation incidents
- Buses in Poissonville
> ### Poisson distribution
- The number $r$ of arrivals of a Poisson process in a
given time interval with average rate of $λ$ arrivals
per interval is Poisson distributed.
- The number r of hits on a website in one hour with
an average hit rate of 6 hits per hour is Poisson
distributed.
> ### Poisson Distribution
- Limit of the Binomial distribution for low
probability of success and large number of trials.
- That is, for rare events.
### Relationship between Binomial and Poisson distributions
> Important: Poisson distribution is a limit of the Binomial distribution for rare events.
> Tip: Poisson distribution with arrival rate equal to $np$ approximates a Binomial distribution for $n$ Bernoulli trials with probability $p$ of success (with $n$ large and $p$ small). Importantly, the Poisson distribution is often simpler to work with because it has only one parameter instead of two for the Binomial distribution.
Let's explore these two distributions computationally. We will compute the mean and standard deviation of samples from a Poisson distribution with an arrival rate of $10$. Then, we will compute the mean and standard deviation of samples from a Binomial distribution with parameters $n$ and $p$ such that $np = 10$.
```
# Draw 10,000 samples out of Poisson distribution: samples_poisson
samples_poisson = np.random.poisson(10, size=10000)
# Print the mean and standard deviation
print('Poisson: ', np.mean(samples_poisson),
np.std(samples_poisson))
# Specify values of n and p to consider for Binomial: n, p
n = [20, 100, 1000]
p = [.5, .1, .01]
# Draw 10,000 samples for each n,p pair: samples_binomial
for i in range(3):
samples_binomial = np.random.binomial(n[i],p[i], size=10000)
# Print results
print('n =', n[i], 'Binom:', np.mean(samples_binomial),
np.std(samples_binomial))
```
> Note: The means are all about the same, which can be shown to be true by doing some pen-and-paper work. The standard deviation of the Binomial distribution gets closer and closer to that of the Poisson distribution as the probability $p$ gets lower and lower.
### Was 2015 anomalous?
In baseball, a no-hitter is a game in which a pitcher does not allow the other team to get a hit. This is a rare event, and since the beginning of the so-called modern era of baseball (starting in 1901), there have only been 251 of them through the 2015 season in over 200,000 games. The ECDF of the number of no-hitters in a season is shown to the right. Which probability distribution would be appropriate to describe the number of no-hitters we would expect in a given season? Both the Binomial and the Poisson apply, though the Poisson is easier to model and compute.
> Important: When we have rare events (low $p$, high $n$), the Binomial distribution is well-approximated by the Poisson distribution. The Poisson has a single parameter, the mean number of successes per time interval — in our case, the mean number of no-hitters per season.
1990 and 2015 featured the most no-hitters of any season of baseball (there were seven). Given that there are on average $\frac{251}{115}$ no-hitters per season, what is the probability of having seven or more in a season? Let's find out
```
# Draw 10,000 samples out of Poisson distribution: n_nohitters
n_nohitters = np.random.poisson(251/115, size=10000)
# Compute number of samples that are seven or greater: n_large
n_large = np.sum(n_nohitters >= 7)
# Compute probability of getting seven or more: p_large
p_large = n_large/10000
# Print the result
print('Probability of seven or more no-hitters:', p_large)
```
> Note: The result is about $0.007$. This means that it is not that improbable to see a 7-or-more no-hitter season in a century. We have seen two in a century and a half, so it is not unreasonable.
# Thinking probabilistically-- Continuous variables
It’s time to move onto continuous variables, such as those that can take on any fractional value. Many of the principles are the same, but there are some subtleties. We will be speaking the probabilistic language needed to launch into the inference techniques.
## Probability density functions
> ### Continuous variables
- Quantities that can take any value, not just
discrete values
> ### Probability density function (PDF)
- Continuous analog to the PMF
- Mathematical description of the relative likelihood
of observing a value of a continuous variable
## Introduction to the Normal distribution
> ### Normal distribution
- Describes a continuous variable whose PDF has a single symmetric peak.
>|Parameter| |Calculated from data|
|---|---|---|
|mean of a Normal distribution|≠| mean computed from data|
|st. dev. of a Normal distribution|≠|standard deviation computed from data|
### The Normal PDF
```
# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10
samples_std1 = np.random.normal(20,1,size=100000)
samples_std3 = np.random.normal(20, 3, size=100000)
samples_std10 = np.random.normal(20, 10, size=100000)
# Make histograms
_ = plt.hist(samples_std1, density=True, histtype="step", bins=100)
_ = plt.hist(samples_std3, density=True, histtype="step", bins=100)
_ = plt.hist(samples_std10, density=True, histtype="step", bins=100)
# Make a legend, set limits and show plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))
plt.ylim(-0.01, 0.42)
plt.show()
```
> Note: You can see how the different standard deviations result in PDFs of different widths. The peaks are all centered at the mean of 20.
### The Normal CDF
```
# Generate CDFs
x_std1, y_std1 = ecdf(samples_std1)
x_std3, y_std3 = ecdf(samples_std3)
x_std10, y_std10 = ecdf(samples_std10)
# Plot CDFs
_ = plt.plot(x_std1, y_std1, marker=".", linestyle="none")
_ = plt.plot(x_std3, y_std3, marker=".", linestyle="none")
_ = plt.plot(x_std10, y_std10, marker=".", linestyle="none")
# Make a legend and show the plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
```
> Note: The CDFs all pass through the mean at the 50th percentile; the mean and median of a Normal distribution are equal. The width of the CDF varies with the standard deviation.
## The Normal distribution: Properties and warnings
### Are the Belmont Stakes results Normally distributed?
Since 1926, the Belmont Stakes is a $1.5$ mile-long race of 3-year old thoroughbred horses. <a href="https://en.wikipedia.org/wiki/Secretariat_(horse)">Secretariat</a> ran the fastest Belmont Stakes in history in $1973$. While that was the fastest year, 1970 was the slowest because of unusually wet and sloppy conditions. With these two outliers removed from the data set, we will compute the mean and standard deviation of the Belmont winners' times. We will sample out of a Normal distribution with this mean and standard deviation using the `np.random.normal()` function and plot a CDF. Overlay the ECDF from the winning Belmont times {% fn 2 %}.
```
belmont_no_outliers = np.array([148.51, 146.65, 148.52, 150.7 , 150.42, 150.88, 151.57, 147.54,
149.65, 148.74, 147.86, 148.75, 147.5 , 148.26, 149.71, 146.56,
151.19, 147.88, 149.16, 148.82, 148.96, 152.02, 146.82, 149.97,
146.13, 148.1 , 147.2 , 146. , 146.4 , 148.2 , 149.8 , 147. ,
147.2 , 147.8 , 148.2 , 149. , 149.8 , 148.6 , 146.8 , 149.6 ,
149. , 148.2 , 149.2 , 148. , 150.4 , 148.8 , 147.2 , 148.8 ,
149.6 , 148.4 , 148.4 , 150.2 , 148.8 , 149.2 , 149.2 , 148.4 ,
150.2 , 146.6 , 149.8 , 149. , 150.8 , 148.6 , 150.2 , 149. ,
148.6 , 150.2 , 148.2 , 149.4 , 150.8 , 150.2 , 152.2 , 148.2 ,
149.2 , 151. , 149.6 , 149.6 , 149.4 , 148.6 , 150. , 150.6 ,
149.2 , 152.6 , 152.8 , 149.6 , 151.6 , 152.8 , 153.2 , 152.4 ,
152.2 ])
# Compute mean and standard deviation: mu, sigma
mu = np.mean(belmont_no_outliers)
sigma = np.std(belmont_no_outliers)
# Sample out of a normal distribution with this mu and sigma: samples
samples = np.random.normal(mu, sigma, size=10000)
# Get the CDF of the samples and of the data
x_theor, y_theor = ecdf(samples)
x,y = ecdf(belmont_no_outliers)
# Plot the CDFs and show the plot
_ = plt.plot(x_theor, y_theor)
_ = plt.plot(x, y, marker='.', linestyle='none')
_ = plt.xlabel('Belmont winning time (sec.)')
_ = plt.ylabel('CDF')
plt.show()
```
> Note: The theoretical CDF and the ECDF of the data suggest that the winning Belmont times are, indeed, Normally distributed. This also suggests that in the last 100 years or so, there have not been major technological or training advances that have significantly affected the speed at which horses can run this race.
### What are the chances of a horse matching or beating Secretariat's record?
The probability that the winner of a given Belmont Stakes will run it as fast or faster than Secretariat assuming that the Belmont winners' times are Normally distributed (with the 1970 and 1973 years removed)
```
# Take a million samples out of the Normal distribution: samples
samples = np.random.normal(mu, sigma, size=1000000)
# Compute the fraction that are faster than 144 seconds: prob
prob = np.sum(samples<=144)/len(samples)
# Print the result
print('Probability of besting Secretariat:', prob)
```
> Note: We had to take a million samples because the probability of a fast time is very low and we had to be sure to sample enough. We get that there is only a 0.06% chance of a horse running the Belmont as fast as Secretariat.
## The Exponential distribution
The waiting time between arrivals of a Poisson process is Exponentially distributed
> ### Possible Poisson process
- Nuclear incidents:
- Timing of one is independent of all others
$f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta})$
### If you have a story, you can simulate it!
Sometimes, the story describing our probability distribution does not have a named distribution to go along with it. In these cases, fear not! You can always simulate it.
we looked at the rare event of no-hitters in Major League Baseball. _Hitting the cycle_ is another rare baseball event. When a batter hits the cycle, he gets all four kinds of hits, a single, double, triple, and home run, in a single game. Like no-hitters, this can be modeled as a Poisson process, so the time between hits of the cycle are also Exponentially distributed.
How long must we wait to see both a no-hitter and then a batter hit the cycle? The idea is that we have to wait some time for the no-hitter, and then after the no-hitter, we have to wait for hitting the cycle. Stated another way, what is the total waiting time for the arrival of two different Poisson processes? The total waiting time is the time waited for the no-hitter, plus the time waited for the hitting the cycle.
> Important: We will write a function to sample out of the distribution described by this story.
```
def successive_poisson(tau1, tau2, size=1):
    """Sample total waiting times for two successive Poisson processes.

    Draws `size` waiting times from an Exponential distribution with mean
    `tau1`, then `size` more with mean `tau2`, and returns their element-wise
    sum (the total wait for both events to occur, one after the other).
    """
    # Wait for the first process, then the second; the total is the sum.
    first_wait = np.random.exponential(tau1, size=size)
    second_wait = np.random.exponential(tau2, size=size)
    return first_wait + second_wait
```
### Distribution of no-hitters and cycles
We'll use the sampling function to compute the waiting time to observe a no-hitter and hitting of the cycle. The mean waiting time for a no-hitter is $764$ games, and the mean waiting time for hitting the cycle is $715$ games.
```
# Draw samples of waiting times: waiting_times
# (764 / 715 games are the mean waits for a no-hitter and hitting the cycle)
waiting_times = successive_poisson(764, 715, size=100000)
# Make the histogram; density=True normalizes it to a PDF estimate
_ = plt.hist(waiting_times, bins=100, density=True, histtype="step")
# Label axes
_ = plt.xlabel("Waiting times")
_ = plt.ylabel("probability")
# Show the plot
plt.show()
```
Notice that the PDF is peaked, unlike the waiting time for a single Poisson process. For fun (and enlightenment), Let's also plot the CDF.
```
# Empirical CDF of the simulated waiting times (ecdf is defined earlier in the notebook)
x,y = ecdf(waiting_times)
_ = plt.plot(x,y)
_ = plt.plot(x,y, marker=".", linestyle="none")
_ = plt.xlabel("Waiting times")
_ = plt.ylabel("CDF")
plt.show()
```
{{'For this exercise and all going forward, the random number generator is pre-seeded for you (with `np.random.seed(42))` to save you typing that each time.' | fndetail: 1 }}
{{'we scraped the data concerning the Belmont Stakes from the [Belmont Wikipedia page](https://en.wikipedia.org/wiki/Belmont_Stakes).' | fndetail: 2 }}
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('../scripts')
import numpy as np
import os, h5py
import pandas as pd
import variant_effect
# read df and add strand
# Each CAGI file is named "<set>_<regulator>.tsv"; the first 7 rows are header metadata.
all_dfs = []
cagi_data = '../data/CAGI/'
combined_filename = '../data/combined_cagi.bed'
for filename in os.listdir(cagi_data):
    prefix, regulator = filename.split('.tsv')[0].split('_')
    one_reg = pd.read_csv(os.path.join(cagi_data,filename), skiprows=7, sep='\t', header=None)
    one_reg['regulator'] = regulator
    one_reg['set'] = prefix
    all_dfs.append(one_reg)
combined_cagi = pd.concat(all_dfs)
# Build BED-style columns: '+' strand, a 1-bp end coordinate, 'chr'-prefixed chromosome
combined_cagi.insert(4, 'strand', '+')
combined_cagi.insert(2,'end',combined_cagi.iloc[:,1]+1)
combined_cagi.iloc[:,0] = 'chr'+combined_cagi.iloc[:,0].astype(str)
combined_cagi.to_csv(combined_filename, sep='\t', header=False, index=None)
output_filename = '../data/nonneg_cagi_3K.bed'
# Expand each 1-bp variant to a fixed-size window, then fetch the underlying sequences
variant_effect.expand_range(combined_filename, output_filename)
fa_filename = '../data/cagi_3k.fa'
coords_list, seqs_list = variant_effect.convert_bed_to_seq(output_filename, fa_filename, genomefile='/home/shush/genomes/hg19.fa')
window = 3072
bad_lines = []
N = len(seqs_list)
nonneg_df = pd.read_csv(output_filename, sep='\t', header=None)
mid = window // 2
onehot_ref = []
onehot_alt = []
coord_np = np.empty((N, 4)) # chrom, start, end coordinate array
# Offset of the variant within the window, by strand.
# NOTE(review): presumably 1535/1536 are the strand-dependent window midpoints — confirm against expand_range.
pos_dict = {'+': 1535, '-':1536}
for i,(chr_s_e, seq) in enumerate(zip(coords_list, seqs_list)):
    alt = ''
    # strand is encoded inside the coordinate string, e.g. "chr1:100-200(+)"
    strand = chr_s_e.split('(')[-1].split(')')[0]
    pos = pos_dict[strand]
    # coord_np[i,3] = pos_dict[strand] - 1535
    # Sanity check: the reference base in the fetched sequence must match the BED ref allele
    if seq[pos] != nonneg_df.iloc[i, 3]:
        # print('Error in line ' + str(i))
        bad_lines.append(i)
    else:
        alt = nonneg_df.iloc[i,4]
        onehot = variant_effect.dna_one_hot(seq)
        # Copy the reference one-hot and mutate only the variant position
        mutated_onehot = onehot.copy()
        mutated_onehot[pos] = variant_effect.dna_one_hot(alt)[0]
        onehot_ref.append(onehot)
        onehot_alt.append(mutated_onehot)
onehot_alt = np.array(onehot_alt)
onehot_ref = np.array(onehot_ref)
# Drop rows whose reference base mismatched, then persist metadata and ref/alt one-hots
included_df = nonneg_df[~nonneg_df.index.isin(bad_lines)]
included_df.to_csv('../data/final_cagi_metadata.csv')
onehot_ref_alt = h5py.File('../data/CAGI_onehot.h5', 'w')
onehot_ref_alt.create_dataset('ref', data=onehot_ref)
onehot_ref_alt.create_dataset('alt', data=onehot_alt)
onehot_ref_alt.close()
```
## Sanity check that only one nucleotide is different
```
# Reopen the HDF5 and inspect where ref/alt differ (should be only the variant position, 1535)
onehot_ref_alt = h5py.File('../data/CAGI_onehot.h5', 'r')
np.argwhere(onehot_ref_alt['ref'][0,:,:] != onehot_ref_alt['alt'][0,:,:])
onehot_ref_alt['ref'][0,1535,:], onehot_ref_alt['alt'][0,1535,:]
```
## Run functional variant calling with eixisting model
```
#Read in created dataset
onehot_ref_alt = h5py.File('../data/CAGI_onehot.h5', 'r')
# [()] reads the full dataset into memory as a numpy array
ref = onehot_ref_alt['ref'][()]
alt = onehot_ref_alt['alt'][()]
ref.shape
# Quantiative model
import variant_effect
variant_effect.vcf_quantitative('../tutorial_outputs/',ref,alt,2048,
                                '../data/cagi_robust',robust = True)
#Binary model
import tensorflow as tf
# Clear existing TF graph/session state before loading the binary model
tf.keras.backend.clear_session()
variant_effect.vcf_binary('../tutorial_binary/files/best_model.h5',ref,alt,-1,2048,
                          '../data/cagi_binary_robust',robust=True)
```
| github_jupyter |
# TorchDyn Quickstart
**TorchDyn is the toolkit for continuous models in PyTorch. Play with state-of-the-art architectures or use its powerful libraries to create your own.**
Central to the `torchdyn` approach are continuous neural networks, where *width*, *depth* (or both) are taken to their infinite limit. On the optimization front, we consider continuous "data-stream" regimes and gradient flow methods, where the dataset represents a time-evolving signal processed by the neural network to adapt its parameters.
By providing a centralized, easy-to-access collection of model templates, tutorial and application notebooks, we hope to speed-up research in this area and ultimately contribute to turning neural differential equations into an effective tool for control, system identification and common machine learning tasks.
```
from torchdyn.models import *
from torchdyn.data_utils import *
from torchdyn import *
```
## Generate data from a static toy dataset
We’ll be generating data from toy datasets. We provide a wide range of datasets often used to benchmark and understand neural ODEs. Here we will use the classic moons dataset and train a neural ODE for binary classification.
```
# Generate the classic two-moons binary classification dataset
d = ToyDataset()
X, yn = d.generate(n_samples=520, dataset_type='moons')
import matplotlib.pyplot as plt
# Scatter the raw samples, colored by class label
colors = ['orange', 'blue']
fig = plt.figure(figsize=(3,3))
ax = fig.add_subplot(111)
for i in range(len(X)):
    ax.scatter(X[i,0], X[i,1], color=colors[yn[i].int()])
```
Generated data can be easily loaded in the dataloader with standard `PyTorch` calls
```
import torch
import torch.utils.data as data
# Use the GPU when available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
# Full-batch loading: batch_size equals the dataset size, no shuffling
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
```
The learner is defined as....
```
import torch.nn as nn
import pytorch_lightning as pl
class Learner(pl.LightningModule):
    """Minimal PyTorch-Lightning wrapper that trains `model` with cross-entropy.

    Fix: the original did `defaults.update(settings)` with no `defaults`
    in scope, which raises NameError on construction; it also mutated the
    shared default dict argument. We copy `settings` instead.
    """
    def __init__(self, model:nn.Module, settings:dict={}):
        super().__init__()
        # Copy so the (shared) default dict argument is never mutated.
        # NOTE(review): if a global `defaults` dict exists elsewhere in the
        # notebook, merge it here before the user settings — confirm.
        self.settings = dict(settings)
        self.model = model
        self.c = 0  # counter (unused below)
    def forward(self, x):
        return self.model(x)
    def training_step(self, batch, batch_idx):
        # Standard supervised step: forward pass + cross-entropy loss
        x, y = batch
        y_hat = self.model(x)
        loss = nn.CrossEntropyLoss()(y_hat, y)
        logs = {'train_loss': loss}
        return {'loss': loss, 'log': logs}
    def configure_optimizers(self):
        # Plain Adam over the wrapped model's parameters
        return torch.optim.Adam(self.model.parameters(), lr=0.01)
    def train_dataloader(self):
        # Reuses the notebook-global trainloader defined above
        return trainloader
```
## Define a Neural ODE
Analogously to most forward neural models we want to realize a map
$$
x \mapsto \hat y
$$
where $\hat y$ becomes the best approximation of a true output $y$ given an input $x$.\
In torchdyn you can define very simple neural ODE models of the form
$$ \left\{
\begin{aligned}
\dot{h}(s) &= f(h(s), \theta)\\
h(0) &= x\\
\hat y & = h(1)
\end{aligned}
\right. \quad s\in[0,1]
$$
by just specifying a neural network $f$ and giving some simple settings.
**Note:** This neural ODE model is of *depth-invariant* type, as neither $f$ explicitly depends on $s$ nor are the parameters $\theta$ depth-varying. Together with the *depth-variant* counterpart, with $s$ concatenated in the vector field, it was first proposed and implemented by [[Chen T. Q. et al, 2018]](https://arxiv.org/abs/1806.07366)
### Define the vector field (DEFunc)
The first step is to define a `torch.nn.Sequential` object and wrap it with the `DEFunc` class from torchdyn. This automatically defines the vector field $f(h,\theta)$ of the neural ODE
```
# Vector field f(h, theta): a one-hidden-layer tanh MLP wrapped in DEFunc so
# torchdyn can use it as the right-hand side of the neural ODE
f = DEFunc(nn.Sequential(
        nn.Linear(2, 64),
        nn.Tanh(),
        nn.Linear(64,2)
    )
)
```
In this case we chose $f$ to be a simple MLP with one hidden layer and $\tanh$ activation
### Define the NeuralDE
The final step to define a neural ODE object is to instantiate an object of the torchdyn's class `NeuralDE` passing some preferences and `f`.
In this case with `settings` we just specify that:
* we want a `'classic'` neural ODE;
* we will use the `'dopri5'` (Dormand-Prince) ODE solver from `torchdiffeq`;
* we compute backward gradients with the `'adjoint'` method.
```
# classic neural ODE, Dormand-Prince (dopri5) solver, adjoint-method gradients
settings = {'type':'classic', 'solver':'dopri5', 'backprop_style':'adjoint'}
model = NeuralDE(f, settings).to(device)
```
## Train the Model
```
learn = Learner(model)
# Train for 200-300 epochs (old pytorch-lightning min/max_nb_epochs API)
trainer = pl.Trainer(min_nb_epochs=200, max_nb_epochs=300)
trainer.fit(learn)
```
With the method `trajectory` of `NeuralDE` objects you can quickly evaluate the entire trajectory of each data point in `X_train` on an interval `s_span`
```
# Evaluate each training point's trajectory at 100 depths s in [0, 1]
s_span = torch.linspace(0,1,100)
trajectory = model.trajectory(X_train, s_span).detach().cpu()
```
### Plot the Training Results
We can first plot the trajectories of the data points in the depth domain $s$
```
# Plot each coordinate of every trajectory against depth s
# NOTE(review): loop hardcodes 500 points but 520 samples were generated above — confirm intended
color=['orange', 'blue']
fig = plt.figure(figsize=(8,2))
ax0 = fig.add_subplot(121)
ax1 = fig.add_subplot(122)
for i in range(500):
    ax0.plot(s_span, trajectory[:,i,0], color=color[int(yn[i])], alpha=.1);
    ax1.plot(s_span, trajectory[:,i,1], color=color[int(yn[i])], alpha=.1);
ax0.set_xlabel(r"$s$ [Depth]")
ax0.set_ylabel(r"$h_0(s)$")
ax0.set_title("Dimension 0")
ax1.set_xlabel(r"$s$ [Depth]")
ax1.set_ylabel(r"$h_1(s)$")
ax1.set_title("Dimension 1")
```
Then the trajectory in the *state-space*
```
# Same trajectories, traced in the (h0, h1) state-space instead of vs. depth
fig = plt.figure(figsize=(3,3))
ax = fig.add_subplot(111)
for i in range(500):
    ax.plot(trajectory[:,i,0], trajectory[:,i,1], color=color[int(yn[i])], alpha=.1);
ax.set_xlabel(r"$h_0$")
ax.set_ylabel(r"$h_1$")
ax.set_title("Flows in the state-space")
```
As you can see, the neural ODE steers the data points into regions of null loss with a continuous flow in the depth domain. Finally, we can also plot the learned vector field $f$
```
plot_static_vector_field(model, trajectory)
```
**Sweet! You trained your first neural ODE! Now go on and learn more advanced models with the next tutorials**
| github_jupyter |
```
import os
from enum import Enum
import gzip
import time
import numpy as np
from scipy.sparse import dok_matrix, csr_matrix
import tensorflow as tf
# Attalos Imports
import attalos.util.log.log as l
from attalos.dataset.dataset import Dataset
from attalos.evaluation.evaluation import Evaluation
# Local models
from mse import MSEModel
from negsampling import NegSamplingModel
from fast0tag import FastZeroTagModel
# Setup global objects
logger = l.getLogger(__name__)
from attalos.imgtxt_algorithms.regress2sum.multihot import MultihotModel
from attalos.imgtxt_algorithms.regress2sum.naivesum import NaiveSumModel
from attalos.imgtxt_algorithms.regress2sum.wdv import WDVModel
from attalos.dataset.wordvectors.glove import GloveWrapper
# Temp object using duck typing to replace command line arguments
# (a bare lambda serves as a cheap attribute bag; attributes are set below)
args = lambda: None
#args.image_feature_file_train = "/local_data/teams/attalos/features/image/espgame_train_20160823_inception.hdf5"
#args.text_feature_file_train = "/local_data/teams/attalos/features/text/espgame_train_20160823_text.json.gz"
#args.image_feature_file_test = "/local_data/teams/attalos/features/image/espgame_test_20160823_inception.hdf5"
#args.text_feature_file_test = "/local_data/teams/attalos/features/text/espgame_test_20160823_text.json.gz"
# IAPR-TC12 train/test image and text features (ESP-Game paths above kept for quick switching)
args.image_feature_file_train = "/local_data/teams/attalos/features/image/iaprtc_train_20160816_inception.hdf5"
args.text_feature_file_train = "/local_data/teams/attalos/features/text/iaprtc_train_20160816_text.json.gz"
args.image_feature_file_test = "/local_data/teams/attalos/features/image/iaprtc_test_20160816_inception.hdf5"
args.text_feature_file_test = "/local_data/teams/attalos/features/text/iaprtc_test_20160816_text.json.gz"
args.word_vector_file = "/local_data/kylez/glove.6B.200d.txt"
args.word_vector_type = "glove"
args.model_type = "wdv"
args.cross_eval = False
args.in_memory = True  # load image features into memory up front
args.model_input_path = None
args.model_output_path = None
args.num_epochs = 400
args.batch_size = 100
args.learning_rate = 0.0001
class WordVectorTypes(Enum):
    """Supported word-vector formats, selected by name via args.word_vector_type."""
    w2v = 1
    glove = 2
class ModelTypes(Enum):
    """Model selector, looked up below via ModelTypes[args.model_type].value.

    NOTE(review): mse/negsampling/fast0tag have int values, so .value for
    those names yields an int, not a model class — confirm they are resolved
    elsewhere before use.
    """
    mse = 1
    negsampling = 2
    fast0tag = 3
    multihot = MultihotModel
    naivesum = NaiveSumModel
    wdv = WDVModel
def train_batch(sess, model, batch):
    """Run one optimization step on a single (inputs, targets) batch.

    Returns the training loss reported by model.fit.
    """
    inputs, targets = batch
    return model.fit(sess, inputs, targets)
def train_epoch(sess, model, train_dataset, batch_size):
    """Train on every batch of one epoch; return the mean per-batch loss."""
    epoch_losses = []
    for batch in model.to_batches(train_dataset, batch_size):
        epoch_losses.append(train_batch(sess, model, batch))
    return sum(epoch_losses) / float(len(epoch_losses))
def train(sess, model, num_epochs, train_dataset, batch_size, epoch_verbosity_rate=10):
    """Run `num_epochs` training epochs, logging the average loss every
    `epoch_verbosity_rate` epochs.

    Fix: use `range` instead of Python-2-only `xrange`, so the loop also
    runs under Python 3 (range is equally valid for iteration on Python 2).
    """
    for cur_epoch in range(num_epochs):
        verbose = cur_epoch % epoch_verbosity_rate == 0
        avg_training_loss = train_epoch(sess, model, train_dataset, batch_size)
        if verbose:
            logger.info("Finished epoch %s. (Avg. training loss: %s)" % (cur_epoch, avg_training_loss))
def load_wv_model(word_vector_file, word_vector_type):
    """Load a word-vector model of the requested type and wrap it.

    Fix: dispatch on the `word_vector_type` parameter instead of the
    module-level `args` global, so the function actually honors its
    arguments (the original silently ignored `word_vector_type`).
    """
    if word_vector_type == WordVectorTypes.glove.name:
        from glove import Glove
        glove_model = Glove.load_stanford(word_vector_file)
        wv_model = GloveWrapper(glove_model)
    else:  # word_vector_type == WordVectorTypes.w2v.name
        import word2vec
        w2v_model = word2vec.load(word_vector_file)
        # NOTE(review): W2VWrapper is not imported in this cell — confirm it
        # is in scope before selecting w2v input.
        wv_model = W2VWrapper(w2v_model)
    return wv_model
logger.info("Parsing train and test datasets.")
train_dataset = Dataset(args.image_feature_file_train, args.text_feature_file_train, load_image_feats_in_mem=args.in_memory)
test_dataset = Dataset(args.image_feature_file_test, args.text_feature_file_test)
logger.info("Reading word vectors from file.")
wv_model = load_wv_model(args.word_vector_file, args.word_vector_type)
#sess.close()
# Allow GPU memory to grow on demand and log device placement of ops
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
# Look up the model class by name via the ModelTypes enum
model_cls = ModelTypes[args.model_type].value
logger.info("Selecting model class: %s" % model_cls.__name__)
#datasets = [train_dataset] if args.cross_eval else [train_dataset, test_dataset]
model = model_cls(wv_model, train_dataset=train_dataset, test_dataset=test_dataset, **vars(args))
model.initialize_model(sess)
logger.info("Starting training phase.")
train(sess, model, args.num_epochs, train_dataset, args.batch_size) #, train_dataset, wv_model, test_dataset=test_dataset, epoch_verbosity_rate=100)
logger.info("Starting evaluation phase.")
test_x, test_y = model.to_ndarrs(test_dataset)
predictions = model.predict(sess, test_x)
# Evaluate precision/recall/F1 at the top k=5 predicted tags
evaluator = Evaluation(test_y, predictions, k=5)
logger.info("Evaluation (precision, recall, f1): %s" % evaluator.evaluate())
```
| github_jupyter |
# Setup
```
# Python 3 compatability
from __future__ import division, print_function
# system functions that are always useful to have
import time, sys, os
# basic numeric setup
import numpy as np
import math
from numpy import linalg
import scipy
from scipy import stats
# plotting
import matplotlib
from matplotlib import pyplot as plt
# fits data
from astropy.io import fits
# inline plotting
%matplotlib inline
# re-defining plotting defaults
# (larger ticks, padding, and fonts for the big multi-panel figures below)
from matplotlib import rcParams
rcParams.update({'xtick.major.pad': '7.0'})
rcParams.update({'xtick.major.size': '7.5'})
rcParams.update({'xtick.major.width': '1.5'})
rcParams.update({'xtick.minor.pad': '7.0'})
rcParams.update({'xtick.minor.size': '3.5'})
rcParams.update({'xtick.minor.width': '1.0'})
rcParams.update({'ytick.major.pad': '7.0'})
rcParams.update({'ytick.major.size': '7.5'})
rcParams.update({'ytick.major.width': '1.5'})
rcParams.update({'ytick.minor.pad': '7.0'})
rcParams.update({'ytick.minor.size': '3.5'})
rcParams.update({'ytick.minor.width': '1.0'})
rcParams.update({'axes.titlepad': '15.0'})
rcParams.update({'axes.labelpad': '15.0'})
rcParams.update({'font.size': 30})
```
# HSC SynPipe
Plot comparisons to data from HSC Synpipe.
```
# convert from magnitudes to fluxes
def inv_magnitude(mag, err, zeropoints=1.):
    """Convert magnitudes (and magnitude errors) to fluxes.

    flux = zeropoints * 10**(-0.4 * mag); the flux error follows from
    first-order error propagation: d(flux) = 0.4 ln(10) * flux * d(mag).
    """
    flux = zeropoints * 10 ** (-0.4 * mag)
    flux_err = 0.4 * np.log(10.) * flux * err
    return flux, flux_err
bands = ['G', 'R', 'I', 'Z', 'Y']  # HSC broadband filters
cpivot, mrange = 1e-4, 2. # pivot point & mag range used to shift offsets
nmin = 10 # minimum number of objects required to plot results
boxcar = 6 # bins used for boxcar used to determine variance for plotting
mgrid = np.arange(18, 28., 0.15) # magnitude bins
dmgrid = np.arange(-0.1, 0.1, 0.0075) # dmag bins
dmpgrid = np.arange(-0.02, 0.02, 1e-5) # dmag (predicted) bins
plt.figure(figsize=(50, 14))
plt.suptitle('Tract 8764 (Good Seeing)', y=1.02, fontsize=40)
# One column per band: top row shows the dmag distribution, bottom row the
# measured vs. predicted mean offsets.
for count, i in enumerate(bands):
    # load data
    data = fits.open('data/synpipe/star1_HSC-{0}_good.fits'.format(i))[1].data
    # top panel: dmag distribution (shifted)
    plt.subplot(2, 5, 1 + count)
    moff = np.median(data['mag'] - data['mag.psf'])
    n, bx, by, _ = plt.hist2d(data['mag'],
                              data['mag.psf'] - data['mag'] + moff,
                              [mgrid, dmgrid])
    xc, yc = 0.5 * (bx[1:] + bx[:-1]), 0.5 * (by[1:] + by[:-1]) # bin centers
    nmag = np.sum(n, axis=1) # counts per magnitude bin
    nmean = np.sum(yc * n, axis=1) / np.sum(n, axis=1) # mean
    nstd = np.sqrt(np.sum((yc[None, :] - nmean[:, None])**2 * n, axis=1) / np.sum(n, axis=1)) # error
    # compute SNR as a function of magnitude
    mconst = 2.5 / np.log(10)
    fout, fe = inv_magnitude(data['mag.psf'], data['mag.psf.err'])
    fin, fe2 = inv_magnitude(data['mag'], data['mag.psf.apcorr.err'])
    snr = fout/np.sqrt(fe**2 + (0.02 * fout)**2)
    # first order
    n, bx, by = np.histogram2d(data['mag'], -mconst * snr**-2,
                               [mgrid, dmpgrid])
    xc, yc = 0.5 * (bx[1:] + bx[:-1]), 0.5 * (by[1:] + by[:-1])
    cmean = np.sum(yc * n, axis=1) / np.sum(n, axis=1)
    # prettify
    plt.xlabel('{0}-band PSF Mag'.format(i))
    plt.ylabel(r'$\Delta\,$mag')
    # NOTE(review): bare except silently falls back to the full mag range
    try:
        midx = np.where(cmean < -0.012)[0][0]
    except:
        midx = -1
        pass
    plt.xlim([mgrid[0], mgrid[midx]])
    plt.tight_layout()
    # bottom panel: computed mean offsets vs predicted mean offsets
    plt.subplot(2, 5, 6 + count)
    mhigh = xc[np.abs(cmean - np.nanmax(cmean)) > cpivot][0]
    mlow = mhigh - mrange
    nsel = nmag > nmin
    offset = np.nanmedian(nmean[(xc >= mlow) & (xc <= mhigh) & nsel])
    nmean_err = nstd/np.sqrt(nmag)
    # boxcar scatter added in quadrature to the per-bin error
    nmean_serr = np.array([np.std(nmean[i:i+boxcar]) for i in range(len(nmean) - boxcar)])
    nmean_err[boxcar//2:-boxcar//2] = np.sqrt(nmean_err[boxcar//2:-boxcar//2]**2 + nmean_serr**2)
    plt.fill_between(xc[nsel], (nmean - offset - nmean_err)[nsel],
                     (nmean - offset + nmean_err)[nsel], color='gray', alpha=0.8)
    # apply linear bias correction
    lin_coeff = np.polyfit(xc[(xc >= mlow - 1) & (xc <= mhigh + 1) & nsel],
                           nmean[(xc >= mlow - 1) & (xc <= mhigh + 1) & nsel], 1)
    lin_off = np.poly1d(lin_coeff)(xc)
    nmean -= lin_off
    offset = np.nanmedian(nmean[(xc >= mlow) & (xc <= mhigh) & nsel])
    plt.fill_between(xc[nsel], (nmean - offset - nmean_err)[nsel],
                     (nmean - offset + nmean_err)[nsel], color='orange', alpha=0.4)
    # plot prediction
    plt.plot(xc[nsel], cmean[nsel], lw=6, color='red', alpha=0.7)
    # prettify
    plt.xlabel('{0}-band PSF Mag'.format(i))
    plt.ylabel(r'Mean $\Delta\,$mag')
    try:
        midx = np.where(cmean < -0.012)[0][0]
    except:
        midx = -1
        pass
    plt.xlim([mgrid[0], mgrid[midx]])
    plt.ylim([-0.02, 0.01])
    plt.tight_layout()
    plt.legend(['Prediction', 'No Corr.', 'Linear Corr.'])
# save figure
plt.savefig('plots/hsc_synpipe_goodseeing.png', bbox_inches='tight')
plt.figure(figsize=(50, 14))
plt.suptitle('Tract 9699 (Poor Seeing)', y=1.02, fontsize=40)
# Same per-band analysis as above, for the poor-seeing tract (star2 files)
for count, i in enumerate(bands):
    # load data
    data = fits.open('data/synpipe/star2_HSC-{0}_good.fits'.format(i))[1].data
    # top panel: dmag distribution (shifted)
    plt.subplot(2, 5, 1 + count)
    moff = np.median(data['mag'] - data['mag.psf'])
    n, bx, by, _ = plt.hist2d(data['mag'],
                              data['mag.psf'] - data['mag'] + moff,
                              [mgrid, dmgrid])
    xc, yc = 0.5 * (bx[1:] + bx[:-1]), 0.5 * (by[1:] + by[:-1]) # bin centers
    nmag = np.sum(n, axis=1) # counts per magnitude bin
    nmean = np.sum(yc * n, axis=1) / np.sum(n, axis=1) # mean
    nstd = np.sqrt(np.sum((yc[None, :] - nmean[:, None])**2 * n, axis=1) / np.sum(n, axis=1)) # error
    # compute SNR as a function of magnitude
    mconst = 2.5 / np.log(10)
    fout, fe = inv_magnitude(data['mag.psf'], data['mag.psf.err'])
    fin, fe2 = inv_magnitude(data['mag'], data['mag.psf.apcorr.err'])
    snr = fout/np.sqrt(fe**2 + (0.02 * fout)**2)
    # first order
    n, bx, by = np.histogram2d(data['mag'], -mconst * snr**-2,
                               [mgrid, dmpgrid])
    xc, yc = 0.5 * (bx[1:] + bx[:-1]), 0.5 * (by[1:] + by[:-1])
    cmean = np.sum(yc * n, axis=1) / np.sum(n, axis=1)
    # prettify
    plt.xlabel('{0}-band PSF Mag'.format(i))
    plt.ylabel(r'$\Delta\,$mag')
    # NOTE(review): bare except silently falls back to the full mag range
    try:
        midx = np.where(cmean < -0.012)[0][0]
    except:
        midx = -1
        pass
    plt.xlim([mgrid[0], mgrid[midx]])
    plt.tight_layout()
    # bottom panel: computed mean offsets vs predicted mean offsets
    plt.subplot(2, 5, 6 + count)
    mhigh = xc[np.abs(cmean - np.nanmax(cmean)) > cpivot][0]
    mlow = mhigh - mrange
    nsel = nmag > nmin
    offset = np.nanmedian(nmean[(xc >= mlow) & (xc <= mhigh) & nsel])
    nmean_err = nstd/np.sqrt(nmag)
    # boxcar scatter added in quadrature to the per-bin error
    nmean_serr = np.array([np.std(nmean[i:i+boxcar]) for i in range(len(nmean) - boxcar)])
    nmean_err[boxcar//2:-boxcar//2] = np.sqrt(nmean_err[boxcar//2:-boxcar//2]**2 + nmean_serr**2)
    plt.fill_between(xc[nsel], (nmean - offset - nmean_err)[nsel],
                     (nmean - offset + nmean_err)[nsel], color='gray', alpha=0.8)
    # apply linear bias correction
    lin_coeff = np.polyfit(xc[(xc >= mlow - 1) & (xc <= mhigh + 1) & nsel],
                           nmean[(xc >= mlow - 1) & (xc <= mhigh + 1) & nsel], 1)
    lin_off = np.poly1d(lin_coeff)(xc)
    nmean -= lin_off
    offset = np.nanmedian(nmean[(xc >= mlow) & (xc <= mhigh) & nsel])
    plt.fill_between(xc[nsel], (nmean - offset - nmean_err)[nsel],
                     (nmean - offset + nmean_err)[nsel], color='orange', alpha=0.4)
    # plot prediction
    plt.plot(xc[nsel], cmean[nsel], lw=6, color='red', alpha=0.7)
    # prettify
    plt.xlabel('{0}-band PSF Mag'.format(i))
    plt.ylabel(r'Mean $\Delta\,$mag')
    try:
        midx = np.where(cmean < -0.012)[0][0]
    except:
        midx = -1
        pass
    plt.xlim([mgrid[0], mgrid[midx]])
    plt.ylim([-0.02, 0.01])
    plt.tight_layout()
    plt.legend(['Prediction', 'No Corr.', 'Linear Corr.'])
# save figure
plt.savefig('plots/hsc_synpipe_poorseeing.png', bbox_inches='tight')
```
| github_jupyter |
<h1> Polynomial Regression </h1>
This cell is regarding polynomial regression, first we will grab the dataset and clean it a little bit.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
# Load the NYC pumpkin price data and encode the ordinal size categories.
nyc_pumpkins = pd.read_csv("./new-york_9-24-2016_9-30-2017.csv")
cat_map = {
    'sml': 0,
    'med': 1,
    'med-lge': 2,
    'lge': 3,
    'xlge': 4,
    'exjbo': 5
}
nyc_pumpkins = nyc_pumpkins.assign(
    size=nyc_pumpkins['Item Size'].map(cat_map),
    # BUGFIX: parenthesize so price is the midpoint of the High/Low prices;
    # the original computed High + (Low / 2) because of operator precedence.
    price=(nyc_pumpkins['High Price'] + nyc_pumpkins['Low Price']) / 2,
    # binary target: medium-large or bigger
    size_class=(nyc_pumpkins['Item Size'].map(cat_map) >= 2).astype(int)
)
# Keep only the engineered columns and drop rows with unmapped sizes.
nyc_pumpkins = nyc_pumpkins.drop([c for c in nyc_pumpkins.columns if c not in ['size', 'price', 'size_class']],
                                 axis='columns')
nyc_pumpkins = nyc_pumpkins.dropna()
nyc_pumpkins.head(10)
nyc_pumpkins.shape
```
Now we will split into train and test sets with the useful train_test_split method from sklearn. We will test with polynomials of degree 1, 2 and 4. Remember, a polynomial of degree 1 is just linear regression!
In this case, as we want to predict the size, which is an integer, we will round the predicted value and see how close we are, allowing a distance of 1 class.
```
# Hold out 20% of the rows for testing
X_train, X_test, y_train, y_test = train_test_split(nyc_pumpkins['price'], nyc_pumpkins['size'], test_size=0.20, random_state=42, shuffle=True) #split 20% into test set
degrees = [1, 2, 4]
fig = plt.figure(figsize=(20,10))
for i in range(len(degrees)):
    ax = plt.subplot(1, len(degrees)+1, i + 2)
    plt.setp(ax, xticks=(), yticks=())
    # Polynomial expansion of price followed by ordinary least squares
    polynomial_features = PolynomialFeatures(degree=degrees[i],
                                             include_bias=True)
    linear_regression = LinearRegression()
    pipeline = Pipeline([("polynomial_features", polynomial_features),
                         ("linear_regression", linear_regression)])
    pipeline.fit(X_train[:, np.newaxis], y_train)
    # Evaluate the models using crossvalidation
    # Round the continuous prediction to the nearest integer size class
    predicted_sizes = np.round(pipeline.predict(X_test[:, np.newaxis]))
    import matplotlib.pyplot as plt
    plt.style.use('fivethirtyeight')
    # Bar-plot how often the rounded prediction is within one size class
    pd.Series(
        np.abs((np.array(y_test) - predicted_sizes).flatten()) <= 1
    ).value_counts().plot.bar(title='Accuracy Within 1 Class \n for degree {}'.format(degrees[i]))
```
We did pretty well with polynomial regression! Let's analyze how well it can generalize. One problem we will have with degree 4 is correlation — let's check it!
```
# Pairwise correlations among the expanded features price, price^2, price^3, price^4
correlations = pd.DataFrame(PolynomialFeatures(degree=4, include_bias=False).fit_transform(np.array(nyc_pumpkins['price']).reshape(-1,1))).corr()
# plot correlation matrix
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,4,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(['X', 'X^2', 'X^3', 'X^4'])
ax.set_yticklabels(['X', 'X^2', 'X^3', 'X^4'])
plt.show()
```
That's bad! Finally, to see what the issue with polynomial regression is, let's add just *ONE* outlier!
```
X_train, X_test, y_train, y_test = train_test_split(nyc_pumpkins['price'], nyc_pumpkins['size'], test_size=0.20, random_state=42, shuffle=True) #split 20% into test set
# Inject a single extreme outlier into the training data.
# NOTE(review): pandas Series label-based assignment — the row with index
# label 20 is overwritten; confirm that label exists after the split.
X_train[20]= -10
y_train[20] = 30
degrees = [1, 2, 4]
fig = plt.figure(figsize=(20,10))
for i in range(len(degrees)):
    ax = plt.subplot(1, len(degrees)+1, i + 2)
    plt.setp(ax, xticks=(), yticks=())
    polynomial_features = PolynomialFeatures(degree=degrees[i],
                                             include_bias=True)
    linear_regression = LinearRegression()
    pipeline = Pipeline([("polynomial_features", polynomial_features),
                         ("linear_regression", linear_regression)])
    pipeline.fit(X_train[:, np.newaxis], y_train)
    # Evaluate the models using crossvalidation
    predicted_sizes = np.round(pipeline.predict(X_test[:, np.newaxis]))
    import matplotlib.pyplot as plt
    plt.style.use('fivethirtyeight')
    # Bar-plot how often the rounded prediction is within one size class
    pd.Series(
        np.abs((np.array(y_test) - predicted_sizes).flatten()) <= 1
    ).value_counts().plot.bar(title='Accuracy Within 1 Class \n for degree {}'.format(degrees[i]))
```
Polynomial regression is really non-robust!
| github_jupyter |
# Sentiment analysis with TFLearn
In this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using [TFLearn](http://tflearn.org/), a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you.
We'll start off by importing all the modules we'll need, then load and prepare the data.
```
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
```
## Preparing the data
Following along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this.
### Read the data
Use the pandas library to read the reviews and postive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.
```
# Reviews and labels are one-column files with no header row
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
```
### Counting word frequency
To start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a [bag of words](https://en.wikipedia.org/wiki/Bag-of-words_model). We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the [Counter class](https://docs.python.org/2/library/collections.html#collections.Counter).
> **Exercise:** Create the bag of words from the reviews data and assign it to `total_counts`. The reviews are stores in the `reviews` [Pandas DataFrame](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html). If you want the reviews as a Numpy array, use `reviews.values`. You can iterate through the rows in the DataFrame with `for idx, row in reviews.iterrows():` ([documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.iterrows.html)). When you break up the reviews into words, use `.split(' ')` instead of `.split()` so your results match ours.
```
from collections import Counter
# Bag of words: count how often each word appears across all reviews.
# Fix: the original left `total_counts = # bag of words here`, which is a
# SyntaxError placeholder; fill in the exercise per the text above
# (iterate rows, split on single spaces).
total_counts = Counter()
for _, row in reviews.iterrows():
    total_counts.update(row[0].split(' '))
print("Total words in data set: ", len(total_counts))
```
Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort `vocab` by the count value and keep the 10000 most frequent words.
```
# Keep the 10000 most frequent words as the vocabulary
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
```
What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
```
# Rarest word we kept and how many times it appears
print(vocab[-1], ': ', total_counts[vocab[-1]])
```
The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
**Note:** When you run, you may see a different word from the one shown above, but it will also have the value `30`. That's because there are many words tied for that number of counts, and the `Counter` class does not guarantee which one will be returned in the case of a tie.
Now for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension.
> **Exercise:** Create a dictionary called `word2idx` that maps each word in the vocabulary to an index. The first word in `vocab` has index `0`, the second word has index `1`, and so on.
```
# Map each vocabulary word to its index (0-based, in frequency order).
# Fix: the original left `word2idx = ## create the word-to-index dictionary
# here`, which is a SyntaxError placeholder.
word2idx = {word: i for i, word in enumerate(vocab)}
```
### Text to vector function
Now we can write a function that converts a some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this:
* Initialize the word vector with [np.zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html), it should be the length of the vocabulary.
* Split the input string of text into a list of words with `.split(' ')`. Again, if you call `.split()` instead, you'll get slightly different results than what we show here.
* For each word in that list, increment the element in the index associated with that word, which you get from `word2idx`.
**Note:** Since all words aren't in the `vocab` dictionary, you'll get a key error if you run into one of those words. You can use the `.get` method of the `word2idx` dictionary to specify a default returned value when you make a key error. For example, `word2idx.get(word, None)` returns `None` if `word` doesn't exist in the dictionary.
```
def text_to_vector(text):
    """Convert a string into a bag-of-words count vector of length len(vocab).

    Exercise stub: initialize a zeros vector with np.zeros, split *text* on
    single spaces with .split(' '), and for each word present in ``word2idx``
    increment the element at that word's index. Words not in the vocabulary
    are skipped (use ``word2idx.get``).
    """
    pass
```
If you do this right, the following code should return
```
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
```
```
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
```
Now, run through our entire review data set and convert each review to a word vector.
```
# One bag-of-words row per review: int matrix of shape (n_reviews, vocab_size)
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
    # each row of the reviews DataFrame has the raw review string in column 0
    word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
```
### Train, Validation, Test sets
Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function `to_categorical` from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later.
```
# Binary target: 1 for 'positive' reviews, 0 otherwise
Y = (labels=='positive').astype(np.int_)
records = len(labels)

# Shuffle row indices so train/test membership is random
shuffled_indices = np.arange(records)
np.random.shuffle(shuffled_indices)

# NOTE: this is the fraction of data used for TRAINING (the original name
# `test_fraction` was misleading -- 90% goes to train, 10% to test)
train_fraction = 0.9
split_at = int(records*train_fraction)  # computed once instead of twice
train_split, test_split = shuffled_indices[:split_at], shuffled_indices[split_at:]

# to_categorical one-hot encodes the targets so a 2-unit softmax output can be used
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
```
## Building the network
[TFLearn](http://tflearn.org/) lets you build the network by [defining the layers](http://tflearn.org/layers/core/).
### Input layer
For the input layer, you just need to tell it how many units you have. For example,
```
net = tflearn.input_data([None, 100])
```
would create a network with 100 input units. The first element in the list, `None` in this case, sets the batch size. Setting it to `None` here leaves it at the default batch size.
The number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units.
### Adding layers
To add new hidden layers, you use
```
net = tflearn.fully_connected(net, n_units, activation='ReLU')
```
This adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument `net` is the network you created in the `tflearn.input_data` call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with `n_units`, and set the activation function with the `activation` keyword. You can keep adding layers to your network by repeatedly calling `net = tflearn.fully_connected(net, n_units)`.
### Output layer
The last layer you add is used as the output layer. Therefore, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax.
```
net = tflearn.fully_connected(net, 2, activation='softmax')
```
### Training
To set how you train the network, use
```
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
```
Again, this is passing in the network you've been building. The keywords:
* `optimizer` sets the training method, here stochastic gradient descent
* `learning_rate` is the learning rate
* `loss` determines how the network error is calculated. In this example, it is calculated with the categorical cross-entropy.
Finally you put all this together to create the model with `tflearn.DNN(net)`. So it ends up looking something like
```
net = tflearn.input_data([None, 10]) # Input
net = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden
net = tflearn.fully_connected(net, 2, activation='softmax') # Output
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
model = tflearn.DNN(net)
```
> **Exercise:** Below in the `build_model()` function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc.
```
# Network building
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
model = tflearn.DNN(net)
return model
```
## Initializing the model
Next we need to call the `build_model()` function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want.
> **Note:** You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon.
```
# Build a fresh model (build_model resets the TF graph each time it is called)
model = build_model()
```
## Training the network
Now that we've constructed the network, saved as the variable `model`, we can fit it to the data. Here we use the `model.fit` method. You pass in the training features `trainX` and the training targets `trainY`. Below I set `validation_set=0.1` which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the `batch_size` and `n_epoch` keywords, respectively. Below is the code to fit the network to our word vectors.
You can rerun `model.fit` to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. **Only use the test set after you're completely done training the network.**
```
# Training
# validation_set=0.1 holds out 10% of the training data for validation
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=10)
```
## Testing
After you're satisfied with your hyperparameters, you can run the network on the test set to measure its performance. Remember, *only do this after finalizing the hyperparameters*.
```
# Column 0 of the softmax output corresponds to the Y==0 class under the
# to_categorical encoding above; thresholding it at 0.5 and comparing against
# the class-0 column of testY yields per-example correctness
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
```
## Try out your own text!
```
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
print('Sentence: {}'.format(sentence))
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
```
| github_jupyter |
### Cleaning data associated with bills: utterances, summaries; so they are ready for input to pointer-gen model - this is the new cleaning method implementation
There are 6541 BIDs which overlap between the utterances and summaries datasets (using all the summary data). There are 359 instances in which the summaries are greater than 100 tokens in length, and 41 instances in which the summaries are greater than 201 tokens in length. In these instances, the summaries with less than 201 tokens were cut to their first 100 tokens (anything over 201 tokens is cut entirely). There are 374 instances in which the utterances are less than 70 tokens in length. In the final dataset(old) of 6000 examples, there are 865 examples of resolutions.
There are 374+127=501 instances in which the utterances are less than 100 tokens in length.
```
import json
import numpy as np
import ast
import re
import spacy
from collections import Counter,defaultdict
import warnings
warnings.filterwarnings('ignore')  # silence library deprecation noise in notebook output
# small English spaCy pipeline; used below only for tokenization
nlp = spacy.load("en_core_web_sm")
with open("../data/bill_summaries.json") as summaries_file: # loading in the data
    bill_summaries = json.load(summaries_file)
with open("../data/bill_utterances.json") as utterances_file:
    bill_utterances = json.load(utterances_file)
# this pipeline only uses the California (CA) bills
ca_bill_utterances = bill_utterances['CA']
```
### Cleaning data before the processing to format which is accepted by pointer-gen model
```
def clean_bill_summaries(bill_summaries,max_summary_length=201,ignore_resolutions=False):
    """Filter and normalize bill summaries, keeping only usable entries.

    An entry is dropped when:
      1) the summary does not start with "This" (probable encoding error)
      2) "page 1" occurs in the text (indicates improper encoding)
      3) the tokenized text exceeds max_summary_length tokens (very long
         summaries indicate a probable pdf-encoding error)
      4) ignore_resolutions is True and the BID contains "R"
    For summaries with ordering markers (" 1)"," 2)","(1)","(2)"," a)","(a)"),
    the markers are stripped and the text re-tokenized.

    args:
        bill_summaries: dict mapping BID -> summary record with a 'text' key
        max_summary_length: max token length of summaries to keep
        ignore_resolutions (bool): whether to ignore resolutions and only output bills
    returns:
        (bill_summary_info, num_cutoff_counter): dict mapping BID ->
        {'summary': record, 'summary_tokens': token list}, and the count of
        summaries dropped for exceeding max_summary_length
    """
    num_cutoff_counter=0 # counts the number of summaries ignored due to being too long
    bill_summary_info = defaultdict(dict) # stores both summaries and utterances for each CA bill
    for bid,summary in bill_summaries.items():
        text = summary['text']
        if "page 1" in text: # ignore this instance, indicator of encoding error
            continue
        if text[0:4] != "This": # relatively strong indicator that there was error in encoding
            continue
        if ignore_resolutions and "R" in bid: # ignore this instance if wanting to ignore resolutions
            continue
        tokens = [str(token) for token in nlp(text)]
        if len(tokens)>max_summary_length: # many errors in pdf encoding in which end state not reached
            num_cutoff_counter += 1
            continue
        # removing the implicit ordering for all instances
        # (raw strings used so "\(" is a regex escape, not an invalid string escape)
        if " 1)" in text or " 2)" in text or "(1)" in text or "(2)" in text or " a)" in text or " b)" in text or "(a)" in text or "(b)" in text:
            text = re.sub(r" \([0-9]\)","",text)
            text = re.sub(r" [0-9]\)","",text)
            text = re.sub(r" \([a-j]\)","",text)
            text = re.sub(r" [a-j]\)","",text)
            tokens = [str(token) for token in nlp(text)]
        # shallow-copy the record so updating 'text' below does not mutate the
        # caller's bill_summaries dict (the original code aliased it)
        bill_summary_info[bid]['summary'] = dict(summary)
        bill_summary_info[bid]['summary']['text']=text # text is occasionally updated (when ordering removed)
        bill_summary_info[bid]['summary_tokens'] = tokens
    return bill_summary_info,num_cutoff_counter
# max_summary_length=650 keeps effectively all summaries; resolutions are included
bill_summary_info,_ = clean_bill_summaries(bill_summaries,max_summary_length=650,ignore_resolutions=False)
len(bill_summary_info)  # number of bills with a usable summary
def clean_bill_utterances(bill_summary_info,ca_bill_utterances,minimum_utterance_tokens=99,token_cutoff=1000):
    """ cleans and combines the summary and utterance data
    args:
        bill_summary_info: holds cleaned information about bill summaries
        ca_bill_utterances: dict mapping BID -> {'utterances': list of utterance lists}
        token_cutoff: max number of tokens to consider for utterances
        minimum_utterance_tokens: minimum number of utterance tokens allowable
    returns:
        (all_bill_info, all_tokens_dict, num_utterance_counter): per-BID combined
        records, per-BID lowercased token lists (utterances + summary, uncut),
        and the count of BIDs dropped for having too few utterance tokens
    NOTE(review): all_bill_info[bid] aliases bill_summary_info[bid] and is
    mutated in place below, so the input dict gains the new keys too.
    """
    num_utterance_counter=0 # counts num. examples ignored due to utterances being too short
    all_bill_info = {}
    all_tokens_dict = {} # stores all tokens for a given bid (ignoring token_cutoff)
    for bid in ca_bill_utterances:
        if bid in bill_summary_info: # there is a summary assigned to this bill
            all_utterances = [] # combining all discussions (did) for this bid together
            for utterance_list in ca_bill_utterances[bid]['utterances']:
                all_utterances+=utterance_list
            # tokenize each utterance separately so utterance boundaries are known
            all_token_lists = [[str(token) for token in nlp(utterance)] for utterance in all_utterances]
            all_tokens = [] # getting a single stream of tokens
            multitask_y = [] # 0 if not end of utterance, 1 if end of utterance (multitask component)
            for token_list in all_token_lists:
                multitask_y += [0 for _ in range(len(token_list)-1)]+[1]
                all_tokens += token_list
            multitask_loss_mask = [1 for _ in range(len(multitask_y))] # getting multitask components to correct shape
            # pad (if short) then truncate (if long) both multitask vectors to exactly token_cutoff
            if len(multitask_loss_mask)<token_cutoff:
                amount_to_pad = token_cutoff-len(multitask_loss_mask)
                multitask_loss_mask += [0 for _ in range(amount_to_pad)]
                multitask_y += [0 for _ in range(amount_to_pad)]
            multitask_loss_mask = multitask_loss_mask[:token_cutoff]
            multitask_y = multitask_y[:token_cutoff]
            assert(len(multitask_loss_mask)==token_cutoff and len(multitask_y)==token_cutoff)
            # len(all_tokens)-len(all_token_lists) discounts one token per utterance
            # (presumably the end-of-utterance token) -- TODO confirm intent
            if len(all_tokens)-len(all_token_lists)>=minimum_utterance_tokens: # ignore bids which don't have enough utterance tokens
                all_tokens_dict[bid]=[token.lower() for token in all_tokens] # adding all utterance tokens
                all_tokens_dict[bid]+=[token.lower() for token in bill_summary_info[bid]['summary_tokens']] # adding all summary tokens
                all_bill_info[bid] = bill_summary_info[bid]
                all_tokens = all_tokens[:token_cutoff] # taking up to max number of tokens
                all_bill_info[bid]['utterances']=all_utterances
                all_bill_info[bid]['utterance_tokens']=all_tokens
                all_bill_info[bid]['resolution'] = "R" in bid
                all_bill_info[bid]['multitask_y'] = multitask_y
                all_bill_info[bid]['multitask_loss_mask'] = multitask_loss_mask
            else:
                num_utterance_counter += 1
    return all_bill_info,all_tokens_dict,num_utterance_counter
# token_cutoff=500: utterance inputs are truncated/padded to 500 tokens downstream
all_bill_info,all_tokens_dict,_ = clean_bill_utterances(bill_summary_info,ca_bill_utterances,token_cutoff=500)
len(all_bill_info)  # number of (summary, utterances) examples kept
```
### Processing data to get to format which is accepted by pointer-gen model
```
### using pretrained Glove vectors
# Maps word -> 100-d float32 Glove embedding vector
word_to_embedding = {}
# Glove files are UTF-8; specify the encoding so this doesn't crash on
# platforms whose default locale encoding differs (e.g. cp1252 on Windows).
# Iterate the file object lazily instead of readlines() to avoid holding a
# second full copy of the large file in memory as a list of lines.
with open("../data/glove.6B/glove.6B.100d.txt", encoding="utf-8") as glove_file:
    for line in glove_file:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:],dtype='float32')
        word_to_embedding[word] = coefs
print(len(word_to_embedding))
# getting all unique tokens used to get words which will be part of the fixed vocabulary
## specifically specifying that I want a vocabulary size of 30k (adding less common words up to that threshold)
all_tokens = []
for bid in all_tokens_dict:
    all_tokens += all_tokens_dict[bid]
word_freq = Counter(all_tokens)
words_by_freq = (list(word_freq.items()))
words_by_freq.sort(key=lambda tup: tup[1],reverse=True) # sorting by occurance freq.
# every word seen at least 3 times is always kept
most_freq_words = [word_tup[0] for word_tup in words_by_freq if word_tup[1] >= 3]
# words seen exactly twice are kept only if they have a pretrained Glove vector,
# filling up to a total vocabulary of 30000 (3 slots reserved for <PAD>/<SENT>/<UNK>)
most_freq_words += [word_tup[0] for word_tup in words_by_freq if word_tup[1] == 2 and word_tup[0] in word_to_embedding][:30000-3-len(most_freq_words)]
less_freq_words = [word_tup[0] for word_tup in words_by_freq if word_tup[1] < 2]
print(most_freq_words[0:10])
print(less_freq_words[0:10])
print(len(most_freq_words),len(less_freq_words))
## new addition to this where I store the word embeddings for the vocabulary
# assigning indices for all words, and adding <PAD>,<SENT>,<UNK> symbols
fixed_vocab_word_to_index = {"<PAD>":0,"<SENT>":1,"<UNK>":2} # for words assigned to the fixed_vocabulary
fixed_vocab_index_to_word = {0:"<PAD>",1:"<SENT>",2:"<UNK>"}
# the 3 special symbols get small random (trainable) 100-d embeddings
word_embeddings = [np.random.uniform(low=-0.05,high=0.05,size=100).astype("float32") for _ in range(3)]
index = 3 # starting index for all words
# assigning indices to most common words:
for word in most_freq_words:
    fixed_vocab_word_to_index[word]=index
    fixed_vocab_index_to_word[index]=word
    index += 1
    if word in word_to_embedding: # use pre-trained embedding
        word_embeddings.append(word_to_embedding[word])
    else: # initialize a trainable embedding
        word_embeddings.append(np.random.uniform(low=-0.05,high=0.05,size=100).astype("float32"))
word_embeddings = np.stack(word_embeddings) # (vocab_size, 100) embedding matrix
print(len(fixed_vocab_word_to_index),word_embeddings.shape)
## saving all of the vocabulary related information
np.save("../data/len_500_data/word_embeddings.npy",word_embeddings)
with open("../data/len_500_data/word_to_index.json","w+") as out_file:
    json.dump(fixed_vocab_word_to_index,out_file)
with open("../data/len_500_data/index_to_word.json","w+") as out_file:
    json.dump(fixed_vocab_index_to_word,out_file)
num_fixed_words = len(fixed_vocab_word_to_index) # fixed vocabulary size, incl. special symbols
token_cutoff=500 # this is the amount to pad up to for the input representation
# creating the input data representations for the model - input is padded up to a length of 500
x = [] # stores the integer/index representation for all input
x_indices = [] # stores the joint probability vector indices for all words in the input
x_indices_dicts = [] # stores the dicts for assigning words which are not in the fixed_vocabulary
att_mask = [] # stores the attention masks (0 for valid words, -np.inf for padding)
multitask_y = [] # stores labels for the multitask component
multitask_loss_mask = [] # stores loss mask for the multitask component
## data stores for debugging/error analysis
bill_information_dict = {} # stores summary(text),utterances(2d list of utterances),resolution(boolean) for each BID
bids = [] # stores the BIDs in sequential order
for bid in all_bill_info:
    # creating representations for data store
    bill_information_dict[bid] = {"summary":all_bill_info[bid]["summary"]["text"],"utterances":all_bill_info[bid]["utterances"],"resolution":all_bill_info[bid]["resolution"]}
    bids.append(bid)
    # getting the multitask data representations (already padded to length 500 upstream)
    this_multitask_y = all_bill_info[bid]['multitask_y']
    this_multitask_loss_mask = all_bill_info[bid]['multitask_loss_mask']
    multitask_y.append(this_multitask_y)
    multitask_loss_mask.append(this_multitask_loss_mask)
    # creating the standard input representation:
    utterance_tokens = [token.lower() for token in all_bill_info[bid]["utterance_tokens"]]
    x_rep = [] # assigning indices to words, if input word not part of fixed_vocab, assign to <UNK>
    for token in utterance_tokens:
        if token in fixed_vocab_word_to_index:
            x_rep.append(fixed_vocab_word_to_index[token])
        else:
            x_rep.append(fixed_vocab_word_to_index['<UNK>'])
    att_mask_rep = [0 for i in range(len(x_rep))]
    amount_to_pad = token_cutoff-len(x_rep)
    x_rep += [0 for i in range(amount_to_pad)] # padding the input
    att_mask_rep += [-np.inf for i in range(amount_to_pad)] # -inf zeroes padded positions after softmax
    x.append(x_rep)
    att_mask.append(att_mask_rep)
    # creating the joint probability representation for the input:
    ## (the index in joint prob vector that each input word probability should be assigned to)
    index=num_fixed_words # start index for assignment to joint_probability vector, length of fixed_vocab_size
    non_vocab_dict = {} # stores all OOV words for this bid
    this_x_indices = [] # joint prob vector indices for this bid
    for token in utterance_tokens:
        if token in fixed_vocab_word_to_index:
            this_x_indices.append(fixed_vocab_word_to_index[token])
        else:
            if token in non_vocab_dict: # this word is OOV but has been seen before
                this_x_indices.append(non_vocab_dict[token])
            else: # this word is OOV and has never been seen before
                non_vocab_dict[token]=index
                this_x_indices.append(index)
                index += 1
    x_indices_dicts.append(non_vocab_dict)
    # this_x_indices has the same pre-padding length as x_rep, so amount_to_pad still applies
    this_x_indices += [0 for i in range(amount_to_pad)] # padding will be masked out in att calculation, so padding with 0 here is valid
    x_indices.append(this_x_indices)
# this is the largest number of OOV words for a given bid utterances
max([len(dic) for dic in x_indices_dicts])
# creating the output representations for the model - output is padded up to a length of 101
## the last index is for <SENT> to indicate the end of decoding (assuming representation is shorter than 100 tokens)
## assuming the summary is greater than 100 tokens in length, we simply cut off the first 101 tokens
### when we do this cutoff, we do NOT include that <SENT> token as the 102nd token
## all words in output that are not in input utterances or in fixed_vocab_vector are assigned 3:<UNK>
y = [] # stores the index representations for all words in the headlines (this is never used)
loss_mask = [] # 1 for valid words, 0 for padding
decoder_x = [] # starts with 1:<SENT>, followed by y[0:len(headline)-1] (this is the input for teacher-forcing)(101x1)
y_indices = [] # index for the correct decoder prediction, in the joint-probability vector
total_oov_words = 0
resolution_bools = [] # bool, whether a given example is a resolution (False=bill); used for train_test_split
# NOTE: bid_i alignment with x_indices_dicts relies on this loop iterating
# all_bill_info in the same (insertion) order as the input-representation loop above
for bid_i,bid in enumerate(all_bill_info.keys()):
    # creating standard output representation:
    summary_tokens = [token.lower() for token in all_bill_info[bid]["summary_tokens"]]
    y_rep = [] # not used in the model, stores indices using only fixed_vocab_vector
    for token in summary_tokens:
        if token in fixed_vocab_word_to_index:
            y_rep.append(fixed_vocab_word_to_index[token])
        else:
            y_rep.append(fixed_vocab_word_to_index['<UNK>'])
    resolution_bools.append(all_bill_info[bid]['resolution'])
    ## this is a new addition from before, including longer summaries, but just cutting off the text
    if len(y_rep) > 100: # simply cutoff to the first 101 tokens
        y_rep = y_rep[:101]
    else: # append a end-of-sentence indicator
        y_rep.append(fixed_vocab_word_to_index['<SENT>'])
    loss_mask_rep = [1 for i in range(len(y_rep))]
    decoder_x_rep = [1]+y_rep[0:len(y_rep)-1] # embedding word in input but not in fixed_vocab is currently set to <UNK>
    amount_to_pad = 101-len(y_rep) # 100+1 represents final <SENT> prediction
    y_rep += [0 for i in range(amount_to_pad)]
    loss_mask_rep += [0 for i in range(amount_to_pad)] # cancels out loss contribution from padding
    decoder_x_rep += [0 for i in range(amount_to_pad)]
    # creating joint-probability representation of output:
    non_vocab_dict = x_indices_dicts[bid_i] # OOV->joint-index map built from this bid's input
    y_indices_rep = []
    for token in summary_tokens:
        if token in fixed_vocab_word_to_index: # word is in fixed_vocabulary
            y_indices_rep.append(fixed_vocab_word_to_index[token])
        elif token in non_vocab_dict: # word is OOV but in the input utterances, use the index assigned to this word in x_indices
            y_indices_rep.append(non_vocab_dict[token])
        else: # word is OOV and not in input utterances
            y_indices_rep.append(fixed_vocab_word_to_index["<UNK>"])
            total_oov_words += 1
    # same cut/append logic as y_rep above, so amount_to_pad carries over correctly
    if len(y_indices_rep) > 100: # simply cutoff to the first 101 tokens
        y_indices_rep = y_indices_rep[:101]
    else: # if len <= 100, last prediction should be <SENT>
        y_indices_rep.append(fixed_vocab_word_to_index['<SENT>'])
    y_indices_rep += [0 for i in range(amount_to_pad)] # padding will be ignored by loss_mask
    y.append(y_rep)
    loss_mask.append(loss_mask_rep)
    decoder_x.append(decoder_x_rep)
    y_indices.append(y_indices_rep)
# convert everything to fixed-shape typed arrays for the model
x = np.array(x).astype("int32")
x_indices = np.array(x_indices).astype("int32")
att_mask = np.array(att_mask).astype("float32")
loss_mask = np.array(loss_mask).astype("float32")
decoder_x = np.array(decoder_x).astype("int32")
y_indices = np.array(y_indices).astype("int32")
multitask_y = np.array(multitask_y).astype("float32")
multitask_loss_mask = np.array(multitask_loss_mask).astype("float32")
print(x.shape,x_indices.shape,att_mask.shape)
print(loss_mask.shape,decoder_x.shape,y_indices.shape)
print(multitask_y.shape,multitask_loss_mask.shape)
bids = np.array(bids)
print(bids.shape,len(bill_information_dict))
```
#### Shuffling the data so that only bills are in the validation and test sets
```
from sklearn.utils import shuffle
# Partition every array into resolution rows vs bill rows using boolean masks,
# so the val/test sets can be drawn from bills only
x_resolution = x[resolution_bools]
x_indices_resolution = x_indices[resolution_bools]
att_mask_resolution = att_mask[resolution_bools]
loss_mask_resolution = loss_mask[resolution_bools]
decoder_x_resolution = decoder_x[resolution_bools]
y_indices_resolution = y_indices[resolution_bools]
bids_resolution = bids[resolution_bools]
multitask_y_resolution = multitask_y[resolution_bools]
multitask_loss_mask_resolution = multitask_loss_mask[resolution_bools]
bill_bools = [not res_bool for res_bool in resolution_bools] # reversal
x_bill = x[bill_bools]
x_indices_bill = x_indices[bill_bools]
att_mask_bill = att_mask[bill_bools]
loss_mask_bill = loss_mask[bill_bools]
decoder_x_bill = decoder_x[bill_bools]
y_indices_bill = y_indices[bill_bools]
bids_bill = bids[bill_bools]
multitask_y_bill = multitask_y[bill_bools]
multitask_loss_mask_bill = multitask_loss_mask[bill_bools]
print(x_resolution.shape,loss_mask_resolution.shape,bids_resolution.shape,multitask_y_resolution.shape)
print(x_bill.shape,loss_mask_bill.shape,bids_bill.shape,multitask_y_bill.shape)
# shuffling only the bill data - in order to get the validation and val set data
# (fixed random_state so the split is reproducible across runs)
x_bill,x_indices_bill,att_mask_bill,loss_mask_bill,decoder_x_bill,y_indices_bill,bids_bill,multitask_y_bill,multitask_loss_mask_bill = shuffle(x_bill,x_indices_bill,att_mask_bill,loss_mask_bill,decoder_x_bill,y_indices_bill,bids_bill,multitask_y_bill,multitask_loss_mask_bill,random_state=1)
# first 400 shuffled bills are held out for validation/test; the rest train
x_bill_val,x_indices_bill_val,att_mask_bill_val,loss_mask_bill_val,decoder_x_bill_val,y_indices_bill_val,bids_bill_val,multitask_y_bill_val,multitask_loss_mask_bill_val = x_bill[:400],x_indices_bill[:400],att_mask_bill[:400],loss_mask_bill[:400],decoder_x_bill[:400],y_indices_bill[:400],bids_bill[:400],multitask_y_bill[:400],multitask_loss_mask_bill[:400]
x_bill_train,x_indices_bill_train,att_mask_bill_train,loss_mask_bill_train,decoder_x_bill_train,y_indices_bill_train,bids_bill_train,multitask_y_bill_train,multitask_loss_mask_bill_train = x_bill[400:],x_indices_bill[400:],att_mask_bill[400:],loss_mask_bill[400:],decoder_x_bill[400:],y_indices_bill[400:],bids_bill[400:],multitask_y_bill[400:],multitask_loss_mask_bill[400:]
print(x_bill_val.shape,loss_mask_bill_val.shape,bids_bill_val.shape,multitask_y_bill_val.shape)
print(x_bill_train.shape,loss_mask_train.shape,bids_bill_train.shape,multitask_y_bill_train.shape) if False else print(x_bill_train.shape,loss_mask_bill_train.shape,bids_bill_train.shape,multitask_y_bill_train.shape)
## to remove resolutions, simply don't include them here
# shuffling the training set - which is a combination of bill and resolution data
x_train = np.vstack([x_bill_train,x_resolution])
x_indices_train = np.vstack([x_indices_bill_train,x_indices_resolution])
att_mask_train = np.vstack([att_mask_bill_train,att_mask_resolution])
loss_mask_train = np.vstack([loss_mask_bill_train,loss_mask_resolution])
decoder_x_train = np.vstack([decoder_x_bill_train,decoder_x_resolution])
y_indices_train = np.vstack([y_indices_bill_train,y_indices_resolution])
bids_train = np.concatenate([bids_bill_train,bids_resolution])
multitask_y_train = np.vstack([multitask_y_bill_train,multitask_y_resolution])
multitask_loss_mask_train = np.vstack([multitask_loss_mask_bill_train,multitask_loss_mask_resolution])
x_train,x_indices_train,att_mask_train,loss_mask_train,decoder_x_train,y_indices_train,multitask_y_train,multitask_loss_mask_train = shuffle(x_train,x_indices_train,att_mask_train,loss_mask_train,decoder_x_train,y_indices_train,multitask_y_train,multitask_loss_mask_train,random_state=2)
print(x_train.shape,loss_mask_train.shape,bids_train.shape,multitask_y_train.shape)
# adding all the data together, with the final 400 instances being the val and test sets
x_final = np.vstack([x_train,x_bill_val])
x_indices_final = np.vstack([x_indices_train,x_indices_bill_val])
att_mask_final = np.vstack([att_mask_train,att_mask_bill_val])
loss_mask_final = np.vstack([loss_mask_train,loss_mask_bill_val])
decoder_x_final = np.vstack([decoder_x_train,decoder_x_bill_val])
y_indices_final = np.vstack([y_indices_train,y_indices_bill_val])
bids_final = np.concatenate([bids_train,bids_bill_val])
multitask_y_final = np.vstack([multitask_y_train,multitask_y_bill_val])
multitask_loss_mask_final = np.vstack([multitask_loss_mask_train,multitask_loss_mask_bill_val])
print(x_final.shape,loss_mask_final.shape,bids_final.shape,multitask_y_final.shape)
## there is no final shuffling, as the last 400 datapoints represent the validation/test sets
subdir = "len_500_data"
np.save("../data/{}/x_500.npy".format(subdir),x_final)
np.save("../data/{}/x_indices_500.npy".format(subdir),x_indices_final)
np.save("../data/{}/att_mask_500.npy".format(subdir),att_mask_final)
np.save("../data/{}/loss_mask_500.npy".format(subdir),loss_mask_final)
np.save("../data/{}/decoder_x_500.npy".format(subdir),decoder_x_final)
np.save("../data/{}/y_indices_500.npy".format(subdir),y_indices_final)
np.save("../data/{}/bids_500.npy".format(subdir),bids_final)
np.save("../data/{}/multitask_y_500.npy".format(subdir),multitask_y_final)
np.save("../data/{}/multitask_loss_mask_500.npy".format(subdir),multitask_loss_mask_final)
with open("../data/len_500_data/bill_information.json","w+") as out_file:
    json.dump(bill_information_dict,out_file)
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Configuration
_**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_
---
---
## Table of Contents
1. [Introduction](#Introduction)
1. What is an Azure Machine Learning workspace
1. [Setup](#Setup)
1. Azure subscription
1. Azure ML SDK and other library installation
1. Azure Container Instance registration
1. [Configure your Azure ML Workspace](#Configure%20your%20Azure%20ML%20workspace)
1. Workspace parameters
1. Access your workspace
1. Create a new workspace
1. Create compute resources
1. [Next steps](#Next%20steps)
---
## Introduction
This notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.
Typically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.
In this notebook you will
* Learn about getting an Azure subscription
* Specify your workspace parameters
* Access or create your workspace
* Add a default compute cluster for your workspace
### What is an Azure Machine Learning workspace
An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.
## Setup
This section describes activities required before you can access any Azure ML services functionality.
### 1. Azure Subscription
In order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces.
### 2. Azure ML SDK and other library installation
If you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.
Also install following libraries to your environment. Many of the example notebooks depend on them
```
(myenv) $ conda install -y matplotlib tqdm scikit-learn
```
Once installation is complete, the following cell checks the Azure ML SDK version:
```
import azureml.core
print("This notebook was created using version 1.1.5 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
If you are using an older version of the SDK then this notebook was created using, you should upgrade your SDK.
### 3. Azure Container Instance registration
Azure Machine Learning uses of [Azure Container Instance (ACI)](https://azure.microsoft.com/services/container-instances) to deploy dev/test web services. An Azure subscription needs to be registered to use ACI. If you or the subscription owner have not yet registered ACI on your subscription, you will need to use the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and execute the following commands. Note that if you ran through the AML [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) you have already registered ACI.
```shell
# check to see if ACI is already registered
(myenv) $ az provider show -n Microsoft.ContainerInstance -o table
# if ACI is not registered, run this command.
# note you need to be the subscription owner in order to execute this command successfully.
(myenv) $ az provider register -n Microsoft.ContainerInstance
```
---
## Configure your Azure ML workspace
### Workspace parameters
To use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:
* Your subscription id
* A resource group name
* (optional) The region that will host your workspace
* A name for your workspace
You can get your subscription ID from the [Azure portal](https://portal.azure.com).
You will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see what resource groups to which you have access, or create a new one in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.
The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.
The name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.
The following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values.
If you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.
Replace the default values in the cell below with your workspace parameters
```
import os
subscription_id = os.getenv("SUBSCRIPTION_ID", default="<my-subscription-id>")
resource_group = os.getenv("RESOURCE_GROUP", default="<my-resource-group>")
workspace_name = os.getenv("WORKSPACE_NAME", default="<my-workspace-name>")
workspace_region = os.getenv("WORKSPACE_REGION", default="eastus2")
```
### Access your workspace
The following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it.
```
from azureml.core import Workspace

try:
    # Try to load the workspace identified by the parameters set in the cell above.
    ws = Workspace(
        subscription_id=subscription_id,
        resource_group=resource_group,
        workspace_name=workspace_name,
    )
    # Write the details of the workspace to a configuration file in the notebook
    # library so later notebooks can call Workspace.from_config().
    ws.write_config()
    print("Workspace configuration succeeded. Skip the workspace creation steps below")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate;
    # any SDK/auth error means the workspace is not accessible with these parameters.
    print("Workspace not accessible. Change your parameters or create a new workspace below")
```
### Create a new workspace
If you don't have an existing workspace and are the owner of the subscription or resource group, you can create a new workspace. If you don't have a resource group, the create workspace command will create one for you using the name you provide.
**Note**: As with other Azure services, there are limits on certain resources (for example AmlCompute quota) associated with the Azure ML service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
This cell will create an Azure ML workspace for you in a subscription provided you have the correct permissions.
This will fail if:
* You do not have permission to create a workspace in the resource group
* You do not have permission to create a resource group if it's non-existing.
* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription
If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.
**Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.
Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.
```
from azureml.core import Workspace
# Create the workspace using the specified parameters
ws = Workspace.create(name = workspace_name,
subscription_id = subscription_id,
resource_group = resource_group,
location = workspace_region,
create_resource_group = True,
sku = 'basic',
exist_ok = True)
ws.get_details()
# write the details of the workspace to a configuration file to the notebook library
ws.write_config()
```
### Create compute resources for your training experiments
Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.
To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.
The cluster parameters are:
* vm_size - this describes the virtual machine type and size used in the cluster. All machines in the cluster are the same type. You can get the list of vm sizes available in your region by using the CLI command
```shell
az vm list-skus -o tsv
```
* min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while not in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.
* max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.
To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cpu_cluster_name = "cpu-cluster"
# Verify that cluster does not exist already
try:
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print("Found existing cpu-cluster")
except ComputeTargetException:
print("Creating new cpu-cluster")
# Specify the configuration for the new cluster
compute_config = AmlCompute.provisioning_configuration(vm_size="STANDARD_D2_V2",
min_nodes=0,
max_nodes=4)
# Create the cluster with the specified name and configuration
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
# Wait for the cluster to complete, show the output log
cpu_cluster.wait_for_completion(show_output=True)
```
To create a **GPU** cluster, run the cell below. Note that your subscription must have sufficient quota for GPU VMs or the command will fail. To increase quota, see [these instructions](https://docs.microsoft.com/en-us/azure/azure-supportability/resource-manager-core-quotas-request).
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your GPU cluster
gpu_cluster_name = "gpu-cluster"
# Verify that cluster does not exist already
try:
gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)
print("Found existing gpu cluster")
except ComputeTargetException:
print("Creating new gpu-cluster")
# Specify the configuration for the new cluster
compute_config = AmlCompute.provisioning_configuration(vm_size="STANDARD_NC6",
min_nodes=0,
max_nodes=4)
# Create the cluster with the specified name and configuration
gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)
# Wait for the cluster to complete, show the output log
gpu_cluster.wait_for_completion(show_output=True)
```
---
## Next steps
In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to your workspace, or use it to bootstrap new workspaces completely.
If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into "how-to" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process.
| github_jupyter |
# End-to-End Incremental Training Image Classification Example
1. [Introduction](#Introduction)
2. [Prerequisites and Preprocessing](#Prequisites-and-Preprocessing)
1. [Permissions and environment variables](#Permissions-and-environment-variables)
2. [Prepare the data](#Prepare-the-data)
3. [Training the model](#Training-the-model)
1. [Training parameters](#Training-parameters)
2. [Start the training](#Start-the-training)
4. [Inference](#Inference)
## Introduction
Welcome to our end-to-end example of incremental training using Amazon Sagemaker image classification algorithm. In this demo, we will use the Amazon sagemaker image classification algorithm to train on the [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). First, we will run the training for few epochs. Then, we will use the generated model in the previous training to start another training to improve accuracy further without re-training again.
To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
## Prequisites and Preprocessing
### Permissions and environment variables
Here we set up the linkage and authentication to AWS services. There are three parts to this:
* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook
* The S3 bucket that you want to use for training and model data
* The Amazon sagemaker image classification docker image which need not be changed
```
%%time
import sagemaker
from sagemaker import get_execution_role
role = get_execution_role()
print(role)
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = "ic-fulltraining"
from sagemaker import image_uris
training_image = image_uris.retrieve(region=sess.boto_region_name, framework="image-classification")
```
### Data preparation
Download the data and transfer to S3 for use in training. In this demo, we are using [Caltech-256](http://www.vision.caltech.edu/Image_Datasets/Caltech256/) dataset, which contains 30608 images of 256 objects. For the training and validation data, we follow the splitting scheme in this MXNet [example](https://github.com/apache/incubator-mxnet/blob/master/example/image-classification/data/caltech256.sh). In particular, it randomly selects 60 images per class for training, and uses the remaining data for validation. The algorithm takes `RecordIO` file as input. The user can also provide the image files as input, which will be converted into `RecordIO` format using MXNet's [im2rec](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) tool. It takes around 50 seconds to convert the entire Caltech-256 dataset (~1.2GB) on a p2.xlarge instance. However, for this demo, we will use record io format.
```
import boto3
s3_client = boto3.client("s3")
def upload_to_s3(channel, file):
    """Upload a local file to s3://<bucket>/<channel>/<file>.

    Relies on the module-level ``bucket`` name defined in the setup cell above.
    """
    s3 = boto3.resource("s3")
    key = channel + "/" + file
    # Context manager ensures the file handle is closed even if the upload
    # raises (the original opened the file and never closed it).
    with open(file, "rb") as data:
        s3.Bucket(bucket).put_object(Key=key, Body=data)
# caltech-256
s3_client.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/caltech-256-60-train.rec",
"caltech-256-60-train.rec",
)
s3_client.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/caltech-256-60-val.rec",
"caltech-256-60-val.rec",
)
# Two channels: train, validation
s3train = "s3://{}/{}/train/".format(bucket, prefix)
s3validation = "s3://{}/{}/validation/".format(bucket, prefix)
# upload the rec files to train and validation channels
!aws s3 cp caltech-256-60-train.rec $s3train --quiet
!aws s3 cp caltech-256-60-val.rec $s3validation --quiet
```
Once we have the data available in the correct format for training, the next step is to actually train the model using the data. After setting training parameters, we kick off training, and poll for status until training is completed.
## Training the model
Now that we are done with all the setup that is needed, we are ready to train our image classifier. To begin, let us create a ``sageMaker.estimator.Estimator`` object. This estimator will launch the training job.
### Training parameters
There are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:
* **Training instance count**: This is the number of instances on which to run the training. When the number of instances is greater than one, then the image classification algorithm will run in distributed settings.
* **Training instance type**: This indicates the type of machine on which to run the training. Typically, we use GPU instances for these training
* **Output path**: This the s3 folder in which the training output is stored
```
s3_output_location = "s3://{}/{}/output".format(bucket, prefix)
ic = sagemaker.estimator.Estimator(
training_image,
role,
train_instance_count=1,
train_instance_type="ml.p2.xlarge",
train_volume_size=50,
train_max_run=360000,
input_mode="File",
output_path=s3_output_location,
sagemaker_session=sess,
)
```
Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:
* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.
* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be same as the actual image.
* **num_classes**: This is the number of output classes for the new dataset. Imagenet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class.
* **num_training_samples**: This is the total number of training samples. It is set to 15420 for the caltech dataset with the current split (60 images per class across 257 classes).
* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.
* **epochs**: Number of training epochs.
* **learning_rate**: Learning rate for training.
* **top_k**: Report the top-k accuracy during training.
```
ic.set_hyperparameters(
num_layers=18,
image_shape="3,224,224",
num_classes=257,
num_training_samples=15420,
mini_batch_size=256,
epochs=10,
learning_rate=0.1,
top_k=2,
)
```
## Input data specification
Set the data type and channels used for training
```
train_data = sagemaker.session.s3_input(
s3train,
distribution="FullyReplicated",
content_type="application/x-recordio",
s3_data_type="S3Prefix",
)
validation_data = sagemaker.session.s3_input(
s3validation,
distribution="FullyReplicated",
content_type="application/x-recordio",
s3_data_type="S3Prefix",
)
data_channels = {"train": train_data, "validation": validation_data}
```
## Start the training
Start training by calling the fit method in the estimator
```
ic.fit(inputs=data_channels, logs=True)
```
## Prepare for incremental training
Now, we will use the model generated in the previous training to start another training with the same dataset. This new training will start with higher accuracy as it uses the model generated in the previous training.
```
# Print the location of the model data from previous training
print(ic.model_data)
# Prepare model channel in addition to train and validation
model_data = sagemaker.session.s3_input(
ic.model_data,
distribution="FullyReplicated",
s3_data_type="S3Prefix",
content_type="application/x-sagemaker-model",
)
data_channels = {"train": train_data, "validation": validation_data, "model": model_data}
```
## Start another training
We use the same hyperparameters as before. When the model channel is present, the use_pretrained_model parameter is ignored. The number of classes, input image shape and number of layers should be the same as the previous training since we are starting with the same model. Other parameters, such as learning_rate, mini_batch_size, etc., can be varied.
```
incr_ic = sagemaker.estimator.Estimator(
training_image,
role,
train_instance_count=1,
train_instance_type="ml.p2.xlarge",
train_volume_size=50,
train_max_run=360000,
input_mode="File",
output_path=s3_output_location,
sagemaker_session=sess,
)
incr_ic.set_hyperparameters(
num_layers=18,
image_shape="3,224,224",
num_classes=257,
num_training_samples=15420,
mini_batch_size=128,
epochs=2,
learning_rate=0.01,
top_k=2,
)
incr_ic.fit(inputs=data_channels, logs=True)
```
As you can see from the logs, the training starts with the previous model and hence the accuracy for the first epoch itself is higher.
# Inference
***
We can now use the trained model to perform inference. You can deploy the created model by using the deploy method in the estimator
```
ic_classifier = incr_ic.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
```
### Download test image
```
file_name = "/tmp/test.jpg"
s3_client.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories/008.bathtub/008_0007.jpg",
file_name,
)
# test image
from IPython.display import Image
Image(file_name)
```
### Evaluation
Evaluate the image through the network for inference. The network outputs class probabilities and typically, one selects the class with the maximum probability as the final class output.
```
import json
import numpy as np
from sagemaker.serializers import IdentitySerializer
with open(file_name, "rb") as f:
payload = f.read()
ic_classifier.serializer = IdentitySerializer("image/jpeg")
result = json.loads(ic_classifier.predict(payload))
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
```
### Clean up
When we're done with the endpoint, we can just delete it and the backing instances will be released. Uncomment and run the following cell to delete the endpoint and model
```
ic_classifier.delete_endpoint()
```
| github_jupyter |
# Variable transformers : YeoJohnsonTransformer
The YeoJohnsonTransformer() applies the Yeo-Johnson transformation to the
numerical variables.
**For this demonstration, we use the Ames House Prices dataset produced by Professor Dean De Cock:**
Dean De Cock (2011) Ames, Iowa: Alternative to the Boston Housing
Data as an End of Semester Regression Project, Journal of Statistics Education, Vol.19, No. 3
http://jse.amstat.org/v19n3/decock.pdf
https://www.tandfonline.com/doi/abs/10.1080/10691898.2011.11889627
The version of the dataset used in this notebook can be obtained from [Kaggle](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data)
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from feature_engine.imputation import ArbitraryNumberImputer
from feature_engine.transformation import YeoJohnsonTransformer
# load data
data = pd.read_csv('houseprice.csv')
data.head()
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(
data.drop(['Id', 'SalePrice'], axis=1), data['SalePrice'], test_size=0.3, random_state=0)
X_train.shape, X_test.shape
# initialize transformer to transform 2 variables
yjt = YeoJohnsonTransformer(variables = ['LotArea', 'GrLivArea'])
# find optimal lambdas for the transformation
yjt.fit(X_train)
# these are the fitted lambdas, one per variable, for the Yeo-Johnson transformation
yjt.lambda_dict_
# apply the transformation to both the train and test sets
train_t = yjt.transform(X_train)
test_t = yjt.transform(X_test)
# GrLivArea before transformation
X_train['GrLivArea'].hist(bins=50)
plt.title('Variable before transformation')
plt.xlabel('GrLivArea')
# GrLivArea after transformation
train_t['GrLivArea'].hist(bins=50)
plt.title('Transformed variable')
plt.xlabel('GrLivArea')
# LotArea before transformation
X_train['LotArea'].hist(bins=50)
plt.title('Variable before transformation')
plt.xlabel('LotArea')
# LotArea after transformation
train_t['LotArea'].hist(bins=50)
plt.title('Transformed variable')  # fixed: was mislabeled 'Variable before transformation'
plt.xlabel('LotArea')
```
## Automatically select numerical variables
Before using YeoJohnsonTransformer we need to ensure that numerical variables do not have missing data.
```
# impute missing data (YeoJohnsonTransformer requires no NaNs in the numerical variables)
arbitrary_imputer = ArbitraryNumberImputer(arbitrary_number=2)
arbitrary_imputer.fit(X_train)
train_t = arbitrary_imputer.transform(X_train)
test_t = arbitrary_imputer.transform(X_test)
# initializing transformer to transform all numerical variables (none specified)
yjt = YeoJohnsonTransformer()
yjt.fit(train_t)
```
Note, the run time error is because we are trying to transform integers.
```
# variables that will be transformed
# (these are the numerical variables in the dataset)
yjt.variables
# these are the parameters for YeoJohnsonTransformer
yjt.lambda_dict_
# transform variables
train_t = yjt.transform(train_t)
test_t = yjt.transform(test_t)
```
| github_jupyter |
## Preprocessing Tabular Data
The purpose of this notebook is to demonstrate how to preprocess tabular data for training a machine learning model via Amazon SageMaker. In this notebook we focus on preprocessing our tabular data and in a sequel notebook, [training_model_on_tabular_data.ipynb](training_model_on_tabular_data.ipynb) we use our preprocessed tabular data to train a machine learning model. We showcase how to preprocess 3 different tabular data sets.
#### Notes
In this notebook, we use the sklearn framework for data partitioning and storemagic to share dataframes in [training_model_on_tabular_data.ipynb](training_model_on_tabular_data.ipynb). While we load data into memory here, we do note that it is possible to skip this and load your partitioned data directly to an S3 bucket.
#### Tabular Data Sets
* [boston house data](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html)
* [california house data](https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html)
* [diabetes data ](https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html)
#### Library Dependencies:
* sagemaker >= 2.0.0
* numpy
* pandas
* plotly
* sklearn
* matplotlib
* seaborn
### Setting up the notebook
```
import os
import sys
import subprocess
import pkg_resources
def get_sagemaker_version():
    "Return the version of 'sagemaker' in your kernel or -1 if 'sagemaker' is not installed"
    # Scan the installed distributions for the sagemaker package.
    for dist in pkg_resources.working_set:
        if dist.key == "sagemaker":
            return "{}=={}".format(dist.key, dist.version)
    return -1
# Store original 'sagemaker' version
sagemaker_version = get_sagemaker_version()
# Install any missing dependencies
!{sys.executable} -m pip install -qU 'plotly' 'sagemaker>=2.0.0'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import *
import sklearn.model_selection
# SageMaker dependencies
import sagemaker
from sagemaker import get_execution_role
from sagemaker.image_uris import retrieve
# This instantiates a SageMaker session that we will be operating in.
session = sagemaker.Session()
# This object represents the IAM role that we are assigned.
role = sagemaker.get_execution_role()
print(role)
```
### Step 1: Select and Download Data
Here you can select the tabular data set of your choice to preprocess.
```
data_sets = {'diabetes': 'load_diabetes()', 'california': 'fetch_california_housing()', 'boston' : 'load_boston()'}
```
To select a particular dataset, assign **choosen_data_set** below to be one of 'diabetes', 'california', or 'boston', where each name corresponds to its respective dataset.
* 'boston' : [boston house data](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html)
* 'california' : [california house data](https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html)
* 'diabetes' : [diabetes data ](https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html)
```
# Change choosen_data_set variable to one of the data sets above.
choosen_data_set = 'california'
assert choosen_data_set in data_sets.keys()
print("I selected the '{}' dataset!".format(choosen_data_set))
```
### Step 2: Describe Feature Information
Here we load the chosen data set and summarize its feature and label information.
```
# NOTE(review): eval() is safe here only because data_sets is a fixed, trusted
# dict of loader calls defined above — never eval user-supplied strings.
data_set = eval(data_sets[choosen_data_set])
# Features as a DataFrame with named columns; target as a single-column DataFrame.
X = pd.DataFrame(data_set.data, columns=data_set.feature_names)
Y = pd.DataFrame(data_set.target)
print("Features:", list(X.columns))
print("Dataset shape:", X.shape)
print("Dataset Type:", type(X))
print("Label set shape:", Y.shape)
print("Label set Type:", type(Y))  # fixed: was type(X)
```
#### We describe both our training data inputs X and outputs Y by computing the count, mean, std, min, percentiles.
```
display(X.describe())
display(Y.describe())
```
### Step 3: Plot on Feature Correlation
Here we show a heatmap and clustergrid across all our features. These visualizations help us analyze correlated features and are particularly important if we want to remove redundant features. The heatmap computes a similarity score across each feature and colors like features using this score. The clustergrid is similar, however it presents feature correlations hierarchically.
**Note**: For the purposes of this notebook we do not remove any features but by gathering the findings from these plots one may choose to and can do so at this point.
```
plt.figure(figsize=(14,12))
cor = X.corr()
sns.heatmap(cor, annot=True, cmap=sns.diverging_palette(20, 220, n=200))
plt.show()
cluster_map = sns.clustermap(cor, cmap =sns.diverging_palette(20, 220, n=200), linewidths = 0.1);
plt.setp(cluster_map.ax_heatmap.yaxis.get_majorticklabels(), rotation = 0)
cluster_map
```
### Step 4: Partition Dataset into Train, Test, Validation Splits
Here using the sklearn framework we partition our selected dataset into Train, Test and Validation splits. We choose a partition size of 1/3 and then further split the training set into 2/3 training and 1/3 validation set.
```
# We partition the dataset into 2/3 training and 1/3 test set.
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.33)
# We further split the training set into a validation set i.e., 2/3 training set, and 1/3 validation set
X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33)
```
### Step 5: Store Variables using storemagic
We use storemagic to persist all relevant variables so they can be reused in our sequel notebook, [training_model_on_tabular_data.ipynb](training_model_on_tabular_data.ipynb).
Alternatively, it is possible to upload your partitioned data to an S3 bucket and point to it during the model training phase. We note that this is beyond the scope of this notebook hence why we omit it.
```
# Using storemagic we persist the variables below so we can access them in the training_model_on_tabular_data.ipynb
%store X_train
%store X_test
%store X_val
%store Y_train
%store Y_test
%store Y_val
%store choosen_data_set
%store sagemaker_version
```
| github_jupyter |
```
from keras.datasets import fashion_mnist
(train_X,train_Y), (test_X,test_Y) = fashion_mnist.load_data()
import numpy as np
from keras.utils import to_categorical
import matplotlib.pyplot as plt
%matplotlib inline
print('Training data shape: ', train_X.shape, train_Y.shape)
print('Testing data shape: ', test_X.shape, test_Y.shape)
# Summarize the label space of the training set.
classes = np.unique(train_Y)
nclasses = len(classes)
print('Total number of outputs: ', nclasses)
# Fixed garbled label string ('Outputnclasses: ' -> 'Output classes: ').
print('Output classes: ', classes)
plt.figure(figsize=[5,5])
# Display the first image in training data
plt.subplot(121)
plt.imshow(train_X[0,:,:], cmap='gray')
plt.title("Ground Truth : {}".format(train_Y[0]))
# Display the first image in testing data
plt.subplot(122)
plt.imshow(test_X[0,:,:], cmap='gray')
plt.title("Ground Truth : {}".format(test_Y[0]))
tr_x = train_X
ts_x = test_X
tr_y = train_Y
ts_y = test_Y
train_X = train_X.reshape(-1, 28, 28, 1)
test_X = test_X.reshape(-1, 28, 28, 1)
train_X.shape, test_X.shape
train_X = train_X.astype('float32')
test_X = test_X.astype('float32')
train_X = train_X / 255
test_X = test_X / 255
train_Y_one_hot = to_categorical(train_Y)
test_Y_one_hot = to_categorical(test_Y)
print('Original label:', train_Y[0])
print('After conversion to one-hot', train_Y_one_hot[0])
from sklearn.model_selection import train_test_split
train_X, valid_X, train_label, valid_label = train_test_split(train_X, train_Y_one_hot, test_size = 0.2, random_state = 13)
train_X.shape, valid_X.shape, train_label.shape, valid_label.shape
import keras
from keras.models import Sequential, Input, Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
batch_size = 64
epochs = 20
num_classes = 10
fashion_model = Sequential()
fashion_model.add(Conv2D(32, kernel_size = (3, 3), activation = 'linear', input_shape=(28,28,1), padding = 'same'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(MaxPooling2D((2,2), padding = 'same'))
fashion_model.add(Conv2D(64, (3, 3), activation = 'linear', padding = 'same'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(MaxPooling2D(pool_size = (2,2), padding = 'same'))
fashion_model.add(Conv2D(128, (3, 3), activation = 'linear', padding = 'same'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))
fashion_model.add(Flatten())
fashion_model.add(Dense(128, activation = 'linear'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(Dense(num_classes, activation = 'softmax'))
fashion_model.compile(loss = keras.losses.categorical_crossentropy, optimizer = keras.optimizers.Adam(), metrics = ['accuracy'])
fashion_model.summary()
fashion_train = fashion_model.fit(train_X, train_label, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(valid_X, valid_label))
test_eval = fashion_model.evaluate(test_X, test_Y_one_hot, verbose = 0)
print('Test loss: ', test_eval[0])
print('Test accuracy: ', test_eval[1])
accuracy = fashion_train.history['accuracy']
val_accuracy = fashion_train.history['val_accuracy']
loss = fashion_train.history['loss']
val_loss = fashion_train.history['val_loss']
epochs = range(len(accuracy))
# Plot training vs. validation accuracy, then training vs. validation loss.
plt.plot(epochs, accuracy, 'bo', label = 'Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label = 'Validation accuracy')
plt.title('Training and validation accuracy')  # fixed 'Trainimg' typo
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
batch_size = 64
epochs = 20
num_classes = 10
fashion_model = Sequential()
fashion_model.add(Conv2D(32, kernel_size = (3, 3), activation = 'linear', input_shape=(28,28,1), padding = 'same'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(MaxPooling2D((2,2), padding = 'same'))
fashion_model.add(Dropout(0.25))
fashion_model.add(Conv2D(64, (3, 3), activation = 'linear', padding = 'same'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(MaxPooling2D(pool_size = (2,2), padding = 'same'))
fashion_model.add(Dropout(0.25))
fashion_model.add(Conv2D(128, (3, 3), activation = 'linear', padding = 'same'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))
fashion_model.add(Dropout(0.4))
fashion_model.add(Flatten())
fashion_model.add(Dense(128, activation = 'linear'))
fashion_model.add(LeakyReLU(alpha = 0.1))
fashion_model.add(Dropout(0.3))
fashion_model.add(Dense(num_classes, activation = 'softmax'))
fashion_model.summary()
fashion_model.compile(loss = keras.losses.categorical_crossentropy, optimizer = keras.optimizers.Adam(), metrics = ['accuracy'])
fashion_train_dropout = fashion_model.fit(train_X, train_label, batch_size = batch_size, epochs = epochs, verbose = 1, validation_data = (valid_X, valid_label))
fashion_model.save("fashion_model_dropout.h5py")
test_eval = fashion_model.evaluate(test_X, test_Y_one_hot, verbose = 1)
print('Test loss: ', test_eval[0])
print('Test accuracy: ', test_eval[1])
accuracy = fashion_train_dropout.history['accuracy']
val_accuracy = fashion_train_dropout.history['val_accuracy']
loss = fashion_train_dropout.history['loss']
val_loss = fashion_train_dropout.history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Predict class probabilities, then take the arg-max as the predicted class.
# (Renamed the misspelled 'predictes_classes' intermediate; it is not used elsewhere.)
predicted_probs = fashion_model.predict(test_X)
predicted_classes = np.argmax(np.round(predicted_probs), axis = 1)
predicted_classes.shape, test_Y.shape
correct = np.where(predicted_classes == test_Y)[0]
# print("Found ", len(correct), " correct labels")
print ("Found %d correct labels" % len(correct))
for i, correct in enumerate(correct[:9]):
plt.subplot(3, 3, i + 1)
plt.imshow(test_X[correct].reshape(28, 28), cmap = 'gray', interpolation = 'none')
plt.title("Predicted {}, Class {}".format(predicted_classes[correct], test_Y[correct]))
plt.tight_layout()
incorrect = np.where(predicted_classes!=test_Y)[0]
print ("Found %d incorrect labels" % len(incorrect))
for i, incorrect in enumerate(incorrect[:9]):
plt.subplot(3,3,i+1)
plt.imshow(test_X[incorrect].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect], test_Y[incorrect]))
plt.tight_layout()
from sklearn.metrics import classification_report
target_names = ["Class {}".format(i) for i in range(num_classes)]
print(classification_report(test_Y, predicted_classes, target_names=target_names))
```
| github_jupyter |
# All
## Set Up
```
print("Installing dependencies...")
%tensorflow_version 2.x
!pip install -q t5
import functools
import os
import time
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import t5
```
## Set Up TPU Runtime
```
# Colab/TPU bootstrap: authenticate to GCS and attach to the TPU runtime.
ON_CLOUD = True

if ON_CLOUD:
    print("Setting up GCS access...")
    import tensorflow_gcs_config
    from google.colab import auth
    # Set credentials for GCS reading/writing from Colab and TPU.
    TPU_TOPOLOGY = "v3-8"
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()  # TPU detection
        TPU_ADDRESS = tpu.get_master()
        print('Running on TPU:', TPU_ADDRESS)
    except ValueError:
        # No TPU resolver available -> the notebook was not started on a TPU runtime.
        raise BaseException('ERROR: Not connected to a TPU runtime; please see the previous cell in this notebook for instructions!')
    auth.authenticate_user()
    tf.config.experimental_connect_to_host(TPU_ADDRESS)
    tensorflow_gcs_config.configure_gcs_from_colab_auth()

# Mesh-TF / t5 below expect TF1-style graph-mode behavior.
tf.disable_v2_behavior()
# Improve logging.
from contextlib import contextmanager
import logging as py_logging
if ON_CLOUD:
tf.get_logger().propagate = False
py_logging.root.setLevel('INFO')
@contextmanager
def tf_verbosity_level(level):
    """Temporarily set the TF logging verbosity to ``level``.

    The previous verbosity is restored on exit. Bug fix: the original did
    not use try/finally, so an exception inside the ``with`` body would
    leave the changed verbosity in place permanently.
    """
    og_level = tf.logging.get_verbosity()
    tf.logging.set_verbosity(level)
    try:
        yield
    finally:
        # Restore even if the body raises, so logging config can't leak.
        tf.logging.set_verbosity(og_level)
```
## 5b
```
def dumping_dataset(split, shuffle_files = False):
    """Return a tf.data.Dataset of ``{"input": ..., "target": ...}`` dicts
    for the BioASQ 5b task.

    ``shuffle_files`` is accepted for API compatibility with the t5 Task
    interface but ignored, since each split is a single TSV file on GCS.
    """
    del shuffle_files
    if split == 'train':
        ds = tf.data.TextLineDataset(
            [
                'gs://scifive/finetune/bioasq5b/bioasq_5b_train_1.tsv',
            ]
        )
    else:
        ds = tf.data.TextLineDataset(
            [
                'gs://scifive/finetune/bio_data/bioasq5b/bioasq_5b_test.tsv',
            ]
        )
    # Split each "<t1>\t<t2>" example into an (input, target) tuple.
    ds = ds.map(
        functools.partial(tf.io.decode_csv, record_defaults=["", ""],
                          field_delim="\t", use_quote_delim=False),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Map each tuple to a {"input": ... "target": ...} dict.
    ds = ds.map(lambda *ex: dict(zip(["input", "target"], ex)))
    return ds
print("A few raw validation examples...")
for ex in tfds.as_numpy(dumping_dataset("train").take(5)):
print(ex)
def ner_preprocessor(ds):
    """Prefix each example's input with the task name and rename its fields.

    Transforms {"input": ..., "target": ...} records into the
    {"inputs": "bioasq5b: ...", "targets": ...} form expected by t5.
    """
    def add_task_prefix(example):
        return {
            "inputs": tf.strings.join(["bioasq5b: ", example["input"]]),
            "targets": example["target"],
        }
    return ds.map(add_task_prefix,
                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
t5.data.TaskRegistry.remove('bioasq5b')
t5.data.TaskRegistry.add(
"bioasq5b",
# Supply a function which returns a tf.data.Dataset.
dataset_fn=dumping_dataset,
splits=["train", "validation"],
# Supply a function which preprocesses text from the tf.data.Dataset.
text_preprocessor=[ner_preprocessor],
# Lowercase targets before computing metrics.
postprocess_fn=t5.data.postprocessors.lower_text,
# We'll use accuracy as our evaluation metric.
metric_fns=[t5.evaluation.metrics.accuracy,
t5.evaluation.metrics.sequence_accuracy,
],
# output_features=t5.data.Feature(vocabulary=t5.data.SentencePieceVocabulary(vocab))
)
nq_task = t5.data.TaskRegistry.get("bioasq5b")
ds = nq_task.get_dataset(split="train", sequence_length={"inputs": 128, "targets": 128})
print("A few preprocessed validation examples...")
for ex in tfds.as_numpy(ds.take(5)):
print(ex)
```
## Dataset Mixture
```
t5.data.MixtureRegistry.remove("bioasqb")
t5.data.MixtureRegistry.add(
"bioasqb",
["bioasq5b"],
default_rate=1.0
)
```
## Define Model
```
# Using pretrained_models from wiki + books
MODEL_SIZE = "base"
# BASE_PRETRAINED_DIR = "gs://t5-data/pretrained_models"
BASE_PRETRAINED_DIR = "gs://t5_training/models/bio/pmc_v1"
PRETRAINED_DIR = os.path.join(BASE_PRETRAINED_DIR, MODEL_SIZE)
MODEL_DIR = "gs://t5_training/models/bio/bioasq5b_pmc_v1"
MODEL_DIR = os.path.join(MODEL_DIR, MODEL_SIZE)
# Set parallelism and batch size to fit on v2-8 TPU (if possible).
# Limit number of checkpoints to fit within 5GB (if possible).
model_parallelism, train_batch_size, keep_checkpoint_max = {
"small": (1, 256, 16),
"base": (2, 128*2, 8),
"large": (8, 64, 4),
"3B": (8, 16, 1),
"11B": (8, 16, 1)}[MODEL_SIZE]
tf.io.gfile.makedirs(MODEL_DIR)
# The models from our paper are based on the Mesh Tensorflow Transformer.
model = t5.models.MtfModel(
model_dir=MODEL_DIR,
tpu=TPU_ADDRESS,
tpu_topology=TPU_TOPOLOGY,
model_parallelism=model_parallelism,
batch_size=train_batch_size,
sequence_length={"inputs": 512, "targets": 32},
learning_rate_schedule=0.001,
save_checkpoints_steps=1000,
keep_checkpoint_max=keep_checkpoint_max if ON_CLOUD else None,
iterations_per_loop=100,
)
```
## Finetune
```
FINETUNE_STEPS = 35000
model.finetune(
mixture_or_task_name="bioasqb",
pretrained_model_dir=PRETRAINED_DIR,
finetune_steps=FINETUNE_STEPS
)
```
## Predict
```
year = 5
output_dir = 'bioasq5b_pmc_v1'
import tensorflow.compat.v1 as tf
# for year in range(4,7):
for batch in range (1,6):
task = "%dB%d"%(year, batch)
dir = "bioasq%db"%(year)
input_file = task + '_factoid_predict_input.txt'
output_file = task + '_predict_output.txt'
predict_inputs_path = os.path.join('gs://t5_training/t5-data/bio_data', dir, 'eval_data', input_file)
print(predict_inputs_path)
predict_outputs_path = os.path.join('gs://t5_training/t5-data/bio_data', dir, output_dir, MODEL_SIZE, output_file)
with tf_verbosity_level('ERROR'):
model.batch_size = 8 # Min size for small model on v2-8 with parallelism 1.
model.predict(
input_file=predict_inputs_path,
output_file=predict_outputs_path,
# Select the most probable output token at each step.
temperature=0,
)
print("Predicted task : " + task)
prediction_files = sorted(tf.io.gfile.glob(predict_outputs_path + "*"))
print("\nPredictions using checkpoint %s:\n" % prediction_files[-1].split("-")[-1])
# t5_training/t5-data/bio_data/bioasq4b/eval_data/4B1_factoid_predict_input.txt
```
| github_jupyter |
# Loops and Conditionals
---
## Loops
- for
- while
**looping through list**
*lets create a list*
```
lst = [1, 3, 4]
for item in lst:
print(item)
print('='*3)
```
**Note: blocks**
```
print('Length of the list: ', len(lst))
for item in lst:
print(item)
print('='*3)
print('Finished......')
```
*for each `item` in list `lst` print that `item`*
```
# variable inside for loop holds value after loop has been completed
item
```
*looping through tuple*
```
for item in (1, 3, 4):
print(item)
```
*looping through dictionary - by keys*
```
dct = {
'name': 'Harry',
'age': 42,
'profession': 'Foreman',
'address': ('12th Street', 'CA')
}
for item in dct:
print(item)
```
*here items printed are keys of dictionary*
*If we loop through a dictionary we get keys*
```
# get all the keys
dct.keys()
# looping through list of keys of the dictionary
for item in dct.keys():
print(item)
# now obtain value from keys of a dictionary
for key in dct:
print(dct[key])
print('='*3)
dct['name']
```
*looping by value*
```
# get list of values of the dictionary
dct.values()
# get all the values of dictionary from list of values
for item in dct.values():
print(item)
# we can access dictionary values using dictionary[key] see: session3a
# for format string see: session 2
for item in dct:
print("'{}' has value '{}'".format(item, dct[item]))
```
### tuple unpacking, variable assignment
```
# create variables a1 and b1 and assign values to them
a1 = 5
b1 = 8
print(a1, b1)
# variable assignment can be done in single line as
a1, b1 = 9, 6
print(a1, b1)
```
*In above `a1` is assigned `9` and `b1` is assigned `6`*
```
a, b = (0, 1)
a
b
```
*a, b = (0, 1) is the same as a, b = 0, 1*
since (0, 1) is a tuple and it is unpacked (taken out of the tuple), this process is called unpacking
```
# both sides should be equal
a, b, c = 0, 9
# both sides should be equal
a, b, c = 9, 8, 7, 8
(a, b, c) = (88, 99, 77)
print(a, b, c)
tup = (8, 9, 7)
e, n, s = tup
print(e, ',', n, ',', s)
# get items of a dictionary
dct.items()
for item in dct.items():
print(item)
for item in dct.items():
key, value = item # ('profession', 'Foreman')
print("Key {} with value {}".format(key, value))
for key, value in dct.items():
print("key {} has value {}".format(key, value))
```
### Ranges
*create a range object starting from 0 and up to 9, (remember 9 is excluded)*
```
range(0, 9)
# range from 0 to 9 (excluding 9)
list(range(0, 9))
for i in range(0, 9):
print(i)
# 0 to 9 (excluding) with step 2
for i in range(0, 9, 2):
print(i)
list(range(1, 9, 2))
```
#### while loop
```
a
a = a + 1
a
```
**There is no ++/-- operator in python**
```
# it is shorthand for a = a + 1
a += 1
```
*shorthands*
```python
a = a - 1
# same as
a -= 1
b = b * 3
# same as
b *= 3
c = c / 4
# same as
c /= 4
```
```
a = 0
while a < 3:
print(a)
a += 1
```
## Conditionals
*empty list, tuple, dictionary are false*
```
[] == True
bool([])
```
*same is true for empty string and 0*
```
'' == True
bool('')
```
*'' is empty string but not ' ', which has space in it*
```
bool(' ')
0 == True
bool(0)
```
*check if a is present in string *
```
'a' in 'abc'
```
*check if number is present in list*
```
5 in list(range(0, 5))
5 in list(range(0, 6))
list(range(0, 6))
```
**if/elif/else**
```
# empty space is not same as empty string
if ' ':
print('If')
if '':
print('This is True')
if '' == True:
print('Some string')
else:
print('empty string')
```
*Note: Above code is for example, should follow code like below*
```python
if '':
print('Some string')
else:
print('empty string')
```
```
a
```
*Note: a is already assigned above in while loop*
```
if a < 3:
print('a is less than 3')
elif a == 3:
print('a is equals to 3')
else:
print('a is greater than 3')
```
*Note: This is way of doing switch/case in python*
**logical operators**
```
a
a < 3 and a == 3
a <= 3 and a >= 3
```
*Note: there is no && or || operator in python*
```
a < 3 or a == 3
```
*Note: while there is & and | operators, which are bitwise operators*
```
a <= 3
a >= 3
a
```
*a is greater than or equal to 3 AND less than 9*
```
3 <= a < 9
3 < a < 9
a > 3 and a < 9
```
**Negation operators**
```
# a is not equal to 4
a != 4
not True
not False
not (3 <= a < 9)
# not boolean
if not []:
print("List is empty")
```
**Break and Continue**
```
a = 0
while a < 9:
print(a)
if a == 3:
break
a += 1
```
*always breaks nearest parent loop*
```
a = 0
while a < 9:
a += 1
if 4 < a < 6:
continue
print(a)
```
*continue the loop but skip the remaining lines, above print function is skipped when a is 5*
**Conditional assignment** *(Reference only)*
*value_of is assigned __aa__ if __aa__ is less than __3__, otherwise it is assigned __False__*
```
aa = 4
value_of = aa * 3 if aa > 3 else 3
value_of
value_of = aa * 3 if aa < 4 else 3
value_of
```
| github_jupyter |
```
from datascience import *
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
from utils import *
plt.style.use('seaborn-muted')
from matplotlib import patches
import csaps
import warnings
warnings.filterwarnings("ignore")
```
# An Empirical Example from EEP 147
Let's take a look at an empirical example of production. The dataset for this section comes from EEP 147: Regulation of Energy and the Environment.
```
ESG_table = Table.read_table('ESGPorfolios_forcsv.csv').select(
"Group", "Group_num", "UNIT NAME", "Capacity_MW", "Total_Var_Cost_USDperMWH"
).sort("Total_Var_Cost_USDperMWH", descending = False).relabel(4, "Average Variable Cost")
ESG_table
```
This table shows some electricity generation plants in California and their costs. The `Capacity` is the output the firm is capable of producing. The `Average Variable Cost` shows the minimum variable cost per megawatt (MW) produced. At a price below AVC, the firm supplies nothing. At a price above the AVC, the firm can supply up to its capacity. Being a profit-maximising firm, it will try to supply its full capacity.
First, lets look at just the Big Coal producers and understand this firm's particular behavior.
```
selection = 'Big Coal'
Group = ESG_table.where("Group", are.equal_to(selection))
Group
# Make the plot
plt.figure(figsize=(9,6))
plt.bar(new_x_group, height_group, width=width_group, edgecolor = "black")
# Add title and axis names
plt.title(selection)
plt.xlabel('Capacity_MW')
plt.ylabel('Variable Cost/Price')
plt.show()
```
We have created the Big Coal supply curve. It shows the price of electricity, and the quantity supplied at those prices, which depends on variable cost. For example, at any variable cost equal to or above 36.5, the producer `FOUR CORNERS` (the one with the lowest production costs) will supply, and so on. Notably, we observe that the supply curve is also upward sloping since we need higher prices to entice producers with higher variable costs to produce.
```
group_plot(30)
group_plot(37)
group_plot(50)
```
Now we will look at all the energy sources. They have been colored according to source for reference.
```
ESG_plot(30)
ESG_plot(50)
```
Look at the thin bars concentrated on the right end of the plot. These are plants with small capacities and high variable costs. Conversely, plants with larger capacities tend to have lower variable costs. Why might this be the case? Electricity production typically benefits from economies of scale: it is cheaper per unit when producing more units. Perhaps the high fixed cost required for electricity production, such as for equipment and land, is the reason behind this phenomenon.
| github_jupyter |
# Clustering of Social Groups Using Census Demographic Variables
#### Purpose of this notebook
- 1) Use scikit-learn K-Means to create social groups across Toronto, Vancouver, Montreal
#### Data Sources
- Census Variables: https://www12.statcan.gc.ca/census-recensement/2016/dp-pd/prof/details/download-telecharger/comp/page_dl-tc.cfm?Lang=E
- Census Geographies: https://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/bound-limit-2016-eng.cfm
```
import pandas as pd
import geopandas as gpd
import os
import numpy as np
import matplotlib.pyplot as plt
os.chdir('C:/Users/Leonardo/OneDrive/Documents/MetroWork/RealEstateData')
# make dataframe with variables of interest
import re
variables = open("CensusData2016/Census/Test2Variables.txt")
var_list = []
for line in variables:
var_list.append(line)
var_df = pd.DataFrame({'census_variable': var_list})
var_df = var_df.census_variable.str.split(pat = ".", expand=True)[[0,1]]
#var_df = var_df['1'].str.split(pat = "/n", expand=True)[[0,1]]
var_df = var_df[[0,1]]
var_df.columns = ['Member ID: Profile of Dissemination Areas (2247)', 'DIM: Profile of Dissemination Areas (2247)']
# Read Canada Census data by dissemination area
Canada_census_2016 = gpd.read_file('CensusData2016_MA/CanadaWide/CanadaDAs_Census2016_vars.shp')
# Function for extracting metropolitan census blocks.
def get_metrocensus(canada=None, NAMES=()):
    """Filter the Canada-wide census layer to rows whose CCS name is in ``NAMES``.

    Note: the original docstring claimed the polygons were dissolved, but no
    dissolve is performed -- the filtered (Geo)DataFrame is returned as-is.
    The mutable default ``NAMES=[]`` was replaced with an immutable tuple.

    Parameters
    ----------
    canada : (Geo)DataFrame with a ``CCSNAME`` column.
    NAMES : iterable of CCS (census consolidated subdivision) names to keep,
        e.g. ``['Toronto']``.

    Returns
    -------
    The subset of ``canada`` whose ``CCSNAME`` is in ``NAMES``.
    """
    MAREA = canada[canada.CCSNAME.isin(NAMES)]
    return MAREA
# extract census blocks for all cities
CT = get_metrocensus(canada=Canada_census_2016,NAMES=['Toronto'])
CV = get_metrocensus(canada=Canada_census_2016,NAMES=['Vancouver'])
CM = get_metrocensus(canada=Canada_census_2016,NAMES=['Montréal'])
CO = get_metrocensus(canada=Canada_census_2016,NAMES=['Ottawa'])
CC = get_metrocensus(canada=Canada_census_2016,NAMES=['Calgary'])
CT.plot()
```
# K-Means Clustering
### Extracting Social Groups from Census Data
### 1: Scale Data and Get rid of Outliers
```
#prepare dataframe for ML algorithm
df = pd.DataFrame(CT.iloc[:,23:214])
df = df.replace([np.inf, -np.inf], 0)
#MVDA_Census2016_vars_PCA = MVDA_Census2016_vars_PCA.drop(['index'], axis = 1)
#split into X data and y blocks
X = df.iloc[:,1:] # all rows, all the features and no labels
y = df.iloc[:, 0] # all rows, label (census blocks) only
#import libraries, scale the data
from scipy.stats import mstats
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.decomposition import NMF
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import (KNeighborsClassifier,
NeighborhoodComponentsAnalysis)
from sklearn.preprocessing import StandardScaler, MinMaxScaler
#scaler = StandardScaler()
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
#visualize distribution of variables/outliers
pd.DataFrame(X_scaled).plot.box(figsize=(26,8))
#remove outliers by winsorizing data
X_scaled_wd = mstats.winsorize(X_scaled, inplace = True, axis = 0, limits = [0.05, 0.05])
#visualize data
pd.DataFrame(X_scaled_wd).plot.box(figsize=(26,8))
```
### 2: Fit K-Means algorithm to data and find the optimal number of clusters
```
#import clustering algorithm data using k means
from sklearn.cluster import KMeans
from sklearn import cluster, mixture
from sklearn.metrics import silhouette_score
#to chose the right number of clusters we visualize the inertia of the clusters
kmeans_per_k = [KMeans(n_clusters=k, algorithm='auto', init='k-means++',
max_iter=300, n_init=30, n_jobs=None, precompute_distances='auto',
random_state=5, tol=0.0001).fit(X_scaled_wd) for k in range(1, 10)]
inertias = [model.inertia_ for model in kmeans_per_k]
plt.figure(figsize=(8, 3.5))
plt.plot(range(1, 10), inertias)
plt.xlabel("Number of Clusters", fontsize=14)
plt.ylabel("Inertia", fontsize=14)
plt.show()
#we visualize the silhouette scores
silhouette_scores = [silhouette_score(X_scaled_wd, model.labels_) for model in kmeans_per_k[1:]]
plt.figure(figsize=(8, 3))
plt.plot(range(2, 10), silhouette_scores)
plt.xlabel("K", fontsize=14)
plt.ylabel("Silhouette score", fontsize=14)
plt.show()
#the silhouette scores can be visualized for each label within each number of clusters
from sklearn.metrics import silhouette_samples
from matplotlib.ticker import FixedLocator, FixedFormatter
import matplotlib
plt.figure(figsize=(11, 9))
# we visualize 4 plots, one for the result of each of k-means with 3, 4, 5, and 6 clusters
for k in (3, 4, 5, 6):
plt.subplot(2, 3, k - 2)
y_pred = kmeans_per_k[k - 1].labels_
silhouette_coefficients = silhouette_samples(X_scaled_wd, y_pred)
padding = len(X_scaled_wd) // 30
pos = padding
ticks = []
for i in range(k):
coeffs = silhouette_coefficients[y_pred == i]
coeffs.sort()
color = matplotlib.cm.Spectral(i / k)
plt.fill_betweenx(np.arange(pos, pos + len(coeffs)), 0, coeffs,
facecolor=color, edgecolor=color, alpha=0.7)
ticks.append(pos + len(coeffs) // 2)
pos += len(coeffs) + padding
plt.gca().yaxis.set_major_locator(FixedLocator(ticks))
plt.gca().yaxis.set_major_formatter(FixedFormatter(range(k)))
if k in (3, 5):
plt.ylabel("Cluster")
if k in (5, 6):
plt.gca().set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.xlabel("Silhouette Coefficient")
else:
plt.tick_params(labelbottom=False)
plt.axvline(x=silhouette_scores[k - 2], color="red", linestyle="--")
plt.title("$k={}$".format(k), fontsize=16)
plt.show()
# THE CHOICE IS 4 CLUSTERS: the silhouette scores are highest, each label is
# about the average silhouette coefficient, and each cluster is of similar size.
k4cls_rnd_10_inits_Kplus = KMeans(n_clusters=4, algorithm='auto', init='k-means++',
                                  max_iter=300, n_init=30, n_jobs=None, precompute_distances='auto',
                                  random_state=5, tol=0.0001)
k4cls_Kplus = k4cls_rnd_10_inits_Kplus.fit(X_scaled_wd)

# Merge the cluster labels back onto the (scaled, winsorized) feature frame.
X_std1 = pd.DataFrame(X_scaled_wd)
X_std1.columns = df.iloc[:, 1:191].columns
X_std1['GEO_NAME'] = y.values
X_std1['k4cls'] = k4cls_Kplus.labels_
X_std1.head()

# Merge back to the spatial layer and save the result to a shapefile.
CT['DAUID'] = CT['DAUID'].astype('int64')
CT_4Kcls = pd.merge(CT, X_std1, left_on='DAUID', right_on='GEO_NAME')
# BUG FIX: the original called `CC_4Kcls.to_file(...)`, but no `CC_4Kcls`
# exists anywhere in this notebook (only CT_4Kcls was built above), which
# raises a NameError. Save the Toronto merge instead.
# NOTE(review): the output filename still says "CC"/"3Kcls" -- looks
# copy-pasted from another run; confirm the intended path.
CT_4Kcls.to_file('CensusData2016_MA/PCA/CC_DA_Census2016_3Kcls_5.shp')
```
#### 2: Visualize the size of the clusters
```
#visualize the size of the clusters
ksizes = X_std1.groupby('k4cls').size()
ksizes.plot(kind = 'bar')
plt.title("Size of K-Means Clusters")
```
| github_jupyter |
Intro To Python
=====
In this notebook, we will explore basic Python:
- data types, including dictionaries
- functions
- loops
Please note that we are using Python 3.
(__NOT__ Python 2! Python 2 has some different functions and syntax)
```
# Let's make sure we are using Python 3
import sys
print(sys.version[0])
```
# 1. Basic Data Types: Numbers, Booleans, and Strings
## 1.1 Numbers
```
a = 5
# Note: use the `type()` function to get the type of a variable
# Numbers can be integers ('int'), such as 3, 5 and 3049, or floats
# ('float'), such as 2.5, 3.1, and 2.34938493
print(type(a))
print(a)
list = [1,2]
print(type(list))
```
### Mathematical Operators: +, -, *, /, **
Mathematical operators allow you to perform math operations on numbers in Python.
```
b = a + 1
print(b)
c = a - 1
print(c)
d = a * 2
print(d)
e = a / 2
print(e)
# Note: ** is the exponention operator
f = a ** 2
print(f)
```
### Shorthand mathematical operators
`a += 1` is shorthand for `a = a + 1`
```
a += 1
print(a)
a *= 2
print(a)
```
## 1.2 Booleans & Logic Operators
```
im_true = True
im_false = False
print(type(im_true))
```
### Equality operators
Equality operators (== and !=) allow you to compare the values of variables on the left and right hand side.
```
print(im_true == im_false) # Equality operator
print(im_true != im_false)
```
The `and` operator requires that the variables on each side of the operator are equal to true.
```
print(im_true and im_false)
```
The `or` operator only requires one of the values on either side of the operator to be true.
```
print(im_true or im_false)
```
## 1.3 Strings
You can use single or double quotes for strings.
```
my_string = 'delta'
my_other_string = "analytics"
print(my_string, my_other_string)
```
### String methods
Concatenating strings:
```
another_string = 'hello, ' + my_string + " " + my_other_string
print(another_string)
```
Get the length of the string:
```
print(len(another_string))
```
# 2. Container Data Types
## 2.1 Lists
A Python `list` stores multiple elements, which can be different types
```
my_list = ['a', 'b', 'c', 3485]
print(my_list)
```
You can access an element in a list with the following syntax:
Note: the first element in a list has an index of zero.
```
print(my_list[2])
print(my_list[0])
```
Reassigning elements in a list:
```
my_list[0] = 'delta'
print(my_list)
```
Adding/removing elements from a list:
```
my_list.append('hello')
print(my_list)
my_list.pop()
print(my_list)
```
Accessing multiple elements in a list:
```
print(my_list[0:2]) # Access the elements at indices 0 and 1 (the end index, 2, is excluded)
print(my_list[2:]) # Access elements from index 2 to the end
print(my_list[:2]) # Access elements from the beginning up to (but not including) index 2
```
## 2.2 Dictionaries
Dictionaries hold key/value pairs and are useful for storing information.
```
my_dict = { 'key_one': 'value_one', 'name': 'mike' }
```
Access a value from a dictionary by a key:
```
print(my_dict['key_one'])
print(my_dict['name'])
```
Looping over values of a dictionary:
```
for key in my_dict:
print("The key is " + key)
for key, value in my_dict.items():
print("The key is " + key + ", and the value is " + value)
```
## 2.3 Sets
Sets are similar to lists, but can only contain distinct values.
```
my_set = {1, 2, 3, 'hello'}
print(my_set)
```
When defining a set with the same value present multiple times, only one element will be added to the set. For example:
```
multiple = {1, 2, 2, 2, 2, 2, 3, 'hello'}
print(multiple) # This will return {1, 2, 3, 'hello'}
```
# 3. Functions
A function is a block of reusable code that performs a certain action. Once you've defined a function, you can use it anywhere in your code!
Defining a function:
```
def am_i_happy(happiness_level):
    """Map a numeric happiness level to a short description string."""
    if happiness_level < 5:
        return "You're not happy."
    if happiness_level < 10:
        return "You're happy."
    return "You're very happy."
```
Calling a function:
```
print(am_i_happy(0))
print(am_i_happy(5))
```
# 4. Control Flow
## 4.1 If/Else If/Else
```
sleepy = True
hungry = False
if sleepy and hungry:
print("Eat a snack and take a nap.")
elif sleepy and not hungry:
print("Take a nap")
elif hungry and not sleepy:
print("Eat a snack")
else:
print("Go on with your day")
```
## 4.2 Loops
### 4.2.1 'while' loops
```
counter = 0
while (counter < 10):
print("You have counted to", counter)
counter = counter + 1 # Increment the counter
print("You're finished counting")
```
### 4.2.2 'for' loops
Loop over a list:
```
cool_animals = ['cat', 'dog', 'lion', 'bear']
for animal in cool_animals:
print(animal + "s are cool")
```
Loop over a dict:
```
animal_sounds = {
'dog': 'bark',
'cat': 'meow',
'pig': 'oink'
}
for animal, sound in animal_sounds.items():
print("The " + animal + " says " + sound + "!")
```
<br>
<br>
<br>
----
| github_jupyter |
<a href="https://colab.research.google.com/gist/adaamko/0161526d638e1877f7b649b3fff8f3de/deep-learning-practical-lesson.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Natural Language Processing and Information Extraction
## Deep learning - practical session
__Nov 12, 2021__
__Ádám Kovács__
During this lecture we are going to use a classification dataset from a shared task: SemEval 2019 - Task 6.
The dataset is about Identifying and Categorizing Offensive Language in Social Media.
__Preparation:__
- You will need the Semeval dataset (we will have code to download it)
- You will need to install pytorch:
- pip install torch
- You will also need to have pandas, torchtext, numpy and scikit learn installed.
We are going to use an open source library for building optimized deep learning models that can be run on GPUs, the library is called [Pytorch](https://pytorch.org/docs/stable/index.html). It is one of the most widely used libraries for building neural networks/deep learning models.
In this lecture we are mostly using pure PyTorch models, but there are multiple libraries available to make it even easier to build neural networks. You are free to use them in your projects.
Just to name a few:
- TorchText: https://pytorch.org/text/stable/index.html
- AllenNLP: https://github.com/allenai/allennlp
__NOTE: It is advised to use Google Colab for this laboratory for free access to GPUs, and also for reproducibility.__
```
!pip install torch
# Import the needed libraries
import pandas as pd
import numpy as np
```
## Download the dataset and load it into a pandas DataFrame
```
import os

# Ensure a local ./data directory exists for the downloaded corpus.
# makedirs(..., exist_ok=True) replaces the isdir/mkdir check-then-act pair.
os.makedirs("./data", exist_ok=True)
import urllib.request

# urllib.request.URLopener has been deprecated since Python 3.3;
# urlretrieve is the supported one-call download helper.
urllib.request.urlretrieve(
    "https://raw.githubusercontent.com/ZeyadZanaty/offenseval/master/datasets/training-v1/offenseval-training-v1.tsv",
    "data/offenseval.tsv",
)
```
## Read in the dataset into a Pandas DataFrame
Use `pd.read_csv` with the correct parameters to read in the dataset. If done correctly, `DataFrame` should have 5 columns,
`id`, `tweet`, `subtask_a`, `subtask_b`, `subtask_c`.
```
import pandas as pd
import numpy as np
def read_dataset():
    """Load the OffensEval training TSV into a pandas DataFrame."""
    # The file is tab-separated; pandas infers the header row itself.
    return pd.read_csv("./data/offenseval.tsv", sep="\t")
train_data_unprocessed = read_dataset()
train_data_unprocessed
```
## Convert `subtask_a` into a binary label
The task is to classify the given tweets into two category: _offensive(OFF)_ , _not offensive (NOT)_. For machine learning algorithms you will need integer labels instead of strings. Add a new column to the dataframe called `label`, and transform the `subtask_a` column into a binary integer label.
```
def transform(train_data):
    """Add a binary integer ``label`` column and strip @USER mentions.

    ``subtask_a`` values NOT/OFF are mapped to 0/1.  The input frame is
    modified in place and also returned for convenience.
    """
    mapping = {"NOT": 0, "OFF": 1}
    # Unknown subtask_a values raise KeyError, surfacing bad rows early
    train_data["label"] = [mapping[tag] for tag in train_data["subtask_a"]]
    # Remove the anonymized user mentions; surrounding whitespace is kept
    train_data["tweet"] = train_data["tweet"].str.replace("@USER", "")
    return train_data
train_data = transform(train_data_unprocessed)
```
## Train a simple neural network on this dataset
In this notebook we are going to build different neural architectures on the task:
- A simple one layered feed forward neural network (FNN) with one-hot encoded vectors
- Adding more layers to the FNN, making it a deep neural network
- Instead of using one-hot encoded vectors we are going to add embedding vectors to the architecture, that takes the sequential nature of natural texts into account
- Then we will train LSTM networks
- At last, we will also build a Transformer architecture, that currently achieves SOTA results on a lot of tasks
First we will build one-hot-encoded vectors for each sentence, and then use a simple feed forward neural network to predict the correct labels.
```
# First we need to import pytorch and set a fixed random seed number for reproducibility
import torch
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
```
### Split the dataset into a train and a validation dataset
Use the random seed for splitting. You should split the dataset into 70% training data and 30% validation data
```
from sklearn.model_selection import train_test_split as split
def split_data(train_data, random_seed):
    """Split ``train_data`` into 70% training / 30% validation frames.

    Bug fix: the function previously ignored its ``random_seed`` parameter
    and used the global ``SEED`` instead, so callers could not control the
    split.  The default call site passes SEED, so behavior is unchanged there.
    """
    tr_data, val_data = split(train_data, test_size=0.3, random_state=random_seed)
    return tr_data, val_data
tr_data, val_data = split_data(train_data, SEED)
```
### Use CountVectorizer to prepare the features for the sentences
_CountVectorizer_ is a great tool from _sklearn_ that helps us with basic preprocessing steps. It has lots of parameters to play with, you can check the documentation [here](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html). It will:
- Tokenize, lowercase the text
- Filter out stopwords
- Convert the text into one-hot encoded vectors
- Select the _n_-best features
We fit CountVectorizer using _3000_ features
We will also _lemmatize_ texts using the _nltk_ package and its lemmatizer. Check the [docs](https://www.nltk.org/_modules/nltk/stem/wordnet.html) for more.
```
from sklearn.feature_extraction.text import CountVectorizer
import nltk
nltk.download("punkt")
nltk.download("wordnet")
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
class LemmaTokenizer(object):
    """Callable tokenizer that lemmatizes every token of a document.

    Intended as the ``tokenizer=`` argument of CountVectorizer.
    """

    def __init__(self):
        # One lemmatizer instance is reused across all calls
        self.wnl = WordNetLemmatizer()

    def __call__(self, articles):
        lemmas = []
        for token in word_tokenize(articles):
            lemmas.append(self.wnl.lemmatize(token))
        return lemmas
def prepare_vectorizer(tr_data):
    """Fit a 3000-feature CountVectorizer on the training tweets.

    Tokens are lemmatized via LemmaTokenizer and English stopwords are
    dropped; the fitted vectorizer doubles as the word-to-id mapping.
    """
    bag_of_words = CountVectorizer(
        max_features=3000, tokenizer=LemmaTokenizer(), stop_words="english"
    )
    # fit() learns the vocabulary from the corpus and returns the vectorizer
    return bag_of_words.fit(tr_data.tweet)
word_to_ix = prepare_vectorizer(tr_data)
# The vocab size is the length of the vocabulary, or the length of the feature vectors
VOCAB_SIZE = len(word_to_ix.vocabulary_)
assert VOCAB_SIZE == 3000
```
CountVectorizer can directly transform any sentence into a one-hot encoded vector based on the corpus it was built upon.

```
word_to_ix.transform(["Hello my name is adam"]).toarray()
# Initialize the correct device
# It is important that every array should be on the same device or the training won't work
# A device could be either the cpu or the gpu if it is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```
### Prepare the DataLoader for batch processing
The __prepare_dataloader(..)__ function will take the training and the validation dataset and convert them to one-hot encoded vectors with the help of the initialized CountVectorizer.
We prepare two FloatTensors and LongTensors for the converted tweets and labels of the training and the validation data.
Then zip together the vectors with the labels as a list of tuples!
```
# Preparing the data loaders for the training and the validation sets
# PyTorch operates on it's own datatype which is very similar to numpy's arrays
# They are called Torch Tensors: https://pytorch.org/docs/stable/tensors.html
# They are optimized for training neural networks
def prepare_dataloader(tr_data, val_data, word_to_ix):
    """One-hot encode tweets and pair each sample tensor with its label.

    Returns two lists (train, validation) of ``(FloatTensor, LongTensor)``
    tuples.  Every tensor is moved to the global ``device`` — all tensors
    must live on the same device for training to work.
    """

    def vectorize(frame):
        # CountVectorizer -> dense one-hot matrix -> FloatTensor on `device`
        vecs = torch.FloatTensor(word_to_ix.transform(frame.tweet).toarray())
        labels = torch.LongTensor(frame.label.tolist())
        return vecs.to(device), labels.to(device)

    tr_vecs, tr_labels = vectorize(tr_data)
    val_vecs, val_labels = vectorize(val_data)
    # Pair features with labels as (sample, label) tuples for the DataLoader
    return list(zip(tr_vecs, tr_labels)), list(zip(val_vecs, val_labels))
tr_data_loader, val_data_loader = prepare_dataloader(tr_data, val_data, word_to_ix)
```
- __We have the correct lists now, it is time to initialize the DataLoader objects!__
- __Create two DataLoader objects with the lists we have created__
- __Shuffle the training data but not the validation data!__
```
# We then define a BATCH_SIZE for our model
# Usually we don't feed the whole dataset into our model at once
# For this we have the BATCH_SIZE parameter
# Try to experiment with different sized batches and see if changing this will improve the performance or not!
BATCH_SIZE = 64
from torch.utils.data import DataLoader
# The DataLoader(https://pytorch.org/docs/stable/data.html) class helps us to prepare the training batches
# It has a lot of useful parameters, one of it is _shuffle_ which will randomize the training dataset in each epoch
# This can also improve the performance of our model
def create_dataloader_iterators(tr_data_loader, val_data_loader, BATCH_SIZE):
    """Wrap the (sample, label) lists in batching DataLoaders.

    Training batches are reshuffled every epoch (which can help training);
    validation batches keep a fixed order for reproducible evaluation.
    """

    def batched(samples, shuffle):
        return DataLoader(samples, batch_size=BATCH_SIZE, shuffle=shuffle)

    return batched(tr_data_loader, True), batched(val_data_loader, False)
train_iterator, valid_iterator = create_dataloader_iterators(
tr_data_loader, val_data_loader, BATCH_SIZE
)
assert type(train_iterator) == torch.utils.data.dataloader.DataLoader
```
### Building the first PyTorch model
At first, the model will contain a single Linear layer that takes one-hot-encoded vectors and transforms them into the dimension of __NUM_LABELS__ (how many classes we are trying to predict). Then, run the output through a softmax activation to produce probabilities of the classes!
```
from torch import nn
class BoWClassifier(nn.Module):
    """Shallow bag-of-words classifier: one affine map plus log-softmax.

    A deeper variant would insert hidden Linear layers (with a HIDDEN_SIZE
    hyperparameter) separated by non-linearities such as ReLU.
    """

    def __init__(self, num_labels, vocab_size):
        # nn.Module's init must always run first
        super(BoWClassifier, self).__init__()
        # Single affine layer mapping a one-hot sentence vector to class scores
        self.linear = nn.Linear(vocab_size, num_labels)

    def forward(self, bow_vec, sequence_lens):
        # sequence_lens is unused here; kept for a uniform model interface
        scores = self.linear(bow_vec)
        # log-softmax yields log-probabilities suitable for NLLLoss
        return F.log_softmax(scores, dim=1)
# The INPUT_DIM is the size of our input vectors
INPUT_DIM = VOCAB_SIZE
# We have only 2 classes
OUTPUT_DIM = 2
# Init the model
# At first it is untrained, the weights are assigned random
model = BoWClassifier(OUTPUT_DIM, INPUT_DIM)
# Set the optimizer and the loss function!
# https://pytorch.org/docs/stable/optim.html
import torch.optim as optim
# The optimizer will update the weights of our model based on the loss function
# This is essential for correct training
# The _lr_ parameter is the learning rate
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.NLLLoss()
# Copy the model and the loss function to the correct device
model = model.to(device)
criterion = criterion.to(device)
assert model.linear.out_features == 2
```
### Training and evaluating PyTorch models
- __calculate_performance__: This should calculate the batch-wise precision, recall, and fscore of your model!
- __train__ - Train your model on the training data! This function should set the model to training mode, then use the given iterator to iterate through the training samples and make predictions using the provided model. You should then propagate back the error with the loss function and the optimizer. Finally return the average epoch loss and performance!
- __evaluate__ - Evaluate your model on the validation dataset. This function is essentially the same as the training function, but you should set your model to eval mode and don't propagate back the errors to your weights!
```
from sklearn.metrics import precision_recall_fscore_support
def calculate_performance(preds, y):
    """
    Return precision, recall and fscore of the positive class for one batch.

    ``preds`` holds per-class log-probabilities, ``y`` the gold labels.
    """
    # Convert log-probabilities to hard label predictions
    rounded_preds = preds.argmax(1)
    # Bug fix: sklearn's signature is (y_true, y_pred); the arguments were
    # previously swapped, which silently exchanged precision and recall.
    # Tensors may live on the GPU, so copy them to CPU for sklearn.
    precision, recall, fscore, support = precision_recall_fscore_support(
        y.cpu(), rounded_preds.cpu()
    )
    # Index 1 selects the metrics of the positive (offensive) class
    return precision[1], recall[1], fscore[1]
import torch.nn.functional as F
def train(model, iterator, optimizer, criterion):
    """Run one training epoch; return (loss, precision, recall, fscore)
    averaged over the batches of ``iterator``."""
    # We will calculate loss and accuracy epoch-wise based on average batch accuracy
    epoch_loss = 0
    epoch_prec = 0
    epoch_recall = 0
    epoch_fscore = 0
    # You always need to set your model to training mode
    # If you don't set your model to training mode the error won't propagate back to the weights
    model.train()
    # We calculate the error on batches so the iterator will return matrices with shape [BATCH_SIZE, VOCAB_SIZE]
    for batch in iterator:
        text_vecs = batch[0]
        labels = batch[1]
        sen_lens = []
        texts = []
        # Padded batches additionally carry sequence lengths and raw texts
        # (used by the embedding/LSTM and Transformer models later on)
        if len(batch) > 2:
            sen_lens = batch[2]
            texts = batch[3]
        # We reset the gradients from the last step, so the loss will be calculated correctly (and not added together)
        optimizer.zero_grad()
        # This runs the forward function on your model (you don't need to call it directly)
        predictions = model(text_vecs, sen_lens)
        # Calculate the loss and the accuracy on the predictions (the predictions are log probabilities, remember!)
        loss = criterion(predictions, labels)
        prec, recall, fscore = calculate_performance(predictions, labels)
        # Propagate the error back on the model (this means changing the initial weights in your model)
        # Calculate gradients on parameters that require grad
        loss.backward()
        # Update the parameters
        optimizer.step()
        # We add batch-wise loss to the epoch-wise loss
        epoch_loss += loss.item()
        # We also do the same with the scores
        epoch_prec += prec.item()
        epoch_recall += recall.item()
        epoch_fscore += fscore.item()
    # Average every metric over the number of batches in the epoch
    return (
        epoch_loss / len(iterator),
        epoch_prec / len(iterator),
        epoch_recall / len(iterator),
        epoch_fscore / len(iterator),
    )
# The evaluation is done on the validation dataset
def evaluate(model, iterator, criterion):
    """Score the model on ``iterator`` without updating any weights; return
    (loss, precision, recall, fscore) averaged over the batches."""
    epoch_loss = 0
    epoch_prec = 0
    epoch_recall = 0
    epoch_fscore = 0
    # On the validation dataset we don't want training so we need to set the model on evaluation mode
    model.eval()
    # Also tell Pytorch to not propagate any error backwards in the model or calculate gradients
    # This is needed when you only want to make predictions and use your model in inference mode!
    with torch.no_grad():
        # The remaining part is the same with the difference of not using the optimizer to backpropagation
        for batch in iterator:
            text_vecs = batch[0]
            labels = batch[1]
            sen_lens = []
            texts = []
            # Padded batches additionally carry sequence lengths and raw texts
            if len(batch) > 2:
                sen_lens = batch[2]
                texts = batch[3]
            predictions = model(text_vecs, sen_lens)
            loss = criterion(predictions, labels)
            prec, recall, fscore = calculate_performance(predictions, labels)
            epoch_loss += loss.item()
            epoch_prec += prec.item()
            epoch_recall += recall.item()
            epoch_fscore += fscore.item()
    # Return averaged loss on the whole epoch!
    return (
        epoch_loss / len(iterator),
        epoch_prec / len(iterator),
        epoch_recall / len(iterator),
        epoch_fscore / len(iterator),
    )
import time
# This is just for measuring training time!
def epoch_time(start_time, end_time):
    """Return the elapsed wall-clock span as whole (minutes, seconds)."""
    total = end_time - start_time
    mins = int(total // 60)
    # Remaining seconds after removing whole minutes, truncated to int
    return mins, int(total - 60 * mins)
```
### Training loop!
Below is the training loop of our model! Try to set an EPOCH number that will correctly train your model :) (so it is neither underfitted nor overfitted!)
```
def training_loop(epoch_number=15):
    """Train for ``epoch_number`` epochs, validating after each one and
    checkpointing the weights with the best validation loss to 'tut1-model.pt'.

    Relies on the module-level ``model``, ``optimizer``, ``criterion``,
    ``train_iterator`` and ``valid_iterator`` — reinitialize them before
    rerunning, or training continues from the previous state.
    """
    # Set an EPOCH number!
    N_EPOCHS = epoch_number
    best_valid_loss = float("inf")
    # We loop forward on the epoch number
    for epoch in range(N_EPOCHS):
        start_time = time.time()
        # Train the model on the training set using the dataloader
        train_loss, train_prec, train_rec, train_fscore = train(
            model, train_iterator, optimizer, criterion
        )
        # And validate your model on the validation set
        valid_loss, valid_prec, valid_rec, valid_fscore = evaluate(
            model, valid_iterator, criterion
        )
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        # If we find a better model, we save the weights so later we may want to reload it
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), "tut1-model.pt")
        print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
        print(
            f"\tTrain Loss: {train_loss:.3f} | Train Prec: {train_prec*100:.2f}% | Train Rec: {train_rec*100:.2f}% | Train Fscore: {train_fscore*100:.2f}%"
        )
        print(
            f"\t Val. Loss: {valid_loss:.3f} | Val Prec: {valid_prec*100:.2f}% | Val Rec: {valid_rec*100:.2f}% | Val Fscore: {valid_fscore*100:.2f}%"
        )
training_loop()
```
__NOTE: DON'T FORGET TO RERUN THE MODEL INITIALIZATION WHEN YOU ARE TRYING TO RUN THE MODEL MULTIPLE TIMES. IF YOU DON'T REINITIALIZE THE MODEL IT WILL CONTINUE THE TRAINING WHERE IT STOPPED LAST TIME AND WON'T RUN FROM SCRATCH!__
These lines:
```python
model = BoWClassifier(OUTPUT_DIM, INPUT_DIM)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.NLLLoss()
model = model.to(device)
criterion = criterion.to(device)
```
This will reinitialize the model!
```
def reinitialize(model):
    """Create a fresh optimizer and loss for ``model`` and return all three.

    Bug fix: the previous version bound ``optimizer`` and ``criterion`` to
    locals and returned nothing, so the module-level objects used by the
    training loop were never actually replaced.  Rebind at the call site:

        model, optimizer, criterion = reinitialize(BoWClassifier(OUTPUT_DIM, INPUT_DIM))
    """
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.NLLLoss()
    # .to(device) moves a module's parameters in place and returns it
    model = model.to(device)
    criterion = criterion.to(device)
    return model, optimizer, criterion
reinitialize(BoWClassifier(OUTPUT_DIM, INPUT_DIM))
```
## Add more linear layers to the model and experiment with other hyper-parameters
### More layers
Currently we only have a single linear layer in our model. We are now adding more linear layers to the model.
We also introduce a HIDDEN_SIZE parameter that will be the size of the intermediate representation between the linear layers. Also adding a RELU activation function between the linear layers.
See more:
- https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
- https://pytorch.org/tutorials/beginner/examples_nn/two_layer_net_nn.html
```
from torch import nn
class BoWDeepClassifier(nn.Module):
    """Two-layer feed-forward bag-of-words classifier with a ReLU between
    the hidden projection and the output projection."""

    def __init__(self, num_labels, vocab_size, hidden_size):
        super(BoWDeepClassifier, self).__init__()
        # Projects the one-hot sentence vector into the hidden space
        self.linear1 = nn.Linear(vocab_size, hidden_size)
        # Non-linear activation between the two affine layers
        self.relu = torch.nn.ReLU()
        # Projects the hidden representation onto the class scores
        self.linear2 = nn.Linear(hidden_size, num_labels)

    def forward(self, bow_vec, sequence_lens):
        # sequence_lens is unused; kept for interface parity with other models
        hidden = self.relu(self.linear1(bow_vec))
        # Log-probabilities over the classes, as expected by NLLLoss
        return F.log_softmax(self.linear2(hidden), dim=1)
HIDDEN_SIZE = 200
learning_rate = 0.001
BATCH_SIZE = 64
N_EPOCHS = 15
model = BoWDeepClassifier(OUTPUT_DIM, INPUT_DIM, HIDDEN_SIZE)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.NLLLoss()
model = model.to(device)
criterion = criterion.to(device)
training_loop()
```
## Implement automatic early-stopping in the training loop
Early stopping is a very easy method to avoid the overfitting of your model.
We could:
- Save the training and the validation loss of the last two epochs (if you are at least in the third epoch)
- If the loss kept decreasing on the training data over the last two epochs but increased or stagnated on the validation data, you should stop the training automatically!
```
# REINITIALIZE YOUR MODEL TO GET A CORRECT RUN!
```
## Handling class imbalance
Our data is imbalanced, the first class has twice the population of the second class.
One way of handling imbalanced data is to weight the loss function, so it penalizes errors on the smaller class.
Look at the documentation of the loss function: https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html
Set the weights based on the inverse population of the classes (so the fewer samples a class has, the more heavily its errors will be penalized!)
```
# Inspect the class distribution of the training labels
tr_data.groupby("label").size()
# Weight the loss inversely to class frequency: errors on the smaller
# (offensive) class count twice as much as errors on the larger class.
weights = torch.Tensor([1, 2])
criterion = nn.NLLLoss(weight=weights)
```
## Adding an Embedding Layer to the network
- We only used one-hot-encoded vectors as our features until now
- Now we will introduce an [embedding](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html) layer into our network.
- We will feed the words into our network one-by-one, and the layer will learn a dense vector representation for each word

_from pytorch.org_
```
# Get the analyzer to get the word-id mapping from CountVectorizer
an = word_to_ix.build_analyzer()
an("hello my name is adam")
max(word_to_ix.vocabulary_, key=word_to_ix.vocabulary_.get)
len(word_to_ix.vocabulary_)
def create_input(dataset, analyzer, vocabulary, unk_id=3000):
    """Convert each tweet to a LongTensor of vocabulary ids on ``device``.

    Tokens missing from ``vocabulary`` map to ``unk_id``.  The default of
    3000 is one past the 0-2999 feature range of the 3000-feature
    CountVectorizer (``unk_id + 1``, i.e. 3001, is reserved for padding);
    the parameter generalizes the previously hard-coded constant for other
    vocabulary sizes.  If preprocessing (stopword removal, lemmatization)
    removes every token of a tweet, a single ``unk_id`` is emitted so no
    tensor is empty.
    """
    dataset_as_indices = []
    for tweet in dataset:
        tokens = analyzer(tweet)
        # dict.get maps known tokens to their id and everything else to unk_id
        token_ids = [vocabulary.get(token, unk_id) for token in tokens]
        if not token_ids:
            token_ids.append(unk_id)
        dataset_as_indices.append(torch.LongTensor(token_ids).to(device))
    return dataset_as_indices
# We add the length of the tweets so sentences with similar lengths will be next to each other
# This can be important because of padding
tr_data["length"] = tr_data.tweet.str.len()
val_data["length"] = val_data.tweet.str.len()
tr_data.tweet.str.len()
tr_data = tr_data.sort_values(by="length")
val_data = val_data.sort_values(by="length")
# We create the dataset as ids of tokens
dataset_as_ids = create_input(tr_data.tweet, an, word_to_ix.vocabulary_)
dataset_as_ids[0]
```
### Padding
- We didn't need to take care of input padding when using one-hot-encoded vectors
- Padding handles different sized inputs
- We can pad sequences from the left, or from the right

_image from https://towardsdatascience.com/nlp-preparing-text-for-deep-learning-model-using-tensorflow2-461428138657_
```
from torch.nn.utils.rnn import pad_sequence
# pad_sequence will take care of the padding
# we will need to provide a padding_value to it
padded = pad_sequence(dataset_as_ids, batch_first=True, padding_value=3001)
def prepare_dataloader_with_padding(tr_data, val_data, word_to_ix):
    """Build (sample, label, length, text) lists for training and validation.

    Tweets are encoded as id sequences and padded to equal length with the
    padding id 3001 (padding is done over the whole split here; it could
    also be done batch-wise).  The original sequence lengths are kept so
    the LSTM can skip padding, and the raw tweet texts are carried along
    for the Transformer tokenizer used later.

    Performance fix: ``create_input`` previously ran twice per split (once
    for the padded ids and once again just for the lengths); each split is
    now encoded exactly once.
    """

    def encode_split(frame):
        # Encode once; derive both the padded matrix and the lengths from it
        id_seqs = create_input(frame.tweet, an, word_to_ix.vocabulary_)
        lengths = torch.LongTensor([len(seq) for seq in id_seqs])
        padded_vecs = pad_sequence(id_seqs, batch_first=True, padding_value=3001)
        labels = torch.LongTensor(frame.label.tolist()).to(device)
        texts = frame.tweet.tolist()
        return list(zip(padded_vecs, labels, lengths, texts))

    return encode_split(tr_data), encode_split(val_data)
tr_data_loader, val_data_loader = prepare_dataloader_with_padding(
tr_data, val_data, word_to_ix
)
def create_dataloader_iterators_with_padding(
    tr_data_loader, val_data_loader, BATCH_SIZE
):
    """Batch the padded (sample, label, length, text) tuples.

    Same policy as ``create_dataloader_iterators``: training data is
    reshuffled every epoch, validation data keeps a fixed order.
    """
    loaders = [
        DataLoader(split, batch_size=BATCH_SIZE, shuffle=is_train)
        for split, is_train in ((tr_data_loader, True), (val_data_loader, False))
    ]
    return loaders[0], loaders[1]
train_iterator, valid_iterator = create_dataloader_iterators_with_padding(
tr_data_loader, val_data_loader, BATCH_SIZE
)
next(iter(train_iterator))
```

_image from bentrevett_
```
from torch import nn
import numpy as np
class BoWClassifierWithEmbedding(nn.Module):
    """Embedding-based classifier: learned word vectors, max-pooled into a
    single sentence vector, then one linear layer with log-softmax output."""

    def __init__(self, num_labels, vocab_size, embedding_dim):
        super(BoWClassifierWithEmbedding, self).__init__()
        # One dense trainable vector per word id; id 3001 is the padding symbol.
        # The layer learns the vectors from the contexts during training.
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=3001)
        # Training of the embedding weights is explicitly enabled here; it
        # could be frozen when loading precomputed vectors such as GloVe.
        self.embedding.weight.requires_grad = True
        self.linear = nn.Linear(embedding_dim, num_labels)

    def forward(self, text, sequence_lens):
        # (batch, seq_len) ids -> (batch, seq_len, embedding_dim) vectors
        embedded = self.embedding(text)
        # Max-pool over the sequence axis to get one vector per sentence;
        # min/average pooling (or an LSTM) would be alternatives
        pooled = F.max_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)
        scores = self.linear(pooled)
        return F.log_softmax(scores, dim=1)
```
Output of the LSTM layer..

_image from stackoverflow_
```
class LSTMClassifier(nn.Module):
    """Sentence classifier: embedding -> packed LSTM -> linear -> log-softmax."""

    def __init__(self, num_labels, vocab_size, embedding_dim, hidden_dim):
        super(LSTMClassifier, self).__init__()
        # id 3001 is the padding symbol (see the data preparation code)
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=3001)
        self.embedding.weight.requires_grad = True
        # Define the LSTM layer
        # Documentation: https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
        self.lstm = nn.LSTM(
            embedding_dim,
            hidden_dim,
            batch_first=True,
            num_layers=1,
            bidirectional=False,
        )
        # Maps the final LSTM hidden state onto the class scores
        self.linear = nn.Linear(hidden_dim, num_labels)
        # Dropout to overcome overfitting
        # NOTE(review): this layer is defined but never applied in forward() —
        # presumably it was meant to wrap the hidden state; confirm intent
        self.dropout = nn.Dropout(0.25)

    def forward(self, text, sequence_lens):
        # (batch, seq_len) ids -> (batch, seq_len, embedding_dim) vectors
        embedded = self.embedding(text)
        # To ensure LSTM doesn't learn gradients for the id of the padding symbol
        packed = nn.utils.rnn.pack_padded_sequence(
            embedded, sequence_lens, enforce_sorted=False, batch_first=True
        )
        packed_outputs, (h, c) = self.lstm(packed)
        # extract LSTM outputs (not used here)
        lstm_outputs, lens = nn.utils.rnn.pad_packed_sequence(
            packed_outputs, batch_first=True
        )
        # We use the last hidden vector from LSTM (h[-1] is the final layer's
        # hidden state after the last non-padded step of each sequence)
        y = self.linear(h[-1])
        log_probs = F.log_softmax(y, dim=1)
        return log_probs
INPUT_DIM = VOCAB_SIZE + 2
OUTPUT_DIM = 2
EMBEDDING_DIM = 100
HIDDEN_DIM = 20
criterion = nn.NLLLoss()
# model = BoWClassifierWithEmbedding(OUTPUT_DIM, INPUT_DIM, EMBEDDING_DIM)
model = LSTMClassifier(OUTPUT_DIM, INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM)
model = model.to(device)
criterion = criterion.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
training_loop(epoch_number=15)
```
## Transformers
To completely understand the transformers architecture look at this lecture held by Judit Acs (on the course of Introduction to Python and Natural Language Technologies in BME):
- https://github.com/bmeaut/python_nlp_2021_spring/blob/main/lectures/09_Transformers_BERT.ipynb
Here I will only include and present the necessary details _from the lecture_ about transformers and BERT.
### Problems with recurrent neural networks:
Recall that we used recurrent neural cells, specifically LSTMs to encode a list of vectors into a sentence vector.
- Problem 1. No parallelism
- LSTMs are recurrent, they rely on their left and right history, so the symbols need to be processed in order -> no parallelism.
- Problem 2. Long-range dependencies
- Long-range dependencies are not infrequent in NLP.
- "The people/person who called and wanted to rent your house when you go away next year are/is from California" -- Miller & Chomsky 1963
- LSTMs have a problem capturing these because there are too many backpropagation steps between the symbols.
Introduced in [Attention Is All You Need](https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) by Vaswani et al., 2017
Transformers solve Problem 1 by relying purely on attention instead of recurrence.
Not having recurrent connections means that sequence position no longer matters.
Recurrence is replaced by self attention.
- Transformers are available in the __transformers__ Python package: https://github.com/huggingface/transformers.
- There are thousands of pretrained transformers models in different languages and with different architectures.
- With the huggingface package there is a unified interface to download and use all the models. Browse https://huggingface.co/models for more!
- There is also a great blog post to understand the architecture of transformers: https://jalammar.github.io/illustrated-transformer/
### BERT
[BERT](https://www.aclweb.org/anthology/N19-1423/): Pre-training of Deep Bidirectional Transformers for Language Understanding by Devlin et al. 2018, 17500 citations
[BERTology](https://huggingface.co/transformers/bertology.html) is the nickname for the growing amount of BERT-related research.
BERT trains a transformer model on two tasks:
- Masked language model:
- 15% of the tokens (wordpieces) are selected at the beginning.
- 80% of those are replaced with [MASK],
- 10% are replaced with a random token,
- 10% are kept intact.
- Next sentence prediction:
- Are sentences A and B consecutive sentences?
- Generate 50-50%.
- Binary classification task.
### Training, Finetuning BERT
- BERT models are (masked-)language models that were usually trained on large corpora.
- e.g. BERT base model was trained on BookCorpus, a dataset consisting of 11,038 unpublished books and English Wikipedia.
#### Finetuning
- Get a trained BERT model.
- Add a small classification layer on top (typically a 2-layer MLP).
- Train BERT along with the classification layer on an annotated dataset.
- Much smaller than the data BERT was trained on
- Another option: freeze BERT and train the classification layer only.
- Easier training regime.
- Smaller memory footprint.
- Worse performance.
<img src="https://production-media.paperswithcode.com/methods/new_BERT_Overall.jpg" alt="finetune" width="800px"/>
```
!pip install transformers
```
### WordPiece tokenizer
- BERT has its own tokenizer
- All inputs must be tokenized with BERT
- You don't need to remove stopwords, lemmatize, preprocess the input for BERT
- It is a middle ground between word and character tokenization.
- Static vocabulary:
- Special tokens: [CLS], [SEP], [MASK], [UNK]
- It tokenizes everything, falling back to characters and [UNK] if necessary
```
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
print(type(tokenizer))
print(len(tokenizer.get_vocab()))
tokenizer.tokenize("My shihtzu's name is Maszat.")
tokenizer("There are black cats and black dogs.", "Another sentence.")
```
### Train a BertForSequenceClassification model on the dataset
```
from transformers import BertForSequenceClassification
```
__BertForSequenceClassification__ is a helper class to train transformer-based BERT models. It puts a classification layer on top of a pretrained model.
Read more in the documentation: https://huggingface.co/transformers/model_doc/bert.html#bertforsequenceclassification
```
# Pretrained BERT with a fresh 2-way classification head on top.
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
_ = model.to(device)
# We only want to finetune the classification layer on top of BERT,
# so freeze every parameter of the base (encoder) model.
for p in model.base_model.parameters():
    p.requires_grad = False

# Inspect the named parameters: embeddings, transformer layers, classifier head.
params = list(model.named_parameters())
print(f"The BERT model has {len(params)} different named parameters.")
print("==== Embedding Layer ====\n")
for p in params[0:5]:
    print(f"{p[0]} {str(tuple(p[1].size()))}")
print("\n==== First Transformer ====\n")
for p in params[5:21]:
    print(f"{p[0]} {str(tuple(p[1].size()))}")
print("\n==== Output Layer ====\n")
for p in params[-4:]:
    print(f"{p[0]} {str(tuple(p[1].size()))}")
# ---------------------------------------------------------------------------
# Finetuning loop: train for N_EPOCHS over the padded dataloaders, tokenizing
# raw texts with BERT's own tokenizer on the fly, and validate every epoch.
# ---------------------------------------------------------------------------
N_EPOCHS = 5
optimizer = optim.Adam(model.parameters())

tr_data_loader, val_data_loader = prepare_dataloader_with_padding(
    tr_data, val_data, word_to_ix
)
train_iterator, valid_iterator = create_dataloader_iterators_with_padding(
    tr_data_loader, val_data_loader, BATCH_SIZE
)

for epoch in range(N_EPOCHS):
    start_time = time.time()

    train_epoch_loss = 0
    train_epoch_prec = 0
    train_epoch_recall = 0
    train_epoch_fscore = 0
    model.train()
    # We use our own iterator but now use the raw texts instead of the ID tokens
    for train_batch in train_iterator:
        # BUGFIX: move the labels to the same device as the model; with the
        # model on GPU, CPU-resident labels make the loss computation fail.
        labels = train_batch[1].to(device)
        texts = train_batch[3]
        optimizer.zero_grad()
        # We use BERT's own tokenizer on raw texts
        # Check the documentation: https://huggingface.co/transformers/main_classes/tokenizer.html
        encoded = tokenizer(
            texts,
            truncation=True,
            max_length=128,
            padding=True,
            return_tensors="pt",
        )
        # BERT converts texts into IDs of its own vocabulary
        input_ids = encoded["input_ids"].to(device)
        # Mask to avoid performing attention on padding token indices.
        attention_mask = encoded["attention_mask"].to(device)
        # Run the model; when labels are supplied it returns (loss, logits, ...)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs[0]
        predictions = outputs[1]
        prec, recall, fscore = calculate_performance(predictions, labels)
        loss.backward()
        optimizer.step()
        train_epoch_loss += loss.item()
        train_epoch_prec += prec.item()
        train_epoch_recall += recall.item()
        train_epoch_fscore += fscore.item()
    train_loss = train_epoch_loss / len(train_iterator)
    train_prec = train_epoch_prec / len(train_iterator)
    train_rec = train_epoch_recall / len(train_iterator)
    train_fscore = train_epoch_fscore / len(train_iterator)

    # And validate your model on the validation set
    valid_epoch_loss = 0
    valid_epoch_prec = 0
    valid_epoch_recall = 0
    valid_epoch_fscore = 0
    model.eval()
    with torch.no_grad():
        for valid_batch in valid_iterator:
            # BUGFIX: same device move as in the training loop above.
            labels = valid_batch[1].to(device)
            texts = valid_batch[3]
            encoded = tokenizer(
                texts,
                truncation=True,
                max_length=128,
                padding=True,
                return_tensors="pt",
            )
            input_ids = encoded["input_ids"].to(device)
            attention_mask = encoded["attention_mask"].to(device)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs[0]
            predictions = outputs[1]
            prec, recall, fscore = calculate_performance(predictions, labels)
            # We add batch-wise loss to the epoch-wise loss
            valid_epoch_loss += loss.item()
            valid_epoch_prec += prec.item()
            valid_epoch_recall += recall.item()
            valid_epoch_fscore += fscore.item()
    valid_loss = valid_epoch_loss / len(valid_iterator)
    valid_prec = valid_epoch_prec / len(valid_iterator)
    valid_rec = valid_epoch_recall / len(valid_iterator)
    valid_fscore = valid_epoch_fscore / len(valid_iterator)

    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
    print(
        f"\tTrain Loss: {train_loss:.3f} | Train Prec: {train_prec*100:.2f}% | Train Rec: {train_rec*100:.2f}% | Train Fscore: {train_fscore*100:.2f}%"
    )
    print(
        f"\t Val. Loss: {valid_loss:.3f} | Val Prec: {valid_prec*100:.2f}% | Val Rec: {valid_rec*100:.2f}% | Val Fscore: {valid_fscore*100:.2f}%"
    )
```
| github_jupyter |
# T81-558: Applications of Deep Neural Networks
**Module 11: Natural Language Processing and Speech Recognition**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 11 Material
* Part 11.1: Getting Started with Spacy in Python [[Video]](https://www.youtube.com/watch?v=A5BtU9vXzu8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_11_01_spacy.ipynb)
* Part 11.2: Word2Vec and Text Classification [[Video]](https://www.youtube.com/watch?v=nWxtRlpObIs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_11_02_word2vec.ipynb)
* Part 11.3: What are Embedding Layers in Keras [[Video]](https://www.youtube.com/watch?v=OuNH5kT-aD0&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_11_03_embedding.ipynb)
* **Part 11.4: Natural Language Processing with Spacy and Keras** [[Video]](https://www.youtube.com/watch?v=BKgwjhao5DU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_11_04_text_nlp.ipynb)
* Part 11.5: Learning English from Scratch with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=Y1khuuSjZzc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=58) [[Notebook]](t81_558_class_11_05_english_scratch.ipynb)
# Part 11.4: Natural Language Processing with Spacy and Keras
In this part we will see how to use Spacy and Keras together.
### Word-Level Text Generation
There are a number of different approaches to teaching a neural network to output free-form text. The most basic question is if you wish the neural network to learn at the word or character level. In many ways, learning at the character level is the more interesting of the two. The LSTM learns to construct its own words without even being shown what a word is. We will begin with character-level text generation. In the next module, we will see how we can use nearly the same technique to operate at the word level. The automatic captioning that will be implemented in the next module is at the word level.
We begin by importing the needed Python packages and defining the sequence length, named **maxlen**. Time-series neural networks always accept their input as a fixed length array. Not all of the sequence might be used, it is common to fill extra elements with zeros. The text will be divided into sequences of this length and the neural network will be trained to predict what comes after this sequence.
```
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import random
import sys
import io
import requests
import re
import requests

# Download the training corpus (Treasure Island) and lowercase it.
r = requests.get("https://data.heatonresearch.com/data/t81-558/text/treasure_island.txt")
raw_text = r.text.lower()
print(raw_text[0:1000])

# Tokenize the corpus with spaCy's small English pipeline.
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(raw_text)

vocab = set()
tokenized_text = []
for token in doc:
    # Replace non-ASCII characters with spaces, then trim surrounding whitespace.
    word = ''.join([i if ord(i) < 128 else ' ' for i in token.text])
    word = word.strip()
    # Skip numbers, URLs and e-mail addresses; keep everything else.
    if not token.is_digit \
            and not token.like_url \
            and not token.like_email:
        vocab.add(word)
        tokenized_text.append(word)
print(f"Vocab size: {len(vocab)}")
```
The above section might have given you this error:
```
OSError: [E050] Can't find model 'en_core_web_sm'. It doesn't seem to be a shortcut link, a Python package or a valid path to a data directory.
```
If so, Spacy can be installed with a simple PIP install. This was included in the list of packages to install for this course. You will need to ensure that you've installed a language with Spacy. If you do not, you will get the following error:
To install English, use the following command:
```
python -m spacy download en
```
```
print(list(vocab)[:20])

# Bidirectional lookup tables between words and integer IDs.
word2idx = dict((n, v) for v, n in enumerate(vocab))
idx2word = dict((n, v) for n, v in enumerate(vocab))
# Replace every token with its integer ID.
tokenized_text = [word2idx[word] for word in tokenized_text]
tokenized_text
# cut the text in semi-redundant sequences of maxlen words
maxlen = 6
step = 3
sentences = []
next_words = []
for i in range(0, len(tokenized_text) - maxlen, step):
    sentences.append(tokenized_text[i: i + maxlen])
    next_words.append(tokenized_text[i + maxlen])
print('nb sequences:', len(sentences))
sentences[0:5]
import numpy as np
print('Vectorization...')
# BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin bool is the documented replacement.
# x: one-hot (sequence, position, word-id); y: one-hot next word.
x = np.zeros((len(sentences), maxlen, len(vocab)), dtype=bool)
y = np.zeros((len(sentences), len(vocab)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, word in enumerate(sentence):
        x[i, t, word] = 1
    y[i, next_words[i]] = 1
x.shape
y.shape
y[0:5]
# build the model: a single LSTM layer feeding a softmax over the vocabulary
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(vocab))))
model.add(Dense(len(vocab), activation='softmax'))
# FIX: the `lr` argument was deprecated in TF2 Keras optimizers and later
# removed; `learning_rate` is the supported keyword.
optimizer = RMSprop(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
model.summary()
def sample(preds, temperature=1.0):
    """Draw one index from *preds* after temperature-scaling the distribution.

    Low temperatures sharpen the distribution (more conservative choices),
    high temperatures flatten it (more adventurous choices).
    """
    probabilities = np.asarray(preds, dtype='float64')
    # Temperature scaling happens in log space.
    scaled = np.log(probabilities) / temperature
    weights = np.exp(scaled)
    weights = weights / np.sum(weights)
    # One multinomial draw; argmax recovers the index of the drawn category.
    drawn = np.random.multinomial(1, weights, 1)
    return np.argmax(drawn)
def on_epoch_end(epoch, _):
    # Function invoked at end of each epoch. Prints generated text.
    # Relies on module-level globals: model, tokenized_text, idx2word,
    # vocab, maxlen and sample().
    print("****************************************************************************")
    print('----- Generating text after Epoch: %d' % epoch)

    # Pick a random seed window from the training text.
    start_index = random.randint(0, len(tokenized_text) - maxlen)
    # Generate at several temperatures: low = conservative, high = adventurous.
    for temperature in [0.2, 0.5, 1.0, 1.2]:
        print('----- temperature:', temperature)
        #generated = ''
        sentence = tokenized_text[start_index: start_index + maxlen]
        #generated += sentence
        o = ' '.join([idx2word[idx] for idx in sentence])
        print(f'----- Generating with seed: "{o}"')
        #sys.stdout.write(generated)
        # Generate 100 words, sliding the seed window after each prediction.
        for i in range(100):
            # One-hot encode the current window of maxlen word IDs.
            x_pred = np.zeros((1, maxlen, len(vocab)))
            for t, word in enumerate(sentence):
                x_pred[0, t, word] = 1.
            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample(preds, temperature)
            next_word = idx2word[next_index]
            #generated += next_char
            # Slide the window: drop the first word, append the prediction.
            sentence = sentence[1:]
            sentence.append(next_index)
            sys.stdout.write(next_word)
            sys.stdout.write(' ')
            sys.stdout.flush()
        print()
# Print generated samples after every epoch while fitting.
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)

model.fit(x, y,
          batch_size=128,
          epochs=60,
          callbacks=[print_callback])
```
| github_jupyter |
# Managing offline map areas

With ArcGIS you can take your web maps and layers offline in field apps to continue work in places with limited or no connectivity. Using [ArcGIS Runtime SDKs](https://developers.arcgis.com/features/offline/), you can build offline ready apps that can make use of this functionality. A GIS administrator or content manager/owner can prepare map areas ahead of time to make going offline faster and easier for the field worker. This guide describes how to use the ArcGIS API for Python to create **preplanned offline map areas** for use in the ArcGIS Runtime SDKs and in the future, with apps like Collector.
To learn about the general concept and steps in taking a map offline, refer [here](https://developers.arcgis.com/ios/latest/swift/guide/create-an-offline-map.htm). To understand the data requirements needed to take a map offline, refer [here](https://doc.arcgis.com/en/collector/ios/create-maps/offline-map-prep.htm).
## Creating offline map areas
With ArcGIS API for Python, you can conveniently manage offline areas from the `WebMap` object. The [`offline_areas`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.mapping.html#arcgis.mapping.WebMap.offline_areas) property off the `WebMap` object gives you access to the [`OfflineAreaManager`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.mapping.html#offlinemapareamanager) object with which you can `create()`, `list()` and `update()` these offline packages.
```
from arcgis.gis import GIS
from arcgis.mapping import WebMap

# NOTE(review): credentials are hard-coded for the demo; in real use read
# them from a secure store or prompt rather than committing them to source.
gis = GIS("https://www.arcgis.com","arcgis_python","P@ssword123")
```
Let us use a fire first responder web map and take it offline.
```
# Fetch the fire first-responder web map item by its item id.
wm_item = gis.content.get('7f88050bf48749c6b9d82634f04b6362')
wm_item
# Wrap the item in a WebMap object to work with its definition.
fire_webmap = WebMap(wm_item)
```
You can create offline areas for a specified extent or a bookmark. You can additionally specify any layers that you need to ignore, a destination folder to store these packages and a min, max scale to which the packages need to be cached.
List the bookmarks in the web map:
```
# Print the names of the bookmarks saved in the web map.
for bookmark in fire_webmap.definition.bookmarks:
    print(bookmark.name)
```
Create offline areas for the bookmark "Southern California" and while we are at it, let us limit the scales for which the packages need to be created. As one of the parameters, you can specify a name, title, description for the "offline map area" item that gets created during this process.
```
# Item metadata for the 'Map Area' item that create() will produce.
offline_item_properties = {'title': 'Offline area for Southern California',
                           'tags': ['Python', 'automation', 'fires'],
                           'snippet': 'Area created for first responders'}
# Package the bookmark's extent for offline use, limiting the cached scale
# range. This is a server-side operation and can take a while.
socal_offline_item = fire_webmap.offline_areas.create(area = fire_webmap.definition.bookmarks[1].name,
                                                      item_properties = offline_item_properties,
                                                      min_scale = 147914000,
                                                      max_scale = 73957000)
```
This operation can take a while as the server is packaging the contents of the web map for offline use. To view the status, you can optionally turn on the verbosity using the `env` module as shown below:
```python
from arcgis import env
env.verbose = True
```
```
socal_offline_item
```
The type of the item we just created is `Map Area`. Read along to see how you can list the packages created for this map area.
```
socal_offline_item.type
```
### Inspecting offline packages created for a map area
The packages created for a `Map Area` item share a relationship of type `Area2Package` with that `Item`. Thus using the `related_items()` method off the `Item` object, you can list those packages as shown below:
```
socal_offline_item.related_items('Area2Package', 'forward')
```
These items are meant for use in offline applications described above. However, if needed, you can call the `download()` method off these `Item`s and download their data to disk using the Python API.
## Listing offline areas created for a web map
You can use the `list()` method off the `offline_areas` property of a `WebMap` object to find the offline areas created for that web map.
```
fire_webmap.offline_areas.list()
```
## Updating offline areas
Keeping offline areas up to date is an important task. You can accomplish this by calling the `update()` method off the `offline_areas` property of the `WebMap` object. This method accepts a list of `Map Area` items as input. To update all the offline areas created for a web map call the method without any input parameters.
Below is an example of how the progress is relayed back to you when you turn on the `verbosity` in the `env` module.
```
# update all offline areas for the fire web map
# (update() with no arguments refreshes every Map Area item of this web map)
fire_webmap.offline_areas.update()
```
Now your field users are all set with the latest packages for use in a disconnected setting.
| github_jupyter |
# Using nbconvert as a library
In this notebook, you will be introduced to the programmatic API of nbconvert and how it can be used in various contexts.
A great [blog post](http://jakevdp.github.io/blog/2013/04/15/code-golf-in-python-sudoku/) by [@jakevdp](https://github.com/jakevdp) will be used to demonstrate. This notebook will not focus on using the command line tool. The attentive reader will point-out that no data is read from or written to disk during the conversion process. This is because nbconvert has been designed to work in memory so that it works well in a database or web-based environment too.
## Quick overview
Credit: Jonathan Frederic (@jdfreder on github)
The main principle of nbconvert is to instantiate an `Exporter` that controls the pipeline through which notebooks are converted.
First, download @jakevdp's notebook (if you do not have `requests`, install it by running `pip install requests`, or if you don't have pip installed, you can find it on PYPI):
```
from urllib.request import urlopen

# Download the example notebook; the payload is a JSON string.
url = 'http://jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb'
response = urlopen(url).read().decode()
response[0:60] + ' ...'
```
The response is a JSON string which represents a Jupyter notebook.
Next, we will read the response using nbformat. Doing this will guarantee that the notebook structure is valid. Note that the in-memory format and on disk format are slightly different. In particular, on disk, multiline strings might be split into a list of strings.
```
import nbformat

# Parse (and validate) the JSON into an in-memory notebook node.
jake_notebook = nbformat.reads(response, as_version=4)
jake_notebook.cells[0]
```
The nbformat API returns a special type of dictionary. For this example, you don't need to worry about the details of the structure (if you are interested, please see the [nbformat documentation](https://nbformat.readthedocs.io/en/latest/)).
The nbconvert API exposes some basic exporters for common formats and defaults. You will start by using one of them. First, you will import one of these exporters (specifically, the HTML exporter), then instantiate it using most of the defaults, and then you will use it to process the notebook we downloaded earlier.
```
from traitlets.config import Config

# 1. Import the exporter
from nbconvert import HTMLExporter

# 2. Instantiate the exporter. We use the `basic` template for now; we'll get into more details
# later about how to customize the exporter further.
html_exporter = HTMLExporter()
html_exporter.template_file = 'basic'

# 3. Process the notebook we loaded earlier.
# Returns (converted source, resources dict); nothing touches the disk.
(body, resources) = html_exporter.from_notebook_node(jake_notebook)
```
The exporter returns a tuple containing the source of the converted notebook, as well as a resources dict. In this case, the source is just raw HTML:
```
print(body[:400] + '...')
```
If you understand HTML, you'll notice that some common tags are omitted, like the `body` tag. Those tags are included in the default `HtmlExporter`, which is what would have been constructed if we had not modified the `template_file`.
The resource dict contains (among many things) the extracted `.png`, `.jpg`, etc. from the notebook when applicable. The basic HTML exporter leaves the figures as embedded base64, but you can configure it to extract the figures. So for now, the resource dict should be mostly empty, except for a key containing CSS and a few others whose content will be obvious:
```
# The resources dict carries everything the template did not inline.
print("Resources:", resources.keys())
print("Metadata:", resources['metadata'].keys())
print("Inlining:", resources['inlining'].keys())
print("Extension:", resources['output_extension'])
```
`Exporter`s are stateless, so you won't be able to extract any useful information beyond their configuration. You can re-use an exporter instance to convert another notebook. In addition to the `from_notebook_node` used above, each exporter exposes `from_file` and `from_filename` methods.
## Extracting Figures using the RST Exporter
When exporting, you may want to extract the base64 encoded figures as files. While the HTML exporter does not do this by default, the `RstExporter` does:
```
# Import the RST exporter
from nbconvert import RSTExporter

# Instantiate it
rst_exporter = RSTExporter()

# Convert the notebook to RST format; unlike the basic HTML exporter,
# the RST exporter extracts figures into resources['outputs'].
(body, resources) = rst_exporter.from_notebook_node(jake_notebook)
print(body[:970] + '...')
print('[.....]')
print(body[800:1200] + '...')
```
Notice that base64 images are not embedded, but instead there are filename-like strings, such as `output_3_0.png`. The strings actually are (configurable) keys that map to the binary data in the resources dict.
Note, if you write an RST Plugin, you are responsible for writing all the files to the disk (or uploading, etc...) in the right location. Of course, the naming scheme is configurable.
As an exercise, this notebook will show you how to get one of those images. First, take a look at the `'outputs'` of the returned resources dictionary. This is a dictionary that contains a key for each extracted resource, with values corresponding to the actual base64 encoding:
```
sorted(resources['outputs'].keys())
```
In this case, there are 5 extracted binary figures, all `png`s. We can use the Image display object to actually display one of the images:
```
from IPython.display import Image

# Render one of the extracted PNGs directly from the in-memory bytes.
Image(data=resources['outputs']['output_3_0.png'], format='png')
```
Note that this image is being rendered without ever reading or writing to the disk.
## Extracting Figures using the HTML Exporter
As mentioned above, by default, the HTML exporter does not extract images -- it just leaves them as inline base64 encodings. However, this is not always what you might want. For example, here is a use case from @jakevdp:
> I write an [awesome blog](http://jakevdp.github.io/) using Jupyter notebooks converted to HTML, and I want the images to be cached. Having one html file with all of the images base64 encoded inside it is nice when sharing with a coworker, but for a website, not so much. I need an HTML exporter, and I want it to extract the figures!
### Some theory
Before we get into actually extracting the figures, it will be helpful to give a high-level overview of the process of converting a notebook to another format:
1. Retrieve the notebook and its accompanying resources (you are responsible for this).
2. Feed the notebook into the `Exporter`, which:
1. Sequentially feeds the notebook into an array of `Preprocessor`s. Preprocessors only act on the **structure** of the notebook, and have unrestricted access to it.
2. Feeds the notebook into the Jinja templating engine, which converts it to a particular format depending on which template is selected.
3. The exporter returns the converted notebook and other relevant resources as a tuple.
4. You write the data to the disk using the built-in `FilesWriter` (which writes the notebook and any extracted files to disk), or elsewhere using a custom `Writer`.
### Using different preprocessors
To extract the figures when using the HTML exporter, we will want to change which `Preprocessor`s we are using. There are several preprocessors that come with nbconvert, including one called the `ExtractOutputPreprocessor`.
The `ExtractOutputPreprocessor` is responsible for crawling the notebook, finding all of the figures, and putting them into the resources directory, as well as choosing the key (i.e. `filename_xx_y.extension`) that can replace the figure inside the template. To enable the `ExtractOutputPreprocessor`, we must add it to the exporter's list of preprocessors:
```
# create a configuration object that changes the preprocessors
from traitlets.config import Config
c = Config()
c.HTMLExporter.preprocessors = ['nbconvert.preprocessors.ExtractOutputPreprocessor']

# create the new exporter using the custom config
html_exporter_with_figs = HTMLExporter(config=c)
# Confirm the preprocessor list the exporter will run.
html_exporter_with_figs.preprocessors
```
We can compare the result of converting the notebook using the original HTML exporter and our new customized one:
```
# Convert with both exporters and compare the resources they return.
(_, resources) = html_exporter.from_notebook_node(jake_notebook)
(_, resources_with_fig) = html_exporter_with_figs.from_notebook_node(jake_notebook)
print("resources without figures:")
print(sorted(resources.keys()))
print("\nresources with extracted figures (notice that there's one more field called 'outputs'):")
print(sorted(resources_with_fig.keys()))
print("\nthe actual figures are:")
print(sorted(resources_with_fig['outputs'].keys()))
```
## Custom Preprocessors
There are an endless number of transformations that you may want to apply to a notebook. In particularly complicated cases, you may want to actually create your own `Preprocessor`. Above, when we customized the list of preprocessors accepted by the `HTMLExporter`, we passed in a string -- this can be any valid module name. So, if you create your own preprocessor, you can include it in that same list and it will be used by the exporter.
To create your own preprocessor, you will need to subclass from `nbconvert.preprocessors.Preprocessor` and overwrite either the `preprocess` and/or `preprocess_cell` methods.
## Example
The following demonstration adds the ability to exclude a cell by index.
Note: injecting cells is similar, and won't be covered here. If you want to inject static content at the beginning/end of a notebook, use a custom template.
```
from traitlets import Integer
from nbconvert.preprocessors import Preprocessor


class PelicanSubCell(Preprocessor):
    """A Pelican specific preprocessor to remove some of the cells of a notebook"""

    # I could also read the cells from nb.metadata.pelican if someone wrote a JS extension,
    # but for now I'll stay with configurable value.
    # FIX: .tag(config=True) must receive the boolean True, not the string
    # 'True'; chaining .tag() on the trait is the idiomatic traitlets form.
    start = Integer(0, help="first cell of notebook to be converted").tag(config=True)
    end = Integer(-1, help="last cell of notebook to be converted").tag(config=True)

    def preprocess(self, nb, resources):
        """Keep only cells in the [start, end) slice and drop the rest."""
        self.log.info("I'll keep only cells from %d to %d", self.start, self.end)
        nb.cells = nb.cells[self.start:self.end]
        return nb, resources
```
Here a Pelican exporter is created that takes `PelicanSubCell` preprocessors and a `config` object as parameters. This may seem redundant, but with the configuration system you can register an inactive preprocessor on all of the exporters and activate it from config files or the command line.
```
# Create a new config object that configures both the new preprocessor, as well as the exporter
c = Config()
c.PelicanSubCell.start = 4
c.PelicanSubCell.end = 6
# Passing the class itself (not a dotted string) also works here.
c.RSTExporter.preprocessors = [PelicanSubCell]

# Create our new, customized exporter that uses our custom preprocessor
pelican = RSTExporter(config=c)

# Process the notebook
print(pelican.from_notebook_node(jake_notebook)[0])
```
## Programmatically creating templates
```
from jinja2 import DictLoader

# An in-memory Jinja template that extends 'basic.tpl' and overrides its footer.
dl = DictLoader({'full.tpl':
"""
{%- extends 'basic.tpl' -%}
{% block footer %}
FOOOOOOOOTEEEEER
{% endblock footer %}
"""})

exportHTML = HTMLExporter(extra_loaders=[dl])
(body, resources) = exportHTML.from_notebook_node(jake_notebook)
# Show the last few lines, where the custom footer appears.
for l in body.split('\n')[-4:]:
    print(l)
```
## Real World Uses
@jakevdp uses Pelican and Jupyter Notebook to blog. Pelican [will use](https://github.com/getpelican/pelican-plugins/pull/21) nbconvert programmatically to generate blog post. Have a look a [Pythonic Preambulations](http://jakevdp.github.io/) for Jake's blog post.
@damianavila wrote the Nikola Plugin to [write blog post as Notebooks](http://damianavila.github.io/blog/posts/one-line-deployment-of-your-site-to-gh-pages.html) and is developing a js-extension to publish notebooks via one click from the web app.
<center>
<blockquote class="twitter-tweet"><p>As <a href="https://twitter.com/Mbussonn">@Mbussonn</a> requested... easieeeeer! Deploy your Nikola site with just a click in the IPython notebook! <a href="http://t.co/860sJunZvj">http://t.co/860sJunZvj</a> cc <a href="https://twitter.com/ralsina">@ralsina</a></p>— Damián Avila (@damian_avila) <a href="https://twitter.com/damian_avila/statuses/370306057828335616">August 21, 2013</a></blockquote>
</center>
| github_jupyter |
## Ireland Covid-19 datasets
* https://data.gov.ie/dataset?q=covid
* https://www.hpsc.ie/a-z/respiratory/coronavirus/novelcoronavirus/casesinireland/epidemiologyofcovid-19inireland/
* https://covid19ireland-geohive.hub.arcgis.com/
```
import pandas as pd
import pylab as plt
import numpy as np
import seaborn as sns
import matplotlib as mpl
import pylab as plt
import matplotlib.dates as mdates

sns.set_context('talk')
pd.set_option('display.width', 150)
# Shared date-axis tick locator/formatter reused by the plots below.
locator = mdates.AutoDateLocator(minticks=4, maxticks=10)
formatter = mdates.ConciseDateFormatter(locator)
import geopandas as gpd
```
## LaboratoryLocalTimeSeriesHistoricView
```
# Daily laboratory testing time series from the GeoHive open-data portal.
labs = pd.read_csv('https://opendata-geohive.hub.arcgis.com/datasets/f6d6332820ca466999dbd852f6ad4d5a_0.csv?outSR=%7B%22latestWkid%22%3A3857%2C%22wkid%22%3A102100%7D')
labs['date'] = pd.to_datetime(labs['Date_HPSC'], infer_datetime_format=True)
# Daily deltas derived from the cumulative columns.
labs['Pos24'] = labs.Positive.diff()
labs['Hosp24'] = labs.Hospitals.diff()
labs['Nonhosp24'] = labs.NonHospitals.diff()
# Daily positivity rate in percent (Test24 comes from the CSV itself).
labs['rate'] = labs.Pos24/labs.Test24*100
labs[-10:]
labs[-90:]['Test24'].mean()
# 7-day rolling means smooth out the strong day-of-week reporting cycle.
window=7
x=labs.set_index('date').rolling(window).mean()
# assumes a ~0.5% false-positive testing rate -- TODO confirm
x['false_pos'] = x.Test24*0.005

# Positives (left axis), tests (first right axis) and positivity rate
# (second right axis, offset outward) on a single figure.
f,ax=plt.subplots(figsize=(17,7))
l1=x.plot(y='Pos24',ax=ax,legend=False,alpha=.9)
ax1=ax.twinx()
x.plot(y='Test24',ax=ax1,color='red',lw=3,legend=False,alpha=.6)
ax.set_ylabel('positives')
ax2=ax.twinx()
#ax2.axis('off')
ax1.set_ylabel('tests')
ax2.set_ylabel('positive rate')
# Push the second right-hand spine outward so the two don't overlap.
ax2.spines["right"].set_position(("axes", 1.1))
ax.set_facecolor('#f0f0f0')
x.plot(y='rate',ax=ax2,color='green',lw=3,legend=False)
#error on positives based on false positive rate in testing
#ax.fill_between(x.index, x.Pos24, x.Pos24-x.false_pos, alpha=0.3)
# Combined legend built from the first line of each of the three axes.
ax.legend([ax.get_lines()[0],ax1.get_lines()[0],ax2.get_lines()[0]],['positive','tests','positive rate'],loc=9,fontsize=14)
f.suptitle('Ireland COVID-19 tests and positive rate')
#for date,text in [('2020-08-07',' 3 counties\n lockdown'),('2020-09-18',' Dublin\n level 3'),
#    ('2020-10-06','Country\n level 3'),('2020-10-22','Country\n level 5')]:
#    ax.axvline(date,0,1000,linestyle='--',alpha=0.5,c='black')
#    ax.text(date,750,text,fontsize=10)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
sns.despine()
plt.tight_layout()
f.savefig('ireland_covid_tests_prate.jpg',dpi=150)
def plot_scatter(ax,i=10000):
    # Scatter of daily tests vs daily positives since May 2020,
    # colour-coded by date; i caps how many rows are drawn.
    x = labs[(labs.date>'2020-05-01')]
    sc = x[:i].plot('Test24','Pos24',kind='scatter',cmap='RdYlGn',
                    alpha=0.8,s=80,ax=ax,c='date')
    ax.set_xlabel('tests')
    ax.set_ylabel('positives')
    # ax.set_xlim((0,max(labs.Tests_diff.dropna())))
    #ax.set_ylim((-100,1600))
    ax.set_title('Ireland tests vs positives')
    plt.tight_layout()
    plt.savefig('ireland_covid_tests_v_positives.jpg',dpi=120)

fig,ax=plt.subplots(1,1,figsize=(7,7))
plot_scatter(ax)
```
## day of week testing
```
# Aggregate by day of week to expose the weekly testing/reporting cycle.
labs['dayofweek'] = labs.date.dt.day_name()
#labs.groupby('dayofweek').agg({'Tests_diff':np.mean})
# FIX: sns.factorplot was renamed to catplot (and `size` to `height`) in
# seaborn 0.9; factorplot was removed entirely in later releases.
sns.catplot(x='dayofweek', y='Pos24', data=labs, kind='bar', aspect=3, height=4)
sns.catplot(x='dayofweek', y='Test24', data=labs, kind='bar', aspect=3, height=4)
def fit_cases(y):
    # Fit a straight line to y over a normalised 0..1 x-range and plot the
    # data with the fitted line. Pass log-counts to test for exponential
    # growth (exponential data is linear in log space).
    from scipy.optimize import curve_fit
    x = np.linspace(0, 1, num = len(y))
    #x = range(len(y))
    def func0(x, a, b):
        # Linear model: intercept a, slope b.
        return a+b*x
    def func1(x, a, b):
        # Exponential alternative (currently unused).
        return a+np.exp(b*x)
    param, param_cov = curve_fit(func0, x, y)
    f0 = func0(x,param[0],param[1])
    #param, param_cov = curve_fit(func1, x, y)
    #f1 = func1(x,param[0],param[1])
    #print (param)
    f,ax=plt.subplots(figsize=(10,6))
    plt.scatter(x,y,alpha=0.6)
    plt.plot(x,f0,'--', color ='red')
    #plt.plot(x,f1,'--', color ='green',lw=4)
    # Show the fitted [intercept, slope] in the corner of the axes.
    plt.text(.1,.9,param,transform=ax.transAxes)
# Fit the log of daily positives to check for exponential growth.
# BUGFIX: the dataframe has no 'Positive_diff' column -- the daily delta of
# cumulative positives was stored as 'Pos24' (labs.Positive.diff()) above.
y = labs[(labs.date > '2020-06-01')].Pos24
y = np.log(y)
fit_cases(y)
```
## Covid19CountyStatisticsHPSCIrelandOpenData
```
# County-level cumulative case statistics, sorted by county then time.
cs = pd.read_csv('https://opendata.arcgis.com/datasets/d9be85b30d7748b5b7c09450b8aede63_0.csv')
cs['time'] = pd.to_datetime(cs['TimeStamp'], infer_datetime_format=True)
cs = cs.sort_values(['CountyName','time'])
cs.columns
# Plot daily confirmed cases (7-day rolling mean) for nine counties on a
# 3x3 grid, with cumulative deaths on a secondary axis in red.
# (Removed two dead lines: an unused whole-frame rolling mean and an unused
# groupby.)
f, ax = plt.subplots(3, 3, figsize=(17, 10))
axs = ax.flat
i = 0
for c in ['Dublin','Cork','Kildare','Offaly','Laois','Meath','Louth','Wicklow','Carlow']:
    ax = axs[i]
    x = cs[cs.CountyName == c].set_index('time').rolling(window).mean()
    x['cases'] = x['ConfirmedCovidCases'].diff()
    x.plot(y='cases', ax=ax, legend=False)
    ax.set_title(c)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    ax.grid(axis='x', color='0.95')
    # BUGFIX: ax1 was commented out while still being used on the next line,
    # so the deaths series was drawn on a stale axes left over from a
    # previous cell instead of this subplot's twin axis.
    ax1 = ax.twinx()
    x.plot(y='ConfirmedCovidDeaths', ax=ax1, legend=False, color='red')
    # Only the bottom row keeps its date tick labels.
    if i > 5:
        ax.xaxis.set_major_formatter(formatter)
    else:
        ax.set_xticklabels([])
    i += 1
sns.despine()
f.suptitle('Ireland COVID-19 positive tests for 9 counties')
plt.tight_layout()
f.savefig('ireland_covid_tests_bycounty.jpg', dpi=150)
```
## single counties plot
```
# Overlay daily-case curves for a selection of counties on one axis.
# Cleanup: the original tested `c in selection` while iterating `selection`
# itself, so the grey-colour `else` branch was unreachable; `counties`,
# `lines` and the plot's return value were never used. Behaviour of the
# resulting figure is unchanged.
f, ax = plt.subplots(1, 1, figsize=(17, 6))
colors = sns.color_palette()
selection = ['Dublin', 'Cork', 'Kildare', 'Offaly', 'Laois', 'Meath', 'Louth', 'Wicklow', 'Carlow']
i = 0
for c in selection:
    # Dublin dwarfs the other counties, so leave it out of the overlay
    # (skip before doing the rolling-mean work it would discard anyway).
    if c == 'Dublin':
        continue
    x = cs[cs.CountyName == c].set_index('time').rolling(window).mean()
    x['cases'] = x['ConfirmedCovidCases'].diff()
    x.plot(y='cases', ax=ax, legend=False, c=colors[i], lw=1.2)
    i += 1
ax.set_title('Ireland COVID-19 positive tests by county')
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.grid(axis='x', color='0.95')
sns.despine()
```
## CovidStatisticsProfileHPSCIrelandOpenData
```
# National daily statistics profile from the GeoHive open-data feed.
daily = pd.read_csv('http://opendata-geohive.hub.arcgis.com/datasets/d8eb52d56273413b84b0187a4e9117be_0.csv')
daily['date'] = pd.to_datetime(daily['Date'], infer_datetime_format=True)
# Daily change in the (cumulative) hospitalised-cases column.
daily['hospitalised_diff'] = daily.HospitalisedCovidCases.diff()
ds = daily#.groupby('date').sum().reset_index()
cols=['date','ConfirmedCovidCases','HospitalisedCovidCases','hospitalised_diff','ConfirmedCovidDeaths']
x=ds[cols]
# Restrict to May 2020 onwards.
x = x[(x.date>'2020-04-30')]
#x=ds#.set_index('date').rolling(window).mean()
# Figure 1: line plot -- cases on the left axis, deaths on a twin right axis.
f,ax=plt.subplots(1,1,figsize=(18,8))
ax1=ax.twinx()
#x['cases_log'] = x.ConfirmedCovidCases.log()
x.plot(x='date',y='ConfirmedCovidCases',ax=ax,lw=3,legend=False,alpha=0.8)#,logy=True)
#x.plot(x='date',y='hospitalised_diff',ax=ax1,lw=3,legend=False,color='green')
x.plot(x='date',y='ConfirmedCovidDeaths',ax=ax1,lw=3,legend=False,color='red')#,logy=True)
# `locator`/`formatter` are date tick helpers defined earlier in the notebook.
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
# Combined legend built from the first line of each axis.
ax.legend([ax.get_lines()[0],ax1.get_lines()[0]],['positive','deaths'],loc=9,fontsize=14)
sns.despine()
plt.savefig('ireland_covid_deaths_tests.jpg',dpi=150)
# Figure 2: bar charts -- cases (top panel) and deaths (bottom panel).
f,axs=plt.subplots(2,1,figsize=(20,7))
ax1=axs[0]
#ds.plot(x='date',y='ConfirmedCovidCases',ax=ax1,lw=2,legend=False,alpha=0.8,kind='bar')
ax1.bar(ds.date, ds.ConfirmedCovidCases,color='blue',width=.8)
ax1.set_ylabel('cases')
# Hide x tick labels on the top panel; the bottom panel carries the dates.
ax1.xaxis.set_ticklabels([])
ax2=axs[1]
#ds.plot(x='date',y='hospitalised_diff',ax=ax1,lw=3,legend=False,color='green')
#ds.plot(x='date',y='ConfirmedCovidDeaths',ax=ax2,legend=False,color='red',kind='bar')
ax2.bar(ds.date, ds.ConfirmedCovidDeaths,color='red')
ax2.set_ylabel('deaths')
ax2.xaxis.set_major_formatter(formatter)
ax2.xaxis.set_major_locator(locator)
#ax.legend([ax.get_lines()[0],ax1.get_lines()[0],ax2.get_lines()[0]],['confirmed cases','hospitalised','deaths'],loc=1,fontsize=14)
f.suptitle('Ireland COVID-19 cases and deaths')
plt.tight_layout()
sns.despine()
f.savefig('ireland_covid_hosp_deaths.jpg',dpi=150)
# Hospitalisation time series (loaded here but not used below).
hosp = pd.read_csv('http://opendata-geohive.hub.arcgis.com/datasets/fe9bb23592ec4142a4f4c2c9bd32f749_0.csv?outSR={%22latestWkid%22:4326,%22wkid%22:4326}')
# County shapefile for a choropleth map; `gpd` is presumably geopandas,
# imported earlier in the notebook -- TODO confirm.
idf = gpd.read_file('map_data/Covid19CountyStatisticsHPSCIreland.shp')
time='2020-02-27'
# Select the snapshot for one timestamp.
g=idf[idf.TimeStamp==time]
fig, ax = plt.subplots(1, figsize=(9,9))
# Colour counties by population for the chosen date.
g.plot(column='Population', cmap='OrRd', linewidth=0.8, ax=ax, edgecolor='0.2',legend=True)
ax.axis('off')
```
| github_jupyter |
# TensorFlow Tutorial
Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow:
- Initialize variables
- Start your own session
- Train algorithms
- Implement a Neural Network
Programming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code.
## <font color='darkblue'>Updates</font>
#### If you were working on the notebook before this update...
* The current notebook is version "v3b".
* You can find your original work saved in the notebook with the previous version name (it may be either "TensorFlow Tutorial version 3" or "TensorFlow Tutorial version 3a").
* To view the file directory, click on the "Coursera" icon in the top left of this notebook.
#### List of updates
* forward_propagation instruction now says 'A1' instead of 'a1' in the formula for Z2;
and are updated to say 'A2' instead of 'Z2' in the formula for Z3.
* create_placeholders instruction refer to the data type "tf.float32" instead of float.
* in the model function, the x axis of the plot now says "iterations (per fives)" instead of iterations(per tens)
* In the linear_function, comments remind students to create the variables in the order suggested by the starter code. The comments are updated to reflect this order.
* The test of the cost function now creates the logits without passing them through a sigmoid function (since the cost function will include the sigmoid in the built-in tensorflow function).
* In the 'model' function, the minibatch_cost is now divided by minibatch_size (instead of num_minibatches).
* Updated print statements and 'expected output' that are used to check functions, for easier visual comparison.
## 1 - Exploring the Tensorflow Library
To start, you will import the library:
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
```
Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
$$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$
```
# Minimal TensorFlow 1.x example: define loss = (y - y_hat)^2 as a graph,
# then initialise the variable and evaluate the loss inside a session.
y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y') # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss
init = tf.global_variables_initializer() # When init is run later (session.run(init)),
# the loss variable will be initialized and ready to be computed
with tf.Session() as session: # Create a session and print the output
session.run(init) # Initializes the variables
print(session.run(loss)) # Prints the loss
```
Writing and running programs in TensorFlow has the following steps:
1. Create Tensors (variables) that are not yet executed/evaluated.
2. Write operations between those Tensors.
3. Initialize your Tensors.
4. Create a Session.
5. Run the Session. This will run the operations you'd written above.
Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.
Now let us look at an easy example. Run the cell below:
```
# Multiplying two constants only adds a node to the graph; printing `c`
# shows a Tensor object, not 20, because nothing has been executed yet.
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
```
As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
```
# Running the node inside a session actually executes the multiplication.
sess = tf.Session()
print(sess.run(c))
```
Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**.
Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later.
To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session.
```
# Change the value of x in the feed_dict
# A placeholder has no value until one is fed in at run time via feed_dict;
# here 3 is bound to x, so the expression 2*x evaluates to 6.
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
```
When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session.
Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.
### 1.1 - Linear function
Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector.
**Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):
```python
X = tf.constant(np.random.randn(3,1), name = "X")
```
You might find the following functions helpful:
- tf.matmul(..., ...) to do a matrix multiplication
- tf.add(..., ...) to do an addition
- np.random.randn(...) to initialize randomly
```
# GRADED FUNCTION: linear_function
def linear_function():
    """
    Implements a linear function:
    Initializes X to be a random tensor of shape (3,1)
    Initializes W to be a random tensor of shape (4,3)
    Initializes b to be a random tensor of shape (4,1)
    Returns:
    result -- runs the session for Y = WX + b
    """
    np.random.seed(1)
    # Keep the creation order X, W, b: each consumes draws from the seeded
    # NumPy generator, so a different order would give different numbers.
    ### START CODE HERE ### (4 lines of code)
    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
    Y = tf.add(tf.matmul(W, X), b)
    ### END CODE HERE ###
    # Evaluate Y inside a session; the context manager closes it for us.
    ### START CODE HERE ###
    with tf.Session() as sess:
        result = sess.run(Y)
    ### END CODE HERE ###
    return result

print("result = \n" + str(linear_function()))
```
*** Expected Output ***:
```
result =
[[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
```
### 1.2 - Computing the sigmoid
Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input.
You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session.
** Exercise **: Implement the sigmoid function below. You should use the following:
- `tf.placeholder(tf.float32, name = "...")`
- `tf.sigmoid(...)`
- `sess.run(..., feed_dict = {x: z})`
Note that there are two typical ways to create and use sessions in tensorflow:
**Method 1:**
```python
sess = tf.Session()
# Run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
sess.close() # Close the session
```
**Method 2:**
```python
with tf.Session() as sess:
# run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
# This takes care of closing the session for you :)
```
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Computes the sigmoid of z
    Arguments:
    z -- input value, scalar or vector
    Returns:
    results -- the sigmoid of z
    """
    ### START CODE HERE ### ( approx. 4 lines of code)
    # The placeholder named 'x' receives z's value at run time.
    x = tf.placeholder(tf.float32, name="x")
    sigmoid_op = tf.sigmoid(x)
    # Method 2: the with-block closes the session automatically.
    with tf.Session() as sess:
        result = sess.run(sigmoid_op, feed_dict={x: z})
    ### END CODE HERE ###
    return result

print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
```
*** Expected Output ***:
<table>
<tr>
<td>
**sigmoid(0)**
</td>
<td>
0.5
</td>
</tr>
<tr>
<td>
**sigmoid(12)**
</td>
<td>
0.999994
</td>
</tr>
</table>
<font color='blue'>
**To summarize, you now know how to**:
1. Create placeholders
2. Specify the computation graph corresponding to operations you want to compute
3. Create the session
4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values.
### 1.3 - Computing the Cost
You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m:
$$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$
you can do it in one line of code in tensorflow!
**Exercise**: Implement the cross entropy loss. The function you will use is:
- `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`
Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes
$$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$
```
# GRADED FUNCTION: cost
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy
    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)
    Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
    in the TensorFlow documentation. So logits will feed into z, and labels into y.
    Returns:
    cost -- runs the session of the cost (formula (2))
    """
    ### START CODE HERE ###
    # Placeholders for the logits (z) and the labels (y).
    z = tf.placeholder(tf.float32, name="z")
    y = tf.placeholder(tf.float32, name="y")
    # Sigmoid + cross-entropy fused in one numerically stable op.
    loss_op = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
    with tf.Session() as sess:
        result = sess.run(loss_op, feed_dict={z: logits, y: labels})
    ### END CODE HERE ###
    return result

logits = np.array([0.2,0.4,0.7,0.9])
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
```
** Expected Output** :
```
cost = [ 0.79813886 0.91301525 0.40318605 0.34115386]
```
### 1.4 - Using One Hot encodings
Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:
<img src="images/onehot.png" style="width:600px;height:150px;">
This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code:
- tf.one_hot(labels, depth, axis)
**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this.
```
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
    will be 1.
    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension
    Returns:
    one_hot -- one hot matrix
    """
    ### START CODE HERE ###
    # Wrap the depth in a constant named 'C'; axis=0 puts classes on rows.
    depth = tf.constant(C, name="C")
    one_hot_op = tf.one_hot(labels, depth, axis=0)
    with tf.Session() as sess:
        one_hot = sess.run(one_hot_op)
    ### END CODE HERE ###
    return one_hot

labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = \n" + str(one_hot))
```
**Expected Output**:
```
one_hot =
[[ 0. 0. 0. 1. 0. 0.]
[ 1. 0. 0. 0. 0. 1.]
[ 0. 1. 0. 0. 1. 0.]
[ 0. 0. 1. 0. 0. 0.]]
```
### 1.5 - Initialize with zeros and ones
Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively.
**Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones).
- tf.ones(shape)
```
# GRADED FUNCTION: ones
def ones(shape):
    """
    Creates an array of ones of dimension shape
    Arguments:
    shape -- shape of the array you want to create
    Returns:
    ones -- array containing only ones
    """
    ### START CODE HERE ###
    # Build the ones tensor, then evaluate it; the with-block closes the session.
    ones_op = tf.ones(shape)
    with tf.Session() as sess:
        result = sess.run(ones_op)
    ### END CODE HERE ###
    return result

print ("ones = " + str(ones([3])))
```
**Expected Output:**
<table>
<tr>
<td>
**ones**
</td>
<td>
[ 1. 1. 1.]
</td>
</tr>
</table>
# 2 - Building your first neural network in tensorflow
In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:
- Create the computation graph
- Run the graph
Let's delve into the problem you'd like to solve!
### 2.0 - Problem statement: SIGNS Dataset
One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.
- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).
- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).
Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.
Here are examples for each number, and an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolution to 64 by 64 pixels.
<img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>
Run the following code to load the dataset.
```
# Loading the dataset
# load_dataset (from tf_utils) returns train/test images, labels, and the class list.
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
```
Change the index below and run the cell to visualize some examples in the dataset.
```
# Example of a picture
# Change `index` to inspect a different training example and its label.
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
```
As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
```
# Flatten the training and test images
# Each (64, 64, 3) image becomes one column vector of length 12288.
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
# Pixel values are scaled from [0, 255] to [0, 1].
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
**Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one.
**The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes.
### 2.1 - Create placeholders
Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session.
**Exercise:** Implement the function below to create the placeholders in tensorflow.
```
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.
    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)
    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "tf.float32"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "tf.float32"
    Tips:
    - None lets the number of examples vary; train and test sets differ in size.
    """
    ### START CODE HERE ### (approx. 2 lines)
    X = tf.placeholder(dtype=tf.float32, shape=(n_x, None), name="X")
    Y = tf.placeholder(dtype=tf.float32, shape=(n_y, None), name="Y")
    ### END CODE HERE ###
    return X, Y

X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
```
**Expected Output**:
<table>
<tr>
<td>
**X**
</td>
<td>
Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)
</td>
</tr>
<tr>
<td>
**Y**
</td>
<td>
Tensor("Placeholder_2:0", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2)
</td>
</tr>
</table>
### 2.2 - Initializing the parameters
Your second task is to initialize the parameters in tensorflow.
**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use:
```python
W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())
```
Please use `seed = 1` to make sure your results match ours.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
    W1 : [25, 12288]
    b1 : [25, 1]
    W2 : [12, 25]
    b2 : [12, 1]
    W3 : [6, 12]
    b3 : [6, 1]
    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)  # so that your "random" numbers match ours
    ### START CODE HERE ### (approx. 6 lines of code)
    # Xavier init for weights, zeros for biases. Variables are created in the
    # order W1, b1, W2, b2, W3, b3 so the seeded values match the reference.
    layer_dims = [12288, 25, 12, 6]
    parameters = {}
    for l in range(1, len(layer_dims)):
        parameters['W%d' % l] = tf.get_variable(
            'W%d' % l, [layer_dims[l], layer_dims[l - 1]],
            initializer=tf.contrib.layers.xavier_initializer(seed=1))
        parameters['b%d' % l] = tf.get_variable(
            'b%d' % l, [layer_dims[l], 1],
            initializer=tf.zeros_initializer())
    ### END CODE HERE ###
    return parameters

tf.reset_default_graph()
with tf.Session() as sess:
    parameters = initialize_parameters()
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table>
<tr>
<td>
**W1**
</td>
<td>
< tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
< tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
< tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
< tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >
</td>
</tr>
</table>
As expected, the parameters haven't been evaluated yet.
### 2.3 - Forward propagation in tensorflow
You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are:
- `tf.add(...,...)` to do an addition
- `tf.matmul(...,...)` to do a matrix multiplication
- `tf.nn.relu(...)` to apply the ReLU activation
**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!
```
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
    the shapes are given in initialize_parameters
    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    ### START CODE HERE ###
    # Chain LINEAR -> RELU twice, then a final LINEAR. The softmax is folded
    # into the cost function, so no activation is applied to the last layer.
    A = X
    Z = None
    for l in (1, 2, 3):
        W = parameters['W%d' % l]
        b = parameters['b%d' % l]
        Z = tf.add(tf.matmul(W, A), b)   # Z_l = W_l . A_{l-1} + b_l
        if l < 3:
            A = tf.nn.relu(Z)            # A_l = relu(Z_l)
    ### END CODE HERE ###
    return Z

tf.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    print("Z3 = " + str(Z3))
```
**Expected Output**:
<table>
<tr>
<td>
**Z3**
</td>
<td>
Tensor("Add_2:0", shape=(6, ?), dtype=float32)
</td>
</tr>
</table>
You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to backpropagation.
### 2.4 Compute cost
As seen before, it is very easy to compute the cost using:
```python
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))
```
**Question**: Implement the cost function below.
- It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.
- Besides, `tf.reduce_mean` basically does the summation over the examples.
```
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
    """
    Computes the cost
    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3
    Returns:
    cost - Tensor of the cost function
    """
    # tf.nn.softmax_cross_entropy_with_logits expects shape
    # (number of examples, num_classes), hence the transposes.
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)
    ### START CODE HERE ### (1 line of code)
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    # Mean over examples gives the scalar cost.
    cost = tf.reduce_mean(per_example_loss)
    ### END CODE HERE ###
    return cost

tf.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    print("cost = " + str(cost))
```
**Expected Output**:
<table>
<tr>
<td>
**cost**
</td>
<td>
Tensor("Mean:0", shape=(), dtype=float32)
</td>
</tr>
</table>
### 2.5 - Backward propagation & parameter updates
This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.
After you compute the cost function. You will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.
For instance, for gradient descent the optimizer would be:
```python
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
```
To make the optimization you would do:
```python
_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
```
This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.
**Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable).
### 2.6 - Building the model
Now, you will bring it all together!
**Exercise:** Implement the model. You will be calling the functions you had previously implemented.
```
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()     # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)         # to keep consistent results
    seed = 3                      # seed for the minibatch shuffling, advanced every epoch
    (n_x, m) = X_train.shape      # n_x: input size, m: number of examples in the train set
    n_y = Y_train.shape[0]        # n_y: output size
    costs = []                    # to keep track of the cost

    # Create placeholders of shape (n_x, None) and (n_y, None)
    X, Y = create_placeholders(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters()

    # Forward propagation: build the forward pass in the tensorflow graph
    Z3 = forward_propagation(X, parameters)

    # Cost function: add cost function to tensorflow graph
    cost = compute_cost(Z3, Y)

    # Backpropagation: Adam optimizer minimizing the cost
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):
            epoch_cost = 0.                             # cost accumulated over this epoch
            num_minibatches = int(m / minibatch_size)   # minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                # Run the graph on a minibatch: executes the optimizer update and
                # evaluates the cost for this batch in one session call.
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={X: minibatch_X, Y: minibatch_Y})
                # BUGFIX: average over the number of minibatches, not the minibatch
                # size, so epoch_cost is the mean per-minibatch cost (this matches
                # the expected "Cost after epoch 100: 1.048222").
                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every 100 epochs, record it every 5
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per fives)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable (as concrete numpy values)
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions (argmax over the class axis)
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on the train and test sets
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
```
Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.048222. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
```
parameters = model(X_train, Y_train, X_test, Y_test)
```
**Expected Output**:
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.999074
</td>
</tr>
<tr>
<td>
**Test Accuracy**
</td>
<td>
0.716667
</td>
</tr>
</table>
Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.
**Insights**:
- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting.
- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.
### 2.7 - Test with your own image (optional / ungraded exercise)
Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right!
```
import scipy
from PIL import Image
from scipy import ndimage

## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##

# We preprocess your image to fit your algorithm.
# BUGFIX: scipy.ndimage.imread and scipy.misc.imresize were removed from SciPy
# (1.2 and 1.3 respectively); load and resize with Pillow instead. The resize
# is done on the raw uint8 image and the normalization applied afterwards, so
# the network input is correctly scaled to [0, 1] (the original normalized
# first, and imresize silently rescaled the data back to 0-255 uint8).
fname = "images/" + my_image
pil_image = Image.open(fname)
image = np.array(pil_image) / 255.
my_image = (np.array(pil_image.resize((64, 64))) / 255.).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)
plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
```
You indeed deserved a "thumbs-up" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up", so the model doesn't know how to deal with it! We call that a "mismatched data distribution" and it is one of the various problems you will learn about in the next course on "Structuring Machine Learning Projects".
<font color='blue'>
**What you should remember**:
- Tensorflow is a programming framework used in deep learning
- The two main object classes in tensorflow are Tensors and Operators.
- When you code in tensorflow you have to take the following steps:
- Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)
- Create a session
- Initialize the session
- Run the session to execute the graph
- You can execute the graph multiple times as you've seen in model()
- The backpropagation and optimization is automatically done when running the session on the "optimizer" object.
| github_jupyter |
```
import numpy as np
import orqviz
import matplotlib.pyplot as plt
```
Given this "mysterious" loss function named *loss_function*, what can we find out about it?
```
def loss_function(pars):
    """Sombrero-shaped loss: 1 - sinc-like function of the parameter norm.

    Arguments:
        pars: array-like parameter vector.

    Returns:
        float loss value; 0 at the global minimum, approaching 1 for large norms.
    """
    norm_of_pars = np.linalg.norm(pars, ord=2)
    freq = 2
    if norm_of_pars == 0.0:
        # BUGFIX: the original divided by zero here (returning NaN).
        # lim_{r->0} sin(f*r)/(f*r) = 1, so the loss limit at the origin is 0.
        return 0.0
    return -np.sin(freq*norm_of_pars) / (freq*norm_of_pars) + 1
# Experiment configuration: problem dimensionality, common y-axis limits for
# all loss plots, and shared font sizes used by every figure below.
n_dimensions = 4
loss_limits = (-0.1, 1.3)  # (min, max) loss shown on every plot
title_fontsize = 18
label_fontsize = 18
tick_fontsize = 15
colorbar_fontsize = 17
legend_fontsize = 16
```
Let's get some gradients going
```
from gradient_descent_optimizer import gradient_descent_optimizer
from orqviz.gradients import calculate_full_gradient
gradient_function = lambda pars: calculate_full_gradient(pars, loss_function, eps=1e-3, stochastic=False)
```
And do 100 repetitions!
```
# Run 100 independent gradient-descent optimizations from random starting
# points, then sort all trajectories by their final loss (best run first).
# NOTE(review): the loop-body indentation was lost in extraction — the five
# lines through `all_costs.append(costs)` belong inside the `for` loop.
all_parameter_trajectories = []
all_costs = []
np.random.seed(123)
for _ in range(100):
init_params = np.random.uniform(-5, 5, size=n_dimensions)
parameter_trajectory, costs = gradient_descent_optimizer(init_params=init_params, loss_function=loss_function,
n_iters=150, learning_rate=0.2,
full_gradient_function=gradient_function)
all_parameter_trajectories.append(parameter_trajectory)
all_costs.append(costs)
all_parameter_trajectories = np.array(all_parameter_trajectories)
# Final parameter vector of each run (last point of each trajectory).
all_final_parameters = all_parameter_trajectories[:,-1]
all_costs = np.array(all_costs)
# Sort runs by final loss so index 0 is the best run and -1 the worst.
sorted_indices = np.argsort(all_costs[:,-1])
all_parameter_trajectories = all_parameter_trajectories[sorted_indices]
# Recompute final parameters from the sorted trajectories (supersedes the
# unsorted assignment above).
all_final_parameters = all_parameter_trajectories[:,-1]
all_costs = all_costs[sorted_indices]
```
What do we know now? We know the training progress...
```
# Plot the loss curve of every run over training iterations, and mark the
# final losses of the best, second-best and worst runs at x=150.
plt.plot(all_costs.T, linewidth=2.5, color=plt.get_cmap("coolwarm", 7)(0), alpha=0.9)
plt.title("Training Progress of 100 Repetitions", fontsize=title_fontsize)
plt.ylabel("Loss", fontsize=label_fontsize)
plt.xlabel("Training Iterations", fontsize=label_fontsize)
plt.tick_params(labelsize=tick_fontsize)
plt.ylim(loss_limits)
plt.scatter([150, 150, 150], all_costs[[0, 1, -1],-1], color=plt.get_cmap("coolwarm", 7)(6), linewidth=2, zorder=3)
plt.savefig("sombrero_pics/progress.pdf")
plt.show()
```
...and the final local minima.
```
# Histogram of the final loss of each run — shows which local minima the
# 100 optimizations ended up in.
# NOTE(review): "Occurance" in the y-label and "histogramm" in the filename
# are typos ("Occurrence", "histogram"); left unchanged here as they are
# runtime strings.
plt.hist(all_costs[:,-1], bins=100, width=0.05, align="mid", color=plt.get_cmap("coolwarm", 7)(6), alpha=0.9)
plt.title("Histogram of 100 final losses", fontsize=title_fontsize)
plt.xlabel("Loss", fontsize=label_fontsize)
plt.ylabel("Occurance", fontsize=label_fontsize)
plt.tick_params(labelsize=tick_fontsize)
plt.xlim(loss_limits)
plt.savefig("sombrero_pics/histogramm.pdf")
plt.show()
```
This is solid information, however, we have no idea what the loss function really does, how the landscape looks like, etc.\
We have barely found out the losses of local minima. The distribution of those is very discrete, which is extremely valuable, to be fair. But can we do better?
### 1D Interpolation
```
# 1D linear interpolation of the loss between pairs of final parameter
# vectors. NOTE(review): index -1 is the run with the highest final loss;
# the titles label it the "2nd minimum" — confirm this mapping is intended.
# Loop-body indentation was lost in extraction.
from orqviz.scans import perform_1D_interpolation, plot_1D_interpolation_result
fig, axes = plt.subplots(1, 3, figsize=(16.5 ,3.5), gridspec_kw=dict(wspace=0.3))
(ax1, ax2, ax3) = axes
pairs = [(0,1), (0,-1), (1, -1)]
for ax, pair in zip(axes, pairs):
scan1D_result = perform_1D_interpolation(all_final_parameters[pair[0]],
all_final_parameters[pair[1]],
loss_function, n_steps=100)
plot_1D_interpolation_result(scan1D_result, ax=ax)
ax.tick_params(labelsize=tick_fontsize)
ax.set_xlabel("Interpolation Direction", fontsize=label_fontsize)
ax.set_ylabel("Loss", fontsize=label_fontsize+2)
ax.set_ylim(loss_limits)
ax1.set_title("Global and $1^{st}$ minimum", fontsize=title_fontsize)
ax2.set_title("Global and $2^{nd}$ minimum", fontsize=title_fontsize)
ax3.set_title("$1^{st}$ and $2^{nd}$ minimum", fontsize=title_fontsize)
plt.savefig("sombrero_pics/1D_interpolation.pdf")
plt.show()
```
### 2D Interpolation
```
# 2D interpolation: loss scanned in the plane spanned by the interpolation
# direction and a random orthogonal direction.
# NOTE(review): pairs here are (0,1), (1,2), (1,-1) but the titles repeat the
# labels from the 1D figure — verify the pairs/titles correspondence.
from orqviz.scans import perform_2D_interpolation, plot_2D_interpolation_result
from orqviz.plot_utils import normalize_color_and_colorbar, get_colorbar_from_ax
fig, axes = plt.subplots(1, 3, figsize=(16.5 ,3.5), gridspec_kw=dict(wspace=0.3))
(ax1, ax2, ax3) = axes
pairs = [(0,1), (1,2), (1,-1)]
for ax, pair in zip(axes, pairs):
scan2D_result = perform_2D_interpolation(all_final_parameters[pair[0]],
all_final_parameters[pair[1]],
loss_function, n_steps_x=100, end_points_x=(-0.5, 1.5), end_points_y=(-1, 1))
plot_2D_interpolation_result(scan2D_result, fig=fig, ax=ax)
normalize_color_and_colorbar(fig, ax, min_val=0, max_val=loss_limits[1])
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
ax.tick_params(labelsize=tick_fontsize)
ax.set_xlabel("Interpolation Direction", fontsize=label_fontsize)
ax.set_ylabel("Random Orth. Direction", fontsize=label_fontsize)
ax1.set_title("Global and $1^{st}$ minimum", fontsize=title_fontsize)
ax2.set_title("Global and $2^{nd}$ minimum", fontsize=title_fontsize)
ax3.set_title("$1^{st}$ and $2^{nd}$ minimum", fontsize=title_fontsize)
plt.savefig("sombrero_pics/2D_interpolation.pdf")
plt.show()
```
What do we know so far? We have a general idea that the loss function is somewhat periodic and concentric.\
We can also check how the training trajectories look like, i.e. which space the gradient descent traversed:
### PCA Scans
```
# PCA scans: project trajectories onto their two leading principal components
# and scan the loss in that plane.
from orqviz.pca import get_pca, perform_2D_pca_scan, plot_pca_landscape, plot_scatter_points_on_pca
# Figure 1: a joint PCA over 15 trajectories (8 best + 7 worst runs).
fig, ax = plt.subplots(1, 1, figsize=(5.5 ,3.5))
whiches = np.append(np.arange(0,8), np.arange(-8,-1), axis=0)
pca = get_pca(np.concatenate([all_parameter_trajectories[which] for which in whiches], axis=0))
scanpca_result = perform_2D_pca_scan(pca, loss_function, n_steps_x=100, offset=3)
plot_pca_landscape(scanpca_result, pca)
# Overlay each trajectory (blue) and its final point (red).
for which in whiches:
plot_scatter_points_on_pca(all_parameter_trajectories[which], pca, color="royalblue", alpha=0.5)
plot_scatter_points_on_pca(all_final_parameters[which], pca, color="red", alpha=1., s=150)
normalize_color_and_colorbar(fig, ax, 0., 1.3)
ax.set_xlabel("$1^{st}$ PCA Component", fontsize=label_fontsize)
ax.set_ylabel("$2^{nd}$ PCA Component", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=label_fontsize)
plt.title("Collection of 15 trajectories", fontsize=title_fontsize)
plt.savefig("./sombrero_pics/pca_many.pdf")
plt.show()
# Figure 2: per-trajectory PCA for the three best runs, one panel each.
fig, axes = plt.subplots(1, 3, figsize=(16.5 ,3.5), gridspec_kw=dict(wspace=0.3))
whiches = [0, 1, 2]
for ax, which in zip(axes, whiches):
pca = get_pca(all_parameter_trajectories[which])
scanpca_result = perform_2D_pca_scan(pca, loss_function, n_steps_x=100, offset=3)
plot_pca_landscape(scanpca_result, pca, fig=fig, ax=ax)
plot_scatter_points_on_pca(all_parameter_trajectories[which], pca, color="royalblue", alpha=0.5, ax=ax)
plot_scatter_points_on_pca(all_final_parameters[which], pca, color="red", alpha=1., ax=ax, s=150)
normalize_color_and_colorbar(fig, ax, 0., 1.3)
ax.set_xlabel("$1^{st}$ PCA Component", fontsize=label_fontsize)
ax.set_ylabel("$2^{nd}$ PCA Component", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
plt.show()
```
And now with two trajectories at a time:
```
# PCA over pairs of trajectories: fit PCA on the concatenation of two runs and
# plot both trajectories plus their final points on the shared landscape.
fig, axes = plt.subplots(1, 3, figsize=(16.5 ,3.5), gridspec_kw=dict(wspace=0.3))
(ax1, ax2, ax3) = axes
pairs = [(0,1), (0,-1), (1, -1)]
for ax, pair in zip(axes, pairs):
pca = get_pca(np.append(all_parameter_trajectories[pair[0]],all_parameter_trajectories[pair[1]], axis=0))
scanpca_result = perform_2D_pca_scan(pca, loss_function, n_steps_x=100, offset=2)
plot_pca_landscape(scanpca_result, pca, fig=fig, ax=ax)
plot_scatter_points_on_pca(all_parameter_trajectories[pair[0]], pca, ax=ax, color="royalblue", alpha=0.5,)
plot_scatter_points_on_pca(all_parameter_trajectories[pair[1]], pca, ax=ax, color="royalblue", alpha=0.5)
plot_scatter_points_on_pca(all_final_parameters[pair[0]], pca, color="red", alpha=1., ax=ax, s=150)
plot_scatter_points_on_pca(all_final_parameters[pair[1]], pca, color="red", alpha=1., ax=ax, s=150)
normalize_color_and_colorbar(fig, ax, min_val=0, max_val=loss_limits[1])
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
ax.tick_params(labelsize=tick_fontsize)
ax.set_xlabel("$1^{st}$ PCA Component", fontsize=label_fontsize)
ax.set_ylabel("$2^{nd}$ PCA Component", fontsize=label_fontsize)
ax1.set_title("Global and $1^{st}$ minimum", fontsize=title_fontsize)
ax2.set_title("Global and $2^{nd}$ minimum", fontsize=title_fontsize)
ax3.set_title("$1^{st}$ and $2^{nd}$ minimum", fontsize=title_fontsize)
plt.savefig("sombrero_pics/pca_on_two.pdf")
plt.show()
```
Surprisingly straight paths! This indicates that the landscape is locally highly convex. This can be tested with the Hessian!
### Hessians
```
from orqviz.hessians import get_Hessian, perform_1D_hessian_eigenvector_scan, plot_1D_hessian_eigenvector_scan_result
```
Lets first look at the eigenvalues
```
# Compute finite-difference Hessians at the best (index 0), second-best
# (index 1) and worst (index -1) minima, then plot their eigenvalue spectra.
hessian0 = get_Hessian(all_final_parameters[0], loss_function, eps=1e-3)
hessian1 = get_Hessian(all_final_parameters[1], loss_function, eps=1e-3)
hessian_1 = get_Hessian(all_final_parameters[-1], loss_function, eps=1e-3)
plt.plot(hessian0.eigenvalues, label="Global minimum", alpha=0.9, linewidth=2, marker="s", ms=8)
plt.plot(hessian1.eigenvalues, label="$1^{st}$ local minimum", alpha=0.9, linewidth=2, marker="o", ms=8)
plt.plot(hessian_1.eigenvalues, label="$2^{nd}$ local minimum", alpha=0.9, linewidth=2, marker="v", ms=8)
plt.ylim(-0.01, 1.5)
plt.legend(fontsize=legend_fontsize, loc="center left")
plt.title("Hessian Eigenvalues", fontsize=title_fontsize)
plt.ylabel("Magnitude of Eigenvalue", fontsize=label_fontsize)
plt.xlabel("Order of Eigenvalue", fontsize=label_fontsize)
plt.tick_params(labelsize=tick_fontsize)
plt.savefig("sombrero_pics/hessian_eigenvalues.pdf")
plt.show()
```
Interestingly, the global minimum has four equal eigenvalues, and all are positive. The other minima each have at least one eigenvalue which is zero. This shows that there is at least one direction in which the landscape is locally flat. Can we see this in 1D scans in the direction of the eigenvectors?
```
# 1D loss scans along each Hessian eigenvector at the three minima, one panel
# per minimum; line style distinguishes the eigenvector index (4 directions).
fig, axes = plt.subplots(1, 3, figsize=(16.5, 3.5), gridspec_kw=dict(wspace=0.3))
(ax1, ax2, ax3) = axes
#
styles = ["solid",
"dashed",
"dotted",
"dashdot",
]
list_of_scans0 = perform_1D_hessian_eigenvector_scan(hessian0, loss_function, n_points=101)
for ii in range(len(list_of_scans0)):
plot_1D_hessian_eigenvector_scan_result([list_of_scans0[ii]], [hessian0.eigenvalues[ii]], linestyle=styles[ii],
ax=ax1, color="tab:blue")
list_of_scans1 = perform_1D_hessian_eigenvector_scan(hessian1, loss_function, n_points=101)
for ii in range(len(list_of_scans1)):
plot_1D_hessian_eigenvector_scan_result([list_of_scans1[ii]], [hessian1.eigenvalues[ii]], linestyle=styles[ii],
ax=ax2, color="tab:orange")
list_of_scans_1 = perform_1D_hessian_eigenvector_scan(hessian_1, loss_function, n_points=101)
for ii in range(len(list_of_scans_1)):
plot_1D_hessian_eigenvector_scan_result([list_of_scans_1[ii]], [hessian_1.eigenvalues[ii]], linestyle=styles[ii],
ax=ax3, color="tab:green")
# Shared formatting for all three panels.
for ax in axes:
ax.set_ylim(loss_limits)
ax.set_ylabel("Loss", fontsize=label_fontsize+2)
ax.set_xlabel("Eigenvector Direction", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
ax.legend(fontsize=legend_fontsize, loc="lower right")
ax1.set_title("Global Minimum", fontsize=title_fontsize)
ax2.set_title("$1^{st}$ Local Minimum", fontsize=title_fontsize)
ax3.set_title("$2^{nd}$ Local Minimum", fontsize=title_fontsize)
plt.savefig("sombrero_pics/hessian_1D_scan.pdf")
plt.show()
```
Apparently, from the global minimum it goes straight up in every direction! This is not the case for the other two minima. However, we see that some directions are significantly flatter.
```
from orqviz.scans import perform_2D_scan, plot_2D_scan_result
```
Let's try 2D Hessian eigenvector scans in the directions of selected Hessian eigenvectors.
```
# 2D loss scans in planes spanned by pairs of Hessian eigenvectors at the two
# best minima (zip truncates the 3-element Hessian list to the 2 axes/whiches).
# NOTE(review): the first figure uses eigenvectors[0] and [1], the second
# eigenvectors[0] and [-1], but the second figure's axis labels say 3rd/4th —
# confirm which eigenvector ordering orqviz uses and which labels are intended.
factor = 3
whiches = [0,1]
##
fig, axes = plt.subplots(1, 2, figsize=(12,3.5), gridspec_kw=dict(wspace=0.3))
(ax1, ax2) = axes
print("First in the low eigenvalue directions:")
for ax, which, hess in zip(axes, whiches, [hessian0, hessian1, hessian_1]):
scan2D_result = perform_2D_scan(all_final_parameters[which], loss_function,
direction_x=hess.eigenvectors[0]*factor,
direction_y=hess.eigenvectors[1]*factor,
n_steps_x=100)
plot_2D_scan_result(scan2D_result, fig=fig, ax=ax)
normalize_color_and_colorbar(fig, ax, min_val=0., max_val=loss_limits[1])
ax.scatter(0,0, color="red")
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
ax.tick_params(labelsize=tick_fontsize)
ax.set_xlabel("$1^{st}$ Eigenvector Direction", fontsize=label_fontsize)
ax.set_ylabel("$2^{nd}$ Eigenvector Direction", fontsize=label_fontsize)
ax1.set_title("Minimum 1", fontsize=title_fontsize)
ax2.set_title("Minimum 2", fontsize=title_fontsize)
# ax3.set_title("Minimum 4", fontsize=title_fontsize)
plt.show()
fig, axes = plt.subplots(1,2, figsize=(12, 3.5), gridspec_kw=dict(wspace=0.3))
(ax1, ax2) = axes
print("Now in the high eigenvector directions:")
for ax, which, hess in zip(axes, whiches, [hessian0, hessian1, hessian_1]):
scan2D_result = perform_2D_scan(all_final_parameters[which], loss_function,
direction_x=hess.eigenvectors[0]*factor,
direction_y=hess.eigenvectors[-1]*factor,
n_steps_x=100)
plot_2D_scan_result(scan2D_result, fig=fig, ax=ax)
normalize_color_and_colorbar(fig, ax, min_val=0., max_val=loss_limits[1])
ax.scatter(0,0, color="red")
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
ax.tick_params(labelsize=tick_fontsize)
ax.set_xlabel("$3^{rd}$ Eigenvector Direction", fontsize=label_fontsize)
ax.set_ylabel("$4^{th}$ Eigenvector Direction", fontsize=label_fontsize)
ax1.set_title("Minimum 1", fontsize=title_fontsize)
ax2.set_title("Minimum 2", fontsize=title_fontsize)
# ax3.set_title("Minimum 4", fontsize=title_fontsize)
# fig.savefig("sombrero_pics/hessian_2D_scan.pdf")
plt.show()
```
This is not too much information that we didn't already know, but those flat directions indicated by the Hessian eigenvalues and by the scans look interesting. Unfortunately, the straight 1D scans didn't reveal too much...\
Luckily, we have a tool to scan non-linearly - the Nudged Elastic Band!
### Nudged Elastic Band
```
from orqviz.elastic_band import Chain, ChainPath, run_AutoNEB
```
The initial trajectories are linear interpolations between the points
```
# Initial NEB chains: 5-pivot linear interpolations between pairs of minima
# (01: best <-> 2nd best, 12: 2nd best <-> 3rd best).
lin_chain01 = Chain(np.linspace(all_final_parameters[0], all_final_parameters[1], num=5))
lin_path01 = ChainPath(lin_chain01)
#
lin_chain12 = Chain(np.linspace(all_final_parameters[1], all_final_parameters[2], num=5))
lin_path12 = ChainPath(lin_chain12)
```
Now lets train!
```
# Optimize the 1-2 chain with AutoNEB and compare the loss along the original
# linear path versus the optimized path.
all_chains12 = run_AutoNEB(lin_chain12, loss_function, n_cycles=4, n_iters_per_cycle=100, max_new_pivots=2,
weighted_redistribution=True, eps=1e-3, learning_rate=0.1, percentage_tol=0.1)
# Last chain of the AutoNEB cycles is the fully trained one.
trained_chain12 = all_chains12[-1]
trained_path12 = ChainPath(trained_chain12)
plt.plot(np.linspace(0, 1, 100), lin_path12.evaluate_points_on_path(100, loss_function), label="linear interpolation", linewidth=3)
plt.plot(np.linspace(0, 1, 100), trained_path12.evaluate_points_on_path(100, loss_function), label="optimized path", linewidth=3)
# Mark the two endpoint minima in red.
plt.scatter([0,1], lin_chain12.evaluate_on_pivots(loss_function)[[0,-1]], color="red", linewidth=3, zorder=3)
fig = plt.gcf()
ax = fig.gca()
ax.set_xlabel("Position on Path", fontsize=label_fontsize)
ax.set_ylabel("Loss", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
plt.legend(fontsize=legend_fontsize)
plt.ylim(loss_limits)
plt.title("Between two local minima", fontsize=title_fontsize)
plt.savefig("./sombrero_pics/NEB_loss.pdf")
plt.show()
```
The space between them is flat!!!
```
# Show linear vs optimized NEB pivots on a PCA landscape fit to the trained
# chain. NOTE(review): plot_line_through_points_on_pca is imported but not
# used in this cell.
from orqviz.pca import plot_line_through_points_on_pca
pca = get_pca(trained_chain12.pivots)
scanpca_result = perform_2D_pca_scan(pca, loss_function, n_steps_x=100, offset=3)
plot_pca_landscape(scanpca_result, pca)
plot_scatter_points_on_pca(lin_chain12.pivots, pca, color="royalblue", linewidth=0.5, edgecolor="white", s=200, label="Linear Path")
plot_scatter_points_on_pca(trained_chain12.pivots, pca, color="tab:orange", linewidth=0.5, edgecolor="white", s=200, label="Optimized Path")
fig = plt.gcf()
ax = fig.gca()
normalize_color_and_colorbar(fig, ax, 0, loss_limits[1])
ax.set_xlabel("$1^{st}$ PCA Component", fontsize=label_fontsize)
ax.set_ylabel("$2^{nd}$ PCA Component", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
plt.legend(loc="lower left", fontsize=legend_fontsize)
plt.title("Between two local minima", fontsize=title_fontsize)
plt.savefig("./sombrero_pics/NEB_pca_chain.pdf")
plt.show()
```
And PCA scans tell us how. These tools all work in conjunction :)\
Last question, can we get into the global minimum from the first local minimum?
```
# Same AutoNEB experiment for the 0-1 chain (global <-> 1st local minimum):
# loss along the path, then the pivots on a PCA landscape.
all_chains01 = run_AutoNEB(lin_chain01, loss_function, n_cycles=4, n_iters_per_cycle=100, max_new_pivots=2,
weighted_redistribution=True, eps=1e-3, learning_rate=0.1, percentage_tol=0.1)
trained_chain01 = all_chains01[-1]
trained_path01 = ChainPath(trained_chain01)
# plt.plot(lin_path01.evaluate_points_on_path(100, loss_function), label="linear interpolation")
# plt.plot(trained_path01.evaluate_points_on_path(100, loss_function), label="optimized path")
plt.plot(np.linspace(0, 1, 100), lin_path01.evaluate_points_on_path(100, loss_function), label="linear interpolation", linewidth=3)
plt.plot(np.linspace(0, 1, 100), trained_path01.evaluate_points_on_path(100, loss_function), label="optimized path", linewidth=3)
plt.scatter([0,1], lin_chain01.evaluate_on_pivots(loss_function)[[0,-1]], color="red", linewidth=3, zorder=3)
fig = plt.gcf()
ax = fig.gca()
ax.set_xlabel("Position on Path", fontsize=label_fontsize)
ax.set_ylabel("Loss", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
plt.legend(fontsize=legend_fontsize)
plt.ylim(loss_limits)
# NOTE(review): "minum" in the two titles below is a typo for "minimum"
# (runtime strings, left unchanged here).
plt.title("Between global and $1^{st}$ minum", fontsize=title_fontsize)
plt.savefig("./sombrero_pics/NEB_loss_2.pdf")
plt.show()
pca = get_pca(trained_chain01.pivots)
scanpca_result = perform_2D_pca_scan(pca, loss_function, n_steps_x=100, offset=3)
plot_pca_landscape(scanpca_result, pca)
plot_scatter_points_on_pca(lin_chain01.pivots, pca, color="royalblue", linewidth=0.5, edgecolor="white", s=200, label="Linear Path")
plot_scatter_points_on_pca(trained_chain01.pivots, pca, color="tab:orange", linewidth=0.5, edgecolor="white", s=200, label="Optimized Path")
fig = plt.gcf()
ax = fig.gca()
normalize_color_and_colorbar(fig, ax, 0, loss_limits[1])
ax.set_xlabel("$1^{st}$ PCA Component", fontsize=label_fontsize)
ax.set_ylabel("$2^{nd}$ PCA Component", fontsize=label_fontsize)
ax.tick_params(labelsize=tick_fontsize)
cbar = get_colorbar_from_ax(ax)
cbar.ax.tick_params(labelsize=colorbar_fontsize)
plt.legend(loc="lower left", fontsize=legend_fontsize)
plt.title("Between global and $1^{st}$ minum", fontsize=title_fontsize)
plt.savefig("./sombrero_pics/NEB_pca_chain_2.pdf")
plt.show()
```
No!\
So what do we have here? Let's do a final 2D scan with 3D plot to best imagine what we have just learned.
```
# Final 3D surface plot of the loss around the global minimum.
# NOTE(review): `hess` here is whatever value the earlier Hessian loop left
# behind, while the scan origin is all_final_parameters[0] — confirm the
# intended Hessian (hessian0) is used for the scan directions.
from orqviz.scans import plot_2D_scan_result_as_3D
fig = plt.figure(figsize=(9,9))
ax = fig.add_subplot(111, projection="3d")
factor = 6
scan2D_result = perform_2D_scan(all_final_parameters[0], loss_function,
direction_x=hess.eigenvectors[0]*factor,
direction_y=hess.eigenvectors[1]*factor,
n_steps_x=100)
plot_2D_scan_result_as_3D(scan2D_result, ax=ax)
ax.set_zlim(loss_limits)
ax.tick_params(labelsize=14)
ax.set_xlabel("Scan Direction x", fontsize=15)
ax.set_ylabel("Scan Direction y", fontsize=15)
ax.set_zlabel("Loss", fontsize=18, rotation=90)
plt.tight_layout()
# Hide the 3D axes entirely (private matplotlib attribute).
ax._axis3don = False
plt.savefig("./sombrero_pics/sombrero_3D.pdf")
plt.show()
```
### The End
| github_jupyter |
# Training a ConvNet PyTorch
In this notebook, you'll learn how to use the powerful PyTorch framework to specify a conv net architecture and train it on the CIFAR-10 dataset.
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data import sampler
import torchvision.datasets as dset
import torchvision.transforms as T
import numpy as np
import timeit
```
## What's this PyTorch business?
You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.
For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, PyTorch (or TensorFlow, if you switch over to that notebook).
Why?
* Our code will now run on GPUs! Much faster training. When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly (which is beyond the scope of this class).
* We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand.
* We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :)
* We want you to be exposed to the sort of deep learning code you might run into in academia or industry.
## How will I learn PyTorch?
If you've used Torch before, but are new to PyTorch, this tutorial might be of use: http://pytorch.org/tutorials/beginner/former_torchies_tutorial.html
Otherwise, this notebook will walk you through much of what you need to do to train models in Torch. See the end of the notebook for some links to helpful tutorials if you want to learn more or need further clarification on topics that aren't fully explained here.
## Load Datasets
We load the CIFAR-10 dataset. This might take a couple minutes the first time you do it, but the files should stay cached after that.
```
class ChunkSampler(sampler.Sampler):
    """Yield dataset indices sequentially from a fixed offset.

    Arguments:
        num_samples: # of desired datapoints
        start: offset where we should start selecting from
    """

    def __init__(self, num_samples, start=0):
        self.num_samples = num_samples
        self.start = start

    def __iter__(self):
        # Contiguous index range [start, start + num_samples).
        stop = self.start + self.num_samples
        return iter(range(self.start, stop))

    def __len__(self):
        return self.num_samples
# Split CIFAR-10's 50k training images into 49k train / 1k val via
# ChunkSampler offsets; downloads the dataset on first run.
NUM_TRAIN = 49000
NUM_VAL = 1000
cifar10_train = dset.CIFAR10('./cs231n/datasets', train=True, download=True,
transform=T.ToTensor())
loader_train = DataLoader(cifar10_train, batch_size=64, sampler=ChunkSampler(NUM_TRAIN, 0))
# Validation set: same underlying data, sampled from index NUM_TRAIN onward.
cifar10_val = dset.CIFAR10('./cs231n/datasets', train=True, download=True,
transform=T.ToTensor())
loader_val = DataLoader(cifar10_val, batch_size=64, sampler=ChunkSampler(NUM_VAL, NUM_TRAIN))
cifar10_test = dset.CIFAR10('./cs231n/datasets', train=False, download=True,
transform=T.ToTensor())
loader_test = DataLoader(cifar10_test, batch_size=64)
```
For now, we're going to use a CPU-friendly datatype. Later, we'll switch to a datatype that will move all our computations to the GPU and measure the speedup.
```
# CPU tensor type for all model data; switch to a CUDA type to use the GPU.
dtype = torch.FloatTensor # the CPU datatype
# Constant to control how frequently we print train loss
print_every = 100
# Small utility to re-initialize a model's weights before retraining.
def reset(m):
    """Call ``m.reset_parameters()`` if the module defines it; otherwise no-op."""
    if not hasattr(m, 'reset_parameters'):
        return
    m.reset_parameters()
```
## Example Model
### Some assorted tidbits
Let's start by looking at a simple model. First, note that PyTorch operates on Tensors, which are n-dimensional arrays functionally analogous to numpy's ndarrays, with the additional feature that they can be used for computations on GPUs.
We'll provide you with a Flatten function, which we explain here. Remember that our image data (and more relevantly, our intermediate feature maps) are initially N x C x H x W, where:
* N is the number of datapoints
* C is the number of channels
* H is the height of the intermediate feature map in pixels
* W is the width of the intermediate feature map in pixels
This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we input data into fully connected affine layers, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "Flatten" operation to collapse the C x H x W values per representation into a single long vector. The Flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly).
```
class Flatten(nn.Module):
    """Collapse a (N, C, H, W) batch into (N, C*H*W) feature vectors.

    Used between convolutional layers (which need the spatial layout)
    and fully-connected layers (which want one flat vector per image).
    """
    def forward(self, x):
        batch, channels, height, width = x.size()
        return x.view(batch, channels * height * width)
```
### The example model itself
The first step to training your own model is defining its architecture.
Here's an example of a convolutional neural network defined in PyTorch -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. nn.Sequential is a container which applies each layer
one after the other.
In that example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Cross-Entropy loss function, and the Adam optimizer being used.
Make sure you understand why the parameters of the Linear layer are 5408 and 10.
```
# Here's where we define the architecture of the model...
# conv7x7 stride 2: (N, 3, 32, 32) -> (N, 32, 13, 13); 32*13*13 = 5408,
# which is why the affine layer maps 5408 inputs to 10 class scores.
simple_model = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=7, stride=2),
    nn.ReLU(inplace=True),
    Flatten(), # see above for explanation
    nn.Linear(5408, 10), # affine layer
)
# Set the type of all data in this model to be FloatTensor
simple_model.type(dtype)
loss_fn = nn.CrossEntropyLoss().type(dtype)
optimizer = optim.Adam(simple_model.parameters(), lr=1e-2) # lr sets the learning rate of the optimizer
```
PyTorch supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). One note: what we call in the class "spatial batch norm" is called "BatchNorm2D" in PyTorch.
* Layers: http://pytorch.org/docs/nn.html
* Activations: http://pytorch.org/docs/nn.html#non-linear-activations
* Loss functions: http://pytorch.org/docs/nn.html#loss-functions
* Optimizers: http://pytorch.org/docs/optim.html#algorithms
## Training a specific model
In this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the PyTorch documentation and configuring your own model.
Using the code provided above as guidance, and using the following PyTorch documentation, specify a model with the following architecture:
* 7x7 Convolutional Layer with 32 filters and stride of 1
* ReLU Activation Layer
* Spatial Batch Normalization Layer
* 2x2 Max Pooling layer with a stride of 2
* Affine layer with 1024 output units
* ReLU Activation Layer
* Affine layer from 1024 input units to 10 outputs
And finally, set up a **cross-entropy** loss function and the **RMSprop** learning rule.
```
fixed_model_base = nn.Sequential( # You fill this in!
    # (N, 3, 32, 32) -> conv7x7 stride 1 -> (N, 32, 26, 26)
    nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, stride=1),
    nn.ReLU(True),
    nn.BatchNorm2d(num_features=32),
    # 2x2 max pool stride 2 -> (N, 32, 13, 13); 32*13*13 = 5408
    nn.MaxPool2d(kernel_size=(2,2), stride=2),
    Flatten(),
    nn.Linear(in_features=5408, out_features=1024),
    nn.ReLU(True),
    nn.Linear(in_features=1024, out_features=10)
)
# Cast all parameters to the (CPU) dtype chosen earlier.
fixed_model = fixed_model_base.type(dtype)
```
To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 64 x 10, since our batches have size 64 and the output of the final affine layer should be 10, corresponding to our 10 classes):
```
## Now we're going to feed a random batch into the model you defined and make sure the output is the right size
# Fake minibatch: 64 CIFAR-sized (3x32x32) RGB images.
x = torch.randn(64, 3, 32, 32).type(dtype)
x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data
ans = fixed_model(x_var)        # Feed it through the model!
# Check to make sure what comes out of your model
# is the right dimensionality... this should be True
# if you've done everything correctly
np.array_equal(np.array(ans.size()), np.array([64, 10]))
```
### GPU!
Now, we're going to switch the dtype of the model and our data to the GPU-friendly tensors, and see what happens... everything is the same, except we are casting our model and input tensors as this new dtype instead of the old one.
If this returns false, or otherwise fails in a not-graceful way (i.e., with some error message), you may not have an NVIDIA GPU available on your machine. If you're running locally, we recommend you switch to Google Cloud and follow the instructions to set up a GPU there. If you're already on Google Cloud, something is wrong -- make sure you followed the instructions on how to request and use a GPU on your instance. If you did, post on Piazza or come to Office Hours so we can help you debug.
```
# Verify that CUDA is properly configured and you have a GPU available
torch.cuda.is_available()

import copy
gpu_dtype = torch.cuda.FloatTensor
# Deep-copy so the CPU model keeps its own (independent) parameters.
fixed_model_gpu = copy.deepcopy(fixed_model_base).type(gpu_dtype)

x_gpu = torch.randn(64, 3, 32, 32).type(gpu_dtype)
# Fix: wrap the GPU batch `x_gpu` created above -- the original wrapped the
# CPU tensor `x` from an earlier cell, leaving `x_gpu` unused.
x_var_gpu = Variable(x_gpu.type(gpu_dtype)) # Construct a PyTorch Variable out of your input data
ans = fixed_model_gpu(x_var_gpu)            # Feed it through the model!

# Check to make sure what comes out of your model
# is the right dimensionality... this should be True
# if you've done everything correctly
np.array_equal(np.array(ans.size()), np.array([64, 10]))
```
Run the following cell to evaluate the performance of the forward pass running on the CPU:
```
%%timeit
ans = fixed_model(x_var)
```
... and now the GPU:
```
%%timeit
torch.cuda.synchronize() # Make sure there are no pending GPU computations
ans = fixed_model_gpu(x_var_gpu) # Feed it through the model!
torch.cuda.synchronize() # Make sure there are no pending GPU computations
```
You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use the GPU datatype for your model and your tensors: as a reminder that is *torch.cuda.FloatTensor* (in our notebook here as *gpu_dtype*)
### Train the model.
Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the simple_model we provided above).
Make sure you understand how each PyTorch function used below corresponds to what you implemented in your custom neural network implementation.
Note that because we are not resetting the weights anywhere below, if you run the cell multiple times, you are effectively training multiple epochs (so your performance should improve).
First, set up an RMSprop optimizer (using a 1e-3 learning rate) and a cross-entropy loss function:
```
# Cross-entropy loss on the GPU and RMSprop at lr=1e-3, as specified above.
loss_fn = nn.CrossEntropyLoss().cuda()
optimizer = optim.RMSprop(fixed_model_gpu.parameters(),lr=1e-3)
pass
# This sets the model in "training" mode. This is relevant for some layers that may have different behavior
# in training mode vs testing mode, such as Dropout and BatchNorm.
fixed_model_gpu.train()
# Load one batch at a time.
for t, (x, y) in enumerate(loader_train):
    x_var = Variable(x.type(gpu_dtype))
    # Labels must be a LongTensor for CrossEntropyLoss.
    y_var = Variable(y.type(gpu_dtype).long())
    # This is the forward pass: predict the scores for each class, for each x in the batch.
    scores = fixed_model_gpu(x_var)
    # Use the correct y values and the predicted y values to compute the loss.
    loss = loss_fn(scores, y_var)
    if (t + 1) % print_every == 0:
        print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
    # Zero out all of the gradients for the variables which the optimizer will update.
    optimizer.zero_grad()
    # This is the backwards pass: compute the gradient of the loss with respect to each
    # parameter of the model.
    loss.backward()
    # Actually update the parameters of the model using the gradients computed by the backwards pass.
    optimizer.step()
```
Now you've seen how the training process works in PyTorch. To save you writing boilerplate code, we're providing the following helper functions to help you train for multiple epochs and check the accuracy of your model:
```
def train(model, loss_fn, optimizer, num_epochs = 1):
    """Train `model` for `num_epochs` passes over loader_train.

    Logs the loss every `print_every` minibatches.  Relies on the notebook
    globals loader_train, gpu_dtype, and print_every.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        model.train()  # enable train-mode behavior (dropout, batchnorm)
        for step, (inputs, labels) in enumerate(loader_train, start=1):
            input_var = Variable(inputs.type(gpu_dtype))
            # Labels must be a LongTensor for CrossEntropyLoss.
            label_var = Variable(labels.type(gpu_dtype).long())
            loss = loss_fn(model(input_var), label_var)
            if step % print_every == 0:
                print('t = %d, loss = %.4f' % (step, loss.data[0]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
def check_accuracy(model, loader):
    """Report classification accuracy of `model` over all batches in `loader`.

    NOTE: the validation loader is carved out of the training set, so
    `loader.dataset.train` is True for val and False for test.
    """
    if loader.dataset.train:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    num_correct, num_samples = 0, 0
    model.eval()  # Put the model in test mode (the opposite of model.train(), essentially)
    for inputs, labels in loader:
        # volatile=True: inference only, no autograd bookkeeping.
        input_var = Variable(inputs.type(gpu_dtype), volatile=True)
        class_scores = model(input_var)
        _, predictions = class_scores.data.cpu().max(1)
        num_correct += (predictions == labels).sum()
        num_samples += predictions.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
```
### Check the accuracy of the model.
Let's see the train and check_accuracy code in action -- feel free to use these methods when evaluating the models you develop below.
You should get a training loss of around 1.2-1.4, and a validation accuracy of around 50-60%. As mentioned above, if you re-run the cells, you'll be training more epochs, so your performance will improve past these numbers.
But don't worry about getting these numbers better -- this was just practice before you tackle designing your own model.
```
# Seed the GPU RNG so the weight re-initialization below is reproducible.
torch.cuda.random.manual_seed(12345)
# Re-randomize every layer's parameters, then train one epoch and evaluate.
fixed_model_gpu.apply(reset)
train(fixed_model_gpu, loss_fn, optimizer, num_epochs=1)
check_accuracy(fixed_model_gpu, loader_val)
```
### Don't forget the validation set!
And note that you can use the check_accuracy function to evaluate on either the test set or the validation set, by passing either **loader_test** or **loader_val** as the second argument to check_accuracy. You should not touch the test set until you have finished your architecture and hyperparameter tuning, and only run the test set once at the end to report a final value.
## Train a _great_ model on CIFAR-10!
Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves **>=70%** accuracy on the CIFAR-10 **validation** set. You can use the check_accuracy and train functions from above.
### Things you should try:
- **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient
- **Number of filters**: Above we used 32 filters. Do more or fewer do better?
- **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?
- **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?
- **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include:
- [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
- [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
- [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]
- **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).
- **Regularization**: Add l2 weight regularization, or perhaps use Dropout.
### Tips for training
For each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind:
- If the parameters are working well, you should see improvement within a few hundred iterations
- Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.
- Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.
- You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set.
### Going above and beyond
If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try for extra credit.
- Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta.
- Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.
- Model ensembles
- Data augmentation
- New Architectures
- [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.
- [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.
- [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)
If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below.
### What we expect
At the very least, you should be able to train a ConvNet that gets at least 70% accuracy on the validation set. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches.
You should use the space below to experiment and train your network.
Have fun and happy training!
```
# Train your model here, and make sure the output of this cell is the accuracy of your best model on the
# train, val, and test sets. Here's some code to get you started. The output of this cell should be the training
# and validation accuracy on your best model (measured by validation accuracy).
class model_cifar10(nn.Module):
    """Two conv blocks followed by two affine layers for 10-way CIFAR-10
    classification.  Expects (N, 3, 32, 32) input batches."""
    def __init__(self):
        super(model_cifar10, self).__init__()
        # (N, 3, 32, 32) -> conv3x3, no padding -> (N, 64, 30, 30)
        # -> 2x2 max pool stride 2 -> (N, 64, 15, 15)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1),
            nn.ReLU(True),
            nn.BatchNorm2d(num_features=64),
            nn.MaxPool2d(kernel_size=(2,2), stride=2)
        ) # (N, 64, 15, 15)
        # (N, 64, 15, 15) -> conv3x3, no padding -> (N, 128, 13, 13)
        # -> 2x2 max pool stride 2 (floor) -> (N, 128, 6, 6)
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1),
            nn.ReLU(True),
            nn.BatchNorm2d(num_features=128),
            nn.MaxPool2d(kernel_size=(2,2), stride=2)
        ) # (N, 128, 6, 6)
        # 6*6*128 = 4608 flattened features feed the first affine layer.
        self.fc1 = nn.Linear(in_features=6*6*128, out_features=1024)
        self.dropout = nn.Dropout(p=0.5)
        self.out = nn.Linear(in_features=1024, out_features=10)
    def forward(self,x):
        x = self.conv1(x)
        x = self.conv2(x)
        # Flatten (N, C, H, W) -> (N, C*H*W) before the affine layers.
        N, C, H, W = x.size()
        x = x.view(N, -1)
        x = self.fc1(x)
        x = self.dropout(x)
        x = self.out(x)
        return x
# Instantiate the model on the GPU and train for 10 epochs with RMSprop.
model = model_cifar10().cuda()
loss_fn = nn.CrossEntropyLoss().cuda()
optimizer = optim.RMSprop(model.parameters(),lr=1e-3)
train(model, loss_fn, optimizer, num_epochs=10)
check_accuracy(model, loader_val)
```
### Describe what you did
In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network.
Tell us here!
## Test set -- run this only once
Now that we've gotten a result we're happy with, we test our final model on the test set (which you should store in best_model). This would be the score we would achieve on a competition. Think about how this compares to your validation set accuracy.
```
# Final, one-time evaluation on the held-out test set.
best_model = model
check_accuracy(best_model, loader_test)
```
## Going further with PyTorch
The next assignment will make heavy use of PyTorch. You might also find it useful for your projects.
Here's a nice tutorial by Justin Johnson that shows off some of PyTorch's features, like dynamic graphs and custom NN modules: http://pytorch.org/tutorials/beginner/pytorch_with_examples.html
If you're interested in reinforcement learning for your final project, this is a good (more advanced) DQN tutorial in PyTorch: http://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
| github_jupyter |
# Label and feature engineering
This lab is *optional*. It demonstrates advanced SQL queries for time-series engineering. For real-world problems, this type of feature engineering code is essential. If you are pursuing a time-series project for open project week, feel free to use this code as a template.
---
Learning objectives:
1. Learn how to use BigQuery to build time-series features and labels for forecasting
2. Learn how to visualize and explore features.
3. Learn effective scaling and normalizing techniques to improve our modeling results
Now that we have explored the data, let's start building our features, so we can build a model.
<h3><font color="#4885ed">Feature Engineering</font> </h3>
Using the `price_history` table, we can look at the past performance of a given stock to try to predict its future stock price. In this notebook we will be focused on cleaning and creating features from this table.
There are typically two different approaches to creating features with time-series data.
**One approach** is to aggregate the time-series into "static" features, such as "min_price_over_past_month" or "exp_moving_avg_past_30_days". Using this approach, we can use a deep neural network or a more "traditional" ML model to train. Notice we have essentially removed all sequential information after aggregating. This assumption can work well in practice.
A **second approach** is to preserve the ordered nature of the data and use a sequential model, such as a recurrent neural network. This approach has the nice benefit that it typically requires less feature engineering, although training sequential models typically takes longer.
In this notebook, we will build features and also create rolling windows of the ordered time-series data.
<h3><font color="#4885ed">Label Engineering</font> </h3>
We are trying to predict if the stock will go up or down. In order to do this we will need to "engineer" our label by looking into the future and using that as the label. We will be using the [`LAG`](https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#lag) function in BigQuery to do this. Visually this looks like:

## Import libraries; setup
```
# Resolve the active GCP project id via the gcloud CLI (IPython `!` capture).
PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]  # the shell capture is a list of output lines; keep the first
import pandas as pd
from google.cloud import bigquery
from IPython import get_ipython
from IPython.core.magic import register_cell_magic
# BigQuery client bound to the current project.
bq = bigquery.Client(project=PROJECT)
# Allow you to easily have Python variables in SQL query.
@register_cell_magic("with_globals")
def with_globals(line, cell):
    """Cell magic: substitute {name} placeholders in the cell body using
    the notebook's globals, optionally echo the result (when the magic
    line contains "print"), then execute the expanded cell."""
    expanded = cell.format(**globals())
    if "print" in line:
        print(expanded)
    get_ipython().run_cell(expanded)
def create_dataset():
    """Create the `stock_market` BigQuery dataset if it does not already exist."""
    dataset = bigquery.Dataset(bq.dataset("stock_market"))
    try:
        bq.create_dataset(dataset)  # Will fail if dataset already exists.
        print("Dataset created")
    except Exception:
        # Fixed: a bare `except:` also swallows KeyboardInterrupt/SystemExit.
        # (Ideally this would catch google.api_core.exceptions.Conflict,
        # the specific "already exists" error, and re-raise anything else.)
        print("Dataset already exists")

create_dataset()
```
## Create time-series features and determine label based on market movement
### Summary of base tables
**TODO**: How many rows are in our base tables `price_history` and `snp500`?
```
%%with_globals
%%bigquery --project {PROJECT}
--# TODO
%%with_globals
%%bigquery --project {PROJECT}
--# TODO
```
### Label engineering
Ultimately, we need to end up with a single label for each day. The label takes on 3 values: {`down`, `stay`, `up`}, where `down` and `up` indicates the normalized price (more on this below) went down 1% or more and up 1% or more, respectively. `stay` indicates the stock remained within 1%.
The steps are:
1. Compare close price and open price
2. Compute price features using analytics functions
3. Compute normalized price change (%)
4. Join with S&P 500 table
5. Create labels (`up`, `down`, `stay`)
<h3><font color="#4885ed">Compare close price and open price</font> </h3>
For each row, get the close price of yesterday and the open price of tomorrow using the [`LAG`](https://cloud.google.com/bigquery/docs/reference/legacy-sql#lag) function. We will determine tomorrow's close - today's close.
#### Shift to get tomorrow's close price.
**Learning objective 1**
```
%%with_globals print
%%bigquery df --project {PROJECT}
CREATE OR REPLACE TABLE `stock_market.price_history_delta`
AS
(
WITH shifted_price AS
(
SELECT *,
(LAG(close, 1) OVER (PARTITION BY symbol order by Date DESC)) AS tomorrow_close
FROM `stock_src.price_history`
WHERE Close > 0
)
SELECT a.*,
(tomorrow_close - Close) AS tomo_close_m_close
FROM shifted_price a
)
%%with_globals
%%bigquery --project {PROJECT}
SELECT *
FROM stock_market.price_history_delta
ORDER by Date
LIMIT 100
```
**TODO**: Historically, we know that the stock market has been going up. Can you think of a way to verify this using our newly created table `price_history_delta`?
**Learning objective 2**
```
%%with_globals print
%%bigquery --project {PROJECT}
SELECT
--# TODO: verify the stock market is going up -- on average.
FROM
stock_market.price_history_delta
```
### Add time series features
<h3><font color="#4885ed">Compute price features using analytics functions</font> </h3>
In addition, we will also build time-series features using the min, max, mean, and std (can you think of any other functions to use?). To do this, let's use [analytic functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/analytic-function-concepts) in BigQuery (also known as window functions).
```
An analytic function is a function that computes aggregate values over a group of rows. Unlike aggregate functions, which return a single aggregate value for a group of rows, analytic functions return a single value for each row by computing the function over a group of input rows.
```
Using the `AVG` analytic function, we can compute the average close price of a given symbol over the past week (5 business days):
```python
(AVG(close) OVER (PARTITION BY symbol
ORDER BY Date
ROWS BETWEEN 5 PRECEDING AND 1 PRECEDING)) / close
AS close_avg_prior_5_days
```
**Learning objective 1**
**TODO**: Please fill in the `# TODO`s in the below query
```
def get_window_fxn(agg_fxn, n_days):
    """Generate a time-series feature.

    E.g., Compute the average of the price over the past 5 days.

    Args:
        agg_fxn: SQL aggregate name, e.g. "MIN", "MAX", "AVG", "STDDEV".
        n_days: size of the trailing window in (business) days.

    Returns:
        A SQL snippet computing the windowed aggregate of `close`,
        divided by the current close so the feature is a scale-free ratio.
    """
    SCALE_VALUE = "close"
    # NOTE: the (# TODO) markers are intentional -- students fill in the
    # PARTITION BY / ORDER BY / window-frame clauses as part of this lab.
    sql = """
    ({agg_fxn}(close) OVER (PARTITION BY (# TODO)
    ORDER BY (# TODO)
    ROWS BETWEEN {n_days} (# TODO)))/{scale}
    AS close_{agg_fxn}_prior_{n_days}_days""".format(
        agg_fxn=agg_fxn, n_days=n_days, scale=SCALE_VALUE
    )
    return sql
# Lookback windows measured in trading days.
WEEK = 5
MONTH = 20
YEAR = 52 * 5

agg_funcs = ("MIN", "MAX", "AVG", "STDDEV")
lookbacks = (WEEK, MONTH, YEAR)
# One windowed-aggregate snippet per (aggregate, lookback) pair.
sqls = [get_window_fxn(fxn, lookback) for fxn in agg_funcs for lookback in lookbacks]
time_series_features_sql = ",".join(sqls)  # SQL string.

def preview_query():
    """Print the first 1000 characters of the generated feature SQL."""
    print(time_series_features_sql[0:1000])

preview_query()
%%with_globals print
%%bigquery --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.price_features_delta
AS
SELECT *
FROM
(SELECT *,
{time_series_features_sql},
-- Also get the raw time-series values; will be useful for the RNN model.
(ARRAY_AGG(close) OVER (PARTITION BY symbol
ORDER BY Date
ROWS BETWEEN 260 PRECEDING AND 1 PRECEDING))
AS close_values_prior_260,
ROW_NUMBER() OVER (PARTITION BY symbol ORDER BY Date) AS days_on_market
FROM stock_market.price_history_delta)
WHERE days_on_market > {YEAR}
%%bigquery --project {PROJECT}
SELECT *
FROM stock_market.price_features_delta
ORDER BY symbol, Date
LIMIT 10
```
#### Compute percentage change, then self join with prices from S&P index.
We will also compute price change of S&P index, GSPC. We do this so we can compute the normalized percentage change.
<h3><font color="#4885ed">Compute normalized price change (%)</font> </h3>
Before we can create our labels we need to normalize the price change using the S&P 500 index. The normalization using the S&P index fund helps ensure that the future price of a stock is not due to larger market effects. Normalization helps us isolate the factors contributing to the performance of a stock_market.
Let's use a normalization scheme based on subtracting the scaled change in the S&P 500 index over the same time period.
In Python:
```python
# Example calculation.
scaled_change = (50.59 - 50.69) / 50.69
scaled_s_p = (939.38 - 930.09) / 930.09
normalized_change = scaled_change - scaled_s_p
assert round(normalized_change, 3) == -0.012  # roughly a -1.2% normalized change
```
```
# Worked example of the normalization: scale the stock's price change and
# the S&P 500's change over the same period, then take the difference.
scaled_change = (50.59 - 50.69) / 50.69
scaled_s_p = (939.38 - 930.09) / 930.09
normalized_change = scaled_change - scaled_s_p
template = """
scaled change: {:2.3f}
scaled_s_p: {:2.3f}
normalized_change: {:2.3f}
"""
print(template.format(scaled_change, scaled_s_p, normalized_change))
```
### Compute normalized price change (shown above).
Let's join scaled price change (tomorrow_close / close) with the [gspc](https://en.wikipedia.org/wiki/S%26P_500_Index) symbol (symbol for the S&P index). Then we can normalize using the scheme described above.
**Learning objective 3**
**TODO**: Please fill in the `# TODO` in the code below.
```
snp500_index = "gspc"
%%with_globals print
%%bigquery --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.price_features_norm_per_change
AS
WITH
all_percent_changes AS
(
SELECT *, (tomo_close_m_close / Close) AS scaled_change
FROM `stock_market.price_features_delta`
),
s_p_changes AS
(SELECT
scaled_change AS s_p_scaled_change,
date
FROM all_percent_changes
WHERE symbol="{snp500_index}")
SELECT all_percent_changes.*,
s_p_scaled_change,
(
# TODO
) AS normalized_change
FROM
all_percent_changes LEFT JOIN s_p_changes
--# Add S&P change to all rows
ON all_percent_changes.date = s_p_changes.date
```
#### Verify results
```
%%with_globals print
%%bigquery df --project {PROJECT}
SELECT *
FROM stock_market.price_features_norm_per_change
LIMIT 10
df.head()
```
<h3><font color="#4885ed">Join with S&P 500 table and Create labels: {`up`, `down`, `stay`}</font> </h3>
Join the table with the list of S&P 500. This will allow us to limit our analysis to S&P 500 companies only.
Finally we can create labels. The following SQL statement should do:
```sql
CASE WHEN normalized_change < -0.01 THEN 'DOWN'
WHEN normalized_change > 0.01 THEN 'UP'
ELSE 'STAY'
END
```
**Learning objective 1**
```
# Label thresholds: normalized moves beyond +/-1% count as UP/DOWN;
# anything in between is STAY.
down_thresh = -0.01
up_thresh = 0.01
```
**TODO**: Please fill in the `CASE` function below.
```
%%with_globals print
%%bigquery df --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.percent_change_sp500
AS
SELECT *,
CASE
--# TODO
END AS direction
FROM stock_market.price_features_norm_per_change features
INNER JOIN `stock_src.snp500`
USING (symbol)
%%with_globals print
%%bigquery --project {PROJECT}
SELECT direction, COUNT(*) as cnt
FROM stock_market.percent_change_sp500
GROUP BY direction
%%with_globals print
%%bigquery df --project {PROJECT}
SELECT *
FROM stock_market.percent_change_sp500
LIMIT 20
df.columns
```
The dataset is still quite large and the majority of the days the market `STAY`s. Let's focus our analysis on dates where [earnings per share](https://en.wikipedia.org/wiki/Earnings_per_share) (EPS) information is released by the companies. The EPS data has 3 key columns surprise, reported_EPS, and consensus_EPS:
```
%%with_globals print
%%bigquery --project {PROJECT}
SELECT *
FROM `stock_src.eps`
LIMIT 10
```
The surprise column indicates the difference between the expected (consensus expected eps by analysts) and the reported eps. We can join this table with our derived table to focus our analysis during earnings periods:
```
%%with_globals print
%%bigquery --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.eps_percent_change_sp500
AS
SELECT a.*, b.consensus_EPS, b.reported_EPS, b.surprise
FROM stock_market.percent_change_sp500 a
INNER JOIN `stock_src.eps` b
ON a.Date = b.date
AND a.symbol = b.symbol
%%with_globals print
%%bigquery --project {PROJECT}
SELECT *
FROM stock_market.eps_percent_change_sp500
LIMIT 20
%%with_globals print
%%bigquery --project {PROJECT}
SELECT direction, COUNT(*) as cnt
FROM stock_market.eps_percent_change_sp500
GROUP BY direction
```
## Feature exploration
Now that we have created features capturing the recent movements of the company’s stock price, let's visualize them. This will help us understand the data better and possibly spot errors we may have made during our calculations.
As a reminder, we calculated the scaled prices 1 week, 1 month, and 1 year before the date that we are predicting at.
Let's write a re-usable function for aggregating our features.
**Learning objective 2**
```
def get_aggregate_stats(field, round_digit=2):
    """Run SELECT ... GROUP BY field, rounding to nearest digit."""
    sql = """
    SELECT {field}, COUNT(*) as cnt
    FROM
    (SELECT ROUND({field}, {round_digit}) AS {field}
    FROM stock_market.eps_percent_change_sp500) rounded_field
    GROUP BY {field}
    ORDER BY {field}"""
    sql = sql.format(field=field, round_digit=round_digit, PROJECT=PROJECT)
    result = bq.query(sql).to_dataframe()
    return result.dropna()
# Distribution of the year-long (260 trading days) average-price feature.
field = "close_AVG_prior_260_days"
# Clip extreme ratios so outliers don't dominate the histogram.
CLIP_MIN, CLIP_MAX = 0.1, 4.0
df = get_aggregate_stats(field)
values = df[field].clip(CLIP_MIN, CLIP_MAX)
counts = 100 * df["cnt"] / df["cnt"].sum() # Percentage.
ax = values.hist(weights=counts, bins=30, figsize=(10, 5))
ax.set(xlabel=field, ylabel="%");
```
**TODO** Use the `get_aggregate_stats` from above to visualize the `normalized_change` column.
```
field = "normalized_change"
# TODO
```
Let's look at results by day-of-week, month, etc.
```
# Datetime parts accepted by BigQuery's EXTRACT that we support grouping by.
VALID_GROUPBY_KEYS = (
    "DAYOFWEEK",
    "DAY",
    "DAYOFYEAR",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR",
)
# BigQuery EXTRACT(DAYOFWEEK ...) returns 1..7 with 1 = Sunday and
# 7 = Saturday.  Fixed: the original mapped 7 to "Sun" as well, which
# mislabeled Saturdays as Sundays in the day-of-week plots.
DOW_MAPPING = {
    1: "Sun",
    2: "Mon",
    3: "Tues",
    4: "Wed",
    5: "Thur",
    6: "Fri",
    7: "Sat",
}

def groupby_datetime(groupby_key, field):
    """Average `field` grouped by a datetime part extracted from `date`.

    Args:
        groupby_key: one of VALID_GROUPBY_KEYS (e.g. "DAYOFWEEK", "MONTH").
        field: numeric column of stock_market.eps_percent_change_sp500.

    Returns:
        DataFrame indexed by the datetime part with column avg_<field>,
        NaN rows dropped.

    Raises:
        ValueError: if groupby_key is not in VALID_GROUPBY_KEYS.
    """
    if groupby_key not in VALID_GROUPBY_KEYS:
        # Raise the specific built-in for a bad argument value instead of
        # the generic Exception, so callers can catch it precisely.
        raise ValueError("Please use a valid groupby_key.")
    sql = """
    SELECT {groupby_key}, AVG({field}) as avg_{field}
    FROM
    (SELECT {field},
    EXTRACT({groupby_key} FROM date) AS {groupby_key}
    FROM stock_market.eps_percent_change_sp500) foo
    GROUP BY {groupby_key}
    ORDER BY {groupby_key} DESC""".format(
        groupby_key=groupby_key, field=field, PROJECT=PROJECT
    )
    print(sql)
    df = bq.query(sql).to_dataframe()
    if groupby_key == "DAYOFWEEK":
        # Replace numeric day-of-week codes with readable labels.
        df.DAYOFWEEK = df.DAYOFWEEK.map(DOW_MAPPING)
    return df.set_index(groupby_key).dropna()
# Average normalized change by day of week.
field = "normalized_change"
df = groupby_datetime("DAYOFWEEK", field)
ax = df.plot(kind="barh", color="orange", alpha=0.7)
ax.grid(which="major", axis="y", linewidth=0)
# Average close price by day of week.
field = "close"
df = groupby_datetime("DAYOFWEEK", field)
ax = df.plot(kind="barh", color="orange", alpha=0.7)
ax.grid(which="major", axis="y", linewidth=0)
# Average normalized change by month.
field = "normalized_change"
df = groupby_datetime("MONTH", field)
ax = df.plot(kind="barh", color="blue", alpha=0.7)
ax.grid(which="major", axis="y", linewidth=0)
# Average normalized change by quarter.
field = "normalized_change"
df = groupby_datetime("QUARTER", field)
ax = df.plot(kind="barh", color="green", alpha=0.7)
ax.grid(which="major", axis="y", linewidth=0)
# Average close price by year.
field = "close"
df = groupby_datetime("YEAR", field)
ax = df.plot(kind="line", color="purple", alpha=0.7)
ax.grid(which="major", axis="y", linewidth=0)
# Average normalized change by year.
field = "normalized_change"
df = groupby_datetime("YEAR", field)
ax = df.plot(kind="line", color="purple", alpha=0.7)
ax.grid(which="major", axis="y", linewidth=0)
```
BONUS: How do our features correlate with the label `direction`? Build some visualizations. What features are most important? You can visualize this and do it statistically using the [`CORR`](https://cloud.google.com/bigquery/docs/reference/standard-sql/statistical_aggregate_functions) function.
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
```
# Make the notebook work under both Python 2 and 3.
from __future__ import division, print_function, unicode_literals
import numpy as np
import os
# Reproducible outputs.
np.random.seed(42)
# Nicer figures.
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Silence an unhelpful LAPACK "internal gelsd" warning from scipy/numpy.
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import metrics
# Regresion Logística
$P_{1}=(x_{1},y_{1})$
$P_{2}=(x_{2},y_{2})$
$P_{3}=(x_{3},y_{3})$
pero ahora y toma solo valores 0 y 1.
Un problema de clasificación, dado un punto
$P_{4}=(x_{4})$, queremos predecir
$y_{4}$
# El modelo logistico va estar dado por:
$t(x)=w_{1} x+w_{0}$
$w=[w_{1} w_{0}]$
$p(x)=\sigma(t)=\frac{1}{1+e^{-t(x)}}$
# En este caso se utiliza la funcion de costo que se llama binary crossentropy
$F(w)=-\frac{1}{n}\sum_{i=0}^{n}\left(y_{i}\log(p(x_{i}))+(1-y_{i})\log(1-p(x_{i}))\right)$
con 3 puntos $P_{1}$,$P_{2}$ y $P_{3}$, tenemos que:
$F(w)=-\frac{1}{3}(y_{1}\log(p(x_{1}))+(1-y_{1})\log(1-p(x_{1}))$+
$y_{2}\log(p(x_{2}))+(1-y_{2})\log(1-p(x_{2}))$+
$y_{3}\log(p(x_{3}))+(1-y_{3})\log(1-p(x_{3})))$
Note que si $y_{1}=1$, $y_{2}=1$ $y_{3}=0$, tenemos que:
$F(w_{i})=-\frac{1}{3}(\log(p(x_{1}))$+
$\log(p(x_{2}))$+
$\log(1-p(x_{3})))$
# Ahora derivando en relación a $w_{j}$
$\frac{dF}{dw_{j}}$=$\frac{\partial F}{\partial p}\frac{\partial p}{\partial t}\frac{\partial t}{\partial w_{j}}$
$\frac{\partial F}{\partial p}=-\frac{1}{n}\sum_{i=0}^{n}\left(\frac{y_{i}}{p(x_{i})}-\frac{1-y_{i}}{1-p(x_{i})}\right)$
$\frac{\partial p}{\partial t}$=$p(x_{i})(1-p(x_{i}))$
$\frac{\partial t}{\partial w_{j}}$=$x_{j}^{i}$
$\frac{dF}{dw_{j}}=-\frac{1}{n}\sum_{i=0}^{n}\left(\frac{y_{i}}{p(x_{i})}-\frac{1-y_{i}}{1-p(x_{i})}\right)p(x_{i})(1-p(x_{i}))x_{j}^{i}$
$\frac{dF}{dw_{j}}$=$\frac{1}{n}\sum_{i=0}^{n}(p(x_{i})-y_{i})x_{j}^{i}$
```
# Synthetic 1-D classification data: latent linear score t(x) = 5x + 3 (+ noise)
# pushed through the sigmoid; labels are the thresholded probabilities.
x = 10* np.random.rand(500, 1)-5
t= 5*x+3+ 0.1*np.random.randn(500, 1)
p = 1 / (1 + np.exp(-t))
# Boolean labels: True where the sigmoid probability exceeds 0.5.
y=p> 0.5
# Reference sigmoid curve for the plot below.
T= np.linspace(-10, 10, 50)
sig = 1 / (1 + np.exp(-T))
plt.figure(figsize=(9, 3))
plt.plot([-10, 10], [0, 0], "k-")
plt.plot([-10, 10], [0.5, 0.5], "k:")
plt.plot([-10, 10], [1, 1], "k:")
plt.plot([0, 0], [-1.1, 1.1], "k-")
plt.plot(T,sig, "b-", linewidth=2, label=r"$\sigma(t) = \frac{1}{1 + e^{-t}}$")
plt.plot(t,y, "ro", label=r"$\sigma(t) = \frac{1}{1 + e^{-t}}$")
plt.xlabel("t")
#plt.legend(loc="upper left", fontsize=20)
plt.axis([-10, 10, -0.1, 1.1])
plt.show()
# Design matrix with a leading bias column of ones.
A = np.c_[np.ones((len(x), 1)), x]
lr = 0.1  # learning rate
n_iteraciones = 2000
m = len(x)
# Random initialization of the weight vector [w0; w1] (bias first, matching A).
w = np.random.randn(2,1)
# Batch gradient descent on the binary cross-entropy cost.
for iteraciones in range(n_iteraciones):
    p=1/(1+np.exp(-A.dot( w)))
    if (iteraciones % 100 == 0):
        # Binary cross-entropy, logged every 100 iterations.
        F = (-1/m)*(np.sum((y*np.log(p)) + ((1-y)*(np.log(1-p)))))
        print(F )
    # Gradient of the cost: (1/m) * A^T (p - y).
    gradiente = 1/m * A.T.dot(p - y)
    w= w - lr* gradiente
#print("Cost after %i iteration is %f" %(i, cost))
w
# Evaluate the hand-trained weights w on a held-out 30% split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
# Test design matrix: bias column of ones followed by the feature column.
At = np.c_[np.ones((len(x_test), 1)), x_test]
# Sigmoid of the linear scores, thresholded at 0.5 to obtain class labels.
test_scores = At @ w
p = 1.0 / (1.0 + np.exp(-test_scores))
predictions = p > 0.5
# Confusion matrix of true vs. predicted labels on the test split.
cm = metrics.confusion_matrix(y_test, predictions)
print(cm)
# Fit the same model with scikit-learn to compare against the manual GD solution.
from sklearn.linear_model import LogisticRegression

log_reg = LogisticRegression(
    fit_intercept=True,
    max_iter=3000,
    penalty='l2',
    solver='newton-cg',
)
log_reg.fit(x_train, y_train)
# Learned bias and slope should be close to the hand-trained w above.
print(log_reg.intercept_)
print(log_reg.coef_)
# Confusion matrix on the same test split as before.
predictions2 = log_reg.predict(x_test)
cm = metrics.confusion_matrix(y_test, predictions2)
print(cm)
# Load the iris dataset and fit a one-feature (petal width) binary classifier.
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
print(iris.DESCR)
X = iris["data"][:, 3:] # petal width
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int is
# the drop-in replacement.
y = (iris["target"] == 2).astype(int) # 1 if Iris-Virginica, else 0
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver="liblinear", random_state=42)
log_reg.fit(X, y)
# Predicted class probabilities over a grid of petal widths in [0, 3] cm.
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
```
The figure in the book actually is actually a bit fancier:
```
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
# First grid point where P(virginica) reaches 0.5 — the decision boundary.
decision_boundary = X_new[y_proba[:, 1] >= 0.5][0]
plt.figure(figsize=(8, 3))
# Training points of each class along the x-axis (labels are 0/1).
plt.plot(X[y==0], y[y==0], "bs")
plt.plot(X[y==1], y[y==1], "g^")
# Vertical dotted line marking the boundary, plus the two probability curves.
plt.plot([decision_boundary, decision_boundary], [-1, 2], "k:", linewidth=2)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
plt.text(decision_boundary+0.02, 0.15, "Decision boundary", fontsize=14, color="k", ha="center")
plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')
plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')
plt.xlabel("Petal width (cm)", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 3, -0.02, 1.02])
plt.show()
decision_boundary
# Predictions on either side of the boundary: 1.7 cm -> virginica, 1.5 cm -> not.
log_reg.predict([[1.7], [1.5]])
# Two-feature logistic regression (petal length & width) with near-zero
# regularization (huge C), plotted with probability contours.
from sklearn.linear_model import LogisticRegression
X = iris["data"][:, (2, 3)] # petal length, petal width
# np.int was removed in NumPy 1.24; use the builtin int instead.
y = (iris["target"] == 2).astype(int)
log_reg = LogisticRegression(solver="liblinear", C=10**10, random_state=42)
log_reg.fit(X, y)
# Dense grid over the feature plane for evaluating probabilities.
x0, x1 = np.meshgrid(
    np.linspace(2.9, 7, 500).reshape(-1, 1),
    np.linspace(0.8, 2.7, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = log_reg.predict_proba(X_new)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs")
plt.plot(X[y==1, 0], X[y==1, 1], "g^")
zz = y_proba[:, 1].reshape(x0.shape)
contour = plt.contour(x0, x1, zz, cmap=plt.cm.brg)
# Analytic decision line: w0*x + w1*y + b = 0 solved for y.
left_right = np.array([2.9, 7])
boundary = -(log_reg.coef_[0][0] * left_right + log_reg.intercept_[0]) / log_reg.coef_[0][1]
plt.clabel(contour, inline=1, fontsize=12)
plt.plot(left_right, boundary, "k--", linewidth=3)
plt.text(3.5, 1.5, "Not Iris-Virginica", fontsize=14, color="b", ha="center")
plt.text(6.5, 2.3, "Iris-Virginica", fontsize=14, color="g", ha="center")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.axis([2.9, 7, 0.8, 2.7])
plt.show()
# Multinomial (softmax) regression over all three iris classes.
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10, random_state=42)
softmax_reg.fit(X, y)
# Grid over the feature plane for class regions and probability contours.
x0, x1 = np.meshgrid(
    np.linspace(0, 8, 500).reshape(-1, 1),
    np.linspace(0, 3.5, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = softmax_reg.predict_proba(X_new)
y_predict = softmax_reg.predict(X_new)
# zz1: P(class 1) for contour lines; zz: hard predictions for filled regions.
zz1 = y_proba[:, 1].reshape(x0.shape)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris-Virginica")
plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris-Versicolor")
plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris-Setosa")
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap)
contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg)
plt.clabel(contour, inline=1, fontsize=12)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 7, 0, 3.5])
plt.show()
# Prediction and per-class probabilities for a single flower (5 cm, 2 cm).
softmax_reg.predict([[5, 2]])
softmax_reg.predict_proba([[5, 2]])
```
| github_jupyter |
```
# default_exp env_wrappers
#hide
from nbdev import *
```
# env_wrappers
> Here we provide a useful set of environment wrappers.
```
%nbdev_export
import gym
import numpy as np
import torch
from typing import Optional, Union
%nbdev_export
class ToTorchWrapper(gym.Wrapper):
    """
    Environment wrapper for converting actions from torch.Tensors to np.array and converting observations from np.array to
    torch.Tensors.

    Args:
    - env (gym.Env): Environment to wrap. Should be a subclass of gym.Env and follow the OpenAI Gym API.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        self.env = env

    def reset(self, *args, **kwargs):
        """
        Reset the environment.

        Returns:
        - tensor_obs (torch.Tensor): output of reset as PyTorch Tensor.
        """
        obs = self.env.reset(*args, **kwargs)
        tensor_obs = torch.as_tensor(obs, dtype=torch.float32)
        return tensor_obs

    def step(self, action: torch.Tensor, *args, **kwargs):
        """
        Execute environment step.

        Converts from torch.Tensor action and returns observations as a torch.Tensor.

        Returns:
        - tensor_obs (torch.Tensor): Next observations as pytorch tensor.
        - reward (float or int): The reward earned at the current timestep.
        - done (bool): Whether the episode is in a terminal state.
        - infos (dict): The info dict from the environment.
        """
        action = self.action2np(action)
        obs, reward, done, infos = self.env.step(action, *args, **kwargs)
        tensor_obs = torch.as_tensor(obs, dtype=torch.float32)
        return tensor_obs, reward, done, infos

    def action2np(self, action: torch.Tensor):
        """
        Convert torch.Tensor action to NumPy.

        Args:
        - action (torch.Tensor): The action to convert.

        Returns:
        - np_act (np.array or int): The action converted to numpy.

        Raises:
        - TypeError: If the wrapped env's action space is neither Discrete nor Box.
        """
        # Discrete spaces expect a plain Python int; Box spaces expect an np.array.
        if isinstance(self.action_space, gym.spaces.Discrete):
            return int(action.squeeze().numpy())
        if isinstance(self.action_space, gym.spaces.Box):
            return action.numpy()
        # Previously this fell through to an unbound local (NameError) for any
        # other space type; fail with an explicit, descriptive error instead.
        raise TypeError(
            f"Unsupported action space type: {type(self.action_space).__name__}; "
            "only Discrete and Box are supported."
        )
# Render nbdev documentation for the wrapper class and its public methods.
show_doc(ToTorchWrapper)
show_doc(ToTorchWrapper.reset)
show_doc(ToTorchWrapper.step)
show_doc(ToTorchWrapper.action2np)
```
Example usage of the `ToTorchWrapper` is demonstrated below.
```
env = gym.make("CartPole-v1")
env = ToTorchWrapper(env)
obs = env.reset()
print("initial obs:", obs)
action = env.action_space.sample()
# need to convert action to PyTorch Tensor because ToTorchWrapper expects actions as Tensors.
# normally you would not need to do this, your PyTorch NN actor will output a Tensor by default.
action = torch.as_tensor(action, dtype=torch.float32)
stepped = env.step(action)
print("stepped once:", stepped)
print("\nEntering interaction loop! \n")
# interaction loop: run a random policy for 100 steps, resetting on episode end.
obs = env.reset()
ret = 0
for i in range(100):
    action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
    state, reward, done, _ = env.step(action)
    ret += reward
    if done:
        print(f"Random policy got {ret} reward!")
        obs = env.reset()
        ret = 0
        if i < 99:
            print("Starting new episode.")
    if i == 99:
        print(f"\nInteraction loop ended! Got reward {ret} before episode was cut off.")
        break
#hide
# Smoke tests: both discrete (CartPole) and continuous (LunarLander) action
# spaces should round-trip through the wrapper and yield torch.Tensors.
env = gym.make("CartPole-v1")
env = ToTorchWrapper(env)
obs = env.reset()
assert type(obs) == torch.Tensor
action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
step_out = env.step(action)
assert type(step_out[0]) == torch.Tensor
env = gym.make("LunarLanderContinuous-v2")
env = ToTorchWrapper(env)
obs = env.reset()
assert type(obs) == torch.Tensor
action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
step_out = env.step(action)
assert type(step_out[0]) == torch.Tensor
%nbdev_export
class StateNormalizeWrapper(gym.Wrapper):
    """
    Environment wrapper for normalizing states.

    Maintains exponentially-weighted running estimates of the observation mean
    and variance, and maps each state to (state - mean) / (std + eps).

    Args:
    - env (gym.Env): Environment to wrap.
    - beta (float): Beta parameter for running mean and variance calculation.
    - eps (float): Parameter to avoid division by zero in case variance goes to zero.
    """

    def __init__(self, env: gym.Env, beta: Optional[float] = 0.99, eps: Optional[float] = 1e-8):
        super().__init__(env)
        self.env = env
        # Running estimates, shaped like a single observation.
        self.mean = np.zeros(self.observation_space.shape)
        self.var = np.ones(self.observation_space.shape)
        self.beta = beta
        self.eps = eps

    def normalize(self, state: np.ndarray):
        """
        Update running mean and variance parameters and normalize input state.

        Args:
        - state (np.ndarray): State to normalize and to use to calculate update.

        Returns:
        - norm_state (np.ndarray): Normalized state.
        """
        # Exponentially-weighted updates; note the variance update deliberately
        # uses the already-updated mean.
        self.mean = self.beta * self.mean + (1. - self.beta) * state
        self.var = self.beta * self.var + (1. - self.beta) * np.square(state - self.mean)
        norm_state = (state - self.mean) / (np.sqrt(self.var) + self.eps)
        return norm_state

    def reset(self, *args, **kwargs):
        """
        Reset environment and return normalized state.

        Returns:
        - norm_state (np.ndarray): Normalized state.
        """
        # Forward reset arguments to the wrapped env (the original accepted
        # *args/**kwargs but silently dropped them).
        state = self.env.reset(*args, **kwargs)
        norm_state = self.normalize(state)
        return norm_state

    def step(self, action: Union[np.ndarray, int, float], *args, **kwargs):
        """
        Step environment and normalize state.

        Args:
        - action (np.ndarray or int or float): Action to use to step the environment.

        Returns:
        - norm_state (np.ndarray): Normalized state.
        - reward (int or float): Reward earned at step.
        - done (bool): Whether the episode is over.
        - infos (dict): Any infos from the environment.
        """
        state, reward, done, infos = self.env.step(action, *args, **kwargs)
        norm_state = self.normalize(state)
        return norm_state, reward, done, infos
```
**Note: Testing needed for StateNormalizeWrapper. At present, use `ToTorchWrapper` for guaranteed working.**
```
# Render nbdev documentation for StateNormalizeWrapper and its public methods.
show_doc(StateNormalizeWrapper)
show_doc(StateNormalizeWrapper.reset)
show_doc(StateNormalizeWrapper.normalize)
show_doc(StateNormalizeWrapper.step)
```
Here is a demonstration of using the `StateNormalizeWrapper`.
```
env = gym.make("CartPole-v1")
env = StateNormalizeWrapper(env)
obs = env.reset()
print("initial obs:", obs)
# the StateNormalizeWrapper expects NumPy arrays, so there is no need to convert action to PyTorch Tensor.
action = env.action_space.sample()
stepped = env.step(action)
print("stepped once:", stepped)
print("\nEntering interaction loop! \n")
# interaction loop: random policy for 100 steps with normalized observations.
obs = env.reset()
ret = 0
for i in range(100):
    action = env.action_space.sample()
    state, reward, done, _ = env.step(action)
    ret += reward
    if done:
        print(f"Random policy got {ret} reward!")
        obs = env.reset()
        ret = 0
        if i < 99:
            print("Starting new episode.")
    if i == 99:
        print(f"\nInteraction loop ended! Got reward {ret} before episode was cut off.")
        break
#hide
# Smoke tests: StateNormalizeWrapper alone, then composed with ToTorchWrapper.
env = gym.make("CartPole-v1")
env = StateNormalizeWrapper(env)
assert env.reset() is not None
action = env.action_space.sample()
assert env.step(action) is not None
env = ToTorchWrapper(env)
assert env.reset() is not None
assert type(env.reset()) == torch.Tensor
action = env.action_space.sample()
t_action = torch.as_tensor(action, dtype=torch.float32)
assert env.step(t_action) is not None
assert type(env.step(t_action)[0]) == torch.Tensor
%nbdev_export
class RewardScalerWrapper(gym.Wrapper):
    """
    A class for reward scaling over training.

    Calculates a running mean and standard deviation of observed rewards and
    standardizes each reward using them.

    Computes: $(r_t - \mu) / (\sigma + eps)$

    (Note: despite the class name, the reward is mean-centered as well as
    scaled by the running standard deviation — see `scale`.)

    Args:
    - env (gym.Env): Environment to wrap.
    - beta (float): Smoothing factor for the running mean/variance estimates.
    - eps (float): Small constant to avoid division by zero.
    """

    def __init__(self, env: gym.Env, beta: Optional[float] = 0.99, eps: Optional[float] = 1e-8):
        super().__init__(env)
        self.beta = beta
        self.eps = eps
        # Running estimates of reward variance and mean.
        self.var = 1
        self.mean = 0

    def scale(self, reward: Union[int, float]):
        """
        Update running mean and variance for rewards, then standardize the reward.

        Args:
        - reward (int or float): reward to scale.

        Returns:
        - scaled_rew (float): (reward - mean) / (sqrt(var) + eps) using the
          updated running statistics.
        """
        # Exponentially-weighted updates; the variance update uses the
        # already-updated mean.
        self.mean = self.beta * self.mean + (1. - self.beta) * reward
        self.var = self.beta * self.var + (1. - self.beta) * np.square(reward - self.mean)
        scaled_rew = (reward - self.mean) / (np.sqrt(self.var) + self.eps)
        return scaled_rew

    def step(self, action, *args, **kwargs):
        """
        Step the environment and scale the reward.

        Args:
        - action (np.array or int or float): Action to use to step the environment.

        Returns:
        - state (np.array): Next state from environment.
        - scaled_rew (float): reward standardized using the running statistics.
        - done (bool): Indicates whether the episode is over.
        - infos (dict): Any information from the environment.
        """
        state, reward, done, infos = self.env.step(action, *args, **kwargs)
        scaled_rew = self.scale(reward)
        return state, scaled_rew, done, infos
#hide
# Smoke tests: wrappers compose in any order and preserve output types.
env = gym.make("CartPole-v1")
env = RewardScalerWrapper(env)
assert env.reset() is not None
action = env.action_space.sample()
assert env.step(action) is not None
assert type(env.step(action)[0]) == np.ndarray
env = StateNormalizeWrapper(env)
assert env.reset() is not None
action = env.action_space.sample()
assert env.step(action) is not None
assert type(env.step(action)[0]) == np.ndarray
env = ToTorchWrapper(env)
assert env.reset() is not None
assert type(env.reset()) == torch.Tensor
action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
assert env.step(action) is not None
assert type(env.step(action)[0]) == torch.Tensor
```
**Note: Testing needed for RewardScalerWrapper. At present, use `ToTorchWrapper` for guaranteed working.**
```
# Render nbdev documentation for RewardScalerWrapper and its public methods.
show_doc(RewardScalerWrapper)
show_doc(RewardScalerWrapper.scale)
show_doc(RewardScalerWrapper.step)
```
An example usage of the RewardScalerWrapper.
```
env = gym.make("CartPole-v1")
env = RewardScalerWrapper(env)
obs = env.reset()
print("initial obs:", obs)
action = env.action_space.sample()
stepped = env.step(action)
print("stepped once:", stepped)
print("\nEntering interaction loop! \n")
# interaction loop: random policy for 100 steps with scaled rewards.
obs = env.reset()
ret = 0
for i in range(100):
    action = env.action_space.sample()
    state, reward, done, _ = env.step(action)
    ret += reward
    if done:
        print(f"Random policy got {ret} reward!")
        obs = env.reset()
        ret = 0
        if i < 99:
            print("Starting new episode.")
    if i == 99:
        print(f"\nInteraction loop ended! Got reward {ret} before episode was cut off.")
        break
```
## Combining Wrappers
All of these wrappers can be composed together! Simply be sure to call the `ToTorchWrapper` last, because the others expect NumPy arrays as input, and the `ToTorchWrapper` converts outputs to PyTorch tensors. Below is an example.
```
# Wrappers compose; ToTorchWrapper must come last because the inner wrappers
# operate on NumPy arrays.
env = gym.make("CartPole-v1")
env = StateNormalizeWrapper(env)
print(f"After wrapping with StateNormalizeWrapper, output is still a NumPy array: {env.reset()}")
env = RewardScalerWrapper(env)
print(f"After wrapping with RewardScalerWrapper, output is still a NumPy array: {env.reset()}")
env = ToTorchWrapper(env)
print(f"But after wrapping with ToTorchWrapper, output is now a PyTorch Tensor: {env.reset()}")
%nbdev_export
class BestPracticesWrapper(gym.Wrapper):
    """
    This wrapper combines the wrappers which we think (from experience and from reading papers/blogs and watching lectures)
    constitute best practices.

    At the moment it combines the wrappers below in the order listed:

    1. StateNormalizeWrapper
    2. RewardScalerWrapper
    3. ToTorchWrapper

    Args:
    - env (gym.Env): Environment to wrap.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        # Order matters: ToTorchWrapper goes outermost because the inner
        # wrappers operate on NumPy arrays.
        env = StateNormalizeWrapper(env)
        env = RewardScalerWrapper(env)
        self.env = ToTorchWrapper(env)

    def reset(self, *args, **kwargs):
        """
        Reset environment.

        Returns:
        - obs (torch.Tensor): Starting observations from the environment.
        """
        # Accept and forward reset arguments (the original signature took none,
        # inconsistent with the sibling wrappers).
        obs = self.env.reset(*args, **kwargs)
        return obs

    def step(self, action, *args, **kwargs):
        """
        Step the environment forward using input action.

        Args:
        - action (torch.Tensor): Action to step the environment with.

        Returns:
        - obs (torch.Tensor): Next step observations.
        - reward (int or float): Reward for the last timestep.
        - done (bool): Whether the episode is over.
        - infos (dict): Dictionary of any info from the environment.
        """
        obs, reward, done, infos = self.env.step(action, *args, **kwargs)
        return obs, reward, done, infos
#hide
# Smoke test: BestPracticesWrapper resets/steps and returns torch.Tensors.
env = gym.make("CartPole-v1")
env = BestPracticesWrapper(env)
assert env.reset() is not None
assert type(env.reset()) == torch.Tensor
action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
stepped = env.step(action)
assert stepped is not None
assert type(stepped[0]) == torch.Tensor
```
**Note: Testing needed for BestPracticesWrapper. At present, use `ToTorchWrapper` for guaranteed working.**
```
# Render nbdev documentation for BestPracticesWrapper and its public methods.
show_doc(BestPracticesWrapper)
show_doc(BestPracticesWrapper.reset)
show_doc(BestPracticesWrapper.step)
```
Below is a usage example of the `BestPracticesWrapper`. It is used in the same way as the `ToTorchWrapper`.
```
env = gym.make("CartPole-v1")
env = BestPracticesWrapper(env)
obs = env.reset()
print("initial obs:", obs)
action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
stepped = env.step(action)
print("stepped once:", stepped)
print("\nEntering interaction loop! \n")
# interaction loop: random policy for 100 steps through the combined wrappers.
obs = env.reset()
ret = 0
for i in range(100):
    action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
    state, reward, done, _ = env.step(action)
    ret += reward
    if done:
        print(f"Random policy got {ret} reward!")
        obs = env.reset()
        ret = 0
        if i < 99:
            print("Starting new episode.")
    if i == 99:
        print(f"\nInteraction loop ended! Got reward {ret} before episode was cut off.")
        break
#hide
# Export all %nbdev_export cells of this notebook to the library module.
notebook2script()
```
| github_jupyter |
### 1. Bias-Variance decomposition
Вспомним, что функцию потерь в задачах регрессии или классификации можно разложить на три компоненты: смещение (bias), дисперсию (variance) и шум (noise). Эти компоненты позволяют описать сложность алгоритма, альтернативно сравнению ошибок на тренировочной и тестовой выборках. Хотя такое разложение можно построить для произвольной функции потерь, наиболее просто (и классически) оно строится для среднеквадратичной функции в задаче регрессии, что мы и рассмотрим ниже.
Пусть $(X, y)$ – некоторая выборка. Обучим интересующий нас алгоритм на этой выборке и сделаем предсказания на ней. Обозначим предсказания как $\hat{y}$. Тогда
$$
\mathrm{bias} := \mathbb{E}(\hat{y}) - y.
$$
$$
\mathrm{variance} := \mathbb{E}[\mathbb{E}(\hat{y}) - \hat{y}]^2
$$
$$
\mathrm{noise} := \mathbb{E}[y - \mathbb{E}(y)]^2
$$
Ожидаемую среднеквадратичную ошибку на тренировочной выборке можно разложить как
$$
\mathbb{E}[y - \hat{y}]^2 = \mathrm{bias}^2 + \mathrm{variance} + \mathrm{noise}.
$$
**Задание для самых смелых:** покажите, что это разложение корректно. Проверьте себя [здесь](https://github.com/esokolov/ml-course-hse/blob/master/2020-fall/lecture-notes/lecture08-ensembles.pdf).
**Техническое замечание:** все математические ожидания в разложении выше берутся по объектам тренировочной выборки, то есть это разложение верно для среднеквадратичной ошибки на тренировочной выборке, которую иногда называют MSE for estimator. Тем не менее, нам интересна и величина ошибки на ненаблюдаемых данных, которую иногда называют MSE for predictor. В этом случае математическое ожидание ошибки следует брать по ненаблюдаемым объектам. Для решения этой проблемы зачастую предполагается, что тренировочная и тестовая выборка имеют одинаковое распределение, и математическое ожидание берётся по всевозможным вариациям тренировочной выборки. Суть разложения при этом не изменится, однако запись его станет более громоздкой. Посмотреть на это можно [здесь](https://towardsdatascience.com/mse-and-bias-variance-decomposition-77449dd2ff55).
Заметим, что так как на практике мы считаем оценки математических ожиданий и зачастую имеем доступ к тестовой выборке, то проблем с расчётом **оценок** MSE for estimator и MSE for predictor не возникает.
Разберёмся с интерпретацией компонент.
- $\mathrm{Bias}$ – показывает отклонение среднего ответа алгоритма от ответа идеального алгоритма. $\mathrm{Bias}$ отражает ошибку модели, возникающую из-за простоты модели. Высокое смещение обычно является показателем того, что модель недообучена.
- $\mathrm{Variance}$ – показывает разброс ответов алгоритмов относительно среднего ответа алгоритма. Показывает, насколько сильно небольшие изменения в обучающей выборке скажутся на предсказаниях алгоритма. $\mathrm{Variance}$ отражает ошибку модели, возникающую из-за чрезмерной сложности модели. Высокая дисперсия обычно является показателем того, что модель переобучена.
- $\mathrm{Noise}$ – ошибка идеального классификатора, естественный неустранимый шум в данных.
Посмотрим наглядно на примере полиномиальной регрессии.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Dataset: noisy sine observations on [-5, 5].
np.random.seed(42)
N = 10
X = np.linspace(-5, 5, N).reshape(-1, 1)
y = np.sin(X) + np.random.normal(0, 0.2, size = N).reshape(-1, 1)
X_test = np.linspace(-5, 5, N // 2).reshape(-1, 1)
y_test = np.sin(X_test) + np.random.normal(0, 0.2, size = N // 2).reshape(-1, 1)
# Very simple model (regression on a constant): predicts the training mean.
too_simple_model_predictions = np.mean(y) * np.ones_like(y)
# Reasonably complex model: cubic polynomial regression.
X_ok = np.hstack([X, X ** 2, X ** 3])
ok_model = LinearRegression()
ok_model.fit(X_ok, y)
ok_model_predictions = ok_model.predict(X_ok)
# Very complex model: degree-10 polynomial regression (prone to overfitting).
X_compl = np.hstack([X, X ** 2, X ** 3, X ** 4, X ** 5, X ** 6, X ** 7, X ** 8, X ** 9, X ** 10])
compl_model = LinearRegression()
compl_model.fit(X_compl, y)
compl_model_predictions = compl_model.predict(X_compl)
plt.figure(figsize = (10, 7))
plt.scatter(X, y, label = 'Тренировочная выборка')
plt.scatter(X_test, y_test, c = 'r', label = 'Тестовая выборка')
plt.plot(X, too_simple_model_predictions, label = 'Очень простая модель')
plt.plot(X, ok_model_predictions, label = 'В меру сложная модель')
plt.plot(X, compl_model_predictions, label = 'Очень сложная модель')
plt.grid()
plt.legend();
```
- Очень простая модель имеет большое смещение (bias), но малую (нулевую) дисперсию (variance). Модель явно недообучена.
- В меру сложная модель имеет небольшое смещение (bias) и небольшую дисперсию (variance).
- Очень сложная модель имеет небольшое смещение (bias), но большую дисперсию (variance). Модель явно переобучена.
**Задание:** пользуясь определениями выше, объясните, почему это так.
**Задание:** прокомментируйте величину смещения и дисперсии для следующих моделей:
1. Линейная регрессия, обучаемая на большой выборке без выбросов и линейно зависимых признаков, в которой признаки сильно коррелируют с целевой переменной.
2. Решающее дерево, которое строится до тех пор, пока в листах не окажется по одному объекту.
3. Логистическая регрессия, относящая все точки к одному классу.
### 1.A. Bias-Variance tradeoff
Из описания выше можно заметить, что при обучении моделей возникает выбор между смещением и дисперсией: недообученная модель имеет низкую дисперсию, но высокое смещение, а переобученная – низкое смещение, но высокую дисперсию. Этот выбор можно отобразить на картинке ([источник](https://www.bradyneal.com/bias-variance-tradeoff-textbooks-update)). Здесь Total Error – ошибка на тестовой выборке (generalization error).

Вывод из неё очевиден: строить следует оптимальные по сложности модели.
Возникает ли такой выбор при обучении любой модели? Последние исследования показывают, что поведение ошибки при обучении некоторых (современных) моделей не соответствует такой U-образной форме. Например, было показано, что ошибка на тестовой выборке продолжает убывать при расширении (увеличении числа слоёв) нейронных сетей:
<img src="https://www.bradyneal.com/img/bias-variance/neyshabur.jpg" alt="drawing" width="400"/>
В таких моделях поведение ошибки приобретает сложный вид:
<img src="https://www.bradyneal.com/img/bias-variance/double_descent.jpg" alt="drawing" width="800"/>
### 3. От деревьев к случайному лесу
#### 3.1 Решающее дерево
Мотивацию построения алгоритма случайного леса (Random Forest) удобно рассматривать в терминах смещения и дисперсии. Начнём с построения решающего дерева.
```
# Example from: http://rasbt.github.io/mlxtend/user_guide/evaluate/bias_variance_decomp/
from sklearn.model_selection import train_test_split
from mlxtend.data import boston_housing_data
X, y = boston_housing_data()
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size = 0.3,
                                                    random_state = 123,
                                                    shuffle = True)
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
# TODO: fit an unconstrained decision tree on the training set
# TODO: compute MSE on the training and test sets
from mlxtend.evaluate import bias_variance_decomp
# TODO: use bias_variance_decomp to print the model's average bias and average
# variance on the test set
```
Как мы обсуждали на предыдущем семинаре, такое дерево окажется сильно переобученным (высокая дисперсия и низкое смещение). Постараемся исправить это. На лекции мы обсуждали, что один из способов борьбы с переобучением – построение композиций моделей. На этом семинаре мы рассмотрим построение композиций при помощи бэггинга.
#### 3.2 Бэггинг
Вспомним суть алгоритма:
1. Обучаем много деревьев на бутстрапированных подвыборках исходной выборки независимо друг от друга. Бутстрапированную подвыборку строим при помощи выбора $N$ (размер исходной выборки) наблюдений из исходной выборки с возвращением.
2. Усредняем предсказания всех моделей (например, берём арифметическое среднее).
Можно показать, что модель, построенная при помощи бэггинга, будет иметь **то же смещение**, что и у отдельных деревьев, но значительно **меньшую дисперсию** (при выполнении некоторых условий).
```
from sklearn.ensemble import BaggingRegressor
base_tree = DecisionTreeRegressor(random_state = 123)
# TODO: fit a bagging ensemble of 20 trees, each built without constraints
# TODO: print the model's average bias and average variance on the test set
```
Как мы видим, по сравнению с единичным деревом смещение практически не изменилось, но дисперсия уменьшилась в несколько раз!
Посмотрим, как это отразилось на среднеквадратичной ошибке.
```
# TODO: compute MSE on the training and test sets for the bagging ensemble
```
Среднеквадратичная ошибка на тренировочной выборке больше не равна 0, а на тестовой – уменьшилась, что говорит о том, что мы успешно победили переобучение единичного решающего дерева.
Можем ли мы снизить переобучение ещё сильнее? Можем!
#### 3.3 Случайный лес
При построении каждого дерева в бэггинге в ходе создания очередного узла будем выбирать случайный набор признаков, на основе которых производится разбиение. В результате такой процедуры мы уменьшим корреляцию между деревьями, за счёт чего снизим дисперсию итоговой модели. Такой алгоритм называется **случайным лесом** (Random Forest).
По сравнению с единичным деревом к параметрам случайного леса добавляются:
- `max_features` – число признаков, на основе которых проводятся разбиения при построении дерева.
- `n_estimators` – число деревьев.
Естественно, все параметры, относящиеся к единичному дереву, сохраняются для случайного леса.
```
from sklearn.ensemble import RandomForestRegressor
# TODO: обучите случайный лес с 20 деревьями, каждое из которых строится без ограничений
# TODO: выведите среднее смещение и среднюю дисперсию модели на тестовой выборке
# TODO: рассчитайте MSE на тренировочной и тестовой выборках для случайного леса
```
Как мы видим, по сравнению с бэггингом смещение вновь осталось практически неизменным, а дисперсия немного уменьшилась. Конечно, если подобрать хорошие гиперпараметры, то получится снизить дисперсию ещё больше.
Ошибка на тренировочной выборке увеличилась, а на тестовой – уменьшилась, что означает, что мы добились нашей цели в борьбе с переобученными деревьями!
### 4. Особенности случайного леса
#### 4.1 Число деревьев и "Случайный лес не переобучается"
В своём [блоге](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm#remarks) Лео Бриман (Leo Breiman), создатель случайного леса, написал следующее:
> Random forest does not overfit. You can run as many trees as you want.
**Обратите внимание:** как говорилось на лекции, случайный лес не переобучается именно с ростом числа деревьев (за счёт совместной работы бэггинга и использования случайных подпространств), но не в принципе. Посмотрим на поведение случайного леса при росте числа деревьев.
```
X, y = boston_housing_data()
# Single train/test split. The original cell called train_test_split twice
# with the same arguments (shuffle=True is the default), so the second call
# simply overwrote the first with an identical result -- one call suffices.
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size = 0.3,
                                                    random_state = 123,
                                                    shuffle = True)
# Track train/test MSE while growing the forest one tree at a time.
# NOTE: each iteration refits the whole forest from scratch; with
# warm_start=True this sweep could reuse already-built trees.
n_trees = 100
train_loss = []
test_loss = []
for i in range(1, n_trees):
    rf = RandomForestRegressor(n_estimators = i, random_state = 123)
    rf.fit(X_train, y_train)
    train_loss.append(mean_squared_error(y_train, rf.predict(X_train)))
    test_loss.append(mean_squared_error(y_test, rf.predict(X_test)))
# Plot both learning curves against the number of trees.
plt.figure(figsize = (10, 7))
plt.grid()
plt.plot(train_loss, label = 'MSE_train')
plt.plot(test_loss, label = 'MSE_test')
plt.ylabel('MSE')
plt.xlabel('# trees')
plt.legend();
```
Как и ожидалось, по достижении некоторого числа деревьев обе ошибки практически не изменяются, то есть переобучения при росте числа деревьев не происходит.
Однако практика показывает, что при изменении какого-нибудь другого параметра на реальных данных переобучение может произойти: [пример 1](https://datascience.stackexchange.com/questions/1028/do-random-forest-overfit), [пример 2](https://mljar.com/blog/random-forest-overfitting/). Например, случайный лес с ограниченными по глубине деревьями может предсказывать более точно, чем лес без ограничений.
В нашем же случае случайный лес, скорее, лишь страдает от регуляризации. Например, посмотрим на поведение модели при изменении максимальной глубины деревьев (поэкспериментируйте с другими параметрами).
```
# Sweep the maximum tree depth (1..39) at a fixed forest size of 20 trees
# and record train/test MSE for every depth value.
max_depth = 40
train_loss = []
test_loss = []
for i in range(1, max_depth):
    rf = RandomForestRegressor(n_estimators = 20, max_depth = i, random_state = 123)
    rf.fit(X_train, y_train)
    train_loss.append(mean_squared_error(y_train, rf.predict(X_train)))
    test_loss.append(mean_squared_error(y_test, rf.predict(X_test)))
# Plot both curves against max_depth.
plt.figure(figsize = (10, 7))
plt.grid()
plt.plot(train_loss, label = 'MSE_train')
plt.plot(test_loss, label = 'MSE_test')
plt.ylabel('MSE')
plt.xlabel('max_depth')
plt.legend();
```
Переобучение не наблюдается. Вообще же, как обычно, гиперпараметры случайного леса стоит подбирать на кросс-валидации.
#### 4.2 Out-of-bag-ошибка
Как мы обсудили выше, при построении случайного леса каждое дерево строится на бутстрапированной подвыборке, полученной из исходной обучающей выборки случайным набором с повторениями. Понятно, что некоторые наблюдения попадут в такую подвыборку несколько раз, а некоторые не войдут в неё вообще. Для каждого дерева мы можем рассмотреть объекты, которые не участвовали в обучении и использовать их для валидации.
Усреднённая ошибка на неотобранных образцах по всему случайному лесу называется **out-of-bag-ошибкой**.
```
X, y = boston_housing_data()
# One split is enough -- the original duplicated train_test_split call with
# identical arguments was removed (it produced the same split twice).
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size = 0.3,
                                                    random_state = 123,
                                                    shuffle = True)
# oob_score_ = R2 computed on out-of-bag samples (the observations each
# tree never saw during its bootstrap fit).
rf = RandomForestRegressor(n_estimators = 100, random_state = 123, oob_score = True)
rf.fit(X_train, y_train)
rf.oob_score_
```
#### 4.3 Важность признаков
Как и решающие деревья, случайный лес позволяет оценивать важность признаков.
```
# Просто чтобы подгрузить названия признаков
from sklearn.datasets import load_boston
data = load_boston()
plt.figure(figsize = (10, 7))
plt.bar(data['feature_names'], rf.feature_importances_);
```
Будьте осторожны с сильно коррелирующими признаками. Посмотрим, что произойдёт с важностью, если добавить в выборку линейно зависимый признак.
```
RM_mc = (X_train[:, 5] * 2 + 3).reshape(-1, 1)
X_train_new = np.hstack((X_train, RM_mc))
rf.fit(X_train_new, y_train)
plt.figure(figsize = (10, 7))
names = list(data['feature_names'])
names.append('RM_mc')
plt.bar(names, rf.feature_importances_);
```
Важности перераспределились между линейно зависимыми признаками `RM` и `RM_mc`. Не забывайте учитывать корреляции между признаками, если вы используете этот метод для отбора признаков. Также обратите внимание на предупреждение в документации `sklearn`: не стоит использовать этот метод и для признаков, в которых есть много уникальных значений (например, для категориальных признаков с большим числом категорий).
### 5. Тестирование случайного леса на разных данных
Ниже представлены шаблоны для сравнения случайного леса и других моделей на данных разных типов. Проведите побольше экспериментов, используя разные модели и метрики. Попробуйте подобрать гиперпараметры случайного леса так, чтобы достичь какого-нибудь порога качества.
**Внимание:** в этой части вам предстоит скачивать объёмные наборы данных. Не забудьте удалить их после семинара, если не планируете использовать их в дальнейшем, чтобы они не занимали лишнее место на вашем компьютере.
**! Случайный лес может обучаться достаточно долго.**
#### 5.1 Бинарная классификация на примере [Kaggle Predicting a Biological Response](https://www.kaggle.com/c/bioresponse/data?select=train.csv)
```
# Загрузка данных
!wget -O 'kaggle_response.csv' -q 'https://www.dropbox.com/s/uha70sej5ugcrur/_train_sem09.csv?dl=1'
data = pd.read_csv('kaggle_response.csv')
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 123)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
# TODO: обучите логистическую регрессию и случайный лес с дефолтными параметрами
# Сравните их AUC ROC на тестовой выборке
```
#### 5.2 Изображения на примере [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist)
```
# Загрузка данных
import torchvision
fmnist = torchvision.datasets.FashionMNIST('./', download = True)
X = fmnist.data.numpy().reshape(-1, 28 * 28)
y = fmnist.targets.numpy()
plt.imshow(X[0, :].reshape(28, 28))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 123)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# TODO: обучите случайный лес и kNN с дефолтными параметрами
# Сравните их доли правильных ответов на тестовой выборке
```
#### 5.3 Тексты на примере бинарной классификации твитов из семинара 10
Скачиваем куски датасета ([источник](http://study.mokoron.com/)): [положительные](https://www.dropbox.com/s/fnpq3z4bcnoktiv/positive.csv?dl=0), [отрицательные](https://www.dropbox.com/s/r6u59ljhhjdg6j0/negative.csv).
```
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import MaxAbsScaler
# Предобработка из семинара 10
positive = pd.read_csv('positive.csv', sep=';', usecols=[3], names=['text'])
positive['label'] = 'positive'
negative = pd.read_csv('negative.csv', sep=';', usecols=[3], names=['text'])
negative['label'] = 'negative'
df = positive.append(negative)
X_train, X_test, y_train, y_test = train_test_split(df.text, df.label, random_state=13)
vec = CountVectorizer(ngram_range=(1, 1))
bow = vec.fit_transform(X_train)
bow_test = vec.transform(X_test)
scaler = MaxAbsScaler()
bow = scaler.fit_transform(bow)
bow_test = scaler.transform(bow_test)
X_train = bow
X_test = bow_test
from sklearn.tree import DecisionTreeClassifier
# TODO: обучите случайный лес с числом деревьев 100 и макс. глубиной дерева 20
# и решающее дерево с макс. глубиной 20
# Сравните их доли правильных ответов на тестовой выборке
```
| github_jupyter |
```
%matplotlib notebook
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.losses import categorical_crossentropy
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd
import sys
sys.path
sys.path.append("../../models/classification")
from models import ResNet, AlexNet
from dataset import Dataset
TRAIN_PATH = "../../../honours_project_data/main/train_high.txt"
VAL_PATH = "../../../honours_project_data/main/test_high.txt"
train_data = Dataset(TRAIN_PATH, res=(120, 160))
val_data = Dataset(VAL_PATH, res=(120, 160))
X, y = val_data.get_all()
VIS_WEIGHTS_PATH = "../../../out/new_out/ResNet/ResNet_rgb_weights.h5"
LWIR_WEIGHTS_PATH = "../../../out/new_out/ResNet/ResNet_lwir_weights.h5"
FUSION_WEIGHTS_PATH = "../../../out/new_out/ResNet/ResNet_fusion_weights.h5"
model_rgb = ResNet("rgb", train_data.num_classes(), train_data.shape()).get_model()
model_lwir = ResNet("lwir", train_data.num_classes(), train_data.shape()).get_model()
model_fusion = ResNet("fusion", train_data.num_classes(), train_data.shape()).get_model()
model_rgb.load_weights(VIS_WEIGHTS_PATH)
model_lwir.load_weights(LWIR_WEIGHTS_PATH)
model_fusion.load_weights(FUSION_WEIGHTS_PATH)
preds_rgb = model_rgb.predict(X)
preds_lwir = model_lwir.predict(X)
preds_fusion = model_fusion.predict(X)
scores = []
for img, label, rgb, lwir, fusion in zip(X, y, preds_rgb, preds_lwir, preds_fusion):
mean_rgb = np.mean(img[..., :3])
std_rgb = np.std(img[..., :3])
mean_lwir = np.mean(img[..., 3])
std_lwir = np.std(img[..., 3])
loss_rgb = categorical_crossentropy(label, rgb)
loss_lwir = categorical_crossentropy(label, lwir)
loss_fusion = categorical_crossentropy(label, fusion)
scores.append((mean_rgb, std_rgb, mean_lwir, std_lwir, loss_rgb, loss_lwir, loss_fusion))
scores = pd.DataFrame(np.array(scores), columns=["mean_rgb", "std_rgb", "mean_lwir", "std_lwir", "rgb", "lwir", "fusion"])
scores_mean_rgb = scores.groupby("mean_rgb").mean()
scores_std_rgb = scores.groupby("std_rgb").mean()
scores_mean_lwir = scores.groupby("mean_lwir").mean()
scores_std_lwir = scores.groupby("std_lwir").mean()
fig, ax = plt.subplots(2, 2, dpi=150)
font = {
"fontname": "fbb",
# "fontweight": "bold",
}
axis_font = {"size": "8"}
def plot_metric(ax, name, xlabel, df, rolling_window=75, ylim=(0, 5), legend=False):
    """Plot rolling-mean cross-entropy losses of the three models on *ax*.

    Args:
        ax: matplotlib axes to draw on.
        name: subplot title.
        xlabel: x-axis label text.
        df: DataFrame indexed by an image statistic, with columns
            ``rgb``, ``lwir`` and ``fusion`` holding per-image losses.
        rolling_window: window size for the smoothing rolling mean.
        ylim: y-axis limits. (Changed from a mutable list default to a
            tuple; set_ylim accepts either, so callers are unaffected.)
        legend: when True, draw the legend on this axes.
    """
    ax.plot(df.index, df.rgb.rolling(rolling_window).mean(), label="RGB")
    ax.plot(df.index, df.lwir.rolling(rolling_window).mean(), label="LWIR")
    ax.plot(df.index, df.fusion.rolling(rolling_window).mean(), label="fusion", c="green")
    # font / axis_font are notebook-level dicts defined in the cell above.
    ax.set_ylabel("cross entropy loss", fontdict=axis_font)
    ax.set_xlabel(xlabel, fontdict=axis_font)
    ax.set_ylim(ylim)
    ax.set_title(name, fontdict=font)
    if legend:
        ax.legend()
plot_metric(ax[0][0], "VIS intensity mean", "mean intensity of VIS images", scores_mean_rgb)
plot_metric(ax[0][1], "VIS intensity std", "std of intensity of VIS images", scores_std_rgb)
plot_metric(ax[1][0], "LWIR intensity mean", "mean intensity of LWIR images", scores_mean_lwir)
# Fixed x-label: this panel plots LWIR std (scores_std_lwir), but the label
# said "VIS imagse" -- wrong modality and a typo.
plot_metric(ax[1][1], "LWIR intensity std", "std of intensity of LWIR images", scores_std_lwir, legend=True)
fig.tight_layout()
plt.savefig("output.pdf")
```
| github_jupyter |
```
import tensorflow as tf
import numpy as np
import os
import random
import copy
import keras
from keras.layers import Input, Dense, Conv2D, Dropout, Flatten, Reshape
from keras.optimizers import RMSprop, Adam
from keras.models import Model
from keras.models import Sequential
from keras.callbacks import LambdaCallback
from pypianoroll import Multitrack, Track
from matplotlib import pyplot as plt
import pypianoroll as ppr
import my_config
# set up config
config = copy.deepcopy(my_config.config_5b)
dataset = np.load(config['dataset_path'])
print(config)
# load dataset
dataset = dataset[0:100]
dataset.shape
maxlen = 96
# feature
reshaped_dataset = dataset.reshape((len(dataset),-1,84,5))
feature = []
label = []
for now_song in reshaped_dataset:
for i in range(0, len(now_song), 24):
if (i + maxlen + 1) < len(now_song):
feature.append(now_song[i:i+maxlen])
label.append(now_song[i+maxlen])
# label
# batch
# split data to train and validation
dataset_size = len(dataset)
val_ratio = 0.1
train_size = int(dataset_size * (1 - val_ratio))
# eval_size = dataset_size - train_size
x_train = np.array(feature[0:train_size])
y_train = np.array(label[0:train_size])
x_val = np.array(feature[train_size: dataset_size])
y_val = np.array(label[train_size: dataset_size])
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_val shape:', x_val.shape)
print('y_val shape:', y_val.shape)
# build model
xx = Input(shape=(maxlen, 84, 5))
xxx = Flatten()(xx)
xxx = Dense(84*5, activation='relu')(xxx)
xxx = Reshape((84, 5))(xxx)
model = Model(xx, xxx)
model.summary()
model.compile(loss='mean_squared_error',
optimizer=RMSprop(),
metrics=['accuracy'])
# snapshot
generate_length = 1000
def write_midi(filepath, pianorolls, config):
    """Write a stack of boolean piano-rolls to *filepath* as a MIDI file.

    Args:
        filepath: destination ``.mid`` path.
        pianorolls: boolean array whose last axis enumerates the tracks.
        config: dict providing 'is_drums', 'track_names', 'tempo',
            'beat_resolution' and 'program_nums'.

    Raises:
        TypeError: if *pianorolls* is not a boolean array.
    """
    is_drums = config['is_drums']
    track_names = config['track_names']
    tempo = config['tempo']
    beat_resolution = config['beat_resolution']
    program_nums = config['program_nums']

    if not np.issubdtype(pianorolls.dtype, np.bool_):
        raise TypeError("Support only binary-valued piano-rolls")

    # Normalise scalar settings to per-track lists, then fill in defaults.
    if isinstance(program_nums, int):
        program_nums = [program_nums]
    if isinstance(is_drums, int):
        is_drums = [is_drums]
    if program_nums is None:
        program_nums = [0] * len(pianorolls)
    if is_drums is None:
        is_drums = [False] * len(pianorolls)

    multitrack = Multitrack(beat_resolution=beat_resolution, tempo=tempo)
    for track_idx in range(pianorolls.shape[2]):
        roll = pianorolls[..., track_idx]
        if track_names is None:
            new_track = Track(roll, program_nums[track_idx], is_drums[track_idx])
        else:
            new_track = Track(roll, program_nums[track_idx],
                              is_drums[track_idx], track_names[track_idx])
        multitrack.append_track(new_track)
    multitrack.write(filepath)
def save_midi(filepath, phrases, config):
    """Flatten a batch of 5-D boolean phrases into a full piano-roll and
    save it as a MIDI file via write_midi.

    phrases: boolean array of shape (n, bars, steps, pitches, tracks),
             e.g. (5, 1, 96, 84, 5).
    config:  dict with 'pause_between_samples', 'lowest_pitch' and
             'num_pitch' entries (plus the keys write_midi needs).
    """
    if not np.issubdtype(phrases.dtype, np.bool_):
        raise TypeError("Support only binary-valued piano-rolls")
    # Merge bars and steps into one time axis: (n, bars*steps, pitch, track).
    reshaped = phrases.reshape(-1, phrases.shape[1] * phrases.shape[2],
                               phrases.shape[3], phrases.shape[4])
    # Pad silence between samples along time, and pad the pitch axis out to
    # the full 128 MIDI pitches around the modelled window.
    pad_width = ((0, 0), (0, config['pause_between_samples']),
                 (config['lowest_pitch'],
                  128 - config['lowest_pitch'] - config['num_pitch']),
                 (0, 0))
    # pad_width gives the amount of padding before and after each axis.
    padded = np.pad(reshaped, pad_width, 'constant')
    print("padded shape:", padded.shape)
    # Collapse samples into one long roll: (total_time, 128, tracks).
    pianorolls = padded.reshape(-1, padded.shape[2], padded.shape[3])
    print("pianorolls shape:", pianorolls.shape)
    write_midi(filepath, pianorolls, config)
def on_epoch_end(epoch, logs):
    """Keras LambdaCallback body: after each epoch, seed the model with a
    random training window and autoregressively generate
    ``generate_length`` steps, then write the result out as a MIDI file.

    Relies on notebook globals: x_train, model, maxlen, generate_length,
    config and save_midi.
    """
    # Pick a random training window as the generation seed.
    start_index = random.randint(0, len(x_train)-1)
    result = []
    x_pred = np.array([x_train[start_index]])
    print('x_pred shape:', x_pred.shape)
    result = copy.deepcopy(x_pred)
    print("result shape:", result.shape)
    print(config)
    # Autoregressive loop: append each prediction to the output and slide
    # the input window forward by one step.
    for i in range(generate_length):
        y_pred = model.predict(x_pred, verbose = 0)
        result = np.append(result, [y_pred], axis = 1)
        x_pred = np.append(x_pred[:,1:maxlen,:,:], [y_pred], axis = 1)
    print('result shape:',result.shape)
    # NOTE(review): real-valued predictions are cast straight to bool here,
    # so any nonzero value becomes True -- confirm a threshold isn't
    # intended instead.
    result = np.array(result, dtype=np.bool_)
    # Trim to a whole number of 4-bar phrases (96 steps per bar).
    need_length = (generate_length + maxlen) // (96*4) * (96*4)
    result = result[0]
    result = result[0:need_length]
    # Now a standard piano roll; regroup into (phrases, 4 bars, 96, 84, 5).
    print('result shape:',result.shape)
    result = result.reshape((-1,4,96,84,5))
    print('result final shape:',result.shape)
    save_midi('/Users/mac/Desktop/test_ppr/test_train_%d.mid' % (epoch+1), result, config)
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
# train
model.fit( x = x_train,
y = y_train,
validation_data = (x_val, y_val),
batch_size = 2,
verbose = 1,
epochs = 5,
callbacks = [print_callback])
# tensorboard
```
| github_jupyter |
# Facies classification using machine learning techniques
The ideas of
<a href="https://home.deib.polimi.it/bestagini/">Paolo Bestagini's</a> "Try 2", <a href="https://github.com/ar4">Alan Richardson's</a> "Try 2",
<a href="https://github.com/dalide">Dalide's</a> "Try 6", augmented, by Dimitrios Oikonomou and Eirik Larsen (ESA AS) by
- adding the gradient of gradient of features as augmented features.
- with an ML estimator for PE using both training and blind well data.
- removing the NM_M from augmented features.
In the following, we provide a possible solution to the facies classification problem described at https://github.com/seg/2016-ml-contest.
The proposed algorithm is based on the use of random forests, xgboost or gradient boost combined in one-vs-one multiclass strategy. In particular, we would like to study the effect of:
- Robust feature normalization.
- Feature imputation for missing feature values.
- Well-based cross-validation routines.
- Feature augmentation strategies.
- Test multiple classifiers
# Script initialization
Let's import the used packages and define some parameters (e.g., colors, labels, etc.).
```
# Import
from __future__ import division
get_ipython().magic(u'matplotlib inline')
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (20.0, 10.0)
inline_rc = dict(mpl.rcParams)
from classification_utilities import make_facies_log_plot
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import LeavePGroupsOut
from sklearn.metrics import f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from scipy.signal import medfilt
import sys, scipy, sklearn
print('Python: ' + sys.version.split('\n')[0])
print(' ' + sys.version.split('\n')[0])
print('Pandas: ' + pd.__version__)
print('Numpy: ' + np.__version__)
print('Scipy: ' + scipy.__version__)
print('Sklearn: ' + sklearn.__version__)
print('Xgboost: ' + xgb.__version__)
```
### Parameters
```
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
#Select classifier type
#clfType='GB' #Gradient Boosting Classifier
clfType='XBA' #XGB Clasifier
# Define window length
N_neig=2
#Seed
seed = 24
np.random.seed(seed)
```
# Load data
Let's load the data
```
# Load data from file
data = pd.read_csv('../facies_vectors.csv')
# Load Test data from file
test_data = pd.read_csv('../validation_data_nofacies.csv')
test_data.insert(0,'Facies',np.ones(test_data.shape[0])*(-1))
#Create Dataset for PE prediction from both dasets
all_data=pd.concat([data,test_data])
```
#### Let's store features, labels and other data into numpy arrays.
```
# Store features and labels
X = data[feature_names].values # features
y = data['Facies'].values # labels
# Store well labels and depths
well = data['Well Name'].values
depth = data['Depth'].values
```
# Data inspection
Let us inspect the features we are working with. This step is useful to understand how to normalize them and how to devise a correct cross-validation strategy. Specifically, it is possible to observe that:
- Some features seem to be affected by a few outlier measurements.
- Only a few wells contain samples from all classes.
- PE measurements are available only for some wells.
```
# Define function for plotting feature statistics
def plot_feature_stats(X, y, feature_names, facies_colors, facies_names):
    """Seaborn pairplot of all well-log features coloured by facies label.

    X -- (n_samples, n_features) feature matrix; rows with NaNs (missing PE
         measurements) are dropped before plotting.
    y -- integer facies labels aligned with the rows of X.
    """
    # Remove rows with any NaN so the pairplot only sees complete samples.
    nan_idx = np.any(np.isnan(X), axis=1)
    X = X[np.logical_not(nan_idx), :]
    y = y[np.logical_not(nan_idx)]
    # Merge features and labels into a single DataFrame
    features = pd.DataFrame(X, columns=feature_names)
    labels = pd.DataFrame(y, columns=['Facies'])
    # NOTE(review): enumerate() is 0-based while the facies codes in this
    # contest dataset are typically 1-based -- confirm this mapping is not
    # off by one.
    for f_idx, facies in enumerate(facies_names):
        labels[labels[:] == f_idx] = facies
    data = pd.concat((labels, features), axis=1)
    # Build a facies-name -> colour map so hues are consistent across plots.
    facies_color_map = {}
    for ind, label in enumerate(facies_names):
        facies_color_map[label] = facies_colors[ind]
    sns.pairplot(data, hue='Facies', palette=facies_color_map, hue_order=list(reversed(facies_names)))
```
## Feature distribution
plot_feature_stats(X, y, feature_names, facies_colors, facies_names)
mpl.rcParams.update(inline_rc)
```
# Facies per well
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.histogram(y[well == w], bins=np.arange(len(facies_names)+1)+.5)
plt.bar(np.arange(len(hist[0])), hist[0], color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist[0])))
ax.set_xticklabels(facies_names)
ax.set_title(w)
# Features per well
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.logical_not(np.any(np.isnan(X[well == w, :]), axis=0))
plt.bar(np.arange(len(hist)), hist, color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist)))
ax.set_xticklabels(feature_names)
ax.set_yticks([0, 1])
ax.set_yticklabels(['miss', 'hit'])
ax.set_title(w)
```
## Feature imputation
Let us fill missing PE values. Currently no feature engineering is used, but this should be explored in the future.
```
reg = RandomForestRegressor(max_features='sqrt', n_estimators=50, random_state=seed)
DataImpAll = all_data[feature_names].copy()
DataImp = DataImpAll.dropna(axis = 0, inplace=False)
Ximp=DataImp.loc[:, DataImp.columns != 'PE']
Yimp=DataImp.loc[:, 'PE']
reg.fit(Ximp, Yimp)
X[np.array(data.PE.isnull()),feature_names.index('PE')] = reg.predict(data.loc[data.PE.isnull(),:][['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'NM_M', 'RELPOS']])
```
# Augment features
```
# ## Feature augmentation
# Our guess is that facies do not abrutly change from a given depth layer to the next one. Therefore, we consider features at neighboring layers to be somehow correlated. To possibly exploit this fact, let us perform feature augmentation by:
# - Select features to augment.
# - Aggregating aug_features at neighboring depths.
# - Computing aug_features spatial gradient.
# - Computing aug_features spatial gradient of gradient.
# Feature windows concatenation function
def augment_features_window(X, N_neig, features=-1):
    """Concatenate neighbouring-depth features around every sample.

    For each row, the selected *features* from the ``N_neig`` rows above
    and below are stacked around the row's full feature vector;
    out-of-range neighbours are zero-padded.

    X        -- (n_samples, n_features) feature matrix.
    N_neig   -- half-width of the depth window in samples.
    features -- column indices to take from neighbours, or -1 for all.

    Returns an (n_samples, len(features)*2*N_neig + n_features) array.
    """
    n_rows = X.shape[0]
    if features == -1:
        n_sel = X.shape[1]
        features = np.arange(0, X.shape[1])
    else:
        n_sel = len(features)

    # Zero-pad so every sample has N_neig neighbours on both sides.
    pad = np.zeros((N_neig, X.shape[1]))
    X = np.vstack((pad, X, pad))

    X_aug = np.zeros((n_rows, n_sel * (2 * N_neig) + X.shape[1]))
    for out_idx in range(n_rows):
        r = out_idx + N_neig  # row index inside the padded array
        parts = []
        for offset in range(-N_neig, N_neig + 1):
            # Keep the full feature vector for the centre row, only the
            # selected columns for the neighbours.
            parts.append(X[r + offset, :] if offset == 0 else X[r + offset, features])
        X_aug[out_idx] = np.hstack(parts)
    return X_aug
# Feature gradient computation function
def augment_features_gradient(X, depth, features=-1):
    """Depth-derivative of the selected feature columns.

    X        -- (n_samples, n_features) feature matrix.
    depth    -- (n_samples,) measured depths.
    features -- column indices to differentiate, or -1 for all columns.

    Returns an (n_samples, len(features)) array of finite differences
    divided by the depth step; the final row is zero-filled so the sample
    count is preserved.
    """
    if features == -1:
        features = np.arange(0, X.shape[1])

    # Depth steps; guard against division by zero when depths repeat.
    d_diff = np.diff(depth).reshape((-1, 1))
    d_diff[d_diff == 0] = 0.001

    X_grad = np.diff(X[:, features], axis=0) / d_diff

    # np.diff is one row short -- append a zero row for the last sample.
    return np.concatenate((X_grad, np.zeros((1, X_grad.shape[1]))))
# Feature augmentation function
def augment_features(X, well, depth, N_neig=1, features=-1):
    """Augment the feature matrix well-by-well with windowed neighbours,
    depth gradients, and second depth gradients of the selected columns.

    Returns the augmented matrix together with the indices of rows whose
    first 7 columns are all zero (artifacts of the zero padding at the top
    and bottom of each well).
    """
    n_sel = X.shape[1] if features == -1 else len(features)

    # Layout: window block + gradient block + gradient-of-gradient block.
    X_aug = np.zeros((X.shape[0], X.shape[1] + n_sel * (N_neig * 2 + 2)))

    # Process every well independently so windows and gradients never span
    # the boundary between two wells.
    for name in np.unique(well):
        rows = np.where(well == name)[0]
        win = augment_features_window(X[rows, :], N_neig, features)
        grad = augment_features_gradient(X[rows, :], depth[rows], features)
        grad2 = augment_features_gradient(grad, depth[rows])
        X_aug[rows, :] = np.concatenate((win, grad, grad2), axis=1)

    # Rows introduced by zero padding show up as all-zero leading columns.
    padded_rows = np.unique(np.where(X_aug[:, 0:7] == np.zeros((1, 7)))[0])
    return X_aug, padded_rows
# Train and test a classifier
def train_and_test(X_tr, y_tr, X_v, well_v, clf):
    """Fit *clf* on the training split and predict the validation split.

    Features are normalised with a robust (quartile-based) scaler fitted on
    the training data only; isolated single-sample facies predictions are
    smoothed out with a per-well median filter.
    """
    # Robust scaling is less sensitive to the outliers seen in some logs.
    scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
    X_tr, X_v = scaler.transform(X_tr), scaler.transform(X_v)

    clf.fit(X_tr, y_tr)
    y_v_hat = clf.predict(X_v)

    # Median-filter each well separately to remove one-sample facies spikes.
    for name in np.unique(well_v):
        mask = well_v == name
        y_v_hat[mask] = medfilt(y_v_hat[mask], kernel_size=3)
    return y_v_hat
# Define which features to augment by introducing window and gradients.
augm_Features=['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'RELPOS']
# Get the columns of features to be augmented
feature_indices=[feature_names.index(log) for log in augm_Features]
# Augment features
X_aug, padded_rows = augment_features(X, well, depth, N_neig=N_neig, features=feature_indices)
# Remove padded rows
data_no_pad = np.setdiff1d(np.arange(0,X_aug.shape[0]), padded_rows)
X=X[data_no_pad ,:]
depth=depth[data_no_pad]
X_aug=X_aug[data_no_pad ,:]
y=y[data_no_pad]
data=data.iloc[data_no_pad ,:]
well=well[data_no_pad]
```
## Generate training, validation and test data splits
The choice of training and validation data is paramount in order to avoid overfitting and find a solution that generalizes well on new data. For this reason, we generate a set of training-validation splits so that:
- Features from each well belongs to training or validation set.
- Training and validation sets contain at least one sample for each class.
# Initialize model selection methods
```
lpgo = LeavePGroupsOut(2)
# Generate splits
split_list = []
for train, val in lpgo.split(X, y, groups=data['Well Name']):
hist_tr = np.histogram(y[train], bins=np.arange(len(facies_names)+1)+.5)
hist_val = np.histogram(y[val], bins=np.arange(len(facies_names)+1)+.5)
if np.all(hist_tr[0] != 0) & np.all(hist_val[0] != 0):
split_list.append({'train':train, 'val':val})
# Print splits
for s, split in enumerate(split_list):
print('Split %d' % s)
print(' training: %s' % (data.iloc[split['train']]['Well Name'].unique()))
print(' validation: %s' % (data.iloc[split['val']]['Well Name'].unique()))
```
## Classification parameters optimization
Let us perform the following steps for each set of parameters:
- Select a data split.
- Normalize features using a robust scaler.
- Train the classifier on training data.
- Test the trained classifier on validation data.
- Repeat for all splits and average the F1 scores.
At the end of the loop, we select the classifier that maximizes the average F1 score on the validation set. Hopefully, this classifier should be able to generalize well on new data.
```
if clfType=='XB':
md_grid = [2,3]
# mcw_grid = [1]
gamma_grid = [0.2, 0.3, 0.4]
ss_grid = [0.7, 0.9, 0.5]
csb_grid = [0.6,0.8,0.9]
alpha_grid =[0.2, 0.4, 0.3]
lr_grid = [0.04, 0.06, 0.05]
ne_grid = [100,200,300]
param_grid = []
for N in md_grid:
# for M in mcw_grid:
for S in gamma_grid:
for L in ss_grid:
for K in csb_grid:
for P in alpha_grid:
for R in lr_grid:
for E in ne_grid:
param_grid.append({'maxdepth':N,
# 'minchildweight':M,
'gamma':S,
'subsample':L,
'colsamplebytree':K,
'alpha':P,
'learningrate':R,
'n_estimators':E})
if clfType=='XBA':
learning_rate_grid=[0.12] #[0.06, 0.10, 0.12]
max_depth_grid=[3] #[3, 5]
min_child_weight_grid=[6] #[6, 8, 10]
colsample_bytree_grid = [0.9] #[0.7, 0.9]
n_estimators_grid=[120] #[80, 120, 150] #[150]
param_grid = []
for max_depth in max_depth_grid:
for min_child_weight in min_child_weight_grid:
for colsample_bytree in colsample_bytree_grid:
for learning_rate in learning_rate_grid:
for n_estimators in n_estimators_grid:
param_grid.append({'maxdepth':max_depth,
'minchildweight':min_child_weight,
'colsamplebytree':colsample_bytree,
'learningrate':learning_rate,
'n_estimators':n_estimators})
if clfType=='RF':
N_grid = [50, 100, 150]
M_grid = [5, 10, 15]
S_grid = [10, 25, 50, 75]
L_grid = [2, 3, 4, 5, 10, 25]
param_grid = []
for N in N_grid:
for M in M_grid:
for S in S_grid:
for L in L_grid:
param_grid.append({'N':N, 'M':M, 'S':S, 'L':L})
if clfType=='GB':
N_grid = [80] #[80, 100, 120]
MD_grid = [5] #[3, 5]
M_grid = [10]
LR_grid = [0.12] #[0.1, 0.08, 0.12]
L_grid = [3] #[3, 5, 7]
S_grid = [25] #[20, 25, 30]
param_grid = []
for N in N_grid:
for M in MD_grid:
for M1 in M_grid:
for S in LR_grid:
for L in L_grid:
for S1 in S_grid:
param_grid.append({'N':N, 'MD':M, 'MF':M1,'LR':S,'L':L,'S1':S1})
def getClf(clfType, param):
    """Build an unfitted classifier for the given classifier-type tag.

    clfType -- one of 'RF', 'XB', 'XBA', 'GB'; must match the tag used when
               the corresponding param_grid was built.
    param   -- one entry (dict) from the matching param_grid.
    Returns an sklearn-compatible estimator (possibly one-vs-one wrapped).
    """
    if clfType=='RF':
        # Random forest in a one-vs-one multiclass wrapper, balanced weights.
        clf = OneVsOneClassifier(RandomForestClassifier(n_estimators=param['N'], criterion='entropy',
                                 max_features=param['M'], min_samples_split=param['S'], min_samples_leaf=param['L'],
                                 class_weight='balanced', random_state=seed), n_jobs=-1)
    if clfType=='XB':
        # XGBoost in a one-vs-one multiclass wrapper.
        clf = OneVsOneClassifier(XGBClassifier(
            learning_rate = param['learningrate'],
            n_estimators=param['n_estimators'],
            max_depth=param['maxdepth'],
            # min_child_weight=param['minchildweight'],
            gamma = param['gamma'],
            subsample=param['subsample'],
            colsample_bytree=param['colsamplebytree'],
            reg_alpha = param['alpha'],
            nthread =4,
            seed = seed,
            ) , n_jobs=4)
    if clfType=='XBA':
        # Plain multiclass XGBoost (no one-vs-one wrapper).
        # NOTE(review): this branch uses a hard-coded seed of 17, not the
        # notebook-level `seed` (24) -- confirm that is intentional.
        clf = XGBClassifier(
            learning_rate = param['learningrate'],
            n_estimators=param['n_estimators'],
            max_depth=param['maxdepth'],
            min_child_weight=param['minchildweight'],
            colsample_bytree=param['colsamplebytree'],
            nthread =4,
            seed = 17
            )
    if clfType=='GB':
        # Gradient boosting in a one-vs-one multiclass wrapper.
        clf=OneVsOneClassifier(GradientBoostingClassifier(
            loss='exponential',
            n_estimators=param['N'],
            learning_rate=param['LR'],
            max_depth=param['MD'],
            max_features= param['MF'],
            min_samples_leaf=param['L'],
            min_samples_split=param['S1'],
            random_state=seed,
            max_leaf_nodes=None,)
            , n_jobs=-1)
    return clf
# Hyper-parameter search: evaluate every param dict on a validation split
# and keep the one with the best micro-averaged F1.
score_param = []
print('features: %d' % X_aug.shape[1])
exportScores=[]
for param in param_grid:
    print('features: %d' % X_aug.shape[1])
    # For each data split
    score_split = []
    # NOTE(review): only the single fixed split at index 5 is evaluated,
    # despite the "for each data split" comment -- presumably to save
    # compute. Loop over split_list for a proper cross-validated score.
    split = split_list[5]
    split_train_no_pad = split['train']
    # Select training and validation data from current split
    X_tr = X_aug[split_train_no_pad, :]
    X_v = X_aug[split['val'], :]
    y_tr = y[split_train_no_pad]
    y_v = y[split['val']]
    # Select well labels for validation data
    well_v = well[split['val']]
    # Train and test
    y_v_hat = train_and_test(X_tr, y_tr, X_v, well_v, getClf(clfType,param))
    # Score
    score = f1_score(y_v, y_v_hat, average='micro')
    score_split.append(score)
    # Average score for this param (currently an average over one split)
    score_param.append(np.mean(score_split))
    print('Average F1 score = %.3f %s' % (score_param[-1], param))
    exportScores.append('Average F1 score = %.3f %s' % (score_param[-1], param))
# Best set of parameters
best_idx = np.argmax(score_param)
param_best = param_grid[best_idx]
score_best = score_param[best_idx]
print('\nBest F1 score = %.3f %s' % (score_best, param_best))
# Store F1 scores for multiple param grids
if len(exportScores)>1:
exportScoresFile=open('results_{0}_{1}_sub01b.txt'.format(clfType,N_neig),'wb')
exportScoresFile.write('features: %d' % X_aug.shape[1])
for item in exportScores:
exportScoresFile.write("%s\n" % item)
exportScoresFile.write('\nBest F1 score = %.3f %s' % (score_best, param_best))
exportScoresFile.close()
# ## Predict labels on test data
# Let us now apply the selected classification technique to test data.
# Training data: retrain on the FULL augmented training set (no hold-out).
X_tr = X_aug
y_tr = y
# Prepare test data: pull the well names, depths and raw feature columns.
well_ts = test_data['Well Name'].values
depth_ts = test_data['Depth'].values
X_ts = test_data[feature_names].values
# Augment Test data features with the same neighbour-window scheme used for
# training, so train/test feature spaces match.
# NOTE(review): padded_rows is not used afterwards -- presumably the padded
# predictions are harmless here; confirm against augment_features.
X_ts, padded_rows = augment_features(X_ts, well_ts,depth_ts,N_neig=N_neig, features=feature_indices)
# Predict test labels using the best hyper-parameters found above.
y_ts_hat = train_and_test(X_tr, y_tr, X_ts, well_ts, getClf(clfType,param_best))
# Save predicted labels back onto the test frame and export to CSV.
test_data['Facies'] = y_ts_hat
test_data.to_csv('esa_predicted_facies_{0}_{1}_sub01c.csv'.format(clfType,N_neig))
# Plot predicted labels for the two blind wells.
make_facies_log_plot(
    test_data[test_data['Well Name'] == 'STUART'],
    facies_colors=facies_colors)
make_facies_log_plot(
    test_data[test_data['Well Name'] == 'CRAWFORD'],
    facies_colors=facies_colors)
# Restore the notebook's inline matplotlib defaults.
mpl.rcParams.update(inline_rc)
```
| github_jupyter |
## Setup
We'll be using a Python library that helps us to parse markup languages like HTML and XML called BeautifulSoup. We will be using an additional library called `lxml`, which helps BeautifulSoup (aka BS4) to search and build XML. It is possible that you may need to do an extra step to install `lxml` if you have not used it before, and those steps are [outlined in the BS4 documentation here](https://beautiful-soup-4.readthedocs.io/en/latest/index.html?highlight=namespace#installing-a-parser).
```
from bs4 import BeautifulSoup
```
If you use the cells below that use LXML on its own, then you'll also need to import
the library so you can call it directly:
```
import xml.etree.ElementTree as ET
# Prefer lxml's etree when available; fall back with a notice otherwise.
try:
    from lxml import etree
    print('running with lxml.etree')
except ImportError:
    # Fixed: the original 'you\re' embedded a literal carriage return
    # (\r) in the message -- the apostrophe needed escaping.
    print("you're not running with lxml active")
```
Later on, we will use regular expressions to identify strings that match certain patterns.
To do this, you also need to install the `re` library:
```
import re
```
## First Steps to Navigating the Tree: Beautiful Soup
### Load the records
This activity is designed to have the data included alongside our notebook, so the files are already included in this repository. This should allow you to download and run the notebook yourself, using the same commands and finding the same results. There could be other ways to do this if you are working in a different context - for example, if you are working with records you're pulling from the web, you might want to pull them dynamically using the `requests` library (that allows you to make HTML requests).
Here is how we can parse one of the XML records using BeautifulSoup:
```
MODS_collection = open('2018_lcwa_MODS_5.xml', 'r')
```
Let's find a bit of information in the "soup" (that is, the file loaded as data).
To do that, we can use the BS4 library to call items by name. (This, of course,
requires a knowledge of what tags and items you would expect to find in the record,
which we will look at later.)
```
soup = BeautifulSoup(MODS_collection, 'lxml')
print(soup.text[:100])
```
The above cell prints a string of text from the XML object that we've loaded,
in order to demonstrate that, yes, we have loaded content.
Later we will pull more meaningful information. For now, let's quickly
pull out some of the titles, this time using the tag names:
```
for tag in soup.find_all('title'):
print(tag.name, tag.text)
```
### Navigating and Exploring the Tree
To get a list of all the tags in the document, try something like this (using `True` to demonstrate the existence of each tag in the file). Here, note the use of the `limit` argument to return only 10 instances. We don't need the whole list here for the purposes of demonstration:
```
for tag in soup.find_all(True, limit=10):
print(tag.name)
```
Or, we could look for each of the attributes on the top-level `mods` tags. We can see that
they are stored in a dictionary-like object, which is indicated with curly braces `{}`):
```
for tag in soup.find_all('mods'):
print(tag.name, tag.attrs)
```
Finally, don't forget to close the file:
```
MODS_collection.close()
```
### Namespaces
In some cases, you may be working with data that has tags from various namespaces (that is,
basically different tag schemas, such as MODS or EAD). For specificity, it can be
important to have a list of the namespaces that you will reference. In this case,
that list is a python dictionary named `ns`:
```
# Map namespace prefixes to schema URIs for use in ElementTree/lxml
# XPath queries, e.g. findall('.//mods:title', namespaces=ns).
ns = {
    'mods' : 'http://www.loc.gov/mods/v3',
    'ead3' : 'http://ead3.archivists.org/schema/',
}
```
## First Steps to Navigate the Tree Using LXML / XPath
Load records, list tags and child elements, see subelements, display tags and attributes...
## Examples
Here are some things we'll do using the BeautifulSoup library,
to be developed below in the notebook following:
1. Looking in a single XML file with multiple `<mods>` subelements, use a loop to count the elements.
1. Using a similar loop to the above, can you go through the individual `<mods>` and extract the record's identifier?
1. Let's get a bit deeper into MODS. These records use the `<titleInfo>` and `<title>` tags. Use the `.findall()` function to look into each of these and pull out the text of each `<title>` element.
1. The above pulls out all the titles, including related titles. Use the `.find()` function to search for just the first instance and pull out only the main titles.
1. These records contain `<subject>` designations, but only some of these correspond to headings that are authorized headings in the Library of Congress Subject Headings. Those are marked with an attribute `authority='lcsh'`, which is indicated as an embedded attribute in the tag. Look through `<subject>` tags, identify only the ones that include an LCSH attribute, then print the content of those subject headings.
1. Data addition or modification: identify the local call numbers, then check to make sure all of them have appropriate attribute data attached.
1. Data validation: check the local call number references to ensure that they are in the proper format (e.g., _lcwaAddddddd_).
1. Save the updated metadata. In this case, write the updated metadata to a new file.
#### Counting Records in the Set
Activity 1: How many metadata records for discrete items are included in the set? A compound MODS file may create multiple records in one file; in this scenario, the tag `<mods>` encloses each individual record, and the list of records is enclosed in a `<modsCollection>` tag. Use a loop to count the `<mods>` elements.
```
#BS4
record_count = 0
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
print(mods.name, mods.title)
record_count += 1
print(record_count)
```
#### Extract Item Identifiers
Activity 2: Each individual metadata record has at least one `<identifier>` element; this element is used to include a reference to the item, such as a URI, or another identifier that a system may use to locate an item. Using a loop similar to the example above, how would you print each record's identifier(s)?
```
#BS4
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier'):
print(identifier.name, identifier.text)
```
There are clearly different types of identifiers here, and when we check the identifier attributes,
it is clear that some of these will be more useful than others. Below, use the `.attrs` method to see the dictionary that each element carries:
```
#BS4
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier'):
print(identifier.name, identifier.text, identifier.attrs)
```
Since some of the elements do not have attributes, we need a try-except loop to look at each dictionary,
and to generate a "Blank" value for elements without attributes:
```
#BS4
# Print each identifier's tag, text and 'type' attribute, substituting a
# placeholder when the attribute is absent.
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
    metadata = BeautifulSoup(xml_records, 'lxml')
    for mods in metadata.find_all('mods'):
        for identifier in mods.find_all('identifier'):
            tag = identifier.name
            content = identifier.text
            try:
                type_ = identifier.attrs['type']
            except KeyError:
                # Only a missing 'type' key means "blank"; the original bare
                # except would have silently swallowed any other error too.
                type_ = "Blank type"
            print(tag, content, type_)
```
Finally, let's print a clean list of only the URI identifiers:
```
#BS4
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier', type="uri"):
print(identifier.attrs['type'], identifier.text)
```
Now, we could try that another way using the `lxml` XML library directly:
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_5.xml')
for identifier in xml_records.findall('.//mods:identifier', namespaces=ns):
element = identifier
print(element.tag, element.text, element.attrib)
```
And, filter to identify only the URI elements...
Note the commands are similar, but the process is not exactly the same. Some of the
methods to show elements, attributes, and other elements differ, and
the syntax for searching and navigating the XML tree is slightly different, too!
The display is also slightly different when looking at the element tags, since
the `lxml` parser is very specific about the "namespace" (in this case, that is,
the rules that are specifying what goes in the MODS record and how it is
structured) of each tag.
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_5.xml')
for identifier in xml_records.findall('.//mods:identifier', namespaces=ns):
element = identifier
attribs = element.attrib
type = attribs.get('type')
if type == 'uri':
print(element.tag, type, element.text)
```
#### Extract the record titles
Activity 3: Each individual metadata record has at least one `<title>` element; this element is used to identify an item's title. Using a loop similar to the example above, how would you print each record's title(s)?
```
#BS4
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
for title in mods.find_all('title'):
print(title.name, title.find_parent())
```
Note above that the `.find_parent()` method can be used to look "up" the tree,
in this case displaying the parent element of the `title` element.
A similar result can be produced using the `lxml` library directly. In this case,
the `.findall()` method is similar, but notice that the request can be given
using XPATH references and while specifying namespaces:
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_5.xml')
for titleInfo in xml_records.findall('.//mods:title', namespaces=ns):
element = titleInfo
print(element.text)
```
#### Extract only the Main Titles
Activity 4: Notice in the previous activity that even though we are working with only five
records, there are well more than five titles. Each of these records has multiple `title` elements,
some of which are for `relatedItem` elements. If we want only the main titles, use the `.find()` function
to search for only the first instance and print out only the main title. An alternative way to do this in `lxml`
is to use a more specific XPath selector.
```
#BS4
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
title = mods.find('title')
print(title.name, title.text)
```
Try `lxml` to make a more specific XPath request. (Note: you can also use the `.find()` method in `lxml` to return only the first result.)
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_5.xml')
for title in xml_records.findall('.//mods:mods/mods:titleInfo/mods:title', namespaces=ns):
element = title
print(element.text, element.tag)
```
Above, the query specifically asks for the `title` elements that are direct
child elements of a `titleInfo` element, which is a child of the `mods` element.
This is necessary to filter out any `titleInfo` elements that are actually under a
`relatedItem` element. With less specificity, the query will return numerous elements
that are "related" but are not the title of the actual item:
#### Exploring the Subject Element
Activity 5: These records contain `<subject>` designations, but only some of these correspond to headings that are authorized headings in the Library of Congress Subject Headings (LCSH). Those are indicated with an attribute `authority='lcsh'`, which is indicated as an embedded attribute in the tag. Look through the `<subject>` tags and identify only the ones that include an LCSH attribute, then print the content of those subject headings.
Note: this activity requires using the twenty-five record set rather than the one with five records.
As LCSH headings are generally constructed as a main topic word, followed by descriptors that indicate further
topical, geographic, or chronological details, note that the structure is mimicked here, with `<topic>` and
various `<genre>`, `<geographic>`, or other specifiers.
```
#BS4
metadata = BeautifulSoup(open('2018_lcwa_MODS_25.xml'), 'lxml')
for mods in metadata.find_all('mods'):
for subject in mods.find_all('subject', authority="lcsh"):
print(subject, subject.attrs, '\n')
# demonstrating navigate in BS4 using dot notation (subject.topic) to go down in the tree
#BS4
metadata = BeautifulSoup(open('2018_lcwa_MODS_25.xml'), 'lxml')
for mods in metadata.find_all('mods'):
for subject in mods.find_all('subject', authority="lcsh"):
print(subject.topic)
```
Using `lxml`, we can use XPath queries. (Again, remember to use the 25-record file, not the 5-record file.)
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
for subject in xml_records.findall('.//mods:mods/mods:subject', namespaces=ns):
element = subject
print(element.tag, element.attrib)
```
Similarly, we can filter to view only those with `lcsh` subject authorities:
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
for subject in xml_records.findall('.//mods:mods/mods:subject', namespaces=ns):
if subject.attrib['authority'] == 'lcsh':
print(subject.tag, len(subject), subject.attrib)
```
You may notice that the lxml tools are more literal, in a sense,
meaning that they really only usually give you what you ask for. So,
for example, unlike using BeautifulSoup when we can ask for the contents
of a tag (that is, all of the text that is enclosed by the tag),
`lxml` treats the metadata more like data. In this case, the `subject` tags don't
strictly contain any actual text (that is, nothing the parser recognizes as
a string of characters), in fact they only contain more subelements, which then contain text.
The structure looks something like this:
```xml
<subject authority="lcsh">
<topic>Animals</topic>
<genre>Pictorial works</genre>
</subject>
```
So, as the `lxml` parser sees it, the "contents" of the `<subject>` tag
are two subelements: `<topic>` and `<genre>`.
To get the "text" or content of the element, we need to look at the attributes
(in this case, the authority type), and then to extract the subelements.
Only when the subelements are obtained can we retrieve their text.
First, look for the contents of the `subject` element, the list of its subelements,
here identified as the "child" elements since they are further "down" in the tree:
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
count = 0
for subject in xml_records.findall('.//mods:mods/mods:subject', namespaces=ns):
if subject.attrib['authority'] == 'lcsh':
count += 1
print(subject.tag, count, 'children:')
for subelement in subject:
print(' ',subelement.tag)
print('\n')
if count > 4:
break
```
Finally, to extract the actual subject terms, we can request the text of the subelements:
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
count = 0
for subject in xml_records.findall('.//mods:mods/mods:subject', namespaces=ns):
if subject.attrib['authority'] == 'lcsh':
count += 1
print(subject.tag, count, 'children:')
for subelement in subject:
print(' {} - {}'.format(subelement.tag, subelement.text))
print('\n')
if count > 4:
break
```
#### Data Addition or Modification
Activity 6: Now that we can request things in the tree, let's look for
more specific things, like content strings that meet certain criteria,
then add or modify content to enhance them.
Let's return to the `identifier` elements. Some of these are structured with
local call numbers, but those don't appear to be identified with any additional
attributes:
```xml
<identifier>lcwaN0010234</identifier>
<identifier invalid="yes" type="database id">85999</identifier>
<identifier invalid="yes" type="database id">109353</identifier>
```
Would it be possible to modify these and add a "type" attribute for those
local numbers? Let's start with BeautifulSoup:
```
#BS4
#BS4 - show the identifiers and their attributes
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier'):
print(identifier.name, identifier.text, identifier.attrs)
#BS4 - use regular expressions to identify the local identifiers or "call numbers"
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
# set up a regex pattern
call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
# alternatively, be more specific and look for the lcwa string at the beginning:
# call_num_pattern = re.compile(r'^blcwaN\d{7}')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier'):
if re.match(call_num_pattern, identifier.text):
print(identifier.name, identifier.text, identifier.attrs)
#BS4
# now, add in new attributes for these "local call number" elements
#BS4 - use regular expressions to identify the local identifiers or "call numbers"
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
# set up a regex pattern
call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
# alternatively, be more specific and look for the lcwa string at the beginning:
# call_num_pattern = re.compile(r'^blcwaN\d{7}')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier'):
if re.match(call_num_pattern, identifier.text):
# add attributes by assigning values
identifier['type'] = 'local_call_number'
identifier['invalid'] = 'no'
identifier['displaylabel'] = 'Local Call Number'
#print, to make sure that these were added
print(identifier.name, identifier.text, identifier.attrs)
#BS4 - check to make sure they look okay as XML
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
metadata = BeautifulSoup(xml_records, 'lxml')
call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
for mods in metadata.find_all('mods'):
for identifier in mods.find_all('identifier'):
if re.match(call_num_pattern, identifier.text):
# add attributes by assigning values
identifier['type'] = 'local_call_number'
identifier['invalid'] = 'no'
identifier['displaylabel'] = 'Local Call Number'
print(identifier.prettify())
#lxml identify the local call number identifiers
xml_records = ET.parse('2018_lcwa_MODS_5.xml')
# regex pattern to identify the call number:
call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
for identifier in xml_records.findall('.//mods:mods/mods:identifier', namespaces=ns):
if re.match(call_num_pattern, identifier.text):
print(identifier.text)
#lxml insert attributes to make a more complete metadata record
xml_records = ET.parse('2018_lcwa_MODS_5.xml')
# regex pattern to identify the call number:
call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
for identifier in xml_records.findall('.//mods:mods/mods:identifier', namespaces=ns):
if re.match(call_num_pattern, identifier.text):
print(identifier.text)
identifier.attrib['displaylabel'] = 'Local Call Number'
identifier.attrib['invalid'] = 'no'
identifier.attrib['type'] = 'local_call_number'
print(' ',identifier.attrib)
```
#### Data validation - ADD new sample file TODO
7 - Data validation: check the reference IDs to ensure that they are in the proper format, then identify for correction as needed.
```
#lxml
# Data validation: flag identifiers that match the local call-number format
# (lcwa + one capital letter + seven digits, e.g. lcwaN0010234).
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
# previously we used a regex pattern to identify the call number:
# call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
# Fixed: the anchored pattern began with a stray 'b' ('^blcwa...', a leftover
# from a bytes repr) so it could never match; '{1}' was also redundant.
call_num_pattern = re.compile(r'^lcwa[A-Z]\d{7}')
for identifier in xml_records.findall('.//mods:mods/mods:identifier', namespaces=ns):
    print(identifier.text)
    if re.match(call_num_pattern, identifier.text):
        print(identifier.tag, identifier.text, identifier.attrib)
```
#### Saving the Updated Metadata
Activity 8: Now, let's write the updated metadata to a new file.
```
#BS4 - write out to a new file...
newfile_name = '2018_lcwa_MODS_5_updated.xml'
with open('2018_lcwa_MODS_5.xml', 'r') as xml_records:
    metadata = BeautifulSoup(xml_records, 'lxml')
    # Local call numbers look like lcwaN0010234: four lowercase letters,
    # one capital letter, seven digits.
    call_num_pattern = re.compile(r'[a-z]{4}N\d{7}')
    for mods in metadata.find_all('mods'):
        for identifier in mods.find_all('identifier'):
            if re.match(call_num_pattern, identifier.text):
                # add attributes by assigning values
                identifier['type'] = 'local_call_number'
                identifier['invalid'] = 'no'
                identifier['displaylabel'] = 'Local Call Number'
#new file: write the modified soup back out, pretty-printed
with open(newfile_name, 'w') as updated_records:
    updated_records.write(metadata.prettify(formatter="minimal"))
    print("Wrote a new file, you're welcome!")
```
This is the end of the activities section.
Materials below this cell are draft code blocks that were either modified or prepared
for interactive discussions in class.
############
OLD
#### Data Addition or Modification
Old Activity 6: Now that we can request things in the tree, let's look for
more specific things, like content strings that meet certain criteria,
then add or modify content to enhance them.
Above you may have noticed that some of the subject terms are blank.
Looking at the tree, it is clear that some were left to complete later:
```xml
<subject authority="lcsh">
<name authority="naf" type="corporate">
<namePart><!-- TODO: Insert name authority here (can be same as name authority above, under title). --></namePart>
</name>
</subject>
```
Let's find these and then replace the comments with different content:
======
These cells attempted to look for comments in the XML using the LXML parser:
```
#lxml
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
count = 0
for subject in xml_records.findall('.//mods:mods/mods:subject', namespaces=ns):
if subject.attrib['authority'] == 'lcsh':
count += 1
print(subject.tag, count, 'children:')
for subelement in subject:
print(' {} - {}'.format(subelement.tag, subelement.text))
print('\n')
if count > 4:
break
#lxml
xml_records = ET.parse('2018_lcwa_MODS_25.xml')
count = 0
for subject in xml_records.findall('.//mods:mods/mods:subject', namespaces=ns):
if subject.attrib['authority'] == 'lcsh':
count += 1
print(subject.tag, count, 'children:')
for subelement in subject:
print(' {} - {}'.format(subelement.tag, subelement.text))
for subsubelement in subelement:
print(subsubelement.tag, subsubelement.text)
print('\n')
if count > 4:
break
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ashishpatel26/100-Days-Of-ML-Code/blob/master/Tensorflow_Basic_Chapter_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Basic Perceptron
```
import tensorflow as tf
print(tf.__version__)
# 2x2 weight matrix of ones and a length-2 bias of zeros.
W = tf.Variable(tf.ones(shape=(2,2)), name='w')
b = tf.Variable(tf.zeros(shape=(2)), name='b')
@tf.function
def model(x):
    # Note: '*' is element-wise multiplication (broadcast), not tf.matmul.
    return W * x + b
out_a = model([1, 0])
print(out_a)
```
## Tensorflow 2.0 Code First Example
```
import tensorflow as tf
from tensorflow import keras
NB_CLASSES = 10
RESHAPED = 784
model = tf.keras.Sequential()
model.add(keras.layers.Dense(NB_CLASSES, input_shape = (RESHAPED,), kernel_initializer='zeros', name='Dense_layer', activation='softmax'))
model.summary()
```
## MNIST Example with tf2.2.0
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
# Network and Training Parameter
EPOCH = 200
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10  # number of output = number of digit
N_HIDDEN = 128   # NOTE(review): defined but unused by this single-layer model
VALIDATION_SPLIT = 0.2  # how much TRAIN is reserved for VALIDATION
### Loading MNIST Dataset which containing 60000 training and 10000 testing example.
mnist = keras.datasets.mnist
(X_train,Y_train), (X_test, Y_test) = mnist.load_data()
# X train is 60000 rows of 28 X 28 values ==> we are doing reshape into 60000 X 784
RESHAPED = 784
X_train = X_train.reshape(60000, RESHAPED)
X_test = X_test.reshape(10000, RESHAPED)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Scale pixel values from [0, 255] to [0, 1].
X_train = X_train/255
X_test = X_test/255
print(X_train.shape[0],'Train Samples')
print(X_test.shape[0],'Test Samples')
# One-hot encode the integer labels into NB_CLASSES columns.
Y_train = tf.keras.utils.to_categorical(Y_train, NB_CLASSES)
Y_test = tf.keras.utils.to_categorical(Y_test, NB_CLASSES)
# Build a model: a single softmax layer (multinomial logistic regression).
model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(NB_CLASSES,
            input_shape = (RESHAPED, ),
            name = 'dense_layer',
            activation = 'softmax'))
#compile the modela and apply backpropagation
model.compile(optimizer='SGD',
              loss='categorical_crossentropy',
              metrics = ['accuracy'])
#model summary
model.summary()
model.fit(X_train,Y_train,
          batch_size = BATCH_SIZE,
          epochs = EPOCH,
          verbose=VERBOSE, validation_split = VALIDATION_SPLIT)
# evaluate the model on the held-out test set
test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test Accuracy:', test_acc)
```
| github_jupyter |
```
import azureml.core
from azureml.core import Workspace
ws = Workspace.from_config()
# Get the default datastore
default_ds = ws.get_default_datastore()
default_ds.upload_files(files=['./Data/borrower.csv', './Data/loan.csv'], # Upload the diabetes csv files in /data
target_path='creditrisk-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
#Create a Tabular dataset from the path on the datastore
from azureml.core import Dataset
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'creditrisk-data/borrower.csv'))
tab_data_set = tab_data_set.register(workspace=ws,
name='BorrowerData',
description='Borrower Data',
tags = {'format':'CSV'},
create_new_version=True)
#Create a Tabular dataset from the path on the datastore
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'creditrisk-data/loan.csv'))
tab_data_set = tab_data_set.register(workspace=ws,
name='LoanData',
description='Loans Data',
tags = {'format':'CSV'},
create_new_version=True)
from azureml.core import Workspace, Dataset, Datastore, ScriptRunConfig, Experiment
from azureml.data.data_reference import DataReference
import os
import azureml.dataprep as dprep
import pandas as pd
import numpy as np
import azureml.core
from azureml.core import Workspace
ws = Workspace.from_config()
borrowerData = Dataset.get_by_name(ws, name='BorrowerData')
loanData = Dataset.get_by_name(ws, name='LoanData')
from azureml.core import Datastore
from azureml.core.compute import AmlCompute, ComputeTarget
datastore = ws.get_default_datastore()
# Create a compute cluster, but only if one with this name does not exist yet.
compute_name = 'cpu-cluster'
if compute_name not in ws.compute_targets:  # idiomatic 'not in' (was 'not x in y')
    print('creating a new compute target...')
    # Small autoscaling cluster: scales down to zero nodes when idle.
    provisioning_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS2_V2',
                                                                min_nodes=0,
                                                                max_nodes=1)
    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
    compute_target.wait_for_completion(
        show_output=True, min_node_count=None, timeout_in_minutes=20)
    # Show the result
    print(compute_target.get_status().serialize())
# Always resolve the target from the workspace, whether freshly created or not.
compute_target = ws.compute_targets[compute_name]
from azureml.core.runconfig import RunConfiguration
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
# Create a Python environment for the experiment
creditrisk_env = Environment("creditrisk-pipeline-env")
creditrisk_env.python.user_managed_dependencies = False # Let Azure ML manage dependencies
creditrisk_env.docker.enabled = True # Use a docker container
# Create a set of package dependencies
creditrisk_packages = CondaDependencies.create(conda_packages=['scikit-learn','joblib','pandas','numpy','pip'],
pip_packages=['azureml-defaults','azureml-dataprep[pandas]'])
# Add the dependencies to the environment
creditrisk_env.python.conda_dependencies = creditrisk_packages
# Register the environment
creditrisk_env.register(workspace=ws)
registered_env = Environment.get(ws, 'creditrisk-pipeline-env')
# Create a new runconfig object for the pipeline
aml_run_config = RunConfiguration()
# Use the compute you created above.
aml_run_config.target = compute_target
# Assign the environment to the run configuration
aml_run_config.environment = registered_env
print ("Run configuration created.")
%%writefile PrepareData.py
from azureml.core import Run
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--prepared_data', dest='prepared_data', required=True)
args = parser.parse_args()
borrowerData = Run.get_context().input_datasets['BorrowerData']
loanData = Run.get_context().input_datasets['LoanData']
df_borrower = borrowerData.to_pandas_dataframe()
df_loan = loanData.to_pandas_dataframe()
# Join data and do some transformations
df_data = df_borrower.merge(df_loan,on='memberId',how='inner')
df_data.shape
df_data['homeOwnership'] = df_data['homeOwnership'].replace('nan', np.nan).fillna(0)
df_data['isJointApplication'] = df_data['isJointApplication'].replace('nan', np.nan).fillna(0)
drop_cols = ['memberId', 'loanId', 'date','grade','residentialState']
df_data = df_data.drop(drop_cols, axis=1)
df_data['loanStatus'] = np.where(df_data['loanStatus'] == 'Default', 1, 0) # change label column to 0/1
df_data.to_csv(os.path.join(args.prepared_data,"prepared_data.csv"),index=False)
print(f"Wrote prepped data to {args.prepared_data}/prepared_data.csv")
from azureml.data import OutputFileDatasetConfig
from azureml.pipeline.steps import PythonScriptStep
# Intermediate pipeline output directory where PrepareData.py writes
# prepared_data.csv; consumed downstream as an input.
prepared_data = OutputFileDatasetConfig(name="prepared_data")
# Pipeline step that runs PrepareData.py on the compute cluster with the
# registered environment; allow_reuse lets Azure ML skip re-runs when
# inputs are unchanged.
dataprep_step = PythonScriptStep(
    name="PrepareData",
    script_name="PrepareData.py",
    compute_target=compute_target,
    runconfig=aml_run_config,
    arguments=["--prepared_data", prepared_data],
    inputs=[borrowerData.as_named_input('BorrowerData'),loanData.as_named_input('LoanData')],
    allow_reuse=True
)
# prepared_data = prepared_data_path.read_delimited_files()
%%writefile TrainTestDataSplit.py
from azureml.core import Run
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--prepared_data', dest='prepared_data', required=True)
parser.add_argument('--train_data', dest='train_data', required=True)
parser.add_argument('--test_data', dest='test_data', required=True)
args = parser.parse_args()
df_data = pd.read_csv(args.prepared_data + '/prepared_data.csv')
df_train=df_data.sample(frac=0.8,random_state=200) #random state is a seed value
df_train=df_data.drop(df_train.index)
df_train.to_csv(os.path.join(args.train_data,"train_data.csv"),index=False)
df_train.to_csv(os.path.join(args.test_data,"test_data.csv"),index=False)
print(f"Wrote prepped data to {args.train_data}/train_data.csv")
print(f"Wrote prepped data to {args.test_data}/test_data.csv")
# test train split the data
# Outputs of the split step, consumed downstream by TrainModel/BatchInference.
train_data = OutputFileDatasetConfig(name="train_data")
test_data = OutputFileDatasetConfig(name="test_data")

test_train_step = PythonScriptStep(name = "TestTrainDataSplit",
                                   script_name ="TrainTestDataSplit.py",
                                   arguments = ["--prepared_data", prepared_data.as_input(),
                                                "--train_data", train_data,
                                                "--test_data", test_data],
                                   outputs = [train_data,test_data],
                                   compute_target = compute_target,
                                   runconfig = aml_run_config,
                                   allow_reuse = True
                                   )

# Tabular views of the step outputs (lazily evaluated when the pipeline runs).
training_data = train_data.read_delimited_files()
training_data
testing_data = test_data.read_delimited_files()
testing_data
%%writefile TrainModel.py
from azureml.core import Run
from azureml.core.model import Model
import joblib
import pandas as pd
import numpy as np
import argparse
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
def creditrisk_onehot_encoder(df_data):
    """One-hot encode the categorical columns of df_data and mean-impute NaNs.

    :param df_data: (pd.DataFrame) feature frame with mixed dtypes
    :return: (pd.DataFrame) all-numeric frame: original numeric columns plus
        one-hot encoded categorical columns, with missing values imputed

    NOTE(review): the encoder/imputer are re-fit on every frame passed in, so
    train and test are encoded independently and their column sets can diverge
    when a category appears in only one split -- fitting once on train and
    reusing the fitted transformers on test would be safer.
    """
    catColumns = df_data.select_dtypes(['object']).columns
    df_data[catColumns] = df_data[catColumns].fillna(value='Unknown')
    # numeric_only=True: .mean() across mixed dtypes raises in pandas >= 2.0.
    df_data = df_data.fillna(df_data.mean(numeric_only=True))
    # sparse= was renamed sparse_output= in scikit-learn 1.2 (removed in 1.4).
    try:
        OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
    except TypeError:
        OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
    encoded = OH_encoder.fit_transform(df_data[catColumns])
    # get_feature_names was removed in scikit-learn 1.2.
    if hasattr(OH_encoder, 'get_feature_names_out'):
        oh_names = OH_encoder.get_feature_names_out(catColumns)
    else:
        oh_names = OH_encoder.get_feature_names(catColumns)
    # BUG FIX: carry the caller's index so the concat below aligns rows instead
    # of introducing NaN rows for frames with a non-default index.
    OH_cols = pd.DataFrame(encoded, columns=list(oh_names), index=df_data.index)
    # Remove categorical columns (replaced by the one-hot encoding).
    numeric_cols = df_data.drop(catColumns, axis=1)
    df_result = pd.concat([numeric_cols, OH_cols], axis=1)
    # Impute any remaining missing numeric values with the column mean.
    fill_NaN = SimpleImputer(missing_values=np.nan, strategy='mean')
    imputed_df = pd.DataFrame(fill_NaN.fit_transform(df_result),
                              columns=df_result.columns,
                              index=df_result.index)
    return imputed_df
import os  # BUG FIX: os.makedirs / os.path.join are used below but os was never imported

# Get the experiment run context
run = Run.get_context()

parser = argparse.ArgumentParser()
parser.add_argument('--train_data', dest='train_data', required=True)
parser.add_argument('--test_data', dest='test_data', required=True)
parser.add_argument('--metrics_data', dest='metrics_data', required=True)
parser.add_argument('--model_data', dest='model_data', required=True)
args = parser.parse_args()

# Load the splits produced by the TrainTestDataSplit step.
df_train = pd.read_csv(args.train_data + '/train_data.csv')
df_test = pd.read_csv(args.test_data + '/test_data.csv')
# NOTE(review): train and test are encoded independently here, so their
# one-hot column sets only line up because both come from the same source.
df_train = creditrisk_onehot_encoder(df_train)
df_test = creditrisk_onehot_encoder(df_test)

# Every column except the label is a feature.
cols = [col for col in df_train.columns if col not in ["loanStatus"]]
clf = LogisticRegression()
clf.fit(df_train[cols].values, df_train["loanStatus"].values)

print('predicting ...')
y_hat = clf.predict(df_test[cols].astype(int).values)
acc = np.average(y_hat == df_test["loanStatus"].values)
print('Accuracy is', acc)

print("save model")
os.makedirs('models', exist_ok=True)
joblib.dump(value=clf, filename='models/creditrisk_model.pkl')
model = Model.register(model_path='models/creditrisk_model.pkl',
                       model_name='creditrisk_model',
                       description='creditrisk model',
                       workspace=run.experiment.workspace,
                       # BUG FIX: np.float was removed in NumPy 1.24; the builtin is correct.
                       properties={'Accuracy': float(acc)})

# Small csv outputs telling downstream steps which model/metrics were produced.
modeldata = [('models/creditrisk_model.pkl', 'creditrisk_model')]
df_model = pd.DataFrame(modeldata, columns=('modelfile', 'model_name'))
metricsdata = [('Accuracy', acc)]
df_metrics = pd.DataFrame(metricsdata, columns=('Metric', 'Value'))
df_model.to_csv(os.path.join(args.model_data, "model_data.csv"), index=False)
df_metrics.to_csv(os.path.join(args.metrics_data, "metrics_data.csv"), index=False)
print(f"Wrote model data to {args.model_data}/model_data.csv")
print(f"Wrote metrics data to {args.metrics_data}/metrics_data.csv")
# train the model
# Outputs of the training step: a pointer to the registered model and the
# accuracy metric, each written as a small csv.
model_data = OutputFileDatasetConfig(name="model_data")
metrics_data = OutputFileDatasetConfig(name="metrics_data")
train_step = PythonScriptStep(name = "TrainModel",
                              script_name ="TrainModel.py",
                              arguments = ["--train_data", train_data.as_input(),
                                           "--test_data", test_data.as_input(),
                                           "--model_data", model_data,
                                           "--metrics_data", metrics_data],
                              outputs = [model_data,metrics_data],
                              compute_target = compute_target,
                              runconfig = aml_run_config,
                              allow_reuse = True
                              )
%%writefile BatchInference.py
from azureml.core import Run
from azureml.core.model import Model
import joblib
import pandas as pd
import numpy as np
import argparse
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
def creditrisk_onehot_encoder(df_data):
    """One-hot encode the categorical columns of df_data and mean-impute NaNs.

    :param df_data: (pd.DataFrame) feature frame with mixed dtypes
    :return: (pd.DataFrame) all-numeric frame: original numeric columns plus
        one-hot encoded categorical columns, with missing values imputed

    NOTE(review): this duplicates the helper in TrainModel.py; the encoder is
    re-fit on whatever frame is scored, so its columns can diverge from the
    columns the model was trained on -- reusing the fitted training
    transformers would be safer.
    """
    catColumns = df_data.select_dtypes(['object']).columns
    df_data[catColumns] = df_data[catColumns].fillna(value='Unknown')
    # numeric_only=True: .mean() across mixed dtypes raises in pandas >= 2.0.
    df_data = df_data.fillna(df_data.mean(numeric_only=True))
    # sparse= was renamed sparse_output= in scikit-learn 1.2 (removed in 1.4).
    try:
        OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
    except TypeError:
        OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
    encoded = OH_encoder.fit_transform(df_data[catColumns])
    # get_feature_names was removed in scikit-learn 1.2.
    if hasattr(OH_encoder, 'get_feature_names_out'):
        oh_names = OH_encoder.get_feature_names_out(catColumns)
    else:
        oh_names = OH_encoder.get_feature_names(catColumns)
    # BUG FIX: carry the caller's index so the concat below aligns rows instead
    # of introducing NaN rows for frames with a non-default index.
    OH_cols = pd.DataFrame(encoded, columns=list(oh_names), index=df_data.index)
    # Remove categorical columns (replaced by the one-hot encoding).
    numeric_cols = df_data.drop(catColumns, axis=1)
    df_result = pd.concat([numeric_cols, OH_cols], axis=1)
    # Impute any remaining missing numeric values with the column mean.
    fill_NaN = SimpleImputer(missing_values=np.nan, strategy='mean')
    imputed_df = pd.DataFrame(fill_NaN.fit_transform(df_result),
                              columns=df_result.columns,
                              index=df_result.index)
    return imputed_df
import os  # BUG FIX: os.path.join is used below but os was never imported in this script

parser = argparse.ArgumentParser()
parser.add_argument('--test_data', dest="test_data", type=str, required=True)
parser.add_argument('--model_data', dest="model_data", type=str, required=True)
parser.add_argument('--batchinfer_data', dest='batchinfer_data', required=True)
args = parser.parse_args()

# Get the experiment run context
run = Run.get_context()

# The TrainModel step records which registered model to score with.
df_model = pd.read_csv(args.model_data + '/model_data.csv')
model_name = df_model['model_name'][0]
model_path = Model.get_model_path(model_name=model_name, _workspace=run.experiment.workspace)
model = joblib.load(model_path)

# Score the held-out test set and attach the predictions.
df_test = pd.read_csv(args.test_data + '/test_data.csv')
df_test = creditrisk_onehot_encoder(df_test)
x_test = df_test.drop(['loanStatus'], axis=1)
y_predict = model.predict(x_test)
df_test['Prediction'] = y_predict
df_test.to_csv(os.path.join(args.batchinfer_data, "batchinfer_data.csv"), index=False)
# (fixed message typo: "prediction data with to" -> "prediction data to")
print(f"Wrote prediction data to {args.batchinfer_data}/batchinfer_data.csv")
from azureml.data import OutputFileDatasetConfig
from azureml.pipeline.steps import PythonScriptStep

# Scored output; register_on_complete publishes it as a named dataset once
# the step finishes.
batchinfer_data = OutputFileDatasetConfig(name="batchinfer_data").register_on_complete(name="CreditRiskBatchInferenceData",description = 'Batch Inference Data Output')
batchinfer_step = PythonScriptStep(
    name="RunBatchInference",
    script_name="BatchInference.py",
    compute_target=compute_target,
    runconfig=aml_run_config,
    arguments=["--test_data", test_data.as_input(),"--model_data", model_data.as_input(),"--batchinfer_data", batchinfer_data],
    outputs = [batchinfer_data],
    allow_reuse=True
)
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment

# Assemble and submit the 4-step pipeline; step ordering is inferred from the
# dataset inputs/outputs wired between the steps.
pipeline = Pipeline(ws, [dataprep_step, test_train_step, train_step,batchinfer_step])
experiment = Experiment(workspace=ws, name='CreditRiskPipeline')
run = experiment.submit(pipeline, show_output=True)
run.wait_for_completion()
```
| github_jupyter |
```
import os, sys
from glob import glob
sys.path.append("../")
sys.path.append('/Users/hongwan/GitHub/DarkHistory/')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from scipy.interpolate import interp1d, RegularGridInterpolator
from tqdm import *
import darkhistory.physics as phys
import darkhistory.history.tla_DP_DM_heating as tla
import pickle
from scipy.integrate import quad
from grf.grf import TransitionProbabilities, PerturbedProbability, FIRAS
from grf.units import *
from grf.pk_interp import PowerSpectrumGridInterpolator
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Load plot style
from plot_params import params
pylab.rcParams.update(params)
cols_default = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Non-linear matter power spectrum.
pspec_nonlin_matter = PowerSpectrumGridInterpolator("nonlin_matter")
# Non-linear baryon power spectrum from simulations, lower limit.
pspec_nonlin_baryon_lower = PowerSpectrumGridInterpolator("franken_lower")
# Non-linear baryon power spectrum from simulations, upper limit.
pspec_nonlin_baryon_upper = PowerSpectrumGridInterpolator("franken_upper")

# FIRAS constraint helpers, one per power-spectrum choice.
# Class containing results with non-linear matter power spectrum.
firas_nonlin = FIRAS(pspec_nonlin_matter)
# Class containing results with non-linear baryon power spectrum from simulations, lower limit.
firas_nonlin_baryon_low = FIRAS(pspec_nonlin_baryon_lower)
# Class containing results with non-linear baryon power spectrum from simulations, upper limit.
firas_nonlin_baryon_upp = FIRAS(pspec_nonlin_baryon_upper)

# Analytic PDF.
# BUG FIX: use a context manager so the pickle file handle is closed; the
# original `pickle.load(open(...))` leaked the descriptor.
with open('../data/analytic_pdf_grids/log_analytic_pdf_interp.npy', 'rb') as pdf_file:
    log_analytic_pdf_interp = pickle.load(pdf_file)
def xe_reion_func(rs):
    """Tanh reionization model for the free-electron fraction x_e.

    :param rs: redshift factor 1+z
    :return: model x_e at rs
    """
    # Model parameters.
    xe_max = 1. + phys.chi            # fully ionized value (H plus He contribution)
    dz = 0.7                          # transition width in redshift
    d_eta = 1.5 * rs**0.5 * dz        # corresponding width in eta = (1+z)^(3/2)
    z_mid = 8.8                       # reionization midpoint redshift
    eta = rs**(3/2)
    eta_mid = (1 + z_mid)**(3/2)
    # Smooth tanh step from 0 (high z) to xe_max (low z).
    return (xe_max / 2) * (1 + np.tanh((eta_mid - eta) / d_eta))
# Dark-photon mass; value is 1e-12/1e3 in the grf/DarkHistory unit system
# (presumably eV -- TODO confirm against grf.units).
m_Ap = 1e-12 / 1e3
# Redshift grid 1+z from 1600 down to 20, flipped so evolution runs forward in time.
rs_vec = np.flipud(np.arange(20, 1600, 0.01))
soln_ary = []        # histories with perturbed (inhomogeneous) conversion
soln_homo_ary = []   # histories with homogeneous conversion
hmax = 0.1           # max integrator step for the perturbed runs

def inj_rate(rs):
    # DM energy density injection rate per unit time; rho_DM scales as (1+z)^3.
    return phys.rho_DM * rs**3 / np.abs(phys.dtdz(rs))

# Scan over kinetic-mixing parameters epsilon.
for eps in tqdm_notebook([5e-13, 1e-13, 3e-14, 2e-14]):
    # Homogeneous-limit resonance redshifts and conversion probabilities.
    homo_resonance_info = firas_nonlin_baryon_low.P_tot(
        [m_Ap], eps, m_Ap, evolve_z=False
    )
    z_res = homo_resonance_info[0][0]
    P_res = firas_nonlin_baryon_low.P_trans(
        m_Ap, z_res, m_Ap, eps, evolve_z=False
    )
    print(z_res, P_res)

    def f_heating_homo(rs, xHI, xHeI, xHeII):
        # Smear each resonance's probability with a Gaussian of width sigma in
        # redshift so the ODE solver sees a smooth dP/dz.
        sigma = 0.5
        dP_dz = 0.
        for z,P in zip(z_res, P_res):
            dP_dz += P/np.sqrt(2*np.pi*sigma**2) * np.exp(-(rs - (1. + z))**2 / (2*sigma**2))
        return dP_dz

    # Thermal/ionization history with homogeneous conversion heating.
    res_homo = tla.get_history(
        rs_vec, f_heating=f_heating_homo, injection_rate=inj_rate,
        reion_switch=True, xe_reion_func=xe_reion_func, reion_rs=13, hmax=0.2,
        coll_ion=True
    )
    soln_homo_ary.append(res_homo)

    def f_heating(rs, xHI, xHeI, xHeII):
        # Perturbed conversion: dP/dz marginalized over the density PDF
        # (lognormal), scaled by eps^2.
        dP_dz = firas_nonlin_baryon_low._dP_dz(
            [rs-1.], m_Ap, k_min=1e-4, k_max=1e4, omega=[m_Ap],
            x_e_ary=np.array([1. - xHI]),
            pdf = 'lognormal', one_plus_delta_bound=1e2,
            Ap_DM = True, eng_weight=True
        )[0][0][0] * eps**2
        # print(rs, dP_dz)
        return dP_dz

    res = tla.get_history(
        rs_vec, f_heating=f_heating, injection_rate=inj_rate,
        reion_switch=True, xe_reion_func=xe_reion_func, reion_rs=13, hmax=hmax,
        coll_ion=True
    )
    soln_ary.append(res)
# Baseline LCDM history (no dark-photon heating) for comparison.
std = tla.get_history(rs_vec, reion_switch=True, xe_reion_func=xe_reion_func, heat_switch=False, reion_rs=13)
# Baseline CMB optical depth (history columns: 0 = T_K, 1 = x_e).
std_optical_depth = phys.get_optical_depth(np.flipud(rs_vec), np.flipud(std[:,1]))

# --- Homogeneous conversion: T_K (top) and x_e (bottom) vs 1+z, zoomed ---
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.loglog()
plt_labels=[
    r'$\epsilon = 5 \times 10^{-13}$', r'$\epsilon = 10^{-13}$',
    r'$\epsilon = 3 \times 10^{-14}$', r'$\epsilon = 2 \times 10^{-14}$'
]
for soln,label in zip(soln_homo_ary, plt_labels):
    ax1.plot(rs_vec, soln[:,0]/phys.kB, label=label)
ax1.plot(rs_vec, std[:,0]/phys.kB, 'k:', label=r'$\Lambda$CDM')
ax1.set_ylabel(r'$T_K$ [K]')
ax1.axis([20, 200, 1e3, 2e4])
ax1.set_title(r'\textbf{Homogeneous Conversion}')
ax2.loglog()
for soln,label in zip(soln_homo_ary, plt_labels):
    ax2.plot(rs_vec, soln[:,1], label=label)
    # Excess optical depth relative to LCDM for this epsilon.
    print(phys.get_optical_depth(np.flipud(rs_vec), np.flipud(soln[:,1])) - std_optical_depth)
ax2.plot(rs_vec, std[:,1], 'k:', label=r'$\Lambda$CDM')
ax2.set_xlabel(r'$1+z$')
ax2.set_ylabel(r'$x_e$')
ax2.legend(fontsize=12)
ax2.axis([20, 200, 1e-4, 2])

# --- Perturbed conversion: same layout ---
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.loglog()
plt_labels=[
    r'$\epsilon = 5 \times 10^{-13}$', r'$\epsilon = 10^{-13}$',
    r'$\epsilon = 3 \times 10^{-14}$', r'$\epsilon = 2 \times 10^{-14}$'
]
for soln,label in zip(soln_ary, plt_labels):
    ax1.plot(rs_vec, soln[:,0]/phys.kB, label=label)
ax1.set_title(r'\textbf{Conversion with Perturbations}')
ax1.plot(rs_vec, std[:,0]/phys.kB, 'k--', label=r'$\Lambda$CDM')
ax1.set_ylabel(r'$T_K$ [K]')
ax1.axis([20, 200, 1, 1e5])
ax2.loglog()
for soln,label in zip(soln_ary, plt_labels):
    ax2.plot(rs_vec, soln[:,1], label=label)
    print(phys.get_optical_depth(np.flipud(rs_vec), np.flipud(soln[:,1])) - std_optical_depth)
ax2.plot(rs_vec, std[:,1], 'k:', label=r'$\Lambda$CDM')
plt.xlabel(r'$1+z$')
plt.ylabel(r'$x_e$')
plt.legend(fontsize=12, loc='upper right')
ax2.axis([20, 200, 1e-4, 2])

# --- Same quantities over the full range 20 <= 1+z <= 1600 ---
plt.figure()
ax2 = plt.gca()
ax2.loglog()
for soln in soln_homo_ary:
    ax2.plot(rs_vec, soln[:,1])
    print(phys.get_optical_depth(np.flipud(rs_vec), np.flipud(soln[:,1])) - std_optical_depth)
ax2.plot(rs_vec, std[:,1], 'k:')
plt.xlabel(r'$1+z$')
plt.ylabel(r'$x_e$')
ax2.axis([20, 1600, 1e-4, 2])
plt.figure()
ax2 = plt.gca()
ax2.loglog()
for soln in soln_homo_ary:
    ax2.plot(rs_vec, soln[:,0]/phys.kB)
ax2.plot(rs_vec, std[:,0]/phys.kB, 'k:')
plt.xlabel(r'$1+z$')
plt.ylabel(r'$T_K$ [K]')
ax2.axis([20, 1600, 1, 1e5])
plt.figure()
ax2 = plt.gca()
ax2.loglog()
for soln in soln_ary:
    ax2.plot(rs_vec, soln[:,1])
    print(phys.get_optical_depth(np.flipud(rs_vec), np.flipud(soln[:,1])) - std_optical_depth)
ax2.plot(rs_vec, std[:,1], 'k:')
plt.xlabel(r'$1+z$')
plt.ylabel(r'$x_e$')
ax2.axis([20, 1600, 1e-4, 2])
plt.figure()
ax2 = plt.gca()
ax2.loglog()
for soln in soln_ary:
    ax2.plot(rs_vec, soln[:,0]/phys.kB)
ax2.plot(rs_vec, std[:,0]/phys.kB, 'k:')
plt.xlabel(r'$1+z$')
plt.ylabel(r'$T_K$ [K]')
ax2.axis([20, 1600, 1, 1e5])
| github_jupyter |
```
# Mount Google Drive so the notebook can read/write the TFM project files.
from google.colab import drive
drive.mount('/content/gdrive')
import pandas as pd
import glob
import datetime as dt
import multiprocessing as mp
from datetime import datetime
import numpy as np
import plotly
from pandas import Series
import sys
from scipy import stats
from statsmodels.tsa.stattools import adfuller
from tqdm import tqdm, tqdm_notebook
import time
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
!pip install pyfolio
import pyfolio as pf
from scipy.stats import weightedtau
import os
# For sending GET requests from the API
import requests
cd gdrive/My Drive/TFM/
```
#**QUICK LOAD**
```
# raw trade data from https://public.bitmex.com/?prefix=data/trade/
# CLEANUP: the original if/else branches were identical, glob.glob() was
# re-evaluated several times per iteration, and DataFrame.append was removed
# in pandas 2.0 -- collect the frames and concat once instead.
bar_files = glob.glob("data/bars/new_features/*.csv")
frames = []
for i, file in enumerate(bar_files):
    frames.append(pd.read_csv(file))
    print('\r Percentge of files already Loaded:', round((i / len(bar_files)) * 100, 1),
          '%. There are', len(bar_files) - i, "files left", end='', flush=True)
Dollar_bars = pd.concat(frames) if frames else pd.DataFrame()
# Parse the timestamp strings and index the bars by time.
Dollar_bars['timestamp'] = Dollar_bars.timestamp.map(lambda t: datetime.strptime(t, "%Y-%m-%d %H:%M:%S.%f"))
Dollar_bars.set_index('timestamp', inplace=True)
Dollar_bars
# raw trade data from https://public.bitmex.com/?prefix=data/trade/
# BUG FIX: in the original loop the else-branch assigned the appended result
# to `Dollar_bars` (the OTHER dataset), so Dollar_bars1 only ever held the
# first file and Dollar_bars was silently clobbered. Build the frame list
# and concat once (DataFrame.append was also removed in pandas 2.0).
bar_files1 = glob.glob("data/bars/*.csv")
frames1 = []
for i, file in enumerate(bar_files1):
    frames1.append(pd.read_csv(file))
    print('\r Percentge of files already Loaded:', round((i / len(bar_files1)) * 100, 1),
          '%. There are', len(bar_files1) - i, "files left", end='', flush=True)
Dollar_bars1 = pd.concat(frames1) if frames1 else pd.DataFrame()
# Parse timestamps, index by time, and drop redundant raw columns.
Dollar_bars1['timestamp'] = Dollar_bars1.timestamp.map(lambda t: datetime.strptime(t, "%Y-%m-%d %H:%M:%S.%f"))
Dollar_bars1.set_index('timestamp', inplace=True)
Dollar_bars1.drop(columns=['timestamp.1', 'foreignNotional', 'grossValue', 'homeNotional'], inplace=True)
Dollar_bars1
from plotly.subplots import make_subplots
import plotly.graph_objects as go

# Single-panel figure (secondary y-axis enabled but unused here).
fig = make_subplots(rows=1, cols=1,specs=[[{"secondary_y": True}]])
fig.add_trace(go.Scatter(
    x=Dollar_bars1.index,
    y=Dollar_bars1['close'],
    name="ETH closing price",
    mode = 'lines',
    textfont_family="Arial_Black"),
    row= 1 ,
    col= 1 )
# Legend inside the plot area, top-left.
fig.update_layout(
    legend=dict(
        x=0.0,
        y=0.98,
        traceorder="normal",
        font=dict(
            family="sans-serif",
            size=12,
            color="black"
        ),
    )
)
# Materialize the index as a 'timestamp' column while keeping the frame
# indexed by timestamp.
Dollar_bars1['timestamp'] = Dollar_bars1.index
Dollar_bars1.index = Dollar_bars1['timestamp']
Dollar_bars1
```
# classes and methods definitions
```
class MultiProcessingFunctions:
    """ This static functions in this class enable multi-processing"""
    def __init__(self):
        pass

    @staticmethod
    def lin_parts(num_atoms, num_threads):
        """ This function partitions a list of atoms in subsets (molecules) of equal size.
        An atom is a set of indivisible set of tasks.

        :param num_atoms: (int) number of atoms to partition
        :param num_threads: (int) number of partitions wanted
        :return: (np.array) monotonic index boundaries, length min(num_threads, num_atoms) + 1
        """
        # partition of atoms with a single loop
        parts = np.linspace(0, num_atoms, min(num_threads, num_atoms) + 1)
        parts = np.ceil(parts).astype(int)
        return parts

    @staticmethod
    def nested_parts(num_atoms, num_threads, upper_triangle=False):
        """ This function enables parallelization of nested loops.

        Boundaries are sized so each molecule carries a comparable workload
        when the inner loop runs over a triangular index set.

        :param num_atoms: (int) number of atoms
        :param num_threads: (int) number of partitions wanted
        :param upper_triangle: (bool) if True the first rows are heaviest, so
            reverse the part sizes
        :return: (np.array) index boundaries
        """
        # BUG FIX: seed the running list with 0 -- the original started from an
        # empty list, so parts[-1] raised IndexError on the first iteration.
        parts = [0]
        num_threads_ = min(num_threads, num_atoms)
        for num in range(num_threads_):
            # Positive root of part^2 + part = parts[-1]^2 + parts[-1] + share.
            part = 1 + 4 * (parts[-1] ** 2 + parts[-1] + num_atoms * (num_atoms + 1.) / num_threads_)
            part = (-1 + part ** .5) / 2.
            parts.append(part)
        parts = np.round(parts).astype(int)
        if upper_triangle:  # the first rows are heaviest
            parts = np.cumsum(np.diff(parts)[::-1])
            parts = np.append(np.array([0]), parts)
        return parts

    @staticmethod
    def mp_pandas_obj(func, pd_obj, num_threads=24, mp_batches=1, lin_mols=True, **kargs):
        """ Parallelize func over the atoms listed in pd_obj.

        :param func: (callable) function to be parallelized (receives a molecule)
        :param pd_obj: (tuple) Element 0, is name of argument used to pass the molecule;
            Element 1, is the list of atoms to be grouped into a molecule
        :param num_threads: (int) number of threads
        :param mp_batches: (int) number of batches
        :param lin_mols: (bool) Tells if the method should use linear or nested partitioning
        :param kargs: (var args) forwarded to func
        :return: (DataFrame / Series / list) concatenated per-molecule results
        """
        if lin_mols:
            parts = MultiProcessingFunctions.lin_parts(len(pd_obj[1]), num_threads * mp_batches)
        else:
            parts = MultiProcessingFunctions.nested_parts(len(pd_obj[1]), num_threads * mp_batches)

        jobs = []
        for i in range(1, len(parts)):
            job = {pd_obj[0]: pd_obj[1][parts[i - 1]:parts[i]], 'func': func}
            job.update(kargs)
            jobs.append(job)

        if num_threads == 1:
            out = MultiProcessingFunctions.process_jobs_(jobs)
        else:
            out = MultiProcessingFunctions.process_jobs(jobs, num_threads=num_threads)

        # Stitch the per-molecule results back together in index order.
        # pd.concat replaces the DataFrame/Series.append loop, which was
        # removed in pandas 2.0.
        if isinstance(out[0], (pd.DataFrame, pd.Series)):
            return pd.concat(out).sort_index()
        return out

    @staticmethod
    def process_jobs_(jobs):
        """ Run jobs sequentially, for debugging """
        out = []
        for job in jobs:
            out_ = MultiProcessingFunctions.expand_call(job)
            out.append(out_)
        return out

    @staticmethod
    def expand_call(kargs):
        """ Expand the arguments of a callback function, kargs['func'] """
        func = kargs['func']
        del kargs['func']
        out = func(**kargs)
        return out

    @staticmethod
    def report_progress(job_num, num_jobs, time0, task):
        """ Report progress to stderr as asynch jobs are completed. """
        msg = [float(job_num) / num_jobs, (time.time() - time0)/60.]
        msg.append(msg[1] * (1/msg[0] - 1))  # estimated minutes remaining
        time_stamp = str(dt.datetime.fromtimestamp(time.time()))
        msg = time_stamp + ' ' + str(round(msg[0]*100, 2)) + '% '+task+' done after ' + \
            str(round(msg[1], 2)) + ' minutes. Remaining ' + str(round(msg[2], 2)) + ' minutes.'
        if job_num < num_jobs:
            sys.stderr.write(msg+'\r')
        else:
            sys.stderr.write(msg+'\n')
        return

    @staticmethod
    def process_jobs(jobs, task=None, num_threads=24):
        """ Run in parallel. jobs must contain a 'func' callback, for expand_call"""
        if task is None:
            task = jobs[0]['func'].__name__
        pool = mp.Pool(processes=num_threads)
        outputs = pool.imap_unordered(MultiProcessingFunctions.expand_call, jobs)
        out = []
        time0 = time.time()
        # Process asynchronous output, report progress
        for i, out_ in enumerate(outputs, 1):
            out.append(out_)
            MultiProcessingFunctions.report_progress(i, len(jobs), time0, task)
        pool.close()
        pool.join()  # this is needed to prevent memory leaks
        return out
def relative_strength_index(df, n):
    """Calculate the Relative Strength Index (RSI) with an n-period EWM.

    Adapted from:
    https://github.com/Crypto-toolbox/pandas-technical-indicators/blob/master/technical_indicators.py

    :param df: pandas.DataFrame with 'high' and 'low' columns; the loop walks
        index labels 0..index[-1], so a 0-based integer index is assumed
    :param n: (int) EWM span / minimum periods
    :return: pandas.Series of rounded RSI values, named 'RSI_<n>'
    """
    up_moves = [0]
    down_moves = [0]
    i = 0
    # Directional movement: per bar, only the dominant positive move counts.
    while i + 1 <= df.index[-1]:
        delta_high = df.loc[i + 1, 'high'] - df.loc[i, 'high']
        delta_low = df.loc[i, 'low'] - df.loc[i + 1, 'low']
        up_moves.append(delta_high if (delta_high > delta_low and delta_high > 0) else 0)
        down_moves.append(delta_low if (delta_low > delta_high and delta_low > 0) else 0)
        i += 1
    pos_di = pd.Series(up_moves).ewm(span=n, min_periods=n).mean()
    neg_di = pd.Series(down_moves).ewm(span=n, min_periods=n).mean()
    return pd.Series(round(pos_di * 100. / (pos_di + neg_di)), name='RSI_' + str(n))
#SNIPPET 3.1 DAILY VOLATILITY ESTIMATES
def get_daily_vol(close, lookback=100):
    """Daily-return volatility via an exponentially weighted moving std.

    :param close: (pd.Series) closing prices indexed by timestamp
    :param lookback: (int) EWM span used for the std estimate
    :return: (pd.Series) daily volatility, aligned to the tail of close
    """
    print('Calculating daily volatility for dynamic thresholds')
    # Position of the bar at least one day before each timestamp.
    prev_locs = close.index.searchsorted(close.index - pd.Timedelta(days=1))
    prev_locs = prev_locs[prev_locs > 0]
    # Map each usable timestamp to its ~1-day-earlier counterpart.
    prev_ts = pd.Series(close.index[prev_locs - 1],
                        index=close.index[close.shape[0] - prev_locs.shape[0]:])
    daily_returns = close.loc[prev_ts.index] / close.loc[prev_ts.values].values - 1
    return daily_returns.ewm(span=lookback).std()
def get_t_events(raw_price, threshold):
    """Symmetric CUSUM filter: sample timestamps where cumulative log returns
    drift beyond +/- threshold.

    :param raw_price: (series) of close prices.
    :param threshold: (float) when the abs(change) is larger than the threshold, the
        function captures it as an event.
    :return: (datetime index vector) vector of datetimes when the events occurred. This is used later to sample.
    """
    print('Applying Symmetric CUSUM filter.')
    event_times = []
    cusum_pos = 0
    cusum_neg = 0
    # log returns
    log_ret = np.log(raw_price).diff().dropna()
    # Walk the series, resetting the relevant accumulator on each trigger.
    for ts in tqdm(log_ret.index[1:]):
        cusum_pos = max(0.0, float(cusum_pos + log_ret.loc[ts]))
        cusum_neg = min(0.0, float(cusum_neg + log_ret.loc[ts]))
        if cusum_neg < -threshold:
            cusum_neg = 0
            event_times.append(ts)
        elif cusum_pos > threshold:
            cusum_pos = 0
            event_times.append(ts)
    return pd.DatetimeIndex(event_times)
# SNIPPET 3.4 ADDING A VERTICAL BARRIER
def add_vertical_barrier(t_events, close, num_days=5):
    """Timestamp of the vertical (time-out) barrier for each event.

    :param t_events: (series) series of events (symmetric CUSUM filter)
    :param close: (series) close prices
    :param num_days: (int) maximum number of days a trade can be active
    :return: (series) barrier timestamps indexed by event time; events whose
        barrier falls past the end of the data are dropped (NaNs at end)
    """
    expiry_locs = close.index.searchsorted(t_events + pd.Timedelta(days=num_days))
    expiry_locs = expiry_locs[expiry_locs < close.shape[0]]
    return pd.Series(close.index[expiry_locs], index=t_events[:expiry_locs.shape[0]])
#SNIPPET 3.2 TRIPLE-BARRIER LABELING METHOD
def apply_pt_sl_on_t1(close, events, pt_sl, molecule):
    """Find the earliest touch of the profit-taking / stop-loss barriers.

    :param close: (series) close prices
    :param events: (series) of indices that signify "events"
    :param pt_sl: (array) element 0, indicates the profit taking level;
        element 1 is stop loss level
    :param molecule: (an array) a set of datetime index values for processing
    :return: (dataframe) timestamps at which each barrier was touched
        (columns 't1', 'sl', 'pt'; NaT when a barrier was never touched)
    """
    # apply stop loss/profit taking, if it takes place before t1 (end of event)
    events_ = events.loc[molecule]
    out = events_[['t1']].copy(deep=True)
    if pt_sl[0] > 0:
        pt = pt_sl[0] * events_['trgt']
    else:
        # All-NaN: profit-taking barrier disabled (explicit dtype avoids the
        # object-dtype default of an empty Series in newer pandas).
        pt = pd.Series(index=events.index, dtype='float64')
    if pt_sl[1] > 0:
        sl = -pt_sl[1] * events_['trgt']
    else:
        sl = pd.Series(index=events.index, dtype='float64')  # stop-loss disabled
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; items() is the
    # supported spelling (identical behavior).
    for loc, t1 in events_['t1'].fillna(close.index[-1]).items():
        df0 = close[loc:t1]  # path prices
        df0 = (df0 / close[loc] - 1) * events_.at[loc, 'side']  # path returns
        out.loc[loc, 'sl'] = df0[df0 < sl[loc]].index.min()  # earliest stop loss
        out.loc[loc, 'pt'] = df0[df0 > pt[loc]].index.min()  # earliest profit taking
    return out
# SNIPPET 3.3 GETTING THE TIME OF FIRST TOUCH
def get_events(close, t_events, pt_sl, target, min_ret, num_threads,
               vertical_barrier_times=False, side=None):
    """
    :param close: (series) Close prices
    :param t_events: (series) of t_events.
        These are timestamps that will seed every triple barrier.
    :param pt_sl: (2 element array) element 0, indicates the profit taking percentage;
        element 1 is stop loss percentage.
        A non-negative float that sets the width of the two barriers.
        A 0 value means that the respective horizontal barrier will be disabled.
    :param target: (series) of values that are used (in conjunction with pt_sl)
        to determine the width of the barrier.
    :param min_ret: (float) The minimum target return required for running a triple barrier search.
    :param num_threads: (int) The number of threads concurrently used by the function.
    :param vertical_barrier_times: (series) A pandas series with the timestamps of the vertical barriers.
    :param side: (series) Side of the bet (long/short) as decided by the primary model
    :return: (data frame) of events
            -events.index is event's starttime
            -events['t1'] is event's endtime
            -events['trgt'] is event's target
            -events['side'] (optional) implies the algo's position side
    """
    # 1) Get target: keep only sampled timestamps with a large-enough target.
    target = target.loc[target.index.intersection(t_events)]
    target = target[target > min_ret]  # min_ret
    # 2) Get vertical barrier (max holding period); NaT disables the time-out.
    if vertical_barrier_times is False:
        vertical_barrier_times = pd.Series(pd.NaT, index=t_events)
    # 3) Form events object, apply stop loss on vertical barrier
    if side is None:
        # No primary-model side: assume long and a symmetric barrier width.
        side_ = pd.Series(1., index=target.index)
        pt_sl_ = [pt_sl[0], pt_sl[0]]
    else:
        side_ = side.loc[target.index]
        pt_sl_ = pt_sl[:2]
    events = pd.concat({'t1': vertical_barrier_times, 'trgt': target, 'side': side_},
                       axis=1)
    events = events.dropna(subset=['trgt'])
    # Apply Triple Barrier, parallelized over the event index.
    df0 = MultiProcessingFunctions.mp_pandas_obj(func=apply_pt_sl_on_t1,
                                                 pd_obj=('molecule', events.index),
                                                 num_threads=num_threads,
                                                 close=close,
                                                 events=events,
                                                 pt_sl=pt_sl_)
    # Earliest of sl/pt/t1 becomes the event end time.
    events['t1'] = df0.dropna(how='all').min(axis=1)  # pd.min ignores nan
    if side is None:
        events = events.drop('side', axis=1)
    return events
def barrier_touched(out_df):
    """Label each event by which barrier its realized return touched.

    :param out_df: (DataFrame) containing the returns ('ret') and target ('trgt')
    :return: (DataFrame) same frame with a 'bin' column added:
        1 = top barrier, -1 = bottom barrier, 0 = vertical barrier
    """
    labels = []
    for event_time in out_df.index:
        realized = out_df.loc[event_time, 'ret']
        width = out_df.loc[event_time, 'trgt']
        if realized > 0.0 and realized > width:
            labels.append(1)      # top barrier reached
        elif realized < 0.0 and realized < -width:
            labels.append(-1)     # bottom barrier reached
        else:
            labels.append(0)      # vertical barrier reached
    out_df['bin'] = labels
    return out_df
# SNIPPET 3.5 LABELING FOR SIDE AND SIZE
def get_bins(triple_barrier_events, close):
    """Meta-label triple-barrier events with returns and bin labels.

    :param triple_barrier_events: (data frame)
        -events.index is event's starttime
        -events['t1'] is event's endtime
        -events['trgt'] is event's target
        -events['side'] (optional) implies the algo's position side
        Case 1: ('side' not in events): bin in (-1,1) <- label by price action
        Case 2: ('side' in events): bin in (0,1) <- label by pnl (meta-labeling)
    :param close: (series) close prices
    :return: (data frame) of meta-labeled events
    """
    # 1) Align prices with their respective events
    valid_events = triple_barrier_events.dropna(subset=['t1'])
    all_dates = valid_events.index.union(valid_events['t1'].values).drop_duplicates()
    prices = close.reindex(all_dates, method='bfill')
    # 2) Build the output frame from log returns over each event window
    # (log returns avoid skew for short positions).
    out_df = pd.DataFrame(index=valid_events.index)
    out_df['ret'] = np.log(prices.loc[valid_events['t1'].values].values) - np.log(prices.loc[valid_events.index])
    out_df['trgt'] = valid_events['trgt']
    # Meta labeling: events that were correct have positive returns.
    if 'side' in valid_events:
        out_df['ret'] = out_df['ret'] * valid_events['side']  # meta-labeling
    # Label 0 when only the vertical barrier was reached.
    out_df = barrier_touched(out_df)
    # Meta labeling: label incorrect events with a 0.
    if 'side' in valid_events:
        out_df.loc[out_df['ret'] <= 0, 'bin'] = 0
    # Transform the log returns back to simple returns.
    out_df['ret'] = np.exp(out_df['ret']) - 1
    # Carry the side through for fitting a meta-label model later.
    if 'side' in triple_barrier_events.columns:
        out_df['side'] = triple_barrier_events['side']
    return out_df
def get_weight_ffd(differencing_amt, threshold, weight_vector_len):
    """
    Source: Chapter 5, AFML (section 5.4.2)
    Generate the weights used to compute a fractionally differentiated series.

    :param differencing_amt: (double) a amt (fraction) by which the series is differenced
    :param threshold: (double) discard weights smaller (in abs) than this
    :param weight_vector_len: (int) maximum length of the generated vector
    :return: (np.array) column vector of weights, oldest lag first
    """
    w = [1.]
    k = 1
    steps = 0
    while True:
        # Recursive binomial-series weight for lag k.
        next_w = -w[-1] / k * (differencing_amt - k + 1)
        if abs(next_w) < threshold:
            break
        w.append(next_w)
        k += 1
        steps += 1
        if steps == weight_vector_len - 1:
            break
    return np.array(w[::-1]).reshape(-1, 1)
def frac_diff_ffd(price_series, differencing_amt, threshold=1e-5):
    """
    Source: Chapter 5, AFML (section 5.5);
    Source code: https://github.com/philipperemy/fractional-differentiation-time-series
    References:
    https://www.wiley.com/en-us/Advances+in+Financial+Machine+Learning-p-9781119482086
    https://wwwf.imperial.ac.uk/~ejm/M3S8/Problems/hosking81.pdf
    https://en.wikipedia.org/wiki/Fractional_calculus

    Fixed-width-window fractional differentiation: compute the weights once,
    then slide them across the price series.

    :param price_series: (series) of prices. These could be raw prices or log of prices
    :param differencing_amt: (double) a amt (fraction) by which the series is differenced
    :param threshold: (double) used to discard weights that are less than the threshold
    :return: (np.array) fractionally differenced series
    """
    # compute weights for the longest series
    ffd_weights = get_weight_ffd(differencing_amt, threshold, len(price_series))
    window = len(ffd_weights) - 1
    # The first `window` points lack full history; pad with zeros as before.
    diffed = [0] * window
    for end in range(window, len(price_series)):
        diffed.append(np.dot(ffd_weights.T, price_series[end - window:end + 1])[0])
    return np.array(diffed)
def compare_adf_stat_with_critical_values(result):
    """Compare the ADF test statistic against the first critical value (1%).

    :param result: (tuple) output of statsmodels adfuller -- element 0 is the
        test statistic, element 4 is the critical-values dict ('1%' first)
    :return: (bool) True when |stat| >= |critical value|, i.e. stationary at 1%
    """
    critical_value = abs(next(iter(result[4].items()))[1])
    test_statistic = abs(round(result[0], 3))
    return test_statistic >= critical_value
def compute_differencing_amt(price_series, threshold=1e-5):
    """ Function iterates over the differencing amounts and computes the smallest amt that will make the
    series stationary

    :price_series (pd.Series) price series
    :threshold (float) pass-thru to fracdiff function
    :return (float) differencing amount
    """
    def _is_stationary(amt):
        # Fractionally difference the log-price series and ADF-test the result.
        # NOTE: regression must be lowercase 'c' (constant) — uppercase 'C' is
        # not an accepted adfuller option.
        fracs = frac_diff_ffd(price_series.apply(np.log), differencing_amt=amt, threshold=threshold)
        result = adfuller(fracs, maxlag=2, regression='c', autolag='AIC', store=False, regresults=False)
        return compare_adf_stat_with_critical_values(result)

    test_range = np.arange(0.0, 1., 0.05)
    # Coarse pass: find the first 0.05-step amount that achieves stationarity.
    for i, coarse_amt in enumerate(test_range):
        if not _is_stationary(coarse_amt):
            continue
        if i == 0:
            # Preserves the original behaviour: a series already stationary at
            # 0.0 still reports one coarse step of differencing.
            return test_range[1]
        # Fine pass: refine in 0.01 steps between the neighbouring coarse
        # amounts. Guard the upper bound — the original indexed
        # test_range[i+1] and raised IndexError when i was the last index.
        upper = test_range[i + 1] if i + 1 < len(test_range) else test_range[i] + 0.05
        for fine_amt in np.arange(test_range[i - 1], upper, 0.01):
            if _is_stationary(fine_amt):
                return fine_amt
        # No finer amount passed; fall back to the coarse amount that did
        # (the original loop could run past the end of the narrow range).
        return coarse_amt
    # Nothing in [0, 1) made the series stationary: full differencing.
    # (The original's `diff_amt = 1.0` fallback was unreachable — the loop
    # indexed past test_range before it could take effect.)
    return 1.0
def bbands(close_prices, window, no_of_stdev):
    """Exponentially-weighted Bollinger bands.

    :param close_prices: (pd.Series) close prices
    :param window: (int) span of the exponential moving window
    :param no_of_stdev: (float) band width in standard deviations
    :return: (tuple) (ewm mean, upper band, lower band)
    """
    ewm = close_prices.ewm(span=window)
    center = ewm.mean()
    spread = ewm.std() * no_of_stdev
    return center, center + spread, center - spread
def get_adx(high, low, close, lookback):
    """Average Directional Index (ADX) with its +DI / -DI components.

    :param high: (pd.Series) high prices
    :param low: (pd.Series) low prices
    :param close: (pd.Series) close prices
    :param lookback: (int) smoothing / true-range window
    :return: (tuple) (+DI, -DI, smoothed ADX)
    """
    # Directional moves: keep only upward high moves and downward low moves.
    up_move = high.diff()
    down_move = low.diff()
    up_move[up_move < 0] = 0
    down_move[down_move > 0] = 0
    # True range = max of the three classic candidates, row-wise.
    prev_close = close.shift(1)
    true_range = pd.concat(
        [pd.DataFrame(high - low),
         pd.DataFrame(abs(high - prev_close)),
         pd.DataFrame(abs(low - prev_close))],
        axis=1, join='inner').max(axis=1)
    avg_true_range = true_range.rolling(lookback).mean()
    # Directional indicators normalised by ATR.
    plus_di = 100 * (up_move.ewm(alpha=1 / lookback).mean() / avg_true_range)
    minus_di = abs(100 * (down_move.ewm(alpha=1 / lookback).mean() / avg_true_range))
    dx = 100 * abs(plus_di - minus_di) / abs(plus_di + minus_di)
    adx = ((dx.shift(1) * (lookback - 1)) + dx) / lookback
    return plus_di, minus_di, adx.ewm(alpha=1 / lookback).mean()
#SNIPPET 4.1 ESTIMATING THE UNIQUENESS OF A LABEL
def mpNumCoEvents(closeIdx,t1,molecule):
    """Compute the number of concurrent events per bar.

    :param closeIdx: (pd.DatetimeIndex) index of the close-price bars
    :param t1: (pd.Series) event end times, indexed by event start times
    :param molecule: event start dates to process; molecule[0] is the date of
        the first event on which the weight will be computed, molecule[-1] the
        last. Any event that starts before t1[molecule].max() impacts the count.
    :return: (pd.Series) concurrent-event count for each bar spanned
    """
    #1) find events that span the period [molecule[0],molecule[-1]]
    t1=t1.fillna(closeIdx[-1]) # unclosed events still must impact other weights
    t1=t1[t1>=molecule[0]] # events that end at or after molecule[0]
    t1=t1.loc[:t1[molecule].max()]# events that start at or before t1[molecule].max()
    #2) count events spanning a bar
    iloc=closeIdx.searchsorted(np.array([t1.index[0],t1.max()]))
    count=pd.Series(0,index=closeIdx[iloc[0]:iloc[1]+1])
    # .items() replaces Series.iteritems(), which was removed in pandas 2.0
    for tIn,tOut in t1.items():count.loc[tIn:tOut]+=1.
    return count.loc[molecule[0]:t1[molecule].max()]
#SNIPPET 4.2 ESTIMATING THE AVERAGE UNIQUENESS OF EACH LABEL
def mpSampleTW(t1,numCoEvents,molecule):
    """Derive the average uniqueness of each event over its lifespan.

    :param t1: (pd.Series) event end times indexed by event start times
    :param numCoEvents: (pd.Series) concurrent-event count per bar (snippet 4.1)
    :param molecule: event start times to process
    :return: (pd.Series) average uniqueness weight per event
    """
    # Explicit float dtype: a value-less Series would otherwise warn / default
    # to object dtype under newer pandas.
    wght=pd.Series(index=molecule,dtype=float)
    # .items() replaces Series.iteritems(), which was removed in pandas 2.0
    for tIn,tOut in t1.loc[wght.index].items():
        wght.loc[tIn]=(1./numCoEvents.loc[tIn:tOut]).mean()
    return wght
#SNIPPET 4.10 DETERMINATION OF SAMPLE WEIGHT BY ABSOLUTE RETURN ATTRIBUTION
def mpSampleW(t1,numCoEvents,close,molecule):
    """Derive sample weight by absolute return attribution.

    :param t1: (pd.Series) event end times indexed by event start times
    :param numCoEvents: (pd.Series) concurrent-event count per bar (snippet 4.1)
    :param close: (pd.Series) close prices
    :param molecule: event start times to process
    :return: (pd.Series) absolute attributed-return weight per event
    """
    ret=np.log(close).diff() # log-returns, so that they are additive
    # Explicit float dtype: a value-less Series would otherwise warn / default
    # to object dtype under newer pandas.
    wght=pd.Series(index=molecule,dtype=float)
    # .items() replaces Series.iteritems(), which was removed in pandas 2.0
    for tIn,tOut in t1.loc[wght.index].items():
        wght.loc[tIn]=(ret.loc[tIn:tOut]/numCoEvents.loc[tIn:tOut]).sum()
    return wght.abs()
def getTrainTimes(t1,testTimes):
    """
    Given testTimes, find the times of the training observations
    -t1.index: Time when the observation started
    -t1.value: Time when the observation ended
    -testTimes: Times of testing observations
    Drops every training observation whose [start, end] span overlaps any
    test span (starts inside it, ends inside it, or envelops it).
    """
    trn=t1.copy(deep=True)
    # .items() replaces Series.iteritems(), which was removed in pandas 2.0
    for i,j in testTimes.items():
        df0=trn[(i<=trn.index)&(trn.index<=j)].index # train starts within test
        df1=trn[(i<=trn)&(trn<=j)].index # train ends within test
        df2=trn[(trn.index<=i)&(j<=trn)].index # train envelops test
        trn=trn.drop(df0.union(df1).union(df2))
    return trn
# AFML, snippet 7.2
def getEmbargoTimes(times,pctEmbargo):
    """Get the embargo time for each bar.

    :param times: (pd.DatetimeIndex or similar) bar timestamps
    :param pctEmbargo: (float) fraction of the bar count to embargo
    :return: (pd.Series) for each bar time (index), the time until which
        following observations are embargoed; the last `step` bars are
        embargoed until the final bar.
    """
    step=int(times.shape[0]*pctEmbargo)
    if step==0:
        mbrg=pd.Series(times,index=times)
    else:
        mbrg=pd.Series(times[step:],index=times[:-step])
        # pd.concat replaces Series.append, which was removed in pandas 2.0
        mbrg=pd.concat([mbrg,pd.Series(times[-1],index=times[-step:])])
    return mbrg
def embargo1(test_times, t1, pct_embargo=0.01): # done before purging
    """Map each test time to its embargoed end time.

    :param test_times: times of the test observations (labels into t1)
    :param t1: (pd.Series) bar times to embargo
    :param pct_embargo: (float) fraction of bars to embargo
    :return: (pd.Series) embargoed end time for each test time
    """
    embargoed = getEmbargoTimes(t1, pct_embargo)
    return embargoed.loc[test_times]
from sklearn.model_selection._split import _BaseKFold
class PurgedKFold(_BaseKFold):
    """Extend KFold to work with labels that span intervals.

    The train set is purged of observations overlapping test-label intervals.
    The test set is assumed contiguous (shuffle=False), with no training
    samples in between.
    """
    def __init__(self,n_splits=3,t1=None,pctEmbargo=0.):
        if not isinstance(t1,pd.Series):
            raise ValueError('Label Through Dates must be a pd.Series')
        super(PurgedKFold,self).__init__(n_splits,shuffle=False,random_state=None)
        self.t1=t1
        self.pctEmbargo=pctEmbargo
    def split(self,X,y=None,groups=None):
        """Yield (train_indices, test_indices) pairs with purging and embargo."""
        if (X.index==self.t1.index).sum()!=len(self.t1):
            raise ValueError('X and ThruDateValues must have the same index')
        indices=np.arange(X.shape[0])
        mbrg=int(X.shape[0]*self.pctEmbargo)
        test_starts=[(i[0],i[-1]+1) for i in np.array_split(np.arange(X.shape[0]),self.n_splits)]
        for i,j in test_starts:
            t0=self.t1.index[i] # start of test set
            test_indices=indices[i:j]
            # .iloc: test_indices are positions; plain [] with an integer array
            # is deprecated (label-based) on newer pandas for a datetime index.
            maxT1Idx=self.t1.index.searchsorted(self.t1.iloc[test_indices].max())
            train_indices=self.t1.index.searchsorted(self.t1[self.t1<=t0].index)
            if maxT1Idx<X.shape[0]:# right train (with embargo)
                train_indices=np.concatenate((train_indices,indices[maxT1Idx+mbrg:]))
            yield train_indices,test_indices
def cvScore(clf,X,y,sample_weight,scoring='neg_log_loss',t1=None,cv=None,cvGen=None, pctEmbargo=None):
    """Cross-validated score of clf using a purged k-fold generator.

    :param clf: classifier exposing fit / predict / predict_proba
    :param X: (pd.DataFrame) features
    :param y: (pd.Series) labels
    :param sample_weight: (pd.Series) per-sample weights
    :param scoring: 'neg_log_loss' or 'accuracy'
    :param t1, cv, pctEmbargo: used to build a PurgedKFold when cvGen is None
    :param cvGen: optional pre-built CV generator
    :return: (np.array) one score per fold
    """
    if scoring not in ['neg_log_loss','accuracy']:
        raise Exception('wrong scoring method.')
    from sklearn.metrics import log_loss,accuracy_score
    if cvGen is None:
        cvGen=PurgedKFold(n_splits=cv,t1=t1,pctEmbargo=pctEmbargo) # purged
    scores=[]
    for train_idx,test_idx in cvGen.split(X=X):
        model=clf.fit(X=X.iloc[train_idx,:],y=y.iloc[train_idx],
                      sample_weight=sample_weight.iloc[train_idx].values)
        test_w=sample_weight.iloc[test_idx].values
        if scoring=='neg_log_loss':
            fold_score=-log_loss(y.iloc[test_idx],model.predict_proba(X.iloc[test_idx,:]),
                                 sample_weight=test_w,labels=clf.classes_)
        else:
            fold_score=accuracy_score(y.iloc[test_idx],model.predict(X.iloc[test_idx,:]),
                                      sample_weight=test_w)
        scores.append(fold_score)
    return np.array(scores)
def crossValPlot2(skf,classifier,X,y,pred_times, eval_times):
    """Plot per-fold ROC curves, their mean and a +/-1 std band.

    :param skf: CV splitter whose split() accepts (X, y, pred_times, eval_times)
        (e.g. CombPurgedKFoldCV)
    :param classifier: estimator exposing fit / predict_proba
    :param X: (pd.DataFrame) features
    :param y: (pd.Series) binary labels (column 1 of predict_proba is used)
    :param pred_times, eval_times: (pd.Series) passed through to skf.split
    Side effect only: draws on a new matplotlib figure; returns None.
    """
    from itertools import cycle
    from sklearn.metrics import roc_curve, auc
    # NOTE(review): scipy.interp was removed in recent SciPy — confirm the
    # installed version, or this import fails (np.interp is the equivalent).
    from scipy import interp
    tprs = []
    aucs = []
    # Common FPR grid so per-fold TPR curves can be averaged point-wise.
    mean_fpr = np.linspace(0, 1, 100)
    idx = pd.IndexSlice
    f,ax = plt.subplots(figsize=(10,7))
    i = 0
    for train, test in skf.split(X, y, pred_times, eval_times):
        probas_ = (classifier.fit(X.iloc[idx[train]], y.iloc[idx[train]])
                   .predict_proba(X.iloc[idx[test]]))
        # Compute ROC curve and area the curve
        fpr, tpr, thresholds = roc_curve(y.iloc[idx[test]], probas_[:, 1])
        # Resample this fold's TPR onto the common grid; pin the origin to 0.
        tprs.append(interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)
        ax.plot(fpr, tpr, lw=1, alpha=0.3,
                label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
        i += 1
    # Chance diagonal for reference.
    ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
            label='Luck', alpha=.8)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    ax.plot(mean_fpr, mean_tpr, color='b',
            label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
            lw=2, alpha=.8)
    # Shaded +/-1 standard-deviation band around the mean ROC, clipped to [0, 1].
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                    label=r'$\pm$ 1 std. dev.')
    ax.set_xlim([-0.05, 1.05])
    ax.set_ylim([-0.05, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('Receiver operating characteristic example')
    ax.legend(bbox_to_anchor=(1,1))
def featImpMDI(fit, featNames):
    """Feature importance based on in-sample mean impurity reduction (MDI).

    :param fit: fitted bagging ensemble exposing .estimators_, each with
        .feature_importances_
    :param featNames: feature names, in the same order as the importances
    :return: (pd.DataFrame) per-feature 'mean' and 'std' importance,
        normalised so the means sum to 1
    """
    tree_imps = pd.DataFrame.from_dict(
        {tree_idx: tree.feature_importances_
         for tree_idx, tree in enumerate(fit.estimators_)},
        orient='index')
    tree_imps.columns = featNames
    tree_imps = tree_imps.replace(0, np.nan)  # because max_features=1
    imp = pd.concat({'mean': tree_imps.mean(),
                     'std': tree_imps.std() * tree_imps.shape[0] ** -.5}, axis=1)
    imp /= imp['mean'].sum()
    return imp
def featImpMDA(clf,X,y,cv,sample_weight,t1,pctEmbargo,scoring='neg_log_loss'):
    """Feature importance based on OOS score reduction (mean-decrease accuracy).

    For each purged CV fold the model is fit once; then each feature column is
    shuffled in the test set and the drop in OOS score is recorded.

    :param clf: classifier exposing fit / predict / predict_proba
    :param X: (pd.DataFrame) features
    :param y: (pd.Series) labels
    :param cv: (int) number of purged folds
    :param sample_weight: (pd.Series) per-sample weights
    :param t1: (pd.Series) label end times (for purging)
    :param pctEmbargo: (float) embargo fraction
    :param scoring: 'neg_log_loss' or 'accuracy'
    :return: (tuple) (importance DataFrame with 'mean'/'std', mean base score)
    """
    if scoring not in ['neg_log_loss','accuracy']:
        raise Exception('wrong scoring method.')
    from sklearn.metrics import log_loss,accuracy_score
    cvGen=PurgedKFold(n_splits=cv,t1=t1,pctEmbargo=pctEmbargo) # purged cv
    # Explicit float dtype: pd.Series() with no arguments is deprecated /
    # object-dtyped under newer pandas.
    scr0,scr1=pd.Series(dtype=float),pd.DataFrame(columns=X.columns)
    for i,(train,test) in enumerate(cvGen.split(X=X)):
        X0,y0,w0=X.iloc[train,:],y.iloc[train],sample_weight.iloc[train]
        X1,y1,w1=X.iloc[test,:],y.iloc[test],sample_weight.iloc[test]
        fit=clf.fit(X=X0,y=y0,sample_weight=w0.values)
        if scoring=='neg_log_loss':
            prob=fit.predict_proba(X1)
            scr0.loc[i]=-log_loss(y1,prob,sample_weight=w1.values,labels=clf.classes_)
        else:
            pred=fit.predict(X1)
            scr0.loc[i]=accuracy_score(y1,pred,sample_weight=w1.values)
        for j in X.columns:
            X1_=X1.copy(deep=True)
            np.random.shuffle(X1_[j].values) # permutation of a single column
            if scoring=='neg_log_loss':
                prob=fit.predict_proba(X1_)
                scr1.loc[i,j]=-log_loss(y1,prob,sample_weight=w1.values,labels=clf.classes_)
            else:
                pred=fit.predict(X1_)
                scr1.loc[i,j]=accuracy_score(y1,pred,sample_weight=w1.values)
    # Importance = relative degradation of the score after shuffling.
    imp=(-scr1).add(scr0,axis=0)
    if scoring=='neg_log_loss':imp=imp/-scr1
    else:imp=imp/(1.-scr1)
    imp=pd.concat({'mean':imp.mean(),'std':imp.std()*imp.shape[0]**-.5},axis=1)
    return imp,scr0.mean()
def auxFeatImpSFI(featNames,clf,X,labels,y,scoring,cvGen):
    """Single-feature importance (SFI): CV score of clf on each feature alone.

    :param featNames: iterable of feature names to score one at a time
    :param clf: classifier passed through to cvScore
    :param X: (pd.DataFrame) full feature matrix
    :param labels: (pd.DataFrame) must contain the sample-weight column 'w'
    :param y: (pd.Series) labels
    :param scoring: 'neg_log_loss' or 'accuracy', passed to cvScore
    :param cvGen: purged CV generator, passed to cvScore
    :return: (pd.DataFrame) per-feature 'mean' and 'std' of the CV scores
    """
    imp=pd.DataFrame(columns=['mean','std'])
    for featName in featNames:
        # Score using ONLY this feature, and honour the caller's scoring
        # method and CV generator. The original passed the full X, hard-coded
        # scoring='accuracy'/cv=10/pctEmbargo, and referenced an undefined
        # global `t1` — so it neither isolated the feature nor ran reliably.
        df0=cvScore(clf,X=X[[featName]],y=y,sample_weight=labels['w'],scoring=scoring,cvGen=cvGen)
        imp.loc[featName,'mean']=df0.mean()
        imp.loc[featName,'std']=df0.std()*df0.shape[0]**-.5
    return imp
#SNIPPET 8.5 COMPUTATION OF ORTHOGONAL FEATURES
def get_eVec(dot, varThres):
    """Eigen-decompose a dot-product (Gram) matrix and reduce its dimension.

    :param dot: (pd.DataFrame) symmetric dot-product matrix of features
    :param varThres: (float) fraction of variance the kept PCs must explain
    :return: (tuple) (eVal pd.Series, eVec pd.DataFrame) restricted to the
        leading principal components reaching varThres cumulative variance
    """
    # Eigen-pairs, sorted by descending eigenvalue.
    eigenvalues, eigenvectors = np.linalg.eigh(dot)
    order = eigenvalues.argsort()[::-1]
    eigenvalues, eigenvectors = eigenvalues[order], eigenvectors[:, order]
    # Label the components PC_1, PC_2, ...
    eVal = pd.Series(eigenvalues,
                     index=['PC_' + str(i + 1) for i in range(eigenvalues.shape[0])])
    eVec = pd.DataFrame(eigenvectors, index=dot.index, columns=eVal.index)
    eVec = eVec.loc[:, eVal.index]
    # Keep just enough components to reach the variance threshold.
    cumVar = eVal.cumsum() / eVal.sum()
    dim = cumVar.values.searchsorted(varThres)
    return eVal.iloc[:dim + 1], eVec.iloc[:, :dim + 1]
def orthoFeats(dfX, varThres=.95):
    """Given a dataframe dfX of features, compute the orthogonal features dfP.

    :param dfX: (pd.DataFrame) raw features
    :param varThres: (float) cumulative-variance threshold for get_eVec
    :return: (np.ndarray) standardized features projected onto the kept PCs
    """
    # Standardize, build the Gram matrix, then project onto the leading PCs.
    standardized = dfX.sub(dfX.mean(), axis=1).div(dfX.std(), axis=1)
    gram = pd.DataFrame(np.dot(standardized.T, standardized),
                        index=dfX.columns, columns=dfX.columns)
    eVal, eVec = get_eVec(gram, varThres)
    return np.dot(standardized, eVec)
def featImportance(trnsX,cont,n_estimators=1000,cv=10,max_samples=1.,numThreads=24, pctEmbargo=0,scoring='accuracy',method='SFI',minWLeaf=0.,**kargs):
    """Feature importance from a bagged decision-tree ensemble.

    :param trnsX: (pd.DataFrame) features
    :param cont: (pd.DataFrame) must contain 'bin' (labels), 'w' (weights)
        and 't1' (label end times)
    :param method: 'MDI', 'MDA' or 'SFI'
    :return: (tuple) (importance DataFrame, OOB score, OOS score)
    """
    n_jobs=(-1 if numThreads>1 else 1) # run 1 thread with ht_helper in dirac1
    #1) prepare classifier,cv. max_features=1, to prevent masking
    clf=DecisionTreeClassifier(criterion='entropy',max_features=1,class_weight='balanced',
        min_weight_fraction_leaf=minWLeaf)
    # NOTE(review): `base_estimator` was renamed `estimator` in sklearn >= 1.2
    # — confirm the installed version.
    clf=BaggingClassifier(base_estimator=clf,n_estimators=n_estimators,max_features=1.,
        max_samples=max_samples,oob_score=True,n_jobs=n_jobs)
    fit=clf.fit(X=trnsX,y=cont['bin'],sample_weight=cont['w'].values)
    oob=fit.oob_score_
    if method=='MDI':
        imp=featImpMDI(fit,featNames=trnsX.columns)
        oos=cvScore(clf,X=trnsX,y=cont['bin'],cv=cv,sample_weight=cont['w'],t1=cont['t1'],pctEmbargo=pctEmbargo,scoring=scoring).mean()
    elif method=='MDA':
        imp,oos=featImpMDA(clf,X=trnsX,y=cont['bin'],cv=cv,sample_weight=cont['w'],t1=cont['t1'],pctEmbargo=pctEmbargo,scoring=scoring)
    elif method=='SFI':
        cvGen=PurgedKFold(n_splits=cv,t1=cont['t1'],pctEmbargo=pctEmbargo)
        oos=cvScore(clf,X=trnsX,y=cont['bin'],sample_weight=cont['w'],
            scoring=scoring,cvGen=cvGen).mean()
        clf.n_jobs=1 # paralellize auxFeatImpSFI rather than clf
        # Keyword names now match auxFeatImpSFI's signature
        # (featNames, clf, X, labels, y, scoring, cvGen) — the original passed
        # trnsX=/cont=, which auxFeatImpSFI does not accept.
        imp=mpPandasObj(auxFeatImpSFI,('featNames',trnsX.columns),numThreads,clf=clf,X=trnsX,labels=cont,y=cont['bin'],scoring=scoring,cvGen=cvGen)
    return imp,oob,oos
def plotFeatImportance(pathOut,imp,oob,oos,method,tag=0,simNum=0,**kargs):
    """Plot mean feature-importance bars with std error bars and save to PNG.

    :param pathOut: (str) directory/prefix the PNG is written under
    :param imp: (pd.DataFrame) importance with 'mean' and 'std' columns
    :param oob: out-of-bag score (accepted for signature compatibility; only
        oos is rendered in the title)
    :param oos: out-of-sample score, shown in the figure title
    :param method: 'MDI' adjusts the x-axis and draws the 1/N reference line
    :param tag, simNum: identifiers embedded in the title / file name
    Side effects only: writes `featImportance_<simNum>.png` and closes the figure.
    """
    # plot mean imp bars with std
    plt.figure(figsize=(10,imp.shape[0]/5.))
    imp=imp.sort_values('mean',ascending=True)
    ax=imp['mean'].plot(kind='barh',color='b',alpha=.25,xerr=imp['std'],error_kw={'ecolor':'r'})
    if method=='MDI':
        plt.xlim([0,imp.sum(axis=1).max()])
        # 1/N line: importance of a feature under a uniform split.
        plt.axvline(1./imp.shape[0],linewidth=1,color='r',linestyle='dotted')
    ax.get_yaxis().set_visible(False)
    # Write each feature name inside its bar instead of on the axis.
    for i,j in zip(ax.patches,imp.index):
        ax.text(i.get_width()/2,i.get_y()+i.get_height()/2,j,ha='center',va='center',color='black')
    plt.title('tag='+str(tag)+' | simNum='+str(simNum)+' | oos='+str(round(oos,4)))
    plt.savefig(pathOut+'featImportance_'+str(simNum)+'.png',dpi=100)
    plt.clf();plt.close()
    return
import itertools as itt
import numbers
import numpy as np
import pandas as pd
from abc import abstractmethod
from typing import Iterable, Tuple, List
class BaseTimeSeriesCrossValidator:
    """
    Abstract base class for time series cross-validation.

    Every sample carries a prediction time pred_time (when the features are
    used to predict the response) and an evaluation time eval_time (when the
    response is known and the error can be computed). Unlike standard sklearn
    cross-validation, X, y, pred_times and eval_times must therefore all be
    pandas dataframes/series sharing the same index, and the samples are
    assumed time-ordered with respect to pred_times (non-decreasing).

    Parameters
    ----------
    n_splits : int, default=10
        Number of folds. Must be at least 2.
    """
    def __init__(self, n_splits=10):
        if not isinstance(n_splits, numbers.Integral):
            raise ValueError(f"The number of folds must be of Integral type. {n_splits} of type {type(n_splits)}"
                             f" was passed.")
        n_splits = int(n_splits)
        if n_splits <= 1:
            raise ValueError(f"K-fold cross-validation requires at least one train/test split by setting n_splits = 2 "
                             f"or more, got n_splits = {n_splits}.")
        self.n_splits = n_splits
        # Populated by split(); None until then.
        self.pred_times = None
        self.eval_times = None
        self.indices = None

    @abstractmethod
    def split(self, X: pd.DataFrame, y: pd.Series = None,
              pred_times: pd.Series = None, eval_times: pd.Series = None):
        """Validate and cache the inputs; subclasses perform the actual split."""
        if not isinstance(X, (pd.DataFrame, pd.Series)):
            raise ValueError('X should be a pandas DataFrame/Series.')
        if not (y is None or isinstance(y, pd.Series)):
            raise ValueError('y should be a pandas Series.')
        if not isinstance(pred_times, pd.Series):
            raise ValueError('pred_times should be a pandas Series.')
        if not isinstance(eval_times, pd.Series):
            raise ValueError('eval_times should be a pandas Series.')
        if y is not None and (X.index == y.index).sum() != len(y):
            raise ValueError('X and y must have the same index')
        if (X.index == pred_times.index).sum() != len(pred_times):
            raise ValueError('X and pred_times must have the same index')
        if (X.index == eval_times.index).sum() != len(eval_times):
            raise ValueError('X and eval_times must have the same index')
        self.pred_times = pred_times
        self.eval_times = eval_times
        self.indices = np.arange(X.shape[0])
class CombPurgedKFoldCV(BaseTimeSeriesCrossValidator):
    """
    Purged and embargoed combinatorial cross-validation
    As described in Advances in financial machine learning, Marcos Lopez de Prado, 2018.
    The samples are decomposed into n_splits folds containing equal numbers of samples, without shuffling. In each cross
    validation round, n_test_splits folds are used as the test set, while the other folds are used as the train set.
    There are as many rounds as n_test_splits folds among the n_splits folds.
    Each sample should be tagged with a prediction time pred_time and an evaluation time eval_time. The split is such
    that the intervals [pred_times, eval_times] associated to samples in the train and test set do not overlap. (The
    overlapping samples are dropped.) In addition, an "embargo" period is defined, giving the minimal time between an
    evaluation time in the test set and a prediction time in the training set. This is to avoid, in the presence of
    temporal correlation, a contamination of the test set by the train set.
    Parameters
    ----------
    n_splits : int, default=10
        Number of folds. Must be at least 2.
    n_test_splits : int, default=2
        Number of folds used in the test set. Must be at least 1.
    embargo_td : pd.Timedelta, default=0
        Embargo period (see explanations above).
    """
    def __init__(self, n_splits=10, n_test_splits=2, embargo_td=pd.Timedelta(minutes=0)):
        super().__init__(n_splits)
        # n_test_splits must be an integer in [1, n_splits - 1].
        if not isinstance(n_test_splits, numbers.Integral):
            raise ValueError(f"The number of test folds must be of Integral type. {n_test_splits} of type "
                             f"{type(n_test_splits)} was passed.")
        n_test_splits = int(n_test_splits)
        if n_test_splits <= 0 or n_test_splits > self.n_splits - 1:
            raise ValueError(f"K-fold cross-validation requires at least one train/test split by setting "
                             f"n_test_splits between 1 and n_splits - 1, got n_test_splits = {n_test_splits}.")
        self.n_test_splits = n_test_splits
        # The embargo must be a non-negative pd.Timedelta.
        if not isinstance(embargo_td, pd.Timedelta):
            raise ValueError(f"The embargo time should be of type Pandas Timedelta. {embargo_td} of type "
                             f"{type(embargo_td)} was passed.")
        if embargo_td < pd.Timedelta(minutes=0):
            raise ValueError(f"The embargo time should be positive, got embargo = {embargo_td}.")
        self.embargo_td = embargo_td
    def split(self, X: pd.DataFrame, y: pd.Series = None,
              pred_times: pd.Series = None, eval_times: pd.Series = None) -> Iterable[Tuple[np.ndarray, np.ndarray]]:
        """
        Yield the indices of the train and test sets.
        Although the samples are passed in the form of a pandas dataframe, the indices returned are position indices,
        not labels.
        Parameters
        ----------
        X : pd.DataFrame, shape (n_samples, n_features), required
            Samples. Only used to extract n_samples.
        y : pd.Series, not used, inherited from _BaseKFold
        pred_times : pd.Series, shape (n_samples,), required
            Times at which predictions are made. pred_times.index has to coincide with X.index.
        eval_times : pd.Series, shape (n_samples,), required
            Times at which the response becomes available and the error can be computed. eval_times.index has to
            coincide with X.index.
        Returns
        -------
        train_indices: np.ndarray
            A numpy array containing all the indices in the train set.
        test_indices : np.ndarray
            A numpy array containing all the indices in the test set.
        """
        # Base-class split() validates the inputs and caches pred_times,
        # eval_times and indices on self.
        super().split(X, y, pred_times, eval_times)
        # Fold boundaries
        fold_bounds = [(fold[0], fold[-1] + 1) for fold in np.array_split(self.indices, self.n_splits)]
        # List of all combinations of n_test_splits folds selected to become test sets
        selected_fold_bounds = list(itt.combinations(fold_bounds, self.n_test_splits))
        # In order for the first round to have its whole test set at the end of the dataset
        selected_fold_bounds.reverse()
        for fold_bound_list in selected_fold_bounds:
            # Computes the bounds of the test set, and the corresponding indices
            test_fold_bounds, test_indices = self.compute_test_set(fold_bound_list)
            # Computes the train set indices
            train_indices = self.compute_train_set(test_fold_bounds, test_indices)
            yield train_indices, test_indices
    def compute_train_set(self, test_fold_bounds: List[Tuple[int, int]], test_indices: np.ndarray) -> np.ndarray:
        """
        Compute the position indices of samples in the train set.
        Parameters
        ----------
        test_fold_bounds : List of tuples of position indices
            Each tuple records the bounds of a block of indices in the test set.
        test_indices : np.ndarray
            A numpy array containing all the indices in the test set.
        Returns
        -------
        train_indices: np.ndarray
            A numpy array containing all the indices in the train set.
        """
        # As a first approximation, the train set is the complement of the test set
        train_indices = np.setdiff1d(self.indices, test_indices)
        # But we now have to purge and embargo, using the module-level
        # purge() and embargo() helpers defined below this class.
        for test_fold_start, test_fold_end in test_fold_bounds:
            # Purge
            train_indices = purge(self, train_indices, test_fold_start, test_fold_end)
            # Embargo
            train_indices = embargo(self, train_indices, test_indices, test_fold_end)
        return train_indices
    def compute_test_set(self, fold_bound_list: List[Tuple[int, int]]) -> Tuple[List[Tuple[int, int]], np.ndarray]:
        """
        Compute the indices of the samples in the test set.
        Parameters
        ----------
        fold_bound_list: List of tuples of position indices
            Each tuple records the bounds of the folds belonging to the test set.
        Returns
        -------
        test_fold_bounds: List of tuples of position indices
            Like fold_bound_list, but with the neighboring folds in the test set merged.
        test_indices: np.ndarray
            A numpy array containing the test indices.
        """
        # Starts float-dtyped; the astype(int) below keeps the final array integral.
        test_indices = np.empty(0)
        test_fold_bounds = []
        for fold_start, fold_end in fold_bound_list:
            # Records the boundaries of the current test split
            if not test_fold_bounds or fold_start != test_fold_bounds[-1][-1]:
                test_fold_bounds.append((fold_start, fold_end))
            # If the current test split is contiguous to the previous one, simply updates the endpoint
            elif fold_start == test_fold_bounds[-1][-1]:
                test_fold_bounds[-1] = (test_fold_bounds[-1][0], fold_end)
            test_indices = np.union1d(test_indices, self.indices[fold_start:fold_end]).astype(int)
        return test_fold_bounds, test_indices
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator
        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.
        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits
def purge(cv: BaseTimeSeriesCrossValidator, train_indices: np.ndarray,
          test_fold_start: int, test_fold_end: int) -> np.ndarray:
    """
    Purge part of the train set.

    Given the left boundary test_fold_start of a test block, drop from the
    train set every sample before that boundary whose evaluation time falls at
    or after the prediction time of the first test sample. Samples after the
    test block are kept untouched.

    Parameters
    ----------
    cv: Cross-validation class
        Needs to have the attributes cv.pred_times, cv.eval_times and cv.indices.
    train_indices: np.ndarray
        Indices of the samples currently in the train set.
    test_fold_start : int
        Index corresponding to the start of a test set block.
    test_fold_end : int
        Index corresponding to the end of the same test set block.
    Returns
    -------
    train_indices: np.ndarray
        The train indices purged at test_fold_start.
    """
    first_test_pred_time = cv.pred_times.iloc[test_fold_start]
    # Train samples before the test block: keep only those fully evaluated
    # before the first test prediction.
    kept_before = np.intersect1d(train_indices,
                                 cv.indices[cv.eval_times < first_test_pred_time])
    # Train samples after the end of the test block are unaffected by purging.
    kept_after = np.intersect1d(train_indices, cv.indices[test_fold_end:])
    return np.concatenate((kept_before, kept_after))
def compute_fold_bounds(cv: BaseTimeSeriesCrossValidator, split_by_time: bool) -> List[int]:
    """
    Compute a list containing the fold (left) boundaries.

    Parameters
    ----------
    cv: BaseTimeSeriesCrossValidator
        Cross-validation object for which the bounds need to be computed.
    split_by_time: bool
        If False, the folds contain an (approximately) equal number of samples. If True, the folds span identical
        time intervals.
    """
    if not split_by_time:
        # Equal-count folds: take the first position of each chunk.
        return [fold[0] for fold in np.array_split(cv.indices, cv.n_splits)]
    # Equal-duration folds: slice the prediction-time axis evenly and map the
    # boundary times back to positions.
    total_span = cv.pred_times.max() - cv.pred_times.min()
    span_per_fold = total_span / cv.n_splits
    boundary_times = [cv.pred_times.iloc[0] + span_per_fold * k for k in range(cv.n_splits)]
    return cv.pred_times.searchsorted(boundary_times)
def embargo(cv: BaseTimeSeriesCrossValidator, train_indices: np.ndarray,
            test_indices: np.ndarray, test_fold_end: int) -> np.ndarray:
    """
    Apply the embargo procedure to part of the train set.
    This amounts to dropping the train set samples whose prediction time occurs within self.embargo_dt of the test
    set sample evaluation times. This method applies the embargo only to the part of the training set immediately
    following the end of the test set determined by test_fold_end.
    Parameters
    ----------
    cv: Cross-validation class
        Needs to have the attributes cv.pred_times, cv.eval_times, cv.embargo_dt and cv.indices.
    train_indices: np.ndarray
        A numpy array containing all the indices of the samples currently included in the train set.
    test_indices : np.ndarray
        A numpy array containing all the indices of the samples in the test set.
    test_fold_end : int
        Index corresponding to the end of a test set block.
    Returns
    -------
    train_indices: np.ndarray
        The same array, with the indices subject to embargo removed.
    """
    if not hasattr(cv, 'embargo_td'):
        raise ValueError("The passed cross-validation object should have a member cv.embargo_td defining the embargo"
                         "time.")
    # NOTE(review): test_indices[:test_fold_end] slices the test-index ARRAY by
    # a position that is an index into the full data set — confirm this is the
    # intended subset (it keeps the test indices preceding the block end only
    # because test_indices is sorted).
    last_test_eval_time = cv.eval_times.iloc[test_indices[:test_fold_end]].max()
    # First position whose prediction time clears the embargo window.
    min_train_index = len(cv.pred_times[cv.pred_times <= last_test_eval_time + cv.embargo_td])
    if min_train_index < cv.indices.shape[0]:
        # Allowed: everything before the test block end, plus everything after
        # the embargo window.
        allowed_indices = np.concatenate((cv.indices[:test_fold_end], cv.indices[min_train_index:]))
        train_indices = np.intersect1d(train_indices, allowed_indices)
    return train_indices
```
# Feature engineering
```
# Drop export artefacts ('Unnamed: 0' is a stray CSV index column) and unused
# notional/value columns.
Dollar_bars.drop(columns=['Unnamed: 0','timestamp.1','foreignNotional','grossValue','homeNotional'], inplace = True)
# Add fast and slow moving averages (windows in bars, not days)
fast_window = 30 #12 7 25
slow_window = 60 #26 14 50
Dollar_bars['fast_mavg'] = (Dollar_bars['close']
.rolling(window=fast_window,
min_periods=fast_window, center=False)
.mean())
Dollar_bars['slow_mavg'] = (Dollar_bars['close']
.rolling(window=slow_window,
min_periods=slow_window, center=False)
.mean())
# Log returns of the close price.
Dollar_bars['log_ret'] = np.log(Dollar_bars['close']).diff()
# Same moving averages and log returns for the second data set.
fast_window = 30 #12 7 25
slow_window = 60 #26 14 50
Dollar_bars1['fast_mavg'] = (Dollar_bars1['close']
.rolling(window=fast_window,
min_periods=fast_window, center=False)
.mean())
Dollar_bars1['slow_mavg'] = (Dollar_bars1['close']
.rolling(window=slow_window,
min_periods=slow_window, center=False)
.mean())
Dollar_bars1['log_ret'] = np.log(Dollar_bars1['close']).diff()
# Plot the close price together with both moving averages on one panel.
from plotly.subplots import make_subplots
import plotly.graph_objects as go
fig = make_subplots(rows=1, cols=1,specs=[[{"secondary_y": True}]])
fig.add_trace(go.Scatter(
x=Dollar_bars.index,
y=Dollar_bars['close'],
name="ETH closing price",
mode = 'lines',
textfont_family="Arial_Black"),
row= 1 ,
col= 1 )
fig.add_trace(go.Scatter(
x=Dollar_bars.index,
y=Dollar_bars['fast_mavg'],
name="ETH 30 samples moving average",
mode = 'lines',
textfont_family="Arial_Black"),
row= 1 ,
col= 1 )
fig.add_trace(go.Scatter(
x=Dollar_bars.index,
y=Dollar_bars['slow_mavg'],
name="ETH 60 samples moving average",
mode = 'lines',
textfont_family="Arial_Black"),
row= 1 ,
col= 1 )
# Place the legend inside the top-left corner of the plot area.
fig.update_layout(
legend=dict(
x=0.0,
y=0.98,
traceorder="normal",
font=dict(
family="sans-serif",
size=12,
color="black"
),
)
)
# Search for the smallest differencing amount that makes the log-close series
# stationary, then build the fractionally differenced feature.
diff_amt = compute_differencing_amt(Dollar_bars['close'])
print('Differening amount: {:.3f}'.format(diff_amt))
fracs = frac_diff_ffd(np.log(Dollar_bars['close']), differencing_amt=diff_amt, threshold=1e-5)
frac_df = pd.Series(data=fracs, index=Dollar_bars.index)
frac_df.tail()
Dollar_bars['fracdiff'] = [frac_df[x] for x in Dollar_bars.index]
#diff_amt = compute_differencing_amt(Dollar_bars1['close'])
# NOTE(review): the second data set uses a hard-coded amount (0.280) instead of
# re-running the search (commented out above) — confirm it matches this
# series' own optimum; the printed diff_amt still refers to the first series.
print('Differening amount: {:.3f}'.format(diff_amt))
fracs = frac_diff_ffd(np.log(Dollar_bars1['close']), differencing_amt=0.280, threshold=1e-5)
frac_df = pd.Series(data=fracs, index=Dollar_bars1.index)
frac_df.tail()
Dollar_bars1['fracdiff'] = [frac_df[x] for x in Dollar_bars1.index]
# Compute trade sides from the moving-average crossover:
# fast >= slow -> long (+1), fast < slow -> short (-1).
Dollar_bars['side'] = np.nan
long_signals = Dollar_bars['fast_mavg'] >= Dollar_bars['slow_mavg']
short_signals = Dollar_bars['fast_mavg'] < Dollar_bars['slow_mavg']
Dollar_bars.loc[long_signals, 'side'] = 1
Dollar_bars.loc[short_signals, 'side'] = -1
# Remove Look ahead bias by lagging the signal
Dollar_bars['side'] = Dollar_bars['side'].shift(1)
# drop the NaN values from our data set
Dollar_bars.dropna(axis=0, how='any', inplace=True)
print(Dollar_bars.side.value_counts())
# Same side computation for the second data set.
Dollar_bars1['side'] = np.nan
long_signals = Dollar_bars1['fast_mavg'] >= Dollar_bars1['slow_mavg']
short_signals = Dollar_bars1['fast_mavg'] < Dollar_bars1['slow_mavg']
Dollar_bars1.loc[long_signals, 'side'] = 1
Dollar_bars1.loc[short_signals, 'side'] = -1
# Remove Look ahead bias by lagging the signal
Dollar_bars1['side'] = Dollar_bars1['side'].shift(1)
# drop the NaN values from our data set
Dollar_bars1.dropna(axis=0, how='any', inplace=True)
print(Dollar_bars1.side.value_counts())
# Save the raw data
raw_data = Dollar_bars.copy()
raw_data
# NOTE(review): `rsi_windows` is reused below for momentum, volatility and
# kurtosis windows — the name is misleading; none of these are RSI features.
rsi_windows = [ 55]
# momentum
for w in rsi_windows:
    raw_data[f'mom_{w}'] = raw_data['fracdiff'].pct_change(w)
rsi_windows = [ 13, 34, 55, 134, 255]
# volatility (rolling std of the fractionally differenced series)
for w in rsi_windows:
    raw_data[f'vol_{w}'] = (raw_data['fracdiff']
    .rolling(window=w, min_periods=w, center=False)
    .std())
rsi_windows = [ 13, 34, 55, 134, 255]
# volatility for the second data set
for w in rsi_windows:
    Dollar_bars1[f'vol_{w}'] = (Dollar_bars1['fracdiff']
    .rolling(window=w, min_periods=w, center=False)
    .std())
# ROC Rate of Change over 10 bars
N = raw_data['close'].diff(10)
D = raw_data['close'].shift(10)
raw_data['ROC'] = N/D
# CCI Commodity Channel Index on the typical price (20-bar window)
TP = (raw_data['high'] + raw_data['low'] + raw_data['close']) / 3
raw_data['CCI'] = (TP - TP.rolling(20).mean()) / (0.015 * TP.rolling(20).std() )
# Create Average True Range
# NOTE(review): TR here is just high - low, ignoring gaps vs the prior close —
# confirm the simplified true range is intended.
raw_data['TR'] = raw_data['high'] - raw_data['low']
raw_data['ATR'] = raw_data['TR'].ewm(span = 10).mean()
# Exponential moving averages of the close at several spans.
raw_data['100ema'] = raw_data['close'].ewm(span=100).mean()
raw_data['250ema'] = raw_data['close'].ewm(span=250).mean()
raw_data['70ema'] = raw_data['close'].ewm(span=70).mean()
raw_data['26ema'] = raw_data['close'].ewm(span=26).mean()
raw_data['12ema'] = raw_data['close'].ewm(span=12).mean()
#raw_data['MACD'] = (raw_data['12ema']-raw_data['26ema'])
rsi_windows = [ 250,512]
# Excess kurtosis
for w in rsi_windows:
    raw_data[f'kurt_{w}'] = (raw_data['fracdiff']
    .rolling(window=w, min_periods=w, center=False)
    .kurt())
# Rolling autocorrelation of the fracdiff series at lags 1, 3 and 5.
window_autocorr = 50
raw_data['autocorr_1'] = (raw_data['fracdiff']
.rolling(window=window_autocorr,
min_periods=window_autocorr, center=False)
.apply(lambda x: x.autocorr(lag=1), raw=False))
raw_data['autocorr_2'] = (raw_data['fracdiff']
.rolling(window=window_autocorr,
min_periods=window_autocorr, center=False)
.apply(lambda x: x.autocorr(lag=3), raw=False))
raw_data['autocorr_3'] = (raw_data['fracdiff']
.rolling(window=window_autocorr,
min_periods=window_autocorr, center=False)
.apply(lambda x: x.autocorr(lag=5), raw=False))
# Autocorrelation of log returns over a 20-bar window (only lag 3 kept).
window_autocorr = 20
#raw_data['autocorr_20_4'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=4), raw=False)
#raw_data['autocorr_20_5'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=5), raw=False)
#raw_data['autocorr_20_1'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=1), raw=False)
#raw_data['autocorr_20_2'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=2), raw=False)
raw_data['autocorr_20_3'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=3), raw=False)
# Lagged fracdiff values as momentum features (only lags 4 and 10 kept).
#raw_data['log_t1'] = raw_data['fracdiff'].shift(1)
#raw_data['log_t2'] = raw_data['fracdiff'].shift(2)
#raw_data['log_t3'] = raw_data['fracdiff'].shift(3)
raw_data['log_t4'] = raw_data['fracdiff'].shift(4)
#raw_data['log_t5'] = raw_data['fracdiff'].shift(5)
#raw_data['log_t6'] = raw_data['fracdiff'].shift(5)
raw_data['log_t10'] = raw_data['fracdiff'].shift(10)
Dollar_bars1['log_t4'] = Dollar_bars1['fracdiff'].shift(4)
#raw_data['log_t5'] = raw_data['fracdiff'].shift(5)
#raw_data['log_t6'] = raw_data['fracdiff'].shift(5)
Dollar_bars1['log_t10'] = Dollar_bars1['fracdiff'].shift(10)
# Drop the NaN values from our data set
raw_data.replace([np.inf, -np.inf], np.nan, inplace=True)
raw_data.replace(0, np.nan, inplace=True)
raw_data.dropna(axis=0, how='any', inplace=True)
raw_data
# Drop the NaN values from our data set
Dollar_bars1.replace([np.inf, -np.inf], np.nan, inplace=True)
Dollar_bars1.replace(0, np.nan, inplace=True)
Dollar_bars1.dropna(axis=0, how='any', inplace=True)
Dollar_bars1
# --- Triple-barrier labeling for the training set (raw_data) ---
close = raw_data['close']
# creating our event triggers using the CUSUM filter
# determining daily volatility using the last 50 days
daily_vol = get_daily_vol(close=close, lookback=50)
cusum_events = get_t_events(close, threshold=daily_vol.mean())
# adding vertical barriers with a 0.9-day expiration window
vertical_barriers = add_vertical_barrier(t_events=cusum_events,
                                         close=close, num_days=0.9)
# determining timestamps of first touch
pt_sl = [9, 9]  # profit-take and stop-loss set to 9x the volatility target
min_ret = 0.0005  # minimum return filter of 0.05%
triple_barrier_events = get_events(close=close,
                                   t_events=cusum_events,
                                   pt_sl=pt_sl,
                                   target=daily_vol,
                                   min_ret=min_ret,
                                   num_threads=2,
                                   vertical_barrier_times=vertical_barriers,
                                   side=Dollar_bars['side']
                                   )
labels = get_bins(triple_barrier_events, Dollar_bars['close'])
labels.side.value_counts()
# --- Identical labeling pipeline for the out-of-sample set (Dollar_bars1) ---
close = Dollar_bars1['close']
# creating our event triggers using the CUSUM filter
# determining daily volatility using the last 50 days
daily_vol = get_daily_vol(close=close, lookback=50)
cusum_events = get_t_events(close, threshold=daily_vol.mean())
# adding vertical barriers with a 0.9-day expiration window
vertical_barriers = add_vertical_barrier(t_events=cusum_events,
                                         close=close, num_days=0.9)
# determining timestamps of first touch
pt_sl = [9, 9]  # profit-take and stop-loss set to 9x the volatility target
min_ret = 0.0005  # minimum return filter of 0.05%
triple_barrier_events1 = get_events(close=close,
                                    t_events=cusum_events,
                                    pt_sl=pt_sl,
                                    target=daily_vol,
                                    min_ret=min_ret,
                                    num_threads=2,
                                    vertical_barrier_times=vertical_barriers,
                                    side=Dollar_bars1['side']
                                    )
labels1 = get_bins(triple_barrier_events1, Dollar_bars1['close'])
labels1.side.value_counts()
labels1
# Remove look-ahead bias by lagging the side signal one bar
raw_data['side'] = raw_data['side'].shift(1)
# Remove look-ahead bias by lagging the side signal one bar
Dollar_bars1['side'] = Dollar_bars1['side'].shift(1)
# Get features at event dates
X = raw_data.loc[labels.index, :]
# Drop unwanted columns: raw prices and the signal itself must not
# leak into the feature matrix
X.drop([#'fracdiff',
        'open', 'high', 'low', 'close','side','slow_mavg','fast_mavg'
        #,'avg','lower','upper'
        ], axis=1, inplace=True)
y = labels['bin']
X
# Get features at event dates.
# NOTE(review): this cell rebuilds X from raw_data and overrides the
# previous one, dropping a few extra columns -- confirm which version
# is intended before running both.
X = raw_data.loc[labels.index, :]
# Drop unwanted columns
X.drop([#'fracdiff',
        'open', 'high', 'low', 'close','side','slow_mavg','fast_mavg','log_ret','Google_trend1','fracdiff'
        #,'avg','lower','upper'
        ], axis=1, inplace=True)
y = labels['bin']
X
# Get features at event dates for the out-of-sample set
X_test = Dollar_bars1.loc[labels1.index, :]
# Drop unwanted columns
X_test.drop([#'fracdiff',
             'open', 'high', 'low', 'close','side','slow_mavg','fast_mavg','log_ret','fracdiff','timestamp'
             #,'avg','lower','upper'
             ], axis=1, inplace=True)
y_test = labels1['bin']
X_test
# Baseline "always long" primary model: predict 1 for every event and
# report its metrics for comparison against the fitted meta-model.
primary_forecast = pd.DataFrame(labels['bin'])
primary_forecast['pred'] = 1
primary_forecast.columns = ['actual', 'pred']
# Performance Metrics
actual = primary_forecast['actual']
pred = primary_forecast['pred']
print(classification_report(y_true=actual, y_pred=pred))
print("Confusion Matrix")
print(confusion_matrix(actual, pred))
print('')
print("Accuracy")
print(accuracy_score(actual, pred))
```
#Train/Test single path
```
y_test = labels1['bin']
# Random forest meta-model: many shallow trees with balanced
# subsample class weights to handle label imbalance.
n_estimator = 1000
depth = 2
rf = RandomForestClassifier(max_depth=depth, n_estimators=n_estimator,
                            criterion='entropy', class_weight='balanced_subsample',
                            random_state=0)
rf.fit(X, y)
# Performance Metrics (in-sample -- expect optimistic numbers)
y_pred_rf = rf.predict_proba(X)[:, 1]
y_pred = rf.predict(X)
fpr_rf, tpr_rf, _ = roc_curve(y, y_pred_rf)
print(classification_report(y, y_pred))
print("Confusion Matrix")
print(confusion_matrix(y, y_pred))
print('')
print("Accuracy")
print(accuracy_score(y, y_pred))
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
# Feature Importance (mean decrease in impurity from the forest)
title = 'Feature Importance:'
figsize = (15, 5)
feat_imp = pd.DataFrame({'Importance':rf.feature_importances_})
feat_imp['feature'] = X.columns
feat_imp.sort_values(by='Importance', ascending=False, inplace=True)
feat_imp = feat_imp  # no-op; kept from original notebook
feat_imp.sort_values(by='Importance', inplace=True)
feat_imp = feat_imp.set_index('feature', drop=True)
feat_imp.plot.barh(title=title, figsize=figsize)
plt.xlabel('Feature Importance Score')
plt.show()
# Performance Metrics (out-of-sample test set)
y_pred_rf = rf.predict_proba(X_test)[:, 1]
y_pred = rf.predict(X_test)
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
print(classification_report(y_test, y_pred))
print("Confusion Matrix")
print(confusion_matrix(y_test, y_pred))
print('')
print("Accuracy")
print(accuracy_score(y_test, y_pred))
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
```
#COMBINATORIAL PURGED CV
```
# Count concurrent (overlapping) events per bar, computed in parallel
numCoEvents = MultiProcessingFunctions.mp_pandas_obj(func=mpNumCoEvents,
                                                     pd_obj= ('molecule',triple_barrier_events.index),
                                                     num_threads=mp.cpu_count(),
                                                     closeIdx=close.index,
                                                     t1=triple_barrier_events['t1']
                                                     )
numCoEvents=numCoEvents.loc[~numCoEvents.index.duplicated(keep='last')]
numCoEvents=numCoEvents.reindex(close.index).fillna(0)
numCoEvents.value_counts()
#SAMPLE WEIGHT BY ABSOLUTE RETURN ATTRIBUTION
labels['w'] = MultiProcessingFunctions.mp_pandas_obj(func = mpSampleW,
                                                     pd_obj = ('molecule',triple_barrier_events.index),
                                                     num_threads=mp.cpu_count(),
                                                     t1=triple_barrier_events['t1'],
                                                     numCoEvents=numCoEvents,
                                                     close=Dollar_bars['close'])
# normalize the weights so they sum to the number of samples
labels['w'] *= labels.shape[0]/labels['w'].sum()
#CALLING SNIPPET 4.2 AVERAGE UNIQUENESS OF EACH LABEL
labels['tW'] = MultiProcessingFunctions.mp_pandas_obj(func= mpSampleTW,
                                                      pd_obj = ('molecule',triple_barrier_events.index),
                                                      num_threads = mp.cpu_count(),
                                                      t1 = triple_barrier_events['t1'],
                                                      numCoEvents = numCoEvents)
avgU = labels['tW'].mean()
avgU
# Combinatorial purged CV setup: event end-times drive the purging
t1 = triple_barrier_events['t1'].loc[X.index]
idx = triple_barrier_events.loc[X.index].index
n_estimator = 1000
depth = 2
#skf = CombPurgedKFoldCV(n_splits=10,t1=t1,pctEmbargo=0.01)
skf = CombPurgedKFoldCV(n_splits=10, n_test_splits= 2 ,embargo_td=pd.Timedelta(minutes=100))
classifier = RandomForestClassifier(n_estimators=n_estimator, max_depth=depth,
                                    criterion='entropy',
                                    class_weight='balanced_subsample',
                                    random_state=0)
# prediction/evaluation times required by the purged CV splitter
eval_times = pd.Series(t1)
eval_times.index = X.index
pred_times = pd.Series(index=X.index, data=X.index)
# NOTE(review): the following repeats the setup above and then replaces
# the random forest with a bagged decision-tree ensemble whose
# max_samples equals the average label uniqueness.
n_estimator = 1000
depth = 2
t1 = triple_barrier_events['t1'].loc[X.index]
idx = triple_barrier_events.loc[X.index].index
#skf = CombPurgedKFoldCV(n_splits=10,t1=t1,pctEmbargo=0.01)
skf = CombPurgedKFoldCV(n_splits=10, n_test_splits= 2 ,embargo_td=pd.Timedelta(minutes=100))
classifier = RandomForestClassifier(n_estimators=n_estimator, max_depth=depth,
                                    criterion='entropy',
                                    class_weight='balanced_subsample',
                                    random_state=0)
classifier=DecisionTreeClassifier(criterion='entropy',max_features='auto',
                                  class_weight='balanced',min_weight_fraction_leaf=0.)
classifier=BaggingClassifier(base_estimator=classifier,n_estimators=1000,max_features=1.,
                             max_samples=avgU,oob_score=True,n_jobs=1)
train_dates = X.index
test_dates = X_test.index
# Load the saved eigenvector matrix and project the features onto the
# orthogonal basis (decorrelates features before fitting)
eVec = pd.read_csv ('eVec.csv')
eVec.index = eVec['Unnamed: 0']
eVec.drop(columns=['Unnamed: 0'], inplace = True)
eVec.index.name = None
ortho_X_train = np.dot(X,eVec)
ortho_X_test = np.dot(X_test,eVec)
labels.loc[train_dates, 'w']
# fit using the return-attribution sample weights
classifier_ = classifier.fit(ortho_X_train,y,labels.loc[train_dates, 'w'])
# NOTE(review): `results` is defined in a later cell; this save relies
# on out-of-order notebook execution.
results.to_csv('results.csv')
!cp results.csv "gdrive/My Drive/TFM/results.csv"
```
#THRESHOLDING OUTPUTS
```
# Predict on the out-of-sample set and keep only predicted longs
array_ = classifier_.predict(ortho_X_test)
results = labels1.loc[test_dates]
results['predit_proba'] = array_
bet_sizing = results[results['predit_proba'] ==1]
bet_sizing
# Refit without sample weights and size bets from the predicted class
classifier.fit(ortho_X_train, y)
results = labels1.loc[X_test.index]
z = classifier.predict_proba(ortho_X_test)
results['predict_proba'] = np.argmax(z, axis=1)
bet_sizing = results[results['predict_proba'] ==1]
bet_sizing = bet_sizing[bet_sizing['side'] == 1.0]
# Collapse overlapping bets: only open a new bet once the previous
# bet's barrier time (t1) has passed.
timings = triple_barrier_events1['t1'].loc[X_test.index]
active_bet = 0
final_bets = pd.DataFrame()
for index,row in results.iterrows():
    if (active_bet == 1 and index > end):
        active_bet = 0
    if (active_bet == 0):
        # NOTE(review): DataFrame.append was removed in pandas 2.0;
        # pd.concat is the modern replacement.
        final_bets = final_bets.append(row)
        end = timings.loc[index]
        active_bet = 1
final_bets['end'] = timings.loc[final_bets.index]
final_bets
#LONGS ONLY
final_bets = final_bets[final_bets['side'] == 1.0]
len(final_bets)
len(final_bets)
#classifier_.predict(ortho_X_test)
z = classifier.predict_proba(ortho_X_test)
labels_ = np.argmax(z, axis=1)
# Winning-class probability per prediction.
# NOTE(review): exact ties (element[0] == element[1]) are skipped, which
# would desynchronize label_prob from X_test.index -- confirm.
label_prob = []
for element in z:
    if element[0]> element[1]:
        label_prob.append(element[0])
    elif element[1]>element[0]:
        label_prob.append(element[1])
predictions = pd.DataFrame(index=X_test.index)
predictions['label_prob'] = label_prob
predictions['label'] = labels_
final_bets_index = predictions.loc[final_bets.index]
final_bets_index
#classifier_.predict(ortho_X_test)
z = classifier.predict_proba(ortho_X_test)
labels_ = np.argmax(z, axis=1)
#labels_prob
label_prob = []
for element in z:
    if element[0]> element[1]:
        label_prob.append(element[0])
    elif element[1]>element[0]:
        label_prob.append(element[1])
predictions = pd.DataFrame(index=X_test.index)
predictions['label_prob'] = label_prob
predictions['label'] = labels_
final_bets_index = predictions.loc[final_bets.index]
# Keep only high-confidence bets (predicted probability above 0.7)
final_bets_index = final_bets_index[final_bets_index['label_prob'] > 0.7]
final_bets.loc[final_bets_index.index]
# Simple equity curve: cumulative percent returns starting from 100
report = pd.DataFrame(data=final_bets.loc[final_bets_index.index]['ret']*100)
report['Equity'] = report.cumsum() + 100
report
```
#BACKTEST STATISTICS
```
%%capture
!pip install ffn
import ffn
# Backtest statistics of the equity curve (rf = risk-free rate)
perm = ffn.core.PerformanceStats(report['Equity'], rf = 0.33)
perm.display()
perm.stats
%matplotlib inline
perm.plot(title='Equity curve',figsize=(15,7),grid=True)
ffn.to_drawdown_series(report['Equity']).plot(title='Drawdown periods',figsize=(15,7),grid=True)
# Persist the eigenvector matrix to Google Drive
eVec.to_csv('eVec.csv')
!cp eVec.csv "gdrive/My Drive/TFM/eVec.csv"
# Collect the test folds produced by the combinatorial purged CV splitter
pieces = []
for train_indices, test_indices in skf.split(X,y,pred_times,eval_times):
    pieces.append(test_indices)
# Build the combinatorial purged CV path map: N groups taken k at a time
# gives C(N, k) splits; each column flags the two groups used as test.
from scipy.special import comb
N = 10
k = 2
model_splits = int(comb(10,2))
split_map = np.zeros([N, model_splits])
col = 0
for base in range(N):
    for other in range(base +1,N):
        split_map[base, col]=1
        split_map[other, col]=1
        col += 1
# Number the backtest paths: each group's i-th test appearance is
# assigned to path i.
for row in range(N):
    for i in range(1,model_splits):
        val = split_map[row,i]
        prev_val = np.max(split_map[row, :i])
        if val == 0:
            continue
        elif val == 1:
            split_map[row, i] = prev_val +1
import plotly.express as px
fig, ax1 = plt.subplots(1,1)
im = ax1.imshow(split_map)
fig.set_size_inches(18.5, 10.5)
ax1.set_xlabel('Paths', fontsize=20)
ax1.set_ylabel('Splits', fontsize=20)
plt.savefig('Combinatorial_cv_purged_paths.png')
from google.colab import files
files.download("Combinatorial_cv_purged_paths.png")
# Reload eigenvectors and orthogonalize the training features
eVec = pd.read_csv ('eVec.csv')
eVec.index = eVec['Unnamed: 0']
eVec.drop(columns=['Unnamed: 0'], inplace = True)
eVec.index.name = None
orthoFeats_ = np.dot(X,eVec)
orthoFeats_ = pd.DataFrame(orthoFeats_)
```
#BACKTEST
```
# Serialize the fitted random forest for later reuse
import joblib
joblib.dump(rf, 'RandomForest1.pkl', compress=9)
```
| github_jupyter |
<img src="../../../images/qiskit_header.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" align="middle">
## _*Relaxation and Decoherence*_
* **Last Updated:** Feb 25, 2019
* **Requires:** qiskit-terra 0.8, qiskit-ignis 0.1.1, qiskit-aer 0.2
This notebook gives examples for how to use the ``ignis.characterization.coherence`` module for measuring $T_1$ and $T_2$.
```
import numpy as np
import matplotlib.pyplot as plt
import qiskit
from qiskit.providers.aer.noise.errors.standard_errors import thermal_relaxation_error
from qiskit.providers.aer.noise import NoiseModel
from qiskit.ignis.characterization.coherence import T1Fitter, T2StarFitter, T2Fitter
from qiskit.ignis.characterization.coherence import t1_circuits, t2_circuits, t2star_circuits
```
# Generation of coherence circuits
This shows how to generate the circuits. The list of qubits specifies for which qubits to generate characterization circuits; these circuits will run in parallel. The discrete unit of time is the identity gate (``iden``) and so the user must specify the time of each identity gate if they would like the characterization parameters returned in units of time. This should be available from the backend.
```
# Identity-gate counts per circuit: 50 delay lengths between 10 and 300
num_of_gates = (np.linspace(10, 300, 50)).astype(int)
gate_time = 0.1  # duration of one identity gate (backend-dependent units)
# Note that it is possible to measure several qubits in parallel
qubits = [0, 2]
t1_circs, t1_xdata = t1_circuits(num_of_gates, gate_time, qubits)
t2star_circs, t2star_xdata, osc_freq = t2star_circuits(num_of_gates, gate_time, qubits, nosc=5)
# Echo experiments halve/sixth the delays since echoes add gates
t2echo_circs, t2echo_xdata = t2_circuits(np.floor(num_of_gates/2).astype(int),
                                         gate_time, qubits)
t2cpmg_circs, t2cpmg_xdata = t2_circuits(np.floor(num_of_gates/6).astype(int),
                                         gate_time, qubits,
                                         n_echos=5, phase_alt_echo=True)
```
# Backend execution
```
backend = qiskit.Aer.get_backend('qasm_simulator')
shots = 400
# Let the simulator simulate the following times for qubits 0 and 2:
t_q0 = 25.0
t_q2 = 15.0
# Define T1 and T2 noise: the T1 model uses T2 = 2*T1; the T2 model
# takes the T1 -> infinity limit with excited-state population 0.5.
t1_noise_model = NoiseModel()
t1_noise_model.add_quantum_error(
    thermal_relaxation_error(t_q0, 2*t_q0, gate_time),
    'id', [0])
t1_noise_model.add_quantum_error(
    thermal_relaxation_error(t_q2, 2*t_q2, gate_time),
    'id', [2])
t2_noise_model = NoiseModel()
t2_noise_model.add_quantum_error(
    thermal_relaxation_error(np.inf, t_q0, gate_time, 0.5),
    'id', [0])
t2_noise_model.add_quantum_error(
    thermal_relaxation_error(np.inf, t_q2, gate_time, 0.5),
    'id', [2])
# Run the simulator (optimization_level=0 keeps the identity gates)
t1_backend_result = qiskit.execute(t1_circs, backend, shots=shots,
                                   noise_model=t1_noise_model, optimization_level=0).result()
t2star_backend_result = qiskit.execute(t2star_circs, backend, shots=shots,
                                       noise_model=t2_noise_model, optimization_level=0).result()
t2echo_backend_result = qiskit.execute(t2echo_circs, backend, shots=shots,
                                       noise_model=t2_noise_model, optimization_level=0).result()
# It is possible to split the circuits into multiple jobs and then give the results to the fitter as a list:
t2cpmg_backend_result1 = qiskit.execute(t2cpmg_circs[0:5], backend,
                                        shots=shots, noise_model=t2_noise_model,
                                        optimization_level=0).result()
t2cpmg_backend_result2 = qiskit.execute(t2cpmg_circs[5:], backend,
                                        shots=shots, noise_model=t2_noise_model,
                                        optimization_level=0).result()
```
# Analysis of results
```
# Fitting T1: exponential decay fit with initial guess [amp, T1, offset]
%matplotlib inline
plt.figure(figsize=(15, 6))
t1_fit = T1Fitter(t1_backend_result, t1_xdata, qubits,
                  fit_p0=[1, t_q0, 0],
                  fit_bounds=([0, 0, -1], [2, 40, 1]))
print(t1_fit.time())
print(t1_fit.time_err())
print(t1_fit.params)
print(t1_fit.params_err)
# One subplot per measured qubit
for i in range(2):
    ax = plt.subplot(1, 2, i+1)
    t1_fit.plot(i, ax=ax)
plt.show()
```
Execute the backend again to get more statistics, and add the results to the previous ones:
```
# Rerun the T1 circuits for more statistics
t1_backend_result_new = qiskit.execute(t1_circs, backend,
                                       shots=shots, noise_model=t1_noise_model,
                                       optimization_level=0).result()
# Accumulate the new shots into the existing fit
t1_fit.add_data(t1_backend_result_new)
plt.figure(figsize=(15, 6))
for i in range(2):
    ax = plt.subplot(1, 2, i+1)
    t1_fit.plot(i, ax=ax)
plt.show()
# Fitting T2* (oscillating decay; p0 = [amp, T2*, freq, phase, offset])
%matplotlib inline
t2star_fit = T2StarFitter(t2star_backend_result, t2star_xdata, qubits,
                          fit_p0=[0.5, t_q0, osc_freq, 0, 0.5],
                          fit_bounds=([-0.5, 0, 0, -np.pi, -0.5],
                                      [1.5, 40, 2*osc_freq, np.pi, 1.5]))
plt.figure(figsize=(15, 6))
for i in range(2):
    ax = plt.subplot(1, 2, i+1)
    t2star_fit.plot(i, ax=ax)
plt.show()
# Fitting T2 single echo (p0 = [amp, T2, offset])
%matplotlib inline
t2echo_fit = T2Fitter(t2echo_backend_result, t2echo_xdata, qubits,
                      fit_p0=[0.5, t_q0, 0.5],
                      fit_bounds=([-0.5, 0, -0.5],
                                  [1.5, 40, 1.5]))
print(t2echo_fit.params)
plt.figure(figsize=(15, 6))
for i in range(2):
    ax = plt.subplot(1, 2, i+1)
    t2echo_fit.plot(i, ax=ax)
plt.show()
# Fitting T2 CPMG; the two split jobs are passed to the fitter as a list
%matplotlib inline
t2cpmg_fit = T2Fitter([t2cpmg_backend_result1, t2cpmg_backend_result2],
                      t2cpmg_xdata, qubits,
                      fit_p0=[0.5, t_q0, 0.5],
                      fit_bounds=([-0.5, 0, -0.5],
                                  [1.5, 40, 1.5]))
plt.figure(figsize=(15, 6))
for i in range(2):
    ax = plt.subplot(1, 2, i+1)
    t2cpmg_fit.plot(i, ax=ax)
plt.show()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
| github_jupyter |
## Universal Style Transfer
The models above are trained to work for a single style. Using these methods, in order to create a new style transfer model, you have to train the model with a wide variety of content images.
Recent work by Yijun Li et al. shows that it is possible to create a model that generalizes to unseen style images, while maintaining the quality of output images.
Their method works by treating style transfer as an image reconstruction task. They use the output of a VGG19 ReLU layer to encode features of various content images and train a decoder to reconstruct these images. Then, with these two networks fixed, they feed the content and the style image into the encoder and use a whitening and coloring transform so that the covariance matrix of the features matches the covariance matrix of the style.
This process can then be expanded to the remaining ReLU layers of VGG19 to create a style transfer pipeline that can apply to all spatial scales.
Since only content images were used to train the encoder and decoder, additional training is not needed when generalizing this to new styles.
<img src="images/universal-style-transfer.png" style="width: 600px;"/>
(Yijun Li et al., Universal Style Transfer)
<img src="images/doge_the_scream.jpg" style="width: 300px;"/>
<img src="images/doge_mosaic.jpg" style="width: 300px;"/>
The results are pretty impressive, but there are some patches of blurriness, most likely as a result of the transforms.
### Whitening Transform
The whitening transform removes the style from the content image, keeping the global content structure.
The features of the content image, $f_c$, are transformed to obtain $\hat{f}_c$, such that the feature maps
are uncorrelated ($\hat{f}_c \hat{f}_c^T = I$),
$$
\hat{f}_c = E_c D_c^{- \frac{1}{2}} E_c^T f_c
$$
where $D_c$ is a diagonal matrix with the eigenvalues of the covariance matrix $f_c f_c^T \in R^{C \times C}$,
and $E_c$ is the corresponding orthogonal matrix of eigenvectors, satisfying $f_c f_c^T = E_c D_c E_c^T$.
<img src="images/whitening.png" style="width: 300px;"/>
(Yijun Li et al., Universal Style Transfer)
### Coloring Transform
The coloring transform adds the style from the style image onto the content image.
The whitening transformed features of the content image, $\hat{f}_c$, are transformed to obtain $\hat{f}_{cs}$, such that the feature maps have that desired correlations ($\hat{f}_{cs} \hat{f}_{cs}^T = f_s f_s^T$),
$$
\hat{f}_{cs} = E_s D_s^{\frac{1}{2}} E_s^T \hat{f}_c
$$
where $D_s$ is a diagonal matrix with the eigenvalues of the covariance matrix $f_s f_s^T \in R^{C \times C}$,
and $E_s$ is the corresponding orthogonal matrix of eigenvectors, satisfying $f_s f_s^T = E_s D_s E_s^T$.
In practice, we also take a weighted sum of the colored and original activations such that:
$$ f_{blend} = \alpha\hat{f}_{cs} + (1-\alpha)\hat{f}_c $$
Before each transform step, the mean of the corresponding feature maps are subtracted, and the mean of the style features are added back to the final transformed features.
```
# workaround for multiple OpenMP on Mac
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import tensorflow as tf
from pathlib import PurePath
import IPython.display as display
from IPython.display import HTML
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
%matplotlib inline
import numpy as np
import PIL.Image
import time
import functools
print('here')
def tensor_to_image(tensor):
    """Convert a float tensor with values in [0, 1] to an 8-bit PIL image."""
    arr = np.array(tensor * 255, dtype=np.uint8)
    # A batched (4-D) tensor must hold exactly one image; drop the batch axis.
    if np.ndim(arr) > 3:
        assert arr.shape[0] == 1
        arr = arr[0]
    return PIL.Image.fromarray(arr)
def load_img(path_to_img):
    """Load an image file and resize it so its longest side is 512 px.

    Returns a float32 tensor of shape (1, H, W, 3) with values in [0, 1].
    """
    max_dim = 512
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    # Scale both spatial dimensions so the longest side equals max_dim.
    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    long_dim = max(shape)
    scale = max_dim / long_dim
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape)
    # Add the leading batch dimension expected by the model.
    img = img[tf.newaxis, :]
    return img
def imshow(image, title=None):
    """Display *image* with matplotlib, appending its shape to the title."""
    # A batched (4-D) input is squeezed down to the single image it holds.
    if len(image.shape) > 3:
        image = tf.squeeze(image, axis=0)
    plt.imshow(image)
    shape_text = str(image.shape)
    plt.title(shape_text if title is None else title + ' ' + shape_text)
```
# Using a pre-trained AutoEncoder
For this assignment, I will be using an auto encoder created with Yihao Wang, a PhD student in the UbiComp lab here at SMU. The original code used to create this encoder is available for SMU students.
The model that was trained can be downloaded from:
https://www.dropbox.com/sh/2djb2c0ohxtvy2t/AAAxA2dnoFBcHGqfP0zLx-Oua?dl=0
```
# Load the pre-trained block-2 decoder and print its architecture.
ModelBlock2 = tf.keras.models.load_model('decoder_2.h5', compile = False)
ModelBlock2.summary()
class VGG19AutoEncoder(tf.keras.Model):
    """Universal style transfer model: pre-trained VGG19 encoders paired
    with trained decoders, applying a whitening/coloring transform (WCT)
    at three spatial scales (Li et al., "Universal Style Transfer").
    """

    def __init__(self, files_path):
        # files_path: directory containing the saved Block1/Block3 sub-models.
        super(VGG19AutoEncoder, self).__init__()
        #Load Full Model with every trained decoder
        #Get Each SubModel
        # Each model has an encoder, a decoder, and an extra output convolution
        # that converts the upsampled activations into output images
        # DO NOT load models four and five because they are not great auto encoders
        # and therefore will cause weird artifacts when used for style transfer
        ModelBlock3 = tf.keras.models.load_model(str(PurePath(files_path, 'Block3_Model')), compile = False)
        self.E3 = ModelBlock3.layers[0]  # VGG encoder
        self.D3 = ModelBlock3.layers[1]  # Trained decoder from VGG
        self.O3 = ModelBlock3.layers[2]  # Conv layer to get to three channels, RGB image
        # NOTE(review): block 2 is loaded from a hard-coded local file
        # instead of files_path like the other blocks -- confirm intended.
        ModelBlock2 = tf.keras.models.load_model('decoder_2.h5', compile = False)
        self.E2 = ModelBlock2.layers[0]  # VGG encoder
        self.D2 = ModelBlock2.layers[1]  # Trained decoder from VGG
        self.O2 = ModelBlock2.layers[2]  # Conv layer to get to three channels, RGB image
        # no special decoder for this one because VGG first layer has
        # no downsampling. So the decoder is just a convolution
        ModelBlock1 = tf.keras.models.load_model(str(PurePath(files_path, 'Block1_Model')), compile = False)
        self.E1 = ModelBlock1.layers[0]  # VGG encoder, one layer
        self.O1 = ModelBlock1.layers[1]  # Conv layer to get to three channels, RGB image

    def call(self, image, alphas=None, training = False):
        """Apply multi-scale style transfer.

        Input should be a dictionary with 'style' and 'content' keys,
        {'style':style_image, 'content':content_image}; each value is a
        4D tensor (batch, i, j, channel). `alphas` optionally maps
        'layer1'/'layer2'/'layer3' to the style blend weight used at
        that scale (default 0.6 each).
        """
        style_image = image['style']
        content_image = image['content']
        output_dict = dict()
        # this will be the output, where each value is a styled
        # version of the image at layer 1, 2, and 3. So each key in the
        # dictionary corresponds to layer1, layer2, and layer3.
        # we also give back the reconstructed image from the auto encoder
        # so each value in the dict is a tuple (styled, reconstructed)
        x = content_image
        # choose covariance function
        # covariance is more stable, but signal will work for very small images
        wct = self.wct_from_cov
        if alphas==None:
            alphas = {'layer3':0.6,
                      'layer2':0.6,
                      'layer1':0.6}
        # ------Layer 3----------
        # apply whiten/color on layer 3 from the original image
        # get activations
        a_c = self.E3(tf.constant(x))
        a_s = self.E3(tf.constant(style_image))
        # swap grammian of activations, blended with original
        x = wct(a_c.numpy(), a_s.numpy(), alpha=alphas['layer3'])
        # decode the new style
        x = self.O3(self.D3(x))
        x = self.enhance_contrast(x)
        # get reconstruction (plain auto-encode, no style, for comparison)
        reconst3 = self.O3(self.D3(self.E3(tf.constant(content_image))))
        # save off the styled and reconstructed images for display
        blended3 = tf.clip_by_value(tf.squeeze(x), 0, 1)
        reconst3 = tf.clip_by_value(tf.squeeze(reconst3), 0, 1)
        output_dict['layer3'] = (blended3, reconst3)
        # ------Layer 2----------
        # apply whiten/color on layer 2 from the already blended image
        # get activations
        a_c = self.E2(tf.constant(x))
        a_s = self.E2(tf.constant(style_image))
        # swap grammian of activations, blended with original
        x = wct(a_c.numpy(),a_s.numpy(), alpha=alphas['layer2'])
        # decode the new style
        x = self.O2(self.D2(x))
        x = self.enhance_contrast(x,1.3)
        # get reconstruction
        reconst2 = self.O2(self.D2(self.E2(tf.constant(content_image))))
        # save off the styled and reconstructed images for display
        blended2 = tf.clip_by_value(tf.squeeze(x), 0, 1)
        reconst2 = tf.clip_by_value(tf.squeeze(reconst2), 0, 1)
        output_dict['layer2'] = (blended2, reconst2)
        # ------Layer 1----------
        # apply whiten/color on layer 1 from the already blended image
        # get activations
        a_c = self.E1(tf.constant(x))
        a_s = self.E1(tf.constant(style_image))
        # swap grammian of activations, blended with original
        x = wct(a_c.numpy(),a_s.numpy(), alpha=alphas['layer1'])
        # decode the new style (layer 1 has no separate decoder)
        x = self.O1(x)
        x = self.enhance_contrast(x,1.2)
        # get reconstruction
        reconst1 = self.O1(self.E1(tf.constant(content_image)))
        # save off the styled and reconstructed images for display
        blended1 = tf.clip_by_value(tf.squeeze(x), 0, 1)
        reconst1 = tf.clip_by_value(tf.squeeze(reconst1), 0, 1)
        output_dict['layer1'] = (blended1, reconst1)
        return output_dict

    @staticmethod
    def enhance_contrast(image, factor=1.25):
        """Boost image contrast by `factor` to offset decoder washout."""
        return tf.image.adjust_contrast(image,factor)

    @staticmethod
    def wct_from_cov(content, style, alpha=0.6, eps=1e-5):
        '''
        https://github.com/eridgd/WCT-TF/blob/master/ops.py
        Perform Whiten-Color Transform on feature maps using numpy
        See p.4 of the Universal Style Transfer paper for equations:
        https://arxiv.org/pdf/1705.08086.pdf
        `alpha` blends the colored features with the original content
        features; `eps` regularizes the eigenvalue powers.
        '''
        # 1xHxWxC -> CxHxW
        content_t = np.transpose(np.squeeze(content), (2, 0, 1))
        style_t = np.transpose(np.squeeze(style), (2, 0, 1))
        # CxHxW -> CxH*W
        content_flat = content_t.reshape(-1, content_t.shape[1]*content_t.shape[2])
        style_flat = style_t.reshape(-1, style_t.shape[1]*style_t.shape[2])
        # apply a threshold so only the largest eigenvalues are kept
        eigen_val_thresh = 1e-5
        # ===Whitening transform===
        # 1. take mean of each channel
        mc = content_flat.mean(axis=1, keepdims=True)
        fc = content_flat - mc
        # 2. get covariance of content, take SVD
        cov_c = np.dot(fc, fc.T) / (content_t.shape[1]*content_t.shape[2] - 1)
        Uc, Sc, _ = np.linalg.svd(cov_c)
        # 3. truncate the SVD to only the largest eigen values
        k_c = (Sc > eigen_val_thresh).sum()
        Dc = np.diag((Sc[:k_c]+eps)**-0.5)
        Uc = Uc[:,:k_c]
        # 4. Now make a whitened content image
        fc_white = (Uc @ Dc @ Uc.T) @ fc
        # ===Coloring transform===
        # 1. take mean of each channel
        ms = style_flat.mean(axis=1, keepdims=True)
        fs = style_flat - ms
        # 2. get covariance of style, take SVD
        cov_s = np.dot(fs, fs.T) / (style_t.shape[1]*style_t.shape[2] - 1)
        Us, Ss, _ = np.linalg.svd(cov_s)
        # 3. truncate the SVD to only the largest eigen values
        k_s = (Ss > eigen_val_thresh).sum()
        Ds = np.sqrt(np.diag(Ss[:k_s]+eps))
        Us = Us[:,:k_s]
        # 4. Now make a colored image that mixes the Grammian of the style
        # with the whitened content image
        fcs_hat = (Us @ Ds @ Us.T) @ fc_white
        fcs_hat = fcs_hat + ms # add style mean back to each channel
        # Blend transform features with original features
        blended = alpha*fcs_hat + (1 - alpha)*(content_flat)
        # CxH*W -> CxHxW
        blended = blended.reshape(content_t.shape)
        # CxHxW -> 1xHxWxC
        blended = np.expand_dims(np.transpose(blended, (1,2,0)), 0)
        return np.float32(blended)

    @staticmethod
    def wct_from_signal(content, style, alpha=0.6 ):
        # This uses a more computational SVD decomposition to get the Grammian
        # to match. However, the numerical precision makes this totally fail
        # if the activations are too large.
        # This code is only for reference based on our discussion of WCT
        # 1xHxWxC -> CxHxW
        content_t = np.transpose(np.squeeze(content), (2, 0, 1))
        style_t = np.transpose(np.squeeze(style), (2, 0, 1))
        # CxHxW -> Cx(H*W)
        content_flat = content_t.reshape(-1, content_t.shape[1]*content_t.shape[2])
        style_flat = style_t.reshape(-1, style_t.shape[1]*style_t.shape[2])
        singular_val_thresh = 1e-3
        #-------------------------------------------
        # Whitening transform and Coloring transform
        # 1. SVD of content signals
        mc = content_flat.mean()
        fc = content_flat - mc
        Uc, Sc, Vc = np.linalg.svd(fc, full_matrices=False)
        k_c = (Sc > singular_val_thresh).sum()
        # 2. SVD of style signals
        ms = style_flat.mean()
        fs = style_flat - ms
        Us, Ss, Vs = np.linalg.svd(fs, full_matrices=False)
        k_s = (Ss > singular_val_thresh).sum()
        k = min(k_s,k_c)
        # Blend transform features with original features: the style's
        # left singular vectors/values are paired with the content's
        # right singular vectors
        fcs = (Us[:,:k] @ np.diag(Ss[:k]) @ Vc[:k,:]) + mc
        blended = alpha*fcs + (1 - alpha)*(content_flat)
        # CxH*W -> CxHxW
        blended = blended.reshape(content_t.shape)
        # CxHxW -> 1xHxWxC
        blended = np.expand_dims(np.transpose(blended, (1,2,0)), 0)
        return np.float32(blended)
%%time
# Build the autoencoder from the saved per-block decoder weights
AE = VGG19AutoEncoder('../VGGDecoderWeights/')
%%time
from skimage.transform import resize
# Load and show the content and style images
content_path = 'images/dallas_hall.jpg'
style_path = 'images/mosaic_style.png'
content_image = load_img(content_path)
style_image = load_img(style_path)
plt.subplot(1, 2, 1)
imshow(content_image,'Content')
plt.subplot(1, 2, 2)
imshow(style_image,'Style')
# Style-transfer the content image; alphas set the style strength used
# at each VGG block (higher = more style).
tmp = {'style':style_image,
       'content':content_image}
alphas = {'layer3':0.8, 'layer2':0.6, 'layer1':0.6}
decoded_images = AE(tmp, alphas=alphas)
# Show the styled and plain auto-encoded image for each layer
imshow(style_image,'Style')
for layer in decoded_images.keys():
    plt.figure(figsize=(10,10))
    plt.subplot(1,2,1)
    imshow(decoded_images[layer][0],'Styled')
    plt.subplot(1,2,2)
    imshow(decoded_images[layer][1],'Reconstructed')
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Solution Notebook
## Problem: Given two strings, find the longest common substring.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
## Constraints
* Can we assume the inputs are valid?
* No
* Can we assume the strings are ASCII?
* Yes
* Is this case sensitive?
* Yes
* Is a substring a contiguous block of chars?
* Yes
* Do we expect a string as a result?
* Yes
* Can we assume this fits memory?
* Yes
## Test Cases
* str0 or str1 is None -> Exception
* str0 or str1 equals 0 -> ''
* General case
str0 = 'ABCDEFGHIJ'
str1 = 'FOOBCDBCDE'
result: 'BCDE'
## Algorithm
We'll use bottom up dynamic programming to build a table.
<pre>
The rows (i) represent str0.
The columns (j) represent str1.
str1
-------------------------------------------------
| | | A | B | C | D | E | F | G | H | I | J |
-------------------------------------------------
| | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| F | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 |
| O | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 |
s | O | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 |
t | B | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
r | C | 0 | 0 | 1 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 |
0 | D | 0 | 0 | 1 | 2 | 3 | 3 | 3 | 3 | 3 | 3 | 3 |
| B | 0 | 0 | 1 | 2 | 3 | 3 | 3 | 3 | 3 | 3 | 3 |
| C | 0 | 0 | 1 | 2 | 3 | 3 | 3 | 3 | 3 | 3 | 3 |
| D | 0 | 0 | 1 | 2 | 3 | 3 | 3 | 3 | 3 | 3 | 3 |
| E | 0 | 0 | 1 | 2 | 3 | 4 | 4 | 4 | 4 | 4 | 4 |
-------------------------------------------------
if str1[j] != str0[i]:
T[i][j] = max(
T[i][j-1],
T[i-1][j])
else:
T[i][j] = T[i-1][j-1] + 1
</pre>
Complexity:
* Time: O(m * n), where m is the length of str0 and n is the length of str1
* Space: O(m * n), where m is the length of str0 and n is the length of str1
## Code
```
class StringCompare(object):

    def longest_common_substr(self, str0, str1):
        """Return the longest common run of characters shared by str0 and str1.

        Builds a bottom-up DP table T where row i indexes str0 and column j
        indexes str1, then walks the table backwards to reconstruct the
        matched characters.

        Raises:
            TypeError: if either input is None.
        """
        if str0 is None or str1 is None:
            raise TypeError('str input cannot be None')
        # Add one to number of rows and cols for the dp table's
        # first row of 0's and first col of 0's
        num_rows = len(str0) + 1
        num_cols = len(str1) + 1
        T = [[None] * num_cols for _ in range(num_rows)]
        for i in range(num_rows):
            for j in range(num_cols):
                if i == 0 or j == 0:
                    T[i][j] = 0
                # BUG FIX: rows index str0 and columns index str1, so the
                # comparison must be str0[i-1] vs str1[j-1].  The original
                # str0[j-1] != str1[i-1] raised IndexError whenever the two
                # inputs had different lengths.
                elif str0[i - 1] != str1[j - 1]:
                    T[i][j] = max(T[i][j - 1],
                                  T[i - 1][j])
                else:
                    T[i][j] = T[i - 1][j - 1] + 1
        results = ''
        i = num_rows - 1
        j = num_cols - 1
        # Walk backwards to determine the matched characters
        while T[i][j]:
            if T[i][j] == T[i][j - 1]:
                j -= 1
            elif T[i][j] == T[i - 1][j]:
                i -= 1
            elif T[i][j] == T[i - 1][j - 1] + 1:
                # i indexes rows (str0), so the matched char is str0[i-1]
                results += str0[i - 1]
                i -= 1
                j -= 1
            else:
                raise Exception('Error constructing table')
        # Walking backwards builds the string in reverse order
        return results[::-1]
```
## Unit Test
```
%%writefile test_longest_common_substr.py
import unittest
class TestLongestCommonSubstr(unittest.TestCase):
    """Unit tests for StringCompare.longest_common_substr."""

    def test_longest_common_substr(self):
        comparer = StringCompare()
        # None input must raise; two empty strings yield the empty string.
        self.assertRaises(TypeError, comparer.longest_common_substr, None, None)
        self.assertEqual(comparer.longest_common_substr('', ''), '')
        # General case from the problem statement.
        self.assertEqual(
            comparer.longest_common_substr('ABCDEFGHIJ', 'FOOBCDBCDE'),
            'BCDE')
        print('Success: test_longest_common_substr')


def main():
    TestLongestCommonSubstr().test_longest_common_substr()


if __name__ == '__main__':
    main()
%run -i test_longest_common_substr.py
```
| github_jupyter |
```
!git clone https://github.com/muhwagua/color-bert.git
!pip install transformers
import random
import re
import urllib.request
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DataCollatorForLanguageModeling
)
from argparse import Namespace
txt_url = "https://raw.githubusercontent.com/muhwagua/color-bert/main/data/all.txt"
urllib.request.urlretrieve(txt_url, 'train.txt')
args = Namespace()
args.train = "train.txt"
args.max_len = 128
args.model_name = "bert-base-uncased"
args.batch_size = 4
args.color_ratio = 0.5
tokenizer = BertTokenizer.from_pretrained(args.model_name)
class MaskedLMDataset(Dataset):
    """Dataset of [MASK]-ed sentences for BERT masked-LM training.

    Each non-blank line of *file* is masked either by replacing a color
    word (with probability ``color_ratio``) or a random word, then
    tokenized.  With ``masking=False`` the original unmasked lines are
    encoded instead (used to build the matching label dataset).

    NOTE(review): encode_lines reads the module-level ``args.max_len``;
    the tokenizer must be supplied by the caller.
    """

    # Color words that color_mask() searches for, in this priority order.
    COLORS = [
        "red", "orange", "yellow", "green", "blue", "purple", "brown",
        "white", "black", "pink", "lime", "gray", "violet", "cyan",
        "magenta", "khaki",
    ]

    def __init__(self, file, color_ratio, tokenizer, masking):
        self.tokenizer = tokenizer
        self.color_ratio = color_ratio
        self.masking = masking
        self.lines = self.load_lines(file)
        self.masked = self.all_mask(self.lines, self.color_ratio)
        self.ids = self.encode_lines(self.lines, self.masked, masking)

    def load_lines(self, file):
        """Read *file* and return its non-empty, non-whitespace lines."""
        with open(file) as f:
            return [
                line
                for line in f.read().splitlines()
                if len(line) > 0 and not line.isspace()
            ]

    def color_mask(self, line, masking=True):
        """Replace the first color word found in *line* with [MASK].

        BUG FIX: the original returned None (and leaked ``start``/``end``
        via ``global``) when the line contained no color word, which
        later crashed the tokenizer on a None entry.  Such lines now
        fall back to random_mask().
        """
        for color in self.COLORS:
            match = re.search(rf"(\s|^){color}(\s|[.!?\-])", line)
            if match:
                start, end = match.span()
                # The span includes the delimiter on each side, so keep
                # one character at both edges of the replacement.
                # NOTE(review): when the color sits at the very start of
                # the line the leading character is kept verbatim -- this
                # mirrors the original behaviour; confirm it is intended.
                return line[: start + 1] + "[MASK]" + line[end - 1 :]
        # No color word present -- mask a random word instead.
        return self.random_mask(line, masking)

    def random_mask(self, line, masking=True):
        """Replace one randomly chosen word of *line* with [MASK]."""
        words = line.split()
        words[random.choice(range(len(words)))] = "[MASK]"
        return " ".join(words)

    def all_mask(self, lines, color_ratio, masking=True):
        """Mask every line, preferring color_mask with probability color_ratio."""
        masked = []
        for line in lines:
            if random.random() > color_ratio:
                masked.append(self.random_mask(line))
            else:
                masked.append(self.color_mask(line))
        return masked

    def encode_lines(self, lines, masked, masking):
        """Tokenize the masked (masking=True) or raw lines and return input ids."""
        batch_encoding = self.tokenizer(
            masked if masking else lines,
            add_special_tokens=True, truncation=True, padding=True,
            max_length=args.max_len,
        )
        return batch_encoding["input_ids"]

    def __len__(self):
        return len(self.lines)

    # BUG FIX: the original misspelled this as ``__getitem`` (missing the
    # trailing dunder underscores), so dataset indexing -- and therefore
    # DataLoader iteration -- raised NotImplementedError.
    def __getitem__(self, idx):
        return torch.tensor(self.ids[idx], dtype=torch.long)
train_dataset = MaskedLMDataset(args.train, args.color_ratio, tokenizer, masking=True)
label_dataset = MaskedLMDataset(args.train, args.color_ratio, tokenizer, masking=False)
train_dataset.lines[:10]
train_dataset.masked[:10]
train_dataset.lines[-10:]
train_dataset.masked[-10:]
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size)
label_loader = DataLoader(
label_dataset,
batch_size=args.batch_size)
```
| github_jupyter |
# Installing Cantera
For this notebook you will need [Cantera](http://www.cantera.org/), an open source suite of object-oriented software tools for problems involving chemical kinetics, thermodynamics, and/or transport processes.
Fortunately a helpful chap named Bryan Weber has made Anaconda packages, so to install you can simply type
```
conda install -c bryanwweber cantera
```
at your terminal (if you can remember back to when you installed Anaconda!).
If you are on Windows you will probably also need to install the Visual C++ Redistributable which you can get [from Microsoft here](https://www.microsoft.com/en-us/download/details.aspx?id=48145).
There are other, more difficult, ways to install it in [the instructions](http://www.cantera.org/docs/sphinx/html/install.html) if you can't get the Anaconda packages to work. It is also already on the COE computer lab 274 Snell (though there you will have to `pip install jupyter` to get this notebook working).
```
# First, import cantera, with the nickname `ct` to save us some typing later.
import cantera as ct
# Then the usual suspects:
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
```
# Heptane combustion
Download the reduced n-heptane model from LLNL https://combustion.llnl.gov/archived-mechanisms/alkanes/heptane-reduced-mechanism. Save the files alongside this python notebook. These files are in "CHEMKIN" format. First, we have to convert them into a format that is usable by Cantera.
This may take a while and issue some warnings, but then end by saying `Validating mechanism...PASSED`:
```
from cantera import ck2cti
ck2cti.main(['--input=heptanesymp159_mec.txt',
'--thermo=heptanesymp_therm.txt',
'--permissive',
'--output=heptanesymp159.cti'])
```
Clearly, with 160 species and 1540 reactions, this mechanism is more detailed than any we have considered before!
Now, let's create a 'Solution' phase in Cantera called `gas` from the Cantera mechanism file we just created.
```
gas = ct.Solution('heptanesymp159.cti')
```
Let's examine some of the reactions and species in the mechanism.
This will return the first 10 reactions:
```
gas.reaction_equations(np.arange(10))
```
And this will return a list of the chemical species names, joined by spaces:
```
print(" ".join(gas.species_names))
```
Knowing what all those species names mean is a [formidable challenge](http://www.northeastern.edu/comocheng/2014/04/nsf-grant-to-identify-discrepancies/) but we are [making headway](http://www.northeastern.edu/comocheng/2015/05/uscombustionmeeting/) (and more help is welcome).
For now, lets loop through all the species looking for ones with 7 carbons and 16 hydrogen atoms, which should be all the isomers of heptane.
```
for species in gas.species():
if species.composition == {'C':7, 'H':16}:
print(species.name)
```
There is only one!
Based on the name beginning with 'n' let's assume it represents normal-heptane (all 7 carbons in a single chain with no branching), which is the fuel that we want to simulate. Now we need to find the index number for this species.
```
i_heptane = gas.species_names.index('nc7h16')
# alternatively, this shortcut:
i_heptane = gas.species_index('nc7h16')
print("heptane is species index {0}".format(i_heptane))
```
To specify the state of a system we must supply two intensive variables (temperature, pressure, density, specific entropy, specific enthalpy, specific volume) and the composition (mass or mole fractions). We will set the temperature, pressure, and mole fractions. In cantera, mole fractions are `X` and mass fractions are `Y`. We can then print some properties of our gas system by typing `gas()`.
```
gas.TPX = 1000, 10e5, 'nc7h16:1.0'
gas()
```
To find equilibrium you must specify which two intensive variables to hold constant. We'll find the equilibrium at constant Temperature and Pressure, then print the properties again.
```
gas.equilibrate('TP')
gas()
```
You will recall from Thermodynamics II that a system going to equilibrium at constant T and P should minimize the specific Gibbs free energy of the system. Sure enough, it has gone down (compare the "Gibbs function" in the "1 kg" columns above. To check that number represents what we expect (this will be returned in Cantera's default SI units, a combination of K, m<sup>3</sup>, Pa, J, kg, kmol; in this case J/kg)
```
print(gas.h - gas.T * gas.s)
print(gas.g)
```
Now lets find the equilibrium composition at 1 bar pressure and a range of temperatures between 100 and 2000 K
```
temperatures = np.arange(100,2000,20)
# make a big array to store the results in
equilibrium_mass_fractions = np.zeros((len(temperatures), gas.n_species))
for i, T in enumerate(temperatures):
gas.TP = T, 1e5
gas.equilibrate('TP')
print(T,end=" ")
equilibrium_mass_fractions[i,:] = gas.Y
```
Now plot the equilibrium mass fractions as a function of temperature. With 160 lines, let's forgo the legend and instead label the biggest peaks directly.
```
plt.plot(temperatures,equilibrium_mass_fractions)
plt.xlabel("Temperature (K)")
plt.ylabel("Equilibrium mole fraction")
for i, name in enumerate(gas.species_names):
Y = equilibrium_mass_fractions[:,i]
if max(Y)> 0.08:
peakT = temperatures[Y.argmax()]
peakY = max(Y)
plt.text(peakT,peakY, name)
plt.show()
```
## Question (a)
What do you notice about the species that peaks at 100K, and the ones that peak at 2000K? Can you explain or justify this?
To see some of the complexity hidden at low concentrations, let's plot the y axis on a logarithmic scale:
```
plt.semilogy(temperatures,equilibrium_mass_fractions)
plt.ylim(1e-30,1)
plt.xlabel("Temperature (K)")
plt.ylabel("Equilibrium mole fraction")
plt.show()
```
If you think about how many reactions are equilibrated, it was remarkably quick!
Now we'll add some air, which is mostly nitrogen and oxygen. First of all, find the names of anything with just 2 oxygen atoms or just 2 nitrogen atoms.
```
for species in gas.species():
if species.composition == {'O':2} or species.composition == {'N':2}:
print(species.name)
```
Now look up and store the species indices
```
i_oxygen = gas.species_names.index('o2')
print("oxygen is species index {0}".format(i_oxygen))
i_nitrogen = gas.species_names.index('n2')
print("nitrogen is species index {0}".format(i_nitrogen))
```
## Question (b)
For a "stoichiometric" mixture of n-heptane and air (enough oxygen to reach complete combustion) how many moles of heptane and how many moles of nitrogen should you have for one mole of oxygen? Assume air is 80% nitrogen and 20% oxygen.
```
# Basis: one mole of O2.  Air is assumed 80% N2 / 20% O2, and complete
# combustion follows C7H16 + 11 O2 => 7 CO2 + 8 H2O, so the fuel is
# 1/11 mole per mole of O2 and nitrogen is 4 moles per mole of O2.
oxygen_mole = 1. # moles oxygen
## ANSWER:
nitrogen_mole = 4 * oxygen_mole
heptane_mole = oxygen_mole / 11.
## Some checks
assert nitrogen_mole / oxygen_mole == 4, "Assume air is 80% nitrogen and 20% oxygen"
# The stoichiometric coefficient of O2 is 11 (was hidden behind the
# obfuscated arithmetic puzzle 3+1+3//5*3+8-5//3, which equals 11).
assert oxygen_mole / heptane_mole == 11, "C7H16 + ?? O2 => 8 H2O + 7 CO2"
```
Now use those to make a string for the '`X`' when we set `gas.TPX`. Although we call it a mole fraction, they don't need to add up to one: Cantera will normalize it, preserving the ratios. Then print it, use it, and check it.
```
X_string = 'nc7h16:{0},o2:{1},n2:{2}'.format(heptane_mole, oxygen_mole, nitrogen_mole)
print("The 'X' will be set to {0!r}".format(X_string))
gas.TPX = 1000, 10e5, X_string
gas()
assert round(gas.concentrations[i_oxygen] / gas.concentrations[i_heptane], 2) == 11
```
## Question (c)
We can do an equilibrium analysis like before, but before you do,
starting with a stoichiometric mixture of fuel and air
what do you expect the equilibrium composition to mostly consist of?
(Imagine all reactions are fast with no barriers)
```
temperatures = np.arange(100,2000,20)
# make a big array to store the results in
equilibrium_mass_fractions = np.zeros((len(temperatures), gas.n_species))
for i, T in enumerate(temperatures):
gas.TP = T, 1e5
gas.equilibrate('TP')
print(T, end=" ")
equilibrium_mass_fractions[i,:] = gas.Y
plt.plot(temperatures,equilibrium_mass_fractions)
for i, name in enumerate(gas.species_names):
Y = equilibrium_mass_fractions[:,i]
if max(Y)> 0.08:
peakT = temperatures[Y.argmax()]
peakY = max(Y)
plt.text(peakT,peakY, name)
plt.show()
```
## Kinetics
Now that we are done with equilibria, let's calculate some kinetics!
Cantera can do complex networks of reactors with valves, flow controllers, etc.
but we will make a simple "reactor network" with just one constant volume ideal gas batch reactor.
```
gas.TPX = 800, 10e5, X_string
reactor = ct.IdealGasReactor(gas)
reactor_network = ct.ReactorNet([reactor])
start_time = 0.0 #starting time
end_time = 4e-3 # seconds
n_steps = 251
times = np.linspace(start_time, end_time, n_steps)
concentrations = np.zeros((n_steps, gas.n_species))
pressures = np.zeros(n_steps)
temperatures = np.zeros(n_steps)
print_data = True
if print_data:
#this just gives headings
print('{0:>10s} {1:>10s} {2:>10s} {3:>14s}'.format(
't [s]','T [K]','P [Pa]','u [J/kg]'))
for n, time in enumerate(times):
if time > 0:
reactor_network.advance(time)
temperatures[n] = reactor.T
pressures[n] = reactor.thermo.P
concentrations[n,:] = reactor.thermo.concentrations
if print_data:
print('{0:10.3e} {1:10.3f} {2:10.3f} {3:14.6e}'.format(
reactor_network.time, reactor.T, reactor.thermo.P, reactor.thermo.u))
```
Now let's plot some graphs to see how things look
```
plt.plot(times*1e3, concentrations[:,i_heptane])
plt.ylabel("Heptane concentration (kmol/m3)")
plt.xlabel("Time (ms)")
plt.ylim(0,)
plt.show()
plt.plot(times*1e3, pressures/1e5)
plt.xlabel("Time (ms)")
plt.ylabel("Pressure (bar)")
plt.show()
plt.plot(times*1e3, temperatures)
plt.xlabel("Time (ms)")
plt.ylabel("Temperature (K)")
plt.show()
```
Although the timescale is milliseconds instead of hours, that looks remarkably like the thermal runaway reaction that caused the T2 laboratory explosion that we studied last lecture. This time, however, it's not just a thermal runaway but a chemical runaway - it's the gradual accumulation of reactive radical species like `OH` that is auto-catalytic.
Let's look at some of the other species:
```
# skip the zeroth species which is nitrogen
plt.plot(times*1e3, concentrations[:,1:])
plt.ylim(0,)
plt.ylabel("Concentration")
plt.xlabel("Time (ms)")
for i, name in enumerate(gas.species_names):
if i==0: continue
concentration = concentrations[:,i]
peak_concentration = max(concentration)
if peak_concentration > 0.001:
peak_time = times[concentration.argmax()]
plt.text(peak_time*1e3, peak_concentration, name)
plt.show()
```
Let's zoom in on the y axis by making it logarithmic:
```
plt.semilogy(times*1e3, concentrations)
plt.ylim(1e-15,1)
plt.ylabel("Concentration")
plt.xlabel("Time (ms)")
plt.show()
```
What a mess! Let's zoom in a little and see if we can pick out any significant intermediates
```
plt.semilogy(times*1e3, concentrations)
plt.ylim(1e-4,1)
# Add some labels
for t in [1.5, 3]:
i = (times*1e3>t).nonzero()[0][0]
time = times[i]*1e3
for j, name in enumerate(gas.species_names):
concentration = concentrations[i,j]
if concentration > 1e-4:
plt.text(time, concentration, name)
plt.ylabel("Concentration")
plt.xlabel("Time (ms)")
plt.show()
```
Not really! We would have to do a flux analysis and [reaction path diagram](http://www.cantera.org/docs/sphinx/html/cython/examples/kinetics_reaction_path.html) to see what is going on.
## Defining ignition delay time.
We want to identify when the ignition occurs, so that we could compare our simulation with an experiment.
Some experiments measure pressure rise; some monitor the concentration of an intermediate like `OH` via laser absorption; but other studies monitor the luminescence of excited `OH*` decaying to ground state `OH` (which it does by emitting a photon). This process is proportional to the rate of formation (not concentration) of `OH*`, which is predominantly made by reaction of `CH` with `O2`, so it is pretty closely proportional to the product `[CH][O2]`, i.e. "brightest flash of light" is propontional to “peak `OH*` emission” which can be modeled as “peak in the product of `[CH]` and `[O2]`”. Likewise photoemission from creation of excited `CH*` can be modeled reasonably as the product `[C2H][O]`. When modeling an experiment it's important to know precisely what the experimenter measurend and how they defined their derived parameters. For now we'll look for the peak in `OH*` emission:
```
i_ch = gas.species_index('ch')
i_o2 = gas.species_index('o2')
excited_oh_generation = concentrations[:,i_ch] * concentrations[:,i_o2]
plt.plot(times*1e3, excited_oh_generation)
plt.xlabel("Time (ms)")
plt.ylabel("Excited OH* emission (arbitrary units)")
plt.show()
ignition_time = times[excited_oh_generation.argmax()]
print("Ignition delay time is {0} ms".format(ignition_time * 1e3))
```
Now let's put it all together, into a function that takes temperature, pressure, and stoichiometry, and predicts ignition delay time for n-heptane. It's a bit different from before - now we let the ODE solver choose the array of times, which means we don't know how long it will be when we begin, so we have to use lists (which can grow as we add to them) and convert to arrays when we've finished.
```
def get_ignition_delay(temperature, pressure = 10.,
                       stoichiometry = 1.0, plot = False):
    """
    Get the ignition delay time in milliseconds, at the specified
    temperature (K), pressure (bar), and stoichiometry
    (stoichiometric = 1.0, fuel-rich > 1.0, oxygen-rich < 1.0).
    Default pressure is 10.0 bar, default stoichiometry is 1.0.
    If plot=True then it draws a plot (default is False).

    Returns np.infty when no ignition is detected within 10 ms.
    NOTE: mutates the module-level Cantera `gas` object and relies on
    the globals `gas`, `ct`, `np` and `plt`.
    """
    # Mixture basis: 1 mol O2, air assumed 80/20 N2/O2, and
    # C7H16 + 11 O2 => 7 CO2 + 8 H2O, so fuel = stoichiometry/11.
    oxygen_mole = 1.
    nitrogen_mole = 4*oxygen_mole
    heptane_mole = stoichiometry/11
    X_string = 'nc7h16:{0},o2:{1},n2:{2}'.format(heptane_mole, oxygen_mole, nitrogen_mole)
    # Set the gas state (bar -> Pa) and build a single constant-volume
    # ideal-gas batch reactor wrapped in a one-reactor network.
    gas.TPX = temperature, pressure*1e5, X_string
    reactor = ct.IdealGasReactor(gas)
    reactor_network = ct.ReactorNet([reactor])
    time = 0.0
    end_time = 10e-3
    # Use lists instead of arrays, so they can be any length
    # (the ODE solver picks the step sizes, so the number of steps is
    # not known in advance).
    times = []
    concentrations = []
    pressures = []
    temperatures = []
    print_data = True  # NOTE(review): unused in this function
    while time < end_time:
        # Record the state reached by the previous step before stepping on.
        time = reactor_network.time
        times.append(time)
        temperatures.append(reactor.T)
        pressures.append(reactor.thermo.P)
        concentrations.append(reactor.thermo.concentrations)
        # take a timestep towards the end_time.
        # the size of the step will be determined by the ODE solver
        # depending on how quickly things are changing.
        reactor_network.step(end_time)
    print("Reached end time {0:.2f} ms in {1} steps".format(times[-1]*1e3, len(times)))
    # convert the lists into arrays
    concentrations = np.array(concentrations)
    times = np.array(times)
    pressures = np.array(pressures)
    temperatures = np.array(temperatures)
    if plot:
        # Upper panel: pressure (left axis, blue) and temperature
        # (right twin axis, red) versus time in ms.
        plt.subplot(2,1,1)
        plt.plot(times*1e3, pressures/1e5)
        plt.ylabel("Pressure (bar)", color='b')
        ax2 = plt.gca().twinx()
        ax2.set_ylabel('Temperature (K)', color='r')
        ax2.plot(times*1e3, temperatures, 'r')
    # OH* chemiluminescence is modeled as proportional to [CH][O2],
    # as discussed in the notebook text above.
    i_ch = gas.species_index('ch')
    i_o2 = gas.species_index('o2')
    excited_oh_generation = concentrations[:,i_o2] * concentrations[:,i_ch]
    if plot:
        # Lower panel: the OH* emission proxy used to define ignition.
        plt.subplot(2,1,2)
        plt.plot(times*1e3, excited_oh_generation, 'g')
        plt.ylabel("OH* emission")
        plt.ylim(0,max(1e-8,1.1*max(excited_oh_generation)))
        plt.xlabel("Time (ms)")
        plt.tight_layout()
        plt.show()
    # Ignition delay = time of peak OH* emission.  Require the peak to
    # fall after the first couple of steps and above a tiny threshold so
    # a flat (non-igniting) trace is reported as "no ignition".
    step_with_highest_oh_gen = excited_oh_generation.argmax()
    if step_with_highest_oh_gen > 1 and excited_oh_generation.max()>1e-20:
        ignition_time_ms = 1e3 * times[step_with_highest_oh_gen]
        print("At {0} K {1} bar, ignition delay time is {2} ms".format(temperature, pressure, ignition_time_ms))
        return ignition_time_ms
    else:
        print("At {0} K {1} bar, no ignition detected".format(temperature, pressure))
        return np.infty
```
Let's test it at 1000 K, 10 bar.
```
get_ignition_delay(1000, 10, plot=True)
```
Now let's repeat it at a range of temperatures and pressures, and plot all the delay times on one graph
```
temperatures = np.linspace(1000,1500.,25)
ignition_delay_times = np.zeros_like(temperatures)
for P in [10,50]:
for i,T in enumerate(temperatures):
ignition_delay_times[i] = get_ignition_delay(T, P)
plt.semilogy(1000./temperatures, ignition_delay_times, 'o-', label='{0} bar'.format(P))
plt.legend(loc='best')
plt.xlabel("1000K / temperature")
plt.ylabel("Ignition delay time (ms)")
plt.ylim(1e-2,)
plt.show()
```
## Question (d)
Explain why this looks as you would expect from Arrhenius behaviour.
## Question (e)
Repeat the analysis but going down to 650K (i.e. cover the range 650-1500K).
Describe and try to explain what you find.
```
temperatures = np.linspace(650,1500.,25)
ignition_delay_times = np.zeros_like(temperatures)
for P in [10,50]:
for i,T in enumerate(temperatures):
ignition_delay_times[i] = get_ignition_delay(T, P)
plt.semilogy(1000./temperatures, ignition_delay_times, 'o-', label='{0} bar'.format(P))
plt.legend(loc='best')
plt.xlabel("1000K / temperature")
plt.ylabel("Ignition delay time (ms)")
plt.ylim(1e-2,)
plt.show()
```
| github_jupyter |
## Image Classification `CNN` + `Transfer Learning`
> Classifying image from our own dataset with `10` classes.
### Imports
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
import os, random
import matplotlib.pyplot as plt
import shutil
from tensorflow.keras.preprocessing.image import ImageDataGenerator
```
> We are going to use `ImageDataGenerator` to preprocess our images so the file structures should look as follows
```
data
class_1:
- image
- image
...
class_n:
- image
- image
...
```
```
class Electronics:
IMAGES_PATH = 'data_mixed'
IMAGES_BALANCED = 'data'
TRAIN = 'data/train'
VALIDATION = 'data/validation'
TEST = './test'
class_names = os.listdir(Electronics.IMAGES_PATH)
class_names
```
> We want to come up with the following structure on our image `files`
```
data-
train -
class_1
-img1
-img2
-img3
-...
class_2
-...
test -
-img1
-img2
-img3
validation -
class_1
-img1
-img2
-img3
-...
class_2
-...
```
* We are going to move files around using code, and make sure that we have `100` images for each class for `training`
`20` images for each class for `testing` and `20` images for each class for `validation`
#### Folder Architecture
<p align="center">
<img src="https://miro.medium.com/max/700/1*HpvpA9pBJXKxaPCl5tKnLg.jpeg"/>
</p>
> As you can see in the above picture, the test folder should also contain a single folder inside which all the test images are present(Think of it as “unlabeled” class , this is there because the flow_from_directory() expects at least one directory under the given directory path).
```
train_size = 50
test_size = 10
validation_size = 10
if os.path.exists(Electronics.TRAIN) == False:
os.makedirs(Electronics.TRAIN)
if os.path.exists(Electronics.TEST) == False:
os.makedirs(Electronics.TEST)
if os.path.exists(Electronics.VALIDATION) == False:
os.makedirs(Electronics.VALIDATION)
print("Done.")
# Populate data/train/<class>, data/validation/<class> and the flat ./test
# folder by moving randomly chosen images out of the mixed source folder,
# one category at a time.  Each pick re-lists the source directory so an
# already-moved file can never be selected twice (at the cost of an
# O(n^2) listing pattern).
for cate in ["laptop", "desktop", "cellphone"]: # 3 classes for simplicity
    # --- training images -> data/train/<cate>/ ---
    for i in range(train_size):
        images_paths = os.listdir(os.path.join(Electronics.IMAGES_PATH, cate))
        random.shuffle(images_paths)
        # First entry of the shuffled listing == one random remaining image.
        image_path = os.path.join(os.path.join(Electronics.IMAGES_PATH, cate), images_paths[0])
        if os.path.exists(os.path.join(Electronics.TRAIN, cate)) == False:
            os.makedirs(os.path.join(Electronics.TRAIN, cate))
        shutil.move(f'{image_path}', f'{Electronics.TRAIN}/{cate}/{images_paths[0]}')
    print(f"Sucess\nMoved:\t\t{train_size} images\nCategory:\t{cate}\nFrom:\t\t{Electronics.IMAGES_PATH+'/'+cate}\nTo\t\t{Electronics.TRAIN}\n\n")
    # --- validation images -> data/validation/<cate>/ ---
    for i in range(validation_size):
        images_paths = os.listdir(os.path.join(Electronics.IMAGES_PATH, cate))
        random.shuffle(images_paths)
        image_path = os.path.join(os.path.join(Electronics.IMAGES_PATH, cate), images_paths[0])
        if os.path.exists(os.path.join(Electronics.VALIDATION, cate)) == False:
            os.makedirs(os.path.join(Electronics.VALIDATION, cate))
        shutil.move(f'{image_path}', f'{Electronics.VALIDATION}/{cate}/{images_paths[0]}')
    print(f"Sucess\nMoved:\t\t{validation_size} images\nCategory:\t{cate}\nFrom:\t\t{Electronics.IMAGES_PATH+'/'+cate}\nTo\t\t{Electronics.VALIDATION}\n\n")
    # --- test images -> flat ./test folder (no class subfolders:
    # flow_from_directory treats it as a single "unlabeled" class) ---
    for i in range(test_size):
        images_paths = os.listdir(os.path.join(Electronics.IMAGES_PATH, cate))
        random.shuffle(images_paths)
        image_path = os.path.join(os.path.join(Electronics.IMAGES_PATH, cate), images_paths[0])
        shutil.move(f'{image_path}', f'{Electronics.TEST}')
    print(f"Sucess\nMoved:\t\t{test_size} images\nCategory:\t{cate}\nFrom:\t\t{Electronics.IMAGES_PATH+'/'+cate}\nTo\t\t{Electronics.TEST}\n\n")
print("Done.")
```
> Creating `ImageDataGenerator` for each sample
```
train_datagen = ImageDataGenerator(
rescale=1./255,
fill_mode="nearest",
)
test_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(
rescale=1./255,
fill_mode="nearest",
)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(224, 224),
batch_size=8,
class_mode="categorical"
)
validation_generator = validation_datagen.flow_from_directory(
'data/validation',
target_size=(224, 224),
batch_size=8,
class_mode="categorical"
)
test_generator = test_datagen.flow_from_directory(
'.',
target_size=(224, 224),
classes=["test"],
batch_size = 1
)
test_generator.filenames[:2]
test_generator[0][1]
```
> ``Model`` Creation ``CNN``
```
model = keras.Sequential([
keras.layers.Input(shape=(224, 224, 3)),
keras.layers.Conv2D(32, (2, 2), activation='relu'),
keras.layers.MaxPool2D((2, 2)),
keras.layers.Conv2D(64, (2, 2), activation='relu'),
keras.layers.MaxPool2D((2, 2)),
keras.layers.Conv2D(128, (2, 2), activation='relu'),
keras.layers.MaxPool2D((2, 2)),
keras.layers.Conv2D(64, (2, 2), activation='relu'),
keras.layers.Flatten(),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(3, activation='softmax')
])
model.summary()
model.compile(
metrics=["accuracy"],
optimizer = keras.optimizers.Adam(),
loss = keras.losses.CategoricalCrossentropy(),
)
EPOCHS = 5
history = model.fit(train_generator,batch_size=8, validation_data=validation_generator, epochs=EPOCHS)
import pandas as pd
pd.DataFrame(history.history).plot()
predictions = tf.argmax(model.predict(test_generator), axis=1)
predictions
print(test_generator[0][0][0].shape)
plt.imshow(test_generator[2][0][0])
```
### `Transfer Learning` using `VGG16` model.
```
base_model = keras.applications.vgg16.VGG16()
base_model.summary()
```
> Create a `Sequential` model.
```
model2 = keras.Sequential()
```
> Add the layers from the `pretrained` model except the output layer.
```
for layer in base_model.layers[0:-1]:
model2.add(layer)
model2.summary()
```
> Freeze the model by turning `trainable = False` we don't want to retrain the layers again.
```
for layer in model2.layers:
layer.trainable = False
model2.summary()
```
> Creating our `output` layer with 3 classes and adding it to the model.
```
# Output layer sized to our 3 electronics classes.
output_layer = tf.keras.layers.Dense(3, activation='softmax')
# BUG FIX: the new classification head must be attached to the
# transfer-learning model (model2), not the earlier CNN (model) --
# otherwise model2 still ends in VGG16's 4096-unit fc2 layer.
model2.add(output_layer)
model2.summary()
```
> Compile the `model`.
```
model2.compile(
loss = tf.keras.losses.categorical_crossentropy,
optimizer = tf.keras.optimizers.Adam(),
metrics = ['acc']
)
```
> `Train` the model.
```
EPOCHS = 5
# NOTE(review): this re-trains the original CNN `model`, not the VGG16
# transfer-learning model `model2` that was just compiled -- presumably
# it should call model2.fit(...); confirm (and ensure model2 has its
# 3-class output layer attached first).
history = model.fit(train_generator,batch_size=8, validation_data=validation_generator, epochs=EPOCHS)
```
> The model is still **over-fitting** 😭😭😭
| github_jupyter |
# Using `scipy.integrate`
## Authors
Zach Pace, Lia Corrales, Stephanie T. Douglas
## Learning Goals
* perform numerical integration in the `astropy` and scientific python context
* trapezoidal approximation
* gaussian quadrature
* use `astropy`'s built-in black-body curves
* understand how `astropy`'s units interact with one another
* define a Python class
* how the `__call__` method works
* add $\LaTeX$ labels to `matplotlib` figures using the `latex_inline` formatter
## Keywords
modeling, units, synphot, OOP, LaTeX, astrostatistics, matplotlib, units, physics
## Companion Content
* http://synphot.readthedocs.io/en/latest/
* [Using Astropy Quantities for astrophysical calculations](http://www.astropy.org/astropy-tutorials/rst-tutorials/quantities.html)
## Summary
In this tutorial, we will use the examples of the Planck function and the stellar initial mass function (IMF) to illustrate how to integrate numerically, using the trapezoidal approximation and Gaussian quadrature. We will also explore making a custom class, an instance of which is callable in the same way as a function. In addition, we will encounter `astropy`'s built-in units, and get a first taste of how to convert between them. Finally, we will use $\LaTeX$ to make our figure axis labels easy to read.
```
import numpy as np
from scipy import integrate
from astropy.modeling.models import BlackBody
from astropy import units as u, constants as c
import matplotlib.pyplot as plt
%matplotlib inline
```
## The Planck function
The Planck function describes how a black-body radiates energy. We will explore how to find bolometric luminosity using the Planck function in both frequency and wavelength space.
Let's say we have a black-body at 5000 Kelvin. We can find out the total intensity (bolometric) from this object, by integrating the Planck function. The simplest way to do this is by approximating the integral using the trapezoid rule. Let's do this first using the frequency definition of the Planck function.
We'll define a photon frequency grid, and evaluate the Planck function at those frequencies. Those will be used to numerically integrate using the trapezoidal rule. By multiplying a `numpy` array by an `astropy` unit, we get a `Quantity`, which is effectively a combination of one or more numbers and a unit.
<div class="alert alert-info">
**Note on printing units**:
Quantities and units can be printed to strings using the [Format String Syntax](https://docs.python.org/3/library/string.html#format-string-syntax). This demonstration uses the `latex_inline` format that is built in to the `astropy.units` package. To see additional ways to format quantities, see the [Getting Started](http://docs.astropy.org/en/stable/units/#getting-started) section of the astropy.units documentation pages.
</div>
```
bb = BlackBody(5000. * u.Kelvin)
nu = np.linspace(1., 3000., 1000) * u.THz
bb5000K_nu = bb(nu)
plt.plot(nu, bb5000K_nu)
plt.xlabel(r'$\nu$, [{0:latex_inline}]'.format(nu.unit))
plt.ylabel(r'$I_{\nu}$, ' + '[{0:latex_inline}]'.format(bb5000K_nu.unit))
plt.title('Planck function in frequency')
plt.show()
```
### Using $LaTeX$ for axis labels
Here, we've used $LaTeX$ markup to add nice-looking axis labels. To do that, we enclose $LaTeX$ markup text in dollar signs, within a string `r'\$ ... \$'`. The `r` before the open-quote denotes that the string is "raw," and backslashes are treated literally. This is the suggested format for axis label text that includes markup.
Now we numerically integrate using the trapezoid rule.
```
np.trapz(x=nu, y=bb5000K_nu).to('erg s-1 cm-2 sr-1')
```
Now we can do something similar, but for a wavelength grid. We want to integrate over an equivalent wavelength range to the frequency range we did earlier. We can transform the maximum frequency into the corresponding (minimum) wavelength by using the `.to()` method, with the addition of an *equivalency*.
```
lam = np.linspace(nu.max().to(u.AA, equivalencies=u.spectral()),
nu.min().to(u.AA, equivalencies=u.spectral()), 1000)
bb_lam = BlackBody(bb.temperature,
scale=1.0 * u.erg / (u.cm ** 2 * u.AA * u.s * u.sr))
bb5000K_lam = bb_lam(lam)
plt.plot(lam, bb5000K_lam)
plt.xlim([1.0e3, 5.0e4])
plt.xlabel(r'$\lambda$, [{0:latex_inline}]'.format(lam.unit))
plt.ylabel(r'$I_{\lambda}$, ' + '[{0:latex_inline}]'.format(bb5000K_lam.unit))
plt.title('Planck function in wavelength')
plt.show()
np.trapz(x=lam, y=bb5000K_lam).to('erg s-1 cm-2 sr-1')
```
Notice this is within a couple percent of the answer we got in frequency space, despite our bad sampling at small wavelengths!
Many `astropy` functions use units and quantities directly. As you gain confidence working with them, consider incorporating them into your regular workflow. Read more [here](http://docs.astropy.org/en/stable/units/) about how to use units.
### How to simulate actual observations
As of Fall 2017, `astropy` does not explicitly support constructing synthetic observations of models like black-body curves. The [synphot library](https://synphot.readthedocs.io/en/latest/) does allow this. You can use `synphot` to perform tasks like turning spectra into visual magnitudes by convolving with a filter curve.
## The stellar initial mass function (IMF)
The stellar initial mass function tells us how many of each mass of stars are formed. In particular, low-mass stars are much more abundant than high-mass stars are. Let's explore more of the functionality of `astropy` using this concept.
People generally think of the IMF as a power-law probability density function. In other words, if you count the stars that have been born recently from a cloud of gas, their distribution of masses will follow the IMF. Let's write a little class to help us keep track of that:
```
class PowerLawPDF(object):
    """Unnormalized power-law probability density, ``x**gamma / B``.

    Attributes
    ----------
    gamma : power-law exponent.
    B : normalization constant (default 1.); callers may reassign it
        after computing the normalizing integral.
    """

    def __init__(self, gamma, B=1.):
        self.gamma = gamma
        self.B = B

    def __call__(self, x):
        # Evaluate the (possibly unnormalized) density at x.
        unnormalized = x ** self.gamma
        return unnormalized / self.B
```
### The `__call__` method
By defining the method `__call__`, we are telling the Python interpreter that an instance of the class can be called like a function. When called, an instance of this class, takes a single argument, `x`, but it uses other attributes of the instance, like `gamma` and `B`.
### More about classes
Classes are more advanced data structures, which can help you keep track of functionality within your code that all works together. You can learn more about classes in [this tutorial](https://www.codecademy.com/ja/courses/learn-python/lessons/introduction-to-classes/exercises/why-use-classes).
## Integrating using Gaussian quadrature
In this section, we'll explore a method of numerical integration that does not require having your sampling grid set-up already. `scipy.integrate.quad` with reference [here](https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.integrate.quad.html) takes a function and both a lower and upper bound, and our `PowerLawPDF` class takes care of this just fine.
Now we can use our new class to normalize our IMF given the mass bounds. This amounts to normalizing a probability density function. We'll use Gaussian quadrature (`quad`) to find the integral. `quad` returns the numerical value of the integral and its uncertainty. We only care about the numerical value, so we'll pack the uncertainty into `_` (a placeholder variable). We immediately throw the integral into our IMF object and use it for normalizing!
To read more about *generalized packing and unpacking* in Python, look at the original proposal, [PEP 448](https://www.python.org/dev/peps/pep-0448/), which was accepted in 2015.
```
salpeter = PowerLawPDF(gamma=-2.35)
salpeter.B, _ = integrate.quad(salpeter, a=0.01, b=100.)
m_grid = np.logspace(-2., 2., 100)
plt.loglog(m_grid, salpeter(m_grid))
plt.xlabel(r'Stellar mass [$M_{\odot}$]')
plt.ylabel('Probability density')
plt.show()
```
### How many more M stars are there than O stars?
Let's compare the number of M dwarf stars (mass less than 60% solar) created by the IMF, to the number of O stars (mass more than 15 times solar).
```
n_m, _ = integrate.quad(salpeter, a=.01, b=.6)
n_o, _ = integrate.quad(salpeter, a=15., b=100.)
print(n_m / n_o)
```
There are almost 21,000 times as many low-mass stars born as there are high-mass stars!
### Where is all the mass?
Now let's compute the relative total masses for all O stars and all M stars born. To do this, weight the Salpeter IMF by mass (i.e., add an extra factor of mass to the integral). To do this, we define a new function that takes the old power-law IMF as one of its arguments. Since this argument is unchanged throughout the integral, it is passed into the tuple `args` within `quad`. It's important that there is only *one* argument that changes over the integral, and that it is the *first* argument that the function being integrated accepts.
Mathematically, the integral for the M stars is
$$ m^M = \int_{.01 \, M_{\odot}}^{.6 \, M_{\odot}} m \, {\rm IMF}(m) \, dm $$
and it amounts to weighting the probability density function (the IMF) by mass. More generally, you find the value of some property $\rho$ that depends on $m$ by calculating
$$ \rho(m)^M = \int_{.01 \, M_{\odot}}^{.6 \, M_{\odot}} \rho(m) \, {\rm IMF}(m) \, dm $$
```
def IMF_m(m, imf):
    """Mass-weighted IMF integrand: ``m * imf(m)``.

    `m` is the integration variable; `imf` is a callable (e.g. a
    PowerLawPDF instance) held fixed over the integral via ``args=``.
    """
    weighted = m * imf(m)
    return weighted
m_m, _ = integrate.quad(IMF_m, a=.01, b=.6, args=(salpeter, ))
m_o, _ = integrate.quad(IMF_m, a=15., b=100., args=(salpeter, ))
m_m / m_o
```
So about 20 times as much mass is tied up in M stars as in O stars.
### Extras
* Now compare the total luminosity from all O stars to total luminosity from all M stars. This requires a mass-luminosity relation, like this one which you will use as $\rho(m)$:
$$
\frac{L}{L_{\odot}} (M) =
\begin{cases}
\hfill .23 \left( \frac{M}{M_{\odot}} \right)^{2.3} \hfill , \hfill & .1 < \frac{M}{M_{\odot}} < .43 \\
\hfill \left( \frac{M}{M_{\odot}} \right)^{4} \hfill , \hfill & .43 < \frac{M}{M_{\odot}} < 2 \\
\hfill 1.5 \left( \frac{M}{M_{\odot}} \right)^{3.5} \hfill , \hfill & 2 < \frac{M}{M_{\odot}} < 20 \\
\hfill 3200 \left( \frac{M}{M_{\odot}} \right) \hfill , \hfill & 20 < \frac{M}{M_{\odot}} < 100 \\
\end{cases},
$$
* Think about which stars are producing most of the light, and which stars have most of the mass. How might this result in difficulty inferring stellar masses from the light they produce? If you're interested in learning more, see [this review article](https://ned.ipac.caltech.edu/level5/Sept14/Courteau/Courteau_contents.html).
## Challenge problems
* Right now, we aren't worried about the bounds of the power law, but the IMF should drop off to zero probability at masses below .01 solar masses and above 100 solar masses. Modify `PowerLawPDF` in a way that allows both `float` and `numpy.ndarray` inputs.
* Modify the `PowerLawPDF` class to explicitly use `astropy`'s `units` constructs.
* Derive a relationship between recent star-formation rate and $H\alpha$ luminosity. In other words, find a value of $C$ for the function
$${\rm SFR \, [\frac{M_{\odot}}{yr}]} = {\rm C \, L_{H\alpha} \, [\frac{erg}{s}]} \, .$$
* How does this depend on the slope and endpoints of the IMF?
* Take a look at Appendix B of [Hunter & Elmegreen 2004, AJ, 128, 2170](http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:astro-ph/0408229)
* What effect does changing the power-law index or upper mass limit of the IMF have on the value of $C$?
* Predict the effect on the value of $C$ of using a different form of the IMF, like Kroupa or Chabrier (both are lighter on the low-mass end).
* If you're not tired of IMFs yet, try defining a new class that implements a broken-power-law (Kroupa) or log-parabola (Chabrier) IMF. Perform the same calculations as above.
| github_jupyter |
# Prevalencia
Vamos a analizar el influjo de la prevalencia, en el devenir de la enfermedad
<div class="alert alert-block alert-info">
En epidemiología, se denomina <strong>prevalencia</strong> a la proporción de individuos de un grupo o una población (en medicina, persona), que presentan una característica o evento determinado (en medicina, enfermedades).
</div>
Se define <strong>R0</strong>, como el nº de personas que infecta cada persona, en cada ciclo de infección.
<div class="alert alert-block alert-info">
Vamos a generar una lista de tablas y gráficas para ver la evolución del nº de infectados, al variar la prevalencia, y el R0.
</div>
```
import numpy as np
import pandas as pd
import time
from datetime import datetime, date, time, timedelta
from IPython.display import display, HTML
import matplotlib.dates as mdates
SITUACION_INICIAL = 1
prevalencia = 0
R0 = 5.7
DIAS_EN_REINFECTAR=5
### Calculamos la capacidad del sistema sanitario.
####Cuanto se tardaria en copar las camas de uci en cada escenario
"""
"Antes de la crisis sanitaria, España disponía de unas 4.500 camas UCI, capacidad que aumentó hasta las 8.000"
Madrid cuenta con 1.750 camas
Cataluña tiene 1.722 camas
Andalucía con 1.200 camas.
Canarias cuenta con 595 camas.
Euskadi con capacidad para 550 camas.
Castilla-León tiene 500 camas.
Aragón con 300 camas.
Castilla-La Mancha cuenta con 300 camas.
Galicia tiene 274 camas.
Comunidad Valenciana con 254 plazas libres.
Navarra con 156 camas.
Murcia tiene 123 camas.
Baleares con 120 camas.
Extremadura cuenta con 100 camas.
Cantabria con 64 camas.
Asturias cuenta con 61 camas.
La Rioja tiene 23 plazas.
TOTAL = 8092
"""
"De los 11.424 pacientes de Covid-19 ingresados en Madrid, según datos del Ministerio de Sanidad, 1.332 están en la UCI, un 11,7%."
"Si para una prevalencia de 10% (750000 personas para la comunidad de madrid)"
# Estimate the health-care system's capacity: the number of simultaneously
# ill people the system can absorb before ICU beds run out.
NUMERO_CAMAS_UCI=8092
PORCENTAJE_ENFERMOS_NECESITADOS_HOSPITALIZACION = 0.088 # https://www.redaccionmedica.com/secciones/sanidad-hoy/coronavirus-en-personal-sanitario-hospitalizacion-en-el-8-8-de-casos-9925
PORCENTAJE_HOSPITALIZADOS_NECESITADOS_UCI = 0.05 #https://www.elperiodico.com/es/sociedad/20200316/coronavirus-hospitalizados-graves-contagio-7891866
# capacity = ICU beds / (fraction of cases hospitalized) / (fraction of
# hospitalized who need an ICU bed), i.e. the case load at which the
# expected number of ICU patients equals the number of ICU beds.
CAPACIDAD_SISTEMA_SANITARIO = NUMERO_CAMAS_UCI / PORCENTAJE_ENFERMOS_NECESITADOS_HOSPITALIZACION / PORCENTAJE_HOSPITALIZADOS_NECESITADOS_UCI
# Truncate to a whole number of people.
CAPACIDAD_SISTEMA_SANITARIO = int(CAPACIDAD_SISTEMA_SANITARIO)
print ("La estimacion de la capacidad del sistema sanitario es " , CAPACIDAD_SISTEMA_SANITARIO )
def Get_Header(GENERACIONES, df, FECHA_INICIAL_STR='2020-02-01'):
    """Relabel *df*'s columns as dates one reinfection cycle apart.

    Builds GENERACIONES-1 date strings, each DIAS_EN_REINFECTAR days
    after the previous one starting from FECHA_INICIAL_STR, assigns
    them as the column names, and returns the (mutated) frame.
    """
    inicio = datetime.strptime(FECHA_INICIAL_STR, "%Y-%m-%d")
    fechas = [
        datetime.strftime(inicio + timedelta(days=DIAS_EN_REINFECTAR * paso), "%Y-%m-%d")
        for paso in range(1, GENERACIONES)
    ]
    df.columns = fechas
    return df
def Calcular_Cuadro_Prevalencias(R0,GENERACIONES,ARRAY_PREVALENCIAS, SITUACION_INICIAL=1,FECHA_INICIAL_STR = '2020-02-01'):
    """Simulate infection counts per generation for several prevalences.

    For each starting prevalence in ARRAY_PREVALENCIAS, iterates
    GENERACIONES-1 infection cycles starting from SITUACION_INICIAL
    cases, damping growth by both the given prevalence and the
    prevalence accumulated so far (population hard-coded to 45,000,000
    — presumably Spain's; TODO confirm).  Returns a DataFrame indexed
    by date (one reinfection cycle per row, via Get_Header) with one
    column per scenario.
    """
    diccionario_prevalencias = {}
    array=[]
    for prevalencia in ARRAY_PREVALENCIAS :
        infectados_en_esta_generacion = SITUACION_INICIAL
        NUM_GENERACIONES = range(1,GENERACIONES)
        array=[]
        for generacion in NUM_GENERACIONES:
            # Prevalence contributed by infections simulated so far,
            # capped at the whole 45M population.
            prevalencia_esta_iteracion = min(45000000,np.sum(array)) / 45000000
            #print ("infectados_en_esta_generacion",infectados_en_esta_generacion,R0,prevalencia,prevalencia_esta_iteracion)
            # Effective growth: R0 scaled by the remaining susceptible
            # fraction; max(0, ...) stops the count going negative once
            # combined prevalence exceeds 1.
            infectados_en_esta_generacion = int(infectados_en_esta_generacion * R0 * max(0,( 1 - (prevalencia + prevalencia_esta_iteracion)) ) )
            #infectados_en_esta_generacion = infectados_en_esta_generacion * R0 * ( 1 - prevalencia)
            array.append(infectados_en_esta_generacion)
        diccionario_prevalencias['prevalencia ' + str("{:.1f}".format(prevalencia)) + ' y R0 ' + str(R0)] = array
    # Rows = scenarios, columns = generations; relabel columns as dates
    # and transpose so dates become the index.
    df = pd.DataFrame.from_dict(diccionario_prevalencias,'index')
    df = Get_Header(GENERACIONES,df,FECHA_INICIAL_STR)
    df = df.astype(np.int64)
    return df.T
# Auxiliary functions
def interpolate_dataframe(df, freq):
    """Reindex *df* onto a regular hourly or daily grid and fill gaps.

    Parameters
    ----------
    df : DataFrame indexed by dates (datetime-like or parseable strings).
        Mutated in place (index is converted to DatetimeIndex).
    freq : 'H' for an hourly grid, 'D' for a daily grid.

    Returns
    -------
    DataFrame reindexed onto the regular grid, with the rows introduced
    by the reindex filled by quadratic interpolation; the index is
    named 'Fecha'.

    Raises
    ------
    ValueError
        If *freq* is neither 'H' nor 'D' (previously this fell through
        to an unhelpful NameError on `rng`).
    """
    if freq == 'H':
        # Extend to the end of the last day (23 extra hours) so the
        # final day is fully covered by hourly points.
        rng = pd.date_range(df.index.min(),
                            df.index.max() + pd.Timedelta(hours=23),
                            freq='H')
    elif freq == 'D':
        # Truncate both endpoints to midnight so the grid is day-aligned.
        start = datetime.strptime(str(df.index.min())[:10], "%Y-%m-%d")
        end = datetime.strptime(str(df.index.max())[:10], "%Y-%m-%d")
        rng = pd.date_range(start, end, freq='D')
    else:
        raise ValueError("freq must be 'H' or 'D', got %r" % (freq,))
    df.index = pd.to_datetime(df.index)
    df = df.reindex(rng)
    # Fill the NaN rows introduced by the reindex, column by column.
    for column in df.columns:
        df[column] = df[column].interpolate(method="quadratic")
    df.index.name = 'Fecha'
    return df
# first execution
GENERACIONES=8
ARRAY_PREVALENCIAS = np.linspace(0,0.70,8)
ARRAY_PREVALENCIAS
df = Calcular_Cuadro_Prevalencias(R0=R0,GENERACIONES=GENERACIONES,ARRAY_PREVALENCIAS=ARRAY_PREVALENCIAS)
df
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
def Get_Chart(df, title="default"):
    """Plot every column of *df* as a line on one date-axis figure.

    The x axis is formatted as month-day with a tick every 7 days.
    Returns the pyplot module so callers can keep customizing.
    """
    fig = plt.figure(figsize=(8, 6), dpi=80)
    for nombre_columna in df.columns:
        plt.plot(df[nombre_columna])
    plt.legend(df.columns)
    fig.suptitle(title, fontsize=20)
    eje_x = plt.gca().xaxis
    eje_x.set_major_formatter(mdates.DateFormatter('%b-%d'))
    eje_x.set_major_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=45)
    return plt
from IPython.display import display, HTML
ARRAY_R0S = [2.7,
3.7 ,
4.7,
5.7,
6.7 ]
for R0 in ARRAY_R0S :
print("Tabla de como varía el nº de infectados, según varía la prevalencia, con R0 = " + str(R0))
df = Calcular_Cuadro_Prevalencias(R0=R0,GENERACIONES=GENERACIONES,ARRAY_PREVALENCIAS=ARRAY_PREVALENCIAS)
display(HTML (df.to_html()))
plt = Get_Chart(df=interpolate_dataframe(df,'D'), title = 'Numero de infecciones por semana, con R0 = ' + str(R0))
```
TODO:
- Hacer gráfico de la evolución del nº de infectados, en el confinamiento (R0<1)
# Conclusiones:
- Para un R0 estimado de 5.7 del COVID-19, hace falta un <strong>70% de prevalencia</strong>, para que no haya brotes masivos.
- Las <strong>medidas de distanciamiento social</strong>, influyen para mantener bajo el R0, el cual influye mucho en el número de infectados.
- Con <strong>prevalencia bajas</strong>, aunque no haya inmunidad de grupo, el <strong>nº de infectados desciende significativamente</strong>. (En España, que los estudios sugieren una prevalencia menor del 10%, podría haber 1/3 ó 1/2 muertos)
- Además de la prevalencia, también <strong>es importante</strong> tener controlado <strong>el nº de personas infectadas en cada momento</strong>:
- Hay que <strong>detectar los brotes lo mas pronto posible</strong>, y reducir el R0 mediante la <strong>búsqueda de contactos del infectado</strong> cuando el brote es aún pequeño.
- En caso contrario, si no se puede controlar el brote, como el nº de infectados se disparará, se pueden realizar <strong>confinamientos parciales intermitentes</strong>, para reducir el R0 por debajo de 1, hasta que el nº de infectados baje.
## Bonus : ¿ Sirve de algo quedarse en casa ?
```
ARRAY_R0S = [2.7,
3.7 ,
4.7,
5.7,
6.7 ]
df_R0s = pd.DataFrame()
for MI_R0 in ARRAY_R0S :
df = Calcular_Cuadro_Prevalencias(R0=MI_R0,GENERACIONES=GENERACIONES,ARRAY_PREVALENCIAS=ARRAY_PREVALENCIAS)
df_R0s[df.columns[0]] = df[df.columns[0]]
print("Tabla de como varía el nº de infectados, según varía el R0 " )
display(HTML (df_R0s.to_html()))
print("Total de infectados en cada escenario : " )
print( df_R0s.astype(np.int64).sum(axis=0) )
plt = Get_Chart(interpolate_dataframe(df_R0s,'D') ,title= 'Comparativa de nº de infectados variando el R0')
```
Como se ve en la gráfica, bajar el R0, es muy importante, para detener el nº de infectados, y por ende en número de los fallecidos.
## Re- Bonus : Se me ha hecho larguísimo, ¿ Podría haber estado menos tiempo en casa ?
```
def comparacion_semanas(SITUACION_INICIAL=1):
    """Compare epidemic trajectories by lockdown start week.

    For each scenario the infection grows unchecked at R0=5.7 for 4, 5
    or 6 generations, then a lockdown drops R0 to 0.5 until fewer than
    one person remains infected.

    Parameters
    ----------
    SITUACION_INICIAL : initial number of infected people (default 1).

    Returns
    -------
    DataFrame of infected counts, indexed by date (one reinfection
    cycle per row via Get_Header), one column per lockdown-start
    scenario.
    """
    prevalencia = 0
    diccionario_prevalencias = {}
    R0 = 5.7
    for NUM_GENERACIONES in range(5, 8):
        infectados_en_esta_generacion = SITUACION_INICIAL
        array = []
        # Growth phase: uncontrolled spread for NUM_GENERACIONES-1 cycles.
        for generacion in range(1, NUM_GENERACIONES):
            infectados_en_esta_generacion = infectados_en_esta_generacion * R0 * (1 - prevalencia)
            array.append(infectados_en_esta_generacion)
        # Lockdown phase: locked-down Germany's estimated R0 was about
        # 0.7; here a slightly stricter 0.5 is assumed. Iterate until
        # the outbreak dies out (fewer than one infected person).
        NUEVO_R0 = 0.5
        while infectados_en_esta_generacion > 1:
            infectados_en_esta_generacion = infectados_en_esta_generacion * NUEVO_R0 * (1 - prevalencia)
            array.append(infectados_en_esta_generacion)
        diccionario_prevalencias[' R0 ' + str(R0) + ', parando en la semana ' + str(generacion)] = array
    df = pd.DataFrame.from_dict(diccionario_prevalencias, 'index')
    df = Get_Header(df.shape[1] + 1, df)
    df = df.T
    df.index = pd.to_datetime(df.index)
    return df
df = comparacion_semanas(SITUACION_INICIAL=60)
df= df.fillna(0)
print("Total de infectados en cada escenario : " )
print( df.astype(np.int64).sum(axis=0) )
df_interpolate = interpolate_dataframe(df=df,freq='H')
df_interpolate['CAPACIDAD SISTEMA SANITARIO' ] = CAPACIDAD_SISTEMA_SANITARIO
plt = Get_Chart(df = df_interpolate,title= 'Comparativa de infectados y tiempo de confinamiento, según el momento de empezar')
plt.ylabel('Infectados en unidades de millón', size = 10)
print("Tabla de como varía el nº de infectados, según varía la semana de inicio del confinamiento " )
df.style.format ({ c : "{:20,.0f}" for c in df.columns }).background_gradient(cmap='Wistia', )
```
Como se ve en el anterior gráfico, el tiempo que dura el confinamiento, y el nº de infectados, varían enormemente.
Sin duda la demora en adoptar las restricciones - como se ve con esta gráfica de datos teóricos- ha influido en el tiempo de confinamiento, y mucho peor, ha costado un gran número de vidas.
## ¿ Se acabará la pandemia en verano ?
```
import numpy as np
### Aqui
INFECTIVIDAD_POR_DEFECTO = 5.7
ARRAY_R0S = [INFECTIVIDAD_POR_DEFECTO]
infectividad = INFECTIVIDAD_POR_DEFECTO
ARRAY_R0S_TITULOS = ["Infectividad primera ola"]
prevalencia = 0.1
infectividad *= 1 - prevalencia
ARRAY_R0S.append(infectividad)
ARRAY_R0S_TITULOS.append("lo anterior + prevalencia actual")
""" We simulated social distancing by reducing R0
by a fixed proportion, which ranged between 0 and 60%, on par with the reduction in R0
achieved in China through intense social distancing measures
"""
distanciamiento_social_efectivo = 0.3
infectividad *= 1 - distanciamiento_social_efectivo
ARRAY_R0S.append(infectividad)
ARRAY_R0S_TITULOS.append("lo anterior + distanciamiento social efectivo")
"""
Uso de mascarillas
The team investigated the varying effectiveness of facemasks. Previous research shows that even homemade masks made from cotton t-shirts or dishcloths can prove 90% effective at preventing transmission.
The study suggests that an entire population wearing masks of just 75% effectiveness can bring a very high 'R' number of 4.0—the UK was close to this before lockdown—all the way down to under 1.0, even without aid of lockdowns.
https://medicalxpress.com/news/2020-06-widespread-facemask-covid-.html
"""
porcentaje_efectividad_mascarillas = 0.7
porcentaje_poblacion_usa_mascarillas = 0.6
infectividad *= 1 - (porcentaje_efectividad_mascarillas * porcentaje_poblacion_usa_mascarillas)
ARRAY_R0S.append(infectividad)
ARRAY_R0S_TITULOS.append("lo anterior + uso mascarillas")
incremento_grados_temperatura_media = 15
infectividad_verano = 1 - ( incremento_grados_temperatura_media * 0.012 )
infectividad *= infectividad_verano
ARRAY_R0S.append(infectividad)
ARRAY_R0S_TITULOS.append("lo anterior + temperaturas de verano")
"""
Confinamiento :
One study from France estimated that timely lockdowns pushed R0 down to 0.5 from 3.3
https://hal-pasteur.archives-ouvertes.fr/pasteur-02548181/document
"""
#efectividad_confinamiento=0.88
#infectividad_confinamiento = 1 - ( efectividad_confinamiento )
#infectividad *= infectividad_confinamiento
infectividad = 0.5
ARRAY_R0S.append(infectividad)
ARRAY_R0S_TITULOS.append("lo anterior + confinamiento")
ARRAY_TITULOS = []
for i,element in enumerate(ARRAY_R0S):
titulo = str(ARRAY_R0S_TITULOS[i]) + " , R0=" + str(ARRAY_R0S[i])[0:5]
ARRAY_TITULOS.append(titulo)
df_R0s = pd.DataFrame()
GENERACIONES=4
SITUACION_INICIAL=1000
FECHA_INICIAL_STR = '2020-07-01'
for MI_R0 in ARRAY_R0S :
df = Calcular_Cuadro_Prevalencias(
SITUACION_INICIAL = SITUACION_INICIAL,
R0 = MI_R0,
GENERACIONES = GENERACIONES,
ARRAY_PREVALENCIAS = ARRAY_PREVALENCIAS,
FECHA_INICIAL_STR = FECHA_INICIAL_STR
)
df_R0s[df.columns[0]] = df[df.columns[0]]
df_R0s.columns = ARRAY_TITULOS
print("Total de infectados en cada escenario : " )
print("Ejemplo de si salieramos todos del estado de alarma " )
print( df_R0s.astype(np.int64).sum(axis=0) )
plt = Get_Chart(interpolate_dataframe(df_R0s,'D') ,title= 'Comparativa número de infectados por cada factor')
df_R0s.style.format ({ c : "{:20,.0f}" for c in df_R0s.columns }).background_gradient(cmap='Wistia', )
```
De los datos anteriores parece que <strong>la epidemia no se va a poder contener durante el verano</strong>, salvo que alguno de los factores esté minusvalorado :
- Que en la nueva normalidad este tan atemorizada que haya un distanciamento social muy efectivo,
- Que use mascarillas la práctica totalidad de la población, o
- Que el incremento de la radiación solar tenga mas incidencia que la estimada.
No obstante, aunque no se acabará en verano, podemos intentar estimar si habrá una segunda ola que necesite confinamiento en verano.
## ¿ Cuándo llegará la segunda ola ?
Para estimar si el sistema sanitario se verá sobrepasado otra vez, intentamos calcular la capacidad del sistema sanitario.
Sabiendo el nº de camas UCIs, el porcentaje de hospitalizados que necesita una cama UCI, y el porcentaje de enfermos que necesitan hospitalización, estimamos la capacidad del sistema sanitario.
```
GENERACIONES=12
SITUACION_INICIAL=1000
FECHA_INICIAL_STR = '2020-07-01'
df_R0s = pd.DataFrame()
for MI_R0 in ARRAY_R0S :
df = Calcular_Cuadro_Prevalencias(
SITUACION_INICIAL = SITUACION_INICIAL,
R0 = MI_R0,
GENERACIONES = GENERACIONES,
ARRAY_PREVALENCIAS = ARRAY_PREVALENCIAS,
FECHA_INICIAL_STR = FECHA_INICIAL_STR
)
df_R0s[df.columns[0]] = df[df.columns[0]]
df_R0s.columns = ARRAY_TITULOS
df_master = df_R0s.copy()
df_R0s = interpolate_dataframe(df_R0s,'D')
df_R0s = df_R0s[df_R0s < (CAPACIDAD_SISTEMA_SANITARIO * 1.5) ]
df_R0s['CAPACIDAD SISTEMA SANITARIO' ] = CAPACIDAD_SISTEMA_SANITARIO
#plt = Get_Chart(df_R0s ,title= 'Comparativa número de infectados por cada factor')
title = 'Cuando se tarda en superar la capacidad del sistema sanitario, por escenario.'
df = df_R0s
fig = plt.figure(figsize=(8, 6), dpi=80)
ax = plt.gca()
ax.set_ylim([0,CAPACIDAD_SISTEMA_SANITARIO*2])
for ca in df.columns:
plt.plot(df[ca])
plt.legend(df.columns)
fig.suptitle(title, fontsize=20)
#return plt
ax.legend(df.columns, loc='upper left')
plt.ylabel('Infectados en unidades de millón', size = 10)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=7))
plt
print("Estimación de cuando se supera la capacidad del sistema sanitario, en cada escenario")
print("Presuponiendo una situación inicial de 1.000 infectados")
df_master
df_master = df_master[df_master < (CAPACIDAD_SISTEMA_SANITARIO * 2) ]
df_master.style.format ({ c : "{:20,.0f}" for c in df_master.columns }).background_gradient(cmap='Wistia', )
```
### Conclusiones
Estas son estimaciones, no datos reales, pero las tendencias son :
- Parece que las <strong>temperaturas de verano</strong> pueden <strong>ralentizar la infección</strong> lo bastante como para que no tengamos otra ola hasta despues del verano.
- Tras el verano, <strong>futuras olas parecen inevitables</strong> en el plazo de <strong>entre un mes y dos</strong> de la finalización de las altas temperaturas.
- Habrá que adelantarse, con <strong>periodos de confinamientos intermitentes</strong>. La cantidad y duración de estos confinamientos aún deben estimarse.
- Futuros cambios de infectividad, o nuevos tratamientos podrían variar este escenario.
### Vamos a generar nuevas olas epidemicas, en cada escenario.
Las olas progresaran hasta que alcancen la capacidad del sistema sanitario, en cuyo caso habrá un confinamiento
TO-DO : Estimar mejor cuando se ocupan las camas uci (la duración de una estancia uci está entre 2-11 semanas)
TO-DO : Ajustar mejor los parámetros de mortalidad con saturación del sistema.
```
#### PRedicciones a futuro
"""
Crear 30.
Borrar las mayores de umbral superios.
Aplicar hasta que haya menos de x
"""
from datetime import date
RO_CALOR= 1.702
GENERACIONES=9
def calcular_prevision(
        FECHA_FINAL_STR,
        FECHA_INICIAL_STR,
        SITUACION_INICIAL,
        POBLACION_INICIAL_INFECTADA,
        R0_max,
        R0_min,
        Umbral_max,
        Umbral_min):
    """Simulate alternating outbreak/lockdown waves between two dates.

    Repeatedly grows the epidemic at R0_max (or the module-level
    RO_CALOR during summer, roughly Jun-15 to Sep-15) until the
    infected count would exceed Umbral_max, then shrinks it at R0_min
    until it falls below Umbral_min, advancing the clock each phase,
    until FECHA_FINAL_STR is reached.  Prevalence is recomputed each
    phase from POBLACION_INICIAL_INFECTADA plus all infections
    simulated so far, over a hard-coded 45M population.

    Relies on module-level globals RO_CALOR and GENERACIONES, and on
    Calcular_Cuadro_Prevalencias.  Returns a single-column DataFrame
    ('Infectados') indexed by date.
    """
    print( FECHA_FINAL_STR,
        FECHA_INICIAL_STR,
        SITUACION_INICIAL,
        POBLACION_INICIAL_INFECTADA,
        R0_max,
        R0_min,
        Umbral_max,
        Umbral_min)
    df_temp = pd.DataFrame()
    df = pd.DataFrame(columns = ['Infectados'])
    #
    # ISO date strings compare correctly lexicographically, so string
    # comparison is a valid date comparison here.
    while FECHA_INICIAL_STR < FECHA_FINAL_STR :
        df_temp = pd.DataFrame()
        # Prevalence accumulated so far (initial infected + all
        # simulated infections) over the 45M population.
        PREVALENCIA = (POBLACION_INICIAL_INFECTADA + df.iloc[:,0].sum()) /45000000
        ARRAY_PREVALENCIAS = []
        ARRAY_PREVALENCIAS.append(PREVALENCIA)
        # Rise: uncontrolled growth phase.  During summer the reduced
        # RO_CALOR replaces R0_max.
        PERIODO_CALOR = ( FECHA_INICIAL_STR[5:] > '06-15' ) & ( FECHA_INICIAL_STR[5:] < '09-15' )
        R0_DESCONTADO_CALOR = RO_CALOR if PERIODO_CALOR else R0_max
        print(f"""SITUACION_INICIAL={SITUACION_INICIAL},
        R0 = {R0_DESCONTADO_CALOR} ,
        GENERACIONES = {GENERACIONES} ,
        ARRAY_PREVALENCIAS = {ARRAY_PREVALENCIAS} ,
        FECHA_INICIAL_STR = {FECHA_INICIAL_STR}""")
        df_temp = Calcular_Cuadro_Prevalencias( SITUACION_INICIAL = SITUACION_INICIAL ,
            R0 = R0_DESCONTADO_CALOR ,
            GENERACIONES = GENERACIONES ,
            ARRAY_PREVALENCIAS = ARRAY_PREVALENCIAS ,
            FECHA_INICIAL_STR = FECHA_INICIAL_STR )
        # Keep only rows below the health-system threshold and nonzero.
        df_temp['Infectados'] = df_temp.iloc[:,0]
        df_temp = df_temp[(df_temp['Infectados'] < Umbral_max )]
        df_temp = df_temp[(df_temp['Infectados'] != 0 )]
        # NOTE(review): result of dropna() is discarded — probably
        # intended `df_temp = df_temp.dropna()`; confirm before fixing.
        df_temp.dropna()
        df_temp = df_temp.loc[~df_temp.index.duplicated(keep='last')]
        df_temp = df_temp['Infectados']
        df_temp = pd.DataFrame(df_temp)
        df = pd.concat([df_temp,df])
        df = df.sort_index()
        # TODO: decline phase below still pending refinement.
        # Decline: lockdown phase starting from the last simulated state.
        PREVALENCIA = (POBLACION_INICIAL_INFECTADA + df.iloc[:,0].sum()) /45000000
        ARRAY_PREVALENCIAS = []
        ARRAY_PREVALENCIAS.append(PREVALENCIA)
        SITUACION_INICIAL = df.iloc[-1]['Infectados']
        FECHA_INICIAL_STR = df.index[-1]
        df_temp = pd.DataFrame()
        # GENERACIONES=40 gives the decline enough cycles to reach
        # Umbral_min before the rows are filtered below.
        df_temp = Calcular_Cuadro_Prevalencias(
            SITUACION_INICIAL=SITUACION_INICIAL,
            R0=R0_min,
            GENERACIONES=40,
            ARRAY_PREVALENCIAS=ARRAY_PREVALENCIAS,
            FECHA_INICIAL_STR = FECHA_INICIAL_STR )
        df_temp['Infectados'] = df_temp.iloc[:,0]
        df_temp = df_temp[(df_temp['Infectados'] > Umbral_min)]
        df_temp = df_temp[(df_temp['Infectados'] != 0 )]
        # NOTE(review): same discarded dropna() as above.
        df_temp.dropna()
        df_temp = df_temp.loc[~df_temp.index.duplicated(keep='last')]
        df_temp = df_temp['Infectados']
        df_temp = pd.DataFrame(df_temp)
        df = pd.concat([df_temp,df])
        df = df.sort_index()
        # Advance the loop state to the end of the decline phase.
        SITUACION_INICIAL = df.iloc[-1]['Infectados']
        FECHA_INICIAL_STR = df.index[-1]
    df = df.dropna()
    df = df.loc[~df.index.duplicated(keep='last')]
    return df
SITUACION_INICIAL = 1000
POBLACION_INICIAL_INFECTADA = 4500000
R0_max = 5.7
R0_min = 0.5
Umbral_max = CAPACIDAD_SISTEMA_SANITARIO
Umbral_min = 10000
FECHA_INICIAL_STR = '2020-07-01'
FECHA_FINAL_STR = '2021-01-01'
df = calcular_prevision(
FECHA_FINAL_STR,
FECHA_INICIAL_STR,
SITUACION_INICIAL,
POBLACION_INICIAL_INFECTADA,
R0_max,
R0_min,
Umbral_max,
Umbral_min)
df
""" diccionario_R0s = {"Infectividad con prevalencia original, R0=5.7" : { R0 : 5.7, POBLACION_INICIAL_INFECTADA : 4500000} ,
"lo anterior + distanciamiento social efectivo , R0=3.590" : 3.590,
"lo anterior + uso mascarillas , R0=2.082" : 2.082 ,
"lo anterior + temperaturas de verano , R0=1.707" : 1.702
}
Infectividad primera ola , R0=5.7 223383
lo anterior + distanciamiento social efectivo , R0=3.989 83430
lo anterior + uso mascarillas , R0=2.314 20062
lo anterior + temperaturas de verano , R0=1.897 12331
lo anterior + confinamiento , R0=0.5 875
"""
array_parametros = [
{ "descripcion" : "Infectividad con prevalencia original, R0=5.7" , 'R0' : 5.7 , 'POBLACION_INICIAL_INFECTADA' : 0} ,
]
array_parametros = [
{ "descripcion" : "Infectividad con prevalencia original, R0=5.7" , 'R0' : 5.7 , 'POBLACION_INICIAL_INFECTADA' : 0} ,
{ "descripcion" : "Infectividad con prevalencia actual , R0=5.13" , 'R0' : 5.7 , 'POBLACION_INICIAL_INFECTADA' : 4500000} ,
{ "descripcion" : "lo anterior + distanciamiento social efectivo , R0=3.590" , 'R0' : 3.989 , 'POBLACION_INICIAL_INFECTADA' : 4500000} ,
{ "descripcion" : "lo anterior + uso mascarillas , R0=2.082" , 'R0' : 2.314 , 'POBLACION_INICIAL_INFECTADA' : 4500000} ,
]
df_array = []
dict_default_values = {
"SITUACION_INICIAL" : 10000 ,
"R0_min" : 0.5 ,
"Umbral_max" : CAPACIDAD_SISTEMA_SANITARIO ,
"Umbral_min" : 5000 ,
"FECHA_INICIAL_STR" : '2020-07-01' ,
"FECHA_FINAL_STR" : '2021-07-01'
}
for dict_escenario in array_parametros:
## Juntamos los valores por defecto, y los que cambian cada vez.
param = {**dict_escenario, ** dict_default_values}
#print(param)
df_temp = pd.DataFrame()
df_temp = calcular_prevision(
FECHA_FINAL_STR = param['FECHA_FINAL_STR' ],
FECHA_INICIAL_STR = param['FECHA_INICIAL_STR' ],
SITUACION_INICIAL = param['SITUACION_INICIAL' ],
POBLACION_INICIAL_INFECTADA = param['POBLACION_INICIAL_INFECTADA'],
R0_max = param['R0' ],
R0_min = param['R0_min' ],
Umbral_max = param['Umbral_max' ],
Umbral_min = param['Umbral_min' ]
)
df_temp = df_temp.astype(np.int64)
df_temp = df_temp.loc[~df_temp.index.duplicated(keep='last')]
suma = int(df_temp.sum(axis=0)/1000000)
#print(param['descripcion'], df_temp.tail(1).index[-1],"suma: " , suma)
df_array.append(df_temp)
DIAS_CONFINAMIENTO = df_temp.shape[0] - (df_temp['Infectados'] - df_temp['Infectados'].shift(1) > 0).sum()
plt = Get_Chart(df=interpolate_dataframe(df_temp,'D'),
title = param['descripcion'] +" \n, "+ str(suma) + " millones infectados, " + str(DIAS_CONFINAMIENTO) + ", dias de confinamiento." )
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=14))
ax = plt.gca()
ax.set_ylim([0,Umbral_max])
param = {}
df = pd.concat(df_array)
"""Infectividad primera ola , R0=5.7 int64
lo anterior + prevalencia actual , R0=5.13 int64
lo anterior + distanciamiento social efectivo , R0=3.590 int64
lo anterior + uso mascarillas , R0=2.082 int64
lo anterior + temperaturas de verano , R0=1.707 int64
lo anterior + confinamiento , R0=0.5 int64
dtype: object
"""
##### de aqui a abajo, solo debug
SITUACION_INICIAL=829339
R0 = 5.7
GENERACIONES = 15
ARRAY_PREVALENCIAS = [0.10]
FECHA_INICIAL_STR = '2020-10-04'
Calcular_Cuadro_Prevalencias( SITUACION_INICIAL = SITUACION_INICIAL ,
R0 = R0 ,
GENERACIONES = GENERACIONES ,
ARRAY_PREVALENCIAS = ARRAY_PREVALENCIAS ,
FECHA_INICIAL_STR = FECHA_INICIAL_STR )
df['incremento'] = df['Infectados'] - df['Infectados'].shift(1) > 0
df['incremento'].count()
kk = df.head(20)
kk['incremento'].sum()
```
| github_jupyter |
# Entity Extraction from old-style SciSpacy NER Models
These models identify the entity span in an input sentence, but don't attempt to separately link to an external taxonomy. The following variations are possible here. Replace the `MODEL_NAME, MODEL_ALIAS` line in the cell below and repeat run to extract named entity information from the chosen model.
We can run this notebook with different values of `MODEL_NAME` and `MODEL_ALIAS` to create different entity dumps from each model.
```
import dask.dataframe as dd
import json
import numpy as np
import pandas as pd
import s3fs
import spacy
import scispacy
from dask.distributed import Client, progress, get_worker
# MODEL_NAME, MODEL_ALIAS = "en_ner_craft_md", "craft"
MODEL_NAME, MODEL_ALIAS = "en_ner_jnlpba_md", "jnlpba"
# MODEL_NAME, MODEL_ALIAS = "en_ner_bc5cdr_md", "bc5cdr"
# MODEL_NAME, MODEL_ALIAS = "en_ner_bionlp13cg_md", "bionlp"
BUCKET_NAME = "saturn-elsevierinc"
SENTENCE_FOLDER = "/".join(["s3:/", BUCKET_NAME, "cord19-sents-pq-sm"])
ENTITIES_FOLDER = "/".join(["s3:/", BUCKET_NAME,
"cord19-ents-{:s}-pq-sm".format(MODEL_ALIAS)])
sentences_df = dd.read_parquet(SENTENCE_FOLDER, engine="pyarrow")
sentences_df.head(npartitions=10)
len(sentences_df)
```
## Processing
```
client = Client(processes=False, n_workers=2, threads_per_worker=1)
client
# from dask.distributed import Client
# from dask_saturn import SaturnCluster
# cluster = SaturnCluster(n_workers=20)
# client = Client(cluster)
def handle_batch(sents, nlp, ent_class):
    """Run the NER pipeline over one batch of sentences.

    Returns one list per input sentence of
    (eid, ent_class, text, label, score, start_char, end_char) tuples;
    the score is a constant 1.0 because these models expose no
    per-entity confidence.
    """
    piped = nlp.pipe(sents, n_threads=16, batch_size=len(sents))
    return [
        [
            (idx, ent_class, span.text, span.label_, 1.0,
             span.start_char, span.end_char)
            for idx, span in enumerate(doc.ents)
        ]
        for doc in piped
    ]
def handle_partition(part):
    """Extract entities for every sentence in one Dask partition.

    The loaded spaCy model is cached on the Dask worker object so each
    worker pays the model-load cost only once.

    Args:
        part: pandas DataFrame partition with an 'stext' sentence column.

    Returns:
        List with one entity-tuple list per processed sentence
        (see handle_batch for the tuple layout).
    """
    worker = get_worker()
    try:
        nlp = worker.nlp
    except AttributeError:
        # First call on this worker: load the model and cache it.
        # (Was a bare `except:`, which also swallowed unrelated errors.)
        nlp = spacy.load(MODEL_NAME)
        worker.nlp = nlp
    batch_size = 32
    sent_batch, ent_batch, entities = [], [], []
    for _, row in part.iterrows():
        # Flush a full batch through the model before collecting more.
        if len(sent_batch) % batch_size == 0 and len(sent_batch) > 0:
            ent_batch = handle_batch(sent_batch, nlp, MODEL_ALIAS)
            entities.extend(ent_batch)
            sent_batch = []
        try:
            sent_batch.append(row.stext)
        except AttributeError:
            # Row lacking an 'stext' field -- skip it. (The original caught
            # ValueError, which attribute access never raises, so bad rows
            # would have crashed the task instead of being skipped.)
            continue
    # Flush the final, possibly partial, batch.
    if len(sent_batch) > 0:
        ent_batch = handle_batch(sent_batch, nlp, MODEL_ALIAS)
        entities.extend(ent_batch)
    return entities
# Attach one entity list per sentence by running NER partition-by-partition.
entities_df = sentences_df.copy()
entities_df["entities"] = entities_df.map_partitions(
    lambda part: handle_partition(part), meta=("object"))
# The raw sentence text is no longer needed once entities are extracted.
entities_df = entities_df.drop(columns=["stext"])
# One output row per entity tuple; drop sentences that produced none.
entities_df = entities_df.explode("entities")
entities_df = entities_df.dropna()
# Unpack the 7-field entity tuple into named columns
# (eid, eclass, etext, elabel, escore, ent_start_char, ent_end_char).
entities_df["eid"] = entities_df.apply(
    lambda row: row.entities[0], meta=("int"), axis=1)
entities_df["eclass"] = entities_df.apply(
    lambda row: row.entities[1], meta=("str"), axis=1)
entities_df["etext"] = entities_df.apply(
    lambda row: row.entities[2], meta=("str"), axis=1)
entities_df["elabel"] = entities_df.apply(
    lambda row: row.entities[3], meta=("str"), axis=1)
entities_df["escore"] = entities_df.apply(
    lambda row: row.entities[4], meta=("float"), axis=1)
entities_df["ent_start_char"] = entities_df.apply(
    lambda row: row.entities[5], meta=("int"), axis=1)
entities_df["ent_end_char"] = entities_df.apply(
    lambda row: row.entities[6], meta=("int"), axis=1)
entities_df = entities_df.drop(columns=["entities"])
# Pin explicit dtypes so the Parquet schema is stable across runs/models.
entities_df.cord_uid = entities_df.cord_uid.astype(str)
entities_df.pid = entities_df.pid.astype(str)
entities_df.sid = entities_df.sid.astype(np.int32)
entities_df.eid = entities_df.eid.astype(np.int32)
entities_df.eclass = entities_df.eclass.astype(str)
entities_df.etext = entities_df.etext.astype(str)
entities_df.elabel = entities_df.elabel.astype(str)
entities_df.escore = entities_df.escore.astype(np.float32)
entities_df.ent_start_char = entities_df.ent_start_char.astype(np.int32)
entities_df.ent_end_char = entities_df.ent_end_char.astype(np.int32)
# Replace any previous dump for this model, then write (this triggers the
# whole lazy Dask graph above).
fs = s3fs.S3FileSystem()
if fs.exists(ENTITIES_FOLDER):
    fs.rm(ENTITIES_FOLDER, recursive=True)
entities_df.to_parquet(ENTITIES_FOLDER, engine="pyarrow", compression="snappy")
```
## Verify Result
```
# Re-read the freshly written entity Parquet folder and sanity-check it.
entities_df = dd.read_parquet(ENTITIES_FOLDER, engine="pyarrow")
entities_df.head(npartitions=10)
len(entities_df)
```
| github_jupyter |
Welcome to a series on programming quantum computers. There's no shortage of hype around quantum computing on the internet, but I am going to still outline the propositions made by quantum computing in general, as well as how this pertains to us and programmers who intend to work with quantum computers, which we will be doing immediately in this series.
<h4><strong>Warning</strong></h4>
The subject matter of quantum physics in general is very advanced, complex, confusing, and complicated. I am not a quantum physics expert. Dare I suggest too, no one is a quantum physics expert, but there are people far more qualified than me to talk on the theories and physics side of things.
I will still outline some basics, but I highly encourage you to dive into the theory and conceptual side of quantum physics on your own because I really am not the best person to educate there. I am going to focus mainly on the programming and application side of things here.
That said, some basic foundation is required, so:
<h4><strong>What are quantum computers?</strong></h4>
Quantum computers are machines that work with qubits (quantum bits) rather than regular bits.
<h4><strong>What's a qubit?</strong></h4>
A regular bit is a transistor that registers either a high or low voltage, which corresponds to 1 or 0 respectively. Through advances in technology over the years, we have bits that are nearly the size of atoms, which is absolutely incredible.
A quantum bit is a 2-state quantum "device." Many things can be used as qubits, such as a photon's horizontal and vertical polarization, or the spin up or spin down of an electron. What this means for us as computer scientists is that a qubit can be a 0, 1, or both.
Because...
Qubits also have 2 other very important properties:
<ul>
<li>Superposition - this is where a qubit is, while left unobserved, all of its possible states. Once observed, it will collapse into one of the possible states.</li>
<li>Entanglement - This is where one qubit's state is linked to another. When entangled with each other, a change in one of the entangled qubits will change the other instantly. At any distance. Note that both of those words are fully intended. Instantly and *any* distance, which is what Einstein referred to as "Spooky action at a distance," since this appeared to violate various rules like transmitting information faster than the speed of light. This is referred to as quantum non-locality. I haven't personally seen a compelling explanation as to why this isn't spooky action at a distance, but we are assured by super smart people that it isn't... even though the behavior is as such at least with what we'll be doing. Still seems spooky to me.</li>
</ul>
<h4><strong>What, what?</strong></h4>
You're going to be thinking that a lot, but these are the properties that make quantum computers very compelling to use for certain problems. Both superposition and entanglement seem like magic to me, but both are proven qualities of qubits.
<h4><strong>What problems are quantum computers for?</strong></h4>
Quantum computers won't be replacing classical computers. They're more likely to continue working alongside them, just as they are already doing today. As you will see, we tend to use a classical computer to represent a quantum circuit to the quantum computer, the quantum computer runs some cycles on this circuit, and then reports back to us again with a classical bit response.
Quantum computers work very well for problems that exponentially explode.
Problems like logistics, such as the most ideal delivery route for trucks, or even concepts like planning for companies, where each choice branches out into new possible choices and opportunities.
This might still seem vague. Let's consider you're planning a charity dinner and you're attempting to seat people together who will keep each other in good, donating, moods.
Let's pretend for now you just have 1 table with 5 seats. How many combinations do we have here? Well, it's 5x4x3x2x1, or 5 factorial, which is 120. This means there are 120 possible combinations.
What happens if we added... just one more seat? That'd be 6 factorial, which gives us 6x5-factorial, or 720. Just one more seat is 6x as many combinations.
What about 10 seats? That's 3,628,800 combinations.
Modeling is probably the most near-term real-world example that I see quantum computers being useful.
Classical computers can barely simulate many single molecules...and they do a very poor job of it (due to the issue of how many possibilities there are given a situation). You currently can't rely on a classical computer to do chemistry simulations reliably.
A quantum computer can actually model many molecules already today reliably, and it only gets better from here seemingly.
<h4><strong>How do quantum computers consider, fundamentally, more possibilities at once?</strong></h4>
With classical computers:
`n_states = 2 x n_bits`
With quantum computers:
`n_states = 2^n_bits`
Being exponential like this gives us some phenomenal properties. This is why quantum computers can help us with heavy-optimization types of tasks, or just tasks that have many possibilities.
<h4><strong>Quantum computers are probability</strong></h4>
I think the simplest explanation of a quantum computer that I can come up with for now is that:
Classical computers can approximate and synthesize probability, they are not truly accurate and can *never* be with most tasks that use probability, unless of course you really can fit the entire range of possibilities into memory. Then you can get pretty close. But again, the probability space tends to explode as you add more variables.
Quantum computers do not model probability. They simply *are* the probability.
For this reason, quantum computers are also great for modeling things like molecules. With quantum computers, we could perform many chemistry experiments solely on quantum computers, which should significantly speed up many areas of research.
<h4><strong>Sold! I want a quantum computer</strong></h4>
You can't really buy a quantum computer yet, unless you build one yourself. As much as I can guess, a quantum computer would likely be somewhere between ten million dollars and one hundred million dollars, if not more, and require specialized personnel and equipment to keep running. It appears that D-Wave has actually sold at least one $15 million USD quantum computer.
I have to admit though, I am a bit confused on exactly what D-Wave's computers are as compared to what IBM and Google are working on. D-Wave's machines have seemingly been able to have x50 more qubits than everyone else. I see they use some different methods, and no one has seemingly been able to prove their machines are inferior yet. This will be interesting to see play out.
<h4><strong>But we *can* access quantum computers in the cloud!</strong></h4>
So these companies who have no doubt spent hundreds of millions of dollars on these quantum computers...what do they do with them?
It turns out, opening them up to the public, for free, is not uncommon.
Google, Microsoft, IBM, D-Wave, and I am sure many others all offer some form of cloud-based quantum computer access.
I checked into all of the providers that I knew about, and, surprisingly, found IBM's to be the easiest to get up and running with. You really can go from nothing to running on an actual quantum computer in a few minutes.
For free.
FREE.
I feel like making access to quantum computers free is like NASA letting people drive a Mars rover around Mars for free. It's insane!
... but we're currently at a stage with Quantum computers where the technology exists but no one really knows what all we can do here, or what will come of it.
So, let's take advantage of this absurd time and play with the bleeding edge of technology!
IBM has also done what I would call a phenomenal job with making both the tools as well as education available for free to anyone who is interested in diving in. I hope to continue this trend for them.
<h4><strong>Requisites:</strong></h4>
I am assuming you already have python installed and are beyond the python basics. If not, please start with the basics here: <strong><a href="https://pythonprogramming.net/introduction-learn-python-3-tutorials/" target="blank">Python 3 programming basics tutorial</a></strong>.
`pip install qiskit numpy jupyterlab matplotlib qiskit-ibmq-provider`
I personally found that I had to force install `qiskit-ibmq-provider`:
(on linux):
`sudo pip3.7 install --upgrade --force-reinstall qiskit-ibmq-provider`
# Let's get started!
Quantum computers essentially are different to their core, the "bit." Thus, as painful as it may be, we will be starting at the bit-level, the qubit.
We quite literally will be building out the "circuits" for our bits, creating various types of gates.
The following circuit appears to me to be the "hello_world" quantum circuit of choice for everyone, so I'll stick with it.
```
import qiskit as q
%matplotlib inline
circuit = q.QuantumCircuit(2,2) # 2 qubits, 2 classical bits
circuit.x(0) # NOT (Pauli-X) gate: flips qubit 0 from |0> to |1>.
circuit.cx(0, 1) # CNOT (controlled NOT): flips 2nd qubit's value if first qubit is 1
circuit.measure([0,1], [0,1]) # ([qubit register], [classical bit register]) Measure qubits 0 and 1 into classical bits 0 and 1
circuit.draw() # text-based visualization. (pretty cool ...actually! Nice job whoever did this.)
circuit.draw(output="mpl") # matplotlib-based visualization.
```
Alright, we've built our quantum circuit. Time to run the circuit. This is a quantum computing tutorial, so let's go ahead and just get running something on a Quantum Computer out of the way so we can add this to our resume.
Head to <strong><a href="https://quantum-computing.ibm.com/" target="blank">Quantum-computing.ibm.com</a></strong>, create an account, and then you can click on the account icon at the top right (at least at the time of my writing this), then choose "my account"
<img src="https://pythonprogramming.net/static/images/quantum-computing/ibm-account.png" style="max-width:500px">
From there, you can click "copy token" which will copy your token to clipboard, which we will use in a moment:
<img src="https://pythonprogramming.net/static/images/quantum-computing/qiskit-token.png" style="max-width:500px">
Once you've got your token, you're ready to try to connect to a quantum computer and run there.
Now you can connect with your token by doing:
```py
# Replace the placeholder with the token copied from your IBM Q account page.
IBMQ.save_account("TOKEN HERE")
```
I am going to store my token in a file so I don't accidentally share it, but you can just paste it right in here as a string. For me, however, I will do:
```
from qiskit import IBMQ
# Read the API token from a local file (kept out of version control) and
# persist it for future sessions. The `with` block closes the file handle
# promptly -- the original `open(...).read()` leaked it.
with open("token.txt", "r") as token_file:
    IBMQ.save_account(token_file.read())
```
This saves on your actual machine, so you only need to do this once ever (unless your token changes), which is why I am getting the "Credentials already present" message. From then on, you can just do:
```
IBMQ.load_account()
IBMQ.providers()
provider = IBMQ.get_provider("ibm-q")
# List every available backend with its queue length and qubit count.
for backend in provider.backends():
    try:
        qubit_count = len(backend.properties().qubits)
    except AttributeError:
        # Simulator backends report no device properties (.properties() is
        # None), so the attribute access fails -- label them "simulated".
        # (Was a bare `except:`, which also hid real errors such as
        # network/auth failures.)
        qubit_count = "simulated"
    print(f"{backend.name()} has {backend.status().pending_jobs} queued and {qubit_count} qubits")
from qiskit.tools.monitor import job_monitor
# Submit the circuit to a real device and block until it finishes.
backend = provider.get_backend("ibmq_london")
job = q.execute(circuit, backend=backend, shots=500)
job_monitor(job)
from qiskit.visualization import plot_histogram
from matplotlib import style
style.use("dark_background") # I am using dark mode notebook, so I use this to see the chart.
result = job.result()
counts = result.get_counts(circuit)
plot_histogram([counts], legend=['Device'])
```
So mostly we got 11 at the end of our test (each number corresponds to a bit value here), which was expected. Our default qubit value is 0, then we used a NOT gate (`.x`), which then made it a 1. Then we applied an exclusive or, which would flip the 2nd (target qubit), if the first (the control qubit), was a 1. It was, so this is why our intended answer was indeed 11. As you can see, however, we got some 01, 10, and some 00. What's this?
This is noise.
Expect noise, and rely on probability.
This is why we perform many "shots." Depending on the probability distribution possible for your output, you will want to perform a relevant number of "shots" to get the right answer.
Regardless, you did it! Time to update that resume to say "Quantum Programmer!"
Now, let's update this slightly. This is a *pretty* boring circuit.
```
circuit = q.QuantumCircuit(2,2) # 2 qubits, 2 classical bits.
circuit.h(0) # Hadamard gate, puts qubit 0 into superposition
circuit.cx(0, 1) # CNOT (controlled NOT): flips 2nd qubit's value if first qubit is 1
circuit.measure([0,1], [0,1]) # ([qubit register], [classical bit register]) Measure qubits 0 and 1 into classical bits 0 and 1
```
This time we're adding `circuit.h(0)`, which adds a Hadamard gate to qubit 0. This puts that qubit into superposition. How might this impact the measured output of 500 shots do you think?
```
circuit.draw(output="mpl")
# Run the Hadamard+CNOT circuit on the same real device as before.
backend = provider.get_backend("ibmq_london")
job = q.execute(circuit, backend=backend, shots=500)
job_monitor(job)
result = job.result()
counts = result.get_counts(circuit)
plot_histogram([counts], legend=['Device'])
```
Notice now that we get mostly 00 and 11 as results. There's some 01 and 10 as noise, but we see what we expect.
Recall the `controlled not` gate will flip the target qubit (2nd one) if the control qubit (the first) is 1.
The 2nd qubit was 0 since it was never turned on, and the 1st qubit (qubit 0) was in superposition. Superposition means the qubit is in any of the possible states, but will collapse upon a single state when observed.
This is why we make many observations, so that we can see the actual distribution of outcomes.
Thus, we see close to a 50/50 split between 00 and 11 with this circuit. There's obviously noise too, but, in an ideal quantum computer, we'd only see 00 and 11.
As awesome as it really is to run on a quantum computer, it's fairly silly for us to just tinker around on a real quantum computer. Instead, we want to do most of our research and development work on a quantum simulator.
This saves us time (not waiting in a queue) along with getting out of the way for other people who have done their R&D locally and are now ready to test on the real thing.
To do R&D locally, it's quite simple. Rather than using an actual backend, we use a simulator. We could also use the simulator backend that IBM hosts.
```
from qiskit import Aer # simulator framework from qiskit
# qasm_simulator mimics a real device: multi-shot execution returning counts.
sim_backend = Aer.get_backend('qasm_simulator')
```
Can also iterate:
```
# List every simulator backend that Aer provides.
for backend in Aer.backends():
    print(backend)
```
From: https://quantum-computing.ibm.com/jupyter/tutorial/advanced/aer/1_aer_provider.ipynb
- QasmSimulator: Allows ideal and noisy multi-shot execution of qiskit circuits and returns counts or memory
- StatevectorSimulator: Allows ideal single-shot execution of qiskit circuits and returns the final statevector of the simulator after application
- UnitarySimulator: Allows ideal single-shot execution of qiskit circuits and returns the final unitary matrix of the circuit itself. Note that the circuit cannot contain measure or reset operations for this backend
We'll use the `qasm_simulator`, since this most closely matches what we did above.
That said, when using simulators, we can use the unitary simulator if we would rather get something more like a matrix output. A statevector simulator will return a statevector. The Qasm sim returns the counts that we've seen so far.
Different outputs allow us different visualizations.
```
# Same circuit, but executed on the local (ideal, noise-free) simulator.
job = q.execute(circuit, backend=sim_backend, shots=500)
job_monitor(job)
result = job.result()
counts = result.get_counts(circuit)
plot_histogram([counts], legend=['Device'])
```
Notice we didn't get a perfect 50/50, but there was no 01 or 10. Why not? Well, the "simulator" simulates a *perfect* quantum machine. One day, we may reach a perfect quantum machine. For now, perfect quantum machines only exist in the sim.
Anyway, now you can try tinkering about in the simulator, then you can test your results for real on an actual quantum computer when you're ready.
I think that's enough for now! More to come in the future.
If you have questions, please come join us in the <strong><a href="https://discord.gg/sentdex" target="blank">discord channel</a></strong>, otherwise I will see you in the next tutorial!
More resources:
- General intro to quantum computing: https://www.youtube.com/watch?v=7susESgnDv8
- Check out the qiskit textbook: https://community.qiskit.org/textbook/
- as well as Abe's YT lectures here: https://www.youtube.com/watch?v=a1NZC5rqQD8&list=PLOFEBzvs-Vvp2xg9-POLJhQwtVktlYGbY
- https://quantum-computing.ibm.com/jupyter also has some useful notebooks.
| github_jupyter |
# TensorBoard with Fashion MNIST
In this week's exercise you will train a convolutional neural network to classify images of the Fashion MNIST dataset and you will use TensorBoard to explore how its confusion matrix evolves over time.
## Setup
```
# Load the TensorBoard notebook extension.
%load_ext tensorboard
import io
import itertools
import numpy as np
import sklearn.metrics
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from datetime import datetime
from os import getcwd
# Confirm which TensorFlow version this notebook runs against.
print("TensorFlow version: ", tf.__version__)
```
## Load the Fashion-MNIST Dataset
We are going to use a CNN to classify images in the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consists of 70,000 grayscale images of fashion products from 10 categories, with 7,000 images per category. The images have a size of $28\times28$ pixels.
First, we load the data. Even though these are really images, we will load them as NumPy arrays and not as binary image objects. The data is already divided into training and testing sets.
```
# Load the Fashion-MNIST arrays (pre-split into train/test .npy files).
train_images = np.load(f"{getcwd()}/../tmp2/train_images.npy")
train_labels = np.load(f"{getcwd()}/../tmp2/train_labels.npy")
test_images = np.load(f"{getcwd()}/../tmp2/test_images.npy")
test_labels = np.load(f"{getcwd()}/../tmp2/test_labels.npy")
# The labels of the images are integers representing classes.
# Here we set the names of the integer classes, i.e., 0 -> T-shirt/top, 1 -> Trouser, etc.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Format the Images
`train_images` is a NumPy array with shape `(60000, 28, 28)` and `test_images` is a NumPy array with shape `(10000, 28, 28)`. However, our model expects arrays with shape `(batch_size, height, width, channels)` . Therefore, we must reshape our NumPy arrays to also include the number of color channels. Since the images are grayscale, we will set `channels` to `1`. We will also normalize the values of our NumPy arrays to be in the range `[0,1]`.
```
# Pre-process images: add an explicit single grayscale channel so shapes are
# (num_images, 28, 28, 1), and scale pixel values from [0, 255] down to [0, 1].
train_images = train_images.reshape(60000, 28, 28, 1)
train_images = train_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images / 255.0
```
## Build the Model
We will build a simple CNN and compile it.
```
# Build the model: two Conv/MaxPool stages, then a dense classifier ending in
# a 10-way softmax (one probability per Fashion-MNIST class).
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
# Integer (non-one-hot) labels -> sparse categorical crossentropy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
## Plot Confusion Matrix
When training a classifier, it's often useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.
In the cell below, we will define a function that returns a Matplotlib figure containing the plotted confusion matrix.
```
def plot_confusion_matrix(cm, class_names):
    """
    Returns a matplotlib figure containing the plotted confusion matrix.

    Args:
        cm (array, shape = [n, n]): a confusion matrix of integer classes
        class_names (array, shape = [n]): String names of the integer classes

    Returns:
        matplotlib.figure.Figure: heatmap of the raw counts, with each cell
        annotated by its row-normalized value rounded to 2 decimals.
    """
    figure = plt.figure(figsize=(8, 8))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)
    # Normalize the confusion matrix per true-class row. Note this happens
    # AFTER imshow, so the heatmap colors reflect raw counts while the cell
    # text shows normalized fractions.
    cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
    # Use white text if squares are dark; otherwise black.
    threshold = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        color = "white" if cm[i, j] > threshold else "black"
        plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return figure
```
## TensorBoard Callback
We are now ready to train the CNN and regularly log the confusion matrix during the process. In the cell below, you will create a [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics.
```
# Clear logs prior to logging data.
!rm -rf logs/image
# Create a timestamped log directory so successive runs don't collide.
logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# EXERCISE: Define a TensorBoard callback. Use the log_dir parameter
# to specify the path to the directory where you want to save the
# log files to be parsed by TensorBoard.
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# Separate file writer used for the confusion-matrix image summaries.
file_writer_cm = tf.summary.create_file_writer(logdir + '/cm')
```
## Convert Matplotlib Figure to PNG
Unfortunately, the Matplotlib file format cannot be logged as an image, but the PNG file format can be logged. So, you will create a helper function that takes a Matplotlib figure and converts it to PNG format so it can be written.
```
def plot_to_image(figure):
    """
    Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.

    Args:
        figure: matplotlib Figure to serialize.

    Returns:
        A uint8 TF image tensor of shape (1, height, width, 4) (RGBA, with
        a leading batch dimension).
    """
    buf = io.BytesIO()
    # Save the *given* figure. The original called plt.savefig, which writes
    # whatever pyplot considers the "current" figure -- wrong if any other
    # figure was created between plotting and logging.
    figure.savefig(buf, format="png")
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    # Decode the PNG buffer into a TF image tensor with 4 channels (RGBA).
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    # Add the batch dimension expected by tf.summary.image.
    image = tf.expand_dims(image, 0)
    return image
```
## Confusion Matrix
In the cell below, you will define a function that calculates the confusion matrix.
```
def log_confusion_matrix(epoch, logs):
    """Per-epoch hook: compute the test-set confusion matrix and log it to
    TensorBoard as an image summary.

    Args:
        epoch: 0-based epoch index, used as the summary step.
        logs: Keras metrics dict (unused, required by LambdaCallback).
    """
    # Use the model to predict the values from the test_images.
    test_pred_raw = model.predict(test_images)
    test_pred = np.argmax(test_pred_raw, axis=1)
    # Calculate the confusion matrix. BUG FIX: the original passed the
    # undefined name `test_preds` (NameError at the end of the first epoch);
    # the variable computed above is `test_pred`.
    cm = sklearn.metrics.confusion_matrix(test_labels, test_pred)
    figure = plot_confusion_matrix(cm, class_names=class_names)
    cm_image = plot_to_image(figure)
    # Log the confusion matrix as an image summary.
    with file_writer_cm.as_default():
        tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define the per-epoch callback.
cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix)
```
## Running TensorBoard
The next step will be to run the code shown below to render the TensorBoard. Unfortunately, TensorBoard cannot be rendered within the Coursera environment. Therefore, we won't run the code below.
```python
# Start TensorBoard.
%tensorboard --logdir logs/image
# Train the classifier. The TensorBoard callback logs the scalar metrics and
# the Lambda callback logs a confusion-matrix image after every epoch.
model.fit(train_images,
          train_labels,
          epochs=5,
          verbose=0, # Suppress chatty output
          callbacks=[tensorboard_callback, cm_callback],
          validation_data=(test_images, test_labels))
```
However, you are welcome to download the notebook and run the above code locally on your machine or in Google's Colab to see TensorBoard in action. Below are some example screenshots that you should see when executing the code:
<table>
<tr>
<td>
<img src="../tmp2/tensorboard_01.png" width="500"/>
</td>
<td>
<img src="../tmp2/tensorboard_02.png" width="500"/>
</td>
</tr>
</table>
<br>
<br>
<table>
<tr>
<td>
<img src="../tmp2/tensorboard_03.png" width="500"/>
</td>
<td>
<img src="../tmp2/tensorboard_04.png" width="500"/>
</td>
</tr>
</table>
# Submission Instructions
```
# Now click the 'Submit Assignment' button above.
```
# When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This frees up resources for your fellow learners.
```
%%javascript
// Save the notebook so no work is lost.
IPython.notebook.save_checkpoint();
%%javascript
// Shut down the kernel and close the notebook tab to free resources.
window.onbeforeunload = null
window.close();
IPython.notebook.session.delete();
```
| github_jupyter |
```
from causaleffect import *
# Each graph below reproduces an example from the cited paper; edges use
# "->" for directed edges and "<->" for bidirected (confounding) edges.
'''Define G (example in section 3.3 of paper "Identifying Causal Effects with the R Package causaleffect")'''
G1 = createGraph(["X<->Y", "Z->Y", "X->Z", "W->X", "W->Z"])
#plotGraph(G1)
'''Define G2 (example Figure 1a of paper "Identification of Joint Interventional
Distributions in Recursive Semi-Markovian Causal Models")'''
G2 = createGraph(["X->Y_1", "Y_2<->W_1", "W_1<->Y_1", "W_1->X", "W_2->Y_2", "W_2<->W_1", "X<->W_2"])
#plotGraph(G2)
'''Define G2b (example Figure 1b of paper "Identification of Joint Interventional
Distributions in Recursive Semi-Markovian Causal Models")'''
# G2b extends G2 with one extra DIRECTED edge between vertices 3 and 4
# (confounding flag set to 0), making the effect unidentifiable (hedge).
G2b = G2.copy()
G2b.add_edges([(3,4)])
G2b.es[-1]["confounding"] = 0
#plotGraph(G2b)
'''Define G3a (example Figure 1a of paper "Identification of Conditional
Interventional Distributions")'''
G3a = createGraph(["X->Z", "X<->Z", "Z->Y"])
#plotGraph(G3a)
'''Define G3b (example Figure 1b of paper "Identification of Conditional
Interventional Distributions")'''
G3b = createGraph(["X->Z", "X<->Z", "Z<-Y"])
#plotGraph(G3b)
'''Define G3c (example Figure 1c of paper "Identification of Conditional
Interventional Distributions")'''
G3c = createGraph(["X->Z", "X<->Z", "Z<-Y", "W->Y"])
#plotGraph(G3c)
'''Define G7 (example Figure 7 of paper "Identifying Causal Effects with the R Package causaleffect")'''
G7 = createGraph(["X<->Y", "X->Z_1", "X<->Z_2", "X<-Z_2", "X<->Z_3", "Y<-Z_1", "Y<->Z_2", "Y<-Z_3", "Z_1<-Z_2", "Z_2->Z_3"])
#plotGraph(G7)
'''Define G8 (example Figure 8 of paper "Identifying Causal Effects with the R Package causaleffect")'''
G8 = createGraph(["X->Y", "W->Y", "Z->X", "Z->W"])
#plotGraph(G8)
plotGraph(G1)
# Reference inputs/outputs for the ID algorithm on each graph above.
#expected input: ID({'Y'}, {'X'}, G1, verbose=False)
#expected output: \sum_{w, z}P(w)P(z|w, x)\\left(\sum_{x}P(x|w)P(y|w, x, z)\\right)
#------------------------------------------
#expected input: ID({'Y_1', 'Y_2'}, {'X'}, G2, verbose=False)
#expected output: \sum_{w_2}P(w_2)P(y_2|w_2)\\left(\sum_{w_1}P(w_1)P(y_1|w_1, x)\\right)
#------------------------------------------
#expected input: ID({'Y_1', 'Y_2'}, {'X'}, G2b, verbose=False)
#expected output: Hedge found
#------------------------------------------
#expected input: ID({'Y'}, {'X'}, G3a, cond={'Z'}, verbose=False)
#expected output: P(y|x, z)
#------------------------------------------
#expected input: ID({'Y'}, {'X'}, G3b, verbose=False)
#expected output: P(y)
#------------------------------------------
#expected input: ID({'W'}, {'X'}, G3c, cond = {'Z'}, verbose=False)
#expected output: Hedge found
#------------------------------------------
#expected input: ID({'Y', 'Z_1', 'Z_2', 'Z_3'}, {'X'}, G7, verbose=False)
#expected output: P(z_1|x, z_2)P(z_3|z_2)\left(\sum_{x, y, z_3}P(x|z_2)P(y|x, z_1, z_2, z_3)P(z_2)P(z_3|x, z_2)\right)\frac{\left(\sum_{x}P(x|z_2)P(y|x, z_1, z_2, z_3)P(z_2)P(z_3|x, z_2)\right)}{\sum_{x, y}P(x|z_2)P(y|x, z_1, z_2, z_3)P(z_2)P(z_3|x, z_2)}
#------------------------------------------
#expected input: ID({'Y'}, {'X', 'W'}, G8, verbose=False)
#expected output: P(y|w, x, z)
#------------------------------------------
# Run the last example and render the resulting expression as LaTeX.
Prob = ID({'Y'}, {'X', 'W'}, G8, verbose=False)
print(Prob.printLatex(simplify=True, complete_simplification=True))
```
| github_jupyter |
### Mutable Sequences
When dealing with mutable sequences, we have a few more things we can do - essentially adding, removing and replacing elements in the sequence.
This **mutates** the sequence. The sequence's memory address has not changed, but the internal **state** of the sequence has.
#### Replacing Elements
We can replace a single element as follows:
```
# Replace the first element in place: the list's identity (id) is unchanged.
l = list(range(1, 6))
print(id(l))
l[0] = 'a'
print(id(l), l)
```
We can remove all elements from the sequence:
```
# clear() empties the list in place (a mutation), rather than rebinding the name.
l = list(range(1, 6))
l.clear()
print(l)
```
Note that this is **NOT** the same as doing this:
```
# Rebinding `l` to a brand-new empty list leaves the original object untouched.
l = list(range(1, 6))
l = []
print(l)
```
The net effect may look the same, `l` is an empty list, but observe the memory addresses:
```
# clear() mutates: the id printed before and after refers to the same object.
l = list(range(1, 6))
print(id(l))
l.clear()
print(l, id(l))
```
vs
```
# Rebinding: the second id differs because `l` now points at a new object.
l = list(range(1, 6))
print(id(l))
l = []
print(l, id(l))
```
In the second case you can see that the object referenced by `l` has changed, but not in the first case.
Why might this be important?
Suppose you have the following setup:
```
# Rebinding `suits` does not affect `alias`, which still references the
# original list object.
suits = 'Spades Hearts Diamonds Clubs'.split()
alias = suits
suits = []
print(suits, alias)
```
But using clear:
```
# clear() mutates the one shared object, so the alias sees the change too.
suits = 'Spades Hearts Diamonds Clubs'.split()
alias = suits
suits.clear()
print(suits, alias)
```
Big difference!!
We can also replace elements using slicing and extended slicing. Here's an example, but we'll come back to this in a lot of detail:
```
# Slice assignment replaces elements 0..1 with five new ones — the list grows
# in place, keeping the same identity.
l = list(range(1, 6))
print(id(l))
l[0:2] = list('abcde')
print(id(l), l)
```
#### Appending and Extending
We can also append elements to the sequence (note that this is **not** the same as concatenation):
```
# append() mutates in place: same object before and after.
l = list(range(1, 4))
print(id(l))
l.append(4)
print(l, id(l))
```
If we had "appended" the value `4` using concatenation:
```
# Concatenation builds a brand-new list, so the id changes.
l = list(range(1, 4))
print(id(l))
l = l + [4]
print(id(l), l)
```
If we want to add more than one element at a time, we can extend a sequence with the contents of any iterable (not just sequences):
```
# extend() accepts any iterable; extending with a set mutates in place, but
# the order of the appended elements is not guaranteed.
l = list(range(1, 6))
print(id(l))
l.extend({'a', 'b', 'c'})
print(id(l), l)
```
Of course, since we extended using a set, there was no guarantee of positional ordering.
If we extend with another sequence, then positional ordering is retained:
```
# Extending with a sequence (here a tuple) preserves positional order.
l = list(range(1, 4))
l.extend(('a', 'b', 'c'))
print(l)
```
#### Removing Elements
We can remove (and retrieve at the same time) an element from a mutable sequence:
```
# pop(i) removes and returns the element at index i, mutating in place.
l = list(range(1, 5))
print(id(l))
popped = l.pop(1)
print(id(l), popped, l)
```
If we do not specify an index for `pop`, then the **last** element is popped:
```
# With no index argument, pop() removes and returns the LAST element.
l = list(range(1, 5))
popped = l.pop()
print(popped)
print(id(l), popped, l)
```
#### Inserting Elements
We can insert an element at a specific index. What this means is that the element we are inserting will be **at** that index position, and the element that was at that position and all the remaining elements are pushed out to the right:
```
# insert(i, x) places x at index i; everything from i onward shifts right.
l = list(range(1, 5))
print(id(l))
l.insert(1, 'a')
print(id(l), l)
```
#### Reversing a Sequence
We can also do in-place reversal:
```
# reverse() flips the list in place — same object, reversed contents.
l = list(range(1, 5))
print(id(l))
l.reverse()
print(id(l), l)
```
We can also reverse a sequence using extended slicing (we'll come back to this later):
```
# The extended slice produces a NEW reversed list; `l` itself is unchanged.
l = list(range(1, 5))
l[::-1]
```
But this is **NOT** mutating the sequence - the slice is returning a **new** sequence - that happens to be reversed.
```
# Rebinding to the reversed slice gives `l` a new identity (not a mutation).
l = list(range(1, 5))
print(id(l))
l = l[::-1]
print(id(l), l)
```
#### Copying Sequences
We can create a copy of a sequence:
```
# copy() produces a shallow copy: equal contents, distinct object.
l = list(range(1, 5))
print(id(l))
l2 = l.copy()
print(id(l2), l2)
```
Note that the `id` of `l` and `l2` is not the same.
In this case, using slicing does work the same as using the `copy` method:
```
# A full slice [:] also yields a shallow copy — same effect as copy().
l = list(range(1, 5))
print(id(l))
l2 = l[:]
print(id(l2), l2)
```
As you can see in both cases we end up with new objects.
So, use copy() or [:] - up to you, they end up doing the same thing.
We'll come back to copying in some detail in an upcoming video as this is an important topic with some subtleties.
| github_jupyter |
```
# Based on Huggingface interface
# - https://huggingface.co/transformers/notebooks.html
# - https://github.com/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb
# -
# Transformers installation, if needed
#! pip install transformers datasets
```
# Task: Sentiment analysis
```
# Default model used is - "distilbert-base-uncased-finetuned-sst-2-english"
# Binary sentiment classification (POSITIVE/NEGATIVE with a confidence score).
from transformers import pipeline
classifier = pipeline('sentiment-analysis')

# Factual regulatory text — exercises the classifier on non-opinion input.
data = ["NSDWRs (or secondary standards) are non-enforceable guidelines regulating contaminants that may cause cosmetic effects (such as skin or tooth discoloration) or aesthetic effects (such as taste, odor, or color) in drinking water.",
        " EPA recommends secondary standards to water systems but does not require systems to comply with the standard. ",
        "However, states may choose to adopt them as enforceable standards."]
# Now run to see sentiments
results = classifier(data)
for result in results:
    print(f"label: {result['label']}, with score: {round(result['score'], 4)}")

# Short probe sentences, including negation ("not bad") and named subjects.
data2 = ["this is good",
         "this is not bad",
         "this is bad bad bad",
         "this is too good",
         "this is too bad",
         "this is not bad",
         "No one did a bad action",
         "Jamil did a bad action",
         "John did a bad action"]
# Now run to see sentiments
results = classifier(data2)
i = 0
for result in results:
    # Walk data2 in lockstep with results so each text prints beside its label.
    print(f"text: {data2[i]} -> label: {result['label']}, with score: {round(result['score'], 4)}")
    i = i+1
```
# Task: Question Answering
```
# Extractive QA with the pipeline's default question-answering model.
question_answerer = pipeline("question-answering")
# From tutorial
context = r"""
Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script.
"""
result = question_answerer(question="What is extractive question answering?", context=context)
print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
result = question_answerer(question="What is a good example of a question answering dataset?", context=context)
print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
# Same task on domain text (drinking-water regulations).
context = r"""
National Secondary Drinking Water Regulations (NSDWRs)
NSDWRs (or secondary standards) are non-enforceable guidelines regulating contaminants that may cause cosmetic effects (such as skin or tooth discoloration) or aesthetic effects (such as taste, odor, or color) in drinking water.
EPA recommends secondary standards to water systems but does not require systems to comply with the standard. However, states may choose to adopt them as enforceable standards.
"""
result = question_answerer(question="What are NSDWRs (or secondary standards)?", context=context)
print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
result = question_answerer(question="What does EPA recommend?", context=context)
print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
# NOTE(review): this checkpoint is a SENTIMENT classifier (SST-2), not a QA
# model — loading it into a question-answering pipeline will give poor or
# meaningless answers. Presumably a deliberate "wrong model" demonstration;
# confirm, otherwise drop this re-assignment.
question_answerer = pipeline("question-answering", model = "distilbert-base-uncased-finetuned-sst-2-english")
result = question_answerer(question="What is extractive question answering?", context=context)
print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
result = question_answerer(question="What is a good example of a question answering dataset?", context=context)
print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
# Straight from HuggingFace tutorial
# Manual extractive QA: encode (question, context), take argmax of the
# start/end logits, and decode the token span between them.
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering
import tensorflow as tf
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
# Caused error. Fixed with solution adapted from - https://discuss.huggingface.co/t/the-question-answering-example-in-the-doc-throws-an-attributeerror-exception-please-help/2611
# model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad", return_dict=True)
text = r"""
🤗 Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose
architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet…) for Natural Language Understanding (NLU) and Natural
Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between
TensorFlow 2.0 and PyTorch.
"""
questions = [
    "How many pretrained models are available in 🤗 Transformers?",
    "What does 🤗 Transformers provide?",
    "🤗 Transformers provides interoperability between which frameworks?",
]
for question in questions:
    inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="tf")
    input_ids = inputs["input_ids"].numpy()[0]
    outputs = model(inputs)
    answer_start_scores = outputs.start_logits
    answer_end_scores = outputs.end_logits
    # Get the most likely beginning of answer with the argmax of the score
    answer_start = tf.argmax(answer_start_scores, axis=1).numpy()[0]
    # Get the most likely end of answer with the argmax of the score
    answer_end = tf.argmax(answer_end_scores, axis=1).numpy()[0] + 1
    # Decode the token span [answer_start, answer_end) back into a string.
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
    print(f"Question: {question}")
    print(f"Answer: {answer}")
# Make the QA loop above reusable as a function.
def performQA(text, questions):
    """Answer each question in `questions` against `text` and print the pairs.

    Uses the module-level `tokenizer` and `model` (BERT fine-tuned on SQuAD)
    defined in the previous cell — TODO: consider passing them as parameters.

    Improvement over the original: the extracted answers are also collected
    and returned (as a list of strings, one per question) so callers can use
    them programmatically; previously the results were print-only.
    """
    answers = []
    for question in questions:
        # Encode the (question, context) pair for the model.
        inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="tf")
        input_ids = inputs["input_ids"].numpy()[0]
        outputs = model(inputs)
        answer_start_scores = outputs.start_logits
        answer_end_scores = outputs.end_logits
        # Get the most likely beginning of answer with the argmax of the score
        answer_start = tf.argmax(answer_start_scores, axis=1).numpy()[0]
        # Get the most likely end of answer with the argmax of the score
        answer_end = tf.argmax(answer_end_scores, axis=1).numpy()[0] + 1
        # Decode the token span [answer_start, answer_end) back into a string.
        answer = tokenizer.convert_tokens_to_string(
            tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
        print(f"Question: {question}")
        print(f"Answer: {answer}")
        answers.append(answer)
    return answers
# Try on water examples
# Apply the reusable QA function to the drinking-water regulation text.
text = r"""
National Secondary Drinking Water Regulations (NSDWRs)
NSDWRs (or secondary standards) are non-enforceable guidelines regulating contaminants that may cause cosmetic effects (such as skin or tooth discoloration) or aesthetic effects (such as taste, odor, or color) in drinking water.
EPA recommends secondary standards to water systems but does not require systems to comply with the standard. However, states may choose to adopt them as enforceable standards.
"""
questions = [
    "What are NSDWRs?",
    "What are NSDWRs (or secondary standards)?",
    "What does EPA recommend?",
]
# See it in action
performQA(text, questions)
```
# Task: Summarization
```
# Abstractive summarization with the pipeline's default model.
from transformers import pipeline
summarizer = pipeline("summarization")
ARTICLE = """ New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York.
A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.
Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other.
In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage.
Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the
2010 marriage license application, according to court documents.
Prosecutors said the marriages were part of an immigration scam.
On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further.
After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective
Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.
All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.
Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.
Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.
The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s
Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.
Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.
If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.
"""
# Greedy (do_sample=False) summary bounded to 30-130 tokens.
print(summarizer(ARTICLE, max_length=130, min_length=30, do_sample=False))
```
| github_jupyter |
# Measuring a Multiport Device with a 2-Port Network Analyzer
## Introduction
In microwave measurements, one commonly needs to measure an n-port device with an m-port network analyzer ($m<n$ of course).
<img src="nports_with_2ports.svg"/>
This can be done by terminating each non-measured port with a matched load, and assuming the reflected power is negligible. With multiple measurements, it is then possible to reconstitute the original n-port. The first section of this example illustrates this method.
However, in some cases this may not provide the most accurate results, or even be possible in all measurement environments. Or, sometimes it is not possible to have matched loads for all ports. The second part of this example presents an elegant solution to this problem, using impedance renormalization. We'll call it *Tippet's technique*, because it has a good ring to it.
```
# scikit-rf plus notebook plotting setup for the Tippet's-technique example.
import skrf as rf
from itertools import combinations
%matplotlib inline
from pylab import *
rf.stylely()  # apply scikit-rf's matplotlib style
```
## Matched Ports
Let's assume that you have a 2-ports VNA. In order to measure a n-port network, you will need at least $p=n(n-1)/2$ measurements between the different pair of ports (total number of unique pairs of a set of n).
For example, let's assume we want to measure a 3-port network with a 2-port VNA. One needs to perform at least 3 measurements: between ports 1 & 2, between ports 2 & 3 and between ports 1 & 3. We will assume these measurements are then converted into three 2-port `Network`s. To build the full 3-port `Network`, one needs to provide a list of these 3 (sub)networks to the scikit-rf builtin function `n_twoports_2_nport`. While the order of the measurements in the list is not important, pay attention to define the `Network.name` properties of these subnetworks to contain the port index, for example `p12` for the measurement between ports 1&2 or `p23` between 2&3, etc.
Let's suppose we want to measure a tee:
```
# Load scikit-rf's built-in 3-port tee Network and inspect it.
tee = rf.data.tee
print(tee)
```
For the sake of the demonstration, we will "fake" the 3 distinct measurements by extracting 3 subsets of the original Network, i.e. 3 subnetworks:
```
# 2 port Networks as if one measures the tee with a 2 ports VNA
# (each subnetwork keeps a pair of ports; the third is implicitly matched)
tee12 = rf.subnetwork(tee, [0, 1])  # 2 port Network btw ports 1 & 2, port 3 being matched
tee23 = rf.subnetwork(tee, [1, 2])  # 2 port Network btw ports 2 & 3, port 1 being matched
tee13 = rf.subnetwork(tee, [0, 2])  # 2 port Network btw ports 1 & 3, port 2 being matched
```
In reality of course, these three Networks come from three measurements with distinct pairs of ports, the unused port being properly matched.
Before using the `n_twoports_2_nport` function, one must define the name of these subsets by setting the `Network.name` property, in order the function to know which corresponds to what:
```
# The names encode which port pair each sub-measurement covers — this is how
# n_twoports_2_nport knows where to place each 2-port in the n-port.
tee12.name = 'tee12'
tee23.name = 'tee23'
tee13.name = 'tee13'
```
Now we can build the 3-ports Network from these three 2-port subnetworks:
```
# Reassemble the full 3-port from the three 2-port sub-measurements.
ntw_list = [tee12, tee23, tee13]
tee_rebuilt = rf.n_twoports_2_nport(ntw_list, nports=3)
print(tee_rebuilt)
# this is an ideal example, both Network are thus identical
print(tee == tee_rebuilt)
```
## Tippet's Technique
This example demonstrates a numerical test of the technique described in "*A Rigorous Technique for Measuring the Scattering Matrix of a Multiport Device with a 2-Port Network Analyzer*" [1].
In *Tippets technique*, several sub-networks are measured in a similar way as before, but the port terminations are not assumed to be matched. Instead, the terminations just have to be known and no more than one can be completely reflective. So, in general $|\Gamma| \ne 1$.
During measurements, each port is terminated with a consistent termination. So port 1 is always terminated with $Z_1$ when not being measured. Once measured, each sub-network is re-normalized to these port impedances. Think about that. Finally the composite network is constructed, and may then be re-normalized to the desired system impedance, say $50$ ohm.
* [1] J. C. Tippet and R. A. Speciale, “A Rigorous Technique for Measuring the Scattering Matrix of a Multiport Device with a 2-Port Network Analyzer,” IEEE Transactions on Microwave Theory and Techniques, vol. 30, no. 5, pp. 661–666, May 1982.
## Outline of Tippet's Technique
Following the example given in [1], measuring a 4-port network with a 2-port network analyzer.
An outline of the technique:
1. Calibrate 2-port network analyzer
2. Get four known terminations ($Z_1, Z_2, Z_3,Z_4$). No more than one can have $|\Gamma| = 1$
3. Measure all combinations of 2-port subnetworks (there are 6). Each port not currently being measured must be terminated with its corresponding load.
4. Renormalize each subnetwork to the impedances of the loads used to terminate it when not being measured.
5. Build composite 4-port, renormalize to VNA impedance.
## Implementation
First, we create a Media object, which is used to generate networks for testing. We will use WR-10 Rectangular waveguide.
```
# WR-10 rectangular-waveguide Media object, used to synthesize all test
# networks below, sampled at 101 frequency points.
wg = rf.wr10
wg.frequency.npoints = 101
```
Next, let's generate a random 4-port network which will be the DUT that we are trying to measure with our 2-port network analyzer.
```
# Random 4-port network standing in for the device under test.
dut = wg.random(n_ports = 4,name= 'dut')
dut
```
Now, we need to define the loads used to terminate each port when it is not being measured, note as described in [1] not more than one can be have full reflection, $|\Gamma| = 1$
```
# One known (non-matched) termination per DUT port; per [1], at most one may
# be fully reflective.
loads = [wg.load(.1+.1j),
         wg.load(.2-.2j),
         wg.load(.3+.3j),
         wg.load(.5),
         ]
# construct the impedance array, of shape FXN (frequency x port)
z_loads = array([k.z.flatten() for k in loads]).T
```
Create required measurement port combinations. There are 6 different measurements required to measure a 4-port with a 2-port VNA. In general, #measurements = $n\choose 2$, for n-port DUT on a 2-port VNA.
```
# All C(4, 2) = 6 port pairings the 2-port VNA must measure.
ports = arange(dut.nports)
port_combos = list(combinations(ports, 2))
port_combos
```
Now to do it. Ok we loop over the port combo's and connect the loads to the right places, simulating actual measurements. Each raw subnetwork measurement is saved, along with the renormalized subnetwork. Finally, we stuff the result into the 4-port composit network.
```
composite = wg.match(nports = 4) # composite network, to be filled.
measured,measured_renorm = {},{} # measured subnetworks and renormalized sub-networks
# ports `a` and `b` are the ports we will connect the VNA too
for a,b in port_combos:
    # port `c` and `d` are the ports which we will connect the loads too
    c,d =ports[(ports!=a)& (ports!=b)]
    # determine where `d` will be on four_port, after its reduced to a three_port
    e = where(ports[ports!=c]==d)[0][0]
    # connect loads (reducing 4-port -> 3-port -> 2-port)
    three_port = rf.connect(dut,c, loads[c],0)
    two_port = rf.connect(three_port,e, loads[d],0)
    # save raw and renormalized 2-port subnetworks
    measured['%i%i'%(a,b)] = two_port.copy()
    # renormalize to the impedances of the loads terminating ports a and b
    two_port.renormalize(c_[z_loads[:,a],z_loads[:,b]])
    measured_renorm['%i%i'%(a,b)] = two_port.copy()
    # stuff this 2-port into the composite 4-port
    for i,m in enumerate([a,b]):
        for j,n in enumerate([a,b]):
            composite.s[:,m,n] = two_port.s[:,i,j]
    # properly copy the port impedances
    composite.z0[:,a] = two_port.z0[:,0]
    composite.z0[:,b] = two_port.z0[:,1]
# finally renormalize the assembled composite to the 50-ohm system impedance
composite.renormalize(50)
```
## Results
### Self-Consistency
Note that 6 measurements of 2-port subnetworks work out to 24 s-parameters, and we only need 16. This is because each reflection s-parameter is measured three times. As in [1], we will use this redundant measurement as a check of our accuracy.
The renormalized networks are stored in a dictionary with names based on their port indices; from this you can see that each has been renormalized to the appropriate z0.
```
measured_renorm
```
Plotting all three raw measurements of $S_{11}$, we can see that they are not in agreement. These plots answer to plots 5 and 7 of [1]
```
# Gather the raw measurements involving port 0 and plot their S11 — the three
# raw copies of S11 disagree (cf. plots 5 and 7 of [1]).
s11_set = rf.NS([measured[k] for k in measured if k[0]=='0'])
figure(figsize = (8,4))
subplot(121)
s11_set .plot_s_db(0,0)
subplot(122)
s11_set .plot_s_deg(0,0)
tight_layout()
```
However, the renormalized measurements agree perfectly. These plots answer to plots 6 and 8 of [1]
```
# Same plot for the renormalized measurements — the three S11 copies now
# agree (cf. plots 6 and 8 of [1]).
s11_set = rf.NS([measured_renorm[k] for k in measured_renorm if k[0]=='0'])
figure(figsize = (8,4))
subplot(121)
s11_set .plot_s_db(0,0)
subplot(122)
s11_set .plot_s_deg(0,0)
tight_layout()
```
### Test For Accuracy
Making sure our composite network is the same as our DUT
```
composite == dut
```
Nice!. How close ?
```
sum((composite - dut).s_mag)
```
Dang!
## Practical Application
This could be used in many ways. In waveguide, one could just make a measurement of a radiating open after a standard two-port calibration (like TRL). Then using *Tippets technique*, you can leave each port wide open while not being measured. This way you dont have to buy a bunch of loads. How sweet would that be?
## More Complex Simulations
```
def tippits(dut, gamma, noise=None):
    '''
    Simulate Tippet's technique on `dut`.

    Every port is terminated with an identical load of reflection coefficient
    `gamma`; all 2-port sub-measurements are taken, renormalized to the load
    impedances, and reassembled into a composite network.

    Parameters
    ----------
    dut : skrf.Network
        n-port device under test (4-port in the surrounding example).
    gamma : number or complex
        reflection coefficient used for every terminating load.
    noise : tuple (mag, phase_deg) or None
        if given, polar noise added to each raw 2-port measurement.

    Returns
    -------
    skrf.Network
        reconstructed DUT, renormalized to 50 ohm.

    Note: uses the module-level media object `wg` to build loads — assumes
    `wg` matches the DUT's media (TODO confirm for other DUTs).
    '''
    ports = arange(dut.nports)
    port_combos = list(combinations(ports, 2))
    loads = [wg.load(gamma) for k in ports]
    # construct the impedance array, of shape FxN (frequency x port)
    z_loads = array([k.z.flatten() for k in loads]).T
    composite = wg.match(nports=dut.nports)  # composite network, to be filled
    # BUG FIX: these dicts were previously the notebook's module-level globals
    # (the local initialization was commented out), so every call clobbered the
    # earlier `measured`/`measured_renorm` results. Keep them local.
    measured, measured_renorm = {}, {}
    # ports `a` and `b` are the ports we will connect the VNA to
    for a, b in port_combos:
        # ports `c` and `d` are the ports which we will connect the loads to
        c, d = ports[(ports != a) & (ports != b)]
        # determine where `d` will be on four_port, after it is reduced to a three_port
        e = where(ports[ports != c] == d)[0][0]
        # connect loads (4-port -> 3-port -> 2-port)
        three_port = rf.connect(dut, c, loads[c], 0)
        two_port = rf.connect(three_port, e, loads[d], 0)
        if noise is not None:
            two_port.add_noise_polar(*noise)
        # save raw and renormalized 2-port subnetworks
        measured['%i%i' % (a, b)] = two_port.copy()
        two_port.renormalize(c_[z_loads[:, a], z_loads[:, b]])
        measured_renorm['%i%i' % (a, b)] = two_port.copy()
        # stuff this 2-port into the composite n-port
        for i, m in enumerate([a, b]):
            for j, n in enumerate([a, b]):
                composite.s[:, m, n] = two_port.s[:, i, j]
        # properly copy the port impedances
        composite.z0[:, a] = two_port.z0[:, 0]
        composite.z0[:, b] = two_port.z0[:, 1]
    # finally renormalize the composite to the 50-ohm system impedance
    composite.renormalize(50)
    return composite
wg.frequency.npoints = 11
dut = wg.random(4)


def er(gamma, *args):
    """Worst-case dB error between the Tippet-reconstructed and true DUT.

    `gamma` is the load reflection coefficient in dB; extra args (e.g. a
    noise tuple) are forwarded to `tippits`.
    """
    return max(abs(tippits(dut, rf.db_2_mag(gamma), *args).s_db - dut.s_db).flatten())


gammas = linspace(-80, 0, 11)

# FIX: the LaTeX labels are raw strings now — '\G' is an invalid escape
# sequence in a plain literal (SyntaxWarning on modern Python).
title(r'Error vs $|\Gamma|$')
# FIX: the noise-free sweep is deterministic, so it was being computed and
# plotted twice for no benefit; plot it once.
plot(gammas, [er(k) for k in gammas])
semilogy()
xlabel(r'$|\Gamma|$ of Loads (dB)')
ylabel("Max Error in DUT's dB(S)")

figure()
noise = (1e-5, .1)
title(r'Error vs $|\Gamma|$ with reasonable noise')
# Two independent noisy realizations, to show run-to-run spread.
plot(gammas, [er(k, noise) for k in gammas])
plot(gammas, [er(k, noise) for k in gammas])
semilogy()
xlabel(r'$|\Gamma|$ of Loads (dB)')
ylabel("Max Error in DUT's dB(S)")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/kyle-gao/ML_ipynb/blob/master/TF_TPU_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
Playing around with the TPU tutorial notebook before training transformer on TPU
```
import tensorflow as tf
import os
import tensorflow_datasets as tfds
import time
# Colab exposes the TPU address via COLAB_TPU_ADDR; connect the resolver to it.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
# Distribution strategy that replicates computation across the TPU cores.
strategy = tf.distribute.TPUStrategy(resolver)
def create_model():
    """Build a small Conv2D classifier for 28x28x1 MNIST images (10 raw logits out)."""
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.Conv2D(256, 3, activation='relu', input_shape=(28, 28, 1)))
    net.add(tf.keras.layers.Conv2D(256, 3, activation='relu'))
    net.add(tf.keras.layers.Flatten())
    net.add(tf.keras.layers.Dense(256, activation='relu'))
    net.add(tf.keras.layers.Dense(128, activation='relu'))
    net.add(tf.keras.layers.Dense(10))
    return net
def get_dataset(batch_size, is_training=True):
    """Load MNIST as a scaled, batched, prefetched tf.data pipeline.

    Training data is shuffled and repeated indefinitely so every batch is
    full-size; evaluation data keeps its natural order and finite length.
    """
    which = 'train' if is_training else 'test'
    dataset, info = tfds.load(name='mnist', split=which, with_info=True,
                              as_supervised=True, try_gcs=True)

    def normalize(image, label):
        # Cast to float32 and rescale pixels from [0, 255] to [0, 1].
        return tf.cast(image, tf.float32) / 255.0, label

    dataset = dataset.map(normalize)

    # Only shuffle and repeat during training. An infinite training stream
    # avoids a potential partial final batch each epoch, so users never need
    # to rescale gradients for a short batch.
    if is_training:
        dataset = dataset.shuffle(10000).repeat()

    return dataset.batch(batch_size).prefetch(8)
with strategy.scope():
    # Build and compile under the strategy scope so model variables are
    # mirrored across the TPU replicas.
    model = create_model()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['sparse_categorical_accuracy'])

batch_size = 200
steps_per_epoch = 60000 // batch_size    # MNIST train size / batch
validation_steps = 10000 // batch_size   # MNIST test size / batch

train_dataset = get_dataset(batch_size, is_training=True)
test_dataset = get_dataset(batch_size, is_training=False)

# steps_per_epoch is required because the training dataset repeats forever.
model.fit(train_dataset,
          epochs=5,
          steps_per_epoch=steps_per_epoch,
          validation_data=test_dataset,
          validation_steps=validation_steps,verbose = 2)
#TPU 11s/epoch
#no cache
#no pretech
"""Epoch 1/5
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
1/300 [..............................] - ETA: 22:40 - loss: 2.3021 - sparse_categorical_accuracy: 0.1300WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0026s vs `on_train_batch_end` time: 0.0308s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0026s vs `on_train_batch_end` time: 0.0308s). Check your callbacks.
299/300 [============================>.] - ETA: 0s - loss: 0.1350 - sparse_categorical_accuracy: 0.9583WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0014s vs `on_test_batch_end` time: 0.0115s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0014s vs `on_test_batch_end` time: 0.0115s). Check your callbacks.
300/300 [==============================] - 18s 60ms/step - loss: 0.1347 - sparse_categorical_accuracy: 0.9585 - val_loss: 0.0443 - val_sparse_categorical_accuracy: 0.9863
Epoch 2/5
300/300 [==============================] - 11s 36ms/step - loss: 0.0337 - sparse_categorical_accuracy: 0.9896 - val_loss: 0.0404 - val_sparse_categorical_accuracy: 0.9881
Epoch 3/5
300/300 [==============================] - 11s 36ms/step - loss: 0.0189 - sparse_categorical_accuracy: 0.9938 - val_loss: 0.0465 - val_sparse_categorical_accuracy: 0.9871
Epoch 4/5
300/300 [==============================] - 11s 37ms/step - loss: 0.0122 - sparse_categorical_accuracy: 0.9959 - val_loss: 0.0542 - val_sparse_categorical_accuracy: 0.9859
Epoch 5/5
300/300 [==============================] - 11s 36ms/step - loss: 0.0109 - sparse_categorical_accuracy: 0.9964 - val_loss: 0.0484 - val_sparse_categorical_accuracy: 0.9869
<tensorflow.python.keras.callbacks.History at 0x7fcf79ed3400>"""
#TPU 11s/epoch
#with cache
#with pretech
"""
Epoch 1/5
1/300 [..............................] - ETA: 15:57 - loss: 2.3030 - sparse_categorical_accuracy: 0.1100WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0025s vs `on_train_batch_end` time: 0.0316s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0025s vs `on_train_batch_end` time: 0.0316s). Check your callbacks.
299/300 [============================>.] - ETA: 0s - loss: 0.1310 - sparse_categorical_accuracy: 0.9584WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0015s vs `on_test_batch_end` time: 0.0129s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0015s vs `on_test_batch_end` time: 0.0129s). Check your callbacks.
300/300 [==============================] - 16s 54ms/step - loss: 0.1306 - sparse_categorical_accuracy: 0.9585 - val_loss: 0.0464 - val_sparse_categorical_accuracy: 0.9853
Epoch 2/5
300/300 [==============================] - 11s 36ms/step - loss: 0.0342 - sparse_categorical_accuracy: 0.9891 - val_loss: 0.0398 - val_sparse_categorical_accuracy: 0.9874
Epoch 3/5
300/300 [==============================] - 11s 36ms/step - loss: 0.0188 - sparse_categorical_accuracy: 0.9940 - val_loss: 0.0415 - val_sparse_categorical_accuracy: 0.9869
Epoch 4/5
300/300 [==============================] - 11s 36ms/step - loss: 0.0131 - sparse_categorical_accuracy: 0.9958 - val_loss: 0.0455 - val_sparse_categorical_accuracy: 0.9864
Epoch 5/5
300/300 [==============================] - 11s 37ms/step - loss: 0.0087 - sparse_categorical_accuracy: 0.9970 - val_loss: 0.0475 - val_sparse_categorical_accuracy: 0.9876
<tensorflow.python.keras.callbacks.History at 0x7fcf79fbe320>"""
#Something is bottlenecking each batch.
#TPU 9s/epoch
#with cache
#with prefetch
#verbose = 2
"""Epoch 1/5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0019s vs `on_train_batch_end` time: 0.0290s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0019s vs `on_train_batch_end` time: 0.0290s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0014s vs `on_test_batch_end` time: 0.0116s). Check your callbacks.
WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0014s vs `on_test_batch_end` time: 0.0116s). Check your callbacks.
300/300 - 15s - loss: 0.1381 - sparse_categorical_accuracy: 0.9587 - val_loss: 0.0423 - val_sparse_categorical_accuracy: 0.9851
Epoch 2/5
300/300 - 9s - loss: 0.0339 - sparse_categorical_accuracy: 0.9891 - val_loss: 0.0395 - val_sparse_categorical_accuracy: 0.9877
Epoch 3/5
300/300 - 9s - loss: 0.0190 - sparse_categorical_accuracy: 0.9938 - val_loss: 0.0381 - val_sparse_categorical_accuracy: 0.9881
Epoch 4/5
300/300 - 9s - loss: 0.0113 - sparse_categorical_accuracy: 0.9959 - val_loss: 0.0455 - val_sparse_categorical_accuracy: 0.9877
Epoch 5/5
300/300 - 10s - loss: 0.0101 - sparse_categorical_accuracy: 0.9967 - val_loss: 0.0435 - val_sparse_categorical_accuracy: 0.9890
<tensorflow.python.keras.callbacks.History at 0x7fcf6f98bc88>"""
# Create the model, optimizer and metrics inside strategy scope, so that the
# variables can be mirrored on each device.
with strategy.scope():
    model = create_model()
    optimizer = tf.keras.optimizers.Adam()
    # Metric accumulators for the custom training loop; they are reset once
    # per epoch by the driver loop below.
    training_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
    training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        'training_accuracy', dtype=tf.float32)

# Calculate per replica batch size, and distribute the datasets on each TPU
# worker.
per_replica_batch_size = batch_size // strategy.num_replicas_in_sync
# NOTE(review): experimental_distribute_datasets_from_function was renamed to
# distribute_datasets_from_function in later TF releases — confirm TF version.
train_dataset = strategy.experimental_distribute_datasets_from_function(
    lambda _: get_dataset(per_replica_batch_size, is_training=True))
@tf.function
def train_step(iterator):
    """Run one distributed training step.

    Args:
        iterator: iterator over the distributed training dataset; one batch
            (per replica) is consumed each call.
    """
    def step_fn(inputs):
        """The computation to run on each TPU device."""
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            # Per-example loss; logits are unnormalized (from_logits=True).
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True)
            # Average over the *global* batch size so that gradients summed
            # across replicas come out correctly scaled.
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        # Undo the 1/num_replicas factor so the reported metric is the
        # per-batch mean loss.
        training_loss.update_state(loss * strategy.num_replicas_in_sync)
        training_accuracy.update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))
# Number of evaluation batches (kept for parity with the rest of the
# notebook; not used inside this loop).
steps_per_eval = 10000 // batch_size

train_iterator = iter(train_dataset)
for epoch in range(5):
    tstart = time.time()
    # `epoch` is 0-based; display 1-based progress.
    print('Epoch: {}/5'.format(epoch + 1))
    for step in range(steps_per_epoch):
        train_step(train_iterator)
    print('Current step: {}, training loss: {}, accuracy: {}%'.format(
        optimizer.iterations.numpy(),
        round(float(training_loss.result()), 4),
        round(float(training_accuracy.result()) * 100, 2)))
    tend = time.time()
    # BUG FIX: the original printed 'Time per epoch: {}/5', reusing the
    # epoch-counter template for a duration in seconds.
    print('Time per epoch: {:.2f}s'.format(tend - tstart))
    # Reset the metric accumulators so each epoch reports its own averages.
    training_loss.reset_states()
    training_accuracy.reset_states()
@tf.function
def train_multiple_steps(iterator, steps):
    """Run `steps` training steps inside a single tf.function call.

    Wrapping the whole inner loop in one traced function keeps the loop on
    the device and avoids a host round-trip per batch.

    Args:
        iterator: iterator over the distributed training dataset.
        steps: number of steps to run; passed as a tf.Tensor to avoid
            retracing when the value changes.
    """
    def step_fn(inputs):
        """The computation to run on each TPU device."""
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True)
            # Average over the *global* batch so replica gradients sum correctly.
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        training_loss.update_state(loss * strategy.num_replicas_in_sync)
        training_accuracy.update_state(labels, logits)

    # tf.range keeps the loop inside the traced graph.
    for _ in tf.range(steps):
        strategy.run(step_fn, args=(next(iterator),))

# Convert `steps_per_epoch` to `tf.Tensor` so the `tf.function` won't get
# retraced if the value changes.
train_multiple_steps(train_iterator, tf.convert_to_tensor(steps_per_epoch))

print('Current step: {}, training loss: {}, accuracy: {}%'.format(
    optimizer.iterations.numpy(),
    round(float(training_loss.result()), 4),
    round(float(training_accuracy.result()) * 100, 2)))
```
| github_jupyter |
<table> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by <a href="http://abu.lu.lv" target="_blank">Abuzer Yakaryilmaz</a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
<h1> <font color="blue"> Solutions for </font>Phase Kickback</h1>
<a id="task2"></a>
<h3> Task 1</h3>
What is the other eigenstate of the $NOT$ operator? What is the corresponding eigenvalue?
<h3> Solution </h3>
Note that $NOT\ket{+} = NOT(\sqrttwo \ket{0} + \sqrttwo \ket{1}) = \sqrttwo NOT\ket{0} + \sqrttwo NOT\ket{1} = \sqrttwo \ket{1} + \sqrttwo \ket{0} = \ket{+}$
Hence, we can conclude that $\ket{+}$ is an eigenstate of the $NOT$ operator with eigenvalue 1.
<a id="task2"></a>
<h3> Task 2</h3>
Create a quantum circuit with two qubits. In this task, we will not follow Qiskit order.
Set the state of the first qubit to $ \ket{0} $.
Set the state of the second qubit to $ \ket{1} $.
Apply Hadamard to both qubits.
Apply $CNOT$ operator, where the controller qubit is the first qubit and the target qubit is the second qubit.
Apply Hadamard to both qubits.
Measure the outcomes.
We start in quantum state $ \ket{01} $. What is the outcome?
<h3> Solution </h3>
```
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# Two qubits, two classical bits for the measurement results.
qreg1 = QuantumRegister(2)
creg1 = ClassicalRegister(2)
mycircuit1 = QuantumCircuit(qreg1, creg1)

# Qubit 0 stays in |0>; flip qubit 1 to |1> with an x-gate (NOT operator).
mycircuit1.x(qreg1[1])

# Hadamard on both qubits before the CNOT...
for qubit in range(2):
    mycircuit1.h(qreg1[qubit])

# CNOT with the first qubit as controller and the second qubit as target.
mycircuit1.cx(qreg1[0], qreg1[1])

# ...and Hadamard on both qubits again afterwards.
for qubit in range(2):
    mycircuit1.h(qreg1[qubit])

# Measure every qubit into the classical register.
mycircuit1.measure(qreg1, creg1)

# Execute the circuit 100 times in the local simulator.
job = execute(mycircuit1, Aer.get_backend('qasm_simulator'), shots=100)
counts = job.result().get_counts(mycircuit1)

# Qiskit reports bit strings in reverse qubit order, so flip each outcome.
for outcome in counts:
    reverse_outcome = outcome[::-1]
    print("We start in quantum state 01, and", reverse_outcome, "is observed", counts[outcome], "times")
```
<a id="task3"></a>
<h3> Task 3 </h3>
Create a circuit with 7 qubits. In this task, we will not follow Qiskit order.
Set the states of the first six qubits to $ \ket{0} $.
Set the state of the last qubit to $ \ket{1} $.
Apply Hadamard operators to all qubits.
Apply $CNOT$ operator (first-qubit,last-qubit)
<br>
Apply $CNOT$ operator (fourth-qubit,last-qubit)
<br>
Apply $CNOT$ operator (fifth-qubit,last-qubit)
Apply Hadamard operators to all qubits.
Measure all qubits.
For each CNOT operator, do we have a phase-kickback effect?
<h3> Solution </h3>
```
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
# Create a circuit with 7 qubits.
n = 7
qreg2 = QuantumRegister(n) # quantum register with 7 qubits
creg2 = ClassicalRegister(n) # classical register with 7 bits
mycircuit2 = QuantumCircuit(qreg2,creg2) # quantum circuit with quantum and classical registers
# the first six qubits are already in |0>
# set the last qubit to |1>
mycircuit2.x(qreg2[n-1]) # apply x-gate (NOT operator)
# apply Hadamard to all qubits.
for i in range(n):
mycircuit2.h(qreg2[i])
# apply CNOT operator (first-qubit,last-qubit)
# apply CNOT operator (fourth-qubit,last-qubit)
# apply CNOT operator (fifth-qubit,last-qubit)
mycircuit2.cx(qreg2[0],qreg2[n-1])
mycircuit2.cx(qreg2[3],qreg2[n-1])
mycircuit2.cx(qreg2[4],qreg2[n-1])
# apply Hadamard to all qubits.
for i in range(n):
mycircuit2.h(qreg2[i])
# measure all qubits
mycircuit2.measure(qreg2,creg2)
# execute the circuit 100 times in the local simulator
job = execute(mycircuit2,Aer.get_backend('qasm_simulator'),shots=100)
counts = job.result().get_counts(mycircuit2)
# print the reverse of the outcome
for outcome in counts:
reverse_outcome = ''
for i in outcome:
reverse_outcome = i + reverse_outcome
print(reverse_outcome,"is observed",counts[outcome],"times")
for i in range(len(reverse_outcome)):
print("the final value of the qubit nr.",(i+1),"is",reverse_outcome[i])
```
| github_jupyter |
```
from __future__ import print_function ## Force python3-like printing
try:
    from importlib import reload  # Python 3 moved reload() into importlib
except:
    pass

%matplotlib inline
# %matplotlib notebook
from matplotlib import pyplot as plt
import os
import warnings
import numpy as np
from astropy.table import Table
from scipy.integrate import simps
import pycoco as pcc

# Reload the pycoco modules so in-progress source edits are picked up. ## FOR DEV
reload(pcc) ## FOR DEV
reload(pcc.kcorr) ## FOR DEV
reload(pcc.functions) ## FOR DEV
reload(pcc.classes) ## FOR DEV
reload(pcc.colours)
warnings.resetwarnings() ## FOR DEV

# Bessell V and B filter throughput curves.
V = pcc.functions.load_filter('/Users/berto/Code/CoCo/data/filters/BessellV.dat')
B = pcc.functions.load_filter('/Users/berto/Code/CoCo/data/filters/BessellB.dat')

# Reference spectra used for the zero-point calculations further below.
vega = pcc.kcorr.load_vega()
AB = pcc.kcorr.load_AB()

# Overlay the reference spectra (left axis) with the filter throughputs and
# their effective wavelengths (dotted verticals, right axis).
fig = plt.figure(figsize=[8, 4])
fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,
                    right = 0.99, hspace=0, wspace = 0)
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(vega.wavelength, vega.flux)
ax1.plot(AB.wavelength, AB.flux)
ax2.plot(B.wavelength, B.throughput)
ax2.plot([B.lambda_effective.value, B.lambda_effective.value], [0,1], color = pcc.colours.hex["black"], ls = ":")
ax2.plot(V.wavelength, V.throughput)
ax2.plot([V.lambda_effective.value, V.lambda_effective.value], [0,1], color = pcc.colours.hex["black"], ls = ":")
```
Test whether the scaling matches that in Bohlin & Gilliland, 2004, AJ, 127, 3508 (the source of the spectrum), i.e. an absolute flux level of $3.46 \times 10^{-9}$ erg cm$^{-2}$ s$^{-1}$ at 5556 Angstroms
```
vega.data[np.logical_and(vega.data["wavelength"] > 5550., vega.data["wavelength"] < 5560.)]
```
Want to find the flux in a given band, take Bessell V as an example.
```
filter_name = "BessellV"
filter_object = pcc.kcorr.load_filter("/Users/berto/Code/CoCo/data/filters/" + filter_name + ".dat")
```
Resample the filter wavelength to that of the spectrum, and calculate the resulting transmission
```
# Resample the filter response onto the spectrum's wavelength grid.
filter_object.resample_response(new_wavelength = AB.wavelength)
# Flux transmitted through the filter, integrated over wavelength (Simpson's rule).
transmitted_spec = filter_object.throughput * AB.flux
integrated_flux = simps(transmitted_spec, AB.wavelength)
print(integrated_flux)
# Normalise by the filter area to get the mean in-band flux, then convert to
# a magnitude zero point.
filter_area = simps(filter_object.throughput, filter_object.wavelength)
zp_AB = -2.5*np.log10(integrated_flux/filter_area)
print(zp_AB)
```
These steps are combined in **`pycoco.kcorr.calc_AB_zp()`**
```
print(pcc.kcorr.calc_AB_zp("BessellV"))
```
Equivalently, for Vega Magnitudes - **`pycoco.calc_vega_zp`**
```
print(pcc.kcorr.calc_vega_zp("BessellV"))
```
$m = -2.5log_{10}(F) - m_{zp}$
so
$F = 10^{\frac{m - m_{zp}}{-2.5}}$
and
$\Delta F = 1.086F \frac{\Delta m}{m}$
```
# Load the SN 2006aj photometry and its phase list, then plot the light curve.
sn = pcc.classes.SNClass("SN2006aj")
sn.load_phot( path = os.path.join(pcc.defaults._default_data_dir_path,"lc/SN2006aj.dat"))
sn.load_list(os.path.join(pcc.defaults._default_coco_dir_path, "lists/SN2006aj.list"))
# sn.load_spec()
# sn.check_overlaps()
# sn.get_lcfit("/Users/berto/Code/CoCo/recon/SN2006aj.dat")
sn.phot.load(os.path.join(pcc.defaults._default_data_dir_path,"lc/SN2006aj.dat"))
sn.plot_lc()

# Convert BessellV fluxes to magnitudes using the AB zero point computed
# above. NOTE(review): the +0.2 offset is unexplained here — confirm source.
mag = -2.5*np.log10(sn.phot.data['BessellV']["flux"]) - zp_AB + 0.2
t = sn.phot.data['BessellV']["MJD"]
for i in zip(t, mag):
    print(i)
import pycoco.litdata as plit
def get_lit_sn(snname="2006aj"):
    """Return the CfA photometry rows for supernova *snname*.

    Fixes the original, which built an ``OrderedDict`` that was never
    imported (NameError on call) nor used, and returned ``None``.

    Parameters
    ----------
    snname : str
        Supernova identifier as it appears in the CfA table's "SN" column.

    Returns
    -------
    The subset of the CfA photometry table whose "SN" column equals
    *snname* (empty if the name is not present).
    """
    data = plit.load_CfA_phot_table()
    # Boolean selection on the "SN" column; np.where preserves row order.
    sndata = data[np.where(data["SN"] == snname)]
    return sndata
snname = "2006aj"
data = plit.load_CfA_phot_table()
sndata = data[np.where(data["SN"] == snname)]
sndata[np.where(sndata["Filter"] == "BessellV")]
import pycoco
# BUG FIX: FilterClass must be imported before it is referenced; the original
# called isinstance() first and raised NameError.
from pycoco.classes import FilterClass
# NOTE(review): the first argument is the class object itself, so this
# evaluates to False — issubclass(), or an identity check
# (`pycoco.classes.FilterClass is FilterClass`), is probably what was meant.
isinstance(pycoco.classes.FilterClass, FilterClass)
```
| github_jupyter |
# W-net Model - Train
```
%matplotlib inline
import matplotlib.pylab as plt
import numpy as np
import os
import glob
import sys
from keras.optimizers import Adam
# Importing our w-net model
MY_UTILS_PATH = "../Modules/"
if not MY_UTILS_PATH in sys.path:
sys.path.append(MY_UTILS_PATH)
import frequency_spatial_network as fsnet
# Importing callbacks and data augmentation utils
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
# Train Set
train_path = "/home/ubuntu/volume1/Raw_data/Kspace/Train/*.npy"
kspace_files_train = np.asarray(glob.glob(train_path))

# Validation set
val_path = "/home/ubuntu/volume1/Raw_data/Kspace/Val/*.npy"
kspace_files_val = np.asarray(glob.glob(val_path))

# Shuffle the training file list so loading order is random.
indexes = np.arange(kspace_files_train.size,dtype = int)
np.random.shuffle(indexes)
kspace_files_train = kspace_files_train[indexes]

print(kspace_files_train[-1])
print(len(kspace_files_train))
print(kspace_files_val[-1])
print(len(kspace_files_val))

under_rate = '20'  # undersampling percentage, used to build file names below
imshape = (256,256)  # image size (H, W)
norm = np.sqrt(imshape[0]*imshape[1])  # sqrt(N) scaling applied to raw k-space
nchannels = 2 #complex data real + imag

# undersampling patterns - uncentred k-space
# Boolean mask: True marks k-space positions to zero out.
var_sampling_mask = np.load("../Data/sampling_mask_" + under_rate + "perc.npy")
print("Undersampling:", 1.0*var_sampling_mask.sum()/var_sampling_mask.size)
print("Mask type:", var_sampling_mask.dtype)
plt.figure()
plt.imshow(~var_sampling_mask,cmap = "gray")
plt.axis("off")
plt.show()
```
## Load Train Data
```
# Get number of samples across all training k-space files.
ntrain = 0
for ii in range(len(kspace_files_train)):
    ntrain += np.load(kspace_files_train[ii]).shape[0]

# Load train data: image-domain reconstructions (rec_train) and the matching
# k-space (kspace_train), both stored as [real, imag] channel pairs.
rec_train = np.zeros((ntrain,imshape[0],imshape[1],2))
kspace_train = np.zeros((ntrain,imshape[0],imshape[1],2))
aux_counter = 0
for ii in range(len(kspace_files_train)):
    aux_kspace = np.load(kspace_files_train[ii])/norm
    aux = aux_kspace.shape[0]
    # Inverse FFT of the complex k-space gives the fully-sampled image.
    aux2 = np.fft.ifft2(aux_kspace[:,:,:,0]+1j*aux_kspace[:,:,:,1])
    rec_train[aux_counter:aux_counter+aux,:,:,0] = aux2.real
    rec_train[aux_counter:aux_counter+aux,:,:,1] = aux2.imag
    kspace_train[aux_counter:aux_counter+aux,:,:,0] = aux_kspace[:,:,:,0]
    kspace_train[aux_counter:aux_counter+aux,:,:,1] = aux_kspace[:,:,:,1]
    aux_counter+=aux

# Shuffle training images.
# NOTE(review): kspace_train is NOT re-indexed with the same permutation.
# That is harmless here because it is only used below for global mean/std
# statistics (permutation-invariant) and then discarded — the training
# generator later rebuilds k-space from rec_train on the fly. Confirm it is
# not reused elsewhere before relying on this.
indexes = np.arange(rec_train.shape[0],dtype = int)
np.random.shuffle(indexes)
rec_train = rec_train[indexes]

kspace_train[:,var_sampling_mask,:] = 0 # undersample k-space

# save k-space and image domain stats: [k-space mean, k-space std,
# image-magnitude mean, image-magnitude std] for normalisation later.
stats = np.zeros(4)
stats[0] = kspace_train.mean()
stats[1] = kspace_train.std()
aux = np.abs(rec_train[:,:,:,0] +1j*rec_train[:,:,:,1])
stats[2] = aux.mean()
stats[3] = aux.std()
np.save("../Data/stats_fs_unet_norm_" + under_rate + ".npy",stats)
print("Number of training samples", rec_train.shape[0])
kspace_train = 0 # release memory
```
## Load Validation Data
```
# Get number of validation samples across all files.
nval = 0
for ii in range(len(kspace_files_val)):
    nval += np.load(kspace_files_val[ii]).shape[0]

kspace_val = np.zeros((nval,imshape[0],imshape[1],nchannels))
rec_val = np.zeros((nval,imshape[0],imshape[1],1))
aux_counter = 0
for ii in range(len(kspace_files_val)):
    aux_kspace = np.load(kspace_files_val[ii])/norm
    aux = aux_kspace.shape[0]
    kspace_val[aux_counter:aux_counter+aux] = aux_kspace
    # Validation image target: magnitude of the fully-sampled reconstruction.
    rec_val[aux_counter:aux_counter+aux,:,:,0] = \
        np.abs(np.fft.ifft2(aux_kspace[:,:,:,0]+1j*aux_kspace[:,:,:,1]))
    aux_counter+=aux

# Undersampling kspace: keep a fully-sampled copy (kspace_val2) as the
# k-space target, then zero the masked lines and normalise the network input
# with the training-set statistics.
kspace_val2 = kspace_val.copy()
kspace_val[:,var_sampling_mask,:] = 0
kspace_val = (kspace_val-stats[0])/stats[1]
print("Number of samples", kspace_val.shape[0])
print("Kspace under stats", kspace_val.mean(),kspace_val.std())
print("Kspace full stats", kspace_val2.mean(),kspace_val2.std())
print("Rec stats", rec_val.mean(),rec_val.std())
```
## Initialize Model
```
# Training hyper-parameters.
epochs = 250
batch_size= 16

# W-net with the saved normalisation statistics baked in; two convolution
# kernel shapes for the two sub-networks.
model = fsnet.wnet(stats[0],stats[1],stats[2],stats[3],\
                   kshape = (5,5),kshape2=(3,3))

opt = Adam(lr=1e-3,decay = 1e-7)
# Weighted NRMSE on both outputs: k-space branch (0.01) and image branch (0.99).
model.compile(loss = [fsnet.nrmse,fsnet.nrmse],optimizer=opt, loss_weights=[0.01, 0.99])

model_name = "../Models/wnet_" + under_rate + ".hdf5"
# Resume from a previous checkpoint if one exists.
if os.path.isfile(model_name):
    model.load_weights(model_name)

print(model.summary())

# Early stopping callback to shut down training after
# 20 epochs (patience) with no improvement
earlyStopping = EarlyStopping(monitor='val_loss',
                              patience=20,
                              verbose=0, mode='min')

# Checkpoint callback to save the best weights along the epochs
checkpoint = ModelCheckpoint(model_name, mode = 'min', \
                             monitor='val_loss',verbose=0,\
                             save_best_only=True, save_weights_only = True)
```
## Data Augmentation
```
# Shared seed keeps the random transforms of the real and imaginary channel
# generators in lock-step, so each augmented pair stays a valid complex image.
seed = 905

# Identical augmentation settings for both channels (deduplicated from the
# original copy-pasted argument lists).
aug_params = dict(
    rotation_range=40,
    width_shift_range=0.075,
    height_shift_range=0.075,
    shear_range=0.25,
    zoom_range=0.25,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest')
image_datagen1 = ImageDataGenerator(**aug_params)
image_datagen2 = ImageDataGenerator(**aug_params)

image_datagen1.fit(rec_train[:, :, :, 0, np.newaxis], augment=True, seed=seed)
image_datagen2.fit(rec_train[:, :, :, 1, np.newaxis], augment=True, seed=seed)

image_generator1 = image_datagen1.flow(rec_train[:, :, :, 0, np.newaxis], batch_size=batch_size, seed=seed)
# BUG FIX: the imaginary-channel generator previously flowed from
# image_datagen1, leaving image_datagen2 (fitted on the imaginary channel)
# unused.
image_generator2 = image_datagen2.flow(rec_train[:, :, :, 1, np.newaxis], batch_size=batch_size, seed=seed)
def combine_generator(gen1, gen2, under_mask, stats):
    """Yield (undersampled k-space input, [full k-space, magnitude image]) batches.

    Parameters
    ----------
    gen1, gen2 : generators with a ``.next()`` method yielding, in lock-step,
        the real and imaginary image channels with shape (batch, H, W, 1).
    under_mask : boolean array of shape (H, W); True marks k-space positions
        to zero out (the undersampling pattern).
    stats : sequence whose first two entries are the k-space mean and std
        used to normalise the undersampled input.

    Yields
    ------
    tuple ``(kspace_under, [kspace2, rec])`` — the normalised undersampled
    2-channel k-space input, the fully-sampled 2-channel k-space target, and
    the magnitude-image target.
    """
    while True:
        rec_real = gen1.next()
        rec_imag = gen2.next()
        # Forward FFT of the augmented complex image gives fresh full k-space.
        kspace = np.fft.fft2(rec_real[:, :, :, 0] + 1j * rec_imag[:, :, :, 0])
        kspace2 = np.zeros((kspace.shape[0], kspace.shape[1], kspace.shape[2], 2))
        kspace2[:, :, :, 0] = kspace.real
        kspace2[:, :, :, 1] = kspace.imag
        kspace_under = kspace2.copy()
        # BUG FIX: use the under_mask argument; the original ignored it and
        # read the global var_sampling_mask instead.
        kspace_under[:, under_mask, :] = 0
        kspace_under = (kspace_under - stats[0]) / stats[1]
        rec = np.abs(rec_real[:, :, :, 0] + 1j * rec_imag[:, :, :, 0])[:, :, :, np.newaxis]
        yield (kspace_under, [kspace2, rec])
# combine generators into one which yields (input k-space, [k-space, image])
combined = combine_generator(image_generator1, image_generator2, var_sampling_mask, stats)

# sample data augmentation: show one augmented magnitude image alongside the
# log-magnitude of its fully-sampled k-space.
for ii in combined:
    print(ii[1][1].shape)
    plt.figure()
    plt.subplot(121)
    plt.imshow(ii[1][1][10,:,:,0], cmap='gray')
    plt.axis("off")
    plt.subplot(122)
    # BUG FIX: the real and imaginary parts previously came from different
    # samples (index 10 vs index 8); use sample 10 for both.
    plt.imshow(np.log(1 + np.abs(ii[1][0][10,:,:,0] + 1j*ii[1][0][10,:,:,1])), cmap='gray')
    plt.axis("off")
    plt.show()
    break
```
## Train model
```
hist = model.fit_generator(combined,
epochs=epochs,
steps_per_epoch=rec_train.shape[0] / batch_size,
verbose=1,
validation_data= (kspace_val,[kspace_val2,rec_val]),
callbacks=[checkpoint,earlyStopping])
```
| github_jupyter |
# Bidirectional LSTM Sentiment Classifier
In this notebook, we use a *bidirectional* LSTM to classify IMDB movie reviews by their sentiment.
#### Load dependencies
```
import tensorflow
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D, LSTM
from tensorflow.keras.layers import Bidirectional # new!
from tensorflow.keras.callbacks import ModelCheckpoint
import os
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
```
#### Set hyperparameters
```
# output directory name:
output_dir = 'model_output/biLSTM'
# training:
epochs = 6
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 10000
max_review_length = 200 # doubled!
pad_type = trunc_type = 'pre'
drop_embed = 0.2
# LSTM layer architecture:
n_lstm = 256
drop_lstm = 0.2
```
#### Load data
```
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words) # removed n_words_to_skip
```
#### Preprocess data
```
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
```
#### Design neural network architecture
```
model = Sequential()
# Embed the 10k-word vocabulary into n_dim-dimensional vectors, then drop
# whole embedding channels for regularisation.
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(SpatialDropout1D(drop_embed))
# Bidirectional wrapper reads the sequence forwards and backwards.
model.add(Bidirectional(LSTM(n_lstm, dropout=drop_lstm)))
# Single sigmoid unit for binary sentiment classification.
model.add(Dense(1, activation='sigmoid'))
# LSTM layer parameters double due to both reading directions
model.summary()
```
#### Configure model
```
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
```
#### Train!
```
# - with this toy dataset, the complex interplay of words over long sentence segments, won't be learned much
# - so our CNN picking up location-invariant segments of two to four words that predict review sentiment
# - these are simpler and so easier to learn from the data
# - CNN therefore outperforms on the IMDB data set
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
```
#### Evaluate
```
# Load the checkpoint from the epoch with the best validation performance.
model.load_weights(output_dir+"/weights.04.hdf5")
# BUG FIX: Sequential.predict_proba was deprecated and then removed from
# tf.keras; predict() returns the same sigmoid probabilities for this
# single-output model.
y_hat = model.predict(x_valid)
# Distribution of predicted probabilities, with the 0.5 decision threshold.
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
# Validation ROC AUC as a percentage.
"{:0.2f}".format(roc_auc_score(y_valid, y_hat)*100.0)
```
| github_jupyter |
# Árboles de decisión y bosques
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
Ahora vamos a ver una serie de modelos basados en árboles de decisión. Los árboles de decisión son modelos muy intuitivos. Codifican una serie de decisiones del tipo "SI" "ENTONCES", de forma similar a cómo las personas tomamos decisiones. Sin embargo, qué pregunta hacer y cómo proceder a cada respuesta es lo que aprenden a partir de los datos.
Por ejemplo, si quisiéramos crear una guía para identificar un animal que encontramos en la naturaleza, podríamos hacer una serie de preguntas:
- ¿El animal mide más o menos de un metro?
- *más*: ¿Tiene cuernos?
- *Sí*: ¿Son más largos de 10cm?
- *No*: ¿Tiene collar?
- *menos*: ¿Tiene dos piernas o cuatro?
- *Dos*: ¿Tiene alas?
- *Cuatro*: ¿Tiene una cola frondosa?
Y así... Esta forma de hacer particiones binarias en base a preguntas es la esencia de los árboles de decisión.
Una de las ventajas más importantes de los modelos basados en árboles es que requieren poco procesamiento de los datos.
Pueden trabajar con variables de distintos tipos (continuas y discretas) y no les afecta la escala de las variables.
Otro beneficio es que los modelos basados en árboles son "no paramétricos", lo que significa que no tienen un conjunto fijo de parámetros a aprender. En su lugar, un modelo de árbol puede ser más y más flexible, si le proporcionamos más datos. En otras palabras, el número de parámetros libres aumenta según aumentan los datos disponibles y no es un valor fijo, como pasa en los modelos lineales.
## Regresión con árboles de decisión
Un árbol de decisión funciona de una forma más o menos similar a los predictores basados en el vecino más cercano. Se utiliza de la siguiente forma:
```
from figures import make_dataset
x, y = make_dataset()
X = x.reshape(-1, 1)
plt.figure()
plt.xlabel(u'Característica X')
plt.ylabel('Objetivo y')
plt.scatter(X, y);
from sklearn.tree import DecisionTreeRegressor
reg = DecisionTreeRegressor(max_depth=5)
reg.fit(X, y)
X_fit = np.linspace(-3, 3, 1000).reshape((-1, 1))
y_fit_1 = reg.predict(X_fit)
plt.figure()
plt.plot(X_fit.ravel(), y_fit_1, color='blue', label=u"predicción")
plt.plot(X.ravel(), y, '.k', label="datos de entrenamiento")
plt.legend(loc="best");
```
Un único árbol de decisión nos permite estimar la señal de una forma no paraḿetrica, pero está claro que tiene algunos problemas. En algunas regiones, el modelo muestra un alto sesgo e infra-aprende los datos (observa las regiones planas, donde no predecimos correctamente los datos), mientras que en otras el modelo muestra varianza muy alta y sobre aprende los datos (observa los picos pequeños de la superficie obtenida, guiados por puntos de entrenamiento "ruidosos").
Clasificación con árboles de decisión
==================
Los árboles de decisión para clasificación actúan de una forma muy similar, asignando todos los ejemplos de una hoja a la etiqueta mayoritaria en esa hoja:
```
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from figures import plot_2d_separator

# Two Gaussian blobs, one per class.
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=100)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Depth-limited tree to keep over-fitting in check.
clf = DecisionTreeClassifier(max_depth=5)
clf.fit(X_train, y_train)

# Decision regions plus train (semi-transparent) and test points.
plt.figure()
plot_2d_separator(clf, X, fill=True)
plt.scatter(X_train[:, 0], X_train[:, 1], c=np.array(['b', 'r'])[y_train], s=60, alpha=.7, edgecolor='k')
plt.scatter(X_test[:, 0], X_test[:, 1], c=np.array(['b', 'r'])[y_test], s=60, edgecolor='k');
```
Hay varios parámetros que controlan la complejidad de un árbol, pero uno que es bastante fácil de entender es la máxima profundidad. Esto limita hasta qué nivel se puede afinar particionando el espacio, o, lo que es lo mismo, cuántos antecedentes tienen como máximo las reglas "Si-Entonces".
Es importante ajustar este parámetro de la mejor forma posible para árboles y modelos basados en árboles. El gráfico interactivo que encontramos a continuación muestra como se produce infra-ajuste y sobre-ajuste para este modelo. Tener un ``max_depth=1`` es claramente un caso de infra-ajuste, mientras que profundidades de 7 u 8 claramente sobre-ajustan. La máxima profundidad para un árbol en este dataset es 8, ya que, a partir de ahí, todas las ramas tienen ejemplos de un única clase. Es decir, todas las ramas son **puras**.
En el gráfico interactivo, las regiones a las que se les asignan colores azules o rojos indican que la clase predicha para ese región es una o la otra. El grado del color indica la probabilidad para esa clase (más oscuro, mayor probabilidad), mientras que las regiones amarillas tienen la misma probabilidad para las dos clases. Las probabilidades se asocian a la cantidad de ejemplos que hay de cada clase en la región evaluada.
```
%matplotlib notebook
from figures import plot_tree_interactive
plot_tree_interactive()
```
Los árboles de decisión son rápidos de entrenar, fáciles de entender y suele llevar a modelos interpretables. Sin embargo, un solo árbol de decisión a veces tiende al sobre-aprendizaje. Jugando con el gráfico anterior, puedes ver como el modelo empieza a sobre-entrenar antes incluso de que consiga una buena separación de los datos.
Por tanto, en la práctica, es más común combinar varios árboles para producir modelos que generalizan mejor. El método más común es el uso de bosques aleatorios y *gradient boosted trees*.
## Bosques aleatorios
Los bosques aleatorios son simplemente conjuntos de varios árboles, que han sido construidos usando subconjuntos aleatorios diferentes de los datos (muestreados con reemplazamiento) y subconjuntos aleatorios distintos de características (sin reemplazamiento). Esto hace que los árboles sean distintos entre si, y que cada uno aprenda aspectos distintos de los datos. Al final, las predicciones se promedian, llegando a una predicción suavizada que tiende a sobre-entrenar menos.
```
from figures import plot_forest_interactive
plot_forest_interactive()
```
## Elegir el estimador óptimo usando validación cruzada
```
# This code can take quite a while to run.
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier

digits = load_digits()
X, y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Grid-search max_features / max_depth for a 200-tree forest, using all
# available cores (n_jobs=-1).
rf = RandomForestClassifier(n_estimators=200)
parameters = {'max_features':['sqrt', 'log2', 10],
              'max_depth':[5, 7, 9]}

clf_grid = GridSearchCV(rf, parameters, n_jobs=-1)
clf_grid.fit(X_train, y_train)

clf_grid.score(X_train, y_train)
clf_grid.score(X_test, y_test)
clf_grid.best_params_
```
## Gradient Boosting
Otro método útil tipo *ensemble* es el *Boosting*. En lugar de utilizar digamos 200 estimadores en paralelo, construimos uno por uno los 200 estimadores, de forma que cada uno refina los resultados del anterior. La idea es que aplicando un conjunto de modelos muy simples, se obtiene al final un modelo final mejor que los modelos individuales.
```
from sklearn.ensemble import GradientBoostingRegressor

# 100 sequential trees; each new tree refines the previous ensemble's output.
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5, learning_rate=.2)
clf.fit(X_train, y_train)

print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
```
<div class="alert alert-success">
<b>Ejercicio: Validación cruzada para Gradient Boosting</b>:
<ul>
<li>
Utiliza una búsqueda *grid* para optimizar los parámetros `learning_rate` y `max_depth` de un *Gradient Boosted
Decision tree* para el dataset de los dígitos manuscritos.
</li>
</ul>
</div>
```
from sklearn.datasets import load_digits
from sklearn.ensemble import GradientBoostingClassifier

# Handwritten-digits data for the grid-search exercise.
digits = load_digits()
X_digits = digits.data
y_digits = digits.target
# split the dataset and run the grid search here
```
## Importancia de las características
Las clases ``RandomForest`` y ``GradientBoosting`` tienen un atributo `feature_importances_` una vez que han sido entrenados. Este atributo es muy importante e interesante. Básicamente, cuantifica la contribución de cada característica al rendimiento del árbol.
```
# Keep only digits 0 and 1 to form a binary classification problem.
binary_mask = y_digits < 2
X, y = X_digits[binary_mask], y_digits[binary_mask]

forest = RandomForestClassifier(n_estimators=300, n_jobs=1)
forest.fit(X, y)

print(forest.feature_importances_)  # one importance value per feature
plt.figure()
# The 64 importances map back onto the 8x8 pixel grid of each image.
plt.imshow(forest.feature_importances_.reshape(8, 8), cmap=plt.cm.viridis, interpolation='nearest')
```
| github_jupyter |
```
#remove cell visibility
# Inject a JavaScript snippet that lets the reader show/hide all code cells
# in the rendered notebook via a toggle link.
from IPython.display import HTML
tag = HTML('''<script>
code_show=true;
function code_toggle() {
    if (code_show){
        $('div.input').hide()
    } else {
        $('div.input').show()
    }
    code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''')
display(tag)

%matplotlib inline
import control
import numpy
import sympy as sym
from IPython.display import display, Markdown
import ipywidgets as widgets
import matplotlib.pyplot as plt
# Render a numpy array as a LaTeX bmatrix string.
def bmatrix(a):
    """Return a LaTeX ``bmatrix`` representation of *a*.

    Based on a helper by Damir Arbula (ICCT project).

    :a: numpy array (at most 2-D)
    :returns: LaTeX bmatrix as a string
    """
    if len(a.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    # str(a) prints rows on separate lines; strip numpy's brackets first.
    cleaned = str(a).replace('[', '').replace(']', '')
    body = [' ' + ' & '.join(row.split()) + r'\\' for row in cleaned.splitlines()]
    return '\n'.join([r'\begin{bmatrix}'] + body + [r'\end{bmatrix}'])
# Display a numpy array as a LaTeX vmatrix (determinant-style bars).
def vmatrix(a):
    """Return a LaTeX ``vmatrix`` representation of *a*.

    :a: numpy array (at most 2-D)
    :returns: LaTeX vmatrix as a string
    """
    if len(a.shape) > 2:
        # Fixed: the message previously said 'bmatrix' (copy-paste error).
        raise ValueError('vmatrix can at most display two dimensions')
    lines = str(a).replace('[', '').replace(']', '').splitlines()
    rv = [r'\begin{vmatrix}']
    rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
    rv += [r'\end{vmatrix}']
    return '\n'.join(rv)
#matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value !
class matrixWidget(widgets.VBox):
    """Matrix-shaped input widget: a VBox of HBox rows of FloatText cells.

    The current cell contents are mirrored into ``self.value`` as a numpy
    matrix, so the widget can be passed directly to
    ``widgets.interactive_output``.
    """

    def updateM(self,change):
        """Copy every FloatText cell into ``self.M_`` and publish it as value."""
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.M_[irow,icol] = self.children[irow].children[icol].value
                #print(self.M_[irow,icol])
        self.value = self.M_

    def dummychangecallback(self,change):
        # Intentionally a no-op placeholder callback.
        pass

    def __init__(self,n,m):
        """Create an n-by-m grid of FloatText widgets initialised to zero."""
        self.n = n
        self.m = m
        self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))
        self.value = self.M_
        widgets.VBox.__init__(self,
                              children = [
                                  widgets.HBox(children =
                                               [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]
                                              )
                                  for j in range(n)
                              ])
        #fill in widgets and tell interact to call updateM each time a children changes value
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
                self.children[irow].children[icol].observe(self.updateM, names='value')
        #value = Unicode('example@example.com', help="The email value.").tag(sync=True)
        self.observe(self.updateM, names='value', type= 'All')

    def setM(self, newM):
        """Replace the whole matrix programmatically.

        Callbacks are detached first so that setting the cell values one by
        one does not trigger a cascade of updateM calls, then re-attached.
        """
        #disable callbacks, change values, and reenable
        self.unobserve(self.updateM, names='value', type= 'All')
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].unobserve(self.updateM, names='value')
        self.M_ = newM
        self.value = self.M_
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].observe(self.updateM, names='value')
        self.observe(self.updateM, names='value', type= 'All')
        #self.children[irow].children[icol].observe(self.updateM, names='value')
#overlaod class for state space systems that DO NOT remove "useless" states (what "professor" of automatic control would do this?)
class sss(control.StateSpace):
    """State-space wrapper that keeps every state.

    control.StateSpace can silently drop states it considers "useless";
    this subclass disables that behaviour so the model dimensions used in
    the interconnection code below are preserved.
    """
    def __init__(self,*args):
        #call base class init constructor
        control.StateSpace.__init__(self,*args)
    #disable function below in base class
    def _remove_useless_states(self):
        pass
```
## Missile attitude control
Missile guidance is usually achieved by means of acceleration commands. The dynamic model of the missile of interest is given by the following equations obtained by system identification:
\begin{cases}
\dot{\textbf{X}}=\begin{bmatrix}\dot{x_1} \\ \dot{x_2} \\ \dot{x_3}\end{bmatrix} =\begin{bmatrix}-1.364 & -92.82 & -128.46 \\ 1 & -4.68 & -0.087 \\ 0 & 0 & -190 \end{bmatrix} \begin{bmatrix} x_1 \\x_2 \\ x_3 \end{bmatrix}+\begin{bmatrix}0 \\0 \\190\end{bmatrix}u \\
a=\begin{bmatrix}1.36 & -184.26 & 76.43\end{bmatrix}\textbf{X},
\end{cases}
where $x_1$ is pitch rate in rad/s, $x_2$ is angle of attack in rad, $x_3$ is rudder angle in rad, $u$ is rudder command in rad/s, and the output is lateral acceleration $a$ in m/$\text{s}^2$. Aim is to design a flight control system that regulates missile acceleration to the desired value $a_d$. The maximum rudder deflection is $\pm$17 degrees.
The aim is to design a regulator that controls the missile acceleration by acting on rudder angle according to the following specifications:
- steady-state error (in response to a desired acceleration step input equal to $3 G \simeq 30$ $\text{m/s}^2$) is less than 1%,
- max overshoot: 5%,
- settling time for 5% tolerance band is less than 0.1 seconds.
### Regulator design
#### Controller design
The system's controllability matrix $\mathcal{C}$ is:
```
# Identified missile dynamics (see the equations above): state, input,
# output and feed-through matrices.
A = numpy.matrix('-1.364 -92.82 -128.46; 1 -4.68 -0.087; 0 0 -190')
B = numpy.matrix('0; 0; 190')
C = numpy.matrix('1.36 -184.26 76.43')
D = numpy.matrix('0')
# Controllability matrix of the (A, B) pair.
Ctrb = control.ctrb(A,B)
display(Markdown(bmatrix(Ctrb)))
# print(numpy.linalg.matrix_rank(Ctrb))
```
that has rank equal to 3, so the system is controllable.
The transfer function of the system is
```
# Build the state-space model (D = 0) and print its transfer function.
sys = sss(A,B,C,0)
print(control.ss2tf(sys))
```
and the poles and zeros are
```
# Open-loop poles and zeros of the identified model.
print('Poles: ', sys.pole())
print('Zeros: ', sys.zero())
```
We find that placing 2 complex conjugate poles near the zeros results in a good response, so the poles that we choose are $-2+19.1i$, $-2-19.1i$ and $-45$.
#### Observer design
The system's observability matrix $\mathcal{O}$ is:
```
# Observability matrix of the (A, C) pair.
Obsv = control.obsv(A,C)
display(Markdown(bmatrix(Obsv)))
# print(numpy.linalg.matrix_rank(Obsv))
```
that has rank equal to 3, so the system is observable.
The only requirement that we have for the observer is that the error dynamics converges in approximately less than 0.1 s. A good choice for the observer poles is $-50$, $-50$, and $-50$.
### How to use this notebook?
Try to achieve the performance requirements with other locations of the poles and with errors in the initial state of the observer.
```
# Preparatory cell
# Default initial state estimate, controller gain K and observer gain L.
X0 = numpy.matrix('0.0; 0.0; 0.0')
K = numpy.matrix([8/15,-4.4,-4])
L = numpy.matrix([[23],[66],[107/3]])
# Matrix widgets mirroring the model/design matrices for interactive editing.
Aw = matrixWidget(3,3)
Aw.setM(A)
Bw = matrixWidget(3,1)
Bw.setM(B)
Cw = matrixWidget(1,3)
Cw.setM(C)
X0w = matrixWidget(3,1)
X0w.setM(X0)
Kw = matrixWidget(1,3)
Kw.setM(K)
Lw = matrixWidget(3,1)
Lw.setM(L)
# Controller eigenvalue inputs: eig1c is the single real pole used in the
# all-real case, eig2c holds (real, imag) of the complex pair, eig3c the
# remaining real pole.
eig1c = matrixWidget(1,1)
eig2c = matrixWidget(2,1)
eig3c = matrixWidget(1,1)
eig1c.setM(numpy.matrix([-60.]))
eig2c.setM(numpy.matrix([[-2.],[-19.1]]))
eig3c.setM(numpy.matrix([-45.]))
# Observer eigenvalue inputs (same layout as the controller ones).
eig1o = matrixWidget(1,1)
eig2o = matrixWidget(2,1)
eig3o = matrixWidget(1,1)
eig1o.setM(numpy.matrix([-50.]))
eig2o.setM(numpy.matrix([[-50.],[0.]]))
eig3o.setM(numpy.matrix([-50.]))

# Misc
#create dummy widget
# Hidden widget whose value is flipped by the Test button so that
# interactive_output re-runs the callback.
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))
#create button widget
START = widgets.Button(
    description='Test',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Test',
    icon='check'
)

def on_start_button_clicked(b):
    #This is a workaround to have intreactive_output call the callback:
    # force the value of the dummy widget to change
    if DW.value> 0 :
        DW.value = -1
    else:
        DW.value = 1
    pass
START.on_click(on_start_button_clicked)

# Define type of method
selm = widgets.Dropdown(
    options= ['Set K and L', 'Set the eigenvalues'],
    value= 'Set the eigenvalues',
    description='',
    disabled=False
)
# Define the number of complex eigenvalues
sele = widgets.Dropdown(
    options= ['0 complex eigenvalues', '2 complex eigenvalues'],
    value= '2 complex eigenvalues',
    description='Complex eigenvalues:',
    style = {'description_width': 'initial'},
    disabled=False
)
#define type of ipout
selu = widgets.Dropdown(
    options=['impulse', 'step', 'sinusoid', 'square wave'],
    value='step',
    description='Type of reference:',
    style = {'description_width': 'initial'},
    disabled=False
)
# Define the values of the input
# Reference slider, defaulting to 3 G (the specification's step input).
u = widgets.FloatSlider(
    value=9.81*3,
    min=0,
    max=9.81*10,
    step=0.1,
    description='Reference:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
# Period of the sinusoid / square-wave references, in seconds.
period = widgets.FloatSlider(
    value=0.5,
    min=0.001,
    max=10,
    step=0.001,
    description='Period: ',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.2f',
)
# Read-only display of the closed-loop DC gain used to scale the reference.
gain_w2 = widgets.FloatText(
    value=1.,
    description='',
    disabled=True
)
# Simulation horizon in seconds (0 falls back to 1 s in the callback).
simTime = widgets.FloatText(
    value=1.5,
    description='',
    disabled=False
)
# Support functions
def eigen_choice(sele):
    """Enable/disable the eigenvalue widgets to match the dropdown choice
    and return the number of complex eigenvalues selected (0 or 2)."""
    if sele == '0 complex eigenvalues':
        # All-real case: single real pole boxes active, complex pairs greyed.
        for grid in (eig1c, eig1o):
            grid.children[0].children[0].disabled = False
        for grid in (eig2c, eig2o):
            grid.children[1].children[0].disabled = True
        eig = 0
    if sele == '2 complex eigenvalues':
        # Complex-pair case: the imaginary-part boxes become editable.
        for grid in (eig1c, eig1o):
            grid.children[0].children[0].disabled = True
        for grid in (eig2c, eig2o):
            grid.children[1].children[0].disabled = False
        eig = 2
    return eig
def method_choice(selm):
    """Map the dropdown text to a design-method id (1 = direct K and L,
    2 = eigenvalue placement) and grey out the eigenvalue-count selector
    when it is irrelevant."""
    if selm == 'Set K and L':
        sele.disabled = True
        mode = 1
    if selm == 'Set the eigenvalues':
        sele.disabled = False
        mode = 2
    return mode
def main_callback2(Aw, Bw, X0w, K, L, eig1c, eig2c, eig3c, eig1o, eig2o, eig3o, u, period, selm, sele, selu, simTime, DW):
    """Simulate and plot the closed loop (plant + observer + state feedback).

    Depending on ``selm``, either the user-supplied K and L are used
    directly, or both are computed by Ackermann pole placement from the
    requested controller/observer eigenvalues. The closed-loop response to
    the selected reference (impulse/step/sinusoid/square wave) is then
    simulated and plotted together with the rudder saturation limits and
    the observer estimation errors.
    """
    eige = eigen_choice(sele)
    method = method_choice(selm)
    if method == 1:
        solc = numpy.linalg.eig(A-B*K)
        solo = numpy.linalg.eig(A-L*C)
    if method == 2:
        if eige == 0:
            K = control.acker(A, B, [eig1c[0,0], eig2c[0,0], eig3c[0,0]])
            Kw.setM(K)
            # Observer gain via the dual problem: place eig(A - L C).
            L = control.acker(A.T, C.T, [eig1o[0,0], eig2o[0,0], eig3o[0,0]]).T
            Lw.setM(L)
        if eige == 2:
            # NOTE: numpy.complex was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin complex() is the documented replacement.
            K = control.acker(A, B, [eig3c[0,0],
                                     complex(eig2c[0,0],eig2c[1,0]),
                                     complex(eig2c[0,0],-eig2c[1,0])])
            Kw.setM(K)
            L = control.acker(A.T, C.T, [eig3o[0,0],
                                         complex(eig2o[0,0],eig2o[1,0]),
                                         complex(eig2o[0,0],-eig2o[1,0])]).T
            Lw.setM(L)
    # Plant with extra outputs (y and a copy of u) for the interconnection.
    sys = control.ss(A,B,numpy.vstack((C,numpy.zeros((B.shape[1],C.shape[1])))),numpy.vstack((D,numpy.eye(B.shape[1]))))
    # Static feedback "controller" implementing u = -K * x_estimated.
    sysC = control.ss(numpy.zeros((1,1)),
                      numpy.zeros((1,numpy.shape(A)[0])),
                      numpy.zeros((numpy.shape(B)[1],1)),
                      -K)
    # Luenberger observer driven by (y, u), outputting the state estimate.
    sysE = control.ss(A-L*C,
                      numpy.hstack((L,B-L*D)),
                      numpy.eye(numpy.shape(A)[0]),
                      numpy.zeros((A.shape[0],C.shape[0]+B.shape[1])))
    # Last subsystem exposes the true plant states as outputs for plotting.
    sys_append = control.append(sys, sysE, sysC, control.ss(A,B,numpy.eye(A.shape[0]),numpy.zeros((A.shape[0],B.shape[1]))))
    # Interconnection map: [input index, output index] pairs (1-based).
    Q = []
    # y into sysE
    for i in range(C.shape[0]):
        Q.append([B.shape[1]+i+1, i+1])
    # u into sysE
    for i in range(B.shape[1]):
        Q.append([B.shape[1]+C.shape[0]+i+1, C.shape[0]+i+1])
    # u into sys
    for i in range(B.shape[1]):
        Q.append([i+1, C.shape[0]+B.shape[1]+A.shape[0]+i+1])
    # u into the subsystem whose outputs are the true plant states
    for i in range(B.shape[1]):
        Q.append([2*B.shape[1]+C.shape[0]+A.shape[0]+i+1, C.shape[0]+i+1])
    # estimated state xe into sysC
    for i in range(A.shape[0]):
        Q.append([2*B.shape[1]+C.shape[0]+i+1, C.shape[0]+B.shape[1]+i+1])
    inputv = [i+1 for i in range(B.shape[1])]
    outputv = [i+1 for i in range(numpy.shape(sys_append.C)[0])]
    sys_CL = control.connect(sys_append,
                             Q,
                             inputv,
                             outputv)
    # Scale the reference by the closed-loop DC gain so the output tracks u.
    dcgain = control.dcgain(sys_CL[0,0])
    gain_w2.value = dcgain
    if dcgain != 0:
        u1 = u/gain_w2.value
    else:
        print('The feedforward gain setted is 0 and it is changed to 1')
        u1 = u/1
    print('The static gain of the closed loop system (from the reference to the output) is: %.5f' %dcgain)
    # Initial condition: plant states zero, observer states from X0w,
    # trailing zeros for the auxiliary state-output subsystem.
    X0w1 = numpy.zeros((A.shape[0],1))
    for j in range(A.shape[0]):
        X0w1 = numpy.vstack((X0w1,X0w[j]))
    X0w1 = numpy.vstack((X0w1,numpy.zeros((A.shape[0],1))))
    if simTime != 0:
        T = numpy.linspace(0, simTime, 10000)
    else:
        T = numpy.linspace(0, 1, 10000)
    # Build the displayed reference U and the gain-compensated input U1.
    # NOTE(review): recent python-control returns (T, yout) from
    # forced_response unless return_x=True — confirm the pinned version.
    if selu == 'impulse': #selu
        U = [0 for t in range(0,len(T))]
        U[0] = u
        U1 = [0 for t in range(0,len(T))]
        U1[0] = u1
        T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
    if selu == 'step':
        U = [u for t in range(0,len(T))]
        U1 = [u1 for t in range(0,len(T))]
        T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
    if selu == 'sinusoid':
        U = u*numpy.sin(2*numpy.pi/period*T)
        U1 = u1*numpy.sin(2*numpy.pi/period*T)
        T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
    if selu == 'square wave':
        U = u*numpy.sign(numpy.sin(2*numpy.pi/period*T))
        U1 = u1*numpy.sign(numpy.sin(2*numpy.pi/period*T))
        T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
    # N.B. the first 3 states of xout belong to the plant, the last 3 to the observer
    step_info_dict = control.step_info(sys_CL[0,0],SettlingTimeThreshold=0.05,T=T)
    print('Step info: \n\tRise time =',step_info_dict['RiseTime'],'\n\tSettling time (5%) =',step_info_dict['SettlingTime'],'\n\tOvershoot (%)=',step_info_dict['Overshoot'])
    # Peak rudder angle x3 as a percentage of the ±17 degree limit.
    print('Max x3 value (%)=', max(abs(yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]))/(numpy.pi/180*17)*100)
    fig = plt.figure(num='Simulation1', figsize=(14,12))
    fig.add_subplot(221)
    plt.title('Output response')
    plt.ylabel('Output')
    plt.plot(T,yout[0],T,U,'r--')
    plt.xlabel('$t$ [s]')
    plt.axvline(x=0,color='black',linewidth=0.8)
    plt.axhline(y=0,color='black',linewidth=0.8)
    plt.legend(['$y$','Reference'])
    plt.grid()
    fig.add_subplot(222)
    plt.title('Input')
    plt.ylabel('$u$')
    plt.plot(T,yout[C.shape[0]])
    plt.xlabel('$t$ [s]')
    plt.axvline(x=0,color='black',linewidth=0.8)
    plt.axhline(y=0,color='black',linewidth=0.8)
    plt.grid()
    fig.add_subplot(223)
    plt.title('States response')
    plt.ylabel('States')
    plt.plot(T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]],
             T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+1],
             T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2],
             T,[numpy.pi/180*17 for i in range(len(T))],'r--',
             T,[-numpy.pi/180*17 for i in range(len(T))],'r--')
    plt.xlabel('$t$ [s]')
    plt.axvline(x=0,color='black',linewidth=0.8)
    plt.axhline(y=0,color='black',linewidth=0.8)
    plt.legend(['$x_{1}$','$x_{2}$','$x_{3}$','limit +$x_{3}$','limit -$x_{3}$'])
    plt.grid()
    fig.add_subplot(224)
    plt.title('Estimation errors')
    plt.ylabel('Errors')
    plt.plot(T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]]-yout[C.shape[0]+B.shape[1]],
             T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+1]-yout[C.shape[0]+B.shape[1]+1],
             T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]-yout[C.shape[0]+B.shape[1]+2])
    plt.xlabel('$t$ [s]')
    plt.axvline(x=0,color='black',linewidth=0.8)
    plt.axhline(y=0,color='black',linewidth=0.8)
    plt.legend(['$e_{1}$','$e_{2}$','$e_{3}$'])
    plt.grid()
    #plt.tight_layout()
# Assemble the control panel: selectors on top, then K with the controller
# eigenvalues and initial estimate, then L with the observer eigenvalues,
# and finally the reference sliders and the Test button.
alltogether2 = widgets.VBox([widgets.HBox([selm,
                                           sele,
                                           selu]),
                             widgets.Label(' ',border=3),
                             widgets.HBox([widgets.Label('K:',border=3), Kw,
                                           widgets.Label(' ',border=3),
                                           widgets.Label(' ',border=3),
                                           widgets.Label('Eigenvalues:',border=3),
                                           eig1c,
                                           eig2c,
                                           eig3c,
                                           widgets.Label(' ',border=3),
                                           widgets.Label(' ',border=3),
                                           widgets.Label('X0 est.:',border=3), X0w]),
                             widgets.Label(' ',border=3),
                             widgets.HBox([widgets.Label('L:',border=3), Lw,
                                           widgets.Label(' ',border=3),
                                           widgets.Label(' ',border=3),
                                           widgets.Label('Eigenvalues:',border=3),
                                           eig1o,
                                           eig2o,
                                           eig3o,
                                           widgets.Label(' ',border=3),
                                           widgets.VBox([widgets.Label('Inverse reference gain:',border=3),
                                                         widgets.Label('Simulation time [s]:',border=3)]),
                                           widgets.VBox([gain_w2,simTime])]),
                             widgets.Label(' ',border=3),
                             widgets.HBox([u,
                                           period,
                                           START])])
# Re-run main_callback2 whenever any of the listed widgets changes value.
out2 = widgets.interactive_output(main_callback2, {'Aw':Aw, 'Bw':Bw, 'X0w':X0w, 'K':Kw, 'L':Lw,
                                                   'eig1c':eig1c, 'eig2c':eig2c, 'eig3c':eig3c, 'eig1o':eig1o, 'eig2o':eig2o, 'eig3o':eig3o,
                                                   'u':u, 'period':period, 'selm':selm, 'sele':sele, 'selu':selu, 'simTime':simTime, 'DW':DW})
out2.layout.height = '860px'
display(out2, alltogether2)
```
| github_jupyter |
```
import tensorflow as tf
import random
import gym
import numpy as np
from collections import deque
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam
import gym_super_mario_bros
from gym_super_mario_bros.actions import RIGHT_ONLY
from nes_py.wrappers import JoypadSpace
from IPython.display import clear_output
from keras.models import save_model, load_model
import time

with tf.device("/gpu:0"):
    # Create the environment restricted to the right-moving action set.
    env = gym_super_mario_bros.make('SuperMarioBros-v0')
    env = JoypadSpace(env, RIGHT_ONLY)
    total_reward = 0
    done = True
    # Smoke test: play 1000 random steps to check the environment works.
    for step in range(1000):
        env.render()
        if done:
            state = env.reset()
        state, reward, done, info = env.step(env.action_space.sample())
        print(info)
        total_reward += reward
        clear_output(wait=True)
    env.close()
class DQNAgent:
    """Deep Q-Network agent for Super Mario Bros with image observations."""

    def __init__(self, state_size, action_size):
        self.state_space = state_size    # observation shape fed to the CNN
        self.action_space = action_size  # number of discrete actions
        self.memory = deque(maxlen=5000) # experience replay buffer
        self.gamma = 0.8                 # discount factor
        self.chosenAction = 0            # last action, reused while airborne
        # Epsilon-greedy exploration schedule parameters.
        self.epsilon = 1
        self.max_epsilon = 1
        self.min_epsilon = 0.01
        self.decay_epsilon = 0.0001
        # Online network is trained; target network provides stable Q targets.
        self.main_network = self.build_network()
        self.target_network = self.build_network()
        self.update_target_network()

    def build_network(self):
        """Build the convolutional Q-network mapping a frame to per-action Q-values."""
        model = Sequential()
        model.add(Conv2D(64, (4,4), strides=4, padding='same', input_shape=self.state_space))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (4,4), strides=4, padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3,3), strides=4, padding='same'))
        model.add(Activation('relu'))
        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dense(256, activation='relu'))
        # Linear output head: one Q-value per action.
        model.add(Dense(self.action_space, activation='linear'))
        model.compile(loss='mse', optimizer=Adam())
        return model

    def update_target_network(self):
        """Copy the online network weights into the target network."""
        self.target_network.set_weights(self.main_network.get_weights())

    def act(self, state, onGround):
        """Choose an action epsilon-greedily while on the ground; repeat the
        previous action otherwise (actions cannot change mid-jump).

        # assumes y_pos < 83 means Mario is on the ground — TODO confirm
        """
        if onGround < 83:
            print('on ground')
            if random.uniform(0,1) < self.epsilon:
                self.chosenAction = np.random.randint(self.action_space)
                return self.chosenAction
            q_value = self.main_network.predict(state)
            self.chosenAction = np.argmax(q_value[0])
            #print(q_value)
            return self.chosenAction
        else:
            print('not on ground')
            return self.chosenAction

    def update_epsilon(self, episode):
        """Exponentially decay epsilon toward min_epsilon with episode count."""
        self.epsilon = self.min_epsilon + (self.max_epsilon - self.min_epsilon) * np.exp(-self.decay_epsilon * episode)

    def train(self, batch_size):
        """Fit the online network toward bootstrapped targets from a random minibatch."""
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = self.main_network.predict(state)
            if done:
                target[0][action] = reward
            else:
                # Bootstrap from the (frozen) target network.
                target[0][action] = (reward + self.gamma * np.amax(self.target_network.predict(next_state)))
            self.main_network.fit(state, target, epochs=1, verbose=0)

    def store_transition(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def get_pred_act(self, state):
        """Greedy action from the online network (no exploration)."""
        q_values = self.main_network.predict(state)
        return np.argmax(q_values[0])

    def load(self, name):
        """Load saved weights into both networks."""
        self.main_network = load_model(name)
        self.target_network = load_model(name)

    def save(self, name):
        """Persist the online network to disk."""
        save_model(self.main_network, name)
# Number of discrete actions and the preprocessed frame shape (H, W, channels).
action_size = env.action_space.n
state_size = (80, 88, 1)
from PIL import Image
def preprocess_state(state):
    """Downscale an RGB frame to 88x80 pixels and convert it to grayscale."""
    frame = Image.fromarray(state)
    frame = frame.resize((88,80)).convert('L')
    return np.array(frame)
# Training hyperparameters.
num_episodes = 1000000
num_timesteps = 400000
batch_size = 64
DEBUG_LENGTH = 200  # window of recent x positions used to detect being stuck
with tf.device("/gpu:0"):
    dqn = DQNAgent(state_size, action_size)

with tf.device("/gpu:0"):
    print('STARTING TRAINING')
    # Recent x positions; if they barely change, Mario is considered stuck.
    stuck_buffer = deque(maxlen=DEBUG_LENGTH)
    for i in range(num_episodes):
        Return = 0
        done = False
        time_step = 0
        onGround = 79
        state = preprocess_state(env.reset())
        state = state.reshape(-1, 80, 88, 1)
        for t in range(num_timesteps):
            #env.render()
            time_step += 1
            # If stuck (same x position dominates the buffer), force the
            # "on ground" branch so a fresh action is picked.
            if t > 1 and stuck_buffer.count(stuck_buffer[-1]) > DEBUG_LENGTH - 50:
                action = dqn.act(state, onGround=79)
            else:
                action = dqn.act(state, onGround)
            next_state, reward, done, info = env.step(action)
            onGround = info['y_pos']
            stuck_buffer.append(info['x_pos'])
            next_state = preprocess_state(next_state)
            next_state = next_state.reshape(-1, 80, 88, 1)
            dqn.store_transition(state, action, reward, next_state, done)
            state = next_state
            Return += reward
            print(f"Episode is: {i}\nTotal Time Step: {time_step}\nCurrent Reward: {Return}\nEpsilon is: {dqn.epsilon}")
            clear_output(wait=True)
            if done:
                break
            # Train once enough experience is collected (warm-up of 5 episodes).
            if len(dqn.memory) > batch_size and i > 5:
                dqn.train(batch_size)
                dqn.update_epsilon(i)
                clear_output(wait=True)
        # Sync the target network at the end of each episode.
        dqn.update_target_network()
    env.close()
    dqn.save('Mario.h5')

dqn.load('Mario.h5')
# Evaluation: play greedily with the trained network until interrupted.
while 1:
    done = False
    state = preprocess_state(env.reset())
    state = state.reshape(-1, 80, 88, 1)
    total_reward = 0
    onGround = 79
    while not done:
        env.render()
        action = dqn.act(state, onGround)
        next_state, reward, done, info = env.step(action)
        onGround = info['y_pos']
        next_state = preprocess_state(next_state)
        next_state = next_state.reshape(-1, 80, 88, 1)
        state = next_state
env.close()
```
| github_jupyter |
## What is a Variable?
A variable is any characteristics, number, or quantity that can be measured or counted. For example:
- Age (21, 35, 62, ...)
- Gender (male, female)
- Income (GBP 20000, GBP 35000, GBP 45000, ...)
- House price (GBP 350000, GBP 570000, ...)
- Country of birth (China, Russia, Costa Rica, ...)
- Eye colour (brown, green, blue, ...)
- Vehicle make (Ford, VolksWagen, ...)
...are examples of variables. They are called 'variable' because the value they take may vary (and it usually does) in a population.
Most variables in a data set can be classified into one of two major types:
**Numerical variables** and **categorical variables**
In this notebook, I will discuss Categorical variables
===================================================================================
## Categorical variables
The values of a categorical variable are selected from a group of **categories**, also called **labels**. Examples are gender (male or female) and marital status (never married, married, divorced or widowed).
Other examples of categorical variables include:
- Intended use of loan (debt-consolidation, car purchase, wedding expenses, ...)
- Mobile network provider (Vodafone, Orange, ...)
- Postcode
Categorical variables can be further categorised into ordinal and nominal variables.
### Ordinal categorical variables
Categorical variable in which categories can be meaningfully ordered are called ordinal. For example:
- Student's grade in an exam (A, B, C or Fail).
- Days of the week can be ordinal with Monday = 1 and Sunday = 7.
- Educational level, with the categories Elementary school, High school, College graduate and PhD ranked from 1 to 4.
### Nominal categorical variable
There isn't an intrinsic order of the labels. For example, country of birth (Argentina, England, Germany) is nominal. Other examples of nominal variables include:
- Postcode
- Vehicle make (Citroen, Peugeot, ...)
There is nothing that indicates an intrinsic order of the labels, and in principle, they are all equal.
**To be considered:**
Sometimes categorical variables are coded as numbers when the data are recorded (e.g. gender may be coded as 0 for males and 1 for females). The variable is still categorical, despite the use of numbers.
In a similar way, individuals in a survey may be coded with a number that uniquely identifies them (for example to avoid storing personal information for confidentiality). This number is really a label, and the variable then categorical. The number has no meaning other than making it possible to uniquely identify the observation (in this case the interviewed subject).
Ideally, when we work with a dataset in a business scenario, the data will come with a dictionary that indicates if the numbers in the variables are to be considered as categories or if they are numerical. And if the numbers are categories, the dictionary would explain what they intend to represent.
=============================================================================
## Real Life example: Peer to peer lending (Finance)
### Lending Club
**Lending Club** is a peer-to-peer Lending company based in the US. They match people looking to invest money with people looking to borrow money. When investors invest their money through Lending Club, this money is passed onto borrowers, and when borrowers pay their loans back, the capital plus the interest passes on back to the investors. It is a win for everybody as they can get typically lower loan rates and higher investor returns.
If you want to learn more about Lending Club follow this [link](https://www.lendingclub.com/).
The Lending Club dataset contains complete loan data for all loans issued through 2007-2015, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. Features include credit scores, number of finance inquiries, address including zip codes and state, and collections among others. Collections indicates whether the customer has missed one or more payments and the team is trying to recover their money.
The file is a matrix of about 890 thousand observations and 75 variables. More detail on this dataset can be found in [Kaggle's website](https://www.kaggle.com/wendykan/lending-club-loan-data)
Let's go ahead and have a look at the variables!
====================================================================================================
To download the Lending Club loan book from Kaggle go to this [website](https://www.kaggle.com/wendykan/lending-club-loan-data)
Scroll down to the bottom of the page, and click on the link 'loan.csv', and then click the 'download' blue button towards the right of the screen, to download the dataset.
Unzip it, and save it to a directory of your choice.
**Note that you need to be logged in to Kaggle in order to download the datasets**.
If you save it in the same directory from which you are running this notebook, then you can load it the same way I will load it below.
====================================================================================================
```
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

# let's load the dataset with just a few columns and a few rows,
# to speed things up
use_cols = ['id', 'purpose', 'loan_status', 'home_ownership']

# sample() draws 10000 random rows from the full ~890k-row loan book.
data = pd.read_csv(
    'loan.csv', usecols=use_cols).sample(
    10000, random_state=44)  # set a seed for reproducibility

data.head()

# let's inspect the variable home ownership,
# which indicates whether the borrowers own their home
# or if they are renting for example, among other things.
data.home_ownership.unique()

# let's make a bar plot, with the number of loans
# for each category of home ownership
fig = data['home_ownership'].value_counts().plot.bar()
fig.set_title('Home Ownership')
fig.set_ylabel('Number of customers')
```
The majority of the borrowers either own their house on a mortgage or rent their property. A few borrowers own their home completely. The category 'Other' seems to be empty. To be completely sure, we could print the numbers as below.
```
# exact number of borrowers in each home-ownership category
data['home_ownership'].value_counts()
```
There are 2 borrowers that have other arrangements for their property. For example, they could live with their parents, or live in a hotel.
```
# the "purpose" variable is another categorical variable
# that indicates how the borrowers intend to use the
# money they are borrowing, for example to improve their
# house, or to cancel previous debt.
data.purpose.unique()

# let's make a bar plot with the number of borrowers
# within each category
fig = data['purpose'].value_counts().plot.bar()
fig.set_title('Loan Purpose')
fig.set_ylabel('Number of customers')
```
The majority of the borrowers intend to use the loan for 'debt consolidation' or to repay their 'credit cards'. This is quite common among borrowers. What the borrowers intend to do is to consolidate all the debt that they have on different financial items into one single debt, the new loan that they will take from Lending Club in this case. This loan will usually provide an advantage to the borrower, either in the form of lower interest rates than a credit card, for example, or a longer repayment period.
```
# let's look at one additional categorical variable,
# "loan status", which represents the current status
# of the loan. This is whether the loan is still active
# and being repaid, or if it was defaulted,
# or if it was fully paid among other things.
data.loan_status.unique()

# let's make a bar plot with the number of borrowers
# within each category
fig = data['loan_status'].value_counts().plot.bar()
fig.set_title('Status of the Loan')
fig.set_ylabel('Number of customers')
```
We can see that the majority of the loans are active (current) and a big number have been 'Fully paid'. The remaining labels have the following meaning:
- Late (16-30 days): customer missed a payment
- Late (31-120 days): customer is behind in payments for more than a month
- Charged off: the company declared that they will not be able to recover the money for that loan ( money is typically lost)
- Issued: loan was granted but money not yet sent to borrower
- In Grace Period: window of time agreed with customer to wait for payment, usually, when customer is behind in their payments
```
# finally, let's look at a variable that is numerical,
# but its numbers have no real meaning, and therefore
# should be better considered as a categorical one.
# (each id is just a label that uniquely identifies a customer)
data['id'].head()
```
In this case, each id represents one customer. This number is assigned in order to identify the customer if needed, while
maintaining confidentiality.
```
# The variable has as many different id values as customers,
# in this case 10000, because we loaded randomly
# 10000 rows/customers from the original dataset.
# (a count below 10000 would reveal duplicated ids)
len(data['id'].unique())
```
**That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
| github_jupyter |
# Contributing
Contributions are very welcome — please do ask questions and suggest ideas in [Issues](https://github.com/nategadzhi/notoma/issues), and feel free to implement features you want and submit them via Pull Requests.
%METADATA%
layout: default
nav_order: 4
title: Contributing
### Reporting issues
Please feel free to file a Github Issue if you found a behavior that you feel is weird or counterintuitive, bad UX, or any errors or bugs.
---
### How to participate
Notoma uses a [project board to keep track of what we're currently working on](https://github.com/nategadzhi/notoma/projects/4).
We're trying to mark issues that are small enough, and would onboard you into Notoma's internal workings with `good-first-issue` label. That said, feel free to comment on any issue that you'd like to work on, as long as it's not assigned and not in the ”In progress“ column yet.
Also, please feel free to ask any questions if you'd want to work on something for Notoma, but you're not sure where or how to start — just create a new issue, or ask a question in the existing one.
---
### Pull Requests
Thank you for considering writing some code for Notoma! This part describes the easiest way to get up to speed with Notoma development, and how to create a pull request.
Here's the steps you'd need to do in order to get your code merged:
1. Fork the repository
2. Development install with linters: `make contrib`
3. Make sure you regenerate documentation for anything you change: `pipenv run notoma-dev docs`
4. Commit to your fork branch and then do a PR.
#### Local dev setup
```bash
git clone git@github.com:nategadzhi/notoma.git
hub fork #or whatever
make contrib #will install dev dependencies and setup local pre-commit linters.
```
#### Linters
- [`black`](https://github.com/psf/black) for code formatting
- [`flake8`](https://gitlab.com/pycqa/flake8) for linting
- [Code Climate](https://codeclimate.com/github/nategadzhi/notoma) for code quality.
Linters are run automatically on each commit to any branch, and on pull requests as well.
#### Updating documentation
[Documentation website](https://nategadzhi.github.io/notoma/) and all of its docs are just a Jekyll site, located in [`./docs/`](https://github.com/nategadzhi/notoma/tree/master/docs). The markdown files in there are automatically generated from Jupyter Notebooks in `./notebooks` in this repo.
Please edit the notebooks, not the markdown files directly, and please include proposed documentation changes with your pull requests.
| github_jupyter |
# Quantization of Signals
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).*
## Requantization of a Speech Signal
The following example illustrates the requantization of a speech signal. The signal was originally recorded with a wordlength of $w=16$ bits. It is requantized by a [uniform mid-tread quantizer](linear_uniform_characteristic.ipynb#Mid-Tread-Chacteristic-Curve) to various wordlengths. The signal-to-noise ratio (SNR) after quantization is computed and a portion of the (quantized) signal is plotted. It is further possible to listen to the requantized signal and the quantization error. Note, the level of the quantization error has been normalized for better audability of the effects.
```
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
%matplotlib inline
idx = 130000 # index to start plotting
def uniform_midtread_quantizer(x, w):
    """Quantize a signal with a uniform mid-tread characteristic.

    Parameters
    ----------
    x : array_like
        Input signal, nominally within the range [-1, 1).
    w : int
        Wordlength in bits; the quantization step is Q = 2**-(w-1).

    Returns
    -------
    numpy.ndarray
        Quantized signal on the mid-tread grid. The input is not modified.
    """
    # quantization step for a signed w-bit representation
    step = 1 / (2 ** (w - 1))
    # limit the amplitude to the representable range [-1, 1 - Q]
    limited = np.clip(x, -1, 1 - step)
    # round to the nearest multiple of the step (mid-tread: zero is a level)
    return step * np.floor(limited / step + 1 / 2)
def evaluate_requantization(x, xQ):
    """Report the SNR of a requantized signal and plot a short excerpt.

    Parameters
    ----------
    x : numpy.ndarray
        Original (full-wordlength) signal.
    xQ : numpy.ndarray
        Requantized version of ``x``.

    Returns
    -------
    numpy.ndarray
        The quantization error scaled to a peak amplitude of 0.2 so it is
        audible but not loud when written to a wav file.

    Notes
    -----
    Uses the module-level ``idx`` to select the 100-sample excerpt shown
    in the plot.
    """
    e = xQ - x
    # signal-to-noise ratio in dB from the variance ratio
    SNR = 10*np.log10(np.var(x)/np.var(e))
    print('SNR: {:2.1f} dB'.format(SNR))
    # plot a 100-sample excerpt of signal, requantized signal and error
    plt.figure(figsize=(10, 4))
    plt.plot(x[idx:idx+100], label=r'signal $x[k]$')
    plt.plot(xQ[idx:idx+100], label=r'requantized signal $x_Q[k]$')
    plt.plot(e[idx:idx+100], label=r'quantization error $e[k]$')
    plt.xlabel(r'sample index $k$')
    plt.grid()
    plt.legend()
    # normalize error to 0.2 peak amplitude before returning
    e = .2 * e / np.max(np.abs(e))
    return e
# load speech sample (16-bit source material)
x, fs = sf.read('../data/speech.wav')
# normalize to unit peak so the quantizer's full [-1, 1) range is used
x = x/np.max(np.abs(x))
```
**Original Signal**
<audio src="../data/speech.wav" controls>Your browser does not support the audio element.</audio>
[../data/speech.wav](../data/speech.wav)
### Requantization to 8 bit
```
# requantize the normalized speech to 8 bits, report the SNR, and
# export both the quantized signal and the level-normalized error
xQ = uniform_midtread_quantizer(x, 8)
e = evaluate_requantization(x, xQ)
sf.write('speech_8bit.wav', xQ, fs)
sf.write('speech_8bit_error.wav', e, fs)
```
**Requantized Signal**
<audio src="speech_8bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_8bit.wav](speech_8bit.wav)
**Quantization Error**
<audio src="speech_8bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_8bit_error.wav](speech_8bit_error.wav)
### Requantization to 6 bit
```
# same evaluation with a 6-bit wordlength
xQ = uniform_midtread_quantizer(x, 6)
e = evaluate_requantization(x, xQ)
sf.write('speech_6bit.wav', xQ, fs)
sf.write('speech_6bit_error.wav', e, fs)
```
**Requantized Signal**
<audio src="speech_6bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_6bit.wav](speech_6bit.wav)
**Quantization Error**
<audio src="speech_6bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_6bit_error.wav](speech_6bit_error.wav)
### Requantization to 4 bit
```
# same evaluation with a 4-bit wordlength
xQ = uniform_midtread_quantizer(x, 4)
e = evaluate_requantization(x, xQ)
sf.write('speech_4bit.wav', xQ, fs)
sf.write('speech_4bit_error.wav', e, fs)
```
**Requantized Signal**
<audio src="speech_4bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_4bit.wav](speech_4bit.wav)
**Quantization Error**
<audio src="speech_4bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_4bit_error.wav](speech_4bit_error.wav)
### Requantization to 2 bit
```
# same evaluation with a 2-bit wordlength (strong, clearly audible error)
xQ = uniform_midtread_quantizer(x, 2)
e = evaluate_requantization(x, xQ)
sf.write('speech_2bit.wav', xQ, fs)
sf.write('speech_2bit_error.wav', e, fs)
```
**Requantized Signal**
<audio src="speech_2bit.wav" controls>Your browser does not support the audio element.</audio>
[speech_2bit.wav](speech_2bit.wav)
**Quantization Error**
<audio src="speech_2bit_error.wav" controls>Your browser does not support the audio element.</audio>
[speech_2bit_error.wav](speech_2bit_error.wav)
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
| github_jupyter |
# Import libraries
```
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
%matplotlib inline
```
# Read csv
```
# ---- Load and prepare the dataset ----
data = pd.read_csv('Data/ml.csv')
data.columns
data.shape
# Cast code columns stored as numbers to strings so get_dummies()
# treats them as categories rather than numerics.
data['is_banked'] = data['is_banked'].apply(str)
data['code_module'] = data['code_module'].apply(str)
data['code_presentation'] = data['code_presentation'].apply(str)
# NOTE(review): the sign of date_submitted is deliberately flipped here --
# presumably so larger values mean earlier submission; confirm with the
# data dictionary.
data['date_submitted'] = -(data['date_submitted'])
data.head()
# Keep only the modelling features plus the target ('score').
data_ml = data[['date_submitted', 'is_banked', 'score',
                'code_module', 'code_presentation', 'gender', 'region',
                'highest_education', 'imd_band', 'age_band', 'num_of_prev_attempts',
                'studied_credits', 'disability', 'final_result',
                'module_presentation_length']]
# One-hot encode every categorical column.
to_dummies = ['is_banked', 'code_module', 'code_presentation', 'gender', 'region',
              'highest_education', 'imd_band', 'age_band', 'disability', 'final_result']
data_ml = pd.get_dummies(data_ml, columns=to_dummies)
data_ml.columns
# Feature matrix: all numeric/dummy columns except the target.
data_data = data_ml[['date_submitted', 'num_of_prev_attempts', 'studied_credits',
                     'module_presentation_length', 'is_banked_0', 'is_banked_1',
                     'code_module_AAA', 'code_module_BBB', 'code_module_CCC',
                     'code_module_DDD', 'code_module_EEE', 'code_module_FFF',
                     'code_module_GGG', 'code_presentation_2013B', 'code_presentation_2013J',
                     'code_presentation_2014B', 'code_presentation_2014J', 'gender_F',
                     'gender_M', 'region_East Anglian Region', 'region_East Midlands Region',
                     'region_Ireland', 'region_London Region', 'region_North Region',
                     'region_North Western Region', 'region_Scotland',
                     'region_South East Region', 'region_South Region',
                     'region_South West Region', 'region_Wales',
                     'region_West Midlands Region', 'region_Yorkshire Region',
                     'highest_education_A Level or Equivalent',
                     'highest_education_HE Qualification',
                     'highest_education_Lower Than A Level',
                     'highest_education_No Formal quals',
                     'highest_education_Post Graduate Qualification', 'imd_band_0-10%',
                     'imd_band_10-20', 'imd_band_20-30%', 'imd_band_30-40%',
                     'imd_band_40-50%', 'imd_band_50-60%', 'imd_band_60-70%',
                     'imd_band_70-80%', 'imd_band_80-90%', 'imd_band_90-100%', 'imd_band_?',
                     'age_band_0-35', 'age_band_35-55', 'age_band_55<=', 'disability_N',
                     'disability_Y', 'final_result_Distinction', 'final_result_Fail',
                     'final_result_Pass', 'final_result_Withdrawn']]
data_target = data_ml["score"]
# train_test_split is already imported at the top of the notebook; the
# redundant re-import that used to sit here was removed.
X_train, X_test, y_train, y_test = train_test_split(data_data, data_target, test_size=0.3, random_state=42)
# ---- Grid search for Random Forest hyper-parameters ----
# NOTE(review): max_features='auto' was deprecated in scikit-learn 1.1 and
# removed in 1.3 -- drop it from the grid when running a recent version.
param_grid = {
    'n_estimators': [100, 200, 300, 400, 500],
    'max_features': ['auto', 'sqrt', 'log2'],
    'max_depth': [25, 15]
}
RFR = RandomForestRegressor(n_jobs=-1)
GS = GridSearchCV(RFR, param_grid, cv=5, verbose=3)
GS.fit(X_train, y_train)
GS.best_params_
# Refit one model with the parameters selected by the grid search.
RFR = RandomForestRegressor(max_depth=25, max_features='sqrt', n_estimators=500)
RFR.fit(X_train, y_train)
y_train_pred = RFR.predict(X_train)
y_pred = RFR.predict(X_test)
# ---- Train-set metrics and predicted-vs-actual plot ----
r2 = r2_score(y_train, y_train_pred)
mae = mean_absolute_error(y_train, y_train_pred)
print ('TRAIN MODEL METRICS:')
print('The R2 score is: ' + str(r2))
print('The MAE score is: ' + str(mae))
plt.scatter(y_train, y_train_pred)
plt.plot([0, 100], [0, 100], color='red')  # identity line = perfect predictions
plt.show()
# ---- Test-set metrics and predicted-vs-actual plot ----
r2 = r2_score(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
print ('TEST MODEL METRICS:')
print('The R2 score is: ' + str(r2))
print('The MAE score is: ' + str(mae))
plt.scatter(y_test, y_pred)
plt.plot([0, 100], [0, 100], color='red')
plt.show()
```
| github_jupyter |
```
# Fetch Microsoft intraday quotes (5-minute bars) from the Alpha Vantage
# demo endpoint and decode the JSON payload.
#https://www.powercms.in/blog/how-get-json-data-remote-url-python-script
import urllib.request, json
#save url inside variable as raw string
url = r"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&apikey=demo"
#use urllib.request.urlopen()
response = urllib.request.urlopen(url)
#from var saved (HTTPresponse type), use .read() + .decode('utf-8')
string = response.read().decode('utf-8')
#load string saved into json data
jsondata = json.loads(string)
print(jsondata)
#https://www.alphavantage.co/
#https://www.alphavantage.co/documentation/
#https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&apikey=demo
#https://www.alphavantage.co/support/#api-key
#API KEY is FROM ALPHA VANTAGE
# Interactive inspection of the payload structure (notebook cells).
jsondata['Meta Data']
jsondata['Time Series (5min)']
#Get all keys of the timing available
list(jsondata['Time Series (5min)'].keys())
jsondata['Time Series (5min)']['2019-03-22 16:00:00']
jsondata['Time Series (5min)']['2019-03-22 16:00:00']['1. open']
#from the datetime module import the datetime class
from datetime import datetime
# Timestamp keys as delivered by the API; per the comment below they are
# newest-first, so `lastsaved_reversed` holds them in chronological order.
lastsaved = list(jsondata['Time Series (5min)'].keys())
#lastsaved_DT = []
#Reverse lastsaved to sort time by chronological order (earliest to latest) then append to new list
#for element in reversed(lastsaved):
#    lastsaved_DT.append(datetime.strptime(element, "%Y-%m-%d %H:%M:%S"))
lastsaved_reversed = []
for element in reversed(lastsaved):
    lastsaved_reversed.append(element)
print(lastsaved_reversed)
# Collect open/high/low/close into parallel lists, iterating in
# CHRONOLOGICAL order (lastsaved_reversed) so each price lines up with
# the x-axis labels used in the plot below. Iterating the raw,
# reverse-chronological `lastsaved` here (as the code previously did)
# silently paired prices with the wrong timestamps.
microsoftopen = []
microsofthigh = []
microsoftlow = []
microsoftclose = []
for timestamp in lastsaved_reversed:
    bar = jsondata['Time Series (5min)'][timestamp]
    microsoftopen.append(float(bar['1. open']))
    microsofthigh.append(float(bar['2. high']))
    microsoftlow.append(float(bar['3. low']))
    microsoftclose.append(float(bar['4. close']))
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import LinearLocator
%matplotlib inline
# Plot the intraday OHLC series; the x labels are the raw timestamp
# strings in lastsaved_reversed (chronological order).
# NOTE(review): the price lists must be built in the same chronological
# order as lastsaved_reversed -- see the cell above.
figure = plt.figure(1,figsize=(24,14))
microsoftplot = figure.add_subplot(111)
microsoftplot.set_xlabel('Date & Time',fontsize=24)
microsoftplot.set_ylabel('Stock Price (USD)',fontsize=24)
microsoftplot.set_title('Microsoft Stock Price',fontsize=32)
microsoftplot.tick_params(axis='y', which='major', labelsize=16)
microsoftplot.tick_params(axis='x', which='major', labelsize=12)
microsoftdailyopen = microsoftplot.plot(lastsaved_reversed,microsoftopen,color='blue', label='Open')
microsoftdailyhigh = microsoftplot.plot(lastsaved_reversed,microsofthigh,'--',color='green', label='High')
microsoftdailylow = microsoftplot.plot(lastsaved_reversed,microsoftlow,'--',color='red', label='Low')
microsoftdailyclose = microsoftplot.plot(lastsaved_reversed,microsoftclose,'o', color='black', label='Close')
microsoftplot.legend(loc=1, fontsize=24)
# thin out the categorical x ticks / use 9 evenly spaced y ticks
microsoftplot.xaxis.set_major_locator(MaxNLocator(integer=True))
microsoftplot.yaxis.set_major_locator(LinearLocator(9))
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
%matplotlib inline
#Assignment: Plot a graph of 9 companies in a 6x3 plot (using top 3x3)
#And for bottom 3x3, plot all 9 graphs together with normalized value from initial year 2014
#Range from 2014 to 2018
year = [2014, 2015, 2016, 2017, 2018,2019]
#Create list of all 9 companies and the years from end Nov each year 2014-2018
apple_price = [93.95,112.15,104.05,144.85,169.49,191.05]
microsoft_price = [39.01,50.29,57.05,69.98,94.39,117.05]
amazon_price = [332.63,668.45,760.16,1186,1502.06,1764.77]
google_price = [523.52,758.88,771.82,1046.40,1035.61,1207.65]
# NOTE(review): Samsung prices appear to be KRW divided by a hard-coded
# ~1150 KRW/USD rate, and Huawei CNY divided by ~6.5 CNY/USD -- confirm.
samsung_price = [24460/1150,25700/1150,31720/1150,55460/1150,42400/1150,46550/1150]
facebook_price = [73.75,107.32,117.02,182.78,131.73,164.34]
netflix_price = [51.47,123.84,115.21,195.75,258.82,361.01]
huawei_price = [6.95/6.5,13.2/6.5,13.80/6.5,8.99/6.5,4.67/6.5,4.66/6.5]
alibaba_price = [110.73,79.95,93.39,191.19,150.33,176.26]
#Create normalized price indices: each series divided by its 2014 value
apple_price_n = np.array(apple_price)/apple_price[0]
microsoft_price_n = np.array(microsoft_price)/microsoft_price[0]
amazon_price_n = np.array(amazon_price)/amazon_price[0]
google_price_n = np.array(google_price)/google_price[0]
samsung_price_n = np.array(samsung_price)/samsung_price[0]
facebook_price_n = np.array(facebook_price)/facebook_price[0]
netflix_price_n = np.array(netflix_price)/netflix_price[0]
huawei_price_n = np.array(huawei_price)/huawei_price[0]
alibaba_price_n = np.array(alibaba_price)/alibaba_price[0]
#Plan of subplot layering (1x1 one for title, 2x1 for overlaid graphs, 6x3 one for small graphs but only occupy top 9 spaces)
#Create big figure
stocks = plt.figure(1,figsize=(24,30))
#Create big subplot for title and remove frame (frameon=False), remove tick parameters
stocks_title = stocks.add_subplot(111, frameon=False) #remove frame but need remove ticks/axes
stocks_title.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
stocks_title.set_title('Graph of stock prices (USD) from 2014-2019 of 9 tech companies', fontsize=32)
#Create big subplot for mega chart using 2x1 plot using normal .add_subplot
#Change x-axis to be integer (year) and y-axis to be integer (price_index)
stocks_big = stocks.add_subplot(212)
# one normalized (2014 = 1.0) line per company on the shared lower panel
apple_n = stocks_big.plot(year, apple_price_n, '-o', label='Apple', linewidth = 4, markersize=8)
microsoft_n = stocks_big.plot(year, microsoft_price_n, '-o', label='Microsoft', linewidth = 4, markersize=8)
amazon_n = stocks_big.plot(year, amazon_price_n, '-o', label='Amazon', linewidth = 4, markersize=8)
google_n = stocks_big.plot(year, google_price_n, '-o', label='Google', linewidth = 4, markersize=8)
samsung_n = stocks_big.plot(year, samsung_price_n, '-o', label='Samsung', linewidth = 4, markersize=8)
facebook_n = stocks_big.plot(year, facebook_price_n, '-o', label='Facebook', linewidth = 4, markersize=8)
netflix_n = stocks_big.plot(year, netflix_price_n, '-o', label='Netflix', linewidth = 4, markersize=8)
huawei_n = stocks_big.plot(year, huawei_price_n, '-o', label='Huawei', linewidth = 4, markersize=8)
alibaba_n = stocks_big.plot(year, alibaba_price_n, '-o', label='Alibaba', linewidth = 4, markersize=8)
#legend_n = stocks_big.legend(loc=2, fontsize=24) #see below for loc positions in subplot
#get all handle/variable storing the legend in a tuple from 0-8 and also all label of the legend var in a tuple from 0-8
#each element n of the tuple has a pair of handle/label associated
handlesN, labelsN = plt.gca().get_legend_handles_labels()
order = [6,2,1,3,5,0,4,8,7] #labels checked for order of best performing stock and least performing n index auto obtained
# re-order the legend entries from best- to least-performing stock
stocks_big.legend([handlesN[idx] for idx in order],[labelsN[idx] for idx in order], loc=2, fontsize=24)
stocks_big.tick_params(axis='both', which='major', labelsize=22)
stocks_big.set_title('Graph of stock price index of all major tech companies', fontsize=26)
stocks_big.set_xlabel('Year',fontsize=24)
stocks_big.set_ylabel('Stock Price Index',fontsize=24)
stocks_big.xaxis.set_major_locator(MaxNLocator(integer=True))
stocks_big.yaxis.set_major_locator(MaxNLocator(integer=True))
#Create figure with subplot size 6x3 plot using fig, axes_small (axes_small is np.array)
#Found out that it is easier to use big figure and overlay many different size of subplot (1x1 title), (2x1 big chart),
#(6x3 small charts) into a single figure
#instead of using fig, axes to define fig and axes (matrix size) tgt since there is varying sizes
#Change x-axis to be integer (year) and y-axis to be float (prices)
# nine small panels (cells 1-9 of a 6x3 grid) with the ORIGINAL prices
netflix_s = stocks.add_subplot(631)
netflix_s.plot(year,netflix_price, '-o',color=(231/255,84/255,128/255), label='Netflix') #define color as rgb tuple
netflix_s.legend(fontsize=16, loc=2)
netflix_s.tick_params(axis='both', which='major', labelsize=18)
netflix_s.xaxis.set_major_locator(MaxNLocator(integer=True))
amazon_s = stocks.add_subplot(632)
amazon_s.plot(year,amazon_price, '-o', color='green', label='Amazon')
amazon_s.legend(fontsize=16, loc=2)
amazon_s.tick_params(axis='both', which='major', labelsize=14)
amazon_s.xaxis.set_major_locator(MaxNLocator(integer=True))
microsoft_s = stocks.add_subplot(633)
microsoft_s.plot(year,microsoft_price, '-o', color='orange', label='Microsoft')
microsoft_s.legend(fontsize=16, loc=2)
microsoft_s.tick_params(axis='both', which='major', labelsize=14)
microsoft_s.xaxis.set_major_locator(MaxNLocator(integer=True))
google_s = stocks.add_subplot(634)
google_s.plot(year,google_price, '-o', color='red', label='Google')
google_s.legend(fontsize=16, loc=2)
google_s.tick_params(axis='both', which='major', labelsize=14)
google_s.xaxis.set_major_locator(MaxNLocator(integer=True))
apple_s = stocks.add_subplot(635)
apple_s.plot(year,apple_price, '-o', color='blue', label='Apple')
apple_s.legend(fontsize=16, loc=2)
apple_s.tick_params(axis='both', which='major', labelsize=14)
apple_s.xaxis.set_major_locator(MaxNLocator(integer=True))
facebook_s = stocks.add_subplot(636)
facebook_s.plot(year,facebook_price, '-o', color='brown', label='Facebook')
facebook_s.legend(fontsize=16, loc=2)
facebook_s.tick_params(axis='both', which='major', labelsize=14)
facebook_s.xaxis.set_major_locator(MaxNLocator(integer=True))
samsung_s = stocks.add_subplot(637)
samsung_s.plot(year,samsung_price, '-o',color='m', label='Samsung') #m is magenta
samsung_s.legend(fontsize=16, loc=2)
samsung_s.tick_params(axis='both', which='major', labelsize=14)
samsung_s.xaxis.set_major_locator(MaxNLocator(integer=True))
alibaba_s = stocks.add_subplot(638)
alibaba_s.plot(year,alibaba_price, '-o', color='#999900', label='Alibaba') #can put html hex code for dark yellow
alibaba_s.legend(fontsize=16, loc=2)
alibaba_s.tick_params(axis='both', which='major', labelsize=14)
alibaba_s.xaxis.set_major_locator(MaxNLocator(integer=True))
huawei_s = stocks.add_subplot(639)
huawei_s.plot(year,huawei_price, '-o', color='0.25', label='Huawei') #value of 0-1 as string means gray
huawei_s.legend(fontsize=16, loc=1)
huawei_s.tick_params(axis='both', which='major', labelsize=14)
huawei_s.xaxis.set_major_locator(MaxNLocator(integer=True))
#Create all 9 subplots in small cells with ORIGINAL prices
plt.show()
```
| github_jupyter |
# **Boston BLUE bikes Analysis**
Team Member: Zhangcheng Guo, Chang-Han Chen, Ziqi Shan, Tsung Yen Wu, Jiahui Xu
### Topic Background and Motivation
>A rapidly growing industry, bike-sharing, replaces traditional bike rentals. BLUE bikes' renting procedures are fully automated from picking up, returning, and making payments. With bike-sharing businesses like BLUE bikes, users can easily rent a bike from a particular position and return to another position without artificial interference. Currently, there are about over 500 bike-sharing programs around the world which are composed of over 2.3 million bicycles. In Boston, the BLUE bike has over 300 bike stations and 5000 bikes in service. With growing station coverage in Boston, BLUE bikes can bring more convenience and therefore, promote more usage.
>Moreover, BLUE bikes promote the action of 'Go Green', which has become a popular way of commuting in response to climate change. BLUE bikes' business model serves as a means of providing an option to Go Green, and promotes more physical activities. It also reduces the concern of stolen bikes for users, which is a common concern in Boston.
### Project Objective
>With the good causes of BLUE bikes, it incentivizes us to learn more about the business, and align our objective with BLUE bikes' cause. We aim to help maximize bike trips for BLUE bikes to provide a healthier and more eco-friendly way of commuting by looking more in depth into potential variables that affect trip volume.
### Dataset Information and Processing
#### Data Profile
>'Bluebikes trip data' contains monthly bike trip data, and includes:
>
>- Trip Duration (seconds)
- Start Time and Date
- Stop Time and Date
- Start Station Name & ID
- End Station Name & ID
- Bike ID
- User Type (Casual = Single Trip or Day Pass user; Member = Annual or Monthly Member)
- Birth Year
- Gender, self-reported by capital (0=unknown; 1=male; 2=female)
>
>
>
>In addition to Bluebikes's data, weather information from NOAA is merged into original dataset considering impact of weather on bike rentals.
- TAVG - average temperature for the day (in whole degrees Fahrenheit). This is based on the arithmetic average of the maximum and minimum temperature for the day.
- SNOW - daily amount of snowfall (in inches to the tenths)
- AWND - average wind speed in miles per hour miles per hour, to tenths)
- PRCP - precipitation (in inches to the tenths)
>
>
>Two new columns listed are added to gain further information on each station
- District
- Total Docks (of each stations)
#### Dataset Source
>Bluebikes Trip Data, current_bluebikes_stations: https://s3.amazonaws.com/hubway-data/index.html
>
>NOAA Boston 2019 Weather Datase: <https://www.ncdc.noaa.gov/cdo-web/datasets/GHCND/stations/GHCND:USW00014739/detail>
#### Raw Datasets
Here are sneak peeks of the datasets. Please note that our datasets are stored in a private Google Drive. To request access, please email kguo@bu.edu. No commercial use allowed.
```
# Mount Google Drive (Colab) and preview the three source datasets.
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
## Sneak peek of BLUEbikes trip dataset
blueraw = pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/bb2019.csv',index_col=0);
blueraw.head(3)
## Sneak peek of NOAA Boston weather dataset
bosweather = pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/BosWeather19.csv',index_col=0);
bosweather.head(3)
## Sneak peek of district & docks dataset (header row is on line 2 of the file)
bikestations=pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/current_bluebikes_stations.csv',index_col=0,header = 1)
bikestations.head(3)
```
#### Data Preprocessing
'blueraw' dataset contains some null cells, which is processed and modified as following:
- 'age':
'age' column is created and calculated from [today's year - birthyear] for better visualization. Some cells record user age higher than 80 years old, which is suspected to be the user's manual input error. Therefore, ages over 80 years old are replaced by age median 31 to eliminate outliers but not affecting age distribution.
- 'bikeid':
bikeid column is converted from int64 to str to be treated as categorical variable.
- 'gender':
Gender columns, originally recorded as 0,1,2 are changed into 'U' (unknown), 'M' (male) and 'F' (female) to be easier treated as categorical variables.
- 'Holiday':
'Holiday' is created from starttime infomation to record whether it is weekend,Federal holidays or workdays , 1 for yes, 0 for no.
- 'District' and 'End District':
Two columns contain some missing District cells after merged with 'bike stations'. This is due to the 'bike stations' dataset recording the latest stations, while'blueraw 'dataset records the trip that occurred in 2019. With the rapid growth of the BLUE bikes business, some stations are removed or added between 2019 and now.
With those stations that can be found on Bluebikes, District names are manually added.
Rows with columns start and end station recorded 'Mobile Temporary Station' and 'Warehouse Bonfire Station in a Box', 'MTL-ECO4-01', 'Warehouse Lab PBSC' is removed (83 rows).
'blueraw' is then merged with 'bosweather' and 'bikestations', and recorded as 'Blue19', the main dataset, shown below.
```
# Load the merged main dataset (trips + weather + station info).
Blue19=pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/Blue19.csv',index_col=0);
Blue19['starttime'] = pd.to_datetime(Blue19['starttime']) ## convert time columns to datetime
Blue19['stoptime'] = pd.to_datetime(Blue19['stoptime'])
Blue19['Date'] = pd.to_datetime(Blue19['Date'])
Blue19.head(3)
Blue19.info()
# check missing data
Blue19.dropna(how= 'any', inplace =True) ## drop rows with any NA -- 83 rows with empty district names
pd.isnull(Blue19).sum()
```
### Data Behavior
##### **User Demographics**
```
# Share of trips by gender and by user type, plus age-by-gender box plot.
genderdis = Blue19.groupby('gender').count()['starttime'] ## trip counts per gender
userdis = Blue19.groupby('usertype').count()['starttime'] ## trip counts per user type
fig, axis = plt.subplots(1, 2, figsize=(15,7))
plt.ylabel('')
axis[0].set_title("Gender Distribution",fontsize = 20)
axis[1].set_title("User Type Distribution",fontsize = 20)
colors=['lightblue','steelblue','darkseagreen']
ax = genderdis.plot.pie(autopct="%.1f%%", ax = axis[0],colors=colors,fontsize = 18);
ax = userdis.plot.pie(autopct="%.1f%%", ax = axis[1],colors=colors, fontsize = 18);
plt.xlabel(' ');
plt.ylabel(' ');
mkt= Blue19[['bikeid','usertype', 'age', 'gender']]
# NOTE(review): passing a set to .agg() yields an arbitrary row order and
# is deprecated in newer pandas -- a list would be deterministic.
pd.DataFrame(mkt['age'].agg({'mean','median','std'}))
g=sns.catplot(data=mkt,x='gender',y='age',kind='box',aspect=3,palette='Blues_r');
plt.title('Age Distribution by Gender', fontsize= 20);
g.fig.set_figwidth(12)
g.fig.set_figheight(10)
```
* As shown above, more than half of the users are male which is 65.15%, and subscribers take up 78.83% of user composition.
The average age is 35 years old and the median is 31 with a standard deviation of 11.55. If divided the users into subscribers and non-subscribers, the subscribers are around 34 years old on average, whereas the non-subscribers are 41 years old on average. The difference in age between male and female is not significant, as male is around 34 years old on average and female is around 33 years old on average.
#### **Time Variables**
> * The goal is to know if there are more trips on weekdays or on weekends. To do this, we counted the average trip counts in each day of week. The result shows that on average, it is between 7,000 to 7,500 trip counts from Monday to Friday (on weekdays), while it is roughly 6,000 when it comes to Saturday and Sunday (on weekends). It seems that BLUEbikes are more popular on weekdays than on weekends.
* In addition, the average usage times are longer on weekends than on weekdays. On weekdays, a user spends around 15 to 17 minutes on BLUEbikes on average, while on weekends, a user spends roughly 22 to 23 minutes on BLUEbikes on average. This phenomenon may be explained by the fact that there are more commuters on weekdays who choose BLUEbikes for short, flexible trips. For example, commuters who take the subway for work/school may choose BLUEbikes to connect between the subway station and the place they work or study. So, the travelled distances and durations of the BLUEbikes on weekdays could be rather short. On weekends, however, there are probably more recreational uses of BLUEbikes, such as riding bikes along the coast, and that encourages users to utilize BLUEbikes regardless of how much time is spent.
###### Data Processing
```
# Validate the recorded trip duration against (stoptime - starttime) and
# build a cleaned table restricted to consistent trips of at most one day.
# .copy() prevents pandas' chained-assignment problem (SettingWithCopyWarning):
# the column assignments below would otherwise write into a view of Blue19.
blue19_dur = Blue19[['tripduration', 'starttime', 'stoptime', 'Date']].copy()
blue19_dur['starttime'] = pd.to_datetime(blue19_dur['starttime'])
blue19_dur['stoptime'] = pd.to_datetime(blue19_dur['stoptime'])
blue19_dur['Date'] = pd.to_datetime(blue19_dur['Date'])
# recompute the duration in whole seconds from the timestamps
blue19_dur['duration_manual'] = (blue19_dur['stoptime'] - blue19_dur['starttime']).astype('timedelta64[s]')
# notebook inspection: how many rows disagree, and by how much
blue19_dur[blue19_dur['tripduration'] != blue19_dur['duration_manual']].count()
blue19_dur_diff = blue19_dur[blue19_dur['tripduration'] != blue19_dur['duration_manual']]
(blue19_dur_diff['duration_manual'] - blue19_dur_diff['tripduration']).value_counts()
# dates on which mismatches occur (mask now built from blue19_dur itself,
# not Blue19, for consistency -- the indexes are aligned either way)
blue19_dur.loc[blue19_dur['tripduration'] != blue19_dur['duration_manual'], 'Date'].unique().astype('datetime64[D]');
# keep only rows where both durations agree and the trip lasted <= 24h
blue19_dur_clean = blue19_dur[(blue19_dur['tripduration'] == blue19_dur['duration_manual']) & (blue19_dur['tripduration'] <= 86400)].copy()
blue19_dur_clean['start_dayofweek'] = blue19_dur_clean['starttime'].dt.dayofweek
blue19_dur_clean['start_hour'] = blue19_dur_clean['starttime'].dt.hour
# average trips per day-of-week and mean duration (inspection only)
blue19_dur_clean.groupby('start_dayofweek')['tripduration'].count()/blue19_dur_clean.groupby('start_dayofweek')['Date'].nunique();
blue19_dur_clean.groupby('start_dayofweek')['tripduration'].mean();
# average hourly trip counts per (day-of-week, hour), busiest first
dw_hr_count = (blue19_dur_clean.groupby(['start_dayofweek', 'start_hour'])['tripduration'].count()/blue19_dur_clean.groupby(['start_dayofweek', 'start_hour'])['Date'].nunique()).reset_index(name = 'trip_count').sort_values('trip_count', ascending = False)
print(dw_hr_count.head(10))
```
###### **Hourly behavior**
* Longer trip at midnight
* Users spend less time at early mornings on weekdays
* The average trip duration is about 26 minutes = 1560 seconds
```
# Longest and shortest mean trip durations by (day of week, hour).
blue19_dur_clean.groupby(['start_dayofweek', 'start_hour'])['tripduration'].mean().reset_index().sort_values('tripduration', ascending = False).head(10)
dw_hr_duration = blue19_dur_clean.groupby(['start_dayofweek', 'start_hour'])['tripduration'].mean().reset_index().sort_values('tripduration', ascending = True)
print(dw_hr_duration.head(10))
# categorical day-of-week gives one line per day in the plot
dw_hr_duration['start_dayofweek'] = dw_hr_duration['start_dayofweek'].astype('category')
sns.relplot(x = 'start_hour', y = 'tripduration', data = dw_hr_duration, hue = 'start_dayofweek', kind = 'line',linewidth = 2, palette=['lightblue','blue','dodgerblue','darkblue','teal','darkred','red']).fig.suptitle('Hourly Trip Durations by Day of Week',fontsize = 15);
```
###### **Weekly behavior**
* More popular on weekdays than on weekends
* Average usage times are longer on weekends
* 8 am and 5 pm are the most popular
Note that the dt.dayofweek: <br><br/>
0: Monday; 1: Tuesday; 2: Wednesday; 3: Thursday; 4: Friday; 5: Saturday; 6: Sunday
```
# categorical day-of-week gives one line per day in the plot
dw_hr_count['start_dayofweek'] = dw_hr_count['start_dayofweek'].astype('category')
sns.relplot(x = 'start_hour', y = 'trip_count', data = dw_hr_count, hue = 'start_dayofweek', kind = 'line', palette=['lightblue','blue','dodgerblue','darkblue','teal','darkred','red'], linewidth = 2).fig.suptitle('Hourly Trip Counts by Day of Week',fontsize = 15);
```
###### **Holiday**
* Less rides on holidays
* People prefer riding on working day
```
# with_cnt: table with a new column 'count', which represents the daily total count
# new: table that only contains 'date', 'count', 'season', 'Holiday'
with_cnt=pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/with_cnt.csv',index_col=0)
new=pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/new.csv',index_col=0)
# mean daily trip count for holidays (1) vs non-holidays (0)
holiday=with_cnt.groupby('Holiday')[['count']].mean()
holiday.rename(columns={'count':'count_mean'},inplace=True)
holiday=holiday.reset_index()
holiday
sns.catplot(x='Holiday',y='count', data=with_cnt, kind='box',palette='Blues_r' )
plt.title('Holiday Trip Counts', fontsize=15)
plt.xlabel("Holiday",fontsize=12);
plt.ylabel("Trip Counts",fontsize=12);
```
###### **Monthly**
* More bike using in Sep, Aug and July
* Seasonal pattern
```
# Monthly totals; 'percentage' is each month's share, 'mean' an
# approximate daily average.
month=new.groupby('month')[['count']].sum()
month=month.reset_index().sort_values(by='count',ascending=False)
# NOTE(review): percentage divides by with_cnt.shape[0] -- this is only a
# trip share if with_cnt has one row per trip; verify against the data.
month['percentage']=round(month['count']/with_cnt.shape[0]*100,2)
# NOTE(review): divides by a flat 30 days per month (approximation).
month['mean']=round(month['count']/30,2)
month
sns.catplot(x='month',y='mean', data=month, kind='bar',ci=None,palette = 'Blues_r')
plt.title('Average Bike Use On Month', fontsize=15)
plt.ylabel('mean count')
```
###### **Season**
* More bike using in Summer and Autumn
Good weather and more opportunities
```
# Seasonal totals/means and a box plot of daily trip counts by season.
season=new.groupby('season').agg({'count':['sum','mean']})
season=season.reset_index()
season
sns.catplot(x='season',y='count', data=with_cnt,kind='box', palette='Blues_r');
plt.title('Seasonal Trip Counts', fontsize= 15);
plt.xlabel("Season",fontsize=12);
plt.ylabel("Trip Counts",fontsize=12);
```
##### **Weather**
To observe the relationship between trip counts and the effect of weather conditions such as wind, rain, snow, and temperature, twin-axis plots are created for each category to visually compare the relationships.
```
# Aggregate trips to monthly counts alongside monthly mean weather
# statistics (wind, precipitation, snowfall, average temperature).
aggweather = {'tripduration':'count','AWND':'mean','PRCP':'mean', 'SNOW':'mean', 'TAVG':'mean'}
weathercount =Blue19.resample('M', on = 'starttime').agg(aggweather).reset_index()
weathercount = weathercount.rename(columns = {'starttime':'month','tripduration':'count'})
# 2x2 grid: one weather variable per panel, each with trip counts (red)
# on a secondary y-axis for visual comparison.
fig, axis = plt.subplots(2, 2, figsize=(15,10))
axis[0][0].set_title("TAVG",fontsize = 20)
axis[0][1].set_title("Wind",fontsize = 20)
axis[1][0].set_title("Snow",fontsize = 20)
axis[1][1].set_title("Precipitation",fontsize = 20)
# fixed typo in the user-facing figure title ("Condistions" -> "Conditions")
fig.suptitle('Trip Counts and Weather Conditions 2019', fontsize = 25)
# sample a few blues from the colormap so each panel gets its own shade
evenly_spaced_interval = np.linspace(0, 1, 8)
colors = [plt.cm.Blues_r(x) for x in evenly_spaced_interval]
ax = weathercount.plot(x = 'month' , y = 'TAVG',legend = False, ax =axis [0][0],linewidth = 5,fontsize = 12,color = colors[1])
ax2 = ax.twinx()
weathercount.plot(x="month", y="count", ax=ax2,legend = False, color="r",linewidth = 2,fontsize = 12)
axa = weathercount.plot(x = 'month' , y = 'AWND',legend = False, ax =axis [0][1],linewidth = 5,fontsize = 12,color = colors[2])
ax2 = axa.twinx()
weathercount.plot(x="month", y="count", ax=ax2,legend = False, color="r",linewidth = 2,fontsize = 12)
axb = weathercount.plot(x = 'month' , y = 'SNOW',legend = False, ax =axis [1][0],linewidth = 5,fontsize = 12,color = colors[3])
ax2 = axb.twinx()
weathercount.plot(x="month", y="count", ax=ax2,legend = False, color="r",linewidth = 2,fontsize = 12)
axc = weathercount.plot(x = 'month' , y = 'PRCP',legend = False, ax =axis [1][1],linewidth = 5,fontsize = 12,color = colors[4])
ax2 = axc.twinx()
weathercount.plot(x="month", y="count", ax=ax2,legend = False, color="r",linewidth = 2,fontsize = 12)
ax.figure.legend(fontsize = 12);
```
From the graph presented above, it is very clear to observe that high temperature on average occurs higher bike rentals than lower temperature. Inverse to snowfall, when more snow, there are fewer bike trips. The same trend applies to average wind speed, in which the wind speed in Boston is observed to be seasonal, higher in winter months, and lower in summer months.
It is reasonable to state that temperature, snowfall, and wind speed are seasonal, which later in the analysis. The seasonal factor can be considered in predicting the number of bike rentals.
As for precipitation, it is not very clear from the whole year observation; therefore a monthly line plot is performed to observe closer the relationship between precipitation and trip counts.
```
import datetime
import calendar
# Daily precipitation vs. daily trip counts, one subplot per month.
aggprcp = {'tripduration':'count','PRCP':'mean'}
prcpcount = Blue19.resample('D', on = 'starttime').agg(aggprcp).reset_index()
prcpcount['monthcat'] = pd.DatetimeIndex(prcpcount['starttime']).month
prcpcount = prcpcount.rename(columns = {'tripduration':'count'})
evenly_spaced_interval = np.linspace(0, 1, 12)
colors = [plt.cm.Blues_r(x) for x in evenly_spaced_interval]
fig, axis = plt.subplots(4, 3, figsize=(20,12))
axis = axis.ravel()  # flatten the 4x3 grid so month i maps to axis[i-1]
fig.suptitle('Monthly Trip Counts and Precipitation', fontsize = 25)
for i in prcpcount['monthcat'].unique():
    prcp = prcpcount[prcpcount['monthcat'] == i]
    # Precipitation (blue) on the left axis, trip counts (red) on a twin right axis.
    ax = prcp.plot(x = 'starttime' , y = 'PRCP',legend =False, ax =axis [i-1],color = colors[4], linewidth = 4);
    ax.set(xlabel = 'Time (day in month)')
    ax2 = ax.twinx()
    prcp.plot(x="starttime", y="count", ax=ax2, color="r",legend =False, linewidth = 2);
    axis[i-1].set_title(calendar.month_name[i],fontsize = 18)
# Build one de-duplicated figure legend from the handles of both twin axes.
handles, labels = ax.get_legend_handles_labels()
lgd = dict(zip(labels, handles))
handles2, labels2 = ax2.get_legend_handles_labels()
lgd[labels2[0]] = handles2[0]
ax.figure.legend(lgd.values(), lgd.keys(), fontsize =20);
```
From the plots above, it is apparent that when the blue line peaks, the red line drops. In other words, when higher rainfall is observed, trip counts decrease accordingly. Though a direct relationship cannot be assumed, it is rational to state that whether raining or not is a factor affecting Bluebike renting.
#### **Location**
To observe the relationship between trip counts and location.
Start station, end station, and bike docks data were extracted to analyze.
###### Data Processing
```
# Station-level tables: join per-station trip counts with dock counts from the
# official Bluebikes station list.
stations = pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/current_bluebikes_stations.csv',index_col=0,skiprows=1);
popular_start=Blue19.loc[:,['start station name']].value_counts(ascending =False).to_frame().reset_index()
popular_start.columns = [ 'start station name', 'trip counts']
popular_end=Blue19.loc[:,['end station name']].value_counts(ascending = False).to_frame().reset_index()
popular_end.columns = [ 'end station name', 'trip counts']
start_docks=popular_start.merge(stations, left_on = 'start station name', right_on = 'Name', how = 'left')
start_docks =start_docks[['start station name', 'trip counts','Total docks']]
end_docks=popular_end.merge(stations, left_on = 'end station name', right_on = 'Name', how = 'left')
end_docks =end_docks[['end station name', 'trip counts','Total docks']]
#### District Dock Count
# NOTE(review): `bikestations` is defined in an earlier cell (not shown here);
# presumably the station table with a District column -- verify against that cell.
districtdock = bikestations.groupby('District').sum().reset_index().sort_values(by = 'Total docks',ascending =False)
districtdock = districtdock[['District','Total docks']]
#### District Trip Count
districtct = Blue19.groupby('District').count().reset_index().sort_values(by = 'starttime',ascending =False)
# .count() counts every column; the arbitrary 'age' column is renamed to act as the trip count.
districtct.rename({'age':'Trip Count'}, axis = 1, inplace = True)
districtct = districtct[['District', 'Trip Count']]
#### District Station Count
station_count=Blue19.pivot_table(values='start station name', index=['District'], aggfunc=pd.Series.nunique)
#### Merged Table
disbar = districtct.merge(station_count, how = 'left', on ='District')
disbar.rename({'start station name': 'Station Count'}, axis = 1, inplace = True)
disbar =disbar.merge(districtdock, how = 'left', on = 'District')
disbar
```
###### District Information
```
disbar
from matplotlib.lines import Line2D
# Two point plots: district trip counts vs. dock counts, and vs. station counts.
# Legends are built from Line2D proxies because series on twin axes do not
# share a legend automatically.
fig, axis = plt.subplots(1, 2, figsize=(17,6))
axis[0].set_title("District Trip Count vs. Dock Counts",fontsize = 20)
axis[1].set_title("District Trip Count vs. Station Counts",fontsize = 20)
ax = sns.pointplot(x ='District', y='Trip Count' ,data=disbar, ax=axis[0])
ax2 = ax.twinx()
ax =sns.pointplot( x ='District', y='Total docks', data=disbar, ax = ax2, color = 'lightcoral')
custom_lines = [Line2D([0], [0], lw=2),
                Line2D([0], [0], color='lightcoral', lw=2)]
ax.legend(custom_lines, ['Trip Count', 'Total Docks'],fontsize = 15);
axa = sns.pointplot(x ='District', y='Trip Count' ,data=disbar,ax=axis[1])
ax2 = axa.twinx()
axb =sns.pointplot( x ='District', y='Station Count', data=disbar, ax = ax2, color = 'darkseagreen')
custom_lines = [Line2D([0], [0], lw=2),
                Line2D([0], [0], color='darkseagreen', lw=2)]
axb.legend(custom_lines, ['Trip Count', 'Station Count'],fontsize = 15);
```
> By observing the two point plots of dock counts and station counts against trip counts, it can be seen that both trends almost align, except that Cambridge exhibits a dip relative to demand, which can potentially result in a supply shortage of bikes.
###### Station Information
```
# Station-level tables: per-station trip counts joined with each station's dock count.
def _with_docks(counts, key):
    # Left-join the station list on its 'Name' column, keep count + dock columns.
    merged = counts.merge(stations, left_on=key, right_on='Name', how='left')
    return merged[[key, 'trip counts', 'Total docks']]

start_docks = _with_docks(popular_start, 'start station name')
start_docks.head()
end_docks = _with_docks(popular_end, 'end station name')
end_docks.head()
```
> Boston and Cambridge have the highest trip counts if we group by the district. From the charts and the comparison between the number of docks in the starting station and the end station and their trip counts, the dock number of the starting station and the end station with the highest frequency do not align. Therefore, it can be concluded that more trip counts come with more bike docks, which is also proper to match both factors along with its geographic locations to optimize profit for BLUEbikes.
#### **Correlation Matrix**
* Season and average temperature are highly correlated with daily count
* Choose features with absolute scores > 0.15 to put in our model
```
# Correlation analysis: map categorical columns to integer codes so .corr()
# can include them alongside the numeric features.
cor_table= pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/cor_table.csv', index_col=0)
a=cor_table.copy()
dic={'winter':0, 'spring':1,'summer':2,'autumn':3, 'Subscriber':1, 'Customer':0,'Cambridge':0, 'Boston':1, 'Somerville':2, 'Brookline':3, 'Everett':4, 'nan':5, 'U':0,'M':1, 'F':2}
c=a.replace(dic)
c.head(3)
corrMatrix =c.corr() ##correlation calculation
cmap=sns.diverging_palette(245, 1, as_cmap=True, n = 6,)
def magnify():
    """Return pandas-Styler table styles that enlarge correlation cells on hover."""
    rules = [
        ("th", [("font-size", "7pt")]),
        ("td", [("padding", "0em 0em")]),
        ("th:hover", [("font-size", "12pt")]),
        ("tr:hover td:hover", [("max-width", "200px"), ("font-size", "12pt")]),
    ]
    return [{"selector": selector, "props": props} for selector, props in rules]
# Styled correlation table. BUG FIX: the original had a stray trailing "\" after
# .set_table_styles(magnify()), which continued the line into the following
# `mask = ...` statement and raised a SyntaxError. Note: since more statements
# follow in this cell, the styled table only renders if this expression is
# moved to the end of a cell or displayed explicitly.
corrMatrix.style.background_gradient(cmap, axis=1,vmin=-0.6)\
    .set_properties(**{'max-width': '80px', 'font-size': '10pt'})\
    .set_caption("Correlation")\
    .set_precision(2)\
    .set_table_styles(magnify())

# Mask the upper triangle so each variable pair is drawn only once.
mask = np.zeros_like(corrMatrix)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
    f, ax = plt.subplots(figsize=(7, 5))
    ax = sns.heatmap(corrMatrix, mask=mask, vmax=.3, square=True, cmap='Blues')
```
> From the correlation heat map, it can be observed that month, season, and average temperature have the highest correlation with trip counts. Snow, precipitation, and wind speed follow. These factors are considered in the regression models described in a later section. The month variable will not be included since it accounts for the time sequence.
### Predicting with Models
#### Machine Learning - Trip Counts Prediction
First, we need to sort out useful features and label for machine learning. However, due to our limitation with knowledge of time series analysis using scikit-learn package, the time-related features are temporarily discarded so that simply linear regression models can be applied in this dataset.
#### Machine Learning Dataset Processing
```
# Copy from original dataset
BlueML = Blue19.copy()
# Select feasible predictors and create dummy variables for categorical variables
BlueML_pre = pd.get_dummies(BlueML[['tripduration', 'starttime', 'month', 'season', 'Holiday', 'gender', 'age', 'AWND', 'PRCP',
'SNOW', 'TAVG', 'District']], drop_first = True)
# Resample data on a daily basis
BlueML_1 = BlueML_pre.resample('D', on = 'starttime').agg({'tripduration':len, 'Holiday':np.mean,
'AWND':np.mean, 'PRCP':np.mean, 'SNOW':np.mean, 'TAVG':np.mean,
'season_spring':np.mean, 'season_summer':np.mean, 'season_winter':np.mean,
'District_Brookline':np.mean, 'District_Cambridge':np.mean,
'District_Everett':np.mean, 'District_Somerville':np.mean})
BlueML_1.columns = ['trip_count', 'Holiday', 'AWND', 'PRCP', 'SNOW', 'TAVG',
'season_spring', 'season_summer', 'season_winter', 'District_Brookline', 'District_Cambridge',
'District_Everett', 'District_Somerville']
# Remove NAs after resampling
BlueML_1 = BlueML_1[BlueML_1['AWND'].notnull()]
# Import scikit-learn packages
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
```
##### First Model
According to the correlation analysis, both season and temperature are features having the highest correlation with daily trip counts. So, in the first model, the two features are included to predict daily trip counts.
The rooted mean squared error of the first model is 2,088.75 and the R-squared is 0.66. It turns out that season and temperature have some predictive power but are still not strong enough.
```
# First try: Use season and temperature to predict trip counts
X1_df = BlueML_1[['season_spring', 'season_summer', 'season_winter', 'TAVG']]
y1_df = BlueML_1['trip_count']
# Choose 30% of data as testing data
X1_train, X1_test, y1_train, y1_test = train_test_split(X1_df, y1_df, test_size = .3, random_state = 833)
# Fit the linear regression model and predict y_test
model_1 = LinearRegression()
model_1.fit(X1_train, y1_train)
y1_model = model_1.predict(X1_test)
# Calculate mean_squared_error and r^2 score
rmse1 = np.sqrt(mean_squared_error(y1_test, y1_model))
r2_1 = r2_score(y1_test, y1_model)
print('The RMSE is {}.'.format(round(rmse1, 2)))
print('The R2_score is {}.'.format(round(r2_1, 2)))
# Plot actual y vs. predicted y
sns.relplot(x = y1_test, y = y1_model, kind = 'scatter')
plt.xlabel('Actual Trip Count')
plt.ylabel('Predicted Trip Count')
plt.title('Linear Regression (Actual vs. Predicted)')
# 45-degree reference line: points on it are perfectly predicted.
plt.plot([0, 14000], [0, 14000], linewidth = 1, c = 'red', linestyle = '--')
```
##### Second Model
Next, other variables also have some correlation with daily trip counts, and it is worth observing whether the machine learning model can predict the label better when these variables are added. So, 'AWND' (average wind speed), 'PRCP' (precipitation), 'SNOW' (snowfall), and 'Holiday' (whether the day is a holiday) are included in training the machine learning model to see if daily trip counts can be predicted better.
This time, the RMSE decreases to 1,563.58 and the R-squared is 0.81. With other weather features and the 'Holiday' variable, the RMSE and R-squared is greatly improved. Also, when observing the scatter plot of actual testing data and predicted data, the dots are more concentrated to the 45-degree line, which means that the predicted daily trip counts are closer to the actual daily trip counts.
```
# Next model: Use season, weather, and holiday features to predict trip counts
X2_df = BlueML_1[['season_spring', 'season_summer', 'season_winter', 'Holiday', 'AWND', 'PRCP', 'SNOW', 'TAVG']]
y2_df = BlueML_1['trip_count']
# Choose 20% of data as testing data (comment previously said 30%, but test_size is .2)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2_df, y2_df, test_size = .2, random_state = 833)
# Fit the linear regression model and predict y_test
model_2 = LinearRegression()
model_2.fit(X2_train, y2_train)
y2_model = model_2.predict(X2_test)
# Calculate mean_squared_error and r^2 score
rmse2 = np.sqrt(mean_squared_error(y2_test, y2_model))
r2_2 = r2_score(y2_test, y2_model)
print('The RMSE is {}.'.format(round(rmse2, 2)))
print('The R2_score is {}.'.format(round(r2_2, 2)))
# Plot actual y vs. predicted y
sns.relplot(x = y2_test, y = y2_model, kind = 'scatter')
plt.xlabel('Actual Trip Count')
plt.ylabel('Predicted Trip Count')
plt.title('Linear Regression (Actual vs. Predicted)')
# 45-degree reference line: points on it are perfectly predicted.
plt.plot([0, 14000], [0, 14000], linewidth=1, c='red', linestyle='--')
```
#### Machine Learning - Daily Bike Inflow or Outflow of District
Also, we are interested to know if it is available to use the features in the dataset to predict daily inflow and outflow of bikes in districts. For example, if there are so many bikes outflow from Boston district to Somerville District, then the BLUEbikes company should dispatch more bikes from other districts to Boston in order to make sure its sufficiency.
##### Load Data
```
# Load data: Difference Count of bike IDs between those start at District X and those end at District X
BlueReg = pd.read_csv('/content/drive/Shareddrives/MSBA BA 775 Team 1/Bluebikes/Bluereg.csv', index_col = 0);
BlueReg.head()
# Discard one dummy variable of the same categorical varible to avoid multicollinearity
BlueReg_pre = BlueReg.drop(['District_Somerville', 'season_winter'], axis = 1)
```
##### Fit the Model
Now, we use weather, season, district, and holiday features to predict the daily bike inflow or outflow of districts. The result turns out that our target variable is not well-explained by our predictors. The RMSE is 18.59 while the R-squared is 0.18. The RMSE seems to be low, but as for this target variable, the standard deviation is only 21.18.
Despite the limitation of our knowledge in more advanced topics of scikit-learn, we suggest that if we can make use of the time-series analysis, we may probably come up with a better result.
```
# Select feasible features for LinearRegression
X3_df = BlueReg_pre[['PRCP', 'SNOW', 'TAVG', 'AWND', 'season_spring', 'season_summer', 'season_autumn', 'District_Boston',
'District_Brookline', 'District_Cambridge', 'District_Everett', 'Holiday']]
y3_df = BlueReg_pre['Bike Count Difference']
# Choose 30% of data as testing data
X3_train, X3_test, y3_train, y3_test = train_test_split(X3_df, y3_df, test_size = .3, random_state = 833)
# Fit the linear regression model and predict y_test
model_3 = LinearRegression()
model_3.fit(X3_train, y3_train)
y3_model = model_3.predict(X3_test)
# Import mean_squared_error and r^2 score from scikit-learn and calculate
rmse3 = np.sqrt(mean_squared_error(y3_test, y3_model))
r2_3 = r2_score(y3_test, y3_model)
print('The RMSE is {}.'.format(round(rmse3, 2)))
print('The R2_score is {}.'.format(round(r2_3, 2)))
# Plot actual y vs. predicted y
sns.relplot(x = y3_test, y = y3_model, kind = 'scatter')
plt.xlabel('Actual Trip Count')
plt.ylabel('Predicted Trip Count')
plt.title('Linear Regression (Actual vs. Predicted)')
plt.plot([-100, 100], [-100, 100], linewidth=1, c='red', linestyle='--')
plt.xlim((-120,80))
# Zoom in to see if there is any pattern
# Plot actual y vs. predicted y
sns.relplot(x = y3_test, y = y3_model, kind = 'scatter')
plt.xlabel('Actual Trip Count')
plt.ylabel('Predicted Trip Count')
plt.title('Linear Regression (Actual vs. Predicted)')
plt.xlim((-30,30))
plt.ylim((-30,30))
plt.plot([-100, 100], [-100, 100], linewidth=1, c='red', linestyle='--')
# The standard deviation of the target variable
print(round(BlueReg_pre['Bike Count Difference'].std(), 2))
```
### Conclusion
> From the analysis presented above, it can be concluded that trip counts are affected by seasonal and weather factors such as precipitation, wind speed, snow, and temperature. Whether or not it is a holiday also affects trip counts. From our sklearn LinearRegression model predicting daily trip counts, our model is fairly accurate with given information.
>However, there exist limitations when attempting to come up with a practical prediction. We aimed to predict the hourly difference between the number of picked-up and returned bikes at each station, which would let us suggest timely allocation of bikes to stations in need. First, the variables in the datasets are not sufficient to explain the reasons for user pickups and returns at a given station. There are also equipment limitations when processing large datasets: RAM space runs out when running on Google Colab (we chose to work on Google Colab because the dataset sizes are too big, which causes loss of data when transferring; the exception is importing and saving the dataset to Google Drive, which can be easily accessed from Google Colab). Most importantly, insufficient knowledge in performing time-series prediction kept us from building a more accurate model for the BLUEbikes dataset; therefore, we settled on linear regression.
> There involves further learning and practicing in our technical skills to deliver ideal results. Limitations of this project will be our motivation moving forward.
| github_jupyter |
# Dog Breed Identification
This example is based on a very popular [Udacity project](https://github.com/udacity/dog-project). The goal is to classify images of dogs according to their breed.
In this notebook, you will take the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed.

In this real-world setting, you will need to piece together a series of models to perform different tasks.
### The Road Ahead
We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Install requirements & download datasets
* [Step 1](#step1): Import Datasets
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN (VGG16) to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Create a CNN (ResNet-50) to Classify Dog Breeds (using Transfer Learning)
* [Step 6](#step6): Write your Algorithm
* [Step 7](#step7): Test Your Algorithm
---
<a id='step0'></a>
## Step 0: Install requirements & download datasets
### Download datasets
```
# Download and unzip the dog dataset
!wget https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip
!unzip -qo dogImages.zip
!rm dogImages.zip
# Download the VGG-16 bottleneck features for the dog dataset
!wget https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogVGG16Data.npz -O bottleneck_features/DogVGG16Data.npz
# Download the ResNet50 features for the dog dataset
!wget https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogResnet50Data.npz -O bottleneck_features/DogResnet50Data.npz
```
### Below is the `imports` cell. This is where we import all the necessary libraries
```
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
import os
import random
import cv2
import matplotlib.pyplot as plt
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from tqdm import tqdm
from keras.applications.resnet50 import preprocess_input, decode_predictions
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
import extract_bottleneck_features as ebf
from keras import optimizers
```
### Install requirements
```
!pip3 install --user -r requirements/requirements.txt
```
### Pipeline Parameters
This is the `pipeline-parameters` cell. Use it to define the parameters you will use for hyperparameter tuning. These variables will be converted to KFP pipeline parameters, so make sure they are used as global variables throughout the notebook.
```
# Hyperparameters exposed as pipeline parameters; used as globals below.
nodes_number = 256      # width of the hidden Dense layer in the ResNet-50 transfer model
learning_rate = 0.0001  # Adam learning rate for compiling the ResNet-50 transfer model
```
<a id='step1'></a>
## Step 1: Import Datasets
### Import Dog Dataset
In the code cell below, we import a dataset of dog images. We populate a few variables through the use of the `load_files` function from the scikit-learn library:
- `train_files`, `valid_files`, `test_files` - numpy arrays containing file paths to images
- `train_targets`, `valid_targets`, `test_targets` - numpy arrays containing onehot-encoded classification labels
- `dog_names` - list of string-valued dog breed names for translating label
```
# define function to load train, test, and validation datasets
def load_dataset(path):
data = load_files(path)
dog_files = np.array(data['filenames'])
dog_targets = np_utils.to_categorical(np.array(data['target']), 133)
return dog_files, dog_targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('dogImages/train')
valid_files, valid_targets = load_dataset('dogImages/valid')
test_files, test_targets = load_dataset('dogImages/test')
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
# print statistics about the dataset
print('There are %d total dog categories.' % len(dog_names))
print('There are %s total dog images.' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training dog images.' % len(train_files))
print('There are %d validation dog images.' % len(valid_files))
print('There are %d test dog images.'% len(test_files))
dog_files_short = train_files[:100]
```
---
<a id='step2'></a>
## Step 2: Detect Dogs
In this section, we use a pre-trained [ResNet-50](http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006) model to detect dogs in images. Our first line of code downloads the ResNet-50 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). Given an image, this pre-trained ResNet-50 model returns a prediction (derived from the available categories in ImageNet) for the object that is contained in the image.
```
# define ResNet50 model
# Pre-trained on ImageNet; used below as a whole-image classifier for dog detection.
ResNet50_mod = ResNet50(weights='imagenet')
```
### Pre-process the Data
When using TensorFlow as backend, Keras CNNs require a 4D array (which we'll also refer to as a 4D tensor) as input, with shape
$$
(\text{nb_samples}, \text{rows}, \text{columns}, \text{channels}),
$$
where `nb_samples` corresponds to the total number of images (or samples), and `rows`, `columns`, and `channels` correspond to the number of rows, columns, and channels for each image, respectively.
The `path_to_tensor` function below takes a string-valued file path to a color image as input and returns a 4D tensor suitable for supplying to a Keras CNN. The function first loads the image and resizes it to a square image that is $224 \times 224$ pixels. Next, the image is converted to an array, which is then resized to a 4D tensor. In this case, since we are working with color images, each image has three channels. Likewise, since we are processing a single image (or sample), the returned tensor will always have shape
$$
(1, 224, 224, 3).
$$
The `paths_to_tensor` function takes a numpy array of string-valued image paths as input and returns a 4D tensor with shape
$$
(\text{nb_samples}, 224, 224, 3).
$$
Here, `nb_samples` is the number of samples, or number of images, in the supplied array of image paths. It is best to think of `nb_samples` as the number of 3D tensors (where each 3D tensor corresponds to a different image) in your dataset!
```
def path_to_tensor(img_path):
    """Load one image file and return it as a (1, 224, 224, 3) tensor for Keras."""
    # loads RGB image as PIL.Image.Image type
    img = image.load_img(img_path, target_size=(224, 224))
    # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
    x = image.img_to_array(img)
    # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
    return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
    """Stack per-image 4-D tensors into one (n_samples, 224, 224, 3) array,
    showing a tqdm progress bar while loading."""
    return np.vstack([path_to_tensor(p) for p in tqdm(img_paths)])
```
### Making Predictions with ResNet-50
Getting the 4D tensor ready for ResNet-50, and for any other pre-trained model in Keras, requires some additional processing. First, the RGB image is converted to BGR by reordering the channels. All pre-trained models have the additional normalization step that the mean pixel (expressed in RGB as $[103.939, 116.779, 123.68]$ and calculated from all pixels in all images in ImageNet) must be subtracted from every pixel in each image. This is implemented in the imported function `preprocess_input`. If you're curious, you can check the code for `preprocess_input` [here](https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py).
Now that we have a way to format our image for supplying to ResNet-50, we are now ready to use the model to extract the predictions. This is accomplished with the `predict` method, which returns an array whose $i$-th entry is the model's predicted probability that the image belongs to the $i$-th ImageNet category. This is implemented in the `ResNet50_predict_labels` function below.
By taking the argmax of the predicted probability vector, we obtain an integer corresponding to the model's predicted object class, which we can identify with an object category through the use of this [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
```
def ResNet50_predict_labels(img_path):
    """Return the ImageNet class index ResNet-50 predicts for the image at img_path."""
    # returns prediction vector for image located at img_path
    img = preprocess_input(path_to_tensor(img_path))
    return np.argmax(ResNet50_mod.predict(img))
```
### Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained ResNet-50 model, we need only check if the `ResNet50_predict_labels` function above returns a value between 151 and 268 (inclusive).
We use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
```
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
prediction = ResNet50_predict_labels(img_path)
return ((prediction <= 268) & (prediction >= 151))
```
### Assess the Dog Detector
We use the code cell below to test the performance of the `dog_detector` function.
- What percentage of the images in `dog_files_short` have a detected dog?
```
# Fraction of the 100 sample dog images in which a dog is detected.
n_dog = np.sum([dog_detector(img) for img in dog_files_short])
dog_percentage = n_dog/len(dog_files_short)
print('{:.0%} of the files have a detected dog'.format(dog_percentage))
```
---
<a id='step3'></a>
## Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have a function for detecting dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 1%. In later steps, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
Be careful with adding too many trainable layers! More parameters means longer training, which means you are more likely to need a GPU to accelerate the training process. Thankfully, Keras provides a handy estimate of the time that each epoch is likely to take; you can extrapolate this estimate to figure out how long it will take for your algorithm to train.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have great difficulty in distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
<img src="images/Brittany_02625.jpg" width="100"> | <img src="images/Welsh_springer_spaniel_08203.jpg" width="200">
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
<img src="images/Curly-coated_retriever_03896.jpg" width="200"> | <img src="images/American_water_spaniel_00648.jpg" width="200">
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
<img src="images/Labrador_retriever_06457.jpg" width="150"> | <img src="images/Labrador_retriever_06455.jpg" width="240"> | <img src="images/Labrador_retriever_06449.jpg" width="220">
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
### Pre-process the Data
We rescale the images by dividing every pixel in every image by 255.
```
# pre-process the data for Keras: rescale pixel values from [0, 255] to [0, 1]
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
```
### Model Architecture
Create a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:
model.summary()
Here is a sample architecture of such a model:

```
# Define the model architecture
# Three Conv/MaxPool stages double the filter count (16 -> 32 -> 64) while the
# pooling halves spatial resolution; global average pooling then feeds a
# 133-way softmax (one node per dog breed).
model = Sequential()
model.add(Conv2D(input_shape=train_tensors.shape[1:],filters=16,kernel_size=2, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=32,kernel_size=2, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=64,kernel_size=2, activation='relu'))
model.add(MaxPooling2D())
model.add(GlobalAveragePooling2D())
model.add(Dense(133,activation='softmax'))
model.summary()
```
### Compile the Model
```
# Categorical cross-entropy matches the one-hot breed targets.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
```
### Train the Model
Train your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss.
```
### specify the number of epochs that you would like to use to train the model.
# Train for 20 epochs only when using a GPU, otherwise it will take a lot of time
# epochs = 20
# Train for 1 epoch when using a CPU.
epochs = 1
os.makedirs('saved_models', exist_ok=True)
# Checkpoint keeps only the weights with the best validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
                               verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
          validation_data=(valid_tensors, valid_targets),
          epochs=epochs, batch_size=32, callbacks=[checkpointer], verbose=1)
```
### Load the Model with the Best Validation Loss
```
# Restore the checkpointed weights with the best validation loss.
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
```
### Test the Model
Try out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 1%.
```
# get index of predicted dog breed for each image in test set
dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy: percentage of test images whose predicted breed index
# matches the one-hot target's argmax
test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
```
---
<a id='step4'></a>
## Step 4: Create a CNN (VGG16) to Classify Dog Breeds (using Transfer Learning)
To reduce training time without sacrificing accuracy, we show you how to train a CNN using Transfer Learning. In the following step, you will get a chance to use Transfer Learning to train your own CNN.
Transfer Learning is a fine-tuning of a network that was pre-trained on some big dataset with new classification layers. The idea behind is that we want to keep all the good features learned in the lower levels of the network (because there's a high probability the new images will also have those features) and just learn a new classifier on top of those. This tends to work well, especially with small datasets that don't allow for a full training of the network from scratch (it's also much faster than a full training).
One way of doing Transfer Learning is by using bottlenecks. A bottleneck, also called embedding, is the internal representation of one of the input samples in the network, at a certain depth level. We can think of a bottleneck at level N as the output of the network stopped after N layers. Why is this useful? Because we can precompute the bottlenecks for all our samples using a pre-trained network and then simulate the training of only the last layers of the network without having to actually recompute all the (expensive) parts up to the bottleneck point.
Here we will use pre-computed bottlenecks, but if you want to take a look at how you could generate them yourself, take a look [here](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
### Obtain Bottleneck Features
```
# Pre-computed VGG-16 bottleneck features for the train/valid/test splits.
bottleneck_features = np.load('bottleneck_features/DogVGG16Data.npz')
train_VGG16 = bottleneck_features['train']
valid_VGG16 = bottleneck_features['valid']
test_VGG16 = bottleneck_features['test']
```
### Model Architecture
The model uses the pre-trained VGG-16 architecture as a fixed feature extractor, where the last convolutional output of VGG-16 is fed as input to our model. We only add a global average pooling layer and a fully connected layer, where the latter contains one node for each dog category and is equipped with a softmax.
```
# Transfer-learning head: global average pooling over the VGG-16 feature maps,
# then a 133-way softmax classifier (one node per breed).
VGG16_model = Sequential()
VGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))
VGG16_model.add(Dense(133, activation='softmax'))
VGG16_model.summary()
```
### Compile the Model
```
# Categorical cross-entropy matches the one-hot breed targets.
VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
```
### Train the Model
```
os.makedirs('saved_models', exist_ok=True)
# Checkpoint keeps only the weights with the best validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
                               verbose=1, save_best_only=True)
VGG16_model.fit(train_VGG16, train_targets,
                validation_data=(valid_VGG16, valid_targets),
                epochs=20, batch_size=32, callbacks=[checkpointer], verbose=1)
```
### Load the Model with the Best Validation Loss
```
# Restore the checkpointed weights with the best validation loss.
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
```
### Test the Model
Now, we can use the CNN to test how well it identifies breed within our test dataset of dog images. We print the test accuracy below.
```
# get index of predicted dog breed for each image in test set
VGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]
# report test accuracy: percentage of test images whose predicted breed index
# matches the one-hot target's argmax
test_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
```
### Predict Dog Breed with the Model
```
def VGG16_predict_breed(img_path):
    """Predict a dog-breed name for the image at *img_path* using the VGG-16 head.

    Entries of dog_names look like "NNN.Breed_name"; only the text after
    the final '.' is returned.
    """
    # Frozen VGG-16 base -> bottleneck features for this single image.
    features = ebf.extract_VGG16(path_to_tensor(img_path))
    # Classifier head yields one probability per breed.
    probabilities = VGG16_model.predict(features)
    return dog_names[np.argmax(probabilities)].split('.')[-1]
# Show first dog image
img_path = test_files[0]
img = cv2.imread(img_path)
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(cv_rgb)
plt.show()
# Print groundtruth and predicted dog breed
gtruth = np.argmax(test_targets[0])
gtruth = dog_names[gtruth].split('.')[-1]
pred = VGG16_predict_breed(img_path)
print("Groundtruth dog breed: {}".format(gtruth))
print("Predicted dog breed: {}".format(pred))
```
---
<a id='step5'></a>
## Step 5: Create a CNN (ResNet-50) to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
In Step 4, we used transfer learning to create a CNN using VGG-16 bottleneck features. In this section, we will use the bottleneck features from a different pre-trained model.
### Obtain Bottleneck Features
```
bottleneck_features = np.load('bottleneck_features/DogResnet50Data.npz')
train_ResNet50 = bottleneck_features['train']
valid_ResNet50 = bottleneck_features['valid']
test_ResNet50 = bottleneck_features['test']
```
### Model Architecture
Create a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model.
```
ResNet50_model = Sequential()
ResNet50_model.add(Flatten(input_shape=train_ResNet50.shape[1:]))
# The layer below includes a hyperparameter (nodes_number)
ResNet50_model.add(Dense(int(nodes_number), activation='relu'))
ResNet50_model.add(Dense(133, activation='softmax'))
# Summarize the layers of the model
ResNet50_model.summary()
```
### Compile the Model
```
### Learning rate (learning_rate) is a hyperparameter in this example
opt = optimizers.Adam(float(learning_rate))
ResNet50_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
```
### Train the Model
Train your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss.
```
os.makedirs('saved_models', exist_ok=True)
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.ResNet50.hdf5',
verbose=1, save_best_only=True)
### Train the model.
ResNet50_model.fit(train_ResNet50, train_targets,
validation_data=(valid_ResNet50, valid_targets),
epochs=20, batch_size=32, callbacks=[checkpointer], verbose=1)
```
### Load the Model with the Best Validation Loss
```
### Load the model weights with the best validation
ResNet50_model.load_weights('saved_models/weights.best.ResNet50.hdf5')
```
### Test the Model
Try out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 60%.
```
# get index of predicted dog breed for each image in test set.
# Fixed: the original issued one predict() call per image inside a list
# comprehension; a single batched predict() is equivalent and much faster.
ResNet50_predictions = list(np.argmax(ResNet50_model.predict(test_ResNet50), axis=1))
# report test accuracy (fraction of argmax predictions matching the
# one-hot ground-truth labels, as a percentage)
test_accuracy_resnet = 100*np.sum(np.array(ResNet50_predictions)==np.argmax(test_targets, axis=1))/len(ResNet50_predictions)
print('Test accuracy: %.4f%%' % test_accuracy_resnet)
```
### Predict Dog Breed with the Model
```
def predict_breed(img_path):
    """Return the predicted dog-breed name for the image at *img_path*.

    The image is converted to a 4-D tensor, pushed through the frozen
    ResNet-50 base for bottleneck features, and classified by the
    transfer-learning head.  dog_names entries look like "NNN.Breed_name",
    so only the part after the last '.' is returned.
    """
    features = ebf.extract_Resnet50(path_to_tensor(img_path))
    probabilities = ResNet50_model.predict(features)
    best = np.argmax(probabilities)
    return dog_names[best].split('.')[-1]
# Show first dog image
img_path = test_files[0]
img = cv2.imread(img_path)
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(cv_rgb)
plt.show()
# Print groundtruth and predicted dog breed
gtruth = np.argmax(test_targets[0])
gtruth = dog_names[gtruth].split('.')[-1]
pred = predict_breed(img_path)
print("Groundtruth dog breed: {}".format(gtruth))
print("Predicted dog breed: {}".format(pred))
```
---
<a id='step6'></a>
## Step 6: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a dog or not. Then,
- if a __dog__ is detected in the image, return the predicted breed.
```
def return_breed(img_path):
    """Report whether *img_path* contains a dog; if so, print and return the breed.

    Returns the predicted breed name, or None when no dog is detected.
    """
    # Guard clause: bail out early when the detector sees no dog.
    if not dog_detector(img_path):
        print('No dog detected')
        return None
    print('Dog detected')
    breed = predict_breed(img_path)
    print('This photo looks like a(n) {}'.format(breed))
    return breed
# Run for the second dog image
img_path = test_files[1]
img = cv2.imread(img_path)
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(cv_rgb)
plt.show()
pred = return_breed(img_path)
```
---
<a id='step7'></a>
## Step 7: Test Your Algorithm
In this section, you will take your new algorithm for a spin! If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
```
for img_path in sorted(glob("check_images/*")):
print(img_path)
img = cv2.imread(img_path)
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(cv_rgb)
plt.show()
return_breed(img_path)
```
### Pipeline Metrics
This is the `pipeline-metrics` cell. Use it to define the pipeline metrics that KFP will produce for every pipeline run. Kale will associate each one of these metrics to the steps that produced them. Also, you will have to choose one of these metrics as the Katib search objective metric.
```
print(test_accuracy_resnet)
```
| github_jupyter |
# Visual English
### Eryk Wdowiak
This notebook attempts to illustrate the English text that we're using to develop a neural machine translator.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
%matplotlib inline
import nltk
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from nltk.collocations import *
# import string
# import re
from wordcloud import WordCloud
import mxnet as mx
from mxnet import gluon
from mxnet import nd
import gluonnlp as nlp
from data import transform_data_word2vec, preprocess_dataset
from model import SG, CBOW
from utils import print_time
context = mx.cpu()
## I thought this function would do far more than just run NLTK tokenizer.
## We'll leave it in place. It keeps our options open.
def process_line(line):
    """Tokenize one line of text into a list of word tokens (thin NLTK wrapper)."""
    tokens = word_tokenize(line)
    return tokens
## read in the lemmatized data
df = pd.read_csv('dataset/train-mparamu_v2-lemmatized.en',header=None)
df.columns = ['en_text']
# df.head()
```
### frequencies
```
## flatten data to count words
proc_eng = list(map(process_line, df.en_text))
flat_eng = [item for sublist in proc_eng for item in sublist]
freq_eng = FreqDist(flat_eng)
freq_eng.most_common(20)
```
### counts
```
# create counts
eng_bar_words = [x[0] for x in freq_eng.most_common(25)]
eng_bar_counts = [x[1] for x in freq_eng.most_common(25)]
# put data into dictionary
eng_dict = dict(zip(eng_bar_words, eng_bar_counts))
# set the color of our bar graphs
color = cm.viridis_r(np.linspace(.4,.8, 30))
fig, axs = plt.subplots(figsize=(8,4))
axs.bar(eng_bar_words, eng_bar_counts , color=color)
axs.title.set_text('most common English lemmas')
for ax in fig.axes:
plt.sca(ax)
plt.xticks(rotation=45)
plt.tight_layout(pad=0)
plt.savefig('wb-en_lemmas.png')
plt.show()
# create cloud of Sicilian words by frequency
wordcloud = WordCloud(colormap='Spectral').generate_from_frequencies(eng_dict)
plt.figure(figsize=(10,10), facecolor='k')
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.tight_layout(pad=0)
plt.savefig('wb-en_lemma-cloud.png')
plt.show()
```
### make wordcloud from embeddings
```
## load datafile (so that we can retrieve vocabulary)
datafile = 'dataset/train-mparamu_v3-lemmatized.en.tsv'
## CBOW model
model = CBOW
parmfile = './logs/en-cbow-r4-e01.params'
fname_insert = 'cbow'
## skipgram model
# model = SG
# parmfile = './logs/en-skip-r2-e24.params'
# fname_insert = 'skip'
## both trained with hyperparameters:
output_dim = 300
batch_size = 128
## load the data
data = nlp.data.TSVDataset(datafile)
data, vocab, idx_to_counts = preprocess_dataset( data )
## load the model
embedding = model(token_to_idx=vocab.token_to_idx, output_dim=output_dim,
batch_size=batch_size, #num_negatives=num_negatives,
negatives_weights=mx.nd.array(idx_to_counts))
embedding.load_parameters(parmfile)
## get the word vectors
wvecs = embedding.embedding_out.weight.data()
## count words with at least "min_words" appearances
min_words = 10
num_over_min = len( np.array(idx_to_counts)[ np.array(idx_to_counts)>= min_words ] )
print('vocabulary length: ' + str(len(vocab)))
print('lemmas over ' + str(min_words) + ' times: ' + str(num_over_min))
## pairwise cosine similarity
def cos_sim(wordx, wordy):
    """Cosine similarity between the embedding vectors of two vocabulary words.

    Looks both words up in the module-level `vocab` and the embedding
    weight matrix `wvecs`; returns an mxnet NDArray scalar.
    """
    xx = wvecs[vocab.token_to_idx[wordx],]
    yy = wvecs[vocab.token_to_idx[wordy],]
    return nd.dot(xx, yy) / (nd.norm(xx) * nd.norm(yy))
## full matrix of cosine similarity
def cos_mat( vecs ):
    """All-pairs cosine-similarity matrix for the rows of *vecs*.

    Computes the Gram matrix and divides by the outer product of the row
    norms, so entry (i, j) is the cosine of rows i and j.
    """
    ## dot product divided by the norms
    xtx = nd.dot( vecs , vecs.T)
    nmx = nd.sqrt( nd.diag(xtx) ).reshape((-1,1))
    cnm = nd.dot( nmx , nmx.T )
    return xtx / cnm
## create "WC Dict" ("word-to-cosine dictionary") for wordcloud
def mk_wcdict(word,k_words):
## where to start? first two tokens are: <BOS> <EOS>
sv_start = 2
## get cosine matrix
cosmat = cos_mat( wvecs[sv_start:-1,] )
## get the row of cosines
idx_to_lookup = vocab.token_to_idx[word] - sv_start
row_looked_up = cosmat[idx_to_lookup,]
## nearest neighbors by cosine similarity
knn_cosmat = row_looked_up.argsort()[::-1][1:k_words+1].astype(int).asnumpy()
## indexes of nearest neighbors in vocab list
knn_vocab_idx = list(knn_cosmat + sv_start)
## get the words and cosine measures
knn_vocab_words = [vocab.idx_to_token[idx] for idx in knn_vocab_idx]
knn_vocab_cosines = [cosmat[idx_to_lookup,idx].asnumpy()[0] for idx in knn_cosmat]
## return the dictionary for wordcloud
return dict(zip(knn_vocab_words,knn_vocab_cosines))
# create a cloud of 25 words for Don Chisciotti!
knn_wc_dict = mk_wcdict('chisciotti',25)
wordcloud = WordCloud(colormap='Spectral').generate_from_frequencies(knn_wc_dict)
plt.figure(figsize=(10,10), facecolor='k')
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.tight_layout(pad=0)
fname = 'wc-en-' + fname_insert + '_chisciotti.png'
plt.savefig(fname)
plt.show()
# create a cloud of 25 words for Sanciu Panza!
knn_wc_dict = mk_wcdict('sanciu',25)
wordcloud = WordCloud(colormap='Spectral').generate_from_frequencies(knn_wc_dict)
plt.figure(figsize=(10,10), facecolor='k')
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.tight_layout(pad=0)
fname = 'wc-en-' + fname_insert + '_sanciu.png'
plt.savefig(fname)
plt.show()
```
### bigrams and trigrams
```
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
eng_bi_finder = BigramCollocationFinder.from_words(flat_eng)
# eng_bi_finder.apply_freq_filter(5)
eng_bi_scored = eng_bi_finder.score_ngrams(bigram_measures.raw_freq)
eng_bi_scored[:10]
eng_bi_pmi_finder = BigramCollocationFinder.from_words(flat_eng)
eng_bi_pmi_finder.apply_freq_filter(5)
eng_bi_pmi_scored = eng_bi_pmi_finder.score_ngrams(bigram_measures.pmi)
eng_bi_pmi_scored[0:10]
eng_tri_finder = TrigramCollocationFinder.from_words(flat_eng)
# eng_tri_finder.apply_freq_filter(5)
eng_tri_scored = eng_tri_finder.score_ngrams(trigram_measures.raw_freq)
eng_tri_scored[:10]
eng_tri_pmi_finder = TrigramCollocationFinder.from_words(flat_eng)
eng_tri_pmi_finder.apply_freq_filter(5)
eng_tri_pmi_scored = eng_tri_pmi_finder.score_ngrams(trigram_measures.pmi)
eng_tri_pmi_scored[0:10]
```
| github_jupyter |
```sql
-- Create a new table
-- People and their (optional) pets; pet_name and pet_age may be NULL
-- even though pet_type is required.
CREATE TABLE people (
  first_name VARCHAR(30) NOT NULL,
  has_pet BOOLEAN DEFAULT true,
  pet_type VARCHAR(10) NOT NULL,
  pet_name VARCHAR(30),
  pet_age INT
);
```
```sql
-- Creating tables for PH-EmployeeDB
CREATE TABLE departments (
dept_no VARCHAR(4) NOT NULL,
dept_name VARCHAR(40) NOT NULL,
PRIMARY KEY (dept_no),
UNIQUE (dept_name)
);
```
```sql
-- Creating tables for PH-EmployeeDB
-- Creating tables for PH-EmployeeDB
CREATE TABLE departments (
dept_no VARCHAR(4) NOT NULL,
dept_name VARCHAR(40) NOT NULL,
PRIMARY KEY (dept_no),
UNIQUE (dept_name)
);
CREATE TABLE employees (
emp_no INT NOT NULL,
birth_date DATE NOT NULL,
first_name VARCHAR NOT NULL,
last_name VARCHAR NOT NULL,
gender VARCHAR NOT NULL,
hire_date DATE NOT NULL,
PRIMARY KEY (emp_no)
);
CREATE TABLE dept_manager(
dept_no VARCHAR(4) NOT NULL,
emp_no INT NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
FOREIGN KEY (emp_no) REFERENCES employees (emp_no),
FOREIGN KEY (dept_no) REFERENCES departments (dept_no),
PRIMARY KEY (emp_no, dept_no)
);
CREATE TABLE salaries (
emp_no INT NOT NULL,
salary INT NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
FOREIGN KEY (emp_no) REFERENCES employees (emp_no),
PRIMARY KEY (emp_no)
);
CREATE TABLE dept_emp (
emp_no INT NOT NULL,
dept_no VARCHAR(4) NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
FOREIGN KEY (emp_no) REFERENCES employees (emp_no),
FOREIGN KEY (dept_no) REFERENCES departments (dept_no),
PRIMARY KEY (emp_no, dept_no)
);
CREATE TABLE titles(
emp_no INT NOT NULL,
title VARCHAR NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
FOREIGN KEY (emp_no) REFERENCES employees (emp_no)
);
```
```sql
-- Employees born in the 1952-1955 retirement window
SELECT first_name, last_name
FROM employees
WHERE birth_date BETWEEN '1952-01-01' AND '1955-12-31';
-- Born in 1952
SELECT first_name, last_name
FROM employees
WHERE birth_date BETWEEN '1952-01-01' AND '1952-12-31';
-- Born in 1953 (22875 rows)
SELECT first_name, last_name
FROM employees
WHERE birth_date BETWEEN '1953-01-01' AND '1953-12-31';
-- Born in 1954 (23228 rows)
SELECT first_name, last_name
FROM employees
WHERE birth_date BETWEEN '1954-01-01' AND '1954-12-31';
-- Born in 1955 (23104 rows)
SELECT first_name, last_name
FROM employees
WHERE birth_date BETWEEN '1955-01-01' AND '1955-12-31';
-- Retirement eligibility: born 1952-1955 AND hired 1985-1988 (41380 rows)
SELECT first_name, last_name
FROM employees
WHERE (birth_date BETWEEN '1952-01-01' AND '1955-12-31')
AND (hire_date BETWEEN '1985-01-01' AND '1988-12-31');
-- Number of employees retiring
SELECT COUNT(first_name)
FROM employees
WHERE (birth_date BETWEEN '1952-01-01' AND '1955-12-31')
AND (hire_date BETWEEN '1985-01-01' AND '1988-12-31');
```
| github_jupyter |
<a href="https://colab.research.google.com/github/queiyanglim/trading_algorithm/blob/master/brent_wti_copula.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!git clone https://github.com/queiyanglim/trading_algorithm.git
import os
os.getcwd()
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import sys
from trading_algorithm.oil_trading.brent_wti_regression_spread.regression_signal import rolling_regression_trading_signal, rolling_regression_trading_rule
import matplotlib as mpl
from scipy.integrate import quad
from scipy.optimize import minimize
from statsmodels.distributions.empirical_distribution import ECDF
from scipy.stats import kendalltau
Epsilon = sys.float_info.epsilon
mpl.rcParams["figure.figsize"] = (20, 3)
plt.style.use("classic")
# Data
path = "https://github.com/queiyanglim/trading_algorithm/blob/master/oil_trading/data/daily.csv?raw=true"
df_pull = pd.read_csv(path, header=[0,1], index_col = 0)
# Prepare data
brent = df_pull.brent.CLOSE
brent.name = "brent"
wti = df_pull.wti.CLOSE
wti.name = "wti"
df_raw= pd.concat([brent, wti], axis = 1)
df = df_raw.copy()
df = df.dropna()
df.head()
df.plot(figsize = (15, 3), grid = True, title = "1st Future: Brent and WTI")
plt.show()
df.head()
spread = df.brent - df.wti
spread.plot(figsize=(15,3), grid = True)
```
## Copula Spread Strategy
1. Clayton Copula
2. Gumbel Copula
3. Frank Copula
Source: [quantconnect](https://www.quantconnect.com/tutorials/strategy-library/pairs-trading-copula-vs-cointegration)
Kendall's tau: [Kendall's Tau](https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient)
```
# Generate Return series
df_ret = np.log(df / df.shift(1)).dropna()
df_ret.head()
# Kendall Tau and Theta Parameter Estimation
def estimate_parameter(copula_family, tau):
    """Estimate the copula dependence parameter theta from Kendall's tau.

    Parameters
    ----------
    copula_family : str
        One of 'clayton', 'gumbel' or 'frank'.
    tau : float
        Kendall's rank-correlation coefficient (tau != 1 required for the
        closed-form inversions).

    Returns
    -------
    float
        Estimated theta for the requested family.

    Raises
    ------
    ValueError
        If *copula_family* is not recognised (the original silently
        returned None in that case).
    """
    if copula_family == "clayton":
        # Closed form: tau = theta / (theta + 2)  =>  theta = 2*tau/(1-tau).
        return 2 * tau / (1 - tau)
    if copula_family == "gumbel":
        # Closed form: tau = 1 - 1/theta  =>  theta = 1/(1-tau).
        return 1 / (1 - tau)
    if copula_family == "frank":
        # No closed form: tau relates to theta through the first-order
        # Debye function, so minimise the squared residual numerically.
        # `Epsilon` (module-level machine epsilon) avoids the 0/0 at t=0.
        integrand = lambda t: t / (np.exp(t) - 1)
        frank_argmin = lambda theta: ((tau - 1) / 4
                                      - (quad(integrand, Epsilon, theta)[0] / theta - 1) / theta) ** 2
        return float(minimize(frank_argmin, x0=4, method="BFGS", tol=1e-5).x)
    raise ValueError("unknown copula family: {!r}".format(copula_family))
# C(u,v;theta) Log-likelihood function
def copula_log_pdf(copula_family, theta, u, v):
    """Log-density log c(u, v; theta) of the chosen bivariate copula.

    u, v are pseudo-observations in (0, 1); theta is the family's
    dependence parameter.  Supported families: 'clayton', 'frank',
    'gumbel'.  NOTE(review): an unrecognised family leaves `pdf` unbound
    and raises NameError at the final np.log -- confirm callers only pass
    these three names.
    """
    if copula_family == 'clayton':
        pdf = (theta + 1) * ((u ** (-theta) + v ** (-theta) - 1) ** (-2 - 1 / theta)) * (u ** (-theta - 1) * v ** (-theta - 1))
    elif copula_family == 'frank':
        num = -theta * (np.exp(-theta) - 1) * (np.exp(-theta * (u + v)))
        denom = ((np.exp(-theta * u) - 1) * (np.exp(-theta * v) - 1) + (np.exp(-theta) - 1)) ** 2
        pdf = num / denom
    elif copula_family == 'gumbel':
        A = (-np.log(u)) ** theta + (-np.log(v)) ** theta
        c = np.exp(-A ** (1 / theta))
        pdf = c * (u * v) ** (-1) * (A ** (-2 + 2 / theta)) * ((np.log(u) * np.log(v)) ** (theta - 1)) * (1 + (theta - 1) * A ** (-1 / theta))
        # TODO: issue when u = 1 and v = 1
        # Debug aid: log(1) = 0 makes A = 0 and the pdf NaN; report it.
        if math.isnan(A) or math.isnan(c) or math.isnan(pdf):
            print("error: ", "u: ", u, "v: ", v, "theta: ", theta, "\n")
            print("A:", A, "c:", c, "pdf:", pdf)
    return np.log(pdf)
```
Long spread: y - x if mispricing index, $MI_{y|x} < 0.05$ and $MI_{x|y} > 0.95$
Short spread: y - x if mispricing index, $MI_{y|x} > 0.95$ and $MI_{x|y} < 0.05$
```
def misprice_index(u, v, copula_family, theta):
    """Return the pair of conditional copula probabilities (mispricing indices).

    Given pseudo-observations u, v in (0, 1) and the copula family/theta,
    returns (P(U<u | V=v), P(V<v | U=u)).
    """
    if copula_family == 'clayton':
        # Shared inner term of both conditionals.
        core = (u ** (-theta) + v ** (-theta) - 1) ** (-1 / theta - 1)
        p_u_given_v = v ** (-theta - 1) * core  # P(U<u|V=v)
        p_v_given_u = u ** (-theta - 1) * core  # P(V<v|U=u)
    elif copula_family == 'frank':
        eu = np.exp(-theta * u) - 1
        ev = np.exp(-theta * v) - 1
        denom = eu * ev + (np.exp(-theta) - 1)
        p_u_given_v = (eu * ev + eu) / denom
        p_v_given_u = (eu * ev + ev) / denom
    elif copula_family == 'gumbel':
        s = (-np.log(u)) ** theta + (-np.log(v)) ** theta
        c_uv = np.exp(-s ** (1 / theta))  # Gumbel copula C(u, v)
        p_u_given_v = c_uv * (s ** ((1 - theta) / theta)) * (-np.log(v)) ** (theta - 1) * (1.0 / v)
        p_v_given_u = c_uv * (s ** ((1 - theta) / theta)) * (-np.log(u)) ** (theta - 1) * (1.0 / u)
    return p_u_given_v, p_v_given_u
# Confidence bands
top_band = 0.95
bottom_band = 1 - top_band
# Rolling estimation window (number of rows of `df`).
update_window = 200
# timestamp -> desired position (1 long spread, -1 short spread)
signal = {}
long_short = 0
for i in range(update_window, len(df)+1):
    # Rolling window of the last `update_window` prices, turned into log returns.
    df_ret = df.iloc[:i].tail(update_window)
    df_ret = np.log(df_ret / df_ret.shift(1)).dropna()
    current_time = df_ret.index[-1]
    # y/v: brent, x/u: wti
    x = df_ret.wti
    y = df_ret.brent
    # New Signals:
    x_ret_new, y_ret_new = x[-1], y[-1]
    # Convert return series of brent and wti into uniform values U and V using ECDF
    ecdf_x, ecdf_y = ECDF(x), ECDF(y)
    u, v = [ecdf_x(s) for s in x], [ecdf_y(s) for s in y]
    # Additional control to prevent feeding u=1.0 and v=1.0 which
    # results in invalid pdf
    # NOTE(review): only the first (1.0, 1.0) pair is removed; repeated
    # occurrences would slip through -- confirm intent.
    zip_u_v = list(zip(u,v))
    if (1.0, 1.0) in zip_u_v:
        one_one_index = zip_u_v.index((1.0, 1.0))
        del zip_u_v[one_one_index]
    tau, tau_p_val = kendalltau(x,y)
    # Fit each candidate family via Kendall's tau and score it by AIC.
    AIC_criteria = {}
    for fam in ["clayton", "frank", "gumbel"]:
        param = estimate_parameter(fam, tau)
        lpdf = [ copula_log_pdf(fam, param, x, y) for x, y in zip_u_v]
        lpdf = np.nan_to_num(lpdf)
        log_likelihood = np.sum(lpdf)
        AIC_criteria[fam] = [param, -2 * log_likelihood + 2 * (1) ] # k = 1 for 1 parameter
    # Select the best copula based on the lowest AIC
    best_copula = min(AIC_criteria.items(), key = lambda x: x[1][1])
    # Mispricing indices for today's return pair under the chosen copula.
    u_new = ecdf_x(x_ret_new)
    v_new = ecdf_y(y_ret_new)
    MI_u_v, MI_v_u = misprice_index(u_new, v_new, best_copula[0], best_copula[1][0])
    # M_v_u < 0.05 and M_u_v > 0.95, long spread
    # M_v_u > 0.95 and M_u_v < 0.05, short spread
    if (MI_u_v < bottom_band and MI_v_u > top_band):
        long_short = -1
    elif (MI_u_v > top_band and MI_v_u < bottom_band):
        long_short = 1
    # else:
    #     long_short = 0
    # No band hit -> keep the previous position (long_short unchanged).
    signal[current_time] = long_short
```
```
df["position"] = pd.DataFrame(signal,index=[0]).T
df_with_pos = df
df_with_pos["spread"] = df_with_pos.brent - df_with_pos.wti
df_with_pos = df_with_pos.dropna()
position = []
def append_position_data(slice_data, brent_pos, wti_pos):
    """Record the positions held at *slice_data*'s timestamp.

    Appends one dict to the module-level `position` list.  Fixed: the
    original read the global `data` for the timestamp instead of the
    `slice_data` parameter it was given (they only coincided by accident
    at the single call site inside the backtest loop).
    """
    global position
    position.append({"timestamp": slice_data.name,
                     "brent_pos": brent_pos,
                     "wti_pos": wti_pos})
style = dict(size=15, color='gray')
df_with_pos.index = pd.to_datetime(df_with_pos.index, format="%Y-%m-%d")
ax = df_with_pos.spread.plot(figsize=(30,10), grid = True)
current_pos = 0
traded = []
for i, data in df_with_pos.iterrows():
# Open position
if current_pos == 0:
# On long signal
if data.position != 0:
brent_pos = data.position * np.floor(capital/data.brent)
wti_pos = - data.position * np.floor(capital/data.wti)
current_pos = data.position
open_price = data.spread
if data.position == 1:
ax.text(i, data.spread, "buy")
elif data.position == -1:
ax.text(i, data.spread, "sell")
# When there is open position, decide if need to close
elif current_pos != 0:
# If current position is not equal to signal, close position
if current_pos != data.position:
close_price = data.spread
traded.append(current_pos * (close_price - open_price))
brent_pos, wti_pos, current_pos = 0, 0, 0
ax.text(i, data.spread, "close")
append_position_data(data, brent_pos, wti_pos)
df_pos = pd.DataFrame(position).set_index("timestamp")
plt.bar(range(len(traded)), traded)
plt.show()
plt.plot(np.cumsum(traded))
```
| github_jupyter |

# 02 - RDD: RESILENT DISTRIBUTED DATASETS
Colección inmutable y distribuida de elementos que pueden manipularse en paralelo
Un programa Spark opera sobre RDDs:
Spark automáticamente distribuye los datos y paraleliza las operaciones
```
!pip install pyspark
# Create apache spark context
from pyspark import SparkContext
sc = SparkContext(master="local", appName="Mi app")
# Stop apache spark context
sc.stop()
```
## Creación de RDDs
Se pueden crear de dos formas:
1. Paralelizando una colección en el programa driver
2. Leyendo datos de un fichero
```
# 1. Paralelizando una colección en el programa driver
rdd1 = sc.parallelize([1,2,3])
print("rdd1: ", rdd1.glom().collect())
import numpy as np
rdd2 = sc.parallelize(np.array(range(100)))
print("rdd2: ", rdd2.glom().collect())
# 2. Leyendo datos de un fichero
quijote = sc.textFile("data/quijote.txt")
print(quijote.take(1000))
```
## Particiones
Spark divide el RDD en en conjunto de particiones
- El número de particiones por defecto es función del tamaño del
cluster o del número de bloques del fichero (p.e. bloques HDFS)
- Se puede especificar otro valor en el momento de crear el RDD
```
import numpy as np
# Default partitioning: Spark chooses the partition count.
rdd1 = sc.parallelize(np.array(range(100)))
# Fixed: the original printed rdd2 (left over from an earlier cell)
# while labelling the output "rdd1".
print("rdd1: ", rdd1.glom().collect())
print(rdd1.getNumPartitions())
print("------------")
# Explicitly request 6 partitions.
rdd2 = sc.parallelize(np.array(range(100)), 6)
print(rdd2.glom().collect())
print(rdd2.getNumPartitions())
```
## Transformaciones
Operaciones sobre RDDs que devuelven un nuevo RDD
- Se computan de forma “perezosa” ( *lazy* )
- Normalmente, ejecutan una función (anónima o no) sobre cada uno de
los elementos del RDD de origen
```
quijs = quijote.filter(lambda l: "Quijote" in l)
sanchs = quijote.filter(lambda l: "Sancho" in l)
quijssancs = quijs.intersection(sanchs)
quijssancs.cache()
```
### Transformaciones elemento-a-elemento
Generan un nuevo RDD a partir de uno dado
Todas las transformaciones:
- filter(func)
- map(func)
- flatMap(func)
- sample(withReplacement, faction, seed=None)
- distinct(func)
- groupBy(func)
---
* `map(func)` aplica una función a los elementos de un RDD
```
# Obtener los valores positivos de un rango de números
rdd = sc.parallelize(range(-5, 5)) # Rango [-5, 5)
filtered_rdd = rdd.filter(lambda x: x >= 0) # Devuelve los positivos
assert filtered_rdd.collect() == [0, 1, 2, 3, 4]
print(filtered_rdd.collect())
print([0, 1, 2, 3, 4])
```
* `filter(func)` filtra los elementos de un RDD
```
def add1(x):
    """Return *x* incremented by one."""
    return x + 1
print("RDD original:", filtered_rdd.collect())
squared_rdd = (filtered_rdd
.map(add1) # Añade 1 a cada elemento del RDD
.map(lambda x: (x, x*x))) # Para cada elemento, obtén una tupla (x, x**2)
print("Resultado Esperado:", [(1, 1), (2, 4), (3, 9), (4, 16), (5, 25)])
print("Resultado obtenido", squared_rdd.collect())
```
* `flatMap(func)` igual que `map`, pero “aplana” la salida
```
squaredflat_rdd = (filtered_rdd
.map(add1)
.flatMap(lambda x: (x, x*x))) # Da la salida en forma de lista
print("Resultado Esperado:", [1, 1, 2, 4, 3, 9, 4, 16, 5, 25])
print("Resultado obtenido", squaredflat_rdd.collect())
```
* `sample(withReplacement, fraction, seed=None)` devuelve una muestra del RDD
* `withReplacement` - si True, cada elemento puede aparecer varias veces en la muestra
* `fraction` - tamaño esperado de la muestra como una fracción del tamaño del RDD
- **sin reemplazo**: probabilidad de seleccionar un elemento, su valor debe ser [0, 1]
- **con reemplazo**: número esperado de veces que se escoge un elemento, su valor debe ser >= 0
* `seed` - semilla para el generador de números aleatorios
```
srdd1 = squaredflat_rdd.sample(False, 0.5)
srdd2 = squaredflat_rdd.sample(True, 2)
srdd3 = squaredflat_rdd.sample(False, 0.8, 14)
print('s1={0}\ns2={1}\ns3={2}'.format(srdd1.collect(), srdd2.collect(), srdd3.collect()))
```
* `distinct()` devuelve un nuevo RDD sin duplicados
* El orden de la salida no está definido
```
distinct_rdd = squaredflat_rdd.distinct()
print("Original: ", squaredflat_rdd.collect())
print("Resultado: ", distinct_rdd.collect())
```
* `groupBy(func)` devuelve un RDD con los datos agrupados en formato clave/valor,
usando una función para obtener la clave
```
grouped_rdd = distinct_rdd.groupBy(lambda x: x%3)
print(grouped_rdd.collect())
print([(x,sorted(y)) for (x,y) in grouped_rdd.collect()])
```
---
### Transformaciones sobre dos RDDs
Operaciones tipo conjunto sobre dos RDDs
Transformaciones disponibles:
* `rdda.union(rddb)`
* `rdda.intersection(rddb)`
* `rdda.subtract(rddb)`
* `rdda.cartesian(rddb)`
---
* `rdda.union(rddb)` devuelve un RDD con los datos de los dos de partida
```
rdda = sc.parallelize(['a', 'b', 'c'])
rddb = sc.parallelize(['c', 'd', 'e'])
rddu = rdda.union(rddb)
print("Resultado Esperado:", ['a', 'b', 'c', 'c', 'd', 'e'])
print("Resultado obtenido", rddu.collect())
```
* `rdda.intersection(rddb)` devuelve un RDD con los datos comunes en ambos RDDs
```
rddi = rdda.intersection(rddb)
print("Resultado Esperado:", ['c'])
print("Resultado obtenido", rddi.collect())
```
* `rdda.subtract(rddb)` devuelve un RDD con los datos del primer RDD menos los del segundo
```
rdds = rdda.subtract(rddb)
print("Resultado Esperado:", ['a', 'b'])
print("Resultado obtenido", rdds.collect())
rddc = rdda.cartesian(rddb)
print("Resultado Esperado:", [('a','c'),('a','d'),('a','e'),('b','c'),('b','d'),('b','e'),('c','c'), ('c','d'), ('c','e')])
print("Resultado obtenido", rddc.collect())
```
## Acciones
Obtienen datos de salida a partir de los RDDs
* Devuelven valores al driver o al sistema de almacenamiento
* Fuerzan a que se realicen las transformaciones pendientes
### Acciones sobre RDDs simples
Obtienen datos (simples o compuestos) a partir de un RDD
#### Principales acciones de agregación: `reduce` y `fold`
* `reduce(op)` combina los elementos de un RDD en paralelo, aplicando un operador
* El operador de reducción debe ser un *monoide conmutativo* (operador binario asociativo y conmutativo)
* Primero se realiza la redución a nivel de partición y luego se van reduciendo los valores intermedios
```
rdd = sc.parallelize(range(1,10), 8) # rango [1, 10)
print(rdd.glom().collect())
# Reducción con una función lambda
p = rdd.reduce(lambda x,y: x*y) # r = 1*2*3*4*5*6*7*8*9 = 362880
print("1*2*3*4*5*6*7*8*9 = {0}".format(p))
# Reducción con un operador predefinido
from operator import add
s = rdd.reduce(add) # s = 1+2+3+4+5+6+7+8+9 = 45
print("1+2+3+4+5+6+7+8+9 = {0}".format(s))
# Prueba con un operador no conmutativo
p = rdd.reduce(lambda x,y: x-y) # r = 1-2-3-4-5-6-7-8-9 = -43
print("1-2-3-4-5-6-7-8-9 = {0}".format(p))
# No funciona con RDDs vacíos
#sc.parallelize([]).reduce(add)
```
* `fold(cero, op)` versión general de `reduce`:
* Debemos proporcionar un valor inicial `cero` para el operador
* El valor inicial debe ser el valor identidad para el operador (p.e. 0 para suma; 1 para producto, o una lista vacía para concatenación de listas)
* Permite utilizar RDDs vacíos
* La función `op` debe ser un monoide conmutativo para garantizar un resultado consistente
* Comportamiento diferente a las operaciones `fold` de lenguajes como Scala
* El operador se aplica a nivel de partición (usando `cero` como valor inicial), y finalmente entre todas las particiones (usando `cero`de nuevo)
* Para operadores no conmutativos el resultado podría ser diferente del obtenido mediante un `fold` secuencial
```
rdd = sc.parallelize([[1, 2, 3, 4], [-10, -9, -8, -7, -6, -5, -4], ['a', 'b', 'c']])
print(rdd.glom().collect())
f = rdd.fold([], lambda x,y: x+y)
print(f)
# Se puede hacer un fold de un RDD vacío
sc.parallelize([]).fold(0, add)
```
#### Otras acciones de agregación: `aggregate`
* `aggregate(cero,seqOp,combOp)`: Devuelve una colección agregando los elementos del RDD usando dos funciones:
1. `seqOp` - agregación a nivel de partición: se crea un acumulador por partición (inicializado a `cero`) y se agregan los valores de la partición en el acumulador
2. `combOp` - agregación entre particiones: se agregan los acumuladores de todas las particiones
* Ambas agregaciones usan un valor inicial `cero` (similar al caso de `fold`).
* Versión general de `reduce` y `fold`
* La primera función (`seqOp`) puede devolver un tipo, U, diferente del tipo T de los elementos del RDD
* `seqOp` agregar datos de tipo T y devuelve un tipo U
* `combOp` agrega datos de tipo U
* `cero` debe ser de tipo U
* Permite devolver un tipo diferente al de los elementos del RDD de entrada.
```
l = [1, 2, 3, 4, 5, 6, 7, 8]
rdd = sc.parallelize(l)
# acc es una tupla de tres elementos (List, Double, Int)
# En el primer elemento de acc (lista) le concatenamos los elementos del RDD al cuadrado
# en el segundo, acumulamos los elementos del RDD usando multiplicación
# y en el tercero, contamos los elementos del RDD
seqOp = (lambda acc, val: (acc[0]+[val*val],
acc[1]*val,
acc[2]+1))
# Para cada partición se genera una tupla tipo acc
# En esta operación se combinan los tres elementos de las tuplas
combOp = (lambda acc1, acc2: (acc1[0]+acc2[0],
acc1[1]*acc2[1],
acc1[2]+acc2[2]))
a = rdd.aggregate(([], 1., 0), seqOp, combOp)
print(a)
print("Resultado Esperado:", a[1])
print("Resultado obtenido", 8.*7.*6.*5.*4.*3.*2.*1.)
print("--------------")
print("Resultado Esperado:", a[2])
print("Resultado obtenido", len(l))
```
#### Acciones para contar elementos
- `count()` devuelve un entero con el número exacto de elementos del RDD
- `countApprox(timeout, confidence=0.95)` versión aproximada de `count()` que devuelve un resultado potencialmente incompleto en un tiempo máximo, incluso si no todas las tareas han finalizado. (Experimental).
- `timeout` es un entero largo e indica el tiempo en milisegundos
- `confidence` probabilidad de obtener el valor real. Si `confidence` es 0.90 quiere decir que si se ejecuta múltiples veces, se espera que el 90% de ellas se obtenga el valor correcto. Valor [0,1]
- `countApproxDistinct(relativeSD=0.05)` devuelve una estimación del número de elementos diferentes del RDD. (Experimental).
- `relativeSD` – exactitud relativa (valores más pequeños implican menor error, pero requieren más memoria; debe ser mayor que 0.000017).
```
# 10,000 elements over 16 partitions; only 20 distinct values (0..19).
rdd = sc.parallelize([i % 20 for i in range(10000)], 16)
#print(rdd.collect())
# Exact count of all elements.
print("Número total de elementos: {0}".format(rdd.count()))
# Exact count of distinct elements (requires a full distinct() pass).
print("Número de elementos distintos: {0}".format(rdd.distinct().count()))
# Approximate count: wait at most 1 ms, with confidence 0.4.
print("Número total de elementos (aprox.): {0}".format(rdd.countApprox(1, 0.4)))
# Approximate distinct count with relative accuracy 0.5 (coarse estimate).
print("Número de elementos distintos (approx.): {0}".format(rdd.countApproxDistinct(0.5)))
```
- `countByValue()` devuelve el número de apariciones de cada elemento del RDD como un mapa (o diccionario) de tipo clave/valor
- Las claves son los elementos del RDD y cada valor, el número de ocurrencias de la clave asociada al mismo
```
# Cache the RDD because it is reused by later cells in this notebook.
rdd = sc.parallelize(list("abracadabra")).cache()
# countByValue() returns a dict-like map {element: occurrences} to the driver.
mimapa = rdd.countByValue()
print(type(mimapa))
print(mimapa.items())
```
#### Acciones para obtener valores
Estos métodos deben usarse con cuidado, si el resultado esperado es muy grande puede saturar la memoria del driver
- `collect()` devuelve una lista con todos los elementos del RDD
```
# Materialize the entire RDD on the driver and display it.
print(rdd.collect())
```
- `take(n)` devuelve los `n` primeros elementos del RDD
- `takeSample(withRep, n, [seed])` devuelve `n` elementos aleatorios del RDD
- `withRep`: si True, en la muestra puede aparecer el mismo elemento varias veces
- `seed`: semilla para el generador de números aleatorios
```
# First 4 elements in RDD order.
print(rdd.take(4))
# 4 random elements, sampled without replacement (no fixed seed).
print(rdd.takeSample(False, 4))
```
- `top(n)` devuelve una lista con los primeros `n` elementos del RDD ordenados en orden descendente
- `takeOrdered(n,[orden])` devuelve una lista con los primeros `n` elementos del RDD en orden ascendente (opuesto a `top`), o siguiendo el orden indicado en la función opcional
```
# Cached because three different actions below reuse the same RDD.
rdd = sc.parallelize([8, 4, 2, 9, 3, 1, 10, 5, 6, 7]).cache()
# top(n): the n largest elements, in descending order.
print("4 elementos más grandes: {0}".format(rdd.top(4)))
# takeOrdered(n): the n smallest elements, in ascending order.
print("4 elementos más pequeños: {0}".format(rdd.takeOrdered(4)))
# takeOrdered with a negating key yields the n largest again.
print("4 elementos más grandes: {0}".format(rdd.takeOrdered(4, lambda x: -x)))
```
| github_jupyter |
# SYCL Task Scheduling and Data Dependences
##### Sections
- [Buffers and Accessors](#Buffers-and-Accessors)
- [Memory Management](#Memory-Management)
- [Explicit Data Movement](#Explicit-Data-Movement)
- [Implicit data movement](#Implicit-data-movement)
- [What is USM?](#What-is-Unified-Shared-Memory?)
- [Types of USM](#Types-of-USM)
- _Code:_ [USM Explicit data Movement](#USM-Explicit-data-Movement)
- _Code:_ [USM Implicit data Movement](#USM-Implicit-data-Movement)
- [Accessors](#Accessors)
- [Access modes](#Access-modes)
- [Graph Scheduling](#Execution-Graph-Scheduling)
- _Code:_ [RAW - Read after Write](#RAW-Read-after-Write)
- _Code:_ [WAR WAW- Write after Read and Write after Write](#WAR-WAW-Write-after-Read-and-Write-after-Write)
- _Code:_ [Implicit dependency with Accessors](#Implicit-dependency-with-Accessors)
- [Graphs and Dependencies](#Graphs-and-Dependencies)
- [Graphs in DPC++](#Graphs-in-DPC++)
- [Dependency in Graphs](#Dependency-in-Linear-dependency-chain-graphs-and-y-pattern-Graphs)
- [In-Order Queues](#In-Order-Queues)
- _Code:_ [Linear dependence chain using in-order queues](#Linear-dependence-chain-using-in-order-queues)
- _Code:_ [Y Pattern using in-order queues](#Y-Pattern-using-in-order-queues)
- [Event-based dependencies](#Event-based-dependencies)
- _Code:_ [Linear dependence chain using events](#Linear-dependence-chain-using-events)
- _Code:_ [Y Pattern using events](#Y-Pattern-using-events)
- _Code:_ [Linear dependence chain using Buffers and Accessors](#Linear-dependence-chain-using-Buffers-and-Accessors)
- _Code:_ [Y Pattern using Buffers and Accessors](#Y-Pattern-using-Buffers-and-Accessors)
## Learning Objectives
* Utilize USM and Buffers and Accessors to apply Memory management and take control over data movement implicitly and explicitly
* Utilize different types of data dependences that are important for ensuring execution of graph scheduling
* Select the correct modes of dependences in Graphs scheduling.
## Buffers and Accessors
__Buffers__ are a high-level abstraction for data, accessible either on the host machine or on the devices. Buffers encapsulate data in a SYCL application across both devices and host. __Accessors__ are the mechanism used to access buffer data. Buffers can be 1-, 2- or 3-dimensional.
## Memory Management
Managing multiple memories can be accomplished, broadly, in two ways:
* Explicitly by the programmer
* Implicitly by the runtime.
Each method has its advantages and drawbacks, and programmers may choose one or the other depending on circumstances or personal preference.
### Explicit Data Movement
In a DPC++ program one option for managing multiple memories is for the programmer to explicitly copy data between host and the device and once the computation is done it needs to be copied back to the host from the device. This can be done explicitly by the programmer.
Also, once we offload computation to a device by submitting tasks to a queue and the kernel computes new results,
the data needs to be copied back to the host program. One of the main advantages of explicit transfer is that the programmer has full control over when data is transferred between the device and the host and back to host from the device, and this is important and can be essential to obtaining the best performance on some hardware.
The disadvantage of explicit data movement is that transferring data explicitly can be a tedious and error-prone process for the programmer. Transferring incorrect data, or transferring data back to the host at the incorrect time, can lead to incorrect results.
Getting all of the data movement correct up front can be a time-consuming task.
### Implicit data movement
The alternative to explicit data movement is implicit data movement. This is controlled by the runtime or driver and here the runtime is responsible for ensuring that data is transferred to the appropriate memory before it is used.
The advantage of implicit data movement is that it requires less effort on the programmer’s part and all the heavy lifting is done by the runtime. This also reduces the opportunity to introduce errors into the program since the runtime will automatically identify both when data transfers must be performed and how much data must be transferred.
The drawback of implicit data movement is that the programmer has less or no control over the behavior of the runtime’s implicit mechanisms. The runtime will provide functional correctness but may not move data in an optimal fashion that could have a negative impact on program performance.
### Selecting the right strategy: explicit or implicit
A programmer might choose to begin using implicit data movement to simplify porting an application to a new device. As we
begin tuning the application for performance, we might start replacing implicit data movement with explicit in performance-critical parts of the code.
## What is Unified Shared Memory?
Unified Shared Memory (USM) is a DPC++ tool for data management. USM is a
__pointer-based approach__ that should be familiar to C and C++ programmers who use malloc
or new to allocate data. USM __simplifies development__ for the programmer when __porting existing
C/C++ code__ to DPC++.
### USM Explicit data Movement
The DPC++ code below shows an implementation of USM using <code>malloc_device</code>, in which data movement between host and device should be done explicitly by developer using <code>memcpy</code>. This allows developers to have more __controlled movement of data__ between host and device.
The DPC++ code below demonstrates USM Explicit Data Movement: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/USM_explicit.cpp
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: MIT
#include <CL/sycl.hpp>
#include<array>
using namespace sycl;
constexpr int N = 42;
// Explicit USM data movement: host <-> device copies are issued by the
// programmer with memcpy; the kernel only ever touches device memory.
int main() {
queue Q;
std::array<int,N> host_array;
// Device-only allocation; the host cannot dereference this pointer.
int *device_array = malloc_device<int>(N, Q);
for (int i = 0; i < N; i++)
host_array[i] = N;
// Submit the queue
Q.submit([&](handler &h) {
// copy hostArray to deviceArray
h.memcpy(device_array, &host_array[0], N * sizeof(int));
});
// Each wait() orders the next command group after the previous one,
// since no explicit dependences are declared between them.
Q.wait();
Q.submit([&](handler &h) {
h.parallel_for(N, [=](id<1> i) { device_array[i]++; });
});
Q.wait();
Q.submit([&](handler &h) {
// copy deviceArray back to hostArray
h.memcpy(&host_array[0], device_array, N * sizeof(int));
});
Q.wait();
// USM allocations are not RAII-managed; release explicitly via the queue.
free(device_array, Q);
return 0;
}
```
#### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_usm_explicit.sh; if [ -x "$(command -v qsub)" ]; then ./q run_usm_explicit.sh; else ./run_usm_explicit.sh; fi
```
### USM Implicit data Movement
The DPC++ code below shows an implementation of USM using <code>malloc_shared</code>, in which data movement happens implicitly between host and device. This is useful to __get functional quickly with a minimum amount of code__, and developers will not have to worry about moving memory between host and device.
```
%%writefile lab/USM_implicit.cpp
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: MIT
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
// Implicit USM data movement: malloc_shared memory migrates between host
// and device automatically; no explicit memcpy is needed.
int main() {
queue Q;
int *host_array = malloc_host<int>(N, Q);
int *shared_array = malloc_shared<int>(N, Q);
for (int i = 0; i < N; i++) {
// Initialize hostArray on host
host_array[i] = i;
}
// Submit the queue
Q.submit([&](handler &h) {
h.parallel_for(N, [=](id<1> i) {
// access sharedArray and hostArray on device
shared_array[i] = host_array[i] + 1;
});
});
// Wait so the host reads below observe the kernel's results.
Q.wait();
for (int i = 0; i < N; i++) {
// access sharedArray on host
host_array[i] = shared_array[i];
}
free(shared_array, Q);
free(host_array, Q);
return 0;
}
```
#### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_usm_implicit.sh; if [ -x "$(command -v qsub)" ]; then ./q run_usm_implicit.sh; else ./run_usm_implicit.sh; fi
```
# Accessors
Data represented by a buffer cannot be directly accessed through the buffer object. Instead, we must create accessor objects that allow us to safely access a buffer’s data. Accessors inform the runtime where and how we want to access data, allowing the runtime to ensure that the right data is in the right place at the right time and that kernels don't run until the data is available.
### Access modes
When creating an accessor, we must inform the runtime how we are going to use it by specifying an access mode as described in the below table.
Access modes are how the runtime is able to perform implicit data movement.
When the accessor is created with __access::mode::read_write__ we intend to both read and write to the buffer through the accessor.
`read_only` tells the runtime that the data needs to be available on the device before this kernel can begin executing. Similarly, __`access::mode::write`__ lets the runtime know that we will modify the contents of a buffer and may need to copy the results back after computation has ended.
The runtime uses accessors to order the use of data, but it can also use this data to optimize scheduling of kernels and data movement.
| Access Mode | Description |
|:---|:---|
| __read__ | Read only Access|
| __write__ | Write-only access. Previous contents not discarded |
| __read_write__ | Read and Write access |
| __atomic__ |Read and write atomic access |
### Execution Graph Scheduling
Execution graphs are the mechanism that we use to achieve proper sequencing of kernels, and data movement in an application. Dependences between kernels are fundamentally based on what data a kernel accesses. A kernel needs to be certain that it reads the correct data before it can compute its output.
There are three types of data dependences that are important for ensuring correct execution.
* Read-after-Write (RAW) : Occurs when one task needs to read data produced by a different task. This type of dependence describes the flow of data between two kernels.
* Write-after-Read (WAR) : The second type of dependence happens when one task needs to update data after another task has read it.
* Write-after-Write (WAW) : The final type of data dependence occurs when two tasks try to write the same data.
#### RAW-Read after Write
The DPC++ code below demonstrates creating accessors: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/accessors_RAW.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
// Read-after-Write (RAW): computeC reads buffer B, which computeB writes,
// so the runtime orders computeC after computeB purely via the accessors.
int main() {
std::array<int,N> a, b, c;
for (int i = 0; i < N; i++) {
a[i] = b[i] = c[i] = 0;
}
queue Q;
//Create Buffers
buffer A{a};
buffer B{b};
buffer C{c};
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
accessor accB(B, h, write_only);
h.parallel_for( // computeB
N,
[=](id<1> i) { accB[i] = accA[i] + 1; });
});
// readA only reads A, so it may run concurrently with computeB.
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
h.parallel_for( // readA
N,
[=](id<1> i) {
// Useful only as an example
int data = accA[i];
});
});
Q.submit([&](handler &h) {
// RAW of buffer B
accessor accB(B, h, read_only);
accessor accC(C, h, write_only);
h.parallel_for( // computeC
N,
[=](id<1> i) { accC[i] = accB[i] + 2; });
});
// Constructing the host accessor blocks until computeC's results for
// buffer C are available on the host.
// read C on host
host_accessor host_accC(C, read_only);
for (int i = 0; i < N; i++) {
std::cout << host_accC[i] << " ";
}
std::cout << "\n";
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_accessor_RAW.sh;if [ -x "$(command -v qsub)" ]; then ./q run_accessor_RAW.sh; else ./run_accessor_RAW.sh; fi
```
#### WAR WAW-Write after Read and Write after Write
WAR happens when one task needs to update data after another task has read it and WAW occurs when two tasks try to write the same data.
The DPC++ code below demonstrates creating accessors: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/accessors_WAR_WAW.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: MIT
#include <CL/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
// WAR and WAW dependences: rewriteA must wait until computeB has read A,
// and rewriteB must be ordered after computeB's write to B.
int main() {
std::array<int,N> a, b;
for (int i = 0; i < N; i++) {
a[i] = b[i] = 0;
}
queue Q;
buffer A{a};
buffer B{b};
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
accessor accB(B, h, write_only);
h.parallel_for( // computeB
N, [=](id<1> i) {
accB[i] = accA[i] + 1;
});
});
Q.submit([&](handler &h) {
// WAR of buffer A
accessor accA(A, h, write_only);
h.parallel_for( // rewriteA
N, [=](id<1> i) {
accA[i] = 21 + 21;
});
});
Q.submit([&](handler &h) {
// WAW of buffer B
accessor accB(B, h, write_only);
h.parallel_for( // rewriteB
N, [=](id<1> i) {
accB[i] = 30 + 12;
});
});
// Host accessors block until the final values of A and B are on the host.
host_accessor host_accA(A, read_only);
host_accessor host_accB(B, read_only);
for (int i = 0; i < N; i++) {
std::cout << host_accA[i] << " " << host_accB[i] << " ";
}
std::cout << "\n";
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_accessor_WAW.sh;if [ -x "$(command -v qsub)" ]; then ./q run_accessor_WAW.sh; else ./run_accessor_WAW.sh; fi
```
## Graphs and Dependencies
We discussed data management and ordering the uses of data and the abstraction behind graphs in DPC++: dependences.
Dependences between kernels are fundamentally based on what data a kernel accesses. A kernel needs to be certain that it reads the correct data before it can compute its output.
We described the three types of data dependences that are important for ensuring correct execution. The first, Read-after-Write (RAW), occurs when one task needs to read data produced by a different task. This type of dependence describes the flow of data between two kernels. The second type of dependence happens when one task needs to update data after another task has read it. We call that type of dependence a Write-after-Read (WAR) dependence. The final type of data dependence occurs when two tasks try to write the same data. This is known as a Write-after-Write (WAW) dependence.
Data dependences are the building blocks we will use to build graphs. This set of dependences is all we need to express both simple linear chains of kernels and large, complex graphs with hundreds of kernels with elaborate dependences. No matter which types of graph a computation needs, DPC++ graphs ensure that a program will execute correctly based on the expressed dependences. However, it is up to the programmer to make sure that a graph correctly expresses all the dependences in a program.
## Graphs in DPC++
A command group can contain three different things: an action, its dependences, and miscellaneous host code. Of these three things, the one that is always required is the action since without it, the command group really doesn’t do anything. Most command groups will also express dependences, but there are cases where they may not.
Command groups are typically expressed as a C++ lambda expression passed to the submit method. Command groups can also be expressed through shortcut methods on queue objects that take a kernel and set of event-based dependences.
There are two types of actions that may be performed by a command group: kernels and explicit memory operations. Kernels
are defined through calls to a parallel_for or single_task method and express computations that we want to perform on our devices. Operations for explicit data movement are the second type of action. Examples from USM include memcpy, memset, and fill operations. Examples from buffers include copy, fill, and update_host.
## Dependency in Linear dependency chain graphs and y pattern Graphs
The two patterns that are explained below are linear dependence chains where one task executes after another and a “Y” pattern where two independent tasks must execute before successive tasks.
In a __linear dependence__ chain the first node represents the initialization of data, while the second node presents the
reduction operation that will accumulate the data into a single result.

In a __“Y” pattern__ we independently initialize two different pieces of data. After the data is initialized, an addition kernel
will sum the two vectors together. Finally, the last node in the graph accumulates the result into a single value.

In the below examples for each pattern we will see three different implementations.
* In-order queues.
* Event-based dependences.
* Using buffers and accessors to express data dependences between command groups.
### In-Order Queues
The other main component of a command group is the set of dependences that must be satisfied before the action defined by the group can execute. DPC++ allows these dependences to be specified in several ways. If a program uses in-order DPC++ queues, the in-order semantics of the queue specify implicit dependences between successively enqueued command groups. One task cannot execute until the previously submitted task has completed.
### Linear dependence chain using in-order queues
In the below example the inorder queues already guarantee a sequential order of execution between command groups. The first kernel we submit initializes the elements of an array to 1. The next kernel then takes those elements and sums them together into the first element.
Since our queue is in order, we do not need to do anything else to express that the second kernel should not execute
until the first kernel has completed. Finally, we wait for the queue to finish executing all its tasks, and we check that we obtained the expected result.
The DPC++ code below demonstrates creating Linear dependence In-Order Queues: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/Linear_inorder_queues.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data = malloc_shared<int>(N, Q);
Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
Q.wait();
assert(data[0] == N);
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_linear_inorder.sh;if [ -x "$(command -v qsub)" ]; then ./q run_linear_inorder.sh; else ./run_linear_inorder.sh; fi
```
### Y Pattern using in-order queues
In the below example we can see a “Y” pattern using in-order queues. In this example, we declare two arrays, data1 and data2. We then define two kernels that will each initialize one of the arrays. These kernels do not depend on each other, but because the queue is in order, the kernels must execute one after the other.
Note that you can swap the order of these two kernels in this example. After the second kernel has executed, the third kernel adds the elements of the second array to those of the first array. The final kernel sums up the elements of the first array
to compute the same result we did in our examples for linear dependence chains.
This summation kernel depends on the previous kernel, but this linear chain is also captured by the in-order queue. Finally, we wait for all kernels to complete and validate that we successfully computed the final result.
The DPC++ code below demonstrates creating Linear dependence In-Order Queues: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/y_pattern_inorder_queues.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data1 = malloc_shared<int>(N, Q);
int *data2 = malloc_shared<int>(N, Q);
Q.parallel_for(N, [=](id<1> i) { data1[i] = 1; });
Q.parallel_for(N, [=](id<1> i) { data2[i] = 2; });
Q.parallel_for(N, [=](id<1> i) { data1[i] += data2[i]; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data1[0] += data1[i];
data1[0] /= 3;
});
Q.wait();
assert(data1[0] == N);
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_y_inorder.sh;if [ -x "$(command -v qsub)" ]; then ./q run_y_inorder.sh; else ./run_y_inorder.sh; fi
```
## Event-based dependencies
Event-based dependences are another way to specify what must be complete before a command group may execute. These event-based
dependences may be specified in two ways. The first way is used when a command group is specified as a lambda passed to a queue’s submit method. In this case, the programmer invokes the depends_on method of the command group handler object, passing either an event or vector of events as parameter.
The other way is used when a command group is created from the shortcut methods defined on the queue object. When the
programmer directly invokes parallel_for or single_task on a queue, an event or vector of events may be passed as an extra parameter.
### Linear dependence chain using events
In the below example we can see usage of an out-of-order queue and event-based dependences. Here, we capture the event returned by the first call to parallel_for. The second kernel is then able to specify a dependence on that event and the kernel execution it represents by passing it as a parameter to depends_on.
The DPC++ code below demonstrates creating In-Order Queues: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/linear_event_graphs.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data = malloc_shared<int>(N, Q);
auto e = Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.submit([&](handler &h) {
h.depends_on(e);
h.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
});
Q.wait();
assert(data[0] == N);
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_linear_events.sh;if [ -x "$(command -v qsub)" ]; then ./q run_linear_events.sh; else ./run_linear_events.sh; fi
```
### Y Pattern using events
Below is a “Y” pattern example with out-of-order queues instead of in-order queues. Since the dependences are no longer implicit
due to the order of the queue, we must explicitly specify the dependences between command groups using events.
We define two independent kernels that have no initial dependences. We represent these kernels by two events, e1 and e2. When we define our third kernel, we must specify that it depends on the first two kernels. We do this by saying that it depends on events e1 and e2 to complete before it may execute.
However, in this example, we use a shortcut form to specify these dependences instead of the handler’s depends_on method. Here, we
pass the events as an extra parameter to parallel_for. Since we want to
pass multiple events at once, we use the form that accepts a std::vector of events, as modern C++ simplifies this by automatically
converting the expression {e1, e2} into the appropriate vector.
The DPC++ code below demonstrates creating Linear dependence In-Order Queues: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/y_pattern_events.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data1 = malloc_shared<int>(N, Q);
int *data2 = malloc_shared<int>(N, Q);
auto e1 = Q.parallel_for(N, [=](id<1> i) { data1[i] = 1; });
auto e2 = Q.parallel_for(N, [=](id<1> i) { data2[i] = 2; });
auto e3 = Q.parallel_for(range{N}, {e1, e2},
[=](id<1> i) { data1[i] += data2[i]; });
Q.single_task(e3, [=]() {
for (int i = 1; i < N; i++)
data1[0] += data1[i];
data1[0] /= 3;
});
Q.wait();
assert(data1[0] == N);
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_y_events.sh;if [ -x "$(command -v qsub)" ]; then ./q run_y_events.sh; else ./run_y_events.sh; fi
```
### Linear dependence chain using Buffers and Accessors
In the below example we show how to write linear dependence chain using buffers and accessors instead of USM pointers. Here we use an outof-order queue but use data dependences specified through accessors instead of event-based dependences to order the execution of the command groups.
The second kernel reads the data produced by the first kernel, and the runtime can see this because we declare accessors based
on the same underlying buffer object. Unlike the previous examples, we do not wait for the queue to finish executing all its tasks. Instead, we declare a host accessor that defines a data dependence between the output of the second kernel and our assertion that we computed the correct answer on the host.
The DPC++ code below demonstrates creating Linear Dependence chain using Buffers and Accessors: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/linear_buffers_graphs.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
// Linear dependence chain expressed through buffer accessors: both kernels
// access the same buffer, so the runtime orders them automatically.
int main() {
queue Q;
buffer<int> data{range{N}};
Q.submit([&](handler &h) {
// Default access mode is read_write.
accessor a{data, h};
h.parallel_for(N, [=](id<1> i) { a[i] = 1; });
});
Q.submit([&](handler &h) {
accessor a{data, h};
h.single_task([=]() {
for (int i = 1; i < N; i++)
a[0] += a[i];
});
});
// No explicit wait: constructing the host accessor blocks until the
// second kernel's results are available on the host.
host_accessor h_a{data};
assert(h_a[0] == N);
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_linear_buffer.sh;if [ -x "$(command -v qsub)" ]; then ./q run_linear_buffer.sh; else ./run_linear_buffer.sh; fi
```
### Y Pattern using Buffers and Accessors
Below is a “Y” pattern example with buffers and Accessors. We replace USM pointers and events with buffers and accessors. This example represents the two arrays data1 and data2 as buffer objects. Our kernels no longer use the shortcut methods for defining kernels since we must associate accessors with a command group handler.
The third kernel must capture the dependence on the first two kernels. Here this is accomplished by declaring accessors for our buffers. Since we have previously declared accessors for these buffers, the runtime is able to properly order the execution of these kernels.
As we saw in our buffer and accessor example for linear dependence chains, our final kernel orders itself by updating the values produced in the third kernel. We retrieve the final value of our computation by declaring a host accessor that will wait for the final kernel to finish executing before moving the data back to the host where we can read it and assert we computed the correct result.
The DPC++ code below demonstrates creating Linear dependence In-Order Queues: Inspect code, there are no modifications necessary:
1. Inspect the code cell below and click run ▶ to save the code to file
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/y_pattern_buffers.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
// Two independent buffers; the SYCL runtime tracks kernel dependences
// through the accessors declared on them.
queue Q;
buffer<int> data1{range{N}};
buffer<int> data2{range{N}};
// Kernel 1: initialize data1 (write access via accessor a).
Q.submit([&](handler &h) {
accessor a{data1, h};
h.parallel_for(N, [=](id<1> i) { a[i] = 1; });
});
// Kernel 2: initialize data2; independent of kernel 1 (different buffer).
Q.submit([&](handler &h) {
accessor b{data2, h};
h.parallel_for(N, [=](id<1> i) { b[i] = 2; });
});
// Kernel 3: a[i] += b[i]. Its accessors make it depend on both kernels
// above, forming the join of the "Y" pattern.
Q.submit([&](handler &h) {
accessor a{data1, h};
accessor b{data2, h, read_only};
h.parallel_for(N, [=](id<1> i) { a[i] += b[i]; });
});
// Kernel 4: serial reduction into a[0]. Every a[i] is 1 + 2 = 3, so the
// sum is 3*N and dividing by 3 leaves exactly N.
Q.submit([&](handler &h) {
accessor a{data1, h};
h.single_task([=]() {
for (int i = 1; i < N; i++)
a[0] += a[i];
a[0] /= 3;
});
});
// The host accessor blocks until kernel 4 completes, then exposes the
// result on the host for the final check.
host_accessor h_a{data1};
assert(h_a[0] == N);
return 0;
}
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_y_buffer.sh;if [ -x "$(command -v qsub)" ]; then ./q run_y_buffer.sh; else ./run_y_buffer.sh; fi
```
# Summary
In this module you learned:
* How to utilize USM and Buffers and Accessors to apply Memory management and take control over data movement implicitly and explicitly
* How to utilize different types of data dependences that are important for ensuring execution of graph scheduling
* How to select the correct modes of dependence in graph scheduling.
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Transferência de aprendizado com TensorFlow Hub
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/images/transfer_learning_with_hub"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Ver em TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/pt-br/tutorials/images/transfer_learning_with_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Executar no Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/pt-br/tutorials/images/transfer_learning_with_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Ver código fonte no GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/pt-br/tutorials/images/transfer_learning_with_hub.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Baixar notebook</a>
</td>
</table>
O [TensorFlow Hub](http://tensorflow.org/hub) é uma maneira de compartilhar componentes de modelo pré-treinados. Consulte o [TensorFlow Module Hub](https://tfhub.dev/) para obter uma lista pesquisável de modelos pré-treinados. Este tutorial demonstra:
1. Como usar o TensorFlow Hub com o `tf.keras`.
1. Como fazer a classificação da imagem usando o TensorFlow Hub.
1. Como fazer um simples aprendizado de transferência.
## Configuração
```
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib.pylab as plt
try:
# %tensorflow_version only exists in Colab.
!pip install tf-nightly
except Exception:
pass
import tensorflow as tf
!pip install -U tf-hub-nightly
!pip install tfds-nightly
import tensorflow_hub as hub
from tensorflow.keras import layers
```
## Um Classificador ImageNet
### Baixar o classificador
Use `hub.module` para carregar uma mobilenet e `tf.keras.layers.Lambda` para envolvê-la como uma camada keras. Qualquer [URL do classificador de imagem compatível com TensorFlow 2](https://tfhub.dev/s?q=tf2&module-type=image-classification) do tfhub.dev funcionará aqui.
```
classifier_url ="https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2" #@param {type:"string"}
IMAGE_SHAPE = (224, 224)
classifier = tf.keras.Sequential([
hub.KerasLayer(classifier_url, input_shape=IMAGE_SHAPE+(3,))
])
```
### Execute-o em uma única imagem
Faça o download de uma única imagem para experimentar o modelo.
```
import numpy as np
import PIL.Image as Image
grace_hopper = tf.keras.utils.get_file('image.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg')
grace_hopper = Image.open(grace_hopper).resize(IMAGE_SHAPE)
grace_hopper
grace_hopper = np.array(grace_hopper)/255.0
grace_hopper.shape
```
Adicione uma dimensão em batch e passe a imagem para o modelo.
```
result = classifier.predict(grace_hopper[np.newaxis, ...])
result.shape
```
O resultado é um vetor de 1001 elementos de logits, classificando a probabilidade de cada classe para a imagem.
Portanto, o ID da classe superior pode ser encontrado com argmax:
```
predicted_class = np.argmax(result[0], axis=-1)
predicted_class
```
### Decodificar as previsões
Temos o ID da classe previsto,
Busque as etiquetas `ImageNet` e decodifique as previsões
```
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
plt.imshow(grace_hopper)
plt.axis('off')
predicted_class_name = imagenet_labels[predicted_class]
_ = plt.title("Prediction: " + predicted_class_name.title())
```
## Transferência de aprendizado simples
Usando o TF Hub, é simples treinar novamente a camada superior do modelo para reconhecer as classes em nosso conjunto de dados.
### Conjunto de Dados
Neste exemplo, você usará o conjunto de dados de flores TensorFlow:
```
data_root = tf.keras.utils.get_file(
'flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
```
A maneira mais simples de carregar esses dados em nosso modelo é usando `tf.keras.preprocessing.image.ImageDataGenerator`,
Todos os módulos de imagem do TensorFlow Hub esperam entradas flutuantes na faixa `[0,1]`. Use o parâmetro `rescale` do `ImageDataGenerator` para conseguir isso.
O tamanho da imagem será tratado posteriormente.
```
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
image_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)
```
O objeto resultante é um iterador que retorna os pares `image_batch, label_batch`.
```
for image_batch, label_batch in image_data:
print("Image batch shape: ", image_batch.shape)
print("Label batch shape: ", label_batch.shape)
break
```
### Rode o classificador em um lote de imagens
Agora, execute o classificador em um lote de imagens.
```
result_batch = classifier.predict(image_batch)
result_batch.shape
predicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]
predicted_class_names
```
Agora verifique como essas previsões estão alinhadas com as imagens:
```
plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
plt.title(predicted_class_names[n])
plt.axis('off')
_ = plt.suptitle("ImageNet predictions")
```
Veja o arquivo `LICENSE.txt` para atribuições de imagem.
Os resultados estão longe de serem perfeitos, mas razoáveis, considerando que essas não são as classes para as quais o modelo foi treinado (exceto "daisy").
### Faça o download do modelo sem cabeça
O TensorFlow Hub também distribui modelos sem a camada de classificação superior. Eles podem ser usados para transferir facilmente o aprendizado.
Qualquer [URL do vetor de recurso de imagem compatível com Tensorflow 2](https://tfhub.dev/s?module-type=image-feature-vector&q=tf2) do tfhub.dev funcionará aqui.
```
feature_extractor_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2" #@param {type:"string"}
```
Crie o extrator de características.
```
feature_extractor_layer = hub.KerasLayer(feature_extractor_url,
input_shape=(224,224,3))
```
Isto retorna um vetor de tamanho 1280 para cada imagem:
```
feature_batch = feature_extractor_layer(image_batch)
print(feature_batch.shape)
```
Congele as variáveis na camada extrator de característica, para que o treinamento modifique apenas a nova camada do classificador.
```
feature_extractor_layer.trainable = False
```
### Anexar um cabeçalho de classificação
Agora envolva a camada do hub em um modelo `tf.keras.Sequential` e adicione uma nova camada de classificação.
```
model = tf.keras.Sequential([
feature_extractor_layer,
layers.Dense(image_data.num_classes, activation='softmax')
])
model.summary()
predictions = model(image_batch)
predictions.shape
```
### Treine o Modelo
Use compile para configurar o processo de treinamento:
```
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['acc'])
```
Agora use o método `.fit` para treinar o modelo.
Para manter este exemplo, treine apenas duas épocas. Para visualizar o progresso do treinamento, use um retorno de chamada personalizado para registrar a perda e a acurácia de cada lote individualmente, em vez da média da época.
```
class CollectBatchStats(tf.keras.callbacks.Callback):
    """Keras callback that records the loss and accuracy of every batch.

    Metrics are reset after each batch so the recorded values are
    per-batch figures rather than running epoch averages.
    """

    def __init__(self):
        # Call the base-class initializer so Callback's internal state
        # is set up correctly (the original omitted this).
        super().__init__()
        self.batch_losses = []  # loss recorded after each training batch
        self.batch_acc = []     # accuracy recorded after each training batch

    def on_train_batch_end(self, batch, logs=None):
        self.batch_losses.append(logs['loss'])
        self.batch_acc.append(logs['acc'])
        # Reset metrics so the next batch's 'acc' is not an epoch average.
        self.model.reset_metrics()
steps_per_epoch = np.ceil(image_data.samples/image_data.batch_size)
batch_stats_callback = CollectBatchStats()
history = model.fit_generator(image_data, epochs=2,
steps_per_epoch=steps_per_epoch,
callbacks = [batch_stats_callback])
```
Agora, depois de apenas algumas iterações de treinamento, já podemos ver que o modelo está progredindo na tarefa.
```
plt.figure()
plt.ylabel("Loss")
plt.xlabel("Training Steps")
plt.ylim([0,2])
plt.plot(batch_stats_callback.batch_losses)
plt.figure()
plt.ylabel("Accuracy")
plt.xlabel("Training Steps")
plt.ylim([0,1])
plt.plot(batch_stats_callback.batch_acc)
```
### Verificando as previsões
Para refazer a plotagem de antes, primeiro obtenha a lista ordenada de nomes de classe:
```
class_names = sorted(image_data.class_indices.items(), key=lambda pair:pair[1])
class_names = np.array([key.title() for key, value in class_names])
class_names
```
Execute o lote de imagens através do modelo e converta os índices em nomes de classe.
```
predicted_batch = model.predict(image_batch)
predicted_id = np.argmax(predicted_batch, axis=-1)
predicted_label_batch = class_names[predicted_id]
```
Plote o resultado
```
label_id = np.argmax(label_batch, axis=-1)
plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
color = "green" if predicted_id[n] == label_id[n] else "red"
plt.title(predicted_label_batch[n].title(), color=color)
plt.axis('off')
_ = plt.suptitle("Model predictions (green: correct, red: incorrect)")
```
## Exporte seu modelo
Agora que você treinou o modelo, exporte-o como um modelo salvo:
```
import time
t = time.time()
export_path = "/tmp/saved_models/{}".format(int(t))
model.save(export_path, save_format='tf')
export_path
```
Agora confirme que podemos recarregá-lo e ainda dá os mesmos resultados:
```
reloaded = tf.keras.models.load_model(export_path)
result_batch = model.predict(image_batch)
reloaded_result_batch = reloaded.predict(image_batch)
abs(reloaded_result_batch - result_batch).max()
```
Este modelo salvo pode ser carregado para inferência posteriormente ou convertido para [TFLite](https://www.tensorflow.org/lite/convert/) ou [TFjs](https://github.com/tensorflow/tfjs-converter).
| github_jupyter |
# Synchronisation in Complex Networks
```
import numpy as np
import matplotlib.pylab as plt
import networkx as nx
from NetworkFunctions import *
from NetworkClasses import *
N = 100; # number of nodes
m = 2;
G = nx.barabasi_albert_graph(N,m,seed=None); # Barabasi-Albert graph
A = nx.to_numpy_matrix(G); # creates adjacency matrix
w = np.random.uniform(-2, 2, N); # defines natural frequencies
K = .5 # coupling constant
alpha = 1 # SL parameter
F = np.zeros(N)
for i in range(int(N/5)):
F[5*i] = 1
Omega = np.pi
# initial conditions
theta0 = np.random.uniform(0, 2*np.pi, N)
rho0 = np.random.uniform(0.1, 0.9, N) # so the system doesn't fall into the attractor
z0 = rho0*np.exp(1j*theta0)
z0[:5]
nx.draw(G, node_color='turquoise', edge_color='grey', with_labels=True)
plt.show()
```
## Stuart-Landau Model
The equations for a (forced) complex network of $N$ Stuart-Landau oscillators with natural frequencies $\omega_k$, limit cycle parameter $\alpha$, adjacency matrix $A$, coupling strength (or average coupling strength, for the case where $A$ is weighted) $\lambda$ and a forced term of type $ F_k e^{i \Omega t} $ that acts on a fraction $f = N_F/N$, where $N_F$ is the number of forced oscillators (nonzero $F$), can be written in the following forms:
### 1. Complex Form
$$ \dot{z}_k (z,t) = \{ \alpha^2 + i \omega_k - |z_k|^2 \} z_k + \lambda \sum_{j=1}^N A_{ij} (z_j - z_k) + F_k e^{i \Omega t} $$
### 2. Real Polar Form
Substituting $z_k = \rho_k e^{i \theta_k}$ in the above equation, we find:
$$ \dot{\rho}_i (\rho, \theta, t) = \rho_i (\alpha^2 - \rho_i^2) + \lambda \sum_{j=1}^N A_{ij} \left\{ \rho_j \cos{(\theta_j - \theta_i)} - \rho_i \right\} + F_i \cos{(\Omega t - \theta_i)} $$
$$ \dot{\theta}_i (\rho, \theta, t) = \omega_i + \lambda \sum_{j=1}^N A_{ij} \frac{\rho_j}{\rho_i} \sin{(\theta_j - \theta_i)} + F_i \sin{(\Omega t - \theta_i)} $$
The Jacobian is then:
$$ J = \left[ \begin{matrix} \frac{\partial \dot{\rho}}{\partial \rho} && \frac{\partial \dot{\rho}}{\partial \theta} \\
\frac{\partial \dot{\theta}}{\partial \rho} && \frac{\partial \dot{\theta}}{\partial \theta} \end{matrix} \right] $$
where, for a network with no self-edges ($A_{jj} = 0\ \forall j$):
$$ \frac{\partial \dot{\rho}_i}{\partial \rho_j} = (\alpha^2 - 3\rho_i^2 - \lambda A_{ij}) \delta_{ij} + \lambda A_{ij} \cos{(\theta_j - \theta_i)} $$
$$ \frac{\partial \dot{\rho}_i}{\partial \theta_j} = - \lambda A_{ij} \rho_j \sin{(\theta_j - \theta_i)} - \delta_{ij} F_i \sin{(\Omega t - \theta_i)} $$
$$ \frac{\partial \dot{\theta}_i}{\partial \rho_j} = \frac{\lambda}{\rho_i} A_{ij} \sin{(\theta_j - \theta_i)} $$
$$ \frac{\partial \dot{\theta}_i}{\partial \theta_j} = \lambda A_{ij} \frac{\rho_j}{\rho_i} \cos{(\theta_j - \theta_i)} + \delta_{ij} F_i \cos{(\Omega t - \theta_i)}$$
### 3. Real Rectangular Form
Substituting $z_k = x_k + iy_k$ in the complex system holds:
$$ \dot{x}_i (x, y, t) = (\alpha^2 - x^2_i - y_i^2) x_i - \omega_i y_i + \lambda \sum_{j=1}^N A_{ij} (x_j - x_i) + F_i \cos{(\Omega t)} $$
$$ \dot{y}_i (x, y, t) = (\alpha^2 - x^2_i - y_i^2) y_i + \omega_i x_i + \lambda \sum_{j=1}^N A_{ij} (y_j - y_i) + F_i \sin{(\Omega t)} $$
The Jacobian is then defined by:
$$ J = \left[ \begin{matrix} \frac{\partial \dot{x}}{\partial x} && \frac{\partial \dot{x}}{\partial y} \\
\frac{\partial \dot{y}}{\partial x} && \frac{\partial \dot{y}}{\partial y} \end{matrix} \right] $$
where:
$$ \frac{\partial \dot{x}_i}{\partial x_j} = \delta_{ij} (\alpha^2 - y_i^2 - 3x_i^2) + \lambda A_{ij} $$
$$ \frac{\partial \dot{x}_i}{\partial y_j} = - \delta_{ij} (2 x_i y_i + \omega_i) $$
$$ \frac{\partial \dot{y}_i}{\partial x_j} = - \delta_{ij} (2 x_i y_i - \omega_i) $$
$$ \frac{\partial \dot{y}_i}{\partial y_j} = \delta_{ij} (\alpha^2 - x_i^2 - 3y_i^2) + \lambda A_{ij} $$
with $k_i = \sum_{i=1}^N A_{ij} (1 - \delta_{ij})$ being the node degree of the $i$th node (excluding self-edges)
## Kuramoto Model
The equations for a (forced) complex network of $N$ Kuramoto oscillators with natural frequencies $\omega_k$, adjacency matrix $A$, coupling strength (or average coupling strength, for the case where $A$ is weighted) $\lambda$ and a forced term of type $ F_i \cos{(\Omega t - \theta)} $ that acts on a fraction $f = N_F/N$, where $N_F$ is the number of forced oscillators (nonzero $F$), can be written as:
$$ \dot{\theta}_i = \omega_i + \lambda \sum_{j=1}^N A_{ij} \sin{(\theta_j - \theta_i)} + F_i \sin{(\Omega t - \theta_i)} $$
which gives the Jacobian:
$$ J_{ij} = \frac{\partial \dot{\theta}_i}{\partial \theta_j} = A_{ij} \cos{(\theta_j - \theta_i)} - \delta_{ij} F_i \cos{(\Omega t - \theta_i)} $$
```
SL = StuartLandau(w, A, K, alpha)
SLforced = StuartLandau(w, A, K, alpha, F, Omega)
kuramoto = KuramotoNetwork(w, A, K)
Kforced = KuramotoNetwork(w, A, K, F, Omega)
%%time
t = np.arange(0,50,.2)
z, _ = SL.integrate(z0, t)
z_f, _ = SLforced.integrate(z0, t)
%%time
theta, _ = kuramoto.integrate(theta0, t)
theta_f, _ = Kforced.integrate(theta0, t)
# Compare the free and forced time evolution of one oscillator (`osc`)
# in both the Stuart-Landau and Kuramoto models.
osc = 5
fig, (ax1, ax2) = plt.subplots(2, 1)
fig.suptitle('Time Evolution for an oscillator in the network')
ax1.set_title('Stuart-Landau')
ax1.set_ylabel('$Re(z)$')
ax1.set_xticks([])
ax2.set_title('Kuramoto')
ax2.set_xlabel('$t$')
ax2.set_ylabel(r'$\theta(t)$')
ax2.set_ylim([-1.2, 1.2])
ax1.plot(t, np.real(z[osc]), label='free', color='lightseagreen')
ax1.plot(t, np.real(z_f[osc]), label='forced', color='g')
ax1.plot(t, F[osc]*np.cos(Omega*t), label='force', color='pink', linewidth='.6')
ax1.legend()
# Use the same oscillator index as the Stuart-Landau panel. The original
# code indexed `theta[i*osc]` with `i` left over from an earlier loop,
# which silently plotted an unrelated oscillator.
ax2.plot(t, np.cos(theta[osc]), label='free', color='purple')
ax2.plot(t, np.cos(theta_f[osc]), label='forced', color='goldenrod')
ax2.plot(t, F[osc]*np.cos(Omega*t), label='force', color='cyan', linewidth='.3')
ax2.legend()
plt.show()
fig, ax = plt.subplots(1,1)
ax.set_title('Stuart-Landau Oscillator Trajectories')
ax.set_xlabel(r'$\Re(z)$')
ax.set_ylabel(r'$\Im(z)$')
for i in [7, 48, 22]:
ax.plot(np.real(z[i]), np.imag(z[i]), label=i)
ax.legend()
plt.show()
osc=5
fig, ax = plt.subplots(1, 1)
fig.suptitle('Stuart-Landau Time Evolution')
ax.set_xlabel('$t$')
ax.set_ylabel(r'$\Re{z(t)}$')
for osc in range(4):
ax.plot(t, np.real(z[osc]), label=osc+1)
ax.legend()
plt.show()
```
## Order Parameter
For a network of N oscillators with phase $\theta_i$, we can measure the system's synchronization with:
$$ \mathrm{z}(t) = r(t) e^{i \psi(t)} = \frac{1}{N} \sum_{j=1}^N e^{i \theta_j(t)} $$
The real part $r$ is called order parameter, whereas $\psi$ is the mean phase of the system. When the system is not synchronized, $r \approx 0$, whereas global synchronization is said to be achieved when $r \to 1$.
```
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(-1.2, 1.2)
ax.set_ylim(-1.2, 1.2)
ax.set_aspect('equal')
ax.axis('off')
# Plots points corresponding to the oscillators' phase positions at time t
ax.scatter(np.cos(theta[:,200]), y = np.sin(theta[:,200]), marker = '*', color='crimson')
# Finds the order parameter at the time instant t
thetaT = np.transpose(theta)
order_par = sum(np.exp(thetaT[200]*1j))/N
r = np.absolute(order_par)
psi = np.angle(order_par)
# Plots horizontal and vertical diameters of the circle
ax.plot([-1, 1], [0, 0], linewidth = '.5', color = 'grey')
ax.plot([0, 0], [-1, 1], linewidth = '.5', color = 'grey')
#Plots unit circle
circle = plt.Circle((0,0), radius = 1.0, linewidth = '0.8', color = 'grey', fill = False)
ax.add_patch(circle)
#Plots order parameter line
ax.plot([0, r*np.cos(psi)], [0, r*np.sin(psi)], linewidth = '2.0', color = 'teal')
ax.scatter(r*np.cos(psi), r*np.sin(psi), marker='o', color='teal')
# Shows mean phase
s = np.arange(0,1,0.05)
if r>0.4:
ax.plot(0.25*np.cos(psi*s), 0.25*np.sin(psi*s), color='darkorange')
else:
ax.plot((2*r/3)*np.cos(psi*s), (2*r/3)*np.sin(psi*s), color='darkorange')
plt.show()
```
### Average in time
In practice, we actually calculate the mean value of $r$ and $\psi$ (as well as their standard deviation) over a time interval $[t_0, t_0 + \Delta t]$ corresponding to at least one full oscillation period of the system, so one can be sure our data is statistically relevant and time fluctuations are accounted for:
$$ \langle r \rangle = \frac{1}{\Delta t} \int_{t_0}^{t_0+\Delta t} r(t) dt $$
since we find the time evolution of the phase through numerical integration already, the integral above is performed as a Riemann sum of the numerically obtained values. We also find it useful to compute the angular velocity $\dot{\psi} = \frac{d \psi}{dt}$ of the mean phase, for it produces more insights on the collective dynamical behavior of the system.
We may then calculate such parameters for a range of different coupling constants $\lambda$ to see how the synchronization behavior is affected.
```
%%time
sync_par = OrderParameter(SL, z0, 40, 50, 0.1, Kf=3)
K = sync_par['K']
r = sync_par['r']
r_std = sync_par['r_std']
psi = sync_par['psi']
psi_std = sync_par['psi_std']
psidot = sync_par['psidot']
psidot_std = sync_par['psidot_std']
fig, ax1 = plt.subplots(1, 1)
ax1.set_title("Order Parameter")
ax1.set_ylabel("r")
ax1.set_xlabel(r'$\lambda$')
ax1.set_ylim(0,1.2)
ax1.errorbar(K ,r, yerr=r_std, marker='^', color = 'darkred', fmt='o', elinewidth=.5, capsize=2)
ax1.plot([0, 3], [1, 1], linewidth = .8, color = 'grey')
plt.show()
fig, ax2 = plt.subplots(1,1)
ax2.set_title("Mean Phase")
ax2.set_ylabel(r'$\psi$')
ax2.set_xlabel(r'$\lambda$')
ax2.errorbar(K ,psi, yerr=psi_std, marker='x', color = 'seagreen', fmt='o', elinewidth=.5, capsize=2)
plt.show()
fig, ax3 = plt.subplots(1,1)
ax3.set_title("Mean Phase Velocity")
ax3.set_xlabel(r'$\lambda$')
ax3.set_ylabel(r'$\dot{\psi}$')
ax3.errorbar(K ,psidot, yerr=psidot_std, marker='d', color = 'royalblue', fmt='o', elinewidth=.5, capsize=2)
plt.show()
```
### Average in initial states
To assure statistical relevance, what may also be done is to find such parameters for a set with several different initial conditions and then take the average. This way, there can be certainty on the fact that the main dynamical properties of the systems indeed depends on the network itself, not relying on any specific initial configuration. We define the standard deviation $\sigma^{(r)}_{z_0}$ of $r$ in the initial conditions as:
$$ \sigma^{(r)}_{z_0} = \langle \ \langle r + \sigma^{(r)}_t \rangle_t + \langle r \rangle_t \ \rangle_{z0} $$
where $\langle \rangle_t$ is the time average, $\sigma^{(r)}_t$ the standard deviation with respect to time (both for a single initial condition $z_0$), and $\langle \rangle_{z_0}$ the average through all initial states $z_0$. It's worth remarking that we maintain $0.1 < \rho_0 < 0.9$ in the Stuart-Landau case, as for larger values of $\rho$ the system may fall into one of its attractors, which is not the situation we desire to analyze.
```
%%time
sync_par_av = AverageOrderPar(SL, 10, 40, 50, 0, Kf=3.2, dK=0.1, dt=0.2)
K_av = sync_par_av['K']
r_av = sync_par_av['r']
r_std_av = sync_par_av['r_std']
psi_av = sync_par_av['psi']
psi_std_av = sync_par_av['psi_std']
psidot_av = sync_par_av['psidot']
psidot_std_av = sync_par_av['psidot_std']
fig, ax1 = plt.subplots(1, 1)
ax1.set_title("Order Parameter")
ax1.set_ylabel("r")
ax1.set_xlabel(r'$\lambda$')
ax1.set_ylim(0,1.2)
ax1.errorbar(K_av ,r_av, yerr=r_std_av, marker='^', color = 'darkred', fmt='o', elinewidth=.5, capsize=2)
ax1.plot([0, 3], [1, 1], linewidth = .8, color = 'grey')
plt.show()
fig, ax2 = plt.subplots(1,1)
ax2.set_title("Mean Phase")
ax2.set_ylabel(r'$\psi$')
ax2.set_xlabel(r'$\lambda$')
ax2.errorbar(K_av ,psi_av, yerr=psi_std_av, marker='x', color = 'seagreen', fmt='o', elinewidth=.5, capsize=2)
plt.show()
fig, ax3 = plt.subplots(1,1)
ax3.set_title("Mean Phase Velocity")
ax3.set_xlabel(r'$\lambda$')
ax3.set_ylabel(r'$\dot{\psi}$')
ax3.errorbar(K_av ,psidot_av, yerr=psidot_std_av, marker='d', color = 'royalblue', fmt='o', elinewidth=.5, capsize=2)
plt.show()
```
## Randomly distributed coupling
Here we intend to study how a random distribution of the coupling strength may affect the overall synchronization. To achieve that, we redefine the adjacency matrix so that each nonzero element has its value defined by some probability distribution function. We also normalize the elements of $A$ by the mean value of such distribution so that the mean coupling is absorbed into our coupling parameter $\lambda$.
### Gamma distribution
A Gamma distribution of shape $k$ and scale $\theta$ (as it's used in *scipy.stats.gamma*) is defined by:
$$ f(x; k, \theta) = \frac{x^{k-1} e^{- \frac{x}{\theta}}}{\theta^k \Gamma(k)} $$
so that $\langle x \rangle = k \theta$ and $\sigma^2 = \langle x^2 \rangle - \langle x \rangle^2 = k \theta^2 $
```
# Draw a Gamma-distributed coupling realization and integrate the network.
shape = 1  # Gamma shape parameter k
SLgamma, Kav, Kstd = GammaCoupling(SL, shape)
t = np.arange(0, 50, .2)
z_gamma, t = SLgamma.integrate(z0, t)

fig, ax1 = plt.subplots(1, 1)
fig.suptitle('Time Evolution')
ax1.set_ylabel('$Re(z)$')
ax1.set_xlabel('$t$')
ax1.set_ylim([-1.2, 1.2])
for osc in range(3):
    ax1.plot(t, np.real(z_gamma[7*osc]))
# The original wrote `plt.show` without parentheses, so the figure was
# never actually displayed.
plt.show()
%%time
sync_par_gamma = OrderParameter(SLgamma, z0, 40, 50, 0.1, Kf=3)
# Read the freshly computed results for the Gamma-coupled network.
# (The original read the stale `sync_par` dict and then plotted the
# `*_av` arrays from the previous section, so the Gamma results were
# computed but never displayed.)
K_gamma = sync_par_gamma['K']
r_gamma = sync_par_gamma['r']
r_std_gamma = sync_par_gamma['r_std']
psi_gamma = sync_par_gamma['psi']
psi_std_gamma = sync_par_gamma['psi_std']
psidot_gamma = sync_par_gamma['psidot']
psidot_std_gamma = sync_par_gamma['psidot_std']

fig, ax1 = plt.subplots(1, 1)
ax1.set_title("Order Parameter")
ax1.set_ylabel("r")
ax1.set_xlabel(r'$\lambda$')
ax1.set_ylim(0, 1.2)
ax1.errorbar(K_gamma, r_gamma, yerr=r_std_gamma, marker='^', color='darkred', fmt='o', elinewidth=.5, capsize=2)
ax1.plot([0, 3], [1, 1], linewidth=.8, color='grey')
plt.show()

fig, ax2 = plt.subplots(1, 1)
ax2.set_title("Mean Phase")
ax2.set_ylabel(r'$\psi$')
ax2.set_xlabel(r'$\lambda$')
ax2.errorbar(K_gamma, psi_gamma, yerr=psi_std_gamma, marker='x', color='seagreen', fmt='o', elinewidth=.5, capsize=2)
plt.show()

fig, ax3 = plt.subplots(1, 1)
ax3.set_title("Mean Phase Velocity")
ax3.set_xlabel(r'$\lambda$')
ax3.set_ylabel(r'$\dot{\psi}$')
ax3.errorbar(K_gamma, psidot_gamma, yerr=psidot_std_gamma, marker='d', color='royalblue', fmt='o', elinewidth=.5, capsize=2)
plt.show()
```
| github_jupyter |
## ALS Implementation
- This notebook is an implementation of the ALS algorithm from "Collaborative Filtering for Implicit Feedback Datasets"
### Initialize parameters
- r_lambda: normalization parameter
- alpha: confidence level
- nf: dimension of latent vector of each user and item
- the initialized values (40, 200, 40) are the best parameters from the paper
```
r_lambda = 40
nf = 200
alpha = 40
```
### Initialize original rating matrix data
- make sample (10 x 11) matrix
- 10 : num of users
- 11 : num of items
```
import numpy as np
# sample rating matrix
R = np.array([[0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 0],
[0, 3, 4, 0, 3, 0, 0, 2, 2, 0, 0],
[0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 5, 0, 0, 5, 0],
[0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0, 5, 0, 0, 5, 0],
[0, 0, 0, 3, 0, 0, 0, 0, 4, 5, 0]])
print(R.shape)
```
### Initialize user and item latent factor matrix
- nu: num of users (10)
- ni: num of items (11)
- nf: dimension of latent vector
```
nu = R.shape[0]
ni = R.shape[1]
# initialize X and Y with very small values
X = np.random.rand(nu, nf) * 0.01
Y = np.random.rand(ni, nf) * 0.01
print(X)
```
### Initialize Binary Rating Matrix P
- Convert original rating matrix R into P
- Pui = 1 if Rui > 0
- Pui = 0 if Rui = 0
```
P = np.copy(R)
P[P > 0] = 1
print(P)
```
### Initialize Confidence Matrix C
- Cui = 1 + alpha * Rui
- Cui means confidence level of certain rating data
```
C = 1 + alpha * R
print(C)
```
### Set up loss function
- C: confidence matrix
- P: binary rating matrix
- X: user latent matrix
- Y: item latent matrix
- r_lambda: regularization lambda
- xTy: predict matrix
- Total_loss = (confidence_level * predict loss) + regularization loss
```
def loss_function(C, P, xTy, X, Y, r_lambda):
    """Compute the confidence-weighted ALS loss and its components.

    Returns a 4-tuple:
      (unweighted squared prediction error,
       confidence-weighted error sum(C * (P - xTy)^2),
       L2 regularization term r_lambda * (||X||^2 + ||Y||^2),
       total loss = weighted error + regularization).
    """
    squared_residual = np.square(P - xTy)
    weighted_error = np.sum(C * squared_residual)
    penalty = r_lambda * (np.sum(np.square(X)) + np.sum(np.square(Y)))
    return (np.sum(squared_residual), weighted_error, penalty,
            weighted_error + penalty)
```
### Optimization Function for user and item
- X[u] = (yT\*Cu\*Y + lambda\*I)^-1 \* yT\*Cu\*p(u)
- Y[i] = (xT\*Ci\*X + lambda\*I)^-1 \* xT\*Ci\*p(i)
- the two formulas are identical after exchanging X with Y and u with i
```
def optimize_user(X, Y, C, P, nu, nf, r_lambda):
    """Update every user latent vector of X in place.

    For each user u solves the regularized normal equations
    (Y^T Cu Y + r_lambda*I) x_u = Y^T Cu p_u, where Cu is the diagonal
    confidence matrix of user u.
    """
    Yt = np.transpose(Y)
    ridge = r_lambda * np.identity(nf)  # loop-invariant regularizer
    for u in range(nu):
        Cu = np.diag(C[u])
        lhs = np.matmul(np.matmul(Yt, Cu), Y) + ridge
        rhs = np.matmul(np.matmul(Yt, Cu), P[u])
        X[u] = np.linalg.solve(lhs, rhs)
def optimize_item(X, Y, C, P, ni, nf, r_lambda):
    """Update every item latent vector of Y in place.

    For each item i solves the regularized normal equations
    (X^T Ci X + r_lambda*I) y_i = X^T Ci p_i, where Ci is the diagonal
    confidence matrix of item i (column i of C).
    """
    Xt = np.transpose(X)
    ridge = r_lambda * np.identity(nf)  # loop-invariant regularizer
    for i in range(ni):
        Ci = np.diag(C[:, i])
        lhs = np.matmul(np.matmul(Xt, Ci), X) + ridge
        rhs = np.matmul(np.matmul(Xt, Ci), P[:, i])
        Y[i] = np.linalg.solve(lhs, rhs)
```
### Train
- usually the ALS algorithm repeats its training steps 10–15 times
```
# History of each loss component, one entry per ALS sweep.
predict_errors = []
confidence_errors = []
regularization_list = []
total_losses = []

for step in range(15):
    # Step 0 records the loss of the random initialization only;
    # the alternating updates start from step 1.
    if step != 0:
        optimize_user(X, Y, C, P, nu, nf, r_lambda)
        optimize_item(X, Y, C, P, ni, nf, r_lambda)
    predict = np.matmul(X, np.transpose(Y))
    predict_error, confidence_error, regularization, total_loss = \
        loss_function(C, P, predict, X, Y, r_lambda)
    predict_errors.append(predict_error)
    confidence_errors.append(confidence_error)
    regularization_list.append(regularization)
    total_losses.append(total_loss)
    print('----------------step %d----------------' % step)
    print("predict error: %f" % predict_error)
    print("confidence error: %f" % confidence_error)
    print("regularization: %f" % regularization)
    print("total loss: %f" % total_loss)

predict = np.matmul(X, np.transpose(Y))
print('final predict')
print([predict])
from matplotlib import pyplot as plt
%matplotlib inline
plt.subplots_adjust(wspace=100.0, hspace=20.0)
fig = plt.figure()
fig.set_figheight(10)
fig.set_figwidth(10)
predict_error_line = fig.add_subplot(2, 2, 1)
confidence_error_line = fig.add_subplot(2, 2, 2)
regularization_error_line = fig.add_subplot(2, 2, 3)
total_loss_line = fig.add_subplot(2, 2, 4)
predict_error_line.set_title("Predict Error")
predict_error_line.plot(predict_errors)
confidence_error_line.set_title("Confidence Error")
confidence_error_line.plot(confidence_errors)
regularization_error_line.set_title("Regularization")
regularization_error_line.plot(regularization_list)
total_loss_line.set_title("Total Loss")
total_loss_line.plot(total_losses)
plt.show()
```
| github_jupyter |
```
import kwant
import numpy as np
import matplotlib.pyplot as pyplot
import tinyarray
%matplotlib inline
import scipy
from tqdm.notebook import tqdm
```
$$H = v_f(k_y \sigma_x - k_x\sigma_y) + (m_0 - m_1(k_x^2 + k_y^2))\sigma_z\tau_z + M_z\sigma_z$$
$$H = v_f(k_x\sigma_x - k_y\sigma_y) + (m_0 - m_1(k_x^2 + k_y^2))\sigma_z$$
```
hamiltonian = """
vf*(k_y*kron(sigma_0,sigma_x) - k_x*kron(sigma_0,sigma_y))+ (m0-m1*(k_x**2+k_y**2))*kron(sigma_z,sigma_z) + Mz(x,y)*kron(sigma_0,sigma_z)
"""
a = 1
W =80
L =80
template = kwant.continuum.discretize(hamiltonian,grid = a)
lat = template.lattice
def shape(site):
    """Scattering region: an L-by-W rectangle of sites at the origin."""
    x, y = site.pos
    return 0 <= x < L and 0 <= y < W

def lead_shape_sd(site):
    """Source/drain cross-section: full width of the sample."""
    x, y = site.pos
    return 0 <= y < W

def lead_shape_2(site):
    """Voltage-probe cross-section over the left segment [L/5, 2L/5)."""
    x, y = site.pos
    return L/5 <= x < L*2/5

def lead_shape_3(site):
    """Voltage-probe cross-section over the right segment [3L/5, 4L/5)."""
    x, y = site.pos
    return 3*L/5 <= x < L*4/5

def lead_shape_4(site):
    """Bottom probe over the left segment (same span as lead_shape_2)."""
    x, y = site.pos
    return L/5 <= x < L*2/5

def lead_shape_5(site):
    """Bottom probe over the right segment (same span as lead_shape_3)."""
    x, y = site.pos
    return 3*L/5 <= x < L*4/5
syst = kwant.Builder()
syst.fill(template,shape,(0,0))
lead1s = kwant.Builder(kwant.TranslationalSymmetry([-a,0]))
lead1s.fill(template,lead_shape_sd,(0,0))
lead1d = lead1s.reversed()
lead2 = kwant.Builder(kwant.TranslationalSymmetry([0,a]))
lead2.fill(template,lead_shape_2,(L/5,0))
lead3 = kwant.Builder(kwant.TranslationalSymmetry([0,a]))
lead3.fill(template,lead_shape_3,(L*3/5,0))
lead4 = kwant.Builder(kwant.TranslationalSymmetry([0,-a]))
lead4.fill(template,lead_shape_4,(L/5,0))
lead5 = kwant.Builder(kwant.TranslationalSymmetry([0,-a]))
lead5.fill(template,lead_shape_5,(L*3/5,0))
syst.attach_lead(lead1s)
syst.attach_lead(lead1d)
syst.attach_lead(lead2)
syst.attach_lead(lead3)
syst.attach_lead(lead4)
syst.attach_lead(lead5)
fig,ax = pyplot.subplots()
kwant.plot(syst,ax = ax)
syst=syst.finalized()
def Mz(x, y, strength=0.1):
    """Out-of-plane exchange field M_z at position (x, y).

    The field is spatially uniform, so the coordinates are accepted (kwant
    calls this as Mz(x, y)) but ignored.  ``strength`` generalizes the
    previously hard-coded value 0.1 while keeping the default behavior.
    """
    return strength
#params = dict(r0=20, delta=10, J=1)
# Hamiltonian parameters: Fermi velocity, quadratic mass terms, exchange field.
params = {'vf':1,'m1':1,'m0':-0.5,'Mz':Mz}
# NOTE(review): `wf` is computed here but never used afterwards — dead work.
wf = kwant.wave_function(syst, energy=0, params=params)
# NOTE(review): identical re-assignment of `params` — redundant.
params = {'vf':1,'m1':1,'m0':-0.5,'Mz':Mz}
# Band structure of lead index 4 (a bottom voltage probe).
kwant.plotter.bands(syst.leads[4],params = params, momenta = np.linspace(-0.3,0.3,201), show = False)
pyplot.grid()
pyplot.xlim(-.3, 0.3)
pyplot.ylim(-0.6,0.6)
pyplot.xlabel('momentum [1/A]')
pyplot.ylabel('energy [eV]')
pyplot.show()
# Multi-terminal transport: solve the Landauer-Buttiker equations
# G . V = I for the lead voltages via non-negative least squares,
# then form longitudinal (xx) and Hall (xy) resistances from probe voltages.
nnls=scipy.optimize.nnls
energies = np.linspace(-1,1,100)
dataxx = []
dataxy = []
for energy in tqdm(energies):
    smatrix = kwant.smatrix(syst,energy,params = params)
    # NOTE(review): six leads are attached above, so conductance_matrix()
    # should be 6x6, yet this current vector has length 5 (and drives current
    # into lead 0 / out of lead 3) — verify the intended source/drain pattern,
    # e.g. (1,-1,0,0,0,0) for current from lead 0 to lead 1.
    R = nnls(smatrix.conductance_matrix(),np.array((1,0,0,-1,0)))[0]
    # Hall voltage: top probe minus bottom probe (indices assume the
    # attachment order 0=source,1=drain,2/3=top,4/5=bottom).
    dataxy.append(R[1]-R[4])
    # Longitudinal voltage drop between two probes on the same edge.
    dataxx.append(R[1]-R[2])
pyplot.figure()
pyplot.plot(energies,dataxx,energies,dataxy)
pyplot.show()
%%time
# (IPython cell magic above must stay on the first line of the cell.)
# Rebuild the discretized template for the KPM conductivity calculation.
a = 1
r = 30   # NOTE(review): only used by `circle`, which is never used below — verify
template = kwant.continuum.discretize(hamiltonian,grid = a)
lat = template.lattice
def circle(site):
    """True for sites inside the disk of radius ``r`` centred on the origin."""
    px, py = site.pos
    return px * px + py * py <= r * r
def rect(site):
    """True for sites inside the L-by-W rectangle anchored at the origin."""
    px, py = site.pos
    if py < 0 or py >= W:
        return False
    return 0 <= px < L
# Closed (lead-less) rectangular system for Kubo-Bastin conductivity via KPM.
syst = kwant.Builder()
syst.fill(template,rect,(0,0))
# Remove sites with a single bond; harmless no-op for a full rectangle.
syst.eradicate_dangling()
kwant.plot(syst)
syst_without_lead = syst.finalized()
# NOTE(review): `where` is defined but never used below.
where = lambda s : np.linalg.norm(s.pos)<1.1
# Local (per-site) vector factories for the stochastic KPM trace.
s_factory = kwant.kpm.LocalVectors(syst_without_lead)
cond_xx = kwant.kpm.conductivity(syst_without_lead, alpha = 'x',beta = 'x',params=params)
s_factory = kwant.kpm.LocalVectors(syst_without_lead)
cond_xy = kwant.kpm.conductivity(syst_without_lead, alpha = 'x',beta = 'y',params=params)
energies = np.linspace(-2,2,200)
#energies = cond_xx.energies
# Evaluate at near-zero temperature, then normalize by area to get
# conductivity per unit area.
cond_array_xx = np.array([cond_xx(e,temperature = 1E-6) for e in energies])
cond_array_xy = np.array([cond_xy(e,temperature = 1E-6) for e in energies])
cond_array_xx/=W*L
cond_array_xy/=W*L
# NOTE(review): this overwrites `params` with keys (r0, delta, J) that do not
# match the Hamiltonian's parameters (vf, m0, m1, Mz) — looks like leftover
# tutorial code; verify before reusing `params` later.
params = dict(r0=20, delta=10, J=1)
# NOTE(review): `syst` here is an unfinalized Builder with no leads attached,
# so kwant.wave_function is expected to fail; `wf`/`psi` are never used.
wf = kwant.wave_function(syst, energy=-1, params=params)
psi = wf(0)[0]
fig,ax = pyplot.subplots()
# NOTE(review): rebinding the name `plt` shadows any earlier
# `matplotlib.pyplot as plt` alias from previous cells.
plt = ax.plot(energies,np.abs(cond_array_xx),energies,np.abs(cond_array_xy))
ax.set_xlim([-1,1])
fig
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.